max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
extract_aeo.py | natasiaengeline/python_learning | 0 | 6617251 | import PyPDF2 #pip install PyPDF2
import os
import pandas as pd
import spacy
# Discussion Partner:
# <NAME> and Ryan (<NAME>)
# url = 'https://www.eia.gov/outlooks/aeo/pdf/aeo2019.pdf'
# url = 'https://www.eia.gov/outlooks/aeo/pdf/aeo2018.pdf'
# Sources
# Prof Levy notes and codes example
# https://www.dataquest.io/blog/tutorial-text-classification-in-python-using-spacy/
# https://www.kaggle.com/satishgunjal/tutorial-text-classification-using-spacy
# https://www.analyticsvidhya.com/blog/2017/04/natural-language-processing-made-easy-using-spacy-%E2%80%8Bin-python/
# https://course.spacy.io/en/chapter1
# https://applied-language-technology.readthedocs.io/en/latest/notebooks/part_iii/02_pattern_matching.html
# https://stackoverflow.com/questions/4843158/how-to-check-if-a-string-is-a-substring-of-items-in-a-list-of-strings
# https://spacy.io/api/doc
# https://realpython.com/natural-language-processing-spacy-python/
# https://www.delftstack.com/howto/python/python-max-value-in-list/
# https://stackoverflow.com/questions/268272/getting-key-with-maximum-value-in-dictionary
# https://stackoverflow.com/questions/55616470/adding-elements-to-an-empty-dataframe-in-pandas
# Logic of the Code
# First I read the pdf document and parse it as text file.
# In the count function I first pass the text to the nlp feature from spacy.
# Create sentence list and then find sentences that only related to the
# energy type that we wanted to find and variable item yang we look for
# (e.g price). I did search only one energy and one variable each time to
# minimize error in searching the parent and child. After that assign the
# variable that we seek as token and search the parent and child of that token
# if the parent and child contains up/down/unchaged/unavailable then add +1
# to counter. Find the maximum counter and assign value to the table.
# Downside of this method is when sentence is too complicated, it is not going
# to give us perfect answer but it is good enough. Ancestor and Child method
# is quiet robust and minimize us to use more complicated negation.
# Matching function in Spacy but it is not robust enough for all sentences.
path = r'C:\Users\engel\Documents\GitHub\homework-2-natasiaengeline-1'
aeo2018 = 'aeo2018.pdf'
aeo2019 = 'aeo2019.pdf'
nlp = spacy.load("en_core_web_sm")
# parse pdf into text
def pdf_to_text(fname):
pdf = PyPDF2.PdfFileReader(os.path.join(path, fname))
text = []
for pnum in range(4, 17):
page = pdf.getPage(pnum)
text.append(page.extractText())
full_text = '\n'.join(text)
full_text = full_text.lower().split()
full_text = ' '.join(full_text)
full_text = full_text.replace('ł', '').replace('š','')
return full_text
aeo2018_text = pdf_to_text(aeo2018)
aeo2019_text = pdf_to_text(aeo2019)
# create counter
def counter(text, energy_type, var_interest):
doc = nlp(text)
# create list of sentences
sents_list = []
for sent in doc.sents:
sents_list.append(sent.text)
# filter sentences to match only the words that we are looking for
find_match =[*energy_type, *var_interest]
sent_match = ([sent for sent in sents_list if
all(word in sent for word in find_match)])
# initial counter
up_counter = 0
down_counter = 0
flat_counter = 0
uncertain_counter = 0
# find token index
if sent_match:
for index, sent in enumerate(sent_match):
doc = nlp(sent_match[index])
index_num = ([token.i for token in doc if any
(word in token.text for word in var_interest)])
# counting based on the parent and child
count = []
for token_index in index_num:
token = doc[token_index]
variable_ancestors = list(token.ancestors)
variable_children = list(token.children)
up = ['raise', 'increase', 'up', 'growth', 'grow', 'higher']
down = ['lower', 'decrease', 'down', 'decline']
unchanged = ['unchanged', 'same', 'flat']
uncertain = ['uncertainity']
# count for ancestor
for ancestor in variable_ancestors:
if ancestor.text in up:
up_counter += 1
elif ancestor.text in down:
down_counter += 1
elif ancestor.text in unchanged:
flat_counter += 1
elif ancestor.text in uncertain:
uncertain_counter += 1
# count for children
for children in variable_children:
if children.text in up:
up_counter += 1
elif children.text in down:
down_counter += 1
elif children.text in unchanged:
flat_counter += 1
elif children.text in uncertain:
uncertain_counter += 1
# total counter for ancestor and child
count = [up_counter, down_counter, flat_counter, uncertain_counter]
# if counter is empty return no data available
if all(value == 0 for value in count):
count = 'No Data Available'
else:
identifier = {0:'Increase', 1:'Decrease', 2:'Unchanged',
3:'Uncertain'}
max_index = count.index(max(count))
count = [identifier[max_index]]
count = ''.join(count)
return count
# if no sentence match, return no data is available
if not sent_match:
count = 'No Data Available'
return count
# function create the data frame
def aeo_table(text, energy_type):
# create data frame
col_names = ['energy type', 'price', 'emission', 'production',
'export', 'import']
df= pd.DataFrame([],columns = col_names)
for energy in energy_type:
price = counter(text, [energy], ['price'])
emission = counter(text, [energy], ['emission'])
production = counter(text, [energy], ['production'])
energy_export = counter(text, [energy], ['export'])
energy_import = counter(text, [energy], ['import'])
df_tmp = pd.DataFrame([(energy, price, emission, production,
energy_export, energy_import)],
columns = col_names)
df = df.append(df_tmp).reset_index(drop=True)
return df
# define the energies and variable information
energies = ['coal', 'nuclear', 'wind', 'solar', 'oil']
aeo_table_2018 = aeo_table(aeo2018_text, energies)
aeo_table_2019 = aeo_table(aeo2019_text, energies)
# write to CSV for AEO 2018
(aeo_table_2018.
to_csv(os.path.join(path,
r'AEO Table 2018.csv'), encoding='utf-8', index=False))
# write to CSV for AEO 2019
(aeo_table_2019.
to_csv(os.path.join(path,
r'AEO Table 2019.csv'), encoding='utf-8', index=False))
| import PyPDF2 #pip install PyPDF2
import os
import pandas as pd
import spacy
# Discussion Partner:
# <NAME> and Ryan (<NAME>)
# url = 'https://www.eia.gov/outlooks/aeo/pdf/aeo2019.pdf'
# url = 'https://www.eia.gov/outlooks/aeo/pdf/aeo2018.pdf'
# Sources
# Prof Levy notes and codes example
# https://www.dataquest.io/blog/tutorial-text-classification-in-python-using-spacy/
# https://www.kaggle.com/satishgunjal/tutorial-text-classification-using-spacy
# https://www.analyticsvidhya.com/blog/2017/04/natural-language-processing-made-easy-using-spacy-%E2%80%8Bin-python/
# https://course.spacy.io/en/chapter1
# https://applied-language-technology.readthedocs.io/en/latest/notebooks/part_iii/02_pattern_matching.html
# https://stackoverflow.com/questions/4843158/how-to-check-if-a-string-is-a-substring-of-items-in-a-list-of-strings
# https://spacy.io/api/doc
# https://realpython.com/natural-language-processing-spacy-python/
# https://www.delftstack.com/howto/python/python-max-value-in-list/
# https://stackoverflow.com/questions/268272/getting-key-with-maximum-value-in-dictionary
# https://stackoverflow.com/questions/55616470/adding-elements-to-an-empty-dataframe-in-pandas
# Logic of the Code
# First I read the pdf document and parse it as text file.
# In the count function I first pass the text to the nlp feature from spacy.
# Create sentence list and then find sentences that only related to the
# energy type that we wanted to find and variable item yang we look for
# (e.g price). I did search only one energy and one variable each time to
# minimize error in searching the parent and child. After that assign the
# variable that we seek as token and search the parent and child of that token
# if the parent and child contains up/down/unchaged/unavailable then add +1
# to counter. Find the maximum counter and assign value to the table.
# Downside of this method is when sentence is too complicated, it is not going
# to give us perfect answer but it is good enough. Ancestor and Child method
# is quiet robust and minimize us to use more complicated negation.
# Matching function in Spacy but it is not robust enough for all sentences.
path = r'C:\Users\engel\Documents\GitHub\homework-2-natasiaengeline-1'
aeo2018 = 'aeo2018.pdf'
aeo2019 = 'aeo2019.pdf'
nlp = spacy.load("en_core_web_sm")
# parse pdf into text
def pdf_to_text(fname):
pdf = PyPDF2.PdfFileReader(os.path.join(path, fname))
text = []
for pnum in range(4, 17):
page = pdf.getPage(pnum)
text.append(page.extractText())
full_text = '\n'.join(text)
full_text = full_text.lower().split()
full_text = ' '.join(full_text)
full_text = full_text.replace('ł', '').replace('š','')
return full_text
aeo2018_text = pdf_to_text(aeo2018)
aeo2019_text = pdf_to_text(aeo2019)
# create counter
def counter(text, energy_type, var_interest):
doc = nlp(text)
# create list of sentences
sents_list = []
for sent in doc.sents:
sents_list.append(sent.text)
# filter sentences to match only the words that we are looking for
find_match =[*energy_type, *var_interest]
sent_match = ([sent for sent in sents_list if
all(word in sent for word in find_match)])
# initial counter
up_counter = 0
down_counter = 0
flat_counter = 0
uncertain_counter = 0
# find token index
if sent_match:
for index, sent in enumerate(sent_match):
doc = nlp(sent_match[index])
index_num = ([token.i for token in doc if any
(word in token.text for word in var_interest)])
# counting based on the parent and child
count = []
for token_index in index_num:
token = doc[token_index]
variable_ancestors = list(token.ancestors)
variable_children = list(token.children)
up = ['raise', 'increase', 'up', 'growth', 'grow', 'higher']
down = ['lower', 'decrease', 'down', 'decline']
unchanged = ['unchanged', 'same', 'flat']
uncertain = ['uncertainity']
# count for ancestor
for ancestor in variable_ancestors:
if ancestor.text in up:
up_counter += 1
elif ancestor.text in down:
down_counter += 1
elif ancestor.text in unchanged:
flat_counter += 1
elif ancestor.text in uncertain:
uncertain_counter += 1
# count for children
for children in variable_children:
if children.text in up:
up_counter += 1
elif children.text in down:
down_counter += 1
elif children.text in unchanged:
flat_counter += 1
elif children.text in uncertain:
uncertain_counter += 1
# total counter for ancestor and child
count = [up_counter, down_counter, flat_counter, uncertain_counter]
# if counter is empty return no data available
if all(value == 0 for value in count):
count = 'No Data Available'
else:
identifier = {0:'Increase', 1:'Decrease', 2:'Unchanged',
3:'Uncertain'}
max_index = count.index(max(count))
count = [identifier[max_index]]
count = ''.join(count)
return count
# if no sentence match, return no data is available
if not sent_match:
count = 'No Data Available'
return count
# function create the data frame
def aeo_table(text, energy_type):
# create data frame
col_names = ['energy type', 'price', 'emission', 'production',
'export', 'import']
df= pd.DataFrame([],columns = col_names)
for energy in energy_type:
price = counter(text, [energy], ['price'])
emission = counter(text, [energy], ['emission'])
production = counter(text, [energy], ['production'])
energy_export = counter(text, [energy], ['export'])
energy_import = counter(text, [energy], ['import'])
df_tmp = pd.DataFrame([(energy, price, emission, production,
energy_export, energy_import)],
columns = col_names)
df = df.append(df_tmp).reset_index(drop=True)
return df
# define the energies and variable information
energies = ['coal', 'nuclear', 'wind', 'solar', 'oil']
aeo_table_2018 = aeo_table(aeo2018_text, energies)
aeo_table_2019 = aeo_table(aeo2019_text, energies)
# write to CSV for AEO 2018
(aeo_table_2018.
to_csv(os.path.join(path,
r'AEO Table 2018.csv'), encoding='utf-8', index=False))
# write to CSV for AEO 2019
(aeo_table_2019.
to_csv(os.path.join(path,
r'AEO Table 2019.csv'), encoding='utf-8', index=False))
| en | 0.777324 | #pip install PyPDF2 # Discussion Partner: # <NAME> and Ryan (<NAME>) # url = 'https://www.eia.gov/outlooks/aeo/pdf/aeo2019.pdf' # url = 'https://www.eia.gov/outlooks/aeo/pdf/aeo2018.pdf' # Sources # Prof Levy notes and codes example # https://www.dataquest.io/blog/tutorial-text-classification-in-python-using-spacy/ # https://www.kaggle.com/satishgunjal/tutorial-text-classification-using-spacy # https://www.analyticsvidhya.com/blog/2017/04/natural-language-processing-made-easy-using-spacy-%E2%80%8Bin-python/ # https://course.spacy.io/en/chapter1 # https://applied-language-technology.readthedocs.io/en/latest/notebooks/part_iii/02_pattern_matching.html # https://stackoverflow.com/questions/4843158/how-to-check-if-a-string-is-a-substring-of-items-in-a-list-of-strings # https://spacy.io/api/doc # https://realpython.com/natural-language-processing-spacy-python/ # https://www.delftstack.com/howto/python/python-max-value-in-list/ # https://stackoverflow.com/questions/268272/getting-key-with-maximum-value-in-dictionary # https://stackoverflow.com/questions/55616470/adding-elements-to-an-empty-dataframe-in-pandas # Logic of the Code # First I read the pdf document and parse it as text file. # In the count function I first pass the text to the nlp feature from spacy. # Create sentence list and then find sentences that only related to the # energy type that we wanted to find and variable item yang we look for # (e.g price). I did search only one energy and one variable each time to # minimize error in searching the parent and child. After that assign the # variable that we seek as token and search the parent and child of that token # if the parent and child contains up/down/unchaged/unavailable then add +1 # to counter. Find the maximum counter and assign value to the table. # Downside of this method is when sentence is too complicated, it is not going # to give us perfect answer but it is good enough. 
Ancestor and Child method # is quiet robust and minimize us to use more complicated negation. # Matching function in Spacy but it is not robust enough for all sentences. # parse pdf into text # create counter # create list of sentences # filter sentences to match only the words that we are looking for # initial counter # find token index # counting based on the parent and child # count for ancestor # count for children # total counter for ancestor and child # if counter is empty return no data available # if no sentence match, return no data is available # function create the data frame # create data frame # define the energies and variable information # write to CSV for AEO 2018 # write to CSV for AEO 2019 | 2.944422 | 3 |
tests/filters/test_coffee.py | localmed/django-assetfiles | 0 | 6617252 | <reponame>localmed/django-assetfiles
from __future__ import unicode_literals
from nose.tools import *
from assetfiles import settings
from assetfiles.filters.coffee import CoffeeScriptFilterError
from tests.base import AssetfilesTestCase, filter
class TestCoffeeScriptFilter(AssetfilesTestCase):
def setUp(self):
super(TestCoffeeScriptFilter, self).setUp()
self.original_coffee_options = settings.COFFEE_SCRIPT_OPTIONS
def tearDown(self):
super(TestCoffeeScriptFilter, self).tearDown()
settings.COFFEE_SCRIPT_OPTIONS = self.original_coffee_options
def test_processes_coffee_files(self):
self.mkfile('static/js/simple.coffee', 'a = foo: "1#{2}3"')
assert_in(b'foo: "1" + 2 + "3"', filter('js/simple.js'))
def test_uses_coffee_script_options(self):
settings.COFFEE_SCRIPT_OPTIONS = {'bare': True}
self.mkfile('static/js/simple.coffee', 'a = foo: "1#{2}3"')
assert_not_in(b'(function() {', filter('js/simple.js'))
def test_raises_syntax_error(self):
with assert_raises(CoffeeScriptFilterError):
self.mkfile('static/js/simple.coffee', '\n\n\n\na = foo: "1#{2}3')
filter('js/simple.js')
| from __future__ import unicode_literals
from nose.tools import *
from assetfiles import settings
from assetfiles.filters.coffee import CoffeeScriptFilterError
from tests.base import AssetfilesTestCase, filter
class TestCoffeeScriptFilter(AssetfilesTestCase):
def setUp(self):
super(TestCoffeeScriptFilter, self).setUp()
self.original_coffee_options = settings.COFFEE_SCRIPT_OPTIONS
def tearDown(self):
super(TestCoffeeScriptFilter, self).tearDown()
settings.COFFEE_SCRIPT_OPTIONS = self.original_coffee_options
def test_processes_coffee_files(self):
self.mkfile('static/js/simple.coffee', 'a = foo: "1#{2}3"')
assert_in(b'foo: "1" + 2 + "3"', filter('js/simple.js'))
def test_uses_coffee_script_options(self):
settings.COFFEE_SCRIPT_OPTIONS = {'bare': True}
self.mkfile('static/js/simple.coffee', 'a = foo: "1#{2}3"')
assert_not_in(b'(function() {', filter('js/simple.js'))
def test_raises_syntax_error(self):
with assert_raises(CoffeeScriptFilterError):
self.mkfile('static/js/simple.coffee', '\n\n\n\na = foo: "1#{2}3')
filter('js/simple.js') | zh | 0.618 | #{2}3"') #{2}3"') #{2}3') | 2.076368 | 2 |
pipeline/data.py | ownzonefeng/Graph-based-text-representations | 1 | 6617253 | import os
from multiprocessing import Pool
from functools import partial
from collections import Counter
from typing import Union, List
import json
from collections.abc import Iterable
import torch
from torch.utils.data import TensorDataset
from torchtext.data import get_tokenizer
import spacy
tokenizer_collection = {'en_core_web_trf': get_tokenizer('spacy', language='en_core_web_trf'),
'basic_english': get_tokenizer("basic_english", language='en')
}
stop_words = spacy.load('en_core_web_trf').Defaults.stop_words
class SkipGram(TensorDataset):
def __init__(self, corpus_files: Union[str, List[str]], language: str='en_core_web_trf', min_freq=12, num_ns=5, use_cache=True, reload=False):
r"""Build a Skip-gram style dataset
This class is the child of torch.utils.data.TensorDataset.
Parameters
----------
corpus_files : Union[str, List[str]]
The paths to the corpus files. glob-styled input is acceptable.
language : str
Choose a tokenizer. Available: 'en_core_web_trf', 'basic_english'.
min_freq : int
Minimum frequency of appearing to keep that word in the dictionary instead of treating it as <UNK>.
num_ns : int
The number of negative sampling.
use_cache : bool
Whether to cache processed data or use existing cache in the current folder.
reload : bool
Whether to force reload and re-process the corpus file. Ignore existing cache.
Returns
----------
torch.utils.data.TensorDataset
"""
self.language = language
cached_list = ['cached_dictionary.json', 'cached_target.pt', 'cached_context.pt', 'cached_label.pt', 'cached_id_to_freq.pt', 'cached_id_to_prob.pt']
if use_cache and not reload and all([os.path.exists(i) for i in cached_list]):
print("Load from cache")
self.target = torch.load('cached_target.pt')
self.context = torch.load('cached_context.pt')
self.label = torch.load('cached_label.pt')
self.id_to_freq = torch.load('cached_id_to_freq.pt')
self.id_to_prob = torch.load('cached_id_to_prob.pt')
with open('cached_dictionary.json', 'r') as f:
self.info = json.load(f)
self.word_to_id = self.info['dictionary']
self.id_to_word = {int(k): v for k, v in self.info['index'].items()}
else:
print("Create dataset from corpus")
self.cnt = SkipGram.build_counter(corpus_files, language)
self.word_to_id, self.id_to_word, self.id_to_freq, self.id_to_prob = SkipGram.compile_vocab(self.cnt, min_freq)
self.info = {}
self.info['dict_size'] = len(self.id_to_word)
self.info['dictionary'] = self.word_to_id
self.info['index'] = self.id_to_word
self.text_ids = SkipGram.convert_text_to_ids(corpus_files, self.word_to_id, language)
self.subsampled = False
pos_samples = self.generate_pos_sample()
neg_samples = self.generate_neg_sample(pos_samples[:, 1], num_ns)
all_samples = torch.cat([pos_samples, neg_samples], dim=1)
self.target = all_samples[:, 0]
self.context = all_samples[:, 1:]
self.label = torch.zeros_like(self.context)
self.label[:, 0] = 1
if use_cache:
torch.save(self.target, 'cached_target.pt')
torch.save(self.context, 'cached_context.pt')
torch.save(self.label, 'cached_label.pt')
torch.save(self.id_to_freq, 'cached_id_to_freq.pt')
torch.save(self.id_to_prob, 'cached_id_to_prob.pt')
with open('cached_dictionary.json', 'w') as f:
json.dump(self.info, f, indent=2)
self.dict_size = self.info['dict_size']
super(SkipGram, self).__init__(self.target.unsqueeze(1), self.context, self.label)
def subsampling(self):
r"""Call to subsample data"""
if not self.subsampled:
rand_var = torch.rand(self.text_ids.shape)
prob = self.id_to_prob[self.text_ids]
self.text_ids[rand_var < prob] = 0
self.subsampled = True
def generate_pos_sample(self, before=2, after=2, subsampling=True):
r"""Generate positive examples
A positive example is a pair of target word and context word.
Parameters
----------
before : int
The number of words before target word that are context words.
after : int
The number of words after target word that are context words.
subsampling: bool
Whether to subsample words.
Returns
----------
torch.Tensor : (N, 2) a row is a pair of words.
"""
if subsampling:
self.subsampling()
text = self.text_ids
text = text[text != 0]
length = len(text) - before - after
data = torch.zeros((length, before+after, 2), dtype=torch.int64)
if after == 0:
data[:,:,0] = text[before:,None]
else:
data[:,:,0] = text[before:-after,None]
for j in range(before):
data[:,j,1] = text[j:length+j]
for j in range(after):
data[:,before+j,1] = text[before+1+j:length+before+1+j]
return data.reshape(-1, 2)
def generate_neg_sample(self, context, num_ns):
r"""Generate negative examples
Parameters
----------
context : torch.Tensor
Context word vector (N, ).
num_ns : int
The number of negative samples per positive sample.
Returns
----------
torch.Tensor : (N, num_ns)
"""
context = context.unsqueeze(1)
number_samples = context.shape[0]
weights = torch.pow(self.id_to_freq, 0.75)
noise = torch.multinomial(weights, number_samples * num_ns, True).view(number_samples, num_ns)
for _ in range(2):
reptitive_select = torch.sum(context == noise, dim=1) >= 1
noise_new = torch.multinomial(weights, number_samples * num_ns, True).view(number_samples, num_ns)
noise[reptitive_select] = noise_new[reptitive_select]
return noise
def __call__(self, tokens):
r"""Tokenize and convert tokens to ids"""
if isinstance(tokens, str):
tokens = tokens.lower()
tokenizer = tokenizer_collection[self.language]
ids = [self.word_to_id.get(tok, 0) for tok in tokenizer(tokens.strip())]
return ids
elif isinstance(tokens, Iterable):
ids = []
for i in tokens:
ids.extend(self(str(i).lower()))
return ids
else:
raise TypeError(f'{type(tokens)} is not supported')
def lookup_words(self, ids: Union[int, List[int]]):
r"""Find words based on ids"""
if isinstance(ids, int):
return [self.id_to_word.get(ids, "<unk>")]
elif isinstance(ids, Iterable):
words = []
for i in ids:
words.extend(self.lookup_words(int(i)))
return words
else:
raise TypeError(f'{type(ids)} is not supported')
@staticmethod
def subsample_probability(frequency, sampling_factor):
r"""
Generates a word rank-based probabilistic sampling table
More information: https://www.tensorflow.org/api_docs/python/tf/keras/preprocessing/sequence/make_sampling_table
"""
frequency = frequency / torch.sum(frequency)
prob = (torch.min(torch.FloatTensor([1.0]), torch.sqrt(frequency / sampling_factor) / (frequency / sampling_factor)))
prob = 1 - prob # 1 - prob is rejection probability
return torch.clamp(prob, 0, 1)
@staticmethod
def _build_counter_process(path, language='en_core_web_trf'):
r"""Count word's frequency"""
tokenizer = tokenizer_collection[language]
cnt = Counter()
with open(path,'r',encoding='utf-8') as f:
for line in f:
tokens = tokenizer(line.strip())
cnt.update(tokens)
return cnt
@staticmethod
def build_counter(files, language='en_core_web_trf'):
r"""Count word's frequency from multiple files"""
if isinstance(files, str):
return SkipGram._build_counter_process(files, language)
n = len(files)
num_worker = min(12, n, os.cpu_count())
cnt = Counter()
process_func = partial(SkipGram._build_counter_process, language=language)
with Pool(num_worker) as p:
for curr in p.imap(process_func, files):
cnt += curr
return cnt
@staticmethod
def compile_vocab(counter, min_freq=12, threshold=1e-5):
r"""Compile the counter (collections.Counter) to get vocabulary and related informantion
Parameters
----------
counter : collections.Counter
Mapping word to frequency.
min_freq : int
Minimum frequency of appearing to keep that word in the dictionary instead of treating it as <UNK>.
threshold : float
Subsampling factor. check: subsample_probability(frequency, sampling_factor).
Returns
----------
dict : Mapping word to id.
dict : Mapping id to word.
torch.Tensor : Mapping id to frequency.
torch.Tensor : Mapping id to subsampling table.
"""
word_to_id = {"<unk>":0}
id_to_word = {0:"<unk>"}
id_to_freq = [1]
id = 1
for word, freq in counter.items():
if word == "<unk>":
continue
elif freq > min_freq and word not in stop_words:
word_to_id[word]=id
id_to_word[id] = word
id_to_freq.append(freq)
id += 1
else:
word_to_id[word] = 0
id_to_freq = torch.tensor(id_to_freq, dtype=torch.float32)
id_to_prob = SkipGram.subsample_probability(id_to_freq, threshold)
return word_to_id, id_to_word, id_to_freq, id_to_prob
@staticmethod
def _text_to_id(path, word_to_id, language: str='en_core_web_trf'):
r"""Convert text into ids"""
tokenizer = tokenizer_collection[language]
ids = []
with open(path,'r',encoding='utf-8') as f:
for line in f:
tokens = tokenizer(line.strip())
for tok in tokens:
ids.append(word_to_id.get(tok, 0))
return ids
@staticmethod
def convert_text_to_ids(files, word_to_id, language: str='en_core_web_trf'):
r"""Convert text into ids from multiple files"""
if isinstance(files, str):
ids = SkipGram._text_to_id(files, word_to_id, language)
return torch.LongTensor(ids)
n = len(files)
num_worker = min(12, n, os.cpu_count())
all_ids = []
process_func = partial(SkipGram._text_to_id, word_to_id=word_to_id, language=language)
with Pool(num_worker) as p:
for ids in p.imap(process_func, files):
all_ids.extend(ids)
return torch.LongTensor(all_ids)
| import os
from multiprocessing import Pool
from functools import partial
from collections import Counter
from typing import Union, List
import json
from collections.abc import Iterable
import torch
from torch.utils.data import TensorDataset
from torchtext.data import get_tokenizer
import spacy
tokenizer_collection = {'en_core_web_trf': get_tokenizer('spacy', language='en_core_web_trf'),
'basic_english': get_tokenizer("basic_english", language='en')
}
stop_words = spacy.load('en_core_web_trf').Defaults.stop_words
class SkipGram(TensorDataset):
def __init__(self, corpus_files: Union[str, List[str]], language: str='en_core_web_trf', min_freq=12, num_ns=5, use_cache=True, reload=False):
r"""Build a Skip-gram style dataset
This class is the child of torch.utils.data.TensorDataset.
Parameters
----------
corpus_files : Union[str, List[str]]
The paths to the corpus files. glob-styled input is acceptable.
language : str
Choose a tokenizer. Available: 'en_core_web_trf', 'basic_english'.
min_freq : int
Minimum frequency of appearing to keep that word in the dictionary instead of treating it as <UNK>.
num_ns : int
The number of negative sampling.
use_cache : bool
Whether to cache processed data or use existing cache in the current folder.
reload : bool
Whether to force reload and re-process the corpus file. Ignore existing cache.
Returns
----------
torch.utils.data.TensorDataset
"""
self.language = language
cached_list = ['cached_dictionary.json', 'cached_target.pt', 'cached_context.pt', 'cached_label.pt', 'cached_id_to_freq.pt', 'cached_id_to_prob.pt']
if use_cache and not reload and all([os.path.exists(i) for i in cached_list]):
print("Load from cache")
self.target = torch.load('cached_target.pt')
self.context = torch.load('cached_context.pt')
self.label = torch.load('cached_label.pt')
self.id_to_freq = torch.load('cached_id_to_freq.pt')
self.id_to_prob = torch.load('cached_id_to_prob.pt')
with open('cached_dictionary.json', 'r') as f:
self.info = json.load(f)
self.word_to_id = self.info['dictionary']
self.id_to_word = {int(k): v for k, v in self.info['index'].items()}
else:
print("Create dataset from corpus")
self.cnt = SkipGram.build_counter(corpus_files, language)
self.word_to_id, self.id_to_word, self.id_to_freq, self.id_to_prob = SkipGram.compile_vocab(self.cnt, min_freq)
self.info = {}
self.info['dict_size'] = len(self.id_to_word)
self.info['dictionary'] = self.word_to_id
self.info['index'] = self.id_to_word
self.text_ids = SkipGram.convert_text_to_ids(corpus_files, self.word_to_id, language)
self.subsampled = False
pos_samples = self.generate_pos_sample()
neg_samples = self.generate_neg_sample(pos_samples[:, 1], num_ns)
all_samples = torch.cat([pos_samples, neg_samples], dim=1)
self.target = all_samples[:, 0]
self.context = all_samples[:, 1:]
self.label = torch.zeros_like(self.context)
self.label[:, 0] = 1
if use_cache:
torch.save(self.target, 'cached_target.pt')
torch.save(self.context, 'cached_context.pt')
torch.save(self.label, 'cached_label.pt')
torch.save(self.id_to_freq, 'cached_id_to_freq.pt')
torch.save(self.id_to_prob, 'cached_id_to_prob.pt')
with open('cached_dictionary.json', 'w') as f:
json.dump(self.info, f, indent=2)
self.dict_size = self.info['dict_size']
super(SkipGram, self).__init__(self.target.unsqueeze(1), self.context, self.label)
def subsampling(self):
r"""Call to subsample data"""
if not self.subsampled:
rand_var = torch.rand(self.text_ids.shape)
prob = self.id_to_prob[self.text_ids]
self.text_ids[rand_var < prob] = 0
self.subsampled = True
def generate_pos_sample(self, before=2, after=2, subsampling=True):
r"""Generate positive examples
A positive example is a pair of target word and context word.
Parameters
----------
before : int
The number of words before target word that are context words.
after : int
The number of words after target word that are context words.
subsampling: bool
Whether to subsample words.
Returns
----------
torch.Tensor : (N, 2) a row is a pair of words.
"""
if subsampling:
self.subsampling()
text = self.text_ids
text = text[text != 0]
length = len(text) - before - after
data = torch.zeros((length, before+after, 2), dtype=torch.int64)
if after == 0:
data[:,:,0] = text[before:,None]
else:
data[:,:,0] = text[before:-after,None]
for j in range(before):
data[:,j,1] = text[j:length+j]
for j in range(after):
data[:,before+j,1] = text[before+1+j:length+before+1+j]
return data.reshape(-1, 2)
def generate_neg_sample(self, context, num_ns):
r"""Generate negative examples
Parameters
----------
context : torch.Tensor
Context word vector (N, ).
num_ns : int
The number of negative samples per positive sample.
Returns
----------
torch.Tensor : (N, num_ns)
"""
context = context.unsqueeze(1)
number_samples = context.shape[0]
weights = torch.pow(self.id_to_freq, 0.75)
noise = torch.multinomial(weights, number_samples * num_ns, True).view(number_samples, num_ns)
for _ in range(2):
reptitive_select = torch.sum(context == noise, dim=1) >= 1
noise_new = torch.multinomial(weights, number_samples * num_ns, True).view(number_samples, num_ns)
noise[reptitive_select] = noise_new[reptitive_select]
return noise
def __call__(self, tokens):
r"""Tokenize and convert tokens to ids"""
if isinstance(tokens, str):
tokens = tokens.lower()
tokenizer = tokenizer_collection[self.language]
ids = [self.word_to_id.get(tok, 0) for tok in tokenizer(tokens.strip())]
return ids
elif isinstance(tokens, Iterable):
ids = []
for i in tokens:
ids.extend(self(str(i).lower()))
return ids
else:
raise TypeError(f'{type(tokens)} is not supported')
def lookup_words(self, ids: Union[int, List[int]]):
    r"""Map an id (or an iterable of ids) back to words; unknown ids -> "<unk>"."""
    if isinstance(ids, int):
        return [self.id_to_word.get(ids, "<unk>")]
    if isinstance(ids, Iterable):
        # Recursively resolve each element (cast to int for tensor items).
        resolved = []
        for item in ids:
            resolved.extend(self.lookup_words(int(item)))
        return resolved
    raise TypeError(f'{type(ids)} is not supported')
@staticmethod
def subsample_probability(frequency, sampling_factor):
    r"""
    Build a frequency-based *rejection*-probability table for subsampling.
    More information: https://www.tensorflow.org/api_docs/python/tf/keras/preprocessing/sequence/make_sampling_table
    """
    rel_freq = frequency / torch.sum(frequency)
    ratio = rel_freq / sampling_factor
    # Keep-probability capped at 1; frequent words get small keep values.
    keep = torch.min(torch.FloatTensor([1.0]), torch.sqrt(ratio) / ratio)
    # Convert to rejection probability and clamp into [0, 1].
    return torch.clamp(1 - keep, 0, 1)
@staticmethod
def _build_counter_process(path, language='en_core_web_trf'):
    r"""Count token frequencies in a single text file."""
    tokenize = tokenizer_collection[language]
    counter = Counter()
    with open(path, 'r', encoding='utf-8') as handle:
        for line in handle:
            counter.update(tokenize(line.strip()))
    return counter
@staticmethod
def build_counter(files, language='en_core_web_trf'):
    r"""Count token frequencies across one file or many (parallel over files)."""
    if isinstance(files, str):
        # Single path: no process pool needed.
        return SkipGram._build_counter_process(files, language)
    workers = min(12, len(files), os.cpu_count())
    total = Counter()
    job = partial(SkipGram._build_counter_process, language=language)
    with Pool(workers) as pool:
        for file_count in pool.imap(job, files):
            total += file_count
    return total
@staticmethod
def compile_vocab(counter, min_freq=12, threshold=1e-5):
    r"""Compile a collections.Counter into vocabulary lookup tables.

    Parameters
    ----------
    counter : collections.Counter
        Mapping word to frequency.
    min_freq : int
        Words must appear strictly more than this often (and not be stop
        words) to receive their own id; everything else maps to <unk> (0).
    threshold : float
        Subsampling factor; see subsample_probability().

    Returns
    ----------
    dict : Mapping word to id.
    dict : Mapping id to word.
    torch.Tensor : Mapping id to frequency.
    torch.Tensor : Mapping id to subsampling (rejection) probability.
    """
    word_to_id = {"<unk>": 0}
    id_to_word = {0: "<unk>"}
    frequencies = [1]
    next_id = 1
    for word, freq in counter.items():
        if word == "<unk>":
            continue
        if freq > min_freq and word not in stop_words:
            word_to_id[word] = next_id
            id_to_word[next_id] = word
            frequencies.append(freq)
            next_id += 1
        else:
            # Rare words and stop words all collapse onto the <unk> id.
            word_to_id[word] = 0
    id_to_freq = torch.tensor(frequencies, dtype=torch.float32)
    id_to_prob = SkipGram.subsample_probability(id_to_freq, threshold)
    return word_to_id, id_to_word, id_to_freq, id_to_prob
@staticmethod
def _text_to_id(path, word_to_id, language: str='en_core_web_trf'):
    r"""Translate one text file into a flat list of vocabulary ids."""
    tokenize = tokenizer_collection[language]
    id_list = []
    with open(path, 'r', encoding='utf-8') as handle:
        for line in handle:
            # Unknown tokens map to 0 (<unk>).
            id_list.extend(word_to_id.get(tok, 0) for tok in tokenize(line.strip()))
    return id_list
@staticmethod
def convert_text_to_ids(files, word_to_id, language: str='en_core_web_trf'):
    r"""Translate one or many text files into a single LongTensor of ids."""
    if isinstance(files, str):
        # Single path: convert directly without a process pool.
        return torch.LongTensor(SkipGram._text_to_id(files, word_to_id, language))
    workers = min(12, len(files), os.cpu_count())
    job = partial(SkipGram._text_to_id, word_to_id=word_to_id, language=language)
    merged = []
    with Pool(workers) as pool:
        for chunk in pool.imap(job, files):
            merged.extend(chunk)
    return torch.LongTensor(merged)
| en | 0.728264 | Build a Skip-gram style dataset This class is the child of torch.utils.data.TensorDataset. Parameters ---------- corpus_files : Union[str, List[str]] The paths to the corpus files. glob-styled input is acceptable. language : str Choose a tokenizer. Available: 'en_core_web_trf', 'basic_english'. min_freq : int Minimum frequency of appearing to keep that word in the dictionary instead of treating it as <UNK>. num_ns : int The number of negative sampling. use_cache : bool Whether to cache processed data or use existing cache in the current folder. reload : bool Whether to force reload and re-process the corpus file. Ignore existing cache. Returns ---------- torch.utils.data.TensorDataset Call to subsample data Generate positive examples A positive example is a pair of target word and context word. Parameters ---------- before : int The number of words before target word that are context words. after : int The number of words after target word that are context words. subsampling: bool Whether to subsample words. Returns ---------- torch.Tensor : (N, 2) a row is a pair of words. Generate negative examples Parameters ---------- context : torch.Tensor Context word vector (N, ). num_ns : int The number of negative samples per positive sample. Returns ---------- torch.Tensor : (N, num_ns) Tokenize and convert tokens to ids Find words based on ids Generates a word rank-based probabilistic sampling table More information: https://www.tensorflow.org/api_docs/python/tf/keras/preprocessing/sequence/make_sampling_table # 1 - prob is rejection probability Count word's frequency Count word's frequency from multiple files Compile the counter (collections.Counter) to get vocabulary and related informantion Parameters ---------- counter : collections.Counter Mapping word to frequency. min_freq : int Minimum frequency of appearing to keep that word in the dictionary instead of treating it as <UNK>. threshold : float Subsampling factor. 
check: subsample_probability(frequency, sampling_factor). Returns ---------- dict : Mapping word to id. dict : Mapping id to word. torch.Tensor : Mapping id to frequency. torch.Tensor : Mapping id to subsampling table. Convert text into ids Convert text into ids from multiple files | 2.433853 | 2 |
xpcs_webplot/cli.py | AZjk/xpcs_webplot | 0 | 6617254 | <gh_stars>0
"""
single hdf plot tool for globus
"""
from .plot_images import hdf2web_safe as hdf2web
import argparse
import logging
import sys
from .webplot_cli import local_plot
logging.basicConfig(level=logging.INFO,
format='%(asctime)s.%(msecs)03d %(name)s %(levelname)s | %(message)s',
datefmt='%m-%d %H:%M:%S')
def globus_plot():
    """Command-line entry for globus: parse args and render images for one HDF file."""
    def _str2bool(value):
        # argparse's type=bool is a trap: bool("False") is True, because any
        # non-empty string is truthy. Parse the usual spellings explicitly.
        if isinstance(value, bool):
            return value
        if value.lower() in ('true', 't', 'yes', 'y', '1'):
            return True
        if value.lower() in ('false', 'f', 'no', 'n', '0'):
            return False
        raise argparse.ArgumentTypeError('boolean value expected, got %r' % (value,))

    parser = argparse.ArgumentParser(description='Process some integers.')
    parser.add_argument('fname', metavar='FNAME', type=str,
                        help='the hdf filename')
    parser.add_argument('--target_dir', type=str, nargs='?', default='/tmp',
                        help='output directory')
    parser.add_argument('--num_img', type=int, nargs='?', default=4,
                        help='number of images per row')
    parser.add_argument('--dpi', type=int, nargs='?', default=240,
                        help=('dpi controls the image resolution.'
                              'For 4K/3840px, dpi is 240'))
    # BUG FIX: was type=bool, which made "--overwrite False" parse as True.
    parser.add_argument('--overwrite', type=_str2bool, nargs='?',
                        default=True, help='overwrite flag')
    kargs = vars(parser.parse_args())
    fname = kargs.pop('fname')
    # Everything except the positional filename is forwarded as keyword args.
    hdf2web(fname, image_only=True, **kargs)
def main():
    """Dispatch: first CLI arg '__local__' -> local 8IDI HTML plot, else globus images."""
    # local method for 8IDI, it will compile the image to html;
    # it reads the settings from .xpcs_webplot/default_setting.json
    # BUG FIX: the original checked len(sys.argv[0]) (length of the script
    # path string, which is almost always > 1) and then indexed sys.argv[1]
    # unconditionally, raising IndexError when run with no arguments.
    if len(sys.argv) > 1 and sys.argv[1] == '__local__':
        return local_plot()
    # for globus plot: only plot images
    else:
        return globus_plot()
if __name__ == '__main__':
sys.exit(main()) | """
single hdf plot tool for globus
"""
from .plot_images import hdf2web_safe as hdf2web
import argparse
import logging
import sys
from .webplot_cli import local_plot
logging.basicConfig(level=logging.INFO,
format='%(asctime)s.%(msecs)03d %(name)s %(levelname)s | %(message)s',
datefmt='%m-%d %H:%M:%S')
def globus_plot():
    """Command-line entry for globus: parse args and render images for one HDF file."""
    def _str2bool(value):
        # argparse's type=bool is a trap: bool("False") is True, because any
        # non-empty string is truthy. Parse the usual spellings explicitly.
        if isinstance(value, bool):
            return value
        if value.lower() in ('true', 't', 'yes', 'y', '1'):
            return True
        if value.lower() in ('false', 'f', 'no', 'n', '0'):
            return False
        raise argparse.ArgumentTypeError('boolean value expected, got %r' % (value,))

    parser = argparse.ArgumentParser(description='Process some integers.')
    parser.add_argument('fname', metavar='FNAME', type=str,
                        help='the hdf filename')
    parser.add_argument('--target_dir', type=str, nargs='?', default='/tmp',
                        help='output directory')
    parser.add_argument('--num_img', type=int, nargs='?', default=4,
                        help='number of images per row')
    parser.add_argument('--dpi', type=int, nargs='?', default=240,
                        help=('dpi controls the image resolution.'
                              'For 4K/3840px, dpi is 240'))
    # BUG FIX: was type=bool, which made "--overwrite False" parse as True.
    parser.add_argument('--overwrite', type=_str2bool, nargs='?',
                        default=True, help='overwrite flag')
    kargs = vars(parser.parse_args())
    fname = kargs.pop('fname')
    # Everything except the positional filename is forwarded as keyword args.
    hdf2web(fname, image_only=True, **kargs)
def main():
    """Dispatch: first CLI arg '__local__' -> local 8IDI HTML plot, else globus images."""
    # local method for 8IDI, it will compile the image to html;
    # it reads the settings from .xpcs_webplot/default_setting.json
    # BUG FIX: the original checked len(sys.argv[0]) (length of the script
    # path string, which is almost always > 1) and then indexed sys.argv[1]
    # unconditionally, raising IndexError when run with no arguments.
    if len(sys.argv) > 1 and sys.argv[1] == '__local__':
        return local_plot()
    # for globus plot: only plot images
    else:
        return globus_plot()
if __name__ == '__main__':
sys.exit(main()) | en | 0.676818 | single hdf plot tool for globus # local method for 8IDI, it will compile the image to html # it reads the setting from .xpcs_webplot/default_setting.json # for globus plot. only plot images; | 2.608314 | 3 |
make-numbers-from-dnom.py | tphinney/font-tools | 15 | 6617255 | <reponame>tphinney/font-tools
# This FontLab Studio 5 script takes a set of designed denominator glyphs with names ending
# in ".dnom" and creates new shifted glyphs referencing the originals as
# composites, with names ending ".numr", ".subs" and ".sups"
# NOTES:
# 1) Requires RoboFab to be installed.
# 2) Script uses constants for vertical offsets of numerator, denominator
# and subscript glyphs; you will have to adjust these for your values.
# 3) Script will use the italic angle in the font to decide on the horizontal
# shift required for any given vertical shift.
import math
# we need the Python math module to calculate the correct
# horizontal shifts for vertically-moved components in italic fonts.
from robofab.world import CurrentFont
#import string
f = CurrentFont()
fontAngle=fl.font.italic_angle
startCount = len(fl.font.glyphs)
def splitName(g=None):
    # BUG FIX: the original computed glyph.name.split('.') and discarded the
    # result, making the function a no-op. It now returns the dot-separated
    # name parts. The new optional parameter defaults to the module-level
    # `glyph`, so any existing zero-argument callers keep working.
    if g is None:
        g = glyph
    return g.name.split('.')
class Mode:
    """One derived-figure category: a name suffix plus the x/y offsets that
    composites in this category receive."""

    def __init__(self, glyphkind, y, x):
        # Store offsets first, then the suffix label (e.g. "numr", "sups").
        self.xShift = x
        self.yShift = y
        self.kind = glyphkind
# The three derived-figure categories: suffix, vertical shift, and a
# horizontal shift derived from the font's italic angle.
# NOTE(review): math.cos(italic_angle) looks doubly suspect -- the x-offset
# for a slanted vertical shift is normally y * tan(angle), and Python's
# math.cos expects radians while the script's own header comment implies the
# italic angle drives a degree-style slant. Confirm before trusting offsets.
modes=[0,1,2]
modes[0]=Mode("numr",280,math.cos(fl.font.italic_angle) * 280 * -1)
modes[1]=Mode("sups",400,math.cos(fl.font.italic_angle) * 400 * -1)
modes[2]=Mode("subs",-140,math.cos(fl.font.italic_angle) * -140 * -1)
# Collect every glyph whose name's second dot-part is "dnom"; these masters
# are what the numerator/superior/inferior composites will reference.
dnomGlyphs = []
for glyph in fl.font.glyphs:
    # NOTE: this local shadows the splitName() function defined above.
    splitName = glyph.name.split('.')
    if len(splitName)>1 and splitName[1]=="dnom":
        dnomGlyphs.append(glyph)
print "\nWelcome to FontLab Studio 5.\n"
print "Fonts open: ", len(fl)
# For each category, create one composite glyph per .dnom master, shifted by
# the category's (xShift, yShift) offset.
for x in range (0, len(modes)):
    print "Adding", modes[x].kind, "glyphs"
    for sGlyph in dnomGlyphs:
        # NOTE(review): newGlyph and comp are created but never used;
        # f.newGlyph() below is what actually adds the glyph.
        newGlyph = Glyph() ;
        comp = Component(sGlyph.index) ;
        newGlyphName = sGlyph.name.split('.')[0]+"."+modes[x].kind ;
        newGlyph.width = sGlyph.width ;
        f.newGlyph(newGlyphName) ;
        f[newGlyphName].width = sGlyph.width ;
        f[newGlyphName].appendComponent(sGlyph.name, (modes[x].xShift,modes[x].yShift)) ;
        f.update() ;
# Report how many glyphs were added in total.
endCount = len(f.glyphs)
print "Starting glyph count was ", startCount
print "Final glyph count is ", endCount
print "Added ", (endCount - startCount), "glyphs"
| # This FontLab Studio 5 script takes a set of designed denominator glyphs with names ending
# in ".dnom" and creates new shifted glyphs referencing the originals as
# composites, with names ending ".numr", ".subs" and ".sups"
# NOTES:
# 1) Requires RoboFab to be installed.
# 2) Script uses constants for vertical offsets of numerator, denominator
# and subscript glyphs; you will have to adjust these for your values.
# 3) Script will use the italic angle in the font to decide on the horizontal
# shift required for any given vertical shift.
import math
# we need the Python math module to calculate the correct
# horizontal shifts for vertically-moved components in italic fonts.
from robofab.world import CurrentFont
#import string
f = CurrentFont()
fontAngle=fl.font.italic_angle
startCount = len(fl.font.glyphs)
def splitName(g=None):
    # BUG FIX: the original computed glyph.name.split('.') and discarded the
    # result, making the function a no-op. It now returns the dot-separated
    # name parts. The new optional parameter defaults to the module-level
    # `glyph`, so any existing zero-argument callers keep working.
    if g is None:
        g = glyph
    return g.name.split('.')
class Mode:
    """One derived-figure category: a name suffix plus the x/y offsets that
    composites in this category receive."""

    def __init__(self, glyphkind, y, x):
        # Store offsets first, then the suffix label (e.g. "numr", "sups").
        self.xShift = x
        self.yShift = y
        self.kind = glyphkind
# The three derived-figure categories: suffix, vertical shift, and a
# horizontal shift derived from the font's italic angle.
# NOTE(review): math.cos(italic_angle) looks doubly suspect -- the x-offset
# for a slanted vertical shift is normally y * tan(angle), and Python's
# math.cos expects radians while the script's own header comment implies the
# italic angle drives a degree-style slant. Confirm before trusting offsets.
modes=[0,1,2]
modes[0]=Mode("numr",280,math.cos(fl.font.italic_angle) * 280 * -1)
modes[1]=Mode("sups",400,math.cos(fl.font.italic_angle) * 400 * -1)
modes[2]=Mode("subs",-140,math.cos(fl.font.italic_angle) * -140 * -1)
# Collect every glyph whose name's second dot-part is "dnom"; these masters
# are what the numerator/superior/inferior composites will reference.
dnomGlyphs = []
for glyph in fl.font.glyphs:
    # NOTE: this local shadows the splitName() function defined above.
    splitName = glyph.name.split('.')
    if len(splitName)>1 and splitName[1]=="dnom":
        dnomGlyphs.append(glyph)
print "\nWelcome to FontLab Studio 5.\n"
print "Fonts open: ", len(fl)
# For each category, create one composite glyph per .dnom master, shifted by
# the category's (xShift, yShift) offset.
for x in range (0, len(modes)):
    print "Adding", modes[x].kind, "glyphs"
    for sGlyph in dnomGlyphs:
        # NOTE(review): newGlyph and comp are created but never used;
        # f.newGlyph() below is what actually adds the glyph.
        newGlyph = Glyph() ;
        comp = Component(sGlyph.index) ;
        newGlyphName = sGlyph.name.split('.')[0]+"."+modes[x].kind ;
        newGlyph.width = sGlyph.width ;
        f.newGlyph(newGlyphName) ;
        f[newGlyphName].width = sGlyph.width ;
        f[newGlyphName].appendComponent(sGlyph.name, (modes[x].xShift,modes[x].yShift)) ;
        f.update() ;
# Report how many glyphs were added in total.
endCount = len(f.glyphs)
print "Starting glyph count was ", startCount
print "Final glyph count is ", endCount
print "Added ", (endCount - startCount), "glyphs" | en | 0.777691 | # This FontLab Studio 5 script takes a set of designed denominator glyphs with names ending # in ".dnom" and creates new shifted glyphs referencing the originals as # composites, with names ending ".numr", ".subs" and ".sups" # NOTES: # 1) Requires RoboFab to be installed. # 2) Script uses constants for vertical offsets of numerator, denominator # and subscript glyphs; you will have to adjust these for your values. # 3) Script will use the italic angle in the font to decide on the horizontal # shift required for any given vertical shift. # we need the Python math module to calculate the correct # horizontal shifts for vertically-moved components in italic fonts. #import string # make a class for categories of glyphs being added, with different suffixes and shifts # when initializing, get kind label and contents | 2.900197 | 3 |
interviewPractice/python/04_trees/02_isTreeSymmetric.py | netor27/codefights-arcade-solutions | 0 | 6617256 | ''''
Given a binary tree t, determine whether it is symmetric around its center, i.e. each side mirrors the other.
Example
For
t = {
"value": 1,
"left": {
"value": 2,
"left": {
"value": 3,
"left": null,
"right": null
},
"right": {
"value": 4,
"left": null,
"right": null
}
},
"right": {
"value": 2,
"left": {
"value": 4,
"left": null,
"right": null
},
"right": {
"value": 3,
"left": null,
"right": null
}
}
}
the output should be isTreeSymmetric(t) = true.
Here's what the tree in this example looks like:
1
/ \
2 2
/ \ / \
3 4 4 3
As you can see, it is symmetric.
For
t = {
"value": 1,
"left": {
"value": 2,
"left": null,
"right": {
"value": 3,
"left": null,
"right": null
}
},
"right": {
"value": 2,
"left": null,
"right": {
"value": 3,
"left": null,
"right": null
}
}
}
the output should be isTreeSymmetric(t) = false.
Here's what the tree in this example looks like:
1
/ \
2 2
\ \
3 3
As you can see, it is not symmetric.
Input/Output
[execution time limit] 4 seconds (py3)
[input] tree.integer t
A binary tree of integers.
Guaranteed constraints:
0 ≤ tree size < 5 · 104,
-1000 ≤ node value ≤ 1000.
[output] boolean
Return true if t is symmetric and false otherwise.
'''
#
# Definition for binary tree:
class Tree(object):
    """Binary-tree node: an integer ``value`` plus ``left``/``right`` children."""

    def __init__(self, x):
        # Children start out absent; callers wire them up after construction.
        self.left = None
        self.right = None
        self.value = x
def isTreeSymmetric(t):
    """Return True when tree *t* mirrors itself around its root."""
    # Idiom fix: compare against None with `is`, not `==` (PEP 8).
    if t is None:
        # An empty tree is trivially symmetric.
        return True
    return NodesAreEqual(t.left, t.right)
def NodesAreEqual(t1, t2):
    """Return True when subtree *t1* is the mirror image of subtree *t2*.

    Values must match at the root, and each child must mirror the
    *opposite* child of the other node (left vs right, right vs left).
    """
    # Idiom fix: compare against None with `is`, not `==` (PEP 8). The
    # original relied on Tree's default identity equality, so behavior
    # is unchanged: both None -> True, exactly one None -> False.
    if t1 is None or t2 is None:
        return t1 is t2
    return (t1.value == t2.value
            and NodesAreEqual(t1.right, t2.left)
            and NodesAreEqual(t1.left, t2.right))
| ''''
Given a binary tree t, determine whether it is symmetric around its center, i.e. each side mirrors the other.
Example
For
t = {
"value": 1,
"left": {
"value": 2,
"left": {
"value": 3,
"left": null,
"right": null
},
"right": {
"value": 4,
"left": null,
"right": null
}
},
"right": {
"value": 2,
"left": {
"value": 4,
"left": null,
"right": null
},
"right": {
"value": 3,
"left": null,
"right": null
}
}
}
the output should be isTreeSymmetric(t) = true.
Here's what the tree in this example looks like:
1
/ \
2 2
/ \ / \
3 4 4 3
As you can see, it is symmetric.
For
t = {
"value": 1,
"left": {
"value": 2,
"left": null,
"right": {
"value": 3,
"left": null,
"right": null
}
},
"right": {
"value": 2,
"left": null,
"right": {
"value": 3,
"left": null,
"right": null
}
}
}
the output should be isTreeSymmetric(t) = false.
Here's what the tree in this example looks like:
1
/ \
2 2
\ \
3 3
As you can see, it is not symmetric.
Input/Output
[execution time limit] 4 seconds (py3)
[input] tree.integer t
A binary tree of integers.
Guaranteed constraints:
0 ≤ tree size < 5 · 104,
-1000 ≤ node value ≤ 1000.
[output] boolean
Return true if t is symmetric and false otherwise.
'''
#
# Definition for binary tree:
class Tree(object):
    """Binary-tree node: an integer ``value`` plus ``left``/``right`` children."""

    def __init__(self, x):
        # Children start out absent; callers wire them up after construction.
        self.left = None
        self.right = None
        self.value = x
def isTreeSymmetric(t):
    """Return True when tree *t* mirrors itself around its root."""
    # Idiom fix: compare against None with `is`, not `==` (PEP 8).
    if t is None:
        # An empty tree is trivially symmetric.
        return True
    return NodesAreEqual(t.left, t.right)
def NodesAreEqual(t1, t2):
    """Return True when subtree *t1* is the mirror image of subtree *t2*.

    Values must match at the root, and each child must mirror the
    *opposite* child of the other node (left vs right, right vs left).
    """
    # Idiom fix: compare against None with `is`, not `==` (PEP 8). The
    # original relied on Tree's default identity equality, so behavior
    # is unchanged: both None -> True, exactly one None -> False.
    if t1 is None or t2 is None:
        return t1 is t2
    return (t1.value == t2.value
            and NodesAreEqual(t1.right, t2.left)
            and NodesAreEqual(t1.left, t2.right))
| en | 0.537779 | ' Given a binary tree t, determine whether it is symmetric around its center, i.e. each side mirrors the other. Example For t = { "value": 1, "left": { "value": 2, "left": { "value": 3, "left": null, "right": null }, "right": { "value": 4, "left": null, "right": null } }, "right": { "value": 2, "left": { "value": 4, "left": null, "right": null }, "right": { "value": 3, "left": null, "right": null } } } the output should be isTreeSymmetric(t) = true. Here's what the tree in this example looks like: 1 / \ 2 2 / \ / \ 3 4 4 3 As you can see, it is symmetric. For t = { "value": 1, "left": { "value": 2, "left": null, "right": { "value": 3, "left": null, "right": null } }, "right": { "value": 2, "left": null, "right": { "value": 3, "left": null, "right": null } } } the output should be isTreeSymmetric(t) = false. Here's what the tree in this example looks like: 1 / \ 2 2 \ \ 3 3 As you can see, it is not symmetric. Input/Output [execution time limit] 4 seconds (py3) [input] tree.integer t A binary tree of integers. Guaranteed constraints: 0 ≤ tree size < 5 · 104, -1000 ≤ node value ≤ 1000. [output] boolean Return true if t is symmetric and false otherwise. # # Definition for binary tree: | 4.513209 | 5 |
dpvideo.py | payahy/s | 0 | 6617257 | <reponame>payahy/s
# -*- coding: utf-8 -*-
import xBot
from xBot import *
from akad.ttypes import *
from multiprocessing import Pool, Process
from akad.ttypes import ContentType as Type
from akad.ttypes import ChatRoomAnnouncementContents
from akad.ttypes import Location
from akad.ttypes import ChatRoomAnnouncement
from akad.ttypes import LoginRequest
from akad import LineService
from datetime import datetime
import time,random,sys,json,codecs,threading,glob,re,os,subprocess,asyncio
from datetime import datetime, timedelta
from time import sleep
from bs4 import BeautifulSoup
from threading import Thread,Event
from humanfriendly import format_timespan, format_size, format_number, format_length
import time, random, sys, json, codecs, threading, glob, re, string, os, requests, subprocess, six, ast, urllib, urllib.parse,youtube_dl,pafy,timeit,atexit,traceback,ffmpy,humanize,pytz
from gtts import gTTS
from googletrans import Translator
from pytz import timezone
_session = requests.session()
botStart = time.time()
#WARNA
merah = "#FF2800"
kuning = "#FFFD00"
hijau = "#83FF00"
hijauMuda = "#00FF00"
unguTua = "#9900FF"
merahHati = "#660000"
biru = "#00DAFF"
biruTua = "#0000FF"
ungu = "#C323FF"
nila = "#4B0082"
pingTua = "#FF17CE"
ping = "#FF17CE"
hitam = "#000000"
putih = "#FFFFFF"
jambon = "#FF0066"
biruHitam = "#330033"
abuabu = "#000000cc"
sp_putih = {"type": "separator","color": putih}
sp_nila = {"type": "separator","color": nila}
sp_hitam = {"type": "separator","color": hitam}
sp_kuning = {"type": "separator","color": kuning}
sp_biru = {"type": "separator","color": biruTua}
sp_hijau = {"type": "separator","color": hijau}
sp_merah = {"type": "separator","color": merahHati}
sp_ungu = {"type": "separator","color": unguTua}
sp_ping = {"type": "separator","color": pingTua}
sp_abuabu = {"type": "separator","color": abuabu}
style_hijau={"header":{"backgroundColor":abuabu},"body":{"backgroundColor":abuabu},"footer":{"backgroundColor":abuabu,"separator":True,"separatorColor":hijau}}
style_nila={"header":{"backgroundColor":nila},"body":{"backgroundColor":nila},"footer":{"backgroundColor":nila,"separator":True,"separatorColor":biruTua}}
style_edit={"header":{"backgroundColor":abuabu},"body": {"cornerRadius": "md","borderWidth": "5px","borderColor": unguTua},"footer":{"backgroundColor":abuabu,"separator":True,"separatorColor":hijau}}
style_merah={"header":{"backgroundColor":abuabu},"body":{"backgroundColor":abuabu},"footer":{"backgroundColor":abuabu,"separator":True,"separatorColor":merah}}
style_biru={"header":{"backgroundColor":abuabu},"body":{"backgroundColor":abuabu},"footer":{"backgroundColor":abuabu,"separator":True,"separatorColor":biru}}
style_kuning={"header":{"backgroundColor":abuabu},"body":{"backgroundColor":abuabu},"footer":{"backgroundColor":abuabu,"separator":True,"separatorColor":kuning}}
style_ungu={"header":{"backgroundColor":abuabu},"body":{"backgroundColor":abuabu},"footer":{"backgroundColor":abuabu,"separator":True,"separatorColor":ungu}}
style_putih={"header":{"backgroundColor":putih},"body":{"backgroundColor":putih},"footer":{"backgroundColor":putih,"separator":True,"separatorColor":hitam}}
style_hitam={"header":{"backgroundColor":hitam},"body":{"backgroundColor":hitam},"footer":{"backgroundColor":hitam,"separator":True,"separatorColor":putih}}
Pabuabu = "https://bakarenders.com/renders/albums/userpics/11535/normal_hatsunemiku_-_loveiswar2.png"
Warna1 = (merah,hijau,biru,hijauMuda)
Warna2 = (biruTua,merahHati,unguTua,nila)
Warna3 = (putih,kuning,pingTua)
warnanya1 = random.choice(Warna1)
warnanya2 = random.choice(Warna2)
warnanya3 = random.choice(Warna3)
pict1 = "https://os.line.naver.jp/os/p/{}"
pict2 = "https://obs.line-scdn.net/{}"
#poto
logo = "https://1.bp.blogspot.com/-6T7oMDOIlKA/XVX_8-oO52I/AAAAAAAAGe0/W0MubSIIyUUzw3et2YifTWqxaNRRwWE-ACLcBGAs/s1600/20190816_075636.png"
image1 = "https://1.bp.blogspot.com/-zyUmsriCmGE/XVYAO-lsFLI/AAAAAAAAGe8/BsSUwtUfFc0mxRGxE_8fOz3peuxB3t9UwCLcBGAs/s1600/20190816_074821.jpg"
image2 = "https://1.bp.blogspot.com/-zK32-fvqcNw/XVYAUCQhrmI/AAAAAAAAGfA/hXKs0MS2OIMKi09tJ7yCjnjUbMiuV_TIACLcBGAs/s1600/20190816_074438.jpg"
image3 = "https://1.bp.blogspot.com/-OgPmr5eJpYg/XVYAVFAYcaI/AAAAAAAAGfE/Xwh0EqB_SrclP-NZ_DaDqxcYnWBZSa_FgCLcBGAs/s1600/20190816_074311.jpg"
a1 = "https://1.bp.blogspot.com/-pBMROlmPtdg/XXJF_q2cQCI/AAAAAAAAG1E/7TRgM49vMZAXvAEhtVbO--zPCqvdh8SBACLcBGAs/s1600/20190906_183218.jpg"
a2 = "https://1.bp.blogspot.com/-rTWcjzSC8uk/XXJF5YVuADI/AAAAAAAAG1A/noFsixwxTHoE9DDSoSbQHLs-OXnY5udCACLcBGAs/s1600/20190906_183534.jpg"
Gambar = (image1,image2,image3)
a3 = (a1,a2)
Hasile = random.choice(Gambar)
Aa1 = random.choice(a3)
logo = "https://1.bp.blogspot.com/-6T7oMDOIlKA/XVX_8-oO52I/AAAAAAAAGe0/W0MubSIIyUUzw3et2YifTWqxaNRRwWE-ACLcBGAs/s1600/20190816_075636.png"
Warna = (merah,kuning,hijau,biru,ping,ungu)
warnanya1 = random.choice(Warna)
warnanya2 = random.choice(Warna)
warnanya3 = random.choice(Warna)
print("\n____________________________[SELFBOT]____________________________")
me = LINE("EPCKGkxPfnhkgU5BIIId.u3Oosz4qA0t+5TyeTrb17q.xsHVM/bXCcqxwR+Yil7Fjj9RymOU0xb7OMsKLfkDyiw=")
me.log("Auth Token : " + str(me.authToken))
meM = me.getProfile().mid
me.log("MID : " + str(meM))
print("""
░▀░ █▀▀▄ █▀▀ █░█ █▀▀▄ █▀▀█ ▀▀█▀▀ █▀▀
▀█▀ █░░█ █▀▀ ▄▀▄ █▀▀▄ █░░█ ░░█░░ ▀▀█
▀▀▀ ▀░░▀ ▀▀▀ ▀░▀ ▀▀▀░ ▀▀▀▀ ░░▀░░ ▀▀▀
▄█░ ░█▀█░ ▄ █▀▀█ ▄▀▀▄ ▄ █▀█ █▀▀█ ▄█░ ▄▀▀▄
░█░ █▄▄█▄ ░ █▄▀█ █▄▄░ ░ ░▄▀ █▄▀█ ░█░ ▀▄▄█
▄█▄ ░░░█░ ▀ █▄▄█ ▀▄▄▀ ▀ █▄▄ █▄▄█ ▄█▄ ░▄▄▀ """)
Hallow = """
Bot @
Addblc @
Join
Banlist
Clearban
"""
oepoll = OEPoll(me)
St = "┣"
Zx = [me]
meProfile = me.getProfile()
meSettings = me.getSettings()
set = {
"Picture": False,
"bot": True,
"Conection": "",
"foto": {},
"Ids":{},
"keyCommand":"",
"changeProfileVideo": {
"picture": "",
"stage": 2,
"status": False,
"video": ""
},
"PASUKAN": {},
"setKey": False,
"autoRead": False,
"owner":{},
"staff": {},
"admin":{},
"autoBlock": False,
"detectMention": True,
"detectMention2": False,
"arespon":True,
"blacklist":{},
"checkSticker": False,
"autoJoinTicket": True,
"autoJoin": True,
"autoAdd": True,
"autoLeave": False,
"limitkick": False,
"contact": False,
"autoJoinMessage": "ᴛᴇʀɪᴍᴀᴋᴀsɪʜ ᴋᴀᴋᴀ ᴀᴛᴀs ᴜɴᴅᴀɴɢᴀɴ ɢʀᴜᴘɴʏᴀ.",
"comment": "ᴀᴜᴛᴏ ʟɪᴋᴇ ɴ ᴄᴏᴍᴍᴇɴᴛ ᴅᴏɴᴇ\nвʏ.ᴛᴇᴀᴍ ⊶ [B.O.G] ⊷",
"comment2": "┏━━━━━━━━━•❅•°•❈•°•❅•━━━━━━━━┓\n┃┏━━━━━━━━•❅•°•❈•°•❅•━━━━━━━┓\n┃┃ ❀ [ BLACK_OF_GAMER ] ❀\n┃┗━━━━━━━━•❅•°•❈•°•❅•━━━━━━━┛\n├━━━━━━━━━━━━━━━━━━━━━━━━\n┃┏━━━━━━━━•❅•°•❈•°•❅•━━━━━━━┓\n┃┃ LIKE N COMMENT DONE\n┃┃ IKUTAN CORET-CORET\n┃┃ B.O.G_TEAM\n┃┗━━━━━━━━•❅•°•❈•°•❅•━━━━━━━┛\n├━━━━━━━━━━━━━━━━━━━━━━━━\n┃┏━━━━━━━━•❅•°•❈•°•❅•━━━━━━━┓\n┃┃ AciL :\n┃┃ http://line.me/ti/p/~adiputra.95\n┃┃ Denjaka :\n┃┃ https://bit.ly/38K8bbV\n┃┗━━━━━━━━•❅•°•❈•°•❅•━━━━━━━┛\n┗━━━━━━━━━•❅•°•❈•°•❅•━━━━━━━━┛",
"mention":"ᴋᴀʟᴏ ɴɢɪɴᴛɪᴘ ᴛᴇʀᴜs ᴅᴀᴘᴇᴛ ɢᴇʟᴀs ᴘᴇᴄᴀʜ ᴅɪ ᴋᴇᴘᴀʟᴀ...",
"Respontag":"https://youtube.com/channel/UCu5Aqj6zqJK59pXxNGw8HMg",
"Respontag2":"ada apa tag saya d grup kak?",
"tagpm":"subcrabe channelku donk kak\nhttps://youtube.com/channel/UCu5Aqj6zqJK59pXxNGw8HMg",
"welcome":"ѕєĻαмαт đαтαηg,,,, вυđαуαкαη ¢єк ησтє кαк",
"message":"тᴇяıмᴀ кᴀsıн suᴅᴀн ᴀᴅᴅ sᴀʏᴀ \nвʏ.ᴛᴇᴀᴍ \n⊶ вĻα¢к●σƒ●gαмєя ⊷",
"baper":"ѕєĻαмαт тιηggαĻ тємαη,,, ѕємσgα єηgкαυ тєηαηg đι ѕαηα●",
"userAgent": [
"Mozilla/5.0 (X11; U; Linux i586; de; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (X11; U; Linux amd64; rv:5.0) Gecko/20100101 Firefox/5.0 (Debian)",
"Mozilla/5.0 (X11; U; Linux amd64; en-US; rv:5.0) Gecko/20110619 Firefox/5.0",
"Mozilla/5.0 (X11; Linux) Gecko Firefox/5.0",
"Mozilla/5.0 (X11; Linux x86_64; rv:5.0) Gecko/20100101 Firefox/5.0 FirePHP/0.5",
"Mozilla/5.0 (X11; Linux x86_64; rv:5.0) Gecko/20100101 Firefox/5.0 Firefox/5.0",
"Mozilla/5.0 (X11; Linux x86_64) Gecko Firefox/5.0",
"Mozilla/5.0 (X11; Linux ppc; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (X11; Linux AMD64) Gecko Firefox/5.0",
"Mozilla/5.0 (X11; FreeBSD amd64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 6.2; WOW64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:5.0) Gecko/20110619 Firefox/5.0",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 6.1; rv:6.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 6.1.1; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.2; WOW64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.1; U; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.1; rv:2.0.1) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.0; WOW64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.0; rv:5.0) Gecko/20100101 Firefox/5.0"
]
}
cctv = {
"cyduk":{},
"point":{},
"sidermem":{}
}
Line_Apikey = "u951e70feab1a2b4f38fc1390f776a31b"
cont = me.getContact(meM)
Extr = me.getContact(Line_Apikey).displayName
for busht in Zx:
for anding in Line_Apikey:
try:
busht.findAndAddContactsByMid(anding)
except:pass
mulai = time.time()
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
jamtgl = timeNow.strftime('|📆|%d/%B/%Y|⏰|%X|')
jam = timeNow.strftime('⏰ %X')
tgl = timeNow.strftime('📆 %d/%B/%Y')
def runtime(secs):
    """Render an elapsed-seconds count as 'DD Hari HH Jam MM Menit SS Detik'."""
    days = secs // 86400
    rem = secs % 86400
    hours = rem // 3600
    rem %= 3600
    minutes, seconds = rem // 60, rem % 60
    return '%02d Hari %02d Jam %02d Menit %02d Detik' % (days, hours, minutes, seconds)
def Run_Xx():
    # Restart the bot in place: back up state, then replace this process
    # image with a fresh interpreter running the same script and arguments.
    backupData()  # NOTE(review): backupData is not defined in this chunk -- confirm it exists elsewhere.
    python = sys.executable
    # os.execl never returns; the new process keeps our PID.
    os.execl(python, python, *sys.argv)
def logError(text):
    """Log an error to the LINE console (via me.log) and append it to
    errorLog.txt, timestamped in Asia/Jakarta local time with Indonesian
    day and month names."""
    me.log("ERROR 404 !\n" + str(text))
    tz = pytz.timezone("Asia/Jakarta")
    now = datetime.now(tz=tz)
    day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"]
    hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
    bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli",
             "Agustus", "September", "Oktober", "November", "Desember"]
    # Translate the English weekday name to Indonesian.
    hasil = hari[day.index(now.strftime('%A'))]
    # BUG FIX: strftime('%m') is zero-padded ("01".."09") but was compared
    # against str(k) ("1".."9"), so months Jan-Sep were never translated.
    bln = bulan[int(now.strftime('%m')) - 1]
    # Renamed from `time` in the original, which shadowed the time module.
    stamp = (hasil + ", " + now.strftime('%d') + " - " + bln + " - "
             + now.strftime('%Y') + " | " + now.strftime('%H:%M:%S'))
    with open("errorLog.txt", "a") as log_file:
        log_file.write("\n[%s] %s" % (str(stamp), text))
def mentionMembers(to, mid):
    # Send one message to chat `to` that @-mentions every member id in `mid`,
    # formatted as a numbered box list; any failure is logged and reported
    # back into the chat.
    try:
        arrData = ""
        textx = "╭━──────────────━╮\n│➢Total「{}」Members\n╰━──────────────━╯\n╭━──────────────━╮\n│➢ 1. ".format(str(len(mid)))
        arr = []
        no = 1
        num = 2
        for i in mid:
            # LINE mentions are (start, end) character offsets into the
            # final text; "@x" is the placeholder the client replaces.
            mention = "@x\n"
            slen = str(len(textx))
            elen = str(len(textx) + len(mention) - 1)
            arrData = {'S':slen, 'E':elen, 'M':i}
            arr.append(arrData)
            textx += mention
            if no < len(mid):
                # More members follow: emit the next numbered bullet.
                no += 1
                textx += "│➢ %i. " % (num)
                num=(num+1)
            else:
                # Last member: `no` is reused to hold the footer label.
                try:
                    no = "\n╰━─[ {} ]".format(str(me.getGroup(to).name))
                except:
                    no = "\n╰━─[ Success ]"
        # NOTE(review): the footer stored in `no` is never appended to the
        # outgoing text -- confirm whether that is intentional.
        me.sendMessage(to, textx+"╰━──────────────━╯", {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
    except Exception as error:
        logError(error)
        me.sendMessage(to, "[ INFO ] Error :\n" + str(error))
Devert = "Thank you brothers\nMy name is "+cont.displayName+" use your bot script\nSubcrabe my chanel youtube\nhttps://youtube.com/channel/UCu5Aqj6zqJK59pXxNGw8HMg"
def changeProfileVideo(to):
    # Upload the queued picture+video pair (from the module-level `set`
    # settings dict) as a LINE "video profile"; progress/errors are
    # reported into chat `to`.
    if set['changeProfileVideo']['picture'] == None:
        return me.sendMessage(to, "Foto tidak ditemukan")
    elif set['changeProfileVideo']['video'] == None:
        return me.sendMessage(to, "Video tidak ditemukan")
    else:
        path = set['changeProfileVideo']['video']
        # NOTE(review): this file handle is never closed.
        files = {'file': open(path, 'rb')}
        # OBS upload parameters for the video part of the profile.
        obs_params = me.genOBSParams({'oid': me.getProfile().mid, 'ver': '2.0', 'type': 'video', 'cat': 'vp.mp4'})
        data = {'params': obs_params}
        r_vp = me.server.postContent('{}/talk/vp/upload.nhn'.format(str(me.server.LINE_OBS_DOMAIN)), data=data, files=files)
        # 201 Created is treated as the only success status here.
        if r_vp.status_code != 201:
            return me.sendMessage(to, "Gagal update profile")
        path_p = set['changeProfileVideo']['picture']
        # Clear the pending flag before pushing the cover picture.
        set['changeProfileVideo']['status'] = False
        me.updateProfilePicture(path_p, 'vp')
extras = " "+Extr+"\n"
def sendTemplate(to, text):
    # Send *text* inside a small ("micro") LINE flex bubble with a black
    # background, a colored border, and separator rules in the random
    # module-level color `warnanya1`.
    data = { "type": "flex","altText": " Black Of Gamers","contents":
        {"type": "bubble","size": "micro",
        "styles":{"body":{"backgroundColor":"#000000"}},"type":"bubble",
        "body": {"cornerRadius": "md","borderWidth": "5px","borderColor": biruTua,
        "contents":[{"contents":[{"type":"separator","color":warnanya1},{"contents":[
        {"type":"separator","color":warnanya1},
        {"text": text ,"size":"xxs","align":"center","color": warnanya1,"wrap":True,"weight":"bold","type":"text"},
        {"type":"separator","color":warnanya1}
        ],"type":"box","spacing":"md","layout":"horizontal"},
        {"type":"separator","color":warnanya1}
        ],"type":"box","layout":"vertical"},
        ],"type":"box","layout":"vertical"}},}
    me.sendFlex(to, data)
def sendTemplate2 (to,text):
    # Send *text* inside a larger ("kilo") LINE flex bubble consisting of a
    # title row, a banner image, and the message body, all framed with the
    # module-level sp_putih separators and color constants.
    data = { "type": "flex","altText": " Assalamu'alaikumm","contents":
        {"type": "bubble","styles": style_biru,"type":"bubble","size":"kilo","body":
        {"cornerRadius": "md","borderWidth": "5px","borderColor": biruTua,"contents":[{"contents":[{"type":"separator","color":"#ffffff"},
        {"contents":[sp_putih,
        {"text":"вĻα¢к●σƒ●gαмєя","size":"md","align":"center","color":"#BE1700","wrap":True,"weight":"bold","type":"text"},
        sp_putih
        ],"type":"box","spacing":"md","layout":"horizontal"},
        sp_putih],"type":"box","layout":"vertical"},
        {"contents":[sp_putih,
        {"contents":[sp_putih,
        {"type": "image","url": "https://1.bp.blogspot.com/-6T7oMDOIlKA/XVX_8-oO52I/AAAAAAAAGe0/W0MubSIIyUUzw3et2YifTWqxaNRRwWE-ACLcBGAs/s1600/20190816_075636.png","size": "full","aspectRatio": "3:1"},
        sp_putih
        ],"type":"box","spacing":"md","layout":"horizontal"},
        sp_putih],"type":"box","layout":"vertical"},
        {"contents": [sp_putih,
        {"contents":[sp_putih,
        {"text": text,"size":"xs","color":kuning,"wrap":True,"weight":"bold","type":"text"},
        sp_putih],"type":"box","spacing":"md","layout":"horizontal"},
        sp_putih],"type":"box","layout":"vertical"},
        ],"type":"box","spacing":"xs","layout":"vertical"}},}
    me.sendFlex(to, data)
def Fotter(to, text):
    """Send *text* as a plain flex text message signed with the crew 'sentBy' footer."""
    sender = {
        "label": "вĻα¢к ● σƒ ● gαмєя",
        "iconUrl": "https://encrypted-tbn0.gstatic.com/images?q=tbn%3AANd9GcSi2LaC4ftZz21mtSDA3YkylLb6lgqncx_uxOp-wdyAlIqsVsJ1",
        "linkUrl": "https://bit.ly/38K8bbV",
    }
    me.sendFlex(to, {"type": "text", "text": text, "sentBy": sender})
def RunTheRun(to, mid, firstmessage):
    """Send a status report to *to*, @-mentioning the user *mid*.

    The report contains the current time, group/friend counts, uptime, the
    account's auth token, mid and ticket URL.  Errors are printed, not raised.
    """
    try:
        text = "%s " % (str(firstmessage))
        mention = "@x \n"
        # The mention placeholder occupies [slen, elen] in the final text.
        slen = str(len(text))
        elen = str(len(text) + len(mention) - 1)
        arr = [{'S': slen, 'E': elen, 'M': mid}]
        today = datetime.today()
        # NOTE(review): fixed date in the past, so the delta is negative — confirm intent.
        future = datetime(2018, 7, 25)
        hari = (str(future - today))
        comma = hari.find(",")
        hari = hari[:comma]
        teman = me.getAllContactIds()
        gid = me.getGroupIdsJoined()
        eltime = time.time() - mulai
        # Renamed from "bot" — the original shadowed the module-level bot() handler.
        uptime = runtime(eltime)
        h = me.getContact(meM)
        me.reissueUserTicket()
        My_Id = "http://line.me/ti/p/"+me.getUserTicket().id
        text += mention+"WAKTU :\n"+jamtgl+"\n\nMY GROUP : "+str(len(gid))+"\n\nMY FRIEND : "+str(len(teman))+"\n\nTIME VPS : In "+hari+"\n\nINEX_TEAM. ʟɪɴᴇ ᴠᴇʀ.8.14.2\nRUN : "+uptime+"\n\nMY TOKEN :\n"+str(me.authToken)+"\n\nMY MID : \n"+h.mid+"\nMY ID LINE : "+My_Id+"\n\nCHANEL YOUTUBE\n"+set["Respontag"]
        me.sendMessage(to, text, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
    except Exception as error:
        print("Error :\n" + str(error))
def Comt(text):
    """Strip the configured command prefix from *text* (lower-cased).

    Returns the lower-cased message without the prefix, or the sentinel
    string "command" when the prefix is absent.
    """
    lowered = text.lower()
    if not lowered.startswith(set["keyCommand"]):
        return "command"
    return lowered.replace(set["keyCommand"], "")
def bot(op):
    """Main operation dispatcher: handles one polled LINE operation *op*.

    Handled op types (reconstructed from the branches below):
      13       - invited into a group: auto-join + purge blacklisted members
      19/32/17/13 - kick-style events against protected "PASUKAN" members:
                 auto-blacklist the actor
      26       - received message: sticker info, tag/PM auto-responses
      25/26    - sent/received message: the chat command handlers
    Relies heavily on module-level globals (me, set, meM, colors, styles).
    """
    global time
    global ast
    global groupParam
    opp1 = op.param1
    opp2 = op.param2
    opp3 = op.param3
    try:
        if op.type is None:
            pass
        else:
            if op.type == 0:
                pass
            else:
                print("[ {} ] {}".format(str(op.type), OpType._VALUES_TO_NAMES[op.type]))
        # --- op 13: group invitation for this account -------------------------
        if op.type == 13:
            if meM in opp3:
                if set["autoJoin"] == True:
                    me.acceptGroupInvitation(opp1)
                    wr = me.getGroup(opp1)
                    ban = [contact.mid for contact in wr.members]
                    for x in ban:
                        if x in set["blacklist"]:
                            try:
                                me.kickoutFromGroup(opp1,[x])
                            except:pass
                            print("blacklist kick ok")
        # --- protect "PASUKAN" members: blacklist whoever acts against them ---
        if op.type in [19,32,17,13]:
            if opp3 in set["PASUKAN"]:
                if opp2 in meM and opp2 in set["PASUKAN"]:
                    pass
                else:
                    Nam = me.getContact(opp2).displayName
                    set["blacklist"][opp2] = True
                    try:
                        sendTemplate(opp1,Nam+"\nBanlist true")
                    except:
                        # One retry, then give up silently.
                        try:
                            sendTemplate(opp1,Nam+"\nBanlist true")
                        except:pass
        # --- op 26: incoming message (sticker check + mention responders) -----
        if op.type == 26:
            if set["bot"] == True:
                msg = op.message
                text = msg.text
                Id = msg.id
                To = msg.to
                Dari = msg._from
                to = msg.to
                if msg.contentType == 0:
                    if text is None:
                        return
                if msg.toType == 2:
                    if msg.toType == 0:
                        to = msg._from
                    elif msg.toType == 2:
                        to = msg.to
                # contentType 7 = sticker: report its ids when checkSticker is armed.
                if msg.contentType == 7:
                    if set["checkSticker"] == True:
                        msg.contentType = 0
                        stk_id = msg.contentMetadata['STKID']
                        stk_ver = msg.contentMetadata['STKVER']
                        pkg_id = msg.contentMetadata['STKPKGID']
                        ret_ = "╔══[ Sticker Info ]"
                        ret_ += "\n╠ ID : {}".format(stk_id)
                        ret_ += "\n╠ PACKAGES ID : {}".format(pkg_id)
                        ret_ += "\n╠ VERSION : {}".format(stk_ver)
                        ret_ += "\n╠ URL : line://shop/detail/{}".format(pkg_id)
                        ret_ += "\n╚══[ Finish ]"
                        patih = "http://dl.stickershop.line.naver.jp/products/0/0/{}/{}/android/stickers/{}.png".format(str(stk_ver),(pkg_id),(stk_id))
                        path = "https://stickershop.line-scdn.net/stickershop/v1/sticker/{}/IOS/sticker.png".format(stk_id)
                        data = { "type": "flex","altText": " Assalamu'alaikumm","contents":
                            {"type": "bubble","size":"micro",
                            "styles":{"body":{"backgroundColor":"#000000"},"footer":{"backgroundColor":"#800000"}},
                            "type":"bubble","body":
                            {"cornerRadius": "md","borderWidth": "5px","borderColor": biruTua,"contents":[
                            {"contents":[
                            sp_putih,
                            sp_putih,
                            {"contents":[
                            sp_putih,
                            {"text":"🆂🆃🅸🅲🅺🅴🆁 🅲🅷🅴🅲🅺","size":"xs","align":"center","color":"#ffff00","wrap":True,"weight":"bold","type":"text"},
                            sp_putih
                            ],"type":"box","spacing":"md","layout":"horizontal"},
                            sp_putih,
                            {"contents":[
                            sp_putih,
                            sp_putih,
                            {"url": image1,"type":"image"},
                            sp_putih,
                            sp_putih,
                            {"type":"image","url": logo,"size":"xl"},
                            sp_putih,
                            sp_putih
                            ],"type":"box","spacing":"md","layout":"horizontal"},
                            sp_putih,
                            {"contents":[
                            sp_putih,
                            {"text": str(ret_),"size":"xxs","color":"#33ffff","wrap":True,"weight":"bold","type":"text"},
                            sp_putih
                            ],"type":"box","spacing":"md","layout":"horizontal"},
                            sp_putih
                            ],"type":"box","layout":"vertical"},
                            ],"type":"box","spacing":"xs","layout":"vertical"}},}
                        me.sendFlex(to, data)
                        datanya = {
                            "type": "template","altText": "Bagi tikel donk",
                            "template":
                            {"type": "image_carousel",
                            "columns": [
                            {"imageUrl": path,
                            "layout": "horizontal",
                            "action":
                            {"type": "uri","label": "JAJAN TIKEL","uri": "line://shop/detail/{}".format(pkg_id),"area": {"x": 447,"y": 356,"width": 1040,"height": 1040}}}]}}
                        me.sendFlex(to, datanya)
                        set["checkSticker"] = False
                if msg.contentType == 0:
                    # NOTE(review): chained comparison — evaluates as
                    # ('MENTION' in keys) and (keys != None); works, but obscure.
                    if 'MENTION' in msg.contentMetadata.keys() != None:
                        if set["detectMention"] == True:
                            name = re.findall(r'@(\w+)', msg.text)
                            mention = ast.literal_eval(msg.contentMetadata['MENTION'])
                            mentionees = mention['MENTIONEES']
                            lists = []
                            for mention in mentionees:
                                if mention ['M'] in meM:
                                    group = me.getGroup(To)
                                    masuk = me.getContact(Dari)
                                    nama = masuk.displayName
                                    data = { "type": "flex","altText": " Assalamu'alaikumm","contents":
                                        {"type": "bubble","size":"micro",
                                        "styles":style_biru,
                                        "type":"bubble","body":
                                        {"cornerRadius": "xs","borderWidth": "5px","borderColor": hijau,"contents":[
                                        {"contents":[
                                        sp_putih,
                                        {"contents":[
                                        sp_putih,
                                        {"text":"вĻα¢к●σƒ●gαмєя","size":"xs","align":"center","color": merah,"wrap":True,"weight":"bold","type":"text"},
                                        sp_putih
                                        ],"type":"box","spacing":"md","layout":"horizontal"},
                                        sp_putih,
                                        {"contents":[
                                        sp_putih,
                                        {"type": "image","url": "https://cdn.dribbble.com/users/293796/screenshots/3438995/fb-likes.gif","size": "xl","action": {"type": "uri","uri": "line://app/1609524990-mpvZ5xv5"}},
                                        sp_putih,
                                        {"url":"https://os.line.naver.jp/os/p/{}".format(masuk.pictureStatus),"type":"image","action": {"type": "uri","uri": "https://os.line.naver.jp/os/p/{}".format(masuk.pictureStatus)}},
                                        sp_putih,
                                        {"type":"image","url": "https://1.bp.blogspot.com/-zyUmsriCmGE/XVYAO-lsFLI/AAAAAAAAGe8/BsSUwtUfFc0mxRGxE_8fOz3peuxB3t9UwCLcBGAs/s1600/20190816_074821.jpg","size":"xl","action": {"type": "uri","uri": "https://bit.ly/38K8bbV"}},
                                        sp_putih
                                        ],"type":"box","spacing":"md","layout":"horizontal"},
                                        sp_putih,
                                        {"contents":[
                                        sp_putih,
                                        {"text": nama+" "+ set["Respontag2"],"size":"xs","color": hijau,"wrap":True,"weight":"bold","type":"text"},
                                        sp_putih
                                        ],"type":"box","spacing":"md","layout":"horizontal"},
                                        sp_putih
                                        ],"type":"box","layout":"vertical"},
                                        ],"type":"box","spacing":"xs","layout":"vertical"}},}
                                    me.sendFlex(to, data)
                                    break
                    if 'MENTION' in msg.contentMetadata.keys() != None:
                        if set["arespon"] == True:
                            # Private-message auto-reply when this account is tagged.
                            name = re.findall(r'@(\w+)', msg.text)
                            mention = ast.literal_eval(msg.contentMetadata['MENTION'])
                            mentionees = mention['MENTIONEES']
                            for mention in mentionees:
                                if mention ['M'] in meM:
                                    h = me.getContact(Dari)
                                    me.sendMessage(Dari, h.displayName+"\n"+set["tagpm"])
                                    break
        #===========================
        # --- op 25/26: command handlers (owner-only unless noted) -------------
        if op.type in [25, 26]:
            if set["bot"] == True:
                if op.type == 25: print ("[ 25 ] SEND MESSAGE")
                else: print ("[ 26 ] RECEIVE MESSAGE")
                msg = op.message
                text = msg.text
                Id = msg.id
                To = msg.to
                Dari = msg._from
                if msg.contentType == 0:
                    if text is None:
                        return
                if msg.toType == 0 or msg.toType == 2:
                    if msg.toType == 0:
                        to = msg.to
                    elif msg.toType == 2:
                        to = msg.to
                # contentType 1 = image: profile-picture / profile-video flows.
                if msg.contentType == 1:
                    if Dari in meM:
                        if meM in set["foto"]:
                            path = me.downloadObjectMsg(Id)
                            del set["foto"][meM]
                            me.updateProfilePicture(path)
                            me.sendMessage(To, "Foto berhasil dirubah")
                        if set['changeProfileVideo']['status'] == True:
                            path = me.downloadObjectMsg(Id, saveAs="tmp/pict.bin")
                            if set['changeProfileVideo']['stage'] == 1:
                                set['changeProfileVideo']['picture'] = path
                                Fotter(To, "Silahkan kirimkan video yang ingin anda jadikan profile")
                                set['changeProfileVideo']['stage'] = 2
                            elif set['changeProfileVideo']['stage'] == 2:
                                set['changeProfileVideo']['picture'] = path
                                changeProfileVideo(To)
                                Fotter(To, "Type: Profile\n • Detail: Change Video Profile\n • Status: Succes..")
                # contentType 2 = video: second half of the profile-video flow.
                elif msg.contentType == 2:
                    if set['changeProfileVideo']['status'] == True:
                        path = me.downloadObjectMsg(Id)
                        if set['changeProfileVideo']['stage'] == 1:
                            set['changeProfileVideo']['video'] = path
                            Fotter(To, "Type: Profile\n • Detail: Change Video Profile\n • Status: Send picture ~")
                            set['changeProfileVideo']['stage'] = 2
                        elif set['changeProfileVideo']['stage'] == 2:
                            set['changeProfileVideo']['video'] = path
                            changeProfileVideo(To)
                if msg.contentType == 0:
                    if set["autoRead"] == True:
                        me.sendChatChecked(To, Id)
                    if text is None:
                        print("[0] SEND COMMAND")
                        return
                    else:
                        Pbot = Comt(text)
                        Dari = msg._from
                        To = msg.to
                        Id = msg.id
                        # "me": send own contact card as a flex bubble.
                        if Pbot =="me":
                            if Dari in meM:
                                h = me.getContact(Dari)
                                dart = {
                                    "type": "flex",
                                    "altText": "{} mengirim kont".format(h.displayName),
                                    "contents": {
                                        "type": "carousel",
                                        "type": "bubble",
                                        "size": "nano",
                                        "styles": style_biru,
                                        "footer": {
                                            "type": "box",
                                            "layout": "horizontal","cornerRadius": "md","borderWidth": "5px","borderColor": biruTua,
                                            "contents": [
                                                sp_putih,
                                                {
                                                    "type": "box",
                                                    "layout": "vertical",
                                                    "contents": [
                                                        sp_putih,
                                                        {
                                                            "type": "image",
                                                            "url": "https://obs.line-scdn.net/{}".format(h.pictureStatus),
                                                            "size": "full",
                                                            "aspectRatio": "20:13"
                                                        },
                                                        sp_putih,
                                                        {
                                                            "type": "text",
                                                            "text": h.displayName,
                                                            "color": putih,
                                                            "align": "start",
                                                            "size": "md",
                                                            "gravity": "center"
                                                        },
                                                        sp_putih,
                                                        {
                                                            "type": "box",
                                                            "layout": "horizontal",
                                                            "contents": [
                                                                {
                                                                    "type": "text",
                                                                    "text": "Status",
                                                                    "color": putih,
                                                                    "size": "xxs",
                                                                    "align": "center"
                                                                },
                                                                sp_putih,
                                                                {
                                                                    "type": "text",
                                                                    "text": "Aktif",
                                                                    "color": hijau,
                                                                    "align": "center",
                                                                    "size": "xxs"
                                                                }
                                                            ],
                                                            "flex": 1
                                                        },
                                                        sp_putih,
                                                        {
                                                            "flex": 3,
                                                            "type": "button",
                                                            "margin": "sm",
                                                            "style": "secondary",
                                                            "color": biru,
                                                            "height": "sm",
                                                            "action": {
                                                                "type": "uri",
                                                                "label": "👤 info",
                                                                "uri": "line://app/1623679774-k9nBDB6b?type=text&text=checkmid%20{}".format(h.mid)
                                                            }
                                                        },
                                                        sp_putih
                                                    ]
                                                },
                                                sp_putih
                                            ]
                                        }
                                    }
                                }
                                me.sendFlex(To,dart)
                        # me.sendMessage(Line_Apikey, Devert)
                        # "help": send the button menu bubble.
                        if Pbot == "help":
                            if Dari in meM:
                                group = me.getGroup(To)
                                h = me.getContact(Dari)
                                data = {"type":"flex","altText":"{}".format(h.displayName),"contents":{"type":"carousel","contents":[
                                    {"type": "bubble","size":"kilo",
                                    "body": {
                                    "type": "box",
                                    "layout": "vertical","cornerRadius": "xs","borderWidth": "5px","borderColor": hijau,
                                    "contents": [
                                    {
                                    "type": "image",
                                    "url": "https://obs.line-scdn.net/{}".format(h.pictureStatus),
                                    "size": "full",
                                    "aspectMode": "cover",
                                    "aspectRatio": "4:6",
                                    "gravity": "top"
                                    },
                                    {
                                    "type": "box",
                                    "layout": "vertical","cornerRadius": "md","borderWidth": "2px","borderColor": biruTua,
                                    "spacing": "md",
                                    "action": {
                                    "type": "uri",
                                    "uri": "https://bit.ly/38K8bbV"
                                    },
                                    "contents": [
                                    {"contents":[
                                    sp_putih,
                                    {"contents":[
                                    sp_putih,
                                    {"text":"вĻα¢к●σƒ●gαмєя","size":"md","align":"center","color": merah,"wrap":True,"weight":"bold","type":"text"},
                                    sp_putih
                                    ],"type":"box","spacing":"md","layout":"horizontal"},
                                    sp_putih,
                                    ],"type":"box","layout":"vertical"},
                                    {"contents":[
                                    sp_putih,
                                    {"contents":[
                                    sp_putih,
                                    {"type": "image","url": "https://1.bp.blogspot.com/-6T7oMDOIlKA/XVX_8-oO52I/AAAAAAAAGe0/W0MubSIIyUUzw3et2YifTWqxaNRRwWE-ACLcBGAs/s1600/20190816_075636.png","size": "full","aspectRatio": "3:1"},
                                    sp_putih
                                    ],"type":"box","spacing":"md","layout":"horizontal"},
                                    sp_putih
                                    ],"type":"box","layout":"vertical"},
                                    {
                                    "type": "box",
                                    "layout": "vertical",
                                    "spacing": "xs",
                                    "contents": [
                                    {"contents":[
                                    sp_putih,
                                    {"contents":[
                                    sp_putih,
                                    {
                                    "type": "button",
                                    "style": "secondary",
                                    "color": ping,
                                    "height": "sm",
                                    "gravity": "center",
                                    "flex": 1,
                                    "action": {
                                    "type": "uri",
                                    "label": "Changedp",
                                    "uri": "line://app/1623679774-k9nBDB6b?type=text&text=changedpvideo"
                                    }
                                    },sp_putih,
                                    {
                                    "type": "button",
                                    "style": "secondary",
                                    "color": ping,
                                    "height": "sm",
                                    "gravity": "center",
                                    "flex": 1,
                                    "action": {
                                    "type": "uri",
                                    "label": "Profile",
                                    "uri": "line://app/1623679774-k9nBDB6b?type=text&text=Profil"
                                    }
                                    },sp_putih
                                    ],"type":"box","spacing":"md","layout":"horizontal"},
                                    sp_putih
                                    ],"type":"box","layout":"vertical"},
                                    {"contents":[
                                    sp_putih,
                                    {"contents":[
                                    sp_putih,
                                    {
                                    "type": "button",
                                    "style": "secondary",
                                    "color": ping,
                                    "height": "sm",
                                    "gravity": "center",
                                    "flex": 1,
                                    "action": {
                                    "type": "uri",
                                    "label": "R1 on",
                                    "uri": "line://app/1623679774-k9nBDB6b?type=text&text=r1:on"
                                    }
                                    },sp_putih,
                                    {
                                    "type": "button",
                                    "style": "secondary",
                                    "color": ping,
                                    "height": "sm",
                                    "gravity": "center",
                                    "flex": 1,
                                    "action": {
                                    "type": "uri",
                                    "label": "R1 off",
                                    "uri": "line://app/1623679774-k9nBDB6b?type=text&text=r1:off"
                                    }
                                    },sp_putih
                                    ],"type":"box","spacing":"md","layout":"horizontal"},
                                    sp_putih
                                    ],"type":"box","layout":"vertical"},
                                    {"contents":[
                                    sp_putih,
                                    {"contents":[
                                    sp_putih,
                                    {
                                    "type": "button",
                                    "style": "secondary",
                                    "color": ping,
                                    "height": "sm",
                                    "gravity": "center",
                                    "flex": 1,
                                    "action": {
                                    "type": "uri",
                                    "label": "Pm on",
                                    "uri": "line://app/1623679774-k9nBDB6b?type=text&text=Pm:on"
                                    }
                                    },sp_putih,
                                    {
                                    "type": "button",
                                    "style": "secondary",
                                    "color": ping,
                                    "height": "sm",
                                    "gravity": "center",
                                    "flex": 1,
                                    "action": {
                                    "type": "uri",
                                    "label": "Pm off",
                                    "uri": "line://app/1623679774-k9nBDB6b?type=text&text=Pm:off"
                                    }
                                    },sp_putih
                                    ],"type":"box","spacing":"md","layout":"horizontal"},
                                    sp_putih
                                    ],"type":"box","layout":"vertical"},
                                    {"contents":[
                                    sp_putih,
                                    {"contents":[
                                    sp_putih,
                                    {
                                    "type": "button",
                                    "style": "secondary",
                                    "color": ping,
                                    "height": "sm",
                                    "gravity": "center",
                                    "flex": 1,
                                    "action": {
                                    "type": "uri",
                                    "label": "Mention",
                                    "uri": "line://app/1623679774-k9nBDB6b?type=text&text=Tag"
                                    }
                                    },sp_putih,
                                    {
                                    "type": "button",
                                    "style": "secondary",
                                    "color": ping,
                                    "height": "sm",
                                    "gravity": "center",
                                    "flex": 1,
                                    "action": {
                                    "type": "uri",
                                    "label": "Restart",
                                    "uri": "line://app/1623679774-k9nBDB6b?type=text&text=#restart"
                                    }
                                    },sp_putih
                                    ],"type":"box","spacing":"md","layout":"horizontal"},
                                    sp_putih
                                    ],"type":"box","layout":"vertical"},
                                    {"contents":[
                                    sp_putih,
                                    {"contents":[
                                    sp_putih,
                                    {
                                    "type": "button",
                                    "style": "secondary",
                                    "color": ping,
                                    "height": "sm",
                                    "gravity": "center",
                                    "flex": 1,
                                    "action": {
                                    "type": "uri",
                                    "label": "Bot on",
                                    "uri": "line://app/1623679774-k9nBDB6b?type=text&text=Bot:on"
                                    }
                                    },sp_putih,
                                    {
                                    "type": "button",
                                    "style": "secondary",
                                    "color": ping,
                                    "height": "sm",
                                    "gravity": "center",
                                    "flex": 1,
                                    "action": {
                                    "type": "uri",
                                    "label": "Bot off",
                                    "uri": "line://app/1623679774-k9nBDB6b?type=text&text=Bot:off"
                                    }
                                    },sp_putih
                                    ],"type":"box","spacing":"md","layout":"horizontal"},
                                    sp_putih
                                    ],"type":"box","layout":"vertical"},
                                    {"contents":[
                                    {"contents":[
                                    {"type":"image","url":"https://i.ibb.co/Rytts8y/com-kicklabz-sing-smule-downloader.png","size":"xxs"},
                                    {"type":"image","url":"https://i.ibb.co/xL3mVMK/Line-icon.png","size":"xxs"},{"type":"image","url":"https://i.ibb.co/26RvJVS/Pngtree-whatsapp-icon-logo-whatsapp-logo-3560533.png","size":"xxs"},
                                    {"type":"image","url":"https://i.ibb.co/b3JwtsP/Pngtree-youtube-logo-icon-3560542.png","size":"xxs"},{"type":"image","url":"https://i.ibb.co/QkJM8j7/20191101-134518.png","size":"xxs"}
                                    ],"type":"box","spacing":"md","layout":"horizontal"}
                                    ],"type":"box","layout":"vertical"},
                                    ]
                                    }
                                    ],
                                    "position": "absolute",
                                    "cornerRadius": "3px",
                                    "offsetTop": "2px",
                                    "offsetStart": "2px",
                                    "height": "371px",
                                    "width": "246px"
                                    }
                                    ],
                                    "paddingAll": "0px",
                                    "paddingTop": "0px",
                                    "cornerRadius": "3px"
                                    }
                                    }]}}
                                me.sendFlex(To, data)
                                sendTemplate2(To, Hallow)
                        elif Pbot == "mid":
                            if Dari in meM:
                                Fotter(To, Dari)
                        elif Pbot.startswith("getmid "):
                            if Dari in meM:
                                # NOTE(review): eval() on message metadata — untrusted input;
                                # ast.literal_eval (used elsewhere in this file) would be safer.
                                key = eval(msg.contentMetadata["MENTION"])
                                key1 = key["MENTIONEES"][0]["M"]
                                mi = me.getContact(key1)
                                data = {
                                    "type": "text",
                                    "text": "{}".format(key1),
                                    "sentBy": {
                                        "label": "вĻα¢к●σƒ●gαмєя",
                                        "iconUrl": "https://scontent.fcgk9-2.fna.fbcdn.net/v/t1.0-9/cp0/e15/q65/p851x315/51691304_1868204569955031_2146220437988704256_o.jpg?_nc_cat=103&efg=eyJpIjoiYiJ9&_nc_eui2=AeH2ckLWYQHsnNZ_h-dxkaE6z8BLc-ped-MztW4ZIVdUV-ntVbUtpxp-yrIWasU0oZ8NiwTRmKSqr0DSF8HXmDE7MZQ7aCk7ff-H_i1Gzo8g7w&_nc_oc=AQn9OIKwJojXlHshN7igjGYPAx0PGSK3ICR-Vyp57YXxp1cGQulKVLgPBiaFkJfI2Iw&_nc_ht=scontent.fcgk9-2.fna&oh=bf88bf2a4f06709e8f5d18310f26865c&oe=5DB50A10",
                                        "linkUrl": "https://bit.ly/38K8bbV"
                                    }
                                }
                                me.sendFlex(To, data)
                        if Pbot == "removechat" or Pbot == "hapus chat":
                            if Dari in meM:
                                me.removeAllMessages(opp2)
                                sendTemplate(To, "Chat dibersihkan...")
                        if Pbot == "#reboot" or Pbot == "#restart":
                            if Dari in meM:
                                Fotter(To, "Loading…")
                                set["restartPoint"] = To
                                Run_Xx()
                                Fotter(To, "Silahkan gunakan seperti semula...")
                        # "banlist": render each blacklisted contact as a carousel card.
                        if Pbot == "banlist":
                            if Dari in meM:
                                if set["blacklist"] == {}:
                                    sendTemplate(To, " Nothing Blacklist ")
                                else:
                                    pill = []
                                    for i in set["blacklist"]:
                                        ctk = me.getContact(i)
                                        # Hard cap to keep the payload within flex limits.
                                        if len(pill) >= 400:
                                            pass
                                        else:
                                            pill.append({"type": "bubble", "size": "nano",
                                                "body": {
                                                "type": "box",
                                                "layout": "vertical",
                                                "contents": [
                                                {
                                                "type": "image",
                                                "url": "https://encrypted-tbn0.gstatic.com/images?q=tbn%3AANd9GcSTjyGgn9g9AIbYJF6bFyRBQFPWAyMIrCBd_Jz-RIenlY-UbV8m",
                                                "size": "full",
                                                "aspectMode": "cover",
                                                "aspectRatio": "4:8",
                                                "gravity": "top"
                                                },
                                                {
                                                "type": "box",
                                                "layout": "vertical",
                                                "contents": [
                                                {
                                                "type":"text",
                                                "text": "{}".format(ctk.displayName),
                                                "size": "xxs",
                                                "align":"center",
                                                "margin": "none",
                                                "color": biruHitam,
                                                "wrap": True,
                                                "weight": "regular"
                                                }
                                                ],
                                                "position": "absolute",
                                                "cornerRadius": "0px",
                                                "offsetTop": "16px",
                                                "offsetStart": "8px",
                                                "backgroundColor": putih,
                                                "height": "22px",
                                                "width": "104px"
                                                },
                                                {
                                                "type": "box",
                                                "layout": "vertical",
                                                "contents": [
                                                {
                                                "type": "image",
                                                "url": "https://obs.line-scdn.net/{}".format(ctk.pictureStatus),
                                                "size": "full",
                                                "aspectMode": "cover",
                                                "aspectRatio": "4:6",
                                                "gravity": "top"
                                                }
                                                ],
                                                "position": "absolute",
                                                "cornerRadius": "0px",
                                                "offsetTop": "37px",
                                                "offsetStart": "8px",
                                                "height": "86px",
                                                "width": "104px"
                                                },
                                                {
                                                "type": "box",
                                                "layout": "vertical","cornerRadius": "3px","borderWidth": "2px","borderColor": biruHitam,
                                                "contents": [
                                                {
                                                "type": "image",
                                                "url": "https://encrypted-tbn0.gstatic.com/images?q=tbn%3AANd9GcSjybug2NU0iSDmZG6iLiSUQljYgjnMr-PfT0gsXw3uE-ldVu9m",
                                                "size": "full",
                                                "aspectMode": "cover",
                                                "aspectRatio": "4:3",
                                                "gravity": "top"
                                                }
                                                ],
                                                "position": "absolute",
                                                "cornerRadius": "0px",
                                                "offsetTop": "137px",
                                                "offsetStart": "5px",
                                                "height": "73px",
                                                "width": "109px"
                                                }
                                                ],
                                                "cornerRadius": "8px",
                                                "paddingAll": "0px",
                                                "paddingTop": "0px"
                                                }
                                                })
                                    # Send in carousel pages of 10 bubbles.
                                    k = len(pill)//10
                                    for aa in range(k+1):
                                        data = {"type":"flex","altText":"Data blacklist","contents":{"type":"carousel","contents":pill[aa*10 : (aa+1)*10]}}
                                        me.sendFlex(To, data)
                        if Pbot == "clearban":
                            if Dari in meM:
                                sendTemplate(To, "DAFTAR[%s] BAN\n[success deleted]" % (str(len(set["blacklist"]))))
                                set["blacklist"] = {}
                        if Pbot == "join":
                            if Dari in meM:
                                if set["PASUKAN"] == {}:
                                    sendTemplate(To, " Nothing Bots ")
                                else:
                                    me.inviteIntoGroup(To, set["PASUKAN"])
                        if Pbot.startswith("bot "):
                            if Dari in meM:
                                # NOTE(review): eval() on message metadata — untrusted input.
                                key = eval(msg.contentMetadata["MENTION"])
                                key["MENTIONEES"][0]["M"]
                                targets = []
                                for x in key["MENTIONEES"]:
                                    targets.append(x["M"])
                                for target in targets:
                                    if target in set["PASUKAN"]:
                                        sendTemplate(To, "Dia Sudah jadi pasukan")
                                    else:
                                        try:
                                            set["PASUKAN"][target] = True
                                            sendTemplate(To, "Berhasil add pasukan")
                                        except:
                                            pass
                        if Pbot == "changedpvideo":
                            if Dari in meM:
                                set['changeProfileVideo']['status'] = True
                                set['changeProfileVideo']['stage'] = 1
                                sendTemplate(To, "Type: Profile\n • Detail: Change Video Profile\n • Status: Waiting for video\nPlease send a video...")
                        # "tag"/"tagall"/"mention": mention all members, 20 per message.
                        if Pbot == "tag" or Pbot == "tagall" or Pbot == "mention":
                            if Dari in meM:
                                group = me.getGroup(To)
                                nama = [contact.mid for contact in group.members]
                                k = len(nama)//20
                                for a in range(k+1):
                                    txt = u''
                                    s=0
                                    b=[]
                                    for i in group.members[a*20 : (a+1)*20]:
                                        b.append(i.mid)
                                    mentionMembers(msg.to, b)
                        # "profil": open to anyone (no owner gate in the original).
                        if Pbot == "profil":
                            contact = me.getContact(Dari)
                            cover = me.getProfileCoverURL(Dari)
                            me.reissueUserTicket()
                            res = "╭━━━━━━━━━━━━━━━━━━━━╮\n├ ☠ Profile info\n├━━━━━━━━━━━━━━━━━━━━\n"
                            res += "├ ☠ Display Name :{}\n".format(contact.displayName)
                            res += "├ ☠ Mid: {}\n".format(contact.mid)
                            res += "├ ☠ Status Message\n├ ☠ {}\n".format(contact.statusMessage)
                            res += "╰━━━━━━━━━━━━━━━━━━━━╯"
                            sendTemplate2(To, res)
                            try:
                                poto = "https://os.line.naver.jp/os/p/{}".format(Dari)
                            except:
                                poto = "https://encrypted-tbn0.gstatic.com/images?q=tbn%3AANd9GcQcNdUbC8kEeVWqgR9qMX66lQ_hQPM8ScNY30x4nqpYaKY2jt02"
                            dax = {"type": "template","altText": "berak di celana","template": {"type": "image_carousel","columns": [{"imageUrl": poto,"layout": "horizontal","action": {"type": "uri","label": "PROFILE","uri": poto,"area": {"x": 447,"y": 356,"width": 1040,"height": 1040}}},{"imageUrl": cover,"layout": "horizontal","action": {"type": "uri","label": "COVER","uri": cover,"area": {"x": 447,"y": 356,"width": 1040,"height": 1040}}},{"imageUrl": "https://qr-official.line.me/L/"+me.getUserTicket().id+".png","layout": "horizontal","action": {"type": "uri","label": "CONTACT","uri": "https://line.me/ti/p/"+me.getUserTicket().id,"area": {"x": 447,"y": 356,"width": 1040,"height": 1040}}}]}}
                            me.sendFlex(To, dax)
                        if Pbot =="assalamualaikum":
                            me.sendMessage(To," وَالسَّلاَمُ عَلَيْكُمْ وَرَحْمَةُ اللهِ وَبَرَكَاتُه...")
                        if Pbot == "joinqr on" or Pbot == 'jointicket on':
                            if Dari in meM:
                                set["autoJoinTicket"] = True
                                sendTemplate(To, "Join ticket diaktifkan")
                        if Pbot == "joinqr off" or Pbot == 'jointicket off':
                            if Dari in meM:
                                set["autoJoinTicket"] = False
                                sendTemplate(To,"Autojoin Tiket dinonaktifkan")
                        if Pbot == "join on" or Pbot == 'join:on':
                            if Dari in meM:
                                set["autoJoin"] = True
                                sendTemplate(To, "Join diaktifkan")
                        if Pbot == "join off" or Pbot == 'join:off':
                            if Dari in meM:
                                set["autoJoin"] = False
                                sendTemplate(To,"Autojoin dinonaktifkan")
                        if Pbot == "r1:on" or Pbot == 'r1 on':
                            if Dari in meM:
                                set["detectMention"] = True
                                sendTemplate(To, "Respon diaktifkan")
                        if Pbot == "r1:off" or Pbot == 'respon1 off':
                            if Dari in meM:
                                set["detectMention"] = False
                                sendTemplate(To,"Respon dinonaktifkan")
                        if Pbot == "pm:on" or Pbot == 'pm on':
                            if Dari in meM:
                                set["arespon"] = True
                                sendTemplate(To, "Respon pm diaktifkan")
                        if Pbot == "pm:off" or Pbot == 'responpm off':
                            if Dari in meM:
                                set["arespon"] = False
                                sendTemplate(To,"Respon pm dinonaktifkan")
                        if Pbot.startswith("addblc "):
                            if Dari in meM:
                                # NOTE(review): eval() on message metadata — untrusted input.
                                key = eval(msg.contentMetadata["MENTION"])
                                key["MENTIONEES"][0]["M"]
                                targets = []
                                for x in key["MENTIONEES"]:
                                    targets.append(x["M"])
                                for target in targets:
                                    if target in set["blacklist"]:
                                        Fotter(To, "Dia Sudah ada di blacklist")
                                    else:
                                        try:
                                            set["blacklist"][target] = True
                                            sendTemplate(To, "Berhasil tambah blacklist")
                                        except:
                                            pass
                        # Auto-join any group ticket link seen in the message.
                        if "/ti/g/" in Pbot:
                            if set["autoJoinTicket"] == True:
                                link_re = re.compile('(?:line\:\/|line\.me\/R)\/ti\/g\/([a-zA-Z0-9_-]+)?')
                                links = link_re.findall(text)
                                n_links = []
                                for l in links:
                                    if l not in n_links:
                                        n_links.append(l)
                                for ticket_id in n_links:
                                    group = me.findGroupByTicket(ticket_id)
                                    me.acceptGroupInvitationByTicket(group.id,ticket_id)
                                    sendTemplate2(To, "Succes masuk group : %s" % str(group.name))
        # --- second op 25/26 pass: bot on/off toggles (runs even when the
        # first pass already handled the message) ------------------------------
        if op.type in [25, 26]:
            msg = op.message
            text = msg.text
            Id = msg.id
            To = msg.to
            Dari = msg._from
            if msg.contentType == 0:
                if text is None:
                    return
            if msg.toType == 0 or msg.toType == 2:
                if msg.toType == 0:
                    to = msg.to
                elif msg.toType == 2:
                    to = msg.to
            if msg.contentType == 0:
                if text is None:
                    print("[0] SEND COMMAND")
                    return
                else:
                    msg = op.message
                    text = msg.text
                    Pbot = Comt(text)
                    Dari = msg._from
                    To = msg.to
                    Id = msg.id
                    if Pbot == "bot:off" or Pbot == "matikan":
                        print ("NOTIF BOT NON ACTIVE")
                        if Dari in meM:
                            RunTheRun(To,Dari, "RESULT\n")
                            print("""
BOT TEMPLATE
VERSION : INEXBOTS
REVISION : VPS-TERMUX
{}
""".format(jamtgl))
                            Fotter(To, "Ok I'am Turn down "+me.getContact(Dari).displayName)
                            set["bot"] = False
                    if Pbot == "bot:on" or Pbot == "aktifkan":
                        print ("NOTIF BOT ACTIVE")
                        if Dari in meM:
                            Fotter(To, "Already Ok "+me.getContact(Dari).displayName)
                            RunTheRun(To,Dari, "RESULT\n")
                            print("""
BOT TEMPLATE
VERSION : INEXBOTS
REVISION : VPS-TERMUX
{}
""".format(jamtgl))
                            set["bot"] = True
                            set["Conection"] = To
    except Exception as error:
        logError(error)
        print (error)
# Main poll loop: fetch up to 50 pending operations per trace and hand each
# one to bot() on its own thread so slow handlers don't block polling.
while True:
    try:
        ops = oepoll.singleTrace(count=50)
        if ops is not None:
            for op in ops:
                # Acknowledge the revision before dispatching so the same
                # operation is not delivered again on the next trace.
                oepoll.setRevision(op.revision)
                thread = threading.Thread(target=bot, args=(op,))
                thread.start()
    except Exception as error:
        logError(error)
        print(error)
# -*- coding: utf-8 -*-
import xBot
from xBot import *
from akad.ttypes import *
from multiprocessing import Pool, Process
from akad.ttypes import ContentType as Type
from akad.ttypes import ChatRoomAnnouncementContents
from akad.ttypes import Location
from akad.ttypes import ChatRoomAnnouncement
from akad.ttypes import LoginRequest
from akad import LineService
from datetime import datetime
import time,random,sys,json,codecs,threading,glob,re,os,subprocess,asyncio
from datetime import datetime, timedelta
from time import sleep
from bs4 import BeautifulSoup
from threading import Thread,Event
from humanfriendly import format_timespan, format_size, format_number, format_length
import time, random, sys, json, codecs, threading, glob, re, string, os, requests, subprocess, six, ast, urllib, urllib.parse,youtube_dl,pafy,timeit,atexit,traceback,ffmpy,humanize,pytz
from gtts import gTTS
from googletrans import Translator
from pytz import timezone
# Shared HTTP session and process start timestamp.
_session = requests.session()
botStart = time.time()
#WARNA
# Hex color palette used throughout the flex payloads (Indonesian names).
merah = "#FF2800"
kuning = "#FFFD00"
hijau = "#83FF00"
hijauMuda = "#00FF00"
unguTua = "#9900FF"
merahHati = "#660000"
biru = "#00DAFF"
biruTua = "#0000FF"
ungu = "#C323FF"
nila = "#4B0082"
pingTua = "#FF17CE"
ping = "#FF17CE"
hitam = "#000000"
putih = "#FFFFFF"
jambon = "#FF0066"
biruHitam = "#330033"
abuabu = "#000000cc"
# Reusable flex "separator" fragments, one per palette color.
sp_putih = {"type": "separator","color": putih}
sp_nila = {"type": "separator","color": nila}
sp_hitam = {"type": "separator","color": hitam}
sp_kuning = {"type": "separator","color": kuning}
sp_biru = {"type": "separator","color": biruTua}
sp_hijau = {"type": "separator","color": hijau}
sp_merah = {"type": "separator","color": merahHati}
sp_ungu = {"type": "separator","color": unguTua}
sp_ping = {"type": "separator","color": pingTua}
sp_abuabu = {"type": "separator","color": abuabu}
# Pre-built flex bubble "styles" blocks keyed by accent color.
style_hijau={"header":{"backgroundColor":abuabu},"body":{"backgroundColor":abuabu},"footer":{"backgroundColor":abuabu,"separator":True,"separatorColor":hijau}}
style_nila={"header":{"backgroundColor":nila},"body":{"backgroundColor":nila},"footer":{"backgroundColor":nila,"separator":True,"separatorColor":biruTua}}
style_edit={"header":{"backgroundColor":abuabu},"body": {"cornerRadius": "md","borderWidth": "5px","borderColor": unguTua},"footer":{"backgroundColor":abuabu,"separator":True,"separatorColor":hijau}}
style_merah={"header":{"backgroundColor":abuabu},"body":{"backgroundColor":abuabu},"footer":{"backgroundColor":abuabu,"separator":True,"separatorColor":merah}}
style_biru={"header":{"backgroundColor":abuabu},"body":{"backgroundColor":abuabu},"footer":{"backgroundColor":abuabu,"separator":True,"separatorColor":biru}}
style_kuning={"header":{"backgroundColor":abuabu},"body":{"backgroundColor":abuabu},"footer":{"backgroundColor":abuabu,"separator":True,"separatorColor":kuning}}
style_ungu={"header":{"backgroundColor":abuabu},"body":{"backgroundColor":abuabu},"footer":{"backgroundColor":abuabu,"separator":True,"separatorColor":ungu}}
style_putih={"header":{"backgroundColor":putih},"body":{"backgroundColor":putih},"footer":{"backgroundColor":putih,"separator":True,"separatorColor":hitam}}
style_hitam={"header":{"backgroundColor":hitam},"body":{"backgroundColor":hitam},"footer":{"backgroundColor":hitam,"separator":True,"separatorColor":putih}}
Pabuabu = "https://bakarenders.com/renders/albums/userpics/11535/normal_hatsunemiku_-_loveiswar2.png"
# Random accent colors picked once at startup.
Warna1 = (merah,hijau,biru,hijauMuda)
Warna2 = (biruTua,merahHati,unguTua,nila)
Warna3 = (putih,kuning,pingTua)
warnanya1 = random.choice(Warna1)
warnanya2 = random.choice(Warna2)
warnanya3 = random.choice(Warna3)
# URL templates for LINE profile pictures ({} = pictureStatus / mid).
pict1 = "https://os.line.naver.jp/os/p/{}"
pict2 = "https://obs.line-scdn.net/{}"
#poto
logo = "https://1.bp.blogspot.com/-6T7oMDOIlKA/XVX_8-oO52I/AAAAAAAAGe0/W0MubSIIyUUzw3et2YifTWqxaNRRwWE-ACLcBGAs/s1600/20190816_075636.png"
image1 = "https://1.bp.blogspot.com/-zyUmsriCmGE/XVYAO-lsFLI/AAAAAAAAGe8/BsSUwtUfFc0mxRGxE_8fOz3peuxB3t9UwCLcBGAs/s1600/20190816_074821.jpg"
image2 = "https://1.bp.blogspot.com/-zK32-fvqcNw/XVYAUCQhrmI/AAAAAAAAGfA/hXKs0MS2OIMKi09tJ7yCjnjUbMiuV_TIACLcBGAs/s1600/20190816_074438.jpg"
image3 = "https://1.bp.blogspot.com/-OgPmr5eJpYg/XVYAVFAYcaI/AAAAAAAAGfE/Xwh0EqB_SrclP-NZ_DaDqxcYnWBZSa_FgCLcBGAs/s1600/20190816_074311.jpg"
a1 = "https://1.bp.blogspot.com/-pBMROlmPtdg/XXJF_q2cQCI/AAAAAAAAG1E/7TRgM49vMZAXvAEhtVbO--zPCqvdh8SBACLcBGAs/s1600/20190906_183218.jpg"
a2 = "https://1.bp.blogspot.com/-rTWcjzSC8uk/XXJF5YVuADI/AAAAAAAAG1A/noFsixwxTHoE9DDSoSbQHLs-OXnY5udCACLcBGAs/s1600/20190906_183534.jpg"
Gambar = (image1,image2,image3)
a3 = (a1,a2)
Hasile = random.choice(Gambar)
Aa1 = random.choice(a3)
# NOTE(review): `logo` is re-assigned to the same URL and warnanya1-3 are
# re-randomized here, discarding the Warna1/2/3 choices made above.
logo = "https://1.bp.blogspot.com/-6T7oMDOIlKA/XVX_8-oO52I/AAAAAAAAGe0/W0MubSIIyUUzw3et2YifTWqxaNRRwWE-ACLcBGAs/s1600/20190816_075636.png"
Warna = (merah,kuning,hijau,biru,ping,ungu)
warnanya1 = random.choice(Warna)
warnanya2 = random.choice(Warna)
warnanya3 = random.choice(Warna)
print("\n____________________________[SELFBOT]____________________________")
# SECURITY(review): hardcoded LINE auth token committed in source — anyone
# with this file can control the account. Should be loaded from env/config.
me = LINE("EPCKGkxPfnhkgU5BIIId.u3Oosz4qA0t+5TyeTrb17q.xsHVM/bXCcqxwR+Yil7Fjj9RymOU0xb7OMsKLfkDyiw=")
me.log("Auth Token : " + str(me.authToken))
# This account's mid; used throughout as the owner check.
meM = me.getProfile().mid
me.log("MID : " + str(meM))
print("""
░▀░ █▀▀▄ █▀▀ █░█ █▀▀▄ █▀▀█ ▀▀█▀▀ █▀▀
▀█▀ █░░█ █▀▀ ▄▀▄ █▀▀▄ █░░█ ░░█░░ ▀▀█
▀▀▀ ▀░░▀ ▀▀▀ ▀░▀ ▀▀▀░ ▀▀▀▀ ░░▀░░ ▀▀▀
▄█░ ░█▀█░ ▄ █▀▀█ ▄▀▀▄ ▄ █▀█ █▀▀█ ▄█░ ▄▀▀▄
░█░ █▄▄█▄ ░ █▄▀█ █▄▄░ ░ ░▄▀ █▄▀█ ░█░ ▀▄▄█
▄█▄ ░░░█░ ▀ █▄▄█ ▀▄▄▀ ▀ █▄▄ █▄▄█ ▄█▄ ░▄▄▀ """)
# Text shown by the "help" command (sendTemplate2 body).
Hallow = """
Bot @
Addblc @
Join
Banlist
Clearban
"""
oepoll = OEPoll(me)
St = "┣"
Zx = [me]
meProfile = me.getProfile()
meSettings = me.getSettings()
# Runtime configuration / feature flags for the bot.
# NOTE(review): the name `set` shadows the Python builtin set() for the rest
# of the module — renaming would require touching every handler.
set = {
    "Picture": False,
    "bot": True,              # master on/off switch ("bot:on"/"bot:off")
    "Conection": "",
    "foto": {},
    "Ids":{},
    "keyCommand":"",          # command prefix stripped by Comt()
    "changeProfileVideo": {
        "picture": "",
        "stage": 2,
        "status": False,
        "video": ""
    },
    "PASUKAN": {},            # protected member mids
    "setKey": False,
    "autoRead": False,
    "owner":{},
    "staff": {},
    "admin":{},
    "autoBlock": False,
    "detectMention": True,    # group tag auto-reply ("r1:on"/"r1:off")
    "detectMention2": False,
    "arespon":True,           # PM tag auto-reply ("pm:on"/"pm:off")
    "blacklist":{},           # mids auto-kicked on group join
    "checkSticker": False,
    "autoJoinTicket": True,
    "autoJoin": True,
    "autoAdd": True,
    "autoLeave": False,
    "limitkick": False,
    "contact": False,
    "autoJoinMessage": "ᴛᴇʀɪᴍᴀᴋᴀsɪʜ ᴋᴀᴋᴀ ᴀᴛᴀs ᴜɴᴅᴀɴɢᴀɴ ɢʀᴜᴘɴʏᴀ.",
    "comment": "ᴀᴜᴛᴏ ʟɪᴋᴇ ɴ ᴄᴏᴍᴍᴇɴᴛ ᴅᴏɴᴇ\nвʏ.ᴛᴇᴀᴍ ⊶ [B.O.G] ⊷",
    "comment2": "┏━━━━━━━━━•❅•°•❈•°•❅•━━━━━━━━┓\n┃┏━━━━━━━━•❅•°•❈•°•❅•━━━━━━━┓\n┃┃ ❀ [ BLACK_OF_GAMER ] ❀\n┃┗━━━━━━━━•❅•°•❈•°•❅•━━━━━━━┛\n├━━━━━━━━━━━━━━━━━━━━━━━━\n┃┏━━━━━━━━•❅•°•❈•°•❅•━━━━━━━┓\n┃┃ LIKE N COMMENT DONE\n┃┃ IKUTAN CORET-CORET\n┃┃ B.O.G_TEAM\n┃┗━━━━━━━━•❅•°•❈•°•❅•━━━━━━━┛\n├━━━━━━━━━━━━━━━━━━━━━━━━\n┃┏━━━━━━━━•❅•°•❈•°•❅•━━━━━━━┓\n┃┃ AciL :\n┃┃ http://line.me/ti/p/~adiputra.95\n┃┃ Denjaka :\n┃┃ https://bit.ly/38K8bbV\n┃┗━━━━━━━━•❅•°•❈•°•❅•━━━━━━━┛\n┗━━━━━━━━━•❅•°•❈•°•❅•━━━━━━━━┛",
    "mention":"ᴋᴀʟᴏ ɴɢɪɴᴛɪᴘ ᴛᴇʀᴜs ᴅᴀᴘᴇᴛ ɢᴇʟᴀs ᴘᴇᴄᴀʜ ᴅɪ ᴋᴇᴘᴀʟᴀ...",
    "Respontag":"https://youtube.com/channel/UCu5Aqj6zqJK59pXxNGw8HMg",
    "Respontag2":"ada apa tag saya d grup kak?",
    "tagpm":"subcrabe channelku donk kak\nhttps://youtube.com/channel/UCu5Aqj6zqJK59pXxNGw8HMg",
    "welcome":"ѕєĻαмαт đαтαηg,,,, вυđαуαкαη ¢єк ησтє кαк",
    "message":"тᴇяıмᴀ кᴀsıн suᴅᴀн ᴀᴅᴅ sᴀʏᴀ \nвʏ.ᴛᴇᴀᴍ \n⊶ вĻα¢к●σƒ●gαмєя ⊷",
    "baper":"ѕєĻαмαт тιηggαĻ тємαη,,, ѕємσgα єηgкαυ тєηαηg đι ѕαηα●",
    # Rotating user agents for outbound HTTP requests.
    "userAgent": [
        "Mozilla/5.0 (X11; U; Linux i586; de; rv:5.0) Gecko/20100101 Firefox/5.0",
        "Mozilla/5.0 (X11; U; Linux amd64; rv:5.0) Gecko/20100101 Firefox/5.0 (Debian)",
        "Mozilla/5.0 (X11; U; Linux amd64; en-US; rv:5.0) Gecko/20110619 Firefox/5.0",
        "Mozilla/5.0 (X11; Linux) Gecko Firefox/5.0",
        "Mozilla/5.0 (X11; Linux x86_64; rv:5.0) Gecko/20100101 Firefox/5.0 FirePHP/0.5",
        "Mozilla/5.0 (X11; Linux x86_64; rv:5.0) Gecko/20100101 Firefox/5.0 Firefox/5.0",
        "Mozilla/5.0 (X11; Linux x86_64) Gecko Firefox/5.0",
        "Mozilla/5.0 (X11; Linux ppc; rv:5.0) Gecko/20100101 Firefox/5.0",
        "Mozilla/5.0 (X11; Linux AMD64) Gecko Firefox/5.0",
        "Mozilla/5.0 (X11; FreeBSD amd64; rv:5.0) Gecko/20100101 Firefox/5.0",
        "Mozilla/5.0 (Windows NT 6.2; WOW64; rv:5.0) Gecko/20100101 Firefox/5.0",
        "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:5.0) Gecko/20110619 Firefox/5.0",
        "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:5.0) Gecko/20100101 Firefox/5.0",
        "Mozilla/5.0 (Windows NT 6.1; rv:6.0) Gecko/20100101 Firefox/5.0",
        "Mozilla/5.0 (Windows NT 6.1.1; rv:5.0) Gecko/20100101 Firefox/5.0",
        "Mozilla/5.0 (Windows NT 5.2; WOW64; rv:5.0) Gecko/20100101 Firefox/5.0",
        "Mozilla/5.0 (Windows NT 5.1; U; rv:5.0) Gecko/20100101 Firefox/5.0",
        "Mozilla/5.0 (Windows NT 5.1; rv:2.0.1) Gecko/20100101 Firefox/5.0",
        "Mozilla/5.0 (Windows NT 5.0; WOW64; rv:5.0) Gecko/20100101 Firefox/5.0",
        "Mozilla/5.0 (Windows NT 5.0; rv:5.0) Gecko/20100101 Firefox/5.0"
    ]
}
# State for the "sider" (lurker-watch) feature.
cctv = {
    "cyduk":{},
    "point":{},
    "sidermem":{}
}
# Mid of the script author's contact, used for attribution.
Line_Apikey = "u951e70feab1a2b4f38fc1390f776a31b"
cont = me.getContact(meM)
Extr = me.getContact(Line_Apikey).displayName
for busht in Zx:
    # NOTE(review): this iterates the CHARACTERS of the mid string and calls
    # findAndAddContactsByMid once per character; every call presumably fails
    # and is swallowed. Likely intended: iterate [Line_Apikey]. Confirm before
    # changing — fixing it would start adding a contact at startup.
    for anding in Line_Apikey:
        try:
            busht.findAndAddContactsByMid(anding)
        except:pass
# Startup clock snapshot (Asia/Jakarta) used in status reports.
mulai = time.time()
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
jamtgl = timeNow.strftime('|📆|%d/%B/%Y|⏰|%X|')
jam = timeNow.strftime('⏰ %X')
tgl = timeNow.strftime('📆 %d/%B/%Y')
def runtime(secs):
    """Format a duration given in seconds as 'DD Hari HH Jam MM Menit SS Detik'."""
    total_minutes, seconds = divmod(secs, 60)
    total_hours, minutes = divmod(total_minutes, 60)
    days, hours = divmod(total_hours, 24)
    return '%02d Hari %02d Jam %02d Menit %02d Detik' % (days, hours, minutes, seconds)
def Run_Xx():
    # Restart the bot in place: persist state, then replace the current
    # process with a fresh interpreter running the same script/argv.
    backupData()
    python = sys.executable
    os.execl(python, python, *sys.argv)
def logError(text):
    """Report an error through the LINE client log and append it, with an
    Indonesian-named timestamp (Asia/Jakarta), to errorLog.txt.

    Args:
        text: the error/exception to record (stringified).
    """
    me.log("ERROR 404 !\n" + str(text))
    tz = pytz.timezone("Asia/Jakarta")
    now = datetime.now(tz=tz)
    hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
    bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli",
             "Agustus", "September", "Oktober", "November", "Desember"]
    # %w yields the weekday as 0=Sunday..6=Saturday, which matches the order
    # of `hari` — this avoids matching locale-dependent English %A names.
    nama_hari = hari[int(now.strftime('%w'))]
    # now.month is 1-12. The original compared the zero-padded '%m' string
    # against str(k), which never matched for Jan-Sep or Dec, leaving a
    # numeric month in the log line.
    nama_bulan = bulan[now.month - 1]
    # Named `stamp` (the original used `time`, shadowing the time module).
    stamp = "%s, %s - %s - %s | %s" % (
        nama_hari, now.strftime('%d'), nama_bulan,
        now.strftime('%Y'), now.strftime('%H:%M:%S'))
    with open("errorLog.txt", "a") as log_file:
        log_file.write("\n[%s] %s" % (stamp, text))
def mentionMembers(to, mid):
    """Send one message to chat `to` that @-mentions every user id in `mid`.

    Builds a numbered list where each entry is the "@x" placeholder; the
    MENTIONEES metadata carries character offsets (S/E) into the final text
    so the client renders each placeholder as a tappable mention.
    """
    try:
        arrData = ""
        # Header box + the "1." prefix for the first entry.
        textx = "╭━──────────────━╮\n│➢Total「{}」Members\n╰━──────────────━╯\n╭━──────────────━╮\n│➢ 1. ".format(str(len(mid)))
        arr = []
        no = 1
        num = 2
        for i in mid:
            mention = "@x\n"
            # Offsets must be computed BEFORE appending the placeholder:
            # S = start of "@x", E = end (excludes the trailing newline).
            slen = str(len(textx))
            elen = str(len(textx) + len(mention) - 1)
            arrData = {'S':slen, 'E':elen, 'M':i}
            arr.append(arrData)
            textx += mention
            if no < len(mid):
                # Not the last entry yet: append the "N." prefix for the next one.
                no += 1
                textx += "│➢ %i. " % (num)
                num=(num+1)
            else:
                # Last entry reached; `no` is reused to hold a footer label
                # (NOTE(review): this footer is never appended to the message).
                try:
                    no = "\n╰━─[ {} ]".format(str(me.getGroup(to).name))
                except:
                    no = "\n╰━─[ Success ]"
        me.sendMessage(to, textx+"╰━──────────────━╯", {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
    except Exception as error:
        logError(error)
        me.sendMessage(to, "[ INFO ] Error :\n" + str(error))
# Credit/promo text addressed to the original script author (sent via the
# commented-out me.sendMessage call in the "me" command handler below).
Devert = "Thank you brothers\nMy name is "+cont.displayName+" use your bot script\nSubcrabe my chanel youtube\nhttps://youtube.com/channel/UCu5Aqj6zqJK59pXxNGw8HMg"
def changeProfileVideo(to):
    """Upload the staged picture+video pair as a LINE video profile.

    Reads the file paths previously staged in set['changeProfileVideo'] by
    the message handlers, uploads the video to the OBS endpoint, then applies
    the picture as the 'vp' profile. Progress/errors are reported to chat `to`.
    """
    profile = set['changeProfileVideo']
    if profile['picture'] is None:
        return me.sendMessage(to, "Foto tidak ditemukan")
    if profile['video'] is None:
        return me.sendMessage(to, "Video tidak ditemukan")
    obs_params = me.genOBSParams({'oid': me.getProfile().mid, 'ver': '2.0', 'type': 'video', 'cat': 'vp.mp4'})
    # Close the video file handle when the upload finishes — the original
    # opened it and never closed it.
    with open(profile['video'], 'rb') as video_file:
        r_vp = me.server.postContent(
            '{}/talk/vp/upload.nhn'.format(str(me.server.LINE_OBS_DOMAIN)),
            data={'params': obs_params},
            files={'file': video_file})
    # The OBS endpoint answers 201 Created on success.
    if r_vp.status_code != 201:
        return me.sendMessage(to, "Gagal update profile")
    profile['status'] = False
    me.updateProfilePicture(profile['picture'], 'vp')
# Display name of the Line_Apikey contact, padded for later concatenation.
# NOTE(review): not referenced elsewhere in this chunk — confirm it is used.
extras = " "+Extr+"\n"
def sendTemplate(to, text):
    """Send `text` to chat `to` inside a small bordered flex bubble
    (black body, team-colored separators)."""
    sep = {"type":"separator","color":warnanya1}
    label = {"text": text ,"size":"xxs","align":"center","color": warnanya1,"wrap":True,"weight":"bold","type":"text"}
    inner_row = {
        "contents": [sep, label, sep],
        "type": "box", "spacing": "md", "layout": "horizontal",
    }
    framed = {"contents": [sep, inner_row, sep], "type": "box", "layout": "vertical"}
    bubble = {
        "type": "bubble", "size": "micro",
        "styles": {"body": {"backgroundColor": "#000000"}},
        "body": {
            "cornerRadius": "md", "borderWidth": "5px", "borderColor": biruTua,
            "contents": [framed], "type": "box", "layout": "vertical",
        },
    }
    me.sendFlex(to, {"type": "flex", "altText": " Black Of Gamers", "contents": bubble})
def sendTemplate2 (to,text):
    """Send `text` to chat `to` in the large branded flex bubble: team title,
    banner image, then the message body in yellow."""
    data = { "type": "flex","altText": " Assalamu'alaikumm","contents":
        {"type": "bubble","styles": style_biru,"type":"bubble","size":"kilo","body":
        {"cornerRadius": "md","borderWidth": "5px","borderColor": biruTua,"contents":[{"contents":[{"type":"separator","color":"#ffffff"},
        # Title row.
        {"contents":[sp_putih,
        {"text":"вĻα¢к●σƒ●gαмєя","size":"md","align":"center","color":"#BE1700","wrap":True,"weight":"bold","type":"text"},
        sp_putih
        ],"type":"box","spacing":"md","layout":"horizontal"},
        sp_putih],"type":"box","layout":"vertical"},
        # Banner image row.
        {"contents":[sp_putih,
        {"contents":[sp_putih,
        {"type": "image","url": "https://1.bp.blogspot.com/-6T7oMDOIlKA/XVX_8-oO52I/AAAAAAAAGe0/W0MubSIIyUUzw3et2YifTWqxaNRRwWE-ACLcBGAs/s1600/20190816_075636.png","size": "full","aspectRatio": "3:1"},
        sp_putih
        ],"type":"box","spacing":"md","layout":"horizontal"},
        sp_putih],"type":"box","layout":"vertical"},
        # Message body row.
        {"contents": [sp_putih,
        {"contents":[sp_putih,
        {"text": text,"size":"xs","color":kuning,"wrap":True,"weight":"bold","type":"text"},
        sp_putih],"type":"box","spacing":"md","layout":"horizontal"},
        sp_putih],"type":"box","layout":"vertical"},
        ],"type":"box","spacing":"xs","layout":"vertical"}},}
    me.sendFlex(to, data)
def Fotter(to, text):
    """Send `text` as a plain message decorated with the team's sender badge
    (label, icon, and link shown above the bubble)."""
    sender_badge = {
        "label": "вĻα¢к ● σƒ ● gαмєя",
        "iconUrl": "https://encrypted-tbn0.gstatic.com/images?q=tbn%3AANd9GcSi2LaC4ftZz21mtSDA3YkylLb6lgqncx_uxOp-wdyAlIqsVsJ1",
        "linkUrl": "https://bit.ly/38K8bbV",
    }
    me.sendFlex(to, {"type": "text", "text": text, "sentBy": sender_badge})
def RunTheRun(to, mid, firstmessage):
    """Send a status report to chat `to`, @-mentioning user `mid`.

    The report includes uptime, group/friend counts, the auth token, mid,
    and contact link of this account.
    """
    try:
        arrData = ""
        text = "%s " %(str(firstmessage))
        arr = []
        mention = "@x \n"
        # Mention offsets into the final text (computed before appending).
        slen = str(len(text))
        elen = str(len(text) + len(mention) - 1)
        arrData = {'S':slen, 'E':elen, 'M':mid}
        arr.append(arrData)
        # NOTE(review): 2018-07-25 is in the past, so `future - today` is
        # negative — looks like a stale VPS-expiry date. TODO confirm.
        today = datetime.today()
        future = datetime(2018,7,25)
        hari = (str(future - today))
        comma = hari.find(",")
        hari = hari[:comma]
        teman = me.getAllContactIds()
        gid = me.getGroupIdsJoined()
        tz = pytz.timezone("Asia/Jakarta")
        timeNow = datetime.now(tz=tz)
        # Uptime since the module-level `mulai` start timestamp.
        eltime = time.time() - mulai
        bot = runtime(eltime)
        h = me.getContact(meM)
        # Refresh the user ticket before exposing the add-friend URL.
        me.reissueUserTicket()
        My_Id = "http://line.me/ti/p/"+me.getUserTicket().id
        text += mention+"WAKTU :\n"+jamtgl+"\n\nMY GROUP : "+str(len(gid))+"\n\nMY FRIEND : "+str(len(teman))+"\n\nTIME VPS : In "+hari+"\n\nINEX_TEAM. ʟɪɴᴇ ᴠᴇʀ.8.14.2\nRUN : "+bot+"\n\nMY TOKEN :\n"+str(me.authToken)+"\n\nMY MID : \n"+h.mid+"\nMY ID LINE : "+My_Id+"\n\nCHANEL YOUTUBE\n"+set["Respontag"]
        me.sendMessage(to, text, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
    except Exception as error:
        print("Error :\n" + str(error))
def Comt(text):
    """Normalize an incoming message into a command string.

    Lower-cases `text`; if it starts with the configured command prefix
    (set["keyCommand"]), the prefix is stripped and the remainder returned.
    Otherwise the literal string "command" (matched by no handler) is
    returned so callers can ignore the message.
    """
    pesan = text.lower()
    if pesan.startswith(set["keyCommand"]):
        # Strip only the leading prefix. The original used str.replace,
        # which also deleted any later occurrence of the prefix inside the
        # command's arguments.
        return pesan[len(set["keyCommand"]):]
    return "command"
def bot(op):
    """Handle one long-poll operation from the LINE server.

    Dispatches on op.type: group invites (13), member events (19/32/17/13),
    received messages (26), and message send/receive (25/26) which carries
    the whole text-command dispatcher. Runs on its own thread (see the poll
    loop at the bottom of the file); all errors funnel into logError.
    """
    global time
    global ast
    global groupParam
    opp1 = op.param1
    opp2 = op.param2
    opp3 = op.param3
    try:
        # Log every operation type except no-op (0) / None.
        if op.type is None:
            pass
        else:
            if op.type == 0:
                pass
            else :
                print("[ {} ] {}".format(str(op.type), OpType._VALUES_TO_NAMES[op.type]))
        # --- op 13: we were invited to a group -> optional auto-join, then
        # kick any blacklisted members found in the joined group.
        if op.type == 13:
            if meM in opp3:
                if set["autoJoin"] == True:
                    me.acceptGroupInvitation(opp1)
                    wr = me.getGroup(opp1)
                    ban = [contact.mid for contact in wr.members]
                    for x in ban:
                        if x in set["blacklist"]:
                            try:
                                me.kickoutFromGroup(opp1,[x])
                            except:pass
                            print("blacklist kick ok")
        # --- ops 19/32/17/13: protect "PASUKAN" (squad) accounts — whoever
        # acted on one of them (and is not us/squad) gets blacklisted.
        if op.type in [19,32,17,13]:
            if opp3 in set["PASUKAN"]:
                if opp2 in meM and opp2 in set["PASUKAN"]:
                    pass
                else:
                    Nam = me.getContact(opp2).displayName
                    set["blacklist"][opp2] = True
                    # Announce once; retry a single time if the send fails.
                    try:
                        sendTemplate(opp1,Nam+"\nBanlist true")
                    except:
                        try:
                            sendTemplate(opp1,Nam+"\nBanlist true")
                        except:pass
        # --- op 26 (receive message): sticker-info and mention responders.
        if op.type == 26:
            if set["bot"] == True:
                msg = op.message
                text = msg.text
                Id = msg.id
                To = msg.to
                Dari = msg._from
                to = msg.to
                if msg.contentType == 0:
                    if text is None:
                        return
                if msg.toType == 2:
                    # NOTE(review): the inner toType == 0 branch is
                    # unreachable inside this toType == 2 block.
                    if msg.toType == 0:
                        to = msg._from
                    elif msg.toType == 2:
                        to = msg.to
                # contentType 7 = sticker: when "checkSticker" is armed,
                # reply once with the sticker's ids, preview and shop link.
                if msg.contentType == 7:
                    if set["checkSticker"] == True:
                        msg.contentType = 0
                        stk_id = msg.contentMetadata['STKID']
                        stk_ver = msg.contentMetadata['STKVER']
                        pkg_id = msg.contentMetadata['STKPKGID']
                        ret_ = "╔══[ Sticker Info ]"
                        ret_ += "\n╠ ID : {}".format(stk_id)
                        ret_ += "\n╠ PACKAGES ID : {}".format(pkg_id)
                        ret_ += "\n╠ VERSION : {}".format(stk_ver)
                        ret_ += "\n╠ URL : line://shop/detail/{}".format(pkg_id)
                        ret_ += "\n╚══[ Finish ]"
                        # NOTE(review): `patih` is built but never used.
                        patih = "http://dl.stickershop.line.naver.jp/products/0/0/{}/{}/android/stickers/{}.png".format(str(stk_ver),(pkg_id),(stk_id))
                        path = "https://stickershop.line-scdn.net/stickershop/v1/sticker/{}/IOS/sticker.png".format(stk_id)
                        data = { "type": "flex","altText": " Assalamu'alaikumm","contents":
                            {"type": "bubble","size":"micro",
                            "styles":{"body":{"backgroundColor":"#000000"},"footer":{"backgroundColor":"#800000"}},
                            "type":"bubble","body":
                            {"cornerRadius": "md","borderWidth": "5px","borderColor": biruTua,"contents":[
                            {"contents":[
                            sp_putih,
                            sp_putih,
                            {"contents":[
                            sp_putih,
                            {"text":"🆂🆃🅸🅲🅺🅴🆁 🅲🅷🅴🅲🅺","size":"xs","align":"center","color":"#ffff00","wrap":True,"weight":"bold","type":"text"},
                            sp_putih
                            ],"type":"box","spacing":"md","layout":"horizontal"},
                            sp_putih,
                            {"contents":[
                            sp_putih,
                            sp_putih,
                            {"url": image1,"type":"image"},
                            sp_putih,
                            sp_putih,
                            {"type":"image","url": logo,"size":"xl"},
                            sp_putih,
                            sp_putih
                            ],"type":"box","spacing":"md","layout":"horizontal"},
                            sp_putih,
                            {"contents":[
                            sp_putih,
                            {"text": str(ret_),"size":"xxs","color":"#33ffff","wrap":True,"weight":"bold","type":"text"},
                            sp_putih
                            ],"type":"box","spacing":"md","layout":"horizontal"},
                            sp_putih
                            ],"type":"box","layout":"vertical"},
                            ],"type":"box","spacing":"xs","layout":"vertical"}},}
                        me.sendFlex(to, data)
                        # Second message: tappable sticker preview linking to the shop.
                        datanya = {
                            "type": "template","altText": "Bagi tikel donk",
                            "template":
                            {"type": "image_carousel",
                            "columns": [
                            {"imageUrl": path,
                            "layout": "horizontal",
                            "action":
                            {"type": "uri","label": "JAJAN TIKEL","uri": "line://shop/detail/{}".format(pkg_id),"area": {"x": 447,"y": 356,"width": 1040,"height": 1040}}}]}}
                        me.sendFlex(to, datanya)
                        # One-shot: disarm after reporting a single sticker.
                        set["checkSticker"] = False
                # Text message: respond when this account is @-mentioned.
                if msg.contentType == 0:
                    if 'MENTION' in msg.contentMetadata.keys() != None:
                        # "detectMention": reply in-chat with a branded card.
                        if set["detectMention"] == True:
                            name = re.findall(r'@(\w+)', msg.text)
                            mention = ast.literal_eval(msg.contentMetadata['MENTION'])
                            mentionees = mention['MENTIONEES']
                            lists = []
                            for mention in mentionees:
                                if mention ['M'] in meM:
                                    group = me.getGroup(To)
                                    masuk = me.getContact(Dari)
                                    nama = masuk.displayName
                                    data = { "type": "flex","altText": " Assalamu'alaikumm","contents":
                                        {"type": "bubble","size":"micro",
                                        "styles":style_biru,
                                        "type":"bubble","body":
                                        {"cornerRadius": "xs","borderWidth": "5px","borderColor": hijau,"contents":[
                                        {"contents":[
                                        sp_putih,
                                        {"contents":[
                                        sp_putih,
                                        {"text":"вĻα¢к●σƒ●gαмєя","size":"xs","align":"center","color": merah,"wrap":True,"weight":"bold","type":"text"},
                                        sp_putih
                                        ],"type":"box","spacing":"md","layout":"horizontal"},
                                        sp_putih,
                                        {"contents":[
                                        sp_putih,
                                        {"type": "image","url": "https://cdn.dribbble.com/users/293796/screenshots/3438995/fb-likes.gif","size": "xl","action": {"type": "uri","uri": "line://app/1609524990-mpvZ5xv5"}},
                                        sp_putih,
                                        {"url":"https://os.line.naver.jp/os/p/{}".format(masuk.pictureStatus),"type":"image","action": {"type": "uri","uri": "https://os.line.naver.jp/os/p/{}".format(masuk.pictureStatus)}},
                                        sp_putih,
                                        {"type":"image","url": "https://1.bp.blogspot.com/-zyUmsriCmGE/XVYAO-lsFLI/AAAAAAAAGe8/BsSUwtUfFc0mxRGxE_8fOz3peuxB3t9UwCLcBGAs/s1600/20190816_074821.jpg","size":"xl","action": {"type": "uri","uri": "https://bit.ly/38K8bbV"}},
                                        sp_putih
                                        ],"type":"box","spacing":"md","layout":"horizontal"},
                                        sp_putih,
                                        {"contents":[
                                        sp_putih,
                                        {"text": nama+" "+ set["Respontag2"],"size":"xs","color": hijau,"wrap":True,"weight":"bold","type":"text"},
                                        sp_putih
                                        ],"type":"box","spacing":"md","layout":"horizontal"},
                                        sp_putih
                                        ],"type":"box","layout":"vertical"},
                                        ],"type":"box","spacing":"xs","layout":"vertical"}},}
                                    me.sendFlex(to, data)
                                    break
                    if 'MENTION' in msg.contentMetadata.keys() != None:
                        # "arespon": additionally reply to the sender via PM.
                        if set["arespon"] == True:
                            name = re.findall(r'@(\w+)', msg.text)
                            mention = ast.literal_eval(msg.contentMetadata['MENTION'])
                            mentionees = mention['MENTIONEES']
                            for mention in mentionees:
                                if mention ['M'] in meM:
                                    h = me.getContact(Dari)
                                    me.sendMessage(Dari, h.displayName+"\n"+set["tagpm"])
                                    break
        #===========================
        # --- ops 25/26: full text-command dispatcher (owner commands).
        if op.type in [25, 26]:
            if set["bot"] == True:
                if op.type == 25: print ("[ 25 ] SEND MESSAGE")
                else: print ("[ 26 ] RECEIVE MESSAGE")
                msg = op.message
                text = msg.text
                Id = msg.id
                To = msg.to
                Dari = msg._from
                if msg.contentType == 0:
                    if text is None:
                        return
                if msg.toType == 0 or msg.toType == 2:
                    if msg.toType == 0:
                        to = msg.to
                    elif msg.toType == 2:
                        to = msg.to
                    # contentType 1 = image: used by the profile changers.
                    if msg.contentType == 1:
                        if Dari in meM:
                            if meM in set["foto"]:
                                path = me.downloadObjectMsg(Id)
                                del set["foto"][meM]
                                me.updateProfilePicture(path)
                                me.sendMessage(To, "Foto berhasil dirubah")
                            # Staged video-profile flow: stage 1 stores the
                            # picture, stage 2 completes the upload.
                            if set['changeProfileVideo']['status'] == True:
                                path = me.downloadObjectMsg(Id, saveAs="tmp/pict.bin")
                                if set['changeProfileVideo']['stage'] == 1:
                                    set['changeProfileVideo']['picture'] = path
                                    Fotter(To, "Silahkan kirimkan video yang ingin anda jadikan profile")
                                    set['changeProfileVideo']['stage'] = 2
                                elif set['changeProfileVideo']['stage'] == 2:
                                    set['changeProfileVideo']['picture'] = path
                                    changeProfileVideo(To)
                                    Fotter(To, "Type: Profile\n • Detail: Change Video Profile\n • Status: Succes..")
                    # contentType 2 = video: the other half of the same flow.
                    elif msg.contentType == 2:
                        if set['changeProfileVideo']['status'] == True:
                            path = me.downloadObjectMsg(Id)
                            if set['changeProfileVideo']['stage'] == 1:
                                set['changeProfileVideo']['video'] = path
                                Fotter(To, "Type: Profile\n • Detail: Change Video Profile\n • Status: Send picture ~")
                                set['changeProfileVideo']['stage'] = 2
                            elif set['changeProfileVideo']['stage'] == 2:
                                set['changeProfileVideo']['video'] = path
                                changeProfileVideo(To)
                    # contentType 0 = text: parse and dispatch commands.
                    if msg.contentType == 0:
                        if set["autoRead"] == True:
                            me.sendChatChecked(To, Id)
                        if text is None:
                            print("[0] SEND COMMAND")
                            return
                        else:
                            Pbot = Comt(text)
                            Dari = msg._from
                            To = msg.to
                            Id = msg.id
                            # "me": send this account's contact card (owner only).
                            if Pbot =="me":
                                if Dari in meM:
                                    h = me.getContact(Dari)
                                    dart = {
                                        "type": "flex",
                                        "altText": "{} mengirim kont".format(h.displayName),
                                        "contents": {
                                        "type": "carousel",
                                        "type": "bubble",
                                        "size": "nano",
                                        "styles": style_biru,
                                        "footer": {
                                        "type": "box",
                                        "layout": "horizontal","cornerRadius": "md","borderWidth": "5px","borderColor": biruTua,
                                        "contents": [
                                        sp_putih,
                                        {
                                        "type": "box",
                                        "layout": "vertical",
                                        "contents": [
                                        sp_putih,
                                        {
                                        "type": "image",
                                        "url": "https://obs.line-scdn.net/{}".format(h.pictureStatus),
                                        "size": "full",
                                        "aspectRatio": "20:13"
                                        },
                                        sp_putih,
                                        {
                                        "type": "text",
                                        "text": h.displayName,
                                        "color": putih,
                                        "align": "start",
                                        "size": "md",
                                        "gravity": "center"
                                        },
                                        sp_putih,
                                        {
                                        "type": "box",
                                        "layout": "horizontal",
                                        "contents": [
                                        {
                                        "type": "text",
                                        "text": "Status",
                                        "color": putih,
                                        "size": "xxs",
                                        "align": "center"
                                        },
                                        sp_putih,
                                        {
                                        "type": "text",
                                        "text": "Aktif",
                                        "color": hijau,
                                        "align": "center",
                                        "size": "xxs"
                                        }
                                        ],
                                        "flex": 1
                                        },
                                        sp_putih,
                                        {
                                        "flex": 3,
                                        "type": "button",
                                        "margin": "sm",
                                        "style": "secondary",
                                        "color": biru,
                                        "height": "sm",
                                        "action": {
                                        "type": "uri",
                                        "label": "👤 info",
                                        "uri": "line://app/1623679774-k9nBDB6b?type=text&text=checkmid%20{}".format(h.mid)
                                        }
                                        },
                                        sp_putih
                                        ]
                                        },
                                        sp_putih
                                        ]
                                        }
                                        }
                                        }
                                    me.sendFlex(To,dart)
                                    # me.sendMessage(Line_Apikey, Devert)
                            # "help": interactive menu of toggle buttons.
                            if Pbot == "help":
                                if Dari in meM:
                                    group = me.getGroup(To)
                                    h = me.getContact(Dari)
                                    data = {"type":"flex","altText":"{}".format(h.displayName),"contents":{"type":"carousel","contents":[
                                        {"type": "bubble","size":"kilo",
                                        "body": {
                                        "type": "box",
                                        "layout": "vertical","cornerRadius": "xs","borderWidth": "5px","borderColor": hijau,
                                        "contents": [
                                        {
                                        "type": "image",
                                        "url": "https://obs.line-scdn.net/{}".format(h.pictureStatus),
                                        "size": "full",
                                        "aspectMode": "cover",
                                        "aspectRatio": "4:6",
                                        "gravity": "top"
                                        },
                                        {
                                        "type": "box",
                                        "layout": "vertical","cornerRadius": "md","borderWidth": "2px","borderColor": biruTua,
                                        "spacing": "md",
                                        "action": {
                                        "type": "uri",
                                        "uri": "https://bit.ly/38K8bbV"
                                        },
                                        "contents": [
                                        {"contents":[
                                        sp_putih,
                                        {"contents":[
                                        sp_putih,
                                        {"text":"вĻα¢к●σƒ●gαмєя","size":"md","align":"center","color": merah,"wrap":True,"weight":"bold","type":"text"},
                                        sp_putih
                                        ],"type":"box","spacing":"md","layout":"horizontal"},
                                        sp_putih,
                                        ],"type":"box","layout":"vertical"},
                                        {"contents":[
                                        sp_putih,
                                        {"contents":[
                                        sp_putih,
                                        {"type": "image","url": "https://1.bp.blogspot.com/-6T7oMDOIlKA/XVX_8-oO52I/AAAAAAAAGe0/W0MubSIIyUUzw3et2YifTWqxaNRRwWE-ACLcBGAs/s1600/20190816_075636.png","size": "full","aspectRatio": "3:1"},
                                        sp_putih
                                        ],"type":"box","spacing":"md","layout":"horizontal"},
                                        sp_putih
                                        ],"type":"box","layout":"vertical"},
                                        {
                                        "type": "box",
                                        "layout": "vertical",
                                        "spacing": "xs",
                                        "contents": [
                                        {"contents":[
                                        sp_putih,
                                        {"contents":[
                                        sp_putih,
                                        {
                                        "type": "button",
                                        "style": "secondary",
                                        "color": ping,
                                        "height": "sm",
                                        "gravity": "center",
                                        "flex": 1,
                                        "action": {
                                        "type": "uri",
                                        "label": "Changedp",
                                        "uri": "line://app/1623679774-k9nBDB6b?type=text&text=changedpvideo"
                                        }
                                        },sp_putih,
                                        {
                                        "type": "button",
                                        "style": "secondary",
                                        "color": ping,
                                        "height": "sm",
                                        "gravity": "center",
                                        "flex": 1,
                                        "action": {
                                        "type": "uri",
                                        "label": "Profile",
                                        "uri": "line://app/1623679774-k9nBDB6b?type=text&text=Profil"
                                        }
                                        },sp_putih
                                        ],"type":"box","spacing":"md","layout":"horizontal"},
                                        sp_putih
                                        ],"type":"box","layout":"vertical"},
                                        {"contents":[
                                        sp_putih,
                                        {"contents":[
                                        sp_putih,
                                        {
                                        "type": "button",
                                        "style": "secondary",
                                        "color": ping,
                                        "height": "sm",
                                        "gravity": "center",
                                        "flex": 1,
                                        "action": {
                                        "type": "uri",
                                        "label": "R1 on",
                                        "uri": "line://app/1623679774-k9nBDB6b?type=text&text=r1:on"
                                        }
                                        },sp_putih,
                                        {
                                        "type": "button",
                                        "style": "secondary",
                                        "color": ping,
                                        "height": "sm",
                                        "gravity": "center",
                                        "flex": 1,
                                        "action": {
                                        "type": "uri",
                                        "label": "R1 off",
                                        "uri": "line://app/1623679774-k9nBDB6b?type=text&text=r1:off"
                                        }
                                        },sp_putih
                                        ],"type":"box","spacing":"md","layout":"horizontal"},
                                        sp_putih
                                        ],"type":"box","layout":"vertical"},
                                        {"contents":[
                                        sp_putih,
                                        {"contents":[
                                        sp_putih,
                                        {
                                        "type": "button",
                                        "style": "secondary",
                                        "color": ping,
                                        "height": "sm",
                                        "gravity": "center",
                                        "flex": 1,
                                        "action": {
                                        "type": "uri",
                                        "label": "Pm on",
                                        "uri": "line://app/1623679774-k9nBDB6b?type=text&text=Pm:on"
                                        }
                                        },sp_putih,
                                        {
                                        "type": "button",
                                        "style": "secondary",
                                        "color": ping,
                                        "height": "sm",
                                        "gravity": "center",
                                        "flex": 1,
                                        "action": {
                                        "type": "uri",
                                        "label": "Pm off",
                                        "uri": "line://app/1623679774-k9nBDB6b?type=text&text=Pm:off"
                                        }
                                        },sp_putih
                                        ],"type":"box","spacing":"md","layout":"horizontal"},
                                        sp_putih
                                        ],"type":"box","layout":"vertical"},
                                        {"contents":[
                                        sp_putih,
                                        {"contents":[
                                        sp_putih,
                                        {
                                        "type": "button",
                                        "style": "secondary",
                                        "color": ping,
                                        "height": "sm",
                                        "gravity": "center",
                                        "flex": 1,
                                        "action": {
                                        "type": "uri",
                                        "label": "Mention",
                                        "uri": "line://app/1623679774-k9nBDB6b?type=text&text=Tag"
                                        }
                                        },sp_putih,
                                        {
                                        "type": "button",
                                        "style": "secondary",
                                        "color": ping,
                                        "height": "sm",
                                        "gravity": "center",
                                        "flex": 1,
                                        "action": {
                                        "type": "uri",
                                        "label": "Restart",
                                        "uri": "line://app/1623679774-k9nBDB6b?type=text&text=#restart"
                                        }
                                        },sp_putih
                                        ],"type":"box","spacing":"md","layout":"horizontal"},
                                        sp_putih
                                        ],"type":"box","layout":"vertical"},
                                        {"contents":[
                                        sp_putih,
                                        {"contents":[
                                        sp_putih,
                                        {
                                        "type": "button",
                                        "style": "secondary",
                                        "color": ping,
                                        "height": "sm",
                                        "gravity": "center",
                                        "flex": 1,
                                        "action": {
                                        "type": "uri",
                                        "label": "Bot on",
                                        "uri": "line://app/1623679774-k9nBDB6b?type=text&text=Bot:on"
                                        }
                                        },sp_putih,
                                        {
                                        "type": "button",
                                        "style": "secondary",
                                        "color": ping,
                                        "height": "sm",
                                        "gravity": "center",
                                        "flex": 1,
                                        "action": {
                                        "type": "uri",
                                        "label": "Bot off",
                                        "uri": "line://app/1623679774-k9nBDB6b?type=text&text=Bot:off"
                                        }
                                        },sp_putih
                                        ],"type":"box","spacing":"md","layout":"horizontal"},
                                        sp_putih
                                        ],"type":"box","layout":"vertical"},
                                        {"contents":[
                                        {"contents":[
                                        {"type":"image","url":"https://i.ibb.co/Rytts8y/com-kicklabz-sing-smule-downloader.png","size":"xxs"},
                                        {"type":"image","url":"https://i.ibb.co/xL3mVMK/Line-icon.png","size":"xxs"},{"type":"image","url":"https://i.ibb.co/26RvJVS/Pngtree-whatsapp-icon-logo-whatsapp-logo-3560533.png","size":"xxs"},
                                        {"type":"image","url":"https://i.ibb.co/b3JwtsP/Pngtree-youtube-logo-icon-3560542.png","size":"xxs"},{"type":"image","url":"https://i.ibb.co/QkJM8j7/20191101-134518.png","size":"xxs"}
                                        ],"type":"box","spacing":"md","layout":"horizontal"}
                                        ],"type":"box","layout":"vertical"},
                                        ]
                                        }
                                        ],
                                        "position": "absolute",
                                        "cornerRadius": "3px",
                                        "offsetTop": "2px",
                                        "offsetStart": "2px",
                                        "height": "371px",
                                        "width": "246px"
                                        }
                                        ],
                                        "paddingAll": "0px",
                                        "paddingTop": "0px",
                                        "cornerRadius": "3px"
                                        }
                                        }]}}
                                    me.sendFlex(To, data)
                                    sendTemplate2(To, Hallow)
                            # "mid": reply with the sender's mid.
                            elif Pbot == "mid":
                                if Dari in meM:
                                    Fotter(To, Dari)
                            # "getmid @user": reply with a mentioned user's mid.
                            elif Pbot.startswith("getmid "):
                                if Dari in meM:
                                    key = eval(msg.contentMetadata["MENTION"])
                                    key1 = key["MENTIONEES"][0]["M"]
                                    mi = me.getContact(key1)
                                    data = {
                                        "type": "text",
                                        "text": "{}".format(key1),
                                        "sentBy": {
                                        "label": "вĻα¢к●σƒ●gαмєя",
                                        "iconUrl": "https://scontent.fcgk9-2.fna.fbcdn.net/v/t1.0-9/cp0/e15/q65/p851x315/51691304_1868204569955031_2146220437988704256_o.jpg?_nc_cat=103&efg=eyJpIjoiYiJ9&_nc_eui2=AeH2ckLWYQHsnNZ_h-dxkaE6z8BLc-ped-MztW4ZIVdUV-ntVbUtpxp-yrIWasU0oZ8NiwTRmKSqr0DSF8HXmDE7MZQ7aCk7ff-H_i1Gzo8g7w&_nc_oc=AQn9OIKwJojXlHshN7igjGYPAx0PGSK3ICR-Vyp57YXxp1cGQulKVLgPBiaFkJfI2Iw&_nc_ht=scontent.fcgk9-2.fna&oh=bf88bf2a4f06709e8f5d18310f26865c&oe=5DB50A10",
                                        "linkUrl": "https://bit.ly/38K8bbV"
                                        }
                                        }
                                    me.sendFlex(To, data)
                            # "removechat": clear chat history for this chat.
                            if Pbot == "removechat" or Pbot == "hapus chat":
                                if Dari in meM:
                                    me.removeAllMessages(opp2)
                                    sendTemplate(To, "Chat dibersihkan...")
                            # "#reboot"/"#restart": persist state and re-exec.
                            if Pbot == "#reboot" or Pbot == "#restart":
                                if Dari in meM:
                                    Fotter(To, "Loading…")
                                    set["restartPoint"] = To
                                    # NOTE(review): Run_Xx() calls os.execl, which
                                    # replaces the process — the next line only runs
                                    # if the exec fails.
                                    Run_Xx()
                                    Fotter(To, "Silahkan gunakan seperti semula...")
                            # "banlist": show all blacklisted contacts as cards
                            # (paged 10 per carousel, capped at 400).
                            if Pbot == "banlist":
                                if Dari in meM:
                                    if set["blacklist"] == {}:
                                        sendTemplate(To, " Nothing Blacklist ")
                                    else:
                                        pill = []
                                        for i in set["blacklist"]:
                                            ctk = me.getContact(i)
                                            if len(pill) >= 400:
                                                pass
                                            else:
                                                pill.append({"type": "bubble", "size": "nano",
                                                    "body": {
                                                    "type": "box",
                                                    "layout": "vertical",
                                                    "contents": [
                                                    {
                                                    "type": "image",
                                                    "url": "https://encrypted-tbn0.gstatic.com/images?q=tbn%3AANd9GcSTjyGgn9g9AIbYJF6bFyRBQFPWAyMIrCBd_Jz-RIenlY-UbV8m",
                                                    "size": "full",
                                                    "aspectMode": "cover",
                                                    "aspectRatio": "4:8",
                                                    "gravity": "top"
                                                    },
                                                    {
                                                    "type": "box",
                                                    "layout": "vertical",
                                                    "contents": [
                                                    {
                                                    "type":"text",
                                                    "text": "{}".format(ctk.displayName),
                                                    "size": "xxs",
                                                    "align":"center",
                                                    "margin": "none",
                                                    "color": biruHitam,
                                                    "wrap": True,
                                                    "weight": "regular"
                                                    }
                                                    ],
                                                    "position": "absolute",
                                                    "cornerRadius": "0px",
                                                    "offsetTop": "16px",
                                                    "offsetStart": "8px",
                                                    "backgroundColor": putih,
                                                    "height": "22px",
                                                    "width": "104px"
                                                    },
                                                    {
                                                    "type": "box",
                                                    "layout": "vertical",
                                                    "contents": [
                                                    {
                                                    "type": "image",
                                                    "url": "https://obs.line-scdn.net/{}".format(ctk.pictureStatus),
                                                    "size": "full",
                                                    "aspectMode": "cover",
                                                    "aspectRatio": "4:6",
                                                    "gravity": "top"
                                                    }
                                                    ],
                                                    "position": "absolute",
                                                    "cornerRadius": "0px",
                                                    "offsetTop": "37px",
                                                    "offsetStart": "8px",
                                                    "height": "86px",
                                                    "width": "104px"
                                                    },
                                                    {
                                                    "type": "box",
                                                    "layout": "vertical","cornerRadius": "3px","borderWidth": "2px","borderColor": biruHitam,
                                                    "contents": [
                                                    {
                                                    "type": "image",
                                                    "url": "https://encrypted-tbn0.gstatic.com/images?q=tbn%3AANd9GcSjybug2NU0iSDmZG6iLiSUQljYgjnMr-PfT0gsXw3uE-ldVu9m",
                                                    "size": "full",
                                                    "aspectMode": "cover",
                                                    "aspectRatio": "4:3",
                                                    "gravity": "top"
                                                    }
                                                    ],
                                                    "position": "absolute",
                                                    "cornerRadius": "0px",
                                                    "offsetTop": "137px",
                                                    "offsetStart": "5px",
                                                    "height": "73px",
                                                    "width": "109px"
                                                    }
                                                    ],
                                                    "cornerRadius": "8px",
                                                    "paddingAll": "0px",
                                                    "paddingTop": "0px"
                                                    }
                                                    })
                                        # Send the cards in carousels of 10.
                                        k = len(pill)//10
                                        for aa in range(k+1):
                                            data = {"type":"flex","altText":"Data blacklist","contents":{"type":"carousel","contents":pill[aa*10 : (aa+1)*10]}}
                                            me.sendFlex(To, data)
                            # "clearban": wipe the blacklist.
                            if Pbot == "clearban":
                                if Dari in meM:
                                    sendTemplate(To, "DAFTAR[%s] BAN\n[success deleted]" % (str(len(set["blacklist"]))))
                                    set["blacklist"] = {}
                            # "join": invite all squad accounts to this group.
                            if Pbot == "join":
                                if Dari in meM:
                                    if set["PASUKAN"] == {}:
                                        sendTemplate(To, " Nothing Bots ")
                                    else:
                                        me.inviteIntoGroup(To, set["PASUKAN"])
                            # "bot @user": register mentioned users as squad.
                            if Pbot.startswith("bot "):
                                if Dari in meM:
                                    key = eval(msg.contentMetadata["MENTION"])
                                    # NOTE(review): bare expression — no effect.
                                    key["MENTIONEES"][0]["M"]
                                    targets = []
                                    for x in key["MENTIONEES"]:
                                        targets.append(x["M"])
                                    for target in targets:
                                        if target in set["PASUKAN"]:
                                            sendTemplate(To, "Dia Sudah jadi pasukan")
                                        else:
                                            try:
                                                set["PASUKAN"][target] = True
                                                sendTemplate(To, "Berhasil add pasukan")
                                            except:
                                                pass
                            # "changedpvideo": arm the video-profile flow.
                            if Pbot == "changedpvideo":
                                if Dari in meM:
                                    set['changeProfileVideo']['status'] = True
                                    set['changeProfileVideo']['stage'] = 1
                                    sendTemplate(To, "Type: Profile\n • Detail: Change Video Profile\n • Status: Waiting for video\nPlease send a video...")
                            # "tag"/"tagall"/"mention": mention everyone, 20 per message.
                            if Pbot == "tag" or Pbot == "tagall" or Pbot == "mention":
                                if Dari in meM:
                                    group = me.getGroup(To)
                                    nama = [contact.mid for contact in group.members]
                                    k = len(nama)//20
                                    for a in range(k+1):
                                        txt = u''
                                        s=0
                                        b=[]
                                        for i in group.members[a*20 : (a+1)*20]:
                                            b.append(i.mid)
                                        mentionMembers(msg.to, b)
                            # "profil": show sender's profile info + pictures
                            # (no owner check — any sender may use it).
                            if Pbot == "profil":
                                contact = me.getContact(Dari)
                                cover = me.getProfileCoverURL(Dari)
                                me.reissueUserTicket()
                                res = "╭━━━━━━━━━━━━━━━━━━━━╮\n├ ☠ Profile info\n├━━━━━━━━━━━━━━━━━━━━\n"
                                res += "├ ☠ Display Name :{}\n".format(contact.displayName)
                                res += "├ ☠ Mid: {}\n".format(contact.mid)
                                res += "├ ☠ Status Message\n├ ☠ {}\n".format(contact.statusMessage)
                                res += "╰━━━━━━━━━━━━━━━━━━━━╯"
                                sendTemplate2(To, res)
                                try:
                                    poto = "https://os.line.naver.jp/os/p/{}".format(Dari)
                                except:
                                    poto = "https://encrypted-tbn0.gstatic.com/images?q=tbn%3AANd9GcQcNdUbC8kEeVWqgR9qMX66lQ_hQPM8ScNY30x4nqpYaKY2jt02"
                                dax = {"type": "template","altText": "berak di celana","template": {"type": "image_carousel","columns": [{"imageUrl": poto,"layout": "horizontal","action": {"type": "uri","label": "PROFILE","uri": poto,"area": {"x": 447,"y": 356,"width": 1040,"height": 1040}}},{"imageUrl": cover,"layout": "horizontal","action": {"type": "uri","label": "COVER","uri": cover,"area": {"x": 447,"y": 356,"width": 1040,"height": 1040}}},{"imageUrl": "https://qr-official.line.me/L/"+me.getUserTicket().id+".png","layout": "horizontal","action": {"type": "uri","label": "CONTACT","uri": "https://line.me/ti/p/"+me.getUserTicket().id,"area": {"x": 447,"y": 356,"width": 1040,"height": 1040}}}]}}
                                me.sendFlex(To, dax)
                            if Pbot =="assalamualaikum":
                                me.sendMessage(To," وَالسَّلاَمُ عَلَيْكُمْ وَرَحْمَةُ اللهِ وَبَرَكَاتُه...")
                            # --- feature toggles (owner only) ---
                            if Pbot == "joinqr on" or Pbot == 'jointicket on':
                                if Dari in meM:
                                    set["autoJoinTicket"] = True
                                    sendTemplate(To, "Join ticket diaktifkan")
                            if Pbot == "joinqr off" or Pbot == 'jointicket off':
                                if Dari in meM:
                                    set["autoJoinTicket"] = False
                                    sendTemplate(To,"Autojoin Tiket dinonaktifkan")
                            if Pbot == "join on" or Pbot == 'join:on':
                                if Dari in meM:
                                    set["autoJoin"] = True
                                    sendTemplate(To, "Join diaktifkan")
                            if Pbot == "join off" or Pbot == 'join:off':
                                if Dari in meM:
                                    set["autoJoin"] = False
                                    sendTemplate(To,"Autojoin dinonaktifkan")
                            if Pbot == "r1:on" or Pbot == 'r1 on':
                                if Dari in meM:
                                    set["detectMention"] = True
                                    sendTemplate(To, "Respon diaktifkan")
                            if Pbot == "r1:off" or Pbot == 'respon1 off':
                                if Dari in meM:
                                    set["detectMention"] = False
                                    sendTemplate(To,"Respon dinonaktifkan")
                            if Pbot == "pm:on" or Pbot == 'pm on':
                                if Dari in meM:
                                    set["arespon"] = True
                                    sendTemplate(To, "Respon pm diaktifkan")
                            if Pbot == "pm:off" or Pbot == 'responpm off':
                                if Dari in meM:
                                    set["arespon"] = False
                                    sendTemplate(To,"Respon pm dinonaktifkan")
                            # "addblc @user": blacklist mentioned users.
                            if Pbot.startswith("addblc "):
                                if Dari in meM:
                                    key = eval(msg.contentMetadata["MENTION"])
                                    # NOTE(review): bare expression — no effect.
                                    key["MENTIONEES"][0]["M"]
                                    targets = []
                                    for x in key["MENTIONEES"]:
                                        targets.append(x["M"])
                                    for target in targets:
                                        if target in set["blacklist"]:
                                            Fotter(To, "Dia Sudah ada di blacklist")
                                        else:
                                            try:
                                                set["blacklist"][target] = True
                                                sendTemplate(To, "Berhasil tambah blacklist")
                                            except:
                                                pass
                            # Auto-join any group ticket links seen in messages.
                            if "/ti/g/" in Pbot:
                                if set["autoJoinTicket"] == True:
                                    link_re = re.compile('(?:line\:\/|line\.me\/R)\/ti\/g\/([a-zA-Z0-9_-]+)?')
                                    links = link_re.findall(text)
                                    n_links = []
                                    for l in links:
                                        if l not in n_links:
                                            n_links.append(l)
                                    for ticket_id in n_links:
                                        group = me.findGroupByTicket(ticket_id)
                                        me.acceptGroupInvitationByTicket(group.id,ticket_id)
                                        sendTemplate2(To, "Succes masuk group : %s" % str(group.name))
        # --- second 25/26 pass: bot on/off commands, handled even while the
        # main dispatcher above is disabled (set["bot"] not checked here).
        if op.type in [25, 26]:
            msg = op.message
            text = msg.text
            Id = msg.id
            To = msg.to
            Dari = msg._from
            if msg.contentType == 0:
                if text is None:
                    return
            if msg.toType == 0 or msg.toType == 2:
                if msg.toType == 0:
                    to = msg.to
                elif msg.toType == 2:
                    to = msg.to
                if msg.contentType == 0:
                    if text is None:
                        print("[0] SEND COMMAND")
                        return
                    else:
                        msg = op.message
                        text = msg.text
                        Pbot = Comt(text)
                        Dari = msg._from
                        To = msg.to
                        Id = msg.id
                        if Pbot == "bot:off" or Pbot == "matikan":
                            print ("NOTIF BOT NON ACTIVE")
                            if Dari in meM:
                                RunTheRun(To,Dari, "RESULT\n")
                                print("""
BOT TEMPLATE
VERSION : INEXBOTS
REVISION : VPS-TERMUX
{}
""".format(jamtgl))
                                Fotter(To, "Ok I'am Turn down "+me.getContact(Dari).displayName)
                                set["bot"] = False
                        if Pbot == "bot:on" or Pbot == "aktifkan":
                            print ("NOTIF BOT ACTIVE")
                            if Dari in meM:
                                Fotter(To, "Already Ok "+me.getContact(Dari).displayName)
                                RunTheRun(To,Dari, "RESULT\n")
                                print("""
BOT TEMPLATE
VERSION : INEXBOTS
REVISION : VPS-TERMUX
{}
""".format(jamtgl))
                                set["bot"] = True
                                set["Conection"] = To
    except Exception as error:
        logError(error)
        print (error)
# Main long-poll loop: fetch up to 50 pending operations, advance the
# revision marker per op, and handle each op on its own thread so a slow
# handler cannot stall polling. Errors are logged and the loop continues.
while True:
    try:
        ops = oepoll.singleTrace(count=50)
        if ops is not None:
            for op in ops:
                oepoll.setRevision(op.revision)
                thread = threading.Thread(target=bot, args=(op,))
                thread.start()
    except Exception as error:
        logError(error)
        print(error)
tests/test_scanner.py | BolunThompson/PyLox | 2 | 6617258 | <reponame>BolunThompson/PyLox
import pylox.scanner as s
from tests import data
# Parameterize later?
def test_scanner():
assert tuple(s.scan(data.SOURCE[0])) == tuple(data.SOURCE[1])
| import pylox.scanner as s
from tests import data
# Parameterize later?
def test_scanner():
assert tuple(s.scan(data.SOURCE[0])) == tuple(data.SOURCE[1]) | en | 0.16719 | # Parameterize later? | 2.136281 | 2 |
packages/gtmapi/lmsrvcore/api/connections/list.py | jjwatts/gigantum-client | 60 | 6617259 | # Copyright (c) 2017 FlashX, LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import base64
import graphene
class ListBasedConnection(object):
    def __init__(self, edges, cursors, args):
        """Class to provide Relay compliant pagination for list based connections
        Args:
            edges(list): A list of edge data
            cursors(list): A list of base64 encoded index cursors, one per edge
            args(dict): The input arguments to the resolve method
        Returns:
            ListBasedConnection
        """
        self.edges = edges
        self.cursors = cursors
        self.args = args
        # Populated by apply() with a graphene.relay.PageInfo instance.
        self.page_info = None
    def apply(self):
        """Apply Relay pagination arguments to the edge list.
        Filters ``self.edges`` and ``self.cursors`` in place according to the
        ``after``/``before`` cursors and the ``first``/``last`` slice sizes,
        then computes ``self.page_info``.
        Returns:
            None
        Raises:
            ValueError: If both `first` and `last` are supplied, if either is
                negative, or if a supplied cursor is not a known cursor.
        """
        if "first" in self.args and "last" in self.args:
            raise ValueError("`first` and `last` arguments cannot be used together")
        # Verify valid slicing args (zero is allowed; only negatives are rejected)
        if "first" in self.args:
            if int(self.args["first"]) < 0:
                # Message fixed: the check is `< 0`, so 0 itself is valid.
                raise ValueError("`first` must be greater than or equal to 0")
        if "last" in self.args:
            if int(self.args["last"]) < 0:
                raise ValueError("`last` must be greater than or equal to 0")
        # Apply cursor filters. A cursor is the base64 encoded index of an edge.
        after_index = None
        before_index = None
        if "after" in self.args:
            if self.args["after"] in self.cursors:
                # Keep only edges strictly after this cursor
                after_index = int(base64.b64decode(self.args["after"]))
            else:
                raise ValueError("`after` cursor is invalid")
        if "before" in self.args:
            if self.args["before"] in self.cursors:
                # Keep only edges strictly before this cursor (comment fixed:
                # the `before` branch removes edges *from* the cursor onward)
                before_index = int(base64.b64decode(self.args["before"]))
            else:
                raise ValueError("`before` cursor is invalid")
        if after_index is not None and before_index is not None:
            self.edges = self.edges[after_index + 1:before_index]
            self.cursors = self.cursors[after_index + 1:before_index]
        elif after_index is not None:
            self.edges = self.edges[after_index + 1:]
            self.cursors = self.cursors[after_index + 1:]
        elif before_index is not None:
            self.edges = self.edges[:before_index]
            self.cursors = self.cursors[:before_index]
        # Length after cursor filtering but before first/last slicing; used
        # below to decide whether slicing actually dropped any edges.
        pre_slice_len = len(self.edges)
        # Apply slicing filters
        if "first" in self.args:
            if len(self.edges) > int(self.args["first"]):
                self.edges = self.edges[:int(self.args["first"])]
                self.cursors = self.cursors[:int(self.args["first"])]
        if "last" in self.args:
            if len(self.edges) > int(self.args["last"]):
                self.edges = self.edges[-int(self.args["last"]):]
                self.cursors = self.cursors[-int(self.args["last"]):]
        # Compute page info status: a previous/next page exists only when the
        # corresponding slice argument removed edges.
        has_previous_page = False
        if "last" not in self.args or len(self.edges) == 0:
            has_previous_page = False
        elif pre_slice_len > int(self.args["last"]):
            has_previous_page = True
        has_next_page = False
        if "first" not in self.args or len(self.edges) == 0:
            has_next_page = False
        elif pre_slice_len > int(self.args["first"]):
            has_next_page = True
        if len(self.edges) == 0:
            start_cursor, end_cursor = None, None
        else:
            start_cursor, end_cursor = self.cursors[0], self.cursors[-1]
        # startCursor and endCursor
        self.page_info = graphene.relay.PageInfo(has_next_page=has_next_page, has_previous_page=has_previous_page,
                                                 start_cursor=start_cursor, end_cursor=end_cursor)
| # Copyright (c) 2017 FlashX, LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import base64
import graphene
class ListBasedConnection(object):
def __init__(self, edges, cursors, args):
"""Class to provide Relay compliant pagination for list based connections
Args:
edges(list): A list of edge data
cursors(list): A list of cursors for edges
args(dict): The input arguments to the resolve method
Returns:
ListBasedConnection
"""
self.edges = edges
self.cursors = cursors
self.args = args
self.page_info = None
def apply(self):
"""Method to apply cursors to the edges
Returns:
None
"""
if "first" in self.args and "last" in self.args:
raise ValueError("`first` and `last` arguments cannot be used together")
# Verify valid slicing args
if "first" in self.args:
if int(self.args["first"]) < 0:
raise ValueError("`first` must be greater than 0")
if "last" in self.args:
if int(self.args["last"]) < 0:
raise ValueError("`last` must be greater than 0")
# Apply cursor filters
after_index = None
before_index = None
if "after" in self.args:
if self.args["after"] in self.cursors:
# Remove edges after cursor
after_index = int(base64.b64decode(self.args["after"]))
else:
raise ValueError("`after` cursor is invalid")
if "before" in self.args:
if self.args["before"] in self.cursors:
# Remove edges after cursor
before_index = int(base64.b64decode(self.args["before"]))
else:
raise ValueError("`before` cursor is invalid")
if after_index is not None and before_index is not None:
self.edges = self.edges[after_index + 1:before_index]
self.cursors = self.cursors[after_index + 1:before_index]
elif after_index is not None:
self.edges = self.edges[after_index + 1:]
self.cursors = self.cursors[after_index + 1:]
elif before_index is not None:
self.edges = self.edges[:before_index]
self.cursors = self.cursors[:before_index]
pre_slice_len = len(self.edges)
# Apply slicing filters
if "first" in self.args:
if len(self.edges) > int(self.args["first"]):
self.edges = self.edges[:int(self.args["first"])]
self.cursors = self.cursors[:int(self.args["first"])]
if "last" in self.args:
if len(self.edges) > int(self.args["last"]):
self.edges = self.edges[-int(self.args["last"]):]
self.cursors = self.cursors[-int(self.args["last"]):]
# Compute page info status
has_previous_page = False
if "last" not in self.args or len(self.edges) == 0:
has_previous_page = False
elif pre_slice_len > int(self.args["last"]):
has_previous_page = True
has_next_page = False
if "first" not in self.args or len(self.edges) == 0:
has_next_page = False
elif pre_slice_len > int(self.args["first"]):
has_next_page = True
if len(self.edges) == 0:
start_cursor, end_cursor = None, None
else:
start_cursor, end_cursor = self.cursors[0], self.cursors[-1]
# startCursor and endCursor
self.page_info = graphene.relay.PageInfo(has_next_page=has_next_page, has_previous_page=has_previous_page,
start_cursor=start_cursor, end_cursor=end_cursor)
| en | 0.721579 | # Copyright (c) 2017 FlashX, LLC # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. Class to provide Relay compliant pagination for list based connections Args: edges(list): A list of edge data cursors(list): A list of cursors for edges args(dict): The input arguments to the resolve method Returns: ListBasedConnection Method to apply cursors to the edges Returns: None # Verify valid slicing args # Apply cursor filters # Remove edges after cursor # Remove edges after cursor # Apply slicing filters # Compute page info status # startCursor and endCursor | 2.14248 | 2 |
src/ops/nn_distance/nn_distance2.py | mauriceqch/pcc_attr_folding | 11 | 6617260 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow.compat.v1 as tf
from tensorflow.python.framework import ops
_op_library = tf.load_op_library(
os.path.join(os.path.dirname(__file__), 'tf_nndistance2.so'))
def nn_distance2(xyz1, xyz2):
    """
    Computes the distance of nearest neighbors for a pair of point clouds.
    Args:
        xyz1: (batch_size, n1, 3) the first point cloud
        xyz2: (batch_size, n2, 3) the second point cloud
    Returns:
        (dist1, idx1, dist2, idx2)
        dist1: (batch_size, n1) squared distance from first to second
        idx1: (batch_size, n1) nearest neighbor from first to second
        dist2: (batch_size, n2) squared distance from second to first
        idx2: (batch_size, n2) nearest neighbor from second to first
    """
    # Thin wrapper: delegates to the custom op loaded from tf_nndistance2.so.
    return _op_library.nn_distance2(xyz1, xyz2)
@ops.RegisterGradient('NnDistance2')
def _nn_distance_grad(op, grad_dist1, grad_idx1, grad_dist2, grad_idx2):
    """Gradient function for the NnDistance2 op.
    Args:
        op: the forward NnDistance2 op; inputs are (xyz1, xyz2), outputs are
            (dist1, idx1, dist2, idx2).
        grad_dist1, grad_idx1, grad_dist2, grad_idx2: incoming gradients for
            the four forward outputs. The idx gradients are not forwarded to
            the backward kernel (integer indices are not differentiable).
    Returns:
        Gradients for (xyz1, xyz2), as produced by the custom backward op.
    """
    xyz1 = op.inputs[0]
    xyz2 = op.inputs[1]
    # Nearest-neighbour index assignments from the forward pass; the backward
    # kernel needs them to route each distance gradient to the right points.
    idx1 = op.outputs[1]
    idx2 = op.outputs[3]
    return _op_library.nn_distance2_grad(
        xyz1, xyz2, grad_dist1, idx1, grad_dist2, idx2)
| from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow.compat.v1 as tf
from tensorflow.python.framework import ops
_op_library = tf.load_op_library(
os.path.join(os.path.dirname(__file__), 'tf_nndistance2.so'))
def nn_distance2(xyz1, xyz2):
"""
Computes the distance of nearest neighbors for a pair of point clouds.
Args:
xyz1: (batch_size, n1, 3) the first point cloud
xyz2: (batch_size, n2, 3) the second point cloud
Returns:
(dist1, idx1, dist2, idx2)
dist1: (batch_size, n1) squared distance from first to second
idx1: (batch_size, n1) nearest neighbor from first to second
dist2: (batch_size, n2) squared distance from second to first
idx2: (batch_size, n2) nearest neighbor from second to first
"""
return _op_library.nn_distance2(xyz1, xyz2)
@ops.RegisterGradient('NnDistance2')
def _nn_distance_grad(op, grad_dist1, grad_idx1, grad_dist2, grad_idx2):
xyz1 = op.inputs[0]
xyz2 = op.inputs[1]
idx1 = op.outputs[1]
idx2 = op.outputs[3]
return _op_library.nn_distance2_grad(
xyz1, xyz2, grad_dist1, idx1, grad_dist2, idx2)
| en | 0.923935 | Computes the distance of nearest neighbors for a pair of point clouds. Args: xyz1: (batch_size, n1, 3) the first point cloud xyz2: (batch_size, n2, 3) the second point cloud Returns: (dist1, idx1, dist2, idx2) dist1: (batch_size, n1) squared distance from first to second idx1: (batch_size, n1) nearest neighbor from first to second dist2: (batch_size, n2) squared distance from second to first idx2: (batch_size, n2) nearest neighbor from second to first | 2.722155 | 3 |
mlp.py | machism0/overfitting | 0 | 6617261 | <gh_stars>0
import os
import matplotlib.pyplot as plt
import numpy as np
import torch
import torchvision
import torch.utils.data
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
DATADIR = os.path.dirname(__file__) + '/../data'
# DATADIR = os.path.dirname(__file__) + '/data'
# DATADIR = './data'
print(DATADIR)
BATCH_SIZE = 128
class LabelCorrupter(torch.utils.data.Dataset):
    """Dataset wrapper that replaces each label with a random class label
    with probability ``corruption_chance``, reproducibly per ``seed``.
    Fix: the previous ``__init__`` performed a separate, *unseeded*
    ``np.random.choice`` draw for ``self.random_labels`` that was never used
    for the corruption itself, making the attribute non-reproducible and the
    work redundant. ``random_labels`` is now the seeded draw actually used.
    """
    def __init__(self, dataset, corruption_chance: float = 1.0, seed: int = 42):
        self.dataset = dataset
        # create_corrupt_labels() also sets self.random_labels (seeded).
        self.corrupt_labels = torch.from_numpy(self.create_corrupt_labels(corruption_chance, seed))
    def __len__(self):
        return len(self.dataset)
    def __getitem__(self, item):
        # Keep the original sample, pair it with the (possibly) corrupted label.
        return self.dataset[item][0], self.corrupt_labels[item]
    def create_corrupt_labels(self, corruption_chance: float = 1.0, seed: int = 42):
        """Return an int array of labels where each entry is replaced by a
        uniformly random class label with probability ``corruption_chance``."""
        np.random.seed(seed)
        classes = np.unique(self.dataset.targets).tolist()
        # Candidate replacement label for every sample (drawn under the seed).
        self.random_labels = np.random.choice(classes, size=len(self.dataset))
        corruption_mask = np.random.rand(len(self.dataset)) <= corruption_chance
        targets = self.dataset.targets.numpy()
        assert len(corruption_mask) == len(targets)
        return np.where(corruption_mask, self.random_labels, targets)
def fashion_mnist(corruption_chance=0.0, batch_size=4):
    """Build Fashion-MNIST loaders with optionally corrupted training labels.
    Args:
        corruption_chance(float): probability each training label is replaced
            by a random class label (see LabelCorrupter).
        batch_size(int): mini-batch size for both loaders.
    Returns:
        (train_loader, test_loader, classes) where ``classes`` maps each label
        index to its Fashion-MNIST garment name.
    """
    transform = torchvision.transforms.Compose(
        [torchvision.transforms.ToTensor(),
         torchvision.transforms.Normalize([0.5], [0.5])])
    train_set = torchvision.datasets.FashionMNIST(root=DATADIR, train=True, download=True, transform=transform)
    train_set_corrupt = LabelCorrupter(train_set, corruption_chance)
    print(f"Percentage corrupt: "
          f"{torch.tensor(1.0) - torch.mean(torch.eq(train_set_corrupt.corrupt_labels, train_set.targets).double())}")
    train_loader = torch.utils.data.DataLoader(train_set_corrupt, batch_size=batch_size, shuffle=True, num_workers=2)
    test_set = torchvision.datasets.FashionMNIST(root=DATADIR, train=False, download=True, transform=transform)
    test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size, shuffle=False, num_workers=2)
    # Bug fix: the garment names belong to Fashion-MNIST; they were previously
    # swapped with the digit tuple returned by mnist().
    classes = ("T-shirt/top", "Trouser", "Pullover", "Dress", "Coat", "Sandal", "Shirt", "Sneaker", "Bag", "Ankle boot")
    return train_loader, test_loader, classes
def mnist(corruption_chance=0.0, batch_size=4):
    """Build MNIST loaders with optionally corrupted training labels.
    Args:
        corruption_chance(float): probability each training label is replaced
            by a random class label (see LabelCorrupter).
        batch_size(int): mini-batch size for both loaders.
    Returns:
        (train_loader, test_loader, classes) where ``classes`` is the digit
        labels 0-9.
    """
    transform = torchvision.transforms.Compose(
        [torchvision.transforms.ToTensor(),
         torchvision.transforms.Normalize([0.5], [0.5])])
    train_set = torchvision.datasets.MNIST(root=DATADIR, train=True, download=True, transform=transform)
    train_set_corrupt = LabelCorrupter(train_set, corruption_chance)
    print(f"Percentage corrupt: "
          f"{torch.tensor(1.0) - torch.mean(torch.eq(train_set_corrupt.corrupt_labels, train_set.targets).double())}")
    train_loader = torch.utils.data.DataLoader(train_set_corrupt, batch_size=batch_size, shuffle=True, num_workers=2)
    test_set = torchvision.datasets.MNIST(root=DATADIR, train=False, download=True, transform=transform)
    test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size, shuffle=False, num_workers=2)
    # Bug fix: digit MNIST's classes are the digits 0-9; the garment-name
    # tuple previously returned here belongs to fashion_mnist().
    classes = (0, 1, 2, 3, 4, 5, 6, 7, 8, 9)
    return train_loader, test_loader, classes
def weights_init(m):
    """Kaiming-normal weights and zero bias for Linear layers; no-op otherwise."""
    if not isinstance(m, nn.Linear):
        return
    nn.init.kaiming_normal_(m.weight.data)
    nn.init.constant_(m.bias.data, 0.0)
class MLP(nn.Module):
    """Single-hidden-layer perceptron: 28*28 inputs -> 512 ReLU units -> 10 logits."""
    def __init__(self):
        super(MLP, self).__init__()
        self.hidden = nn.Linear(28 * 28, 512)
        self.output = nn.Linear(512, 10)
    def forward(self, x):
        # Flatten each sample to a 784-vector, then hidden ReLU, then logits.
        flat = x.view(x.size(0), -1)
        activated = F.relu(self.hidden(flat))
        return self.output(activated)
if __name__ == '__main__':
    # Corruption levels to sweep: 0%, 25%, 50%, 75%, 100%.
    corruption = np.linspace(0.0, 1.0, num=5)
    models = []
    # NOTE(review): the model is created once outside the corruption loop, so
    # weights would carry over between levels if the full sweep were enabled.
    mlp = MLP()
    # for corrupt in corruption:
    # Only the first (uncorrupted) level is trained in this configuration.
    for corrupt in [corruption[0]]:
        trainloader, testloader, _ = mnist(corrupt, batch_size=BATCH_SIZE)
        # trainloader, testloader, _ = fashion_mnist(corrupt, batch_size=BATCH_SIZE)
        # mlp.apply(weights_init)
        criterion = nn.CrossEntropyLoss()
        optimizer = optim.SGD(mlp.parameters(), lr=0.01, momentum=0.9)
        # Decay the learning rate by 5% every 10 epochs.
        scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.95)
        counter = 0
        losses = []
        # Report the running loss roughly 5 times per epoch.
        disp_batch = len(trainloader) // 5
        # n_epochs = int(np.ceil(20000/len(trainloader)))
        n_epochs = 100
        for epoch in range(n_epochs):
            print(f"LR = {optimizer.param_groups[0]['lr']}")
            running_loss = 0.0
            for i, data in enumerate(trainloader, 0):
                # get the inputs; data is a list of [inputs, labels]
                inputs, labels = data
                # zero the parameter gradients
                optimizer.zero_grad()
                # forward + backward + optimize
                outputs = mlp(inputs)
                loss = criterion(outputs, labels)
                loss.backward()
                optimizer.step()
                # print statistics
                running_loss += loss.item()
                if i % disp_batch == disp_batch - 1:  # print ~5 times per epoch
                    print('[%d, %5d] loss: %.3f' %
                          (epoch + 1, i + 1, running_loss / disp_batch))
                    running_loss = 0.0
                # Record the per-batch loss for the saved learning curve.
                losses.append(loss.item())
                counter += 1
            scheduler.step()
        models.append(losses)
    print('Finished Training')
    # One row of per-batch losses per trained corruption level.
    models = np.asarray(models)
    np.save('mnist.npy', models)
    # plt.plot([a.mean() for a in np.split(np.asarray(losses), len(losses) / 1000)])
    # plt.show()
| import os
import matplotlib.pyplot as plt
import numpy as np
import torch
import torchvision
import torch.utils.data
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
DATADIR = os.path.dirname(__file__) + '/../data'
# DATADIR = os.path.dirname(__file__) + '/data'
# DATADIR = './data'
print(DATADIR)
BATCH_SIZE = 128
class LabelCorrupter(torch.utils.data.Dataset):
def __init__(self, dataset, corruption_chance: float = 1.0, seed: int = 42):
self.dataset = dataset
self.random_labels = np.random.choice(np.unique(self.dataset.targets).tolist(),
size=len(self.dataset))
self.corrupt_labels = torch.from_numpy(self.create_corrupt_labels(corruption_chance, seed))
def __len__(self):
return len(self.dataset)
def __getitem__(self, item):
return self.dataset[item][0], self.corrupt_labels[item]
def create_corrupt_labels(self, corruption_chance: float = 1.0, seed: int = 42):
np.random.seed(seed)
random_labels = np.random.choice(np.unique(self.dataset.targets).tolist(), size=len(self.dataset))
corruption_mask = np.random.rand(len(self.dataset)) <= corruption_chance
not_corrupt_mask = np.logical_not(corruption_mask)
assert np.all(corruption_mask + not_corrupt_mask)
assert len(corruption_mask) == len(self.dataset.targets)
assert len(not_corrupt_mask) == len(self.dataset.targets)
return not_corrupt_mask * self.dataset.targets.numpy() + corruption_mask * random_labels
def fashion_mnist(corruption_chance=0.0, batch_size=4):
transform = torchvision.transforms.Compose(
[torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize([0.5], [0.5])])
train_set = torchvision.datasets.FashionMNIST(root=DATADIR, train=True, download=True, transform=transform)
train_set_corrupt = LabelCorrupter(train_set, corruption_chance)
print(f"Percentage corrupt: "
f"{torch.tensor(1.0) - torch.mean(torch.eq(train_set_corrupt.corrupt_labels, train_set.targets).double())}")
train_loader = torch.utils.data.DataLoader(train_set_corrupt, batch_size=batch_size, shuffle=True, num_workers=2)
test_set = torchvision.datasets.FashionMNIST(root=DATADIR, train=False, download=True, transform=transform)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size, shuffle=False, num_workers=2)
classes = (0, 1, 2, 3, 4, 5, 6, 7, 8, 9)
return train_loader, test_loader, classes
def mnist(corruption_chance=0.0, batch_size=4):
transform = torchvision.transforms.Compose(
[torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize([0.5], [0.5])])
train_set = torchvision.datasets.MNIST(root=DATADIR, train=True, download=True, transform=transform)
train_set_corrupt = LabelCorrupter(train_set, corruption_chance)
print(f"Percentage corrupt: "
f"{torch.tensor(1.0) - torch.mean(torch.eq(train_set_corrupt.corrupt_labels, train_set.targets).double())}")
train_loader = torch.utils.data.DataLoader(train_set_corrupt, batch_size=batch_size, shuffle=True, num_workers=2)
test_set = torchvision.datasets.MNIST(root=DATADIR, train=False, download=True, transform=transform)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size, shuffle=False, num_workers=2)
classes = ("T-shirt/top", "Trouser", "Pullover", "Dress", "Coat", "Sandal", "Shirt", "Sneaker", "Bag", "Ankle boot")
return train_loader, test_loader, classes
def weights_init(m):
if isinstance(m, nn.Linear):
nn.init.kaiming_normal_(m.weight.data)
nn.init.constant_(m.bias.data, 0.0)
class MLP(nn.Module):
def __init__(self):
super(MLP, self).__init__()
self.hidden = nn.Linear(28 * 28, 512)
self.output = nn.Linear(512, 10)
def forward(self, x):
x = x.view(x.size()[0], -1)
x = F.relu(self.hidden(x))
x = self.output(x)
return x
if __name__ == '__main__':
corruption = np.linspace(0.0, 1.0, num=5)
models = []
mlp = MLP()
# for corrupt in corruption:
for corrupt in [corruption[0]]:
trainloader, testloader, _ = mnist(corrupt, batch_size=BATCH_SIZE)
# trainloader, testloader, _ = fashion_mnist(corrupt, batch_size=BATCH_SIZE)
# mlp.apply(weights_init)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(mlp.parameters(), lr=0.01, momentum=0.9)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.95)
counter = 0
losses = []
disp_batch = len(trainloader) // 5
# n_epochs = int(np.ceil(20000/len(trainloader)))
n_epochs = 100
for epoch in range(n_epochs):
print(f"LR = {optimizer.param_groups[0]['lr']}")
running_loss = 0.0
for i, data in enumerate(trainloader, 0):
# get the inputs; data is a list of [inputs, labels]
inputs, labels = data
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = mlp(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# print statistics
running_loss += loss.item()
if i % disp_batch == disp_batch - 1: # print 4 times
print('[%d, %5d] loss: %.3f' %
(epoch + 1, i + 1, running_loss / disp_batch))
running_loss = 0.0
losses.append(loss.item())
counter += 1
scheduler.step()
models.append(losses)
print('Finished Training')
models = np.asarray(models)
np.save('mnist.npy', models)
# plt.plot([a.mean() for a in np.split(np.asarray(losses), len(losses) / 1000)])
# plt.show() | en | 0.4866 | # DATADIR = os.path.dirname(__file__) + '/data' # DATADIR = './data' # for corrupt in corruption: # trainloader, testloader, _ = fashion_mnist(corrupt, batch_size=BATCH_SIZE) # mlp.apply(weights_init) # n_epochs = int(np.ceil(20000/len(trainloader))) # get the inputs; data is a list of [inputs, labels] # zero the parameter gradients # forward + backward + optimize # print statistics # print 4 times # plt.plot([a.mean() for a in np.split(np.asarray(losses), len(losses) / 1000)]) # plt.show() | 2.367386 | 2 |
Q02__/98_Binary_Tree_Longest_Consecutive_Sequence/Solution.py | hsclinical/leetcode | 0 | 6617262 | <filename>Q02__/98_Binary_Tree_Longest_Consecutive_Sequence/Solution.py<gh_stars>0
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def longestConsecutive(self, root: TreeNode) -> int:
        """Return the length of the longest downward path of consecutive
        (parent + 1 == child) values in the binary tree rooted at ``root``.
        Returns 0 for an empty tree.
        """
        if root is None:
            return 0
        # Best chain length seen anywhere in the tree; a single node counts as 1.
        self.overall = 1
        self.findLongest(root)
        return self.overall
    def findLongest(self, root):
        """Return ``(length, value)`` where ``length`` is the longest
        consecutive chain starting at this node going downward and ``value``
        is this node's value; updates ``self.overall`` as a side effect.
        """
        longest = 1
        # The original triplicated this logic across left-only / right-only /
        # two-child branches; iterating over both children covers every case.
        for child in (root.left, root.right):
            if child is None:
                continue
            child_len, child_val = self.findLongest(child)
            # The chain extends through this node only if the child's value
            # is exactly one greater.
            if child_val - root.val == 1 and child_len + 1 > longest:
                longest = child_len + 1
        if longest > self.overall:
            self.overall = longest
        return longest, root.val
| <filename>Q02__/98_Binary_Tree_Longest_Consecutive_Sequence/Solution.py<gh_stars>0
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def longestConsecutive(self, root: TreeNode) -> int:
if root == None:
return 0
else:
self.overall = 1
longest, val = self.findLongest(root)
return self.overall
def findLongest(self, root):
if root.left == None and root.right == None:
return 1, root.val
elif root.right == None:
(lLen, lVal) = self.findLongest(root.left)
if lVal - root.val == 1:
if lLen+1 > self.overall:
self.overall = lLen+1
return (lLen+1), root.val
else:
return 1, root.val
elif root.left == None:
(rLen, rVal) = self.findLongest(root.right)
if rVal - root.val == 1:
if rLen+1 > self.overall:
self.overall = rLen+1
return (rLen+1), root.val
else:
return 1, root.val
else:
(lLen, lVal) = self.findLongest(root.left)
(rLen, rVal) = self.findLongest(root.right)
longest = 1
if lVal - root.val == 1 and lLen + 1 > longest:
longest = lLen + 1
if rVal - root.val == 1 and rLen + 1 > longest:
longest = rLen + 1
if longest > self.overall:
self.overall = longest
return longest, root.val
| en | 0.53741 | # Definition for a binary tree node. # class TreeNode: # def __init__(self, val=0, left=None, right=None): # self.val = val # self.left = left # self.right = right | 3.638661 | 4 |
boa/tests/src/RangeTest.py | chisleu/neo-boa | 2 | 6617263 |
from boa.code.builtins import range
def Main():
    """Build the range [100, 120) and return its element at index 3 (103).
    :return: the fourth element of the range
    """
    values = range(100, 120)
    return values[3]
|
from boa.code.builtins import range
def Main():
"""
:return:
"""
a = range(100, 120)
# b = a[4] # this will fail, since the range list is only 4 elements long
# ( 0, 1, 2, 3 )
b = a[3]
# q = add_items(4, 4)
return b
| en | 0.735328 | :return: # b = a[4] # this will fail, since the range list is only 4 elements long # ( 0, 1, 2, 3 ) # q = add_items(4, 4) | 2.439411 | 2 |
late_or_not.py | rajarahulray/late_or_not | 4 | 6617264 | <filename>late_or_not.py
# -*- coding: utf-8 -*-
"""Train a k-nearest-neighbours classifier to predict the `late` column."""
import pandas as pd
import numpy as np
# Fix: `sklearn.cross_validation` was removed in scikit-learn 0.20;
# train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
from sklearn import neighbors
from matplotlib import pyplot as plt

data_frame = pd.read_csv("<PATH_to_CSV_File>")

# Features are every column except the target; the target is `late`.
train = np.array(data_frame.drop(columns=['late']))
test = np.array(data_frame['late'])

# Hold out 40% of the rows for evaluation.
x_train, x_test, y_train, y_test = train_test_split(train, test, test_size=0.4)

clf = neighbors.KNeighborsClassifier()
clf.fit(x_train, y_train)
accuracy = clf.score(x_test, y_test)
print('Accuracy: {}'.format(accuracy))

# Predicting Test Data
pre_data = np.array([[16, 8.42, 707, 803, 813, 2, 1025, 933, 945]])  # test data for prediction...
pre_data = pre_data.reshape(len(pre_data), -1)
prediction = clf.predict(pre_data)
# Prediction....
print(prediction)
print(type(data_frame['late']))
data_frame['late'].plot()
plt.plot(pre_data[0], color='red')
| <filename>late_or_not.py
# -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
from sklearn import cross_validation, neighbors
from matplotlib import pyplot as plt
data_frame = pd.read_csv("<PATH_to_CSV_File>");
train = np.array(data_frame.drop(['late'], 1));
test = np.array(data_frame['late']);
x_train, x_test, y_train, y_test = cross_validation.train_test_split(train, test, test_size = 0.4);
clf = neighbors.KNeighborsClassifier()
clf.fit(x_train, y_train);
accuracy = clf.score(x_test, y_test);
print('Accuracy: {}'.format(accuracy));
#Predicting Test Data
pre_data = np.array([[16, 8.42, 707, 803, 813, 2, 1025, 933, 945]]); #test data for prediction...
pre_data = pre_data.reshape(len(pre_data),-1);
prediction = clf.predict(pre_data);
#Prediction....
print(prediction);
print(type(data_frame['late']));
data_frame['late'].plot();
plt.plot(pre_data[0], color = 'red');
| en | 0.715594 | # -*- coding: utf-8 -*- # -*- coding: utf-8 -*- #Predicting Test Data #test data for prediction... #Prediction.... | 3.28108 | 3 |
Variables/Undefined variable/undefined_variable.py | bormaley999/PyCharm_Introduction_to_Python | 0 | 6617265 | <reponame>bormaley999/PyCharm_Introduction_to_Python<filename>Variables/Undefined variable/undefined_variable.py<gh_stars>0
# PyCharm course exercise: `variable` is assigned but deliberately unused.
variable = 1
# NOTE: `test` is intentionally undefined -- running this raises NameError,
# which is the point of the "Undefined variable" inspection exercise.
print(test)
| variable/undefined_variable.py<gh_stars>0
variable = 1
print(test) | none | 1 | 1.245848 | 1 | |
migrations/versions/72caf820860f_initial_migration.py | lorderonnie/Primepitch | 0 | 6617266 | <reponame>lorderonnie/Primepitch
"""Initial Migration
Revision ID: 72caf820860f
Revises: 079262a1c024
Create Date: 2019-11-25 17:57:27.368740
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '72caf820860f'
down_revision = '079262a1c024'
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``pitch`` table (title, body, user, category, posted)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('pitch',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('title', sa.String(length=255), nullable=True),
    sa.Column('body', sa.String(length=255), nullable=True),
    sa.Column('user', sa.String(length=200), nullable=True),
    sa.Column('category', sa.String(length=200), nullable=True),
    sa.Column('posted', sa.DateTime(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the ``pitch`` table, reversing this migration's upgrade step."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('pitch')
    # ### end Alembic commands ###
| """Initial Migration
Revision ID: 72caf820860f
Revises: 079262a1c024
Create Date: 2019-11-25 17:57:27.368740
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '72caf820860f'
down_revision = '079262a1c024'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('pitch',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('title', sa.String(length=255), nullable=True),
sa.Column('body', sa.String(length=255), nullable=True),
sa.Column('user', sa.String(length=200), nullable=True),
sa.Column('category', sa.String(length=200), nullable=True),
sa.Column('posted', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('pitch')
# ### end Alembic commands ### | en | 0.535966 | Initial Migration Revision ID: 72caf820860f Revises: 079262a1c024 Create Date: 2019-11-25 17:57:27.368740 # revision identifiers, used by Alembic. # ### commands auto generated by Alembic - please adjust! ### # ### end Alembic commands ### # ### commands auto generated by Alembic - please adjust! ### # ### end Alembic commands ### | 1.699483 | 2 |
src/CoffeeOrg.py | failip/coffee | 0 | 6617267 | from db import Database
from user import User
import yaml
# Smoke-test script: print every user document in the database before and
# after the (currently commented-out) account operations below.
database = Database()
print("Before change:")
for user2 in database.users.find({}):
    print(user2)
# Example operations kept for manual testing -- uncomment to mutate the DB:
#Jan = database.create_user(["jan", 20.0])
#peter = database.get_user("Jan")
#database.increase_balance(peter, 200.0)
# Jan.buy(Jan, "beer")
print("After changes:")
for user2 in database.users.find({}):
    print(user2)
| from db import Database
from user import User
import yaml
database = Database()
print("Before change:")
for user2 in database.users.find({}):
print(user2)
#Jan = database.create_user(["jan", 20.0])
#peter = database.get_user("Jan")
#database.increase_balance(peter, 200.0)
# Jan.buy(Jan, "beer")
print("After changes:")
for user2 in database.users.find({}):
print(user2)
| en | 0.232772 | #Jan = database.create_user(["jan", 20.0]) #peter = database.get_user("Jan") #database.increase_balance(peter, 200.0) # Jan.buy(Jan, "beer") | 3.140297 | 3 |
try_operator2.py | Sophie-Dai/Coding-Class-02 | 0 | 6617268 | # -*- coding: UTF-8 -*-
# Demonstrates Python's augmented assignment operators, five steps each.
lan = 980
# Addition assignment: 980 + 5 * 10 -> 1030
for i in range(5):
    lan += 10
print(lan)
# Subtraction assignment: back down to 980
for i in range(5):
    lan -= 10
print(lan)
# Multiplication assignment: 980 * 10**5
for i in range(5):
    lan *= 10
print(lan)
# Division assignment: true division, so the result becomes a float
for i in range(5):
    lan /= 10
print(lan)
# Modulo assignment
for i in range(5):
    lan %= 10
print(lan)
# Exponentiation assignment
for i in range(5):
    lan **= 10
print(lan)
# Floor-division assignment
for i in range(5):
    lan //= 10
print(lan)
| # -*- coding: UTF-8 -*-
lan = 980
for i in range(5):
lan += 10
print(lan)
for i in range(5):
lan -= 10
print(lan)
for i in range(5):
lan *= 10
print(lan)
for i in range(5):
lan /= 10
print(lan)
for i in range(5):
lan %= 10
print(lan)
for i in range(5):
lan **= 10
print(lan)
for i in range(5):
lan //= 10
print(lan)
| en | 0.222803 | # -*- coding: UTF-8 -*- | 3.906119 | 4 |
bin/api_connector_splunk/cloudconnectlib/splunktalib/common/consts.py | CyberGRX/api-connector-splunk | 106 | 6617269 | util_log = "util"
| util_log = "util"
| none | 1 | 1.101491 | 1 | |
Random Password Generator.py | upaneeta/Random-Password-Generator | 2 | 6617270 | <filename>Random Password Generator.py
from tkinter import *
import random
import pyperclip
import string
# Setting Up the window
root=Tk()
root.title("Random Password Generator")
# 350x350 window, offset 520 px right / 150 px down from the screen origin.
root.geometry("350x350+520+150")
# Fixed-size window: not resizable in either direction.
root.resizable(0,0)
# Screen Title
label_title=Label(root,text="RANDOM PASSWORD GENERATOR",font=("tahoma","11","bold"),bg="black",fg="cyan")
label_title.pack(expand=True,fill="both")
# Length of the password
label_length=Label(root,text="Enter the length of the password:",font=("tahoma","9","bold"),fg="light green",bg="black")
label_length.pack(side="top",expand=True,fill="both")
frame_1=Frame(root,bg="black")
frame_1.pack(expand=True,fill="both")
# Spinbox bound to pass_len_input; minimum is 4 so one character from each
# class (upper, lower, digit, punctuation) can always be included.
pass_len_input=IntVar()
pass_len=Spinbox(frame_1,from_= 4, to_= 50,font=("tahoma","8","bold"),textvariable=pass_len_input,width=6)
pass_len.pack()
# Password Generator Function
# Holds the most recently generated password for display and clipboard copy.
pass_str=StringVar()
def generate_pass():
password=""
for i in range(0,4):
password=random.choice(string.ascii_uppercase)+random.choice(string.ascii_lowercase)+random.choice(string.digits)+random.choice(string.punctuation)
for j in range(pass_len_input.get()-4):
password=password+random.choice(string.ascii_uppercase + string.ascii_lowercase + string.digits + string.punctuation)
pass_str.set(password)
# Button for generating Password
frame_2=Frame(root,bg="black")
frame_2.pack(expand=True,fill="both")
gen_pass_button=Button(frame_2,text="Generate Password",font=("tahoma","9","bold"),relief="ridge",fg="deep pink",command=generate_pass)
gen_pass_button.pack(expand=True)
# Displaying the Password
frame_3=Frame(root,bg="black")
frame_3.pack(expand=True,fill="both")
label_display=Label(frame_3,text="",textvariable=pass_str,font=("tahoma","11","bold"),bg="black",fg="yellow",anchor=CENTER)
label_display.pack(expand=True,fill="both")
# Copy Password
def Copy_pass():
pyperclip.copy(pass_str.get())
frame_4=Frame(root,bg="black")
frame_4.pack(expand=True,fill="both")
Button(frame_4,text="Copy to Clipboard",font=("tahoma","9","bold"),relief="ridge",command=Copy_pass).pack()
root.mainloop()
| <filename>Random Password Generator.py
from tkinter import *
import random
import pyperclip
import string
# Setting Up the window
root=Tk()
root.title("Random Password Generator")
root.geometry("350x350+520+150")
root.resizable(0,0)
# Screen Title
label_title=Label(root,text="RANDOM PASSWORD GENERATOR",font=("tahoma","11","bold"),bg="black",fg="cyan")
label_title.pack(expand=True,fill="both")
# Length of the password
label_length=Label(root,text="Enter the length of the password:",font=("tahoma","9","bold"),fg="light green",bg="black")
label_length.pack(side="top",expand=True,fill="both")
frame_1=Frame(root,bg="black")
frame_1.pack(expand=True,fill="both")
pass_len_input=IntVar()
pass_len=Spinbox(frame_1,from_= 4, to_= 50,font=("tahoma","8","bold"),textvariable=pass_len_input,width=6)
pass_len.pack()
# Password Generator Function
pass_str=StringVar()
def generate_pass():
    # Build a password of the requested length and publish it via pass_str.
    password=""
    # NOTE(review): this loop *reassigns* password each pass, so three of its
    # four iterations are discarded; the guaranteed upper/lower/digit/punct
    # characters also always occupy the first four positions (predictable).
    for i in range(0,4):
        password=random.choice(string.ascii_uppercase)+random.choice(string.ascii_lowercase)+random.choice(string.digits)+random.choice(string.punctuation)
    # Fill the remaining length with characters drawn from all categories.
    for j in range(pass_len_input.get()-4):
        password=password+random.choice(string.ascii_uppercase + string.ascii_lowercase + string.digits + string.punctuation)
    pass_str.set(password)
# Button for generating Password
frame_2=Frame(root,bg="black")
frame_2.pack(expand=True,fill="both")
gen_pass_button=Button(frame_2,text="Generate Password",font=("tahoma","9","bold"),relief="ridge",fg="deep pink",command=generate_pass)
gen_pass_button.pack(expand=True)
# Displaying the Password
frame_3=Frame(root,bg="black")
frame_3.pack(expand=True,fill="both")
label_display=Label(frame_3,text="",textvariable=pass_str,font=("tahoma","11","bold"),bg="black",fg="yellow",anchor=CENTER)
label_display.pack(expand=True,fill="both")
# Copy Password
def Copy_pass():
    # Copy the currently displayed password to the system clipboard.
    pyperclip.copy(pass_str.get())
frame_4=Frame(root,bg="black")
frame_4.pack(expand=True,fill="both")
Button(frame_4,text="Copy to Clipboard",font=("tahoma","9","bold"),relief="ridge",command=Copy_pass).pack()
root.mainloop()
| en | 0.72581 | # Setting Up the window # Screen Title # Length of the password # Password Generator Function # Button for generating Password # Displaying the Password # Copy Password | 3.815068 | 4 |
Problem 97/problem97.py | logicred/Euler-Project | 2 | 6617271 | #Answer = 8739992577
#cost = 0.454s (original brute-force timing; pow() version is effectively instant)
import time
start = time.time()
n = 10000000000  # keep only the last ten digits
# Project Euler 97: last ten digits of 28433 * 2**7830457 + 1.
# The original looped 978,807 times multiplying by 256 (= 2**8, so
# 8 * 978807 = 7830456 doublings) and finished with an extra *2; the
# three-argument pow() computes the same modular power in O(log exp).
r = (28433 * pow(2, 7830457, n) + 1) % n
print(r)
end = time.time()
print(end - start)
#cost = 0.454s
import time
start = time.time()
r = 1
n = 10000000000  # modulus: keep only the last ten digits
# 978807 multiplications by 256 (= 2**8) give r = 2**7830456 (mod n)
for x in range(0 ,978807):
    r = (r * 256) % n
# final *2 reaches 2**7830457; scale by 28433 and add 1 (Project Euler 97)
r = (r * 28433 * 2 + 1) % n
print(r)
end = time.time()
print(end - start) | en | 0.368487 | #Answer = 8739992577 #cost = 0.454s | 2.773322 | 3 |
utils/templatetags/utility_tags.py | adborden/WeVoteBase | 0 | 6617272 | # -*- coding: UTF-8 -*-
import urllib
from django import template
from django.utils.encoding import force_str
register = template.Library()
@register.simple_tag(takes_context=True)
def append_to_query(context, no_path=False, **kwargs):
    """Return the current request's query string with **kwargs merged in.

    Produces u"" when there are no parameters, otherwise u"?key=value&...".
    Parameters with falsy values are dropped from the output. `no_path` is
    currently unused (kept for interface compatibility).
    """
    # Fix: urllib.urlencode and QueryDict.iteritems() are Python 2 only and
    # raise AttributeError under Python 3; import the py3 spelling with a
    # py2 fallback so the tag works on both.
    try:
        from urllib.parse import urlencode
    except ImportError:  # pragma: no cover - Python 2
        from urllib import urlencode
    query_params = context['request'].GET.copy()
    for key, value in kwargs.items():
        query_params[key] = value
    path = u""
    if len(query_params):
        path += u"?%s" % urlencode([
            (key, force_str(value)) for (key, value) in query_params.items() if value
        ])
    return path
import urllib
from django import template
from django.utils.encoding import force_str
register = template.Library()
@register.simple_tag(takes_context=True)
def append_to_query(context, no_path=False, **kwargs):
    # Merge **kwargs into a copy of the current request's GET parameters and
    # return them as a "?key=value&..." query string (u"" when empty); falsy
    # values are dropped. `no_path` is unused.
    # NOTE(review): urllib.urlencode and QueryDict.iteritems() are Python 2
    # only; under Python 3 these raise AttributeError (urllib.parse.urlencode
    # and .items() are the modern spellings) - confirm the target runtime.
    query_params = context['request'].GET.copy()
    for key, value in kwargs.items():
        query_params[key] = value
    path = u""
    if len(query_params):
        path += u"?%s" % urllib.urlencode([
            (key, force_str(value)) for (key, value) in query_params.iteritems() if value
        ])
    return path
lib/layer_utils/proposal_layer_combine.py | Li-Chengyang/MSDS-RCNN | 56 | 6617273 | <reponame>Li-Chengyang/MSDS-RCNN<filename>lib/layer_utils/proposal_layer_combine.py
# -------------------------------------------------------------------------
# MSDS R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>, based on code from <NAME> and <NAME>
# -------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from model.config import cfg
from model.nms_wrapper import nms
def proposal_layer_combine_bcn(proposals, scores, cls_scores, cfg_key):
    """Combine RPN proposals for BCN input.

    Sorts proposals by objectness score, applies non-maximal suppression and,
    in TEST mode only, keeps just the top RPN_POST_NMS_TOP_N survivors.
    Returns the filtered (proposals, scores, cls_scores) triple.
    """
    if type(cfg_key) == bytes:
        cfg_key = cfg_key.decode('utf-8')
    if cfg_key not in ('TEST', 'TRAIN'):
        raise NotImplementedError
    # Fix: the TEST and TRAIN branches previously duplicated the entire
    # sort + NMS body and differed only in the post-NMS top-N truncation;
    # the shared work is now written once.
    nms_thresh = cfg[cfg_key].RPN_NMS_THRESH
    # Highest-scoring proposals first.
    order = scores.ravel().argsort()[::-1]
    proposals = proposals[order, :]
    scores = scores[order]
    cls_scores = cls_scores[order, :]
    # Non-maximal suppression. NMS runs on proposals[:, 1:] - the first
    # column is excluded (presumably a batch index; confirm upstream layout).
    keep = nms(np.hstack((proposals[:, 1:], scores)), nms_thresh)
    if cfg_key == 'TEST':
        # Pick the top region proposals after NMS (test time only).
        post_nms_topN = cfg[cfg_key].RPN_POST_NMS_TOP_N
        if post_nms_topN < len(keep):
            keep = keep[:post_nms_topN]
    proposals = proposals[keep, :]
    scores = scores[keep]
    cls_scores = cls_scores[keep, :]
    if cfg.VERBOSE:
        print('PROPOSAL layer. proposals:', scores.size)
    return proposals, scores, cls_scores
def proposal_layer_combine_rpn(proposals, scores, cfg_key):
    """Only for evaluation on the RPN stage (TEST mode).

    Sorts proposals by score, applies NMS, keeps the top RPN_POST_NMS_TOP_N
    and returns the filtered (proposals, scores) pair.
    """
    if type(cfg_key) == bytes:
        cfg_key = cfg_key.decode('utf-8')
    # BUG FIX: the assert previously ran *before* the bytes->str decode, so a
    # b'TEST' key (handled everywhere else in this module) failed the assert.
    assert (cfg_key == 'TEST')
    nms_thresh = cfg[cfg_key].RPN_NMS_THRESH
    post_nms_topN = cfg[cfg_key].RPN_POST_NMS_TOP_N
    # Highest-scoring proposals first.
    order = scores.ravel().argsort()[::-1]
    proposals = proposals[order, :]
    scores = scores[order]
    # Non-maximal suppression (first proposal column excluded, as in the
    # BCN variant above).
    keep = nms(np.hstack((proposals[:, 1:], scores)), nms_thresh)
    # Pick the top region proposals after NMS
    if post_nms_topN < len(keep):
        keep = keep[:post_nms_topN]
    proposals = proposals[keep, :]
    scores = scores[keep]
    if cfg.VERBOSE:
        print('PROPOSAL layer. proposals:', scores.size)
    return proposals, scores
| # -------------------------------------------------------------------------
# MSDS R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>, based on code from <NAME> and <NAME>
# -------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from model.config import cfg
from model.nms_wrapper import nms
def proposal_layer_combine_bcn(proposals, scores, cls_scores, cfg_key):
    """Combine RPN proposals for BCN input
    """
    #print(proposals.shape, scores.shape, cls_scores.shape)
    if type(cfg_key) == bytes:
        cfg_key = cfg_key.decode('utf-8')
    if cfg_key == 'TEST':
        nms_thresh = cfg[cfg_key].RPN_NMS_THRESH
        post_nms_topN = cfg[cfg_key].RPN_POST_NMS_TOP_N
        # Highest-scoring proposals first.
        order = scores.ravel().argsort()[::-1]
        proposals = proposals[order, :]
        scores = scores[order]
        cls_scores = cls_scores[order, :]
        # Non-maximal suppression
        keep = nms(np.hstack((proposals[:, 1:], scores)), nms_thresh)
        # Pick th top region proposals after NMS
        if post_nms_topN < len(keep):
            keep = keep[:post_nms_topN]
        proposals = proposals[keep, :]
        scores = scores[keep]
        cls_scores = cls_scores[keep, :]
    elif cfg_key == 'TRAIN':
        # Same as TEST, but without the post-NMS top-N truncation.
        nms_thresh = cfg[cfg_key].RPN_NMS_THRESH
        order = scores.ravel().argsort()[::-1]
        proposals = proposals[order, :]
        scores = scores[order]
        cls_scores = cls_scores[order, :]
        # Non-maximal suppression
        keep = nms(np.hstack((proposals[:, 1:], scores)), nms_thresh)
        proposals = proposals[keep, :]
        scores = scores[keep]
        cls_scores = cls_scores[keep, :]
    else:
        raise NotImplementedError
    if cfg.VERBOSE:
        print('PROPOSAL layer. proposals:', scores.size)
    return proposals, scores, cls_scores
def proposal_layer_combine_rpn(proposals, scores, cfg_key):
    """Only for evluation on RPN stage
    """
    # NOTE(review): this assert runs *before* the bytes->str decode below, so
    # a b'TEST' cfg_key fails here even though the decode supports it.
    assert (cfg_key == 'TEST')
    if type(cfg_key) == bytes:
        cfg_key = cfg_key.decode('utf-8')
    nms_thresh = cfg[cfg_key].RPN_NMS_THRESH
    post_nms_topN = cfg[cfg_key].RPN_POST_NMS_TOP_N
    # Highest-scoring proposals first.
    order = scores.ravel().argsort()[::-1]
    proposals = proposals[order, :]
    scores = scores[order]
    # Non-maximal suppression
    keep = nms(np.hstack((proposals[:, 1:], scores)), nms_thresh)
    # Pick th top region proposals after NMS
    if post_nms_topN < len(keep):
        keep = keep[:post_nms_topN]
    proposals = proposals[keep, :]
    scores = scores[keep]
    if cfg.VERBOSE:
        print('PROPOSAL layer. proposals:', scores.size)
    return proposals, scores
examples/test_increment_by_one.py | Michael-Wisniewski/Profiler | 1 | 6617274 | <gh_stars>1-10
from increment_by_one import increment_by_one
from algo_profiler import Profiler
# Fixture for Profiler.run_tests / run_coverage: each case supplies the
# kwargs for increment_by_one under "input" and the expected list under
# "output".
test_set = [
    {
        "label": "Empty list",
        "input": {
            "numbers_list": [],
        },
        "output": [],
    },
    {
        "label": "List with positive numbers.",
        "input": {
            "numbers_list": [1, 5, 7],
        },
        "output": [2, 6, 8],
    },
    {
        "label": "List with negative numbers.",
        "input": {
            "numbers_list": [-2, -7, -3],
        },
        "output": [-1, -6, -2],
    },
    {
        # Values beyond 32/64-bit ranges exercise Python's big integers.
        "label": "List with large numbers.",
        "input": {
            "numbers_list": [100000000, 9999999999999999999],
        },
        "output": [100000001, 10000000000000000000],
    },
]
def data_gen(list_length):
    """
    Build the kwargs for increment_by_one: a list of the first *list_length*
    non-negative integers.

    >>> data_gen(3)
    {'numbers_list': [0, 1, 2]}
    >>> data_gen(7)
    {'numbers_list': [0, 1, 2, 3, 4, 5, 6]}
    """
    return {"numbers_list": list(range(list_length))}
def naive_increment_by_one(numbers_list):
    """Naive oracle for the stress tests: add one to every element."""
    return list(map(lambda value: value + 1, numbers_list))
profiler = Profiler()
# Functional checks: fixed cases, randomized comparison against the naive
# reference, and line coverage of the function under test.
profiler.run_tests(func=increment_by_one, test_set=test_set)
profiler.run_stress_tests(
    func=increment_by_one,
    naive_func=naive_increment_by_one,
    data_gen=data_gen,
    gen_min_arg=1,
    gen_max_arg=100,
    gen_steps=10,
)
profiler.run_coverage(func=increment_by_one, test_set=test_set)
# Timing: wall-clock check, cProfile and per-line profiles, then a scaling
# analysis (find_big_o fits a complexity estimate).
profiler.run_time_check(
    func=increment_by_one,
    kwargs=data_gen(10000000),
    iterations=10
)
profiler.run_cProfile(
    func=increment_by_one,
    kwargs=data_gen(100000)
)
# Kept for manual use (interactive snakeviz visualization of the profile):
# profiler.run_snakeviz(
# func=increment_by_one,
# kwargs=data_gen(10000000)
# )
profiler.run_line_profiler(
    func=increment_by_one,
    kwargs=data_gen(10000)
)
profiler.run_time_analysis(
    func=increment_by_one,
    data_gen=data_gen,
    gen_min_arg=10,
    gen_max_arg=1000000,
    gen_steps=10,
    find_big_o=True
)
# Memory: point check, per-line profile, usage over time, leak detection and
# a scaling analysis.
profiler.run_memory_check(
    func=increment_by_one,
    kwargs=data_gen(10000)
)
profiler.run_memory_profiler(
    func=increment_by_one,
    kwargs=data_gen(100),
    clean_result=True
)
profiler.run_time_based_memory_usage(
    func=increment_by_one,
    kwargs=data_gen(1000000)
)
profiler.run_check_memory_leaks(
    func=increment_by_one,
    kwargs=data_gen(1000)
)
profiler.run_memory_analysis(
    func=increment_by_one,
    data_gen=data_gen,
    gen_min_arg=100,
    gen_max_arg=1000,
    gen_steps=10,
    find_big_o=True
)
# CPU/memory sampling profiler (scalene).
profiler.run_scalene(
    func=increment_by_one,
    kwargs=data_gen(100),
    cpu_sampling_rate=0.001
)
| from increment_by_one import increment_by_one
from algo_profiler import Profiler
test_set = [
{
"label": "Empty list",
"input": {
"numbers_list": [],
},
"output": [],
},
{
"label": "List with positive numbers.",
"input": {
"numbers_list": [1, 5, 7],
},
"output": [2, 6, 8],
},
{
"label": "List with negative numbers.",
"input": {
"numbers_list": [-2, -7, -3],
},
"output": [-1, -6, -2],
},
{
"label": "List with large numbers.",
"input": {
"numbers_list": [100000000, 9999999999999999999],
},
"output": [100000001, 10000000000000000000],
},
]
def data_gen(list_length):
    """
    Build the kwargs dict for increment_by_one: the first *list_length*
    non-negative integers.

    >>> data_gen(3)
    {'numbers_list': [0, 1, 2]}
    >>> data_gen(7)
    {'numbers_list': [0, 1, 2, 3, 4, 5, 6]}
    """
    numbers_list = [number for number in range(list_length)]
    return {"numbers_list" : numbers_list}
def naive_increment_by_one(numbers_list):
    # Reference oracle used by run_stress_tests to validate increment_by_one.
    return [number + 1 for number in numbers_list]
profiler = Profiler()
profiler.run_tests(func=increment_by_one, test_set=test_set)
profiler.run_stress_tests(
func=increment_by_one,
naive_func=naive_increment_by_one,
data_gen=data_gen,
gen_min_arg=1,
gen_max_arg=100,
gen_steps=10,
)
profiler.run_coverage(func=increment_by_one, test_set=test_set)
profiler.run_time_check(
func=increment_by_one,
kwargs=data_gen(10000000),
iterations=10
)
profiler.run_cProfile(
func=increment_by_one,
kwargs=data_gen(100000)
)
# profiler.run_snakeviz(
# func=increment_by_one,
# kwargs=data_gen(10000000)
# )
profiler.run_line_profiler(
func=increment_by_one,
kwargs=data_gen(10000)
)
profiler.run_time_analysis(
func=increment_by_one,
data_gen=data_gen,
gen_min_arg=10,
gen_max_arg=1000000,
gen_steps=10,
find_big_o=True
)
profiler.run_memory_check(
func=increment_by_one,
kwargs=data_gen(10000)
)
profiler.run_memory_profiler(
func=increment_by_one,
kwargs=data_gen(100),
clean_result=True
)
profiler.run_time_based_memory_usage(
func=increment_by_one,
kwargs=data_gen(1000000)
)
profiler.run_check_memory_leaks(
func=increment_by_one,
kwargs=data_gen(1000)
)
profiler.run_memory_analysis(
func=increment_by_one,
data_gen=data_gen,
gen_min_arg=100,
gen_max_arg=1000,
gen_steps=10,
find_big_o=True
)
profiler.run_scalene(
func=increment_by_one,
kwargs=data_gen(100),
cpu_sampling_rate=0.001
) | en | 0.189396 | >>> data_gen(3) {'numbers_list': [0, 1, 2]} >>> data_gen(7) {'numbers_list': [0, 1, 2, 3, 4, 5, 6]} # profiler.run_snakeviz( # func=increment_by_one, # kwargs=data_gen(10000000) # ) | 2.707345 | 3 |
app/rss/__init__.py | finnurtorfa/aflafrettir.is | 0 | 6617275 | from flask import Blueprint
feed = Blueprint('feed', __name__)

# Deliberately imported *after* `feed` exists: the routes module imports
# `feed` back, so a top-of-file import would be circular. (Also fixes the
# nonstandard `from .import` spacing.)
from . import routes  # noqa: E402
| from flask import Blueprint
feed = Blueprint('feed', __name__)
# Imported after `feed` is defined so the routes module can import it back
# without a circular import.
from .import routes
| none | 1 | 1.237689 | 1 | |
src/pygimg.py | crazy-batata/SpaceInvaders-pygame | 0 | 6617276 | import pygame
def scale(img, x):
    """Return *img* resized to x times its current width and height."""
    bounds = img.get_rect()
    return pygame.transform.scale(img, (bounds.w * x, bounds.h * x))
def split_sheet(sheet, rect):
    """Copy the *rect* region of *sheet* onto a fresh alpha-enabled surface."""
    frame = pygame.Surface(rect.size).convert_alpha()
    frame.blit(sheet, (0, 0), rect)
    return frame
def scale(img,x):
    # Return img scaled to x times its current width and height.
    return(pygame.transform.scale(img,
                                  (img.get_rect().w * x,
                                   img.get_rect().h * x)))
def split_sheet(sheet, rect):
    """Extract the sub-image covered by *rect* from *sheet* as a new surface
    with an alpha channel."""
    piece = pygame.Surface(rect.size).convert_alpha()
    piece.blit(sheet, (0, 0), rect)
    return piece
gemini/settings.py | quantroom-pro/cryptocurrency.backtester | 37 | 6617277 | """
Main settings file for Gemini.Backtester
"""
# precision for pandas display and rounding (number of decimal places)
PRECISION = 8
# default fees per position side; 0. disables fees
# NOTE(review): units (fraction of notional vs. percent) are not stated here
FEES = {
    'Long': 0.,
    'Short': 0.,
}
| """
Main settings file for Gemini.Backtester
"""
# number of decimal places used for pandas and rounding
PRECISION = 8
# default fees charged per position side (0. = fee-free)
FEES = {
    'Long': 0.,
    'Short': 0.,
}
| en | 0.572471 | Main settings file for Gemini.Backtester # precision for pandas and rounding # default fees | 1.395004 | 1 |
django_testing_utils/mixins.py | VVyacheslav/django-testing-utils | 0 | 6617278 | <reponame>VVyacheslav/django-testing-utils
from datetime import timedelta, datetime
from typing import TypeVar, Union, List, Tuple, Any, TYPE_CHECKING, Dict, cast
from unittest import mock
from django.db import models
from django.test import TestCase
from django.utils import timezone
# Handy timedelta constants for expressing time offsets in tests.
second = timedelta(seconds=1)
minute = timedelta(minutes=1)
hour = timedelta(hours=1)
day = timedelta(days=1)
# Type var bound to django models; lets `BaseTestCase.reload` return the
# same type as its argument.
M = TypeVar('M', bound=models.Model)
# type definition for TestCase subclass mixed with TimeMixin
TimeDerived = Union["TimeMixin", TestCase]
class MockedDateTime(datetime):
    """
    Stub for DateTimeField auto_now/auto_now_add.
    Helps to override model_utils.TimeStampedModel.created.default
    """
    @classmethod
    def utcnow(cls): # type: ignore
        # noinspection PyUnresolvedReferences
        # Routes utcnow() through timezone.now(), which TimeMixin patches, so
        # the frozen time is honoured. NOTE(review): assumes timezone.utc is a
        # pytz zone exposing `normalize`; zoneinfo-based UTC lacks it - confirm.
        return timezone.utc.normalize(timezone.now())
# For type checkers the mixin pretends to subclass TestCase (so self.assert*
# and friends resolve); at runtime it is a plain object.
if TYPE_CHECKING: # pragma: no cover
    TimeMixinTarget = TestCase
else:
    TimeMixinTarget = object
class TimeMixin(TimeMixinTarget):
    """ Mixin to freeze time in django tests."""
    # The frozen "current" moment; reassigning it shifts the frozen clock.
    now: datetime
    def setUp(self) -> None:
        super().setUp()
        self.now = timezone.now()
        # Every timezone.now() call now returns self.now; side_effect reads
        # it lazily via get_now, so tests can move the clock by assigning.
        self.now_patcher = mock.patch('django.utils.timezone.now',
                                      side_effect=self.get_now)
        self.now_patcher.start()
        # Replace timezone.datetime with MockedDateTime so utcnow() is
        # frozen as well.
        self.timezone_datetime_patcher = mock.patch(
            'django.utils.timezone.datetime',
            new_callable=mock.PropertyMock(return_value=MockedDateTime))
        self.timezone_datetime_patcher.start()
    def tearDown(self) -> None:
        super().tearDown()
        # Stop patchers in reverse order of starting.
        self.timezone_datetime_patcher.stop()
        self.now_patcher.stop()
    def get_now(self) -> datetime:
        # Indirection target for the timezone.now patch above.
        return self.now
class BaseTestCaseMeta(type):
    """
    Metaclass for `BaseTestCases` to override `cls.__setattr__`.
    It is useful to create django models in `TestCase` class methods, like
    `setUpTestData` or `setUpClass`. Main advantage of such implementation is
    that every object is created once per test case, not once per test. Main
    disadvantage is that every object preserves in-memory state between
    subsequent tests.
    This metaclass intercepts adding new django model instances as cls members
    and collect it to created_objects list. This list is then used to reset
    in-memory state by calling `refresh_from_db` in `setUp()`.
    """
    # (pk, instance) pairs captured by __setattr__ below.
    _created_objects: List[Tuple[int, models.Model]]
    def __new__(mcs, name: str, bases: Tuple[type, ...],
                attrs: Dict[str, Any]) -> 'BaseTestCaseMeta':
        # Add created django model instances cache as class attribute
        # (a fresh list per class, so subclasses do not share the cache).
        attrs['_created_objects'] = []
        instance = super().__new__(mcs, name, bases, attrs)
        return cast("BaseTestCaseMeta", instance)
    def __setattr__(cls, key: str, value: Any) -> None:
        # Record model instances assigned as class attributes (e.g. inside
        # setUpTestData), remembering the pk they had at assignment time.
        if isinstance(value, models.Model):
            cls._created_objects.append((value.pk, value))
        return super().__setattr__(key, value)
class BaseTestCase(TimeMixin, TestCase, metaclass=BaseTestCaseMeta):
    """ Base class for django tests."""
    @classmethod
    def refresh_objects(cls) -> None:
        """
        Reset in-memory changes for django models that are stored as
        class attributes.
        """
        for pk, obj in cls._created_objects:
            # Put back the pk recorded at creation time, re-fetch the row
            # and drop any cached related objects.
            obj.pk = pk
            obj.refresh_from_db()
            obj._state.fields_cache.clear() # type: ignore
    @classmethod
    def forget_object(cls, obj: models.Model) -> None:
        """
        Method for removing django model instance from created objects cache
        """
        cls._created_objects.remove((obj.pk, obj))
    @staticmethod
    def update_object(obj: models.Model, *args: Any, **kwargs: Any) -> None:
        """ Update django model object in database only."""
        # Positional args are read as alternating key/value pairs, i.e.
        # update_object(obj, 'field', value) == update_object(obj, field=value).
        args_iter = iter(args)
        kwargs.update(dict(zip(args_iter, args_iter)))
        # QuerySet.update() writes the database directly; the in-memory `obj`
        # is left untouched (use refresh_objects/reload to see the change).
        obj._meta.model.objects.filter(pk=obj.pk).update(**kwargs)
    @staticmethod
    def reload(obj: M) -> M:
        """ Fetch same object from database."""
        return obj._meta.model.objects.get(pk=obj.pk)
    def setUp(self) -> None:
        # Undo in-memory changes left over from the previous test before
        # TimeMixin installs its patches.
        self.refresh_objects()
        super().setUp()
    def assert_object_fields(self, obj: models.Model, **kwargs: Any) -> None:
        """ Obtains an object from database and compares field values."""
        if obj.pk:
            obj = self.reload(obj)
        for k, v in kwargs.items():
            value = getattr(obj, k)
            self.assertEqual(value, v)
| from datetime import timedelta, datetime
from typing import TypeVar, Union, List, Tuple, Any, TYPE_CHECKING, Dict, cast
from unittest import mock
from django.db import models
from django.test import TestCase
from django.utils import timezone
second = timedelta(seconds=1)
minute = timedelta(minutes=1)
hour = timedelta(hours=1)
day = timedelta(days=1)
M = TypeVar('M', bound=models.Model)
# type definition for TestCase subclass mixed with TimeMixin
TimeDerived = Union["TimeMixin", TestCase]
class MockedDateTime(datetime):
"""
Stub for DateTimeField auto_now/auto_now_add.
Helps to override model_utils.TimeStampedModel.created.default
"""
@classmethod
def utcnow(cls): # type: ignore
# noinspection PyUnresolvedReferences
return timezone.utc.normalize(timezone.now())
if TYPE_CHECKING: # pragma: no cover
TimeMixinTarget = TestCase
else:
TimeMixinTarget = object
class TimeMixin(TimeMixinTarget):
""" Mixin to freeze time in django tests."""
now: datetime
def setUp(self) -> None:
super().setUp()
self.now = timezone.now()
self.now_patcher = mock.patch('django.utils.timezone.now',
side_effect=self.get_now)
self.now_patcher.start()
self.timezone_datetime_patcher = mock.patch(
'django.utils.timezone.datetime',
new_callable=mock.PropertyMock(return_value=MockedDateTime))
self.timezone_datetime_patcher.start()
def tearDown(self) -> None:
super().tearDown()
self.timezone_datetime_patcher.stop()
self.now_patcher.stop()
def get_now(self) -> datetime:
return self.now
class BaseTestCaseMeta(type):
"""
Metaclass for `BaseTestCases` to override `cls.__setattr__`.
It is useful to create django models in `TestCase` class methods, like
`setUpTestData` or `setUpClass`. Main advantage of such implementation is
that every object is created once per test case, not once per test. Main
disadvantage is that every object preserves in-memory state between
subsequent tests.
This metaclass intercepts adding new django model instances as cls members
and collect it to created_objects list. This list is then used to reset
in-memory state by calling `refresh_from_db` in `setUp()`.
"""
_created_objects: List[Tuple[int, models.Model]]
def __new__(mcs, name: str, bases: Tuple[type, ...],
attrs: Dict[str, Any]) -> 'BaseTestCaseMeta':
# Add created django model instances cache as class attribute
attrs['_created_objects'] = []
instance = super().__new__(mcs, name, bases, attrs)
return cast("BaseTestCaseMeta", instance)
def __setattr__(cls, key: str, value: Any) -> None:
if isinstance(value, models.Model):
cls._created_objects.append((value.pk, value))
return super().__setattr__(key, value)
class BaseTestCase(TimeMixin, TestCase, metaclass=BaseTestCaseMeta):
""" Base class for django tests."""
@classmethod
def refresh_objects(cls) -> None:
"""
Reset in-memory changed for django models that are stored as
class attributes.
"""
for pk, obj in cls._created_objects:
obj.pk = pk
obj.refresh_from_db()
obj._state.fields_cache.clear() # type: ignore
@classmethod
def forget_object(cls, obj: models.Model) -> None:
"""
Method for removing django model instance from created objects cache
"""
cls._created_objects.remove((obj.pk, obj))
@staticmethod
def update_object(obj: models.Model, *args: Any, **kwargs: Any) -> None:
""" Update django model object in database only."""
args_iter = iter(args)
kwargs.update(dict(zip(args_iter, args_iter)))
obj._meta.model.objects.filter(pk=obj.pk).update(**kwargs)
@staticmethod
def reload(obj: M) -> M:
""" Fetch same object from database."""
return obj._meta.model.objects.get(pk=obj.pk)
def setUp(self) -> None:
self.refresh_objects()
super().setUp()
def assert_object_fields(self, obj: models.Model, **kwargs: Any) -> None:
""" Obtains an object from database and compares field values."""
if obj.pk:
obj = self.reload(obj)
for k, v in kwargs.items():
value = getattr(obj, k)
self.assertEqual(value, v) | en | 0.810657 | # type definition for TestCase subclass mixed with TimeMixin Stub for DateTimeField auto_now/auto_now_add. Helps to override model_utils.TimeStampedModel.created.default # type: ignore # noinspection PyUnresolvedReferences # pragma: no cover Mixin to freeze time in django tests. Metaclass for `BaseTestCases` to override `cls.__setattr__`. It is useful to create django models in `TestCase` class methods, like `setUpTestData` or `setUpClass`. Main advantage of such implementation is that every object is created once per test case, not once per test. Main disadvantage is that every object preserves in-memory state between subsequent tests. This metaclass intercepts adding new django model instances as cls members and collect it to created_objects list. This list is then used to reset in-memory state by calling `refresh_from_db` in `setUp()`. # Add created django model instances cache as class attribute Base class for django tests. Reset in-memory changed for django models that are stored as class attributes. # type: ignore Method for removing django model instance from created objects cache Update django model object in database only. Fetch same object from database. Obtains an object from database and compares field values. | 2.518402 | 3 |
code/ext_gcd.py | jchen/math1580-notes | 1 | 6617279 | <reponame>jchen/math1580-notes<gh_stars>1-10
def ext_gcd(a: int, b: int) -> tuple[int, int]:
    """
    Extended Euclidean algorithm.

    Returns the Bezout coefficients (x, y) with a*x + b*y = gcd(a, b).
    """
    coeff_a, coeff_b = (1, 0), (0, 1)
    while b != 0:
        q = a // b
        coeff_a, coeff_b = coeff_b, (coeff_a[0] - q * coeff_b[0],
                                     coeff_a[1] - q * coeff_b[1])
        a, b = b, a % b
    return coeff_a
| def ext_gcd(a: int, b: int) -> tuple[int, int]:
"""
Computes the gcd of a and b using the extended Euclidean algorithm.
param a: int
param b: int
return: tuple (int x, int y) where ax + by = gcd(a, b)
"""
x, y, z, w = 1, 0, 0, 1
while b != 0:
x, y, z, w = z, w, x - (a // b) * z, y - (a // b) * w
a, b = b, a % b
return (x, y) | en | 0.611229 | Computes the gcd of a and b using the extended Euclidean algorithm. param a: int param b: int return: tuple (int x, int y) where ax + by = gcd(a, b) | 3.554115 | 4 |
blog/migrations/0001_initial.py | ruzuojun/nxblog | 3 | 6617280 | <filename>blog/migrations/0001_initial.py
# -*- coding: utf-8 -*-
# Generated by Django 1.9.10 on 2016-10-25 09:58
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50, verbose_name='\u540d\u79f0')),
('slug', models.SlugField(max_length=100, verbose_name='\u6807\u793a')),
('status', models.IntegerField(choices=[(1, '\u542f\u7528'), (0, '\u7981\u7528')], default=0, verbose_name='\u72b6\u6001')),
('sort', models.IntegerField(default=100, verbose_name='\u6392\u5e8f')),
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='\u521b\u5efa\u65f6\u95f4')),
('update_time', models.DateTimeField(auto_now=True, verbose_name='\u4fee\u6539\u65f6\u95f4')),
],
options={
'verbose_name': '\u5206\u7c7b',
'verbose_name_plural': '\u5206\u7c7b',
},
),
migrations.CreateModel(
name='Page',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=50, verbose_name='\u9875\u9762\u6807\u9898')),
('slug', models.SlugField(max_length=100, verbose_name='\u6807\u793a')),
('content', models.TextField(null=True, verbose_name='\u9875\u9762\u5185\u5bb9')),
('author', models.CharField(max_length=20, verbose_name='\u4f5c\u8005')),
('status', models.IntegerField(choices=[(1, '\u53d1\u5e03'), (0, '\u8349\u7a3f')], default=0, verbose_name='\u72b6\u6001')),
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='\u521b\u5efa\u65f6\u95f4')),
('update_time', models.DateTimeField(auto_now=True, verbose_name='\u4fee\u6539\u65f6\u95f4')),
],
options={
'verbose_name': '\u9875\u9762',
'verbose_name_plural': '\u9875\u9762',
},
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=50, verbose_name='\u6587\u7ae0\u6807\u9898')),
('slug', models.SlugField(max_length=100, verbose_name='\u6807\u793a')),
('image', models.ImageField(null=True, upload_to=b'', verbose_name='\u56fe\u7247')),
('description', models.CharField(max_length=200, verbose_name='\u6587\u7ae0\u63cf\u8ff0')),
('content', models.TextField(null=True, verbose_name='\u6587\u7ae0\u5185\u5bb9')),
('author', models.CharField(max_length=20, verbose_name='\u4f5c\u8005')),
('status', models.IntegerField(choices=[(1, '\u53d1\u5e03'), (0, '\u8349\u7a3f')], default=0, verbose_name='\u72b6\u6001')),
('view_count', models.IntegerField(default=0, verbose_name='\u6d4f\u89c8\u6b21\u6570')),
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='\u521b\u5efa\u65f6\u95f4')),
('update_time', models.DateTimeField(auto_now=True, verbose_name='\u4fee\u6539\u65f6\u95f4')),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Category')),
],
options={
'verbose_name': '\u6587\u7ae0',
'verbose_name_plural': '\u6587\u7ae0',
},
),
]
| <filename>blog/migrations/0001_initial.py
# -*- coding: utf-8 -*-
# Generated by Django 1.9.10 on 2016-10-25 09:58
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50, verbose_name='\u540d\u79f0')),
('slug', models.SlugField(max_length=100, verbose_name='\u6807\u793a')),
('status', models.IntegerField(choices=[(1, '\u542f\u7528'), (0, '\u7981\u7528')], default=0, verbose_name='\u72b6\u6001')),
('sort', models.IntegerField(default=100, verbose_name='\u6392\u5e8f')),
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='\u521b\u5efa\u65f6\u95f4')),
('update_time', models.DateTimeField(auto_now=True, verbose_name='\u4fee\u6539\u65f6\u95f4')),
],
options={
'verbose_name': '\u5206\u7c7b',
'verbose_name_plural': '\u5206\u7c7b',
},
),
migrations.CreateModel(
name='Page',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=50, verbose_name='\u9875\u9762\u6807\u9898')),
('slug', models.SlugField(max_length=100, verbose_name='\u6807\u793a')),
('content', models.TextField(null=True, verbose_name='\u9875\u9762\u5185\u5bb9')),
('author', models.CharField(max_length=20, verbose_name='\u4f5c\u8005')),
('status', models.IntegerField(choices=[(1, '\u53d1\u5e03'), (0, '\u8349\u7a3f')], default=0, verbose_name='\u72b6\u6001')),
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='\u521b\u5efa\u65f6\u95f4')),
('update_time', models.DateTimeField(auto_now=True, verbose_name='\u4fee\u6539\u65f6\u95f4')),
],
options={
'verbose_name': '\u9875\u9762',
'verbose_name_plural': '\u9875\u9762',
},
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=50, verbose_name='\u6587\u7ae0\u6807\u9898')),
('slug', models.SlugField(max_length=100, verbose_name='\u6807\u793a')),
('image', models.ImageField(null=True, upload_to=b'', verbose_name='\u56fe\u7247')),
('description', models.CharField(max_length=200, verbose_name='\u6587\u7ae0\u63cf\u8ff0')),
('content', models.TextField(null=True, verbose_name='\u6587\u7ae0\u5185\u5bb9')),
('author', models.CharField(max_length=20, verbose_name='\u4f5c\u8005')),
('status', models.IntegerField(choices=[(1, '\u53d1\u5e03'), (0, '\u8349\u7a3f')], default=0, verbose_name='\u72b6\u6001')),
('view_count', models.IntegerField(default=0, verbose_name='\u6d4f\u89c8\u6b21\u6570')),
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='\u521b\u5efa\u65f6\u95f4')),
('update_time', models.DateTimeField(auto_now=True, verbose_name='\u4fee\u6539\u65f6\u95f4')),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Category')),
],
options={
'verbose_name': '\u6587\u7ae0',
'verbose_name_plural': '\u6587\u7ae0',
},
),
]
| en | 0.748198 | # -*- coding: utf-8 -*- # Generated by Django 1.9.10 on 2016-10-25 09:58 | 1.641938 | 2 |
mltools/exporg.py | markvdw/mltools | 1 | 6617281 | <gh_stars>1-10
# I want this module to help sort out experiments and experimental results. I want:
# - To be able to run experiments with different parameters easily
# - Automatically store the results (plots, processed files etc) of the experiment
# - Retrieve and view experimental results and compare between different parameter settings
# To do so, I need to store the experimental parameters together with the results.
# I need to be able to initialise the code back to the final state so I can keep processing with the results later on.
# I need a viewer which allows me to sort and group by parameter settings.
import datetime
import shutil
import os
import drawtools as mldraw
class ExperimentBase(object):
    """Base class for experiments.

    Owns a timestamped results directory (``savedir``), a list of collected
    figures (``figlist``) and persists the subclass-provided ``self.params``
    dict.  Written for Python 2 (the file uses old-style prints elsewhere);
    everything here is valid on Python 3 as well.
    """

    def __init__(self):
        # Timestamp makes the default save directory unique per run.
        self._time_str = str(datetime.datetime.now()).replace(' ', '_')
        self._savedir = None
        self.figlist = []

    def _ensure_savedir(self):
        # Create the results directory on demand; every writer needs it.
        if not os.path.exists(self.savedir):
            os.mkdir(self.savedir)

    def savefigs(self, filename="figs.pdf"):
        """Write all figures in self.figlist into a single pdf in savedir."""
        path = self.savedir + filename
        # Single-argument parenthesized print is valid Python 2 and 3.
        print(path)
        self._ensure_savedir()
        mldraw.figs_to_pdf(path, self.figlist)

    def save_terminal_output(self, logger):
        """Flush the logger and copy its log file into savedir as log.txt."""
        self._ensure_savedir()
        logger.logflush()
        shutil.copy(logger.filepath, self.savedir + 'log.txt')

    def save(self):
        """Persist the experiment parameters to savedir/params.txt.

        Fixes two defects of the previous version: the directory is created
        if missing (before, only savefigs/save_terminal_output created it),
        and the file handle is closed deterministically via ``with``.
        """
        self._ensure_savedir()
        with open(self.savedir + "params.txt", 'w') as f:
            # .items() works on both Python 2 and 3 (iteritems() is py2-only).
            for k, v in self.params.items():
                f.write(str(k) + '\t' + str(v))
                f.write('\n')

    @property
    def savedir(self):
        # Default directory derives from params['name'] plus the timestamp.
        if self._savedir is None:
            return "./results/" + self.params['name'] + self._time_str + '/'
        return self._savedir

    @savedir.setter
    def savedir(self, val):
        # Normalize to a trailing slash because paths are joined with '+'.
        if val[-1] != '/':
            val += '/'
        self._savedir = val
class TrainTestTask(object):
def __init__(self, model_params):
pass
def train(self, traindata):
pass
def test(self):
        pass
# I want this module to help sort out experiments and experimental results. I want:
# - To be able to run experiments with different parameters easily
# - Automatically store the results (plots, processed files etc) of the experiment
# - Retrieve and view experimental results and compare between different parameter settings
# To do so, I need to store the experimental parameters together with the results.
# I need to be able to initialise the code back to the final state so I can keep processing with the results later on.
# I need a viewer which allows me to sort and group by parameter settings.
import datetime
import shutil
import os
import drawtools as mldraw
class ExperimentBase(object):
    # Base class for experiments: owns a timestamped results directory
    # (savedir), a figure list, and the subclass-provided self.params dict.
    def __init__(self):
        # Timestamp makes the default save directory unique per run.
        self._time_str = str(datetime.datetime.now()).replace(' ', '_')
        self._savedir = None
        self.figlist = []
    def savefigs(self, filename="figs.pdf"):
        """Write all figures in self.figlist into one pdf under savedir."""
        path = self.savedir + filename
        print path
        # Create the results directory on first use.
        if not os.path.exists(self.savedir):
            os.mkdir(self.savedir)
        mldraw.figs_to_pdf(path, self.figlist)
    def save_terminal_output(self, logger):
        """Flush the logger and copy its log file into savedir as log.txt."""
        if not os.path.exists(self.savedir):
            os.mkdir(self.savedir)
        logger.logflush()
        shutil.copy(logger.filepath, self.savedir + 'log.txt')
    def save(self):
        # Just save the parameters
        # NOTE(review): unlike the two writers above, savedir is not created
        # here, and the handle is not closed via a context manager.
        f = open(self.savedir + "params.txt", 'w')
        for k, v in self.params.iteritems():
            f.write(str(k) + '\t' + str(v))
            f.write('\n')
        f.close()
    @property
    def savedir(self):
        # Default derives from params['name'] plus the run timestamp.
        if self._savedir is None:
            return "./results/" + self.params['name'] + self._time_str + '/'
        else:
            return self._savedir
    @savedir.setter
    def savedir(self, val):
        # Normalize to a trailing slash since paths are joined with '+'.
        if val[-1] != '/':
            val += '/'
        self._savedir = val
class TrainTestTask(object):
    """Skeleton for a train/test task; all methods are unimplemented stubs."""
    def __init__(self, model_params):
        # model_params: configuration for the model under test (unused stub).
        pass
    def train(self, traindata):
        """Fit the model on traindata (stub)."""
        pass
    def test(self):
        """Evaluate the trained model (stub)."""
        pass
print_parameters.py | erwinlambert/alphabeta | 0 | 6617282 | from utils import *
# Non-dimensional model parameters from utils.nondim() (Python 2 script;
# L and dim() presumably also come from the star import of utils -- confirm).
l1,l2,l3,l4,l5,epsilon,delta,gamma = nondim()
#Print values
print 'lambda1 = ',l1
print 'lambda2 = ',l2
print 'lambda3 = ',l3
print 'lambda4 = ',l4
print 'lambda5 = ',l5
print '-------------------'
print 'epsilon = ',epsilon
print 'delta = ',delta
print 'gamma = ',gamma
print '-------------------'
# Dimensional reference scales via dim(); units appended inline.
print 'L = ',L*1.e-3,'km'
print 'Psi = ',dim(1.,'psi'),' Sv'
print 'R = ',dim(1.,'r'), 'm2/s'
print 'FW = ',dim(1.,'fw'), 'mSv'
print 'S2 = ',dim(1.,'s'),'g/kg'
print '-------------------'
| from utils import *
# Non-dimensional model parameters from utils.nondim() (Python 2 script;
# L and dim() presumably also come from the star import of utils -- confirm).
l1,l2,l3,l4,l5,epsilon,delta,gamma = nondim()
#Print values
print 'lambda1 = ',l1
print 'lambda2 = ',l2
print 'lambda3 = ',l3
print 'lambda4 = ',l4
print 'lambda5 = ',l5
print '-------------------'
print 'epsilon = ',epsilon
print 'delta = ',delta
print 'gamma = ',gamma
print '-------------------'
# Dimensional reference scales via dim(); units appended inline.
print 'L = ',L*1.e-3,'km'
print 'Psi = ',dim(1.,'psi'),' Sv'
print 'R = ',dim(1.,'r'), 'm2/s'
print 'FW = ',dim(1.,'fw'), 'mSv'
print 'S2 = ',dim(1.,'s'),'g/kg'
print '-------------------'
| hy | 0.142825 | #Print values | 2.309921 | 2 |
sippet/sippet_tests.gyp | sippet/sippet | 26 | 6617283 | # Copyright (c) 2013 The Sippet Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'includes': [
'../build/win_precompile.gypi',
],
'target_defaults': {
'xcode_settings': {
'OTHER_CFLAGS': [
'-Wno-deprecated-writable-strings',
'-Wno-unused-result',
],
},
'include_dirs': [
'<(DEPTH)',
'<(DEPTH)/third_party',
],
},
'targets': [
{
'target_name': 'sippet_unittest',
'type': 'executable',
'dependencies': [
'sippet_test_support',
'sippet.gyp:sippet',
],
'sources': [
'../net/test/run_all_unittests.cc',
'message/message_unittest.cc',
'message/headers_unittest.cc',
'message/parser_unittest.cc',
'uri/uri_unittest.cc',
'transport/end_point_unittest.cc',
'transport/network_layer_unittest.cc',
'transport/chrome/chrome_datagram_writer_unittest.cc',
'transport/chrome/chrome_stream_writer_unittest.cc',
'ua/auth_controller_unittest.cc',
'ua/auth_handler_digest_unittest.cc',
],
}, # target sippet_unittest
{
'target_name': 'sippet_test_support',
'type': 'static_library',
'dependencies': [
'<(DEPTH)/testing/gtest.gyp:gtest',
'<(DEPTH)/testing/gmock.gyp:gmock',
'<(DEPTH)/net/net.gyp:net',
'<(DEPTH)/net/net.gyp:net_test_support',
'<(DEPTH)/third_party/icu/icu.gyp:icui18n',
'<(DEPTH)/third_party/icu/icu.gyp:icuuc',
'sippet.gyp:sippet',
],
'export_dependent_settings': [
'<(DEPTH)/net/net.gyp:net',
'<(DEPTH)/net/net.gyp:net_test_support',
'<(DEPTH)/testing/gtest.gyp:gtest',
'<(DEPTH)/testing/gmock.gyp:gmock',
],
'sources': [
'transport/chrome/transport_test_util.h',
'transport/chrome/transport_test_util.cc',
'ua/auth_handler_mock.h',
'ua/auth_handler_mock.cc',
],
}, # target sippet_test_support
{
'target_name': 'sippet_standalone_test_server',
'type': 'static_library',
'dependencies': [
'<(DEPTH)/third_party/pjsip/pjsip.gyp:*',
'sippet.gyp:sippet',
],
'sources': [
'test/standalone_test_server/standalone_test_server.h',
'test/standalone_test_server/standalone_test_server.cc',
],
'conditions' : [
['os_posix == 1', {
# Get rid of annoying warnings about PJSIP strings usage.
'cflags' : [
'-Wno-write-strings'
]
}],
],
'variables': {
'clang_warning_flags': [
# pjsip forces the construction const pj_str_t s = {"blah", 4};
'-Wno-writable-strings',
],
},
    }, # target sippet_standalone_test_server
{
'target_name': 'sippet_standalone_test_server_unittest',
'type': 'executable',
'dependencies': [
'<(DEPTH)/base/base.gyp:run_all_unittests',
'<(DEPTH)/testing/gtest.gyp:gtest',
'<(DEPTH)/testing/gmock.gyp:gmock',
'sippet_standalone_test_server',
],
'sources': [
'test/standalone_test_server/standalone_test_server_unittest.cc',
],
}, # target sippet_standalone_test_server_unittest
{
'target_name': 'sippet_standalone_test_server_main',
'type': 'executable',
'dependencies': [
'sippet_standalone_test_server',
],
'sources': [
'test/standalone_test_server/main.cc',
],
}, # target sippet_standalone_test_server_main
],
}
| # Copyright (c) 2013 The Sippet Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'includes': [
'../build/win_precompile.gypi',
],
'target_defaults': {
'xcode_settings': {
'OTHER_CFLAGS': [
'-Wno-deprecated-writable-strings',
'-Wno-unused-result',
],
},
'include_dirs': [
'<(DEPTH)',
'<(DEPTH)/third_party',
],
},
'targets': [
{
'target_name': 'sippet_unittest',
'type': 'executable',
'dependencies': [
'sippet_test_support',
'sippet.gyp:sippet',
],
'sources': [
'../net/test/run_all_unittests.cc',
'message/message_unittest.cc',
'message/headers_unittest.cc',
'message/parser_unittest.cc',
'uri/uri_unittest.cc',
'transport/end_point_unittest.cc',
'transport/network_layer_unittest.cc',
'transport/chrome/chrome_datagram_writer_unittest.cc',
'transport/chrome/chrome_stream_writer_unittest.cc',
'ua/auth_controller_unittest.cc',
'ua/auth_handler_digest_unittest.cc',
],
}, # target sippet_unittest
{
'target_name': 'sippet_test_support',
'type': 'static_library',
'dependencies': [
'<(DEPTH)/testing/gtest.gyp:gtest',
'<(DEPTH)/testing/gmock.gyp:gmock',
'<(DEPTH)/net/net.gyp:net',
'<(DEPTH)/net/net.gyp:net_test_support',
'<(DEPTH)/third_party/icu/icu.gyp:icui18n',
'<(DEPTH)/third_party/icu/icu.gyp:icuuc',
'sippet.gyp:sippet',
],
'export_dependent_settings': [
'<(DEPTH)/net/net.gyp:net',
'<(DEPTH)/net/net.gyp:net_test_support',
'<(DEPTH)/testing/gtest.gyp:gtest',
'<(DEPTH)/testing/gmock.gyp:gmock',
],
'sources': [
'transport/chrome/transport_test_util.h',
'transport/chrome/transport_test_util.cc',
'ua/auth_handler_mock.h',
'ua/auth_handler_mock.cc',
],
}, # target sippet_test_support
{
'target_name': 'sippet_standalone_test_server',
'type': 'static_library',
'dependencies': [
'<(DEPTH)/third_party/pjsip/pjsip.gyp:*',
'sippet.gyp:sippet',
],
'sources': [
'test/standalone_test_server/standalone_test_server.h',
'test/standalone_test_server/standalone_test_server.cc',
],
'conditions' : [
['os_posix == 1', {
# Get rid of annoying warnings about PJSIP strings usage.
'cflags' : [
'-Wno-write-strings'
]
}],
],
'variables': {
'clang_warning_flags': [
# pjsip forces the construction const pj_str_t s = {"blah", 4};
'-Wno-writable-strings',
],
},
    }, # target sippet_standalone_test_server
{
'target_name': 'sippet_standalone_test_server_unittest',
'type': 'executable',
'dependencies': [
'<(DEPTH)/base/base.gyp:run_all_unittests',
'<(DEPTH)/testing/gtest.gyp:gtest',
'<(DEPTH)/testing/gmock.gyp:gmock',
'sippet_standalone_test_server',
],
'sources': [
'test/standalone_test_server/standalone_test_server_unittest.cc',
],
}, # target sippet_standalone_test_server_unittest
{
'target_name': 'sippet_standalone_test_server_main',
'type': 'executable',
'dependencies': [
'sippet_standalone_test_server',
],
'sources': [
'test/standalone_test_server/main.cc',
],
}, # target sippet_standalone_test_server_main
],
}
| en | 0.624492 | # Copyright (c) 2013 The Sippet Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # target sippet_unittest # target sippet_test_support # Get rid of annoying warnings about PJSIP strings usage. # pjsip forces the construction const pj_str_t s = {"blah", 4}; # target sippet_test_support # target sippet_standalone_test_server_unittest # target sippet_standalone_test_server_main | 1.26712 | 1 |
account/migrations/0022_auto_20200223_1506.py | AdamSuma/steach_adam | 0 | 6617284 | # Generated by Django 2.2 on 2020-02-23 15:06
import account.models
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration (Django 2.2, 2020-02-23): adds the
    # Grade.is_term_paper flag and relaxes two UserProfile fields.
    dependencies = [
        ('account', '0021_auto_20200119_1314'),
    ]
    operations = [
        # New boolean flag marking a grade as a term paper; existing rows
        # default to False.
        migrations.AddField(
            model_name='grade',
            name='is_term_paper',
            field=models.BooleanField(default=False),
        ),
        # teaching_certificate becomes nullable and builds its upload path
        # with the model-level helper.
        migrations.AlterField(
            model_name='userprofile',
            name='teaching_certificate',
            field=models.FileField(null=True, upload_to=account.models.certificate_file_path),
        ),
        # teaching_subject becomes nullable with max length 100.
        migrations.AlterField(
            model_name='userprofile',
            name='teaching_subject',
            field=models.CharField(max_length=100, null=True),
        ),
    ]
| # Generated by Django 2.2 on 2020-02-23 15:06
import account.models
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration (Django 2.2, 2020-02-23): adds the
    # Grade.is_term_paper flag and relaxes two UserProfile fields.
    dependencies = [
        ('account', '0021_auto_20200119_1314'),
    ]
    operations = [
        # New boolean flag marking a grade as a term paper; existing rows
        # default to False.
        migrations.AddField(
            model_name='grade',
            name='is_term_paper',
            field=models.BooleanField(default=False),
        ),
        # teaching_certificate becomes nullable and builds its upload path
        # with the model-level helper.
        migrations.AlterField(
            model_name='userprofile',
            name='teaching_certificate',
            field=models.FileField(null=True, upload_to=account.models.certificate_file_path),
        ),
        # teaching_subject becomes nullable with max length 100.
        migrations.AlterField(
            model_name='userprofile',
            name='teaching_subject',
            field=models.CharField(max_length=100, null=True),
        ),
    ]
| en | 0.810527 | # Generated by Django 2.2 on 2020-02-23 15:06 | 1.638653 | 2 |
primeiras_frases/add_file.py | cpdoc/dhbb-nlp | 6 | 6617285 | <filename>primeiras_frases/add_file.py
from conllu import parse
import os

# Tag each backed-up sentence with the .conllu file whose header contains it.
# Fixes vs. the previous version: files are opened with context managers (no
# leaked handles), the udpipe directory literal is no longer duplicated, and
# each file's first 10 lines are read once instead of once per sentence
# (was O(sentences * files) file reads).
with open('frases.bkp') as bkp:
    arq = parse(bkp.read())

udpipe_path = "/home/lucas/work/dhbb-nlp/udpipe/"
conllu_files = [f for f in os.listdir(udpipe_path) if '.conllu' in f]

# Cache the first 10 lines of every .conllu file up front.
headers = {}
for fname in conllu_files:
    with open(udpipe_path + fname) as fh:
        headers[fname] = fh.read().split('\n')[:10]

with open("frases_2.conllu", 'w') as frases:
    for i, x in enumerate(arq, start=1):
        # First file whose header mentions the sentence text wins.
        for fname, lines in headers.items():
            if any(x.metadata['text'] in line for line in lines):
                x.metadata['file'] = fname
                frases.write(x.serialize() + "\n")
                break
        # Progress indicator, overwriting the same terminal line.
        print("{}% carregado.".format(round(100 * i / len(arq), 2)), end='\r')
| <filename>primeiras_frases/add_file.py
from conllu import parse
import os
# Parse the backed-up sentences (CoNLL-U) into a list of TokenLists.
arq = open('frases.bkp').read()
arq = parse(arq)
udpipe_path = "/home/lucas/work/dhbb-nlp/udpipe/"
# Every .conllu file produced by udpipe (same directory as udpipe_path).
a = [x for x in os.listdir("/home/lucas/work/dhbb-nlp/udpipe/") if '.conllu' in x]
i = 0
frases = open("frases_2.conllu",'w')
# For each sentence, find the first .conllu file whose first 10 lines contain
# the sentence text, record that file in the metadata, and write the sentence.
# NOTE(review): every file is re-read for every sentence and no handle is
# ever closed.
for x in arq:
    stop = False
    for file in a:
        arquivo = open(udpipe_path+file).read().split('\n')[0:10]
        for t in arquivo:
            if x.metadata['text'] in t:
                x.metadata['file'] = file
                frases.write(x.serialize() + "\n")
                stop = True
                break
        if stop: break
    i+=1
    # Progress indicator, overwriting the same terminal line.
    print("{}% carregado.".format(round(100*i/len(arq),2)),end='\r')
| none | 1 | 2.425104 | 2 | |
test/find_img.py | Jiansion/reptile | 0 | 6617286 | from urllib.request import urlopen
from bs4 import BeautifulSoup
# Scrape qianjia.com and print every image URL with its alt text.
html = urlopen("http://www.qianjia.com/").read()
bsObj = BeautifulSoup(html, 'html.parser')
imgList = bsObj.findAll('img')
for img in imgList:
    # Tag.get returns None for a missing attribute, so a missing or an empty
    # alt both fall back to the placeholder -- same behavior as the old
    # try/except + empty-string check, without the broad exception handler.
    title = img.get('alt') or "图片没有说明"
    print(img['src'], title)
| from urllib.request import urlopen
from bs4 import BeautifulSoup
# Scrape qianjia.com and print every image URL with its alt text.
html = urlopen("http://www.qianjia.com/").read()
bsObj = BeautifulSoup(html, 'html.parser')
imgList = bsObj.findAll('img')
for img in imgList:
    try:
        title = img['alt']
        # An empty alt attribute gets the same placeholder as a missing one.
        if title == '':
            title = "图片没有说明"
    except Exception as e:
        # A missing 'alt' attribute raises KeyError; fall back to placeholder.
        title = "图片没有说明"
    print(img['src'], title)
| none | 1 | 3.113677 | 3 | |
lib/systems/alpha-l-rhamnopyranose.py | pulsar-chem/BPModule | 0 | 6617287 | <reponame>pulsar-chem/BPModule
import pulsar as psr
def load_ref_system():
    """ Returns alpha-l-rhamnopyranose as found in the IQMol fragment library.
        All credit to https://github.com/nutjunkie/IQmol
        The payload is an XYZ-style element/coordinate listing handed to
        psr.make_system (units presumably Angstrom -- confirm against
        pulsar's make_system documentation).
    """
    return psr.make_system("""
        C -0.8729 1.4268 0.3282
        O -1.5909 0.3684 -0.2826
        C -1.1432 -0.9881 -0.0088
        C -2.0781 -1.8482 -0.8382
        C 0.3390 -1.0817 -0.4412
        C 1.1866 -0.1190 0.4152
        O 2.5223 -0.0435 -0.1060
        C 0.6705 1.3311 0.3091
        O 1.0462 1.9240 -0.9371
        O 0.8753 -2.3750 -0.1209
        O -1.2040 1.4322 1.7162
        H -1.2524 -1.1765 1.0822
        H 0.4676 -0.8767 -1.5265
        H 1.2365 -0.4670 1.4729
        H 1.1024 1.9581 1.1281
        H -1.2657 2.3273 -0.1958
        H -2.1718 1.3505 1.8448
        H 1.9491 1.5976 -1.2003
        H 2.8899 -0.9555 -0.1995
        H 0.5058 -3.0549 -0.7196
        H -2.0335 -1.5873 -1.9047
        H -1.8596 -2.9150 -0.7263
        H -3.1229 -1.6875 -0.5320
        """)
| import pulsar as psr
def load_ref_system():
    """ Returns alpha-l-rhamnopyranose as found in the IQMol fragment library.
        All credit to https://github.com/nutjunkie/IQmol
        The payload is an XYZ-style element/coordinate listing handed to
        psr.make_system (units presumably Angstrom -- confirm against
        pulsar's make_system documentation).
    """
    return psr.make_system("""
        C -0.8729 1.4268 0.3282
        O -1.5909 0.3684 -0.2826
        C -1.1432 -0.9881 -0.0088
        C -2.0781 -1.8482 -0.8382
        C 0.3390 -1.0817 -0.4412
        C 1.1866 -0.1190 0.4152
        O 2.5223 -0.0435 -0.1060
        C 0.6705 1.3311 0.3091
        O 1.0462 1.9240 -0.9371
        O 0.8753 -2.3750 -0.1209
        O -1.2040 1.4322 1.7162
        H -1.2524 -1.1765 1.0822
        H 0.4676 -0.8767 -1.5265
        H 1.2365 -0.4670 1.4729
        H 1.1024 1.9581 1.1281
        H -1.2657 2.3273 -0.1958
        H -2.1718 1.3505 1.8448
        H 1.9491 1.5976 -1.2003
        H 2.8899 -0.9555 -0.1995
        H 0.5058 -3.0549 -0.7196
        H -2.0335 -1.5873 -1.9047
        H -1.8596 -2.9150 -0.7263
        H -3.1229 -1.6875 -0.5320
        """)
codes_/0929_Unique_Email_Addresses.py | SaitoTsutomu/leetcode | 0 | 6617288 | # %% [929. Unique Email Addresses](https://leetcode.com/problems/unique-email-addresses/)
class Solution:
    def numUniqueEmails(self, emails: List[str]) -> int:
        """Count distinct normalized addresses.

        Normalization: dots in the local part are ignored and everything
        from the first '+' on is dropped; the domain is kept verbatim.
        (The previous regex required at least one non-'+@' character before
        the '+', silently leaving malformed locals unnormalized.)
        """
        seen = set()
        for email in emails:
            local, _, domain = email.partition('@')
            # Drop the '+tag' suffix, then remove dots from the local part.
            local = local.split('+', 1)[0].replace('.', '')
            seen.add(local + '@' + domain)
        return len(seen)
| # %% [929. Unique Email Addresses](https://leetcode.com/problems/unique-email-addresses/)
class Solution:
    def numUniqueEmails(self, emails: List[str]) -> int:
        """Count distinct addresses after canonicalizing the local part
        (dots removed, '+suffix' dropped) via a single regex substitution."""
        pattern = re.compile(r"([^+@]+)(?:|\+.*)@(.*)")

        def canonical(match):
            return "{}@{}".format(match.group(1).replace('.', ''),
                                  match.group(2))

        unique_addresses = {pattern.sub(canonical, address) for address in emails}
        return len(unique_addresses)
| en | 0.74259 | # %% [929. Unique Email Addresses](https://leetcode.com/problems/unique-email-addresses/) | 3.256763 | 3 |
chconsole/media/text.py | mincode/chconsole | 0 | 6617289 | import sys
import re
from unicodedata import category
from qtconsole.qt import QtGui
from .line_iter import LineIter
__author__ = '<NAME> <<EMAIL>>'
# JupyterWidget
if sys.platform.startswith('win'):
default_editor = 'notepad'
else:
default_editor = ''
# ConsoleWidget
def get_block_plain_text(block):
    """ Given a QTextBlock, return its unformatted text.
    """
    # Select the whole block with a fresh cursor and return the selection's
    # plain (unformatted) text.
    cursor = QtGui.QTextCursor(block)
    cursor.movePosition(QtGui.QTextCursor.StartOfBlock)
    cursor.movePosition(QtGui.QTextCursor.EndOfBlock,
                        QtGui.QTextCursor.KeepAnchor)
    return cursor.selection().toPlainText()
# ConsoleWidget
def is_letter_or_number(char):
    """ Returns whether the specified unicode character is a letter or a number.
    """
    # Unicode general categories starting with 'L' are letters, 'N' numbers.
    return category(char)[0] in ('L', 'N')
# ConsoleWidget
def set_top_cursor(receiver, cursor):
    """ Scrolls the viewport so that the specified cursor is at the top.
    """
    # Jump to the bottom first so that setting the text cursor below forces
    # the view to scroll the target line up to the top of the viewport.
    scrollbar = receiver.verticalScrollBar()
    scrollbar.setValue(scrollbar.maximum())
    # Temporarily move the widget's cursor to the requested position, let
    # the widget scroll it into view, then restore the original cursor.
    original_cursor = receiver.textCursor()
    receiver.setTextCursor(cursor)
    receiver.ensureCursorVisible()
    receiver.setTextCursor(original_cursor)
def double_backslash(s):
    """
    Escape backslashes to work with regex.
    :param s: string.
    :return: copy of s with every backslash doubled.
    """
    # Plain str.replace does the same job as the previous re.sub call
    # without involving the regex engine.
    return s.replace('\\', '\\\\')
def starts_with(s, init='#'):
    """
    Check whether a string starts with optional whitespace followed by the
    comment marker *init*.
    :param s: string to be checked.
    :param init: initial chars denoting a line comment.
    :return: True if s begins (after leading whitespace) with init.
    """
    # re.escape handles every regex metacharacter in init; the previous
    # double_backslash-based version only escaped backslashes, so markers
    # such as '.' or '*' matched far more than the literal string.
    return re.match(r'\s*' + re.escape(init), s) is not None
def is_comment(s, init='#'):
    """
    Determine whether the string represents a (multi-line) comment,
    accepting empty lines as well.
    :param s: string to be checked.
    :param init: initial chars denoting a line comment.
    :return: True if every line is empty or starts with whitespace + init.
    """
    # Same predicate as before, folded into a single short-circuiting all().
    return all(starts_with(line, init) or line == '' for line in LineIter(s))
def to_comment(s, init='#'):
    """
    Convert string into a string with a (multi-line) comment.
    :param init: initial chars denoting a line comment.
    :param s: string to be converted.
    :return: the string with init prepended to each line.
    """
    # Equivalent to re.sub('^', init, s, flags=re.MULTILINE): every line is
    # prefixed, including the empty trailing "line" after a final newline.
    return '\n'.join(init + line for line in s.split('\n'))
def de_comment(s, init='#', end='\\'):
    """
    Turn a string of line comments into a string without the comment markers.
    :param s:
    :param init: string of initial chars denoting a line comment.
    :param end: string of chars optionally terminating a line comment.
    :return: s where initial and terminating chars are removed, however, initial chars are only removed when
    preceded by whitespaces.
    """
    end_escaped = double_backslash(end)
    lines = LineIter(s)
    stripped = list()
    for i in lines:
        de = i
        if starts_with(de, init):
            # Remove the first occurrence of the comment marker.
            # NOTE(review): init is used unescaped in re.search here, so a
            # marker containing regex metacharacters would match more than
            # the literal string -- confirm init is always a literal ('#').
            match = re.search(init, de)
            de = de[:match.start()] + de[match.end():]
            # Strip an optional line-continuation marker at the line's end.
            de = re.sub(end_escaped + '$', '', de)
        stripped.append(de)
    return '\n'.join(stripped)
| import sys
import re
from unicodedata import category
from qtconsole.qt import QtGui
from .line_iter import LineIter
__author__ = '<NAME> <<EMAIL>>'
# JupyterWidget
if sys.platform.startswith('win'):
default_editor = 'notepad'
else:
default_editor = ''
# ConsoleWidget
def get_block_plain_text(block):
""" Given a QTextBlock, return its unformatted text.
"""
cursor = QtGui.QTextCursor(block)
cursor.movePosition(QtGui.QTextCursor.StartOfBlock)
cursor.movePosition(QtGui.QTextCursor.EndOfBlock,
QtGui.QTextCursor.KeepAnchor)
return cursor.selection().toPlainText()
# ConsoleWdidget
def is_letter_or_number(char):
""" Returns whether the specified unicode character is a letter or a number.
"""
cat = category(char)
return cat.startswith('L') or cat.startswith('N')
# ConsoleWidget
def set_top_cursor(receiver, cursor):
""" Scrolls the viewport so that the specified cursor is at the top.
"""
scrollbar = receiver.verticalScrollBar()
scrollbar.setValue(scrollbar.maximum())
original_cursor = receiver.textCursor()
receiver.setTextCursor(cursor)
receiver.ensureCursorVisible()
receiver.setTextCursor(original_cursor)
def double_backslash(s):
"""
Escape backslashes to work with regex.
:param s: string.
:return: string where backslashes are replaced with double backslash.
"""
return re.sub(r'\\', r'\\\\', s)
def starts_with(s, init='#'):
"""
Check whether a string starts with whitespaces followed by pound sign.
:param s: string to be checked.
:param init: initial chars denoting a line comment.
:return: whether the string starts with whitespaces followed by pound sign.
"""
return True if re.match('^\s*' + double_backslash(init), s) else False
def is_comment(s, init='#'):
"""
Determine whether the string represents a (multi-line) comment, accepting empty lines as well.
:param s: string to be checked.
:param init: initial chars denoting a line comment.
:return: True if all the lines in the string start with whitespace followed by the pound sign.
"""
lines = LineIter(s)
for i in lines:
if not(starts_with(i, init) or i == ''):
return False
return True
def to_comment(s, init='#'):
"""
Convert string into a string with a (multi-line) comment.
:param init: initial chars denoting a line comment.
:param s: string to be converted.
:return: the string with pound signs prepended to each line.
"""
return re.sub('^', init, s, flags=re.MULTILINE)
def de_comment(s, init='#', end='\\'):
"""
Turn a string of line comments into a string without the comment markers.
:param s:
:param init: string of initial chars denoting a line comment.
:param end: string of chars optionally terminating a line comment.
:return: s where initial and terminating chars are removed, however, initial chars are only removed when
preceded by whitespaces.
"""
end_escaped = double_backslash(end)
lines = LineIter(s)
stripped = list()
for i in lines:
de = i
if starts_with(de, init):
match = re.search(init, de)
de = de[:match.start()] + de[match.end():]
de = re.sub(end_escaped + '$', '', de)
stripped.append(de)
return '\n'.join(stripped)
| en | 0.697831 | # JupyterWidget # ConsoleWidget Given a QTextBlock, return its unformatted text. # ConsoleWdidget Returns whether the specified unicode character is a letter or a number. # ConsoleWidget Scrolls the viewport so that the specified cursor is at the top. Escape backslashes to work with regex. :param s: string. :return: string where backslashes are replaced with double backslash. Check whether a string starts with whitespaces followed by pound sign. :param s: string to be checked. :param init: initial chars denoting a line comment. :return: whether the string starts with whitespaces followed by pound sign. Determine whether the string represents a (multi-line) comment, accepting empty lines as well. :param s: string to be checked. :param init: initial chars denoting a line comment. :return: True if all the lines in the string start with whitespace followed by the pound sign. Convert string into a string with a (multi-line) comment. :param init: initial chars denoting a line comment. :param s: string to be converted. :return: the string with pound signs prepended to each line. Turn a string of line comments into a string without the comment markers. :param s: :param init: string of initial chars denoting a line comment. :param end: string of chars optionally terminating a line comment. :return: s where initial and terminating chars are removed, however, initial chars are only removed when preceded by whitespaces. | 2.789067 | 3 |
Ar_Script/ar_124_tkinter_翻牌.py | archerckk/PyTest | 0 | 6617290 | <filename>Ar_Script/ar_124_tkinter_翻牌.py
from tkinter import *

# Demo: one checkbutton per girl, each backed by its own IntVar so the
# checked state can be read back later.
app = Tk()
girls = ['蒂法', '女帝', 'Saber', '亚丝娜']
v = []
for girl in girls:
    # Bug fix: the original passed the whole list `v` as variable=, so all
    # buttons shared one bogus variable and no per-item state was recorded.
    var = IntVar()
    v.append(var)
    b = Checkbutton(app, text=girl, variable=var)
    b.pack(anchor=W)
mainloop() | <filename>Ar_Script/ar_124_tkinter_翻牌.py
from tkinter import *
app=Tk()
girls=['蒂法','女帝','Saber','亚丝娜']
v=[]
for girl in girls:
    v.append(IntVar())
    # NOTE(review): variable=v passes the whole list, not the IntVar just
    # appended -- each Checkbutton likely needs its own variable (v[-1]).
    b=Checkbutton(app,text=girl,variable=v)
    b.pack(anchor=W)
mainloop() | none | 1 | 2.829425 | 3 | |
models/Credentials.py | CT83/PyMultiPoster | 0 | 6617291 | from shared.models import db
class Credentials(db.Model):
    # One row per user holding the OAuth tokens / login credentials of the
    # social networks the user connected.
    # NOTE(review): all secrets are stored as plain-text columns.
    id = db.Column(db.Integer, primary_key=True)
    facebook_access_token = db.Column(db.Text)
    twitter_access_token = db.Column(db.Text)
    twitter_access_secret = db.Column(db.Text)
    instagram_email = db.Column(db.Text)
    instagram_password = db.Column(db.Text)
    linkedin_access_token = db.Column(db.Text)
    tumblr_access_token = db.Column(db.Text)
    tumblr_access_secret = db.Column(db.Text)
    # Owning user; references users.email rather than a numeric id.
    user_email = db.Column(db.String(80), db.ForeignKey('users.email'))
    def __repr__(self):
        # NOTE(review): embeds the Facebook token in the repr, so it can
        # leak into logs -- consider redacting.
        return '<Credential:{} {} {}>'.format(self.id, self.user_email,
                                              self.facebook_access_token)
    def save_credential_to_db(self, dictionary):
        """Upsert *dictionary* into the row keyed by self.user_email.

        Creates an empty row first if the user has none, then bulk-updates
        the given columns and commits.
        """
        user_email = self.user_email
        count = Credentials.query.filter_by(user_email=user_email).count()
        print("Found", count, "matching rows!")
        if not count:
            cred = Credentials(user_email=user_email)
            db.session.add(cred)
            db.session.commit()
        Credentials.query.filter_by(user_email=user_email).update(dictionary)
        db.session.commit()
def save_credentials(username, facebook_access_token="", twitter_access_token="",
                     twitter_access_secret="", instagram_email="", instagram_password="",
                     linkedin_access_token="", tumblr_access_token="",
                     tumblr_access_secret=""):
    """Upsert the supplied social-network credentials for *username*.

    Only services for which a complete set of values was supplied are
    written; everything else is left untouched.  Security fix vs. the
    previous version: only the service name is logged, never the secret
    values (tokens and passwords were printed in plain text before).
    Also restores the Instagram field assignment that had been mangled to
    a "<PASSWORD>" placeholder.
    """
    def _store(service, fields):
        # Log which service was saved, but not the secret values.
        print("Saving", service, "credentials to DB for", username)
        cred = Credentials(user_email=username, **fields)
        cred.save_credential_to_db(dict(fields))

    if facebook_access_token:
        _store("Facebook", {'facebook_access_token': facebook_access_token})
    if twitter_access_token and twitter_access_secret:
        _store("Twitter", {'twitter_access_token': twitter_access_token,
                           'twitter_access_secret': twitter_access_secret})
    if instagram_email and instagram_password:
        _store("Instagram", {'instagram_email': instagram_email,
                             'instagram_password': instagram_password})
    if linkedin_access_token:
        _store("LinkedIn", {'linkedin_access_token': linkedin_access_token})
    if tumblr_access_token and tumblr_access_secret:
        _store("Tumblr", {'tumblr_access_token': tumblr_access_token,
                          'tumblr_access_secret': tumblr_access_secret})
def get_credentials(username):
    """Return a dict of the stored credentials for *username*.

    Missing rows/attributes come back as None; the Instagram entries are
    hard-coded placeholders (see TODO below).  Fixes vs. the previous
    version: the bare ``except:`` is narrowed to ``Exception`` (it used to
    swallow SystemExit/KeyboardInterrupt too), and the returned secret
    values are no longer printed to the log.
    """
    c = Credentials()
    try:
        c = Credentials.query.filter_by(user_email=username).first()
    except Exception:
        # Tolerate a missing table/session; getattr defaults below cover it.
        pass
    # TODO To make instagram_email and instagram_password work again uncomment the commented lines and remove the redundant ones
    credentials = {'facebook_access_token': getattr(c, 'facebook_access_token', None),
                   'twitter_access_token': getattr(c, 'twitter_access_token', None),
                   'twitter_access_secret': getattr(c, 'twitter_access_secret', None),
                   # 'instagram_email': getattr(c, 'instagram_email', None),
                   # 'instagram_password': getattr(c, 'instagram_password', None),
                   'instagram_email': "INSTAGRAM_EMAIL",
                   'instagram_password': "<PASSWORD>",
                   'linkedin_access_token': getattr(c, 'linkedin_access_token', None),
                   'tumblr_access_token': getattr(c, 'tumblr_access_token', None),
                   'tumblr_access_secret': getattr(c, 'tumblr_access_secret', None),
                   }
    # Security: log only the keys, not the credential values themselves.
    print("get_credentials returned keys:", sorted(credentials))
    return credentials
def delete_credential(cred):
    """Delete the given Credentials row and commit the transaction."""
    print("Deleted Credentials for", cred)
    db.session.delete(cred)
    db.session.commit()
| from shared.models import db
class Credentials(db.Model):
    # One row per user holding the OAuth tokens / login credentials of the
    # social networks the user connected.
    # NOTE(review): all secrets are stored as plain-text columns.
    id = db.Column(db.Integer, primary_key=True)
    facebook_access_token = db.Column(db.Text)
    twitter_access_token = db.Column(db.Text)
    twitter_access_secret = db.Column(db.Text)
    instagram_email = db.Column(db.Text)
    instagram_password = db.Column(db.Text)
    linkedin_access_token = db.Column(db.Text)
    tumblr_access_token = db.Column(db.Text)
    tumblr_access_secret = db.Column(db.Text)
    # Owning user; references users.email rather than a numeric id.
    user_email = db.Column(db.String(80), db.ForeignKey('users.email'))
    def __repr__(self):
        # NOTE(review): embeds the Facebook token in the repr, so it can
        # leak into logs -- consider redacting.
        return '<Credential:{} {} {}>'.format(self.id, self.user_email,
                                              self.facebook_access_token)
    def save_credential_to_db(self, dictionary):
        """Upsert *dictionary* into the row keyed by self.user_email.

        Creates an empty row first if the user has none, then bulk-updates
        the given columns and commits.
        """
        user_email = self.user_email
        count = Credentials.query.filter_by(user_email=user_email).count()
        print("Found", count, "matching rows!")
        if not count:
            cred = Credentials(user_email=user_email)
            db.session.add(cred)
            db.session.commit()
        Credentials.query.filter_by(user_email=user_email).update(dictionary)
        db.session.commit()
def save_credentials(username, facebook_access_token="", twitter_access_token="",
                     twitter_access_secret="", instagram_email="", instagram_password="",
                     linkedin_access_token="", tumblr_access_token="",
                     tumblr_access_secret=""):
    """Upsert the supplied credentials for *username*, one service at a time.

    Only services for which a complete set of values was passed are written.
    NOTE(review): the prints below log secret values in plain text.
    """
    if facebook_access_token:
        print("Saving Facebook Credentials to DB:", facebook_access_token)
        cred = Credentials(user_email=username,
                           facebook_access_token=facebook_access_token)
        cred.save_credential_to_db(dict(facebook_access_token=facebook_access_token))
    if twitter_access_token and twitter_access_secret:
        print("Saving Twitter Credentials to DB", twitter_access_token, twitter_access_secret)
        cred = Credentials(user_email=username,
                           twitter_access_token=twitter_access_token,
                           twitter_access_secret=twitter_access_secret)
        cred.save_credential_to_db(dict(twitter_access_token=twitter_access_token,
                                        twitter_access_secret=twitter_access_secret))
    if instagram_email and instagram_password:
        print("Saving Instagram Credentials to DB", instagram_email, instagram_password)
        # NOTE(review): "<PASSWORD>" below looks like dataset anonymization
        # damage; the original presumably passed instagram_password here.
        cred = Credentials(user_email=username,
                           instagram_email=instagram_email,
                           instagram_password=<PASSWORD>)
        cred.save_credential_to_db(dict(instagram_email=instagram_email,
                                        instagram_password=instagram_password))
    if linkedin_access_token:
        print("Saving Linkedin Credentials to DB", linkedin_access_token)
        cred = Credentials(user_email=username,
                           linkedin_access_token=linkedin_access_token)
        cred.save_credential_to_db(dict(linkedin_access_token=linkedin_access_token))
    if tumblr_access_token and tumblr_access_secret:
        print("Saving Tumblr Credentials to DB", tumblr_access_token, tumblr_access_secret)
        cred = Credentials(user_email=username,
                           tumblr_access_token=tumblr_access_token,
                           tumblr_access_secret=tumblr_access_secret)
        cred.save_credential_to_db(dict(tumblr_access_token=tumblr_access_token,
                                        tumblr_access_secret=tumblr_access_secret))
def get_credentials(username):
c = Credentials()
try:
c = Credentials.query.filter_by(user_email=username).first()
except:
pass
# TODO To make instagram_email and instagram_password work again uncomment the commented lines and remove the redundant ones
credentials = {'facebook_access_token': getattr(c, 'facebook_access_token', None),
'twitter_access_token': getattr(c, 'twitter_access_token', None),
'twitter_access_secret': getattr(c, 'twitter_access_secret', None),
# 'instagram_email': getattr(c, 'instagram_email', None),
# 'instagram_password': getattr(c, 'instagram_password', None),
'instagram_email': "INSTAGRAM_EMAIL",
'instagram_password': "<PASSWORD>",
'linkedin_access_token': getattr(c, 'linkedin_access_token', None),
'tumblr_access_token': getattr(c, 'tumblr_access_token', None),
'tumblr_access_secret': getattr(c, 'tumblr_access_secret', None),
}
print("get_credentials Returned:", credentials)
return credentials
def delete_credential(cred):
print("Deleted Credentials for", cred)
db.session.delete(cred)
db.session.commit()
| en | 0.4835 | # TODO To make instagram_email and instagram_password work again uncomment the commented lines and remove the redundant ones # 'instagram_email': getattr(c, 'instagram_email', None), # 'instagram_password': getattr(c, 'instagram_password', None), | 2.580395 | 3 |
hub_app/migrations/0002_pendingregistration.py | passiopeia/passiopeia-hub | 0 | 6617292 | <gh_stars>0
from django.conf import settings
from django.db import migrations, models
import django.utils.timezone
import hub_app.reglib.key
import hub_app.reglib.validity
import uuid
class Migration(migrations.Migration):
dependencies = [
('hub_app', '0001_basic_user_model'),
]
operations = [
migrations.CreateModel(
name='PendingRegistration',
fields=[
('uuid', models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False, verbose_name='UUID')),
('created', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Created')),
('valid_until', models.DateTimeField(default=hub_app.reglib.validity.get_registration_max_validity, verbose_name='Valid Until')),
('key', models.CharField(default=hub_app.reglib.key.get_registration_key, max_length=255, verbose_name='Registration Key')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='User')),
],
options={
'verbose_name': 'Pending Registration',
'verbose_name_plural': 'Pending Registrations',
'permissions': (),
'default_permissions': ('add', 'change', 'delete'),
},
),
]
| from django.conf import settings
from django.db import migrations, models
import django.utils.timezone
import hub_app.reglib.key
import hub_app.reglib.validity
import uuid
class Migration(migrations.Migration):
dependencies = [
('hub_app', '0001_basic_user_model'),
]
operations = [
migrations.CreateModel(
name='PendingRegistration',
fields=[
('uuid', models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False, verbose_name='UUID')),
('created', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Created')),
('valid_until', models.DateTimeField(default=hub_app.reglib.validity.get_registration_max_validity, verbose_name='Valid Until')),
('key', models.CharField(default=hub_app.reglib.key.get_registration_key, max_length=255, verbose_name='Registration Key')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='User')),
],
options={
'verbose_name': 'Pending Registration',
'verbose_name_plural': 'Pending Registrations',
'permissions': (),
'default_permissions': ('add', 'change', 'delete'),
},
),
] | none | 1 | 1.861329 | 2 | |
Game/editor.py | 5igmatic/puzzle_game | 0 | 6617293 | <gh_stars>0
from turtle import width
import pygame
from tile import Tile
from player import Player
class Editor:
def __init__(self, size, font, WIN):
self.editorActive = False
self.cameraSpeed = 5
self.cameraX = 0
self.cameraY = 0
self.activePlayer = None
self.rotationCooldown = 0
self.rotationCooldownDuration = 20
self.tiles = pygame.sprite.Group()
self.players = pygame.sprite.Group()
self.playerIndex = 0
self.size = size
self.font = font
self.WIN = WIN
width = self.WIN.get_width()
self.exitButton = pygame.surface.Surface((20, 20)).convert_alpha()
self.exitButton.fill("white")
self.exitButtonRect = self.exitButton.get_rect(center = (width-20, 20))
self.restartButton = pygame.surface.Surface((20, 20)).convert_alpha()
self.restartButton.fill("white")
self.restartButtonRect = self.restartButton.get_rect(center = (width-60, 20))
self.playButton = pygame.surface.Surface((20, 20)).convert_alpha()
self.playButton.fill("white")
self.playButtonRect = self.playButton.get_rect(center = (width-100, 20))
self.equationKeys = {pygame.K_0: "0",
pygame.K_1: "1",
pygame.K_2: "2",
pygame.K_3: "3",
pygame.K_4: "4",
pygame.K_5: "5",
pygame.K_6: "6",
pygame.K_7: "7",
pygame.K_8: "8",
pygame.K_9: "9",
pygame.K_EQUALS: "=",
pygame.K_x: "x",
pygame.K_SLASH: "÷",
pygame.K_p: "+",
pygame.K_MINUS: "-"}
def initialise(self, width, height):
self.tiles.empty()
self.players.empty()
self.width = width
self.height = height
self.cameraX = width/2-0.5
self.cameraY = height/2-0.5
self.tilePositions = [[" "]*width for i in range(height)]
for index in range(width):
self.tilePositions[0][index] = "t"
self.tilePositions[-1][index] = "t"
newTile = Tile(index, 0, self.size)
self.tiles.add(newTile)
newTile = Tile(index, height-1, self.size)
self.tiles.add(newTile)
for index,tileRow in enumerate(self.tilePositions):
tileRow[0] = "t"
tileRow[width-1] = "t"
newTile = Tile(0, index, self.size)
self.tiles.add(newTile)
newTile = Tile(width-1, index, self.size)
self.tiles.add(newTile)
def doMovement(self):
keys = pygame.key.get_pressed()
if keys[pygame.K_w]:
self.cameraY -= self.cameraSpeed/self.size
if keys[pygame.K_s]:
self.cameraY += self.cameraSpeed/self.size
if keys[pygame.K_a]:
self.cameraX -= self.cameraSpeed/self.size
if keys[pygame.K_d]:
self.cameraX += self.cameraSpeed/self.size
if keys[pygame.K_t]:
self.placeTile()
for key in self.equationKeys:
if keys[key]:
self.activePlayer.type = self.equationKeys[key]
self.activePlayer.text.updateText(self.equationKeys[key])
if keys[pygame.K_BACKSPACE]:
self.tilePositions[self.activePlayer.y][self.activePlayer.x] = " "
self.players.remove(self.activePlayer)
if self.rotationCooldown == 0:
if keys[pygame.K_r]:
self.activePlayer.rotation += 90
self.activePlayer.updateRotation()
self.rotationCooldown = self.rotationCooldownDuration
else:
self.rotationCooldown -= 1
self.convertLayoutToList()
def validLevel(self):
for player in self.players:
if player.type == "=":
return True
return False
def mouseClick(self):
for player in self.players:
#gets the pixel location of the center of the player
playerScreenPosX = (player.x - self.cameraX) * self.size + self.WIN.get_width()/2
playerScreenPosY = (player.y - self.cameraY) * self.size + self.WIN.get_height()/2
rect = player.image.get_rect(center = (playerScreenPosX, playerScreenPosY))
if rect.collidepoint(pygame.mouse.get_pos()):
self.setActivePlayer(player)
def setActivePlayer(self, player):
if self.activePlayer != None:
self.activePlayer.original_image.fill("white")
self.activePlayer.image = self.activePlayer.original_image
self.activePlayer = player
self.activePlayer.original_image.fill("grey")
self.activePlayer.image = self.activePlayer.original_image
def placeTile(self):
mousePos = pygame.mouse.get_pos()
mousePosX = (mousePos[0]-self.WIN.get_width()/2)/self.size
mousePosY = (mousePos[1]-self.WIN.get_height()/2)/self.size
gridX = mousePosX + self.cameraX
gridY = mousePosY + self.cameraY
gridX = round(gridX)
gridY = round(gridY)
if gridX > 0 and gridX < self.width-1 and gridY > 0 and gridY < self.height-1:
unoccupied = True
for player in self.players:
if gridX == player.x and gridY == player.y:
unoccupied = False
if unoccupied:
newPlayer = Player(self.playerIndex, " ", gridX, gridY, 0, self)
self.players.add(newPlayer)
self.setActivePlayer(newPlayer)
self.playerIndex += 1
def updateIndividual(self, object, shiftX, shiftY):
x = round(self.size*(object.x + shiftX))
y = round(self.size*(object.y + shiftY))
rect = object.image.get_rect(center = (x, y))
self.WIN.blit(object.image, rect)
def updateEditor(self):
screenCenterX = self.WIN.get_width()/(2*self.size)
screenCenterY = self.WIN.get_height()/(2*self.size)
shiftX = screenCenterX - self.cameraX
shiftY = screenCenterY - self.cameraY
for tile in self.tiles:
self.updateIndividual(tile, shiftX, shiftY)
for player in self.players:
self.updateIndividual(player, shiftX, shiftY)
self.updateIndividual(player.text, shiftX, shiftY)
self.WIN.blit(self.exitButton, self.exitButtonRect)
self.WIN.blit(self.restartButton, self.restartButtonRect)
self.WIN.blit(self.playButton, self.playButtonRect)
def convertLayoutToList(self):
self.tileRotations = [[" "]*self.width for i in range(self.height)]
for player in self.players:
if player.type == " ": player.type = "t"
self.tilePositions[player.y][player.x] = player.type
self.tileRotations[player.y][player.x] = str(int(player.rotation % 360 / 90))
self.layout = []
for row in range(self.height):
positionsRow = ""
rotationsRow = ""
for position in self.tilePositions[row]:
positionsRow += position
for rotation in self.tileRotations[row]:
rotationsRow += rotation
rowData = [positionsRow, rotationsRow]
self.layout.append(rowData)
| from turtle import width
import pygame
from tile import Tile
from player import Player
class Editor:
def __init__(self, size, font, WIN):
self.editorActive = False
self.cameraSpeed = 5
self.cameraX = 0
self.cameraY = 0
self.activePlayer = None
self.rotationCooldown = 0
self.rotationCooldownDuration = 20
self.tiles = pygame.sprite.Group()
self.players = pygame.sprite.Group()
self.playerIndex = 0
self.size = size
self.font = font
self.WIN = WIN
width = self.WIN.get_width()
self.exitButton = pygame.surface.Surface((20, 20)).convert_alpha()
self.exitButton.fill("white")
self.exitButtonRect = self.exitButton.get_rect(center = (width-20, 20))
self.restartButton = pygame.surface.Surface((20, 20)).convert_alpha()
self.restartButton.fill("white")
self.restartButtonRect = self.restartButton.get_rect(center = (width-60, 20))
self.playButton = pygame.surface.Surface((20, 20)).convert_alpha()
self.playButton.fill("white")
self.playButtonRect = self.playButton.get_rect(center = (width-100, 20))
self.equationKeys = {pygame.K_0: "0",
pygame.K_1: "1",
pygame.K_2: "2",
pygame.K_3: "3",
pygame.K_4: "4",
pygame.K_5: "5",
pygame.K_6: "6",
pygame.K_7: "7",
pygame.K_8: "8",
pygame.K_9: "9",
pygame.K_EQUALS: "=",
pygame.K_x: "x",
pygame.K_SLASH: "÷",
pygame.K_p: "+",
pygame.K_MINUS: "-"}
def initialise(self, width, height):
self.tiles.empty()
self.players.empty()
self.width = width
self.height = height
self.cameraX = width/2-0.5
self.cameraY = height/2-0.5
self.tilePositions = [[" "]*width for i in range(height)]
for index in range(width):
self.tilePositions[0][index] = "t"
self.tilePositions[-1][index] = "t"
newTile = Tile(index, 0, self.size)
self.tiles.add(newTile)
newTile = Tile(index, height-1, self.size)
self.tiles.add(newTile)
for index,tileRow in enumerate(self.tilePositions):
tileRow[0] = "t"
tileRow[width-1] = "t"
newTile = Tile(0, index, self.size)
self.tiles.add(newTile)
newTile = Tile(width-1, index, self.size)
self.tiles.add(newTile)
def doMovement(self):
keys = pygame.key.get_pressed()
if keys[pygame.K_w]:
self.cameraY -= self.cameraSpeed/self.size
if keys[pygame.K_s]:
self.cameraY += self.cameraSpeed/self.size
if keys[pygame.K_a]:
self.cameraX -= self.cameraSpeed/self.size
if keys[pygame.K_d]:
self.cameraX += self.cameraSpeed/self.size
if keys[pygame.K_t]:
self.placeTile()
for key in self.equationKeys:
if keys[key]:
self.activePlayer.type = self.equationKeys[key]
self.activePlayer.text.updateText(self.equationKeys[key])
if keys[pygame.K_BACKSPACE]:
self.tilePositions[self.activePlayer.y][self.activePlayer.x] = " "
self.players.remove(self.activePlayer)
if self.rotationCooldown == 0:
if keys[pygame.K_r]:
self.activePlayer.rotation += 90
self.activePlayer.updateRotation()
self.rotationCooldown = self.rotationCooldownDuration
else:
self.rotationCooldown -= 1
self.convertLayoutToList()
def validLevel(self):
for player in self.players:
if player.type == "=":
return True
return False
def mouseClick(self):
for player in self.players:
#gets the pixel location of the center of the player
playerScreenPosX = (player.x - self.cameraX) * self.size + self.WIN.get_width()/2
playerScreenPosY = (player.y - self.cameraY) * self.size + self.WIN.get_height()/2
rect = player.image.get_rect(center = (playerScreenPosX, playerScreenPosY))
if rect.collidepoint(pygame.mouse.get_pos()):
self.setActivePlayer(player)
def setActivePlayer(self, player):
if self.activePlayer != None:
self.activePlayer.original_image.fill("white")
self.activePlayer.image = self.activePlayer.original_image
self.activePlayer = player
self.activePlayer.original_image.fill("grey")
self.activePlayer.image = self.activePlayer.original_image
def placeTile(self):
mousePos = pygame.mouse.get_pos()
mousePosX = (mousePos[0]-self.WIN.get_width()/2)/self.size
mousePosY = (mousePos[1]-self.WIN.get_height()/2)/self.size
gridX = mousePosX + self.cameraX
gridY = mousePosY + self.cameraY
gridX = round(gridX)
gridY = round(gridY)
if gridX > 0 and gridX < self.width-1 and gridY > 0 and gridY < self.height-1:
unoccupied = True
for player in self.players:
if gridX == player.x and gridY == player.y:
unoccupied = False
if unoccupied:
newPlayer = Player(self.playerIndex, " ", gridX, gridY, 0, self)
self.players.add(newPlayer)
self.setActivePlayer(newPlayer)
self.playerIndex += 1
def updateIndividual(self, object, shiftX, shiftY):
x = round(self.size*(object.x + shiftX))
y = round(self.size*(object.y + shiftY))
rect = object.image.get_rect(center = (x, y))
self.WIN.blit(object.image, rect)
def updateEditor(self):
screenCenterX = self.WIN.get_width()/(2*self.size)
screenCenterY = self.WIN.get_height()/(2*self.size)
shiftX = screenCenterX - self.cameraX
shiftY = screenCenterY - self.cameraY
for tile in self.tiles:
self.updateIndividual(tile, shiftX, shiftY)
for player in self.players:
self.updateIndividual(player, shiftX, shiftY)
self.updateIndividual(player.text, shiftX, shiftY)
self.WIN.blit(self.exitButton, self.exitButtonRect)
self.WIN.blit(self.restartButton, self.restartButtonRect)
self.WIN.blit(self.playButton, self.playButtonRect)
def convertLayoutToList(self):
self.tileRotations = [[" "]*self.width for i in range(self.height)]
for player in self.players:
if player.type == " ": player.type = "t"
self.tilePositions[player.y][player.x] = player.type
self.tileRotations[player.y][player.x] = str(int(player.rotation % 360 / 90))
self.layout = []
for row in range(self.height):
positionsRow = ""
rotationsRow = ""
for position in self.tilePositions[row]:
positionsRow += position
for rotation in self.tileRotations[row]:
rotationsRow += rotation
rowData = [positionsRow, rotationsRow]
self.layout.append(rowData) | en | 0.914402 | #gets the pixel location of the center of the player | 3.080122 | 3 |
AboutFrame.py | a-student-team/2p-notebook | 3 | 6617294 | <gh_stars>1-10
import wx
from get_file import get_file
class AboutFrame(wx.Frame):
def __init__(self, parent):
wx.Frame.__init__(self, parent, -1, "About", size=(300, 200))
self.SetMinSize(wx.Size(300, 200))
self.SetMaxSize(wx.Size(300, 200))
self.SetBackgroundColour(wx.Colour(255, 255, 255))
self.SetIcon(wx.Icon(get_file("\\images\\icon.ico"), wx.BITMAP_TYPE_ANY))
self.init_ui()
def init_ui(self):
gbSizer1 = wx.GridBagSizer( 0, 0 )
gbSizer1.SetFlexibleDirection( wx.BOTH )
gbSizer1.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
self.m_bitmap1 = wx.StaticBitmap( self, wx.ID_ANY, wx.NullBitmap, wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_bitmap1.SetBitmap( wx.Bitmap( get_file("\\images\\icon.png"), wx.BITMAP_TYPE_ANY ) )
gbSizer1.Add( self.m_bitmap1, wx.GBPosition( 0, 0 ), wx.GBSpan( 2, 1 ), wx.ALL, 5 )
self.m_staticText1 = wx.StaticText( self, wx.ID_ANY, u"2p-note", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText1.Wrap( -1 )
self.m_staticText1.SetFont( wx.Font( wx.NORMAL_FONT.GetPointSize()+10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "Microsoft YaHei UI" ) )
gbSizer1.Add( self.m_staticText1, wx.GBPosition( 0, 1 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 )
self.m_staticText2 = wx.StaticText( self, wx.ID_ANY, u"book", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText2.Wrap( -1 )
self.m_staticText2.SetFont( wx.Font( wx.NORMAL_FONT.GetPointSize()+10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "Microsoft YaHei UI" ) )
gbSizer1.Add( self.m_staticText2, wx.GBPosition( 1, 1 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 )
self.m_staticText3 = wx.StaticText( self, wx.ID_ANY, u"Version 1.0", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText3.Wrap( -1 )
self.m_staticText3.SetFont( wx.Font( wx.NORMAL_FONT.GetPointSize(), wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "Microsoft YaHei UI" ) )
gbSizer1.Add( self.m_staticText3, wx.GBPosition( 2, 1 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 )
self.m_staticText4 = wx.StaticText( self, wx.ID_ANY, u"作者:w-flower(落霞丶冬花)", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText4.Wrap( -1 )
self.m_staticText4.SetFont( wx.Font( wx.NORMAL_FONT.GetPointSize(), wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "Microsoft YaHei UI" ) )
gbSizer1.Add( self.m_staticText4, wx.GBPosition( 3, 1 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 )
self.SetSizer( gbSizer1 )
self.Layout()
self.Centre( wx.BOTH )
#test
if __name__ == '__main__':
app = wx.App()
frame = AboutFrame(None)
frame.Show()
app.MainLoop() | import wx
from get_file import get_file
class AboutFrame(wx.Frame):
def __init__(self, parent):
wx.Frame.__init__(self, parent, -1, "About", size=(300, 200))
self.SetMinSize(wx.Size(300, 200))
self.SetMaxSize(wx.Size(300, 200))
self.SetBackgroundColour(wx.Colour(255, 255, 255))
self.SetIcon(wx.Icon(get_file("\\images\\icon.ico"), wx.BITMAP_TYPE_ANY))
self.init_ui()
def init_ui(self):
gbSizer1 = wx.GridBagSizer( 0, 0 )
gbSizer1.SetFlexibleDirection( wx.BOTH )
gbSizer1.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
self.m_bitmap1 = wx.StaticBitmap( self, wx.ID_ANY, wx.NullBitmap, wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_bitmap1.SetBitmap( wx.Bitmap( get_file("\\images\\icon.png"), wx.BITMAP_TYPE_ANY ) )
gbSizer1.Add( self.m_bitmap1, wx.GBPosition( 0, 0 ), wx.GBSpan( 2, 1 ), wx.ALL, 5 )
self.m_staticText1 = wx.StaticText( self, wx.ID_ANY, u"2p-note", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText1.Wrap( -1 )
self.m_staticText1.SetFont( wx.Font( wx.NORMAL_FONT.GetPointSize()+10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "Microsoft YaHei UI" ) )
gbSizer1.Add( self.m_staticText1, wx.GBPosition( 0, 1 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 )
self.m_staticText2 = wx.StaticText( self, wx.ID_ANY, u"book", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText2.Wrap( -1 )
self.m_staticText2.SetFont( wx.Font( wx.NORMAL_FONT.GetPointSize()+10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "Microsoft YaHei UI" ) )
gbSizer1.Add( self.m_staticText2, wx.GBPosition( 1, 1 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 )
self.m_staticText3 = wx.StaticText( self, wx.ID_ANY, u"Version 1.0", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText3.Wrap( -1 )
self.m_staticText3.SetFont( wx.Font( wx.NORMAL_FONT.GetPointSize(), wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "Microsoft YaHei UI" ) )
gbSizer1.Add( self.m_staticText3, wx.GBPosition( 2, 1 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 )
self.m_staticText4 = wx.StaticText( self, wx.ID_ANY, u"作者:w-flower(落霞丶冬花)", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText4.Wrap( -1 )
self.m_staticText4.SetFont( wx.Font( wx.NORMAL_FONT.GetPointSize(), wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "Microsoft YaHei UI" ) )
gbSizer1.Add( self.m_staticText4, wx.GBPosition( 3, 1 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 )
self.SetSizer( gbSizer1 )
self.Layout()
self.Centre( wx.BOTH )
#test
if __name__ == '__main__':
app = wx.App()
frame = AboutFrame(None)
frame.Show()
app.MainLoop() | none | 1 | 2.394073 | 2 | |
scripts/automation/regression/unit_tests/functional_tests/pkt_builder_test.py | dfried-vasona/trex | 4 | 6617295 | #!/router/bin/python
import pkt_bld_general_test
from client_utils.packet_builder import *
import dpkt
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_raises
from nose.tools import raises
import os
import random
import pprint
class CTRexPktBuilderSanity_Test(pkt_bld_general_test.CGeneralPktBld_Test):
    """Sanity checks for CTRexPktBuilder's static decoders and attribute setters."""

    def setUp(self):
        pass

    def test_decode_ip_addr(self):
        """IPv4/IPv6 strings decode to packed bytes; malformed input raises."""
        decode = CTRexPktBuilder._decode_ip_addr
        # IPv4 decodes into 4 packed bytes.
        assert_equal(decode('1.2.3.4', "ipv4"), '\x01\x02\x03\x04')
        assert_equal(decode('127.0.0.1', "ipv4"), '\x7F\x00\x00\x01')
        # Too many octets, or an address/family mismatch, must raise.
        assert_raises(CTRexPktBuilder.IPAddressError, decode, '1.2.3.4.5', "ipv4")
        assert_raises(CTRexPktBuilder.IPAddressError, decode, '1.2.3.4', "ipv6")
        # IPv6 decodes into 16 packed bytes.
        assert_equal(decode("fdf8:f53e:61e4::18:1:3333:1:1", "ipv6"),
                     'P\x01\x00\x00\x00\x00\r\xb8\x00\x0133\x00\x01\x00\x01')
        assert_raises(CTRexPktBuilder.IPAddressError, decode,
                      '2001::DB8:1:2fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b:1', "ipv6")

    def test_decode_mac_addr(self):
        """MAC strings accept ':' or '-' separators; anything else raises."""
        decode = CTRexPktBuilder._decode_mac_addr
        assert_equal(decode('00:de:34:ef:2e:f4'), '\x00\xde4\xef.\xf4')
        assert_equal(decode('00-de-55-ef-2e-f4'), '\x00\xdeU\xef.\xf4')
        # Extra octets, dotted notation, or space separators are all rejected.
        assert_raises(CTRexPktBuilder.MACAddressError, decode,
                      '00:de:34:ef:2e:f4:f4')
        assert_raises(CTRexPktBuilder.MACAddressError, decode,
                      '1.2.3.4')
        assert_raises(CTRexPktBuilder.MACAddressError, decode,
                      '00 de 34 ef 2e f4 f4')

    def test_gen_layer_name(self):
        """Auto-generated layer names get the next free numeric suffix."""
        pkt = CTRexPktBuilder()
        assert_equal(pkt._gen_layer_name("eth"), "eth_1")
        pkt._pkt_by_hdr = {'eth': None}      # mock header pointer data
        assert_equal(pkt._gen_layer_name("eth"), "eth_1")
        pkt._pkt_by_hdr['eth_1'] = None      # more mock header pointer data
        assert_equal(pkt._gen_layer_name("eth"), "eth_2")

    def test_set_layer_attr_basic(self):
        """set_layer_attr / set_layer_bit_attr validate layer and field names."""
        pkt = CTRexPktBuilder()
        pkt._pkt_by_hdr['ip'] = dpkt.ip.IP()
        # case 1 - full value assignment
        pkt.set_layer_attr('ip', 'src', '\x01\x02\x03\x04')
        assert_equal(pkt._pkt_by_hdr['ip'].src, '\x01\x02\x03\x04')
        # case 2 - bit assignment ORs each flag into the field
        pkt.set_layer_bit_attr('ip', 'off', dpkt.ip.IP_DF)
        pkt.set_layer_bit_attr('ip', 'off', dpkt.ip.IP_MF)
        assert_equal(bin(pkt._pkt_by_hdr['ip'].off), '0b110000000000000')
        # case 3 - assigning to a non-existent attribute raises
        assert_raises(ValueError, pkt.set_layer_bit_attr, 'ip', 'src_dst', 0)
        # case 4.1 - 'data' must be a dpkt.Packet instance, not a plain string
        # (case 4.2, assigning a real dpkt.Packet, is covered by
        # CTRexPktBuilder_Test.test_set_layer_attr)
        assert_raises(ValueError, pkt.set_layer_bit_attr, 'ip', 'data', "Not a dpkt.Packet object")
        self.print_packet(pkt._pkt_by_hdr['ip'])
        # case 5 - assigning on a non-existent layer raises
        assert_raises(KeyError, pkt.set_layer_bit_attr, 'no_such_layer', 'src', 0)

    def tearDown(self):
        pass
class CTRexPktBuilder_Test(pkt_bld_general_test.CGeneralPktBld_Test):
def setUp(self):
self.pkt_bld = CTRexPktBuilder()
self.pkt_bld.add_pkt_layer("l2", dpkt.ethernet.Ethernet())
self.pp = pprint.PrettyPrinter(indent=4)
def test_add_pkt_layer(self):
ip = dpkt.ip.IP(src='\x01\x02\x03\x04', dst='\x05\x06\x07\x08', p=1)
self.pkt_bld.add_pkt_layer("l3", ip)
tcp = dpkt.tcp.TCP(sport = 8080)
self.pkt_bld.add_pkt_layer("l4_tcp", tcp)
assert_equal(len(self.pkt_bld._pkt_by_hdr), 3)
assert_equal(self.pkt_bld._pkt_by_hdr.keys(), ['l2', 'l3', 'l4_tcp'])
self.print_packet(self.pkt_bld._packet)
assert_raises(ValueError, self.pkt_bld.add_pkt_layer, 'l2', dpkt.ethernet.Ethernet())
def test_set_ip_layer_addr(self):
ip = dpkt.ip.IP()
self.pkt_bld.add_pkt_layer("l3", ip)
self.pkt_bld.set_ip_layer_addr("l3", "src", "1.2.3.4")
self.print_packet(self.pkt_bld._packet)
assert_equal(self.pkt_bld._pkt_by_hdr['l3'].src, '\x01\x02\x03\x04')
# check that only IP layer is using this function
assert_raises(ValueError, self.pkt_bld.set_ip_layer_addr, 'l2', "src", "1.2.3.4")
def test_calc_offset(self):
ip = dpkt.ip.IP()
self.pkt_bld.add_pkt_layer("l3", ip)
assert_equal(self.pkt_bld._calc_offset("l3", "src", 4), (14, 14+12))
def test_set_ipv6_layer_addr(self):
ip6 = dpkt.ip6.IP6()
self.pkt_bld.add_pkt_layer("l3", ip6)
self.pkt_bld.set_ipv6_layer_addr("l3", "src", "fdf8:f53e:61e4::18:1:3333:1:1")
self.print_packet(self.pkt_bld._packet)
assert_equal(self.pkt_bld._pkt_by_hdr['l3'].src, 'P\x01\x00\x00\x00\x00\r\xb8\x00\x0133\x00\x01\x00\x01')
# check that only IP layer is using this function
assert_raises(ValueError, self.pkt_bld.set_ipv6_layer_addr, 'l2', "src", "fdf8:f53e:61e4::18:1:3333:1:1")
def test_set_eth_layer_addr(self):
ip = dpkt.ip.IP()
self.pkt_bld.add_pkt_layer("l3", ip)
self.pkt_bld.set_eth_layer_addr("l2", "src", "00:de:34:ef:2e:f4")
self.print_packet(self.pkt_bld._packet)
assert_equal(self.pkt_bld._pkt_by_hdr['l2'].src, '\x00\xde4\xef.\xf4')
# check that only IP layer is using this function
assert_raises(ValueError, self.pkt_bld.set_eth_layer_addr, 'l3', "src", "\x00\xde4\xef.\xf4")
    def test_set_layer_attr(self):
        """Replacing a layer's 'data' with a new dpkt.Packet disconnects the
        layers that hung below it.

        Extends test_set_layer_attr_basic: swapping the L3 header under "l2"
        drops the previously attached L3/L4 layers from the builder's layer
        registry (this also exercises the _reevaluate_packet method).
        """
        ip = dpkt.ip.IP(src='\x01\x02\x03\x04', dst='\x05\x06\x07\x08', p=1)
        self.pkt_bld.add_pkt_layer("l3_ip", ip)
        tcp = dpkt.tcp.TCP(sport = 8080)
        self.pkt_bld.add_pkt_layer("l4_tcp", tcp)
        # sanity: 'data' only accepts dpkt.Packet instances, not plain strings
        assert_raises(ValueError, self.pkt_bld.set_layer_attr, 'l2', 'data', "HelloWorld")
        # now replace the existing L3 layer with a different one; the old L3
        # and the L4 layer attached to it should disconnect from the registry
        old_layer_count = len(self.pkt_bld._pkt_by_hdr)
        new_ip = dpkt.ip.IP(src='\x05\x06\x07\x08', dst='\x01\x02\x03\x04')
        print "\nBefore disconnecting layers:"
        print "============================",
        self.print_packet(self.pkt_bld._packet)
        self.pkt_bld.set_layer_attr('l2', 'data', new_ip)
        print "\nAfter disconnecting layers:"
        print "===========================",
        self.print_packet(self.pkt_bld._packet)
        # the layer registry shrank: only the Ethernet ("l2") layer remains
        assert_not_equal(old_layer_count, len(self.pkt_bld._pkt_by_hdr))
        assert_equal(len(self.pkt_bld._pkt_by_hdr), 1) # only Eth layer appears
def test_set_pkt_payload(self):
payload = "HelloWorld"
# test for setting a payload to an empty packet
empty_pkt = CTRexPktBuilder()
assert_raises(AttributeError, empty_pkt.set_pkt_payload, payload)
# add content to packet
ip = dpkt.ip.IP(src='\x01\x02\x03\x04', dst='\x05\x06\x07\x08', p=1)
self.pkt_bld.add_pkt_layer("l3_ip", ip)
tcp = dpkt.tcp.TCP(sport = 8080)
self.pkt_bld.add_pkt_layer("l4_tcp", tcp)
# now, set a payload for the packet
self.pkt_bld.set_pkt_payload(payload)
self.print_packet(self.pkt_bld._packet)
assert_equal(self.pkt_bld._pkt_by_hdr['l4_tcp'].data, payload)
def test_load_packet(self):
# add content to packet
ip = dpkt.ip.IP(src='\x01\x02\x03\x04', dst='\x05\x06\x07\x08', p=1)
self.pkt_bld.add_pkt_layer("l3_ip", ip)
tcp = dpkt.tcp.TCP(sport = 8080)
self.pkt_bld.add_pkt_layer("l4_tcp", tcp)
self.pkt_bld.set_pkt_payload("HelloWorld")
new_pkt = CTRexPktBuilder()
new_pkt.load_packet(self.pkt_bld._packet)
self.print_packet(new_pkt._packet)
assert_equal(len(new_pkt._pkt_by_hdr), 4)
assert_equal(new_pkt._pkt_by_hdr.keys(),
['ip_1',
'tcp_1',
'pkt_final_payload',
'ethernet_1'
]
)
assert_equal(new_pkt._pkt_by_hdr['pkt_final_payload'], "HelloWorld")
def test_get_packet(self):
# get a pointer to the packet
assert(self.pkt_bld.get_packet(get_ptr=True) is self.pkt_bld._packet)
# get a copy of the packet
assert(not(self.pkt_bld.get_packet() is self.pkt_bld._packet))
def test_get_layer(self):
assert_equal(self.pkt_bld.get_layer('no_such_layer'), None)
assert(not(self.pkt_bld.get_layer('l2') is self.pkt_bld._packet))
assert(type(self.pkt_bld.get_layer('l2')).__name__, "ethernet")
def test_dump_to_pcap(self):
# set Ethernet layer attributes
self.pkt_bld.set_eth_layer_addr("l2", "src", "00:15:17:a7:75:a3")
self.pkt_bld.set_eth_layer_addr("l2", "dst", "e0:5f:b9:69:e9:22")
self.pkt_bld.set_layer_attr("l2", "type", dpkt.ethernet.ETH_TYPE_IP)
# set IP layer attributes
self.pkt_bld.add_pkt_layer("l3_ip", dpkt.ip.IP())
self.pkt_bld.set_ip_layer_addr("l3_ip", "src", "192.168.127.12")
self.pkt_bld.set_ip_layer_addr("l3_ip", "dst", "192.168.127.12")
self.pkt_bld.set_layer_attr("l3_ip", "p", dpkt.ip.IP_PROTO_TCP)
# set TCP layer attributes
self.pkt_bld.add_pkt_layer("l4_tcp", dpkt.tcp.TCP())
self.pkt_bld.set_layer_attr("l4_tcp", "sport", 13311)
self.pkt_bld.set_layer_attr("l4_tcp", "dport", 80)
self.pkt_bld.set_layer_attr("l4_tcp", "flags", 0)
self.pkt_bld.set_layer_attr("l4_tcp", "win", 32768)
self.pkt_bld.set_layer_attr("l4_tcp", "seq", 0)
# set packet payload, for example HTTP GET request
self.pkt_bld.set_pkt_payload('GET /10k_60k HTTP/1.1\r\nHost: 192.168.3.11\r\nConnection: Keep-Alive\r\nUser-Agent: Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)\r\nAccept: */*\r\nAccept-Language: en-us\r\nAccept-Encoding: gzip, deflate, compress\r\n\r\n')
# finally, set IP header len with relation to payload data
self.pkt_bld.set_layer_attr("l3_ip", "len", len(self.pkt_bld.get_layer('l3_ip')))
filepath = "unit_tests/functional_tests/test.pcap"
self.pkt_bld.dump_pkt_to_pcap(filepath)
assert os.path.isfile(filepath)
# remove pcap after creation - masked for now
# os.remove(filepath)
filepath = "/not/a/valid/path/test.pcap"
assert_raises(IOError, self.pkt_bld.dump_pkt_to_pcap, filepath)
# check that dump is not available for empty packet
new_pkt = CTRexPktBuilder()
assert_raises(CTRexPktBuilder.EmptyPacketError, new_pkt.dump_pkt_to_pcap, filepath)
def test_dump_pkt(self):
# check that dump is not available for empty packet
new_pkt = CTRexPktBuilder()
assert_raises(CTRexPktBuilder.EmptyPacketError, new_pkt.dump_pkt)
# set Ethernet layer attributes
self.pkt_bld.set_eth_layer_addr("l2", "src", "00:15:17:a7:75:a3")
self.pkt_bld.set_eth_layer_addr("l2", "dst", "e0:5f:b9:69:e9:22")
self.pkt_bld.set_layer_attr("l2", "type", dpkt.ethernet.ETH_TYPE_IP)
# set IP layer attributes
self.pkt_bld.add_pkt_layer("l3_ip", dpkt.ip.IP())
self.pkt_bld.set_ip_layer_addr("l3_ip", "src", "192.168.127.12")
self.pkt_bld.set_ip_layer_addr("l3_ip", "dst", "192.168.127.12")
self.pkt_bld.set_layer_attr("l3_ip", "p", dpkt.ip.IP_PROTO_ICMP)
# set ICMP layer attributes
self.pkt_bld.add_pkt_layer("icmp", dpkt.icmp.ICMP())
self.pkt_bld.set_layer_attr("icmp", "type", dpkt.icmp.ICMP_ECHO)
# set Echo(ICMP) layer attributes
self.pkt_bld.add_pkt_layer("icmp_echo", dpkt.icmp.ICMP.Echo())
self.pkt_bld.set_layer_attr("icmp_echo", "id", 24528)
self.pkt_bld.set_layer_attr("icmp_echo", "seq", 11482)
self.pkt_bld.set_pkt_payload('hello world')
# finally, set IP header len with relation to payload data
self.pkt_bld.set_layer_attr("l3_ip", "len", len(self.pkt_bld.get_layer('l3_ip')))
self.print_packet(self.pkt_bld.get_packet())
assert_equal(self.pkt_bld.dump_pkt(), {
'binary': [224, 95, 185, 105, 233, 34, 0, 21, 23, 167, 117, 163, 8, 0, 69, 0, 0, 39, 0, 0, 0, 0, 64, 1, 79, 201, 21, 0, 0, 2, 22, 0, 0, 12, 8, 0, 217, 134, 95, 208, 44, 218, 104, 101, 108, 108, 111, 32, 119, 111, 114, 108, 100],
'meta': '',
})
def test_set_vm_ip_range_ipv4(self):
    """set_vm_ip_range() on an IPv4 src field (positional-args form).

    Expects a flow_var + write_flow_var instruction pair plus an IPv4
    checksum-fix instruction (checksum fixing is on by default here).
    167772161 == 0x0A000001 == "10.0.0.1"; 167772415 == "10.0.0.255".
    """
    # set some mock packet
    ip = dpkt.ip.IP()
    self.pkt_bld.add_pkt_layer("l3", ip)
    self.pkt_bld.add_pkt_layer("l4_tcp", dpkt.tcp.TCP())
    self.pkt_bld.set_pkt_payload("HelloWorld")
    self.pkt_bld.set_vm_ip_range("l3", "src",
                                 "10.0.0.1", "10.0.0.1", "10.0.0.255", 1,
                                 "inc")
    # self.pkt_bld.set_vm_custom_range(layer_name="l3",
    #                                  hdr_field="tos",
    #                                  init_val="10", start_val="10", end_val="200", add_val=2, val_size=1,
    #                                  operation="inc")
    print ''
    self.pp.pprint(self.pkt_bld.vm.dump())
    assert_equal(self.pkt_bld.vm.dump(),
                 {'instructions': [{'init_value': '167772161',
                                    'max_value': '167772415',
                                    'min_value': '167772161',
                                    'name': 'l3__src',
                                    'op': 'inc',
                                    'size': 4,
                                    'type': 'flow_var'},
                                   {'add_value': 1,
                                    'is_big_endian': False,
                                    'name': 'l3__src',
                                    'pkt_offset': 26,
                                    'type': 'write_flow_var'},
                                   {'pkt_offset': 14, 'type': 'fix_checksum_ipv4'}],
                  'split_by_var': ''}
                 )
def test_set_vm_ip_range_ipv4_no_checksum(self):
    """set_vm_ip_range() with add_checksum_inst=False.

    Same as the ipv4 case but no fix_checksum_ipv4 instruction may be
    emitted.
    """
    # set some mock packet
    ip = dpkt.ip.IP()
    self.pkt_bld.add_pkt_layer("l3", ip)
    self.pkt_bld.add_pkt_layer("l4_tcp", dpkt.tcp.TCP())
    self.pkt_bld.set_pkt_payload("HelloWorld")
    self.pkt_bld.set_vm_ip_range(ip_layer_name="l3",
                                 ip_field="src",
                                 ip_init="10.0.0.1", ip_start="10.0.0.1", ip_end="10.0.0.255",
                                 add_value=1,
                                 operation="inc",
                                 add_checksum_inst=False)
    print ''
    self.pp.pprint(self.pkt_bld.vm.dump())
    # note: only flow_var + write_flow_var, no checksum instruction
    assert_equal(self.pkt_bld.vm.dump(),
                 {'instructions': [{'init_value': '167772161',
                                    'max_value': '167772415',
                                    'min_value': '167772161',
                                    'name': 'l3__src',
                                    'op': 'inc',
                                    'size': 4,
                                    'type': 'flow_var'},
                                   {'add_value': 1,
                                    'is_big_endian': False,
                                    'name': 'l3__src',
                                    'pkt_offset': 26,
                                    'type': 'write_flow_var'}],
                  'split_by_var': ''}
                 )
def test_set_vm_ip_range_ipv6(self):
    """set_vm_ip_range() with ip_type="ipv6".

    The variable still has size 4 (only the low 32 bits of the address
    vary) and the write offset lands inside the IPv6 src field (34).
    """
    # set some mock packet
    ip6 = dpkt.ip6.IP6()
    self.pkt_bld.add_pkt_layer("l3", ip6)
    self.pkt_bld.add_pkt_layer("l4_tcp", dpkt.tcp.TCP())
    self.pkt_bld.set_pkt_payload("HelloWorld")
    self.pkt_bld.set_vm_ip_range(ip_layer_name="l3",
                                 ip_field="src",
                                 ip_init="fdf8:f53e:61e4::18:3333:1:1", ip_start="fdf8:f53e:61e4::18:3333:1:1", ip_end="fdf8:f53e:61e4::18:3333:1:F",
                                 add_value=1,
                                 operation="inc",
                                 ip_type="ipv6")
    print ''
    self.pp.pprint(self.pkt_bld.vm.dump())
    assert_equal(self.pkt_bld.vm.dump(),
                 {'instructions': [{'init_value': '65537',
                                    'max_value': '65551',
                                    'min_value': '65537',
                                    'name': 'l3__src',
                                    'op': 'inc',
                                    'size': 4,
                                    'type': 'flow_var'},
                                   {'add_value': 1,
                                    'is_big_endian': False,
                                    'name': 'l3__src',
                                    'pkt_offset': 34,
                                    'type': 'write_flow_var'}],
                  'split_by_var': ''}
                 )
def test_set_vm_eth_range(self):
    # TODO: not implemented yet - empty placeholder so the case is listed
    pass
def test_set_vm_custom_range(self):
    """set_vm_custom_range() over an arbitrary header field (IP 'tos').

    A 1-byte flow variable [10..200] step 2 is written at offset 15,
    followed by an IPv4 checksum-fix instruction at offset 14.
    """
    # set some mock packet
    ip = dpkt.ip.IP()
    self.pkt_bld.add_pkt_layer("l3", ip)
    self.pkt_bld.add_pkt_layer("l4_tcp", dpkt.tcp.TCP())
    self.pkt_bld.set_pkt_payload("HelloWorld")
    self.pkt_bld.set_vm_custom_range(layer_name="l3",
                                     hdr_field="tos",
                                     init_val=10, start_val=10, end_val=200, add_val=2, val_size=1,
                                     operation="inc")
    print ''
    self.pp.pprint(self.pkt_bld.vm.dump())
    assert_equal(self.pkt_bld.vm.dump(),
                 {'instructions': [{'init_value': '10',
                                    'max_value': '200',
                                    'min_value': '10',
                                    'name': 'l3__tos',
                                    'op': 'inc',
                                    'size': 1,
                                    'type': 'flow_var'},
                                   {'add_value': 2,
                                    'is_big_endian': False,
                                    'name': 'l3__tos',
                                    'pkt_offset': 15,
                                    'type': 'write_flow_var'},
                                   {'pkt_offset': 14, 'type': 'fix_checksum_ipv4'}],
                  'split_by_var': ''}
                 )
def test_various_ranges(self):
    """Combine set_vm_ip_range() and set_vm_custom_range() on one packet.

    Verifies the combined VM program: both flow_var declarations come
    first, then the write instructions, and a single shared
    fix_checksum_ipv4 instruction at the end.
    """
    # set some mock packet
    ip = dpkt.ip.IP()
    self.pkt_bld.add_pkt_layer("l3", ip)
    self.pkt_bld.add_pkt_layer("l4_tcp", dpkt.tcp.TCP())
    self.pkt_bld.set_pkt_payload("HelloWorld")
    self.pkt_bld.set_vm_ip_range("l3", "src",
                                 "10.0.0.1", "10.0.0.1", "10.0.0.255", 1,
                                 "inc")
    self.pkt_bld.set_vm_custom_range(layer_name="l3",
                                     hdr_field="tos",
                                     init_val=10, start_val=10, end_val=200, add_val=2, val_size=1,
                                     operation="inc")
    print ''
    self.pp.pprint(self.pkt_bld.vm.dump())
    assert_equal(self.pkt_bld.vm.dump(),
                 {'instructions': [{'init_value': '167772161',
                                    'max_value': '167772415',
                                    'min_value': '167772161',
                                    'name': 'l3__src',
                                    'op': 'inc',
                                    'size': 4,
                                    'type': 'flow_var'},
                                   {'init_value': '10',
                                    'max_value': '200',
                                    'min_value': '10',
                                    'name': 'l3__tos',
                                    'op': 'inc',
                                    'size': 1,
                                    'type': 'flow_var'},
                                   {'add_value': 2,
                                    'is_big_endian': False,
                                    'name': 'l3__tos',
                                    'pkt_offset': 15,
                                    'type': 'write_flow_var'},
                                   {'add_value': 1,
                                    'is_big_endian': False,
                                    'name': 'l3__src',
                                    'pkt_offset': 26,
                                    'type': 'write_flow_var'},
                                   {'pkt_offset': 14, 'type': 'fix_checksum_ipv4'}],
                  'split_by_var': ''}
                 )
def tearDown(self):
    # no per-test cleanup required
    pass
if __name__ == "__main__":
    # module is intended to be collected by a test runner (nose);
    # nothing to execute when run directly
    pass
#!/router/bin/python
import pkt_bld_general_test
from client_utils.packet_builder import *
import dpkt
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_raises
from nose.tools import raises
import os
import random
import pprint
class CTRexPktBuilderSanity_Test(pkt_bld_general_test.CGeneralPktBld_Test):
    """Sanity tests for CTRexPktBuilder static helpers and attribute setters.

    These cases do not require a pre-built packet; each test constructs
    what it needs locally.
    """

    def setUp(self):
        pass

    def test_decode_ip_addr(self):
        """_decode_ip_addr(): valid/invalid IPv4 and IPv6 string decoding."""
        # test ipv4 case
        assert_equal(CTRexPktBuilder._decode_ip_addr('1.2.3.4', "ipv4"), '\x01\x02\x03\x04')
        assert_equal(CTRexPktBuilder._decode_ip_addr('127.0.0.1', "ipv4"), '\x7F\x00\x00\x01')
        assert_raises(CTRexPktBuilder.IPAddressError, CTRexPktBuilder._decode_ip_addr, '1.2.3.4.5', "ipv4")
        # type mismatch: an ipv4 string decoded as ipv6 must raise
        assert_raises(CTRexPktBuilder.IPAddressError, CTRexPktBuilder._decode_ip_addr, '1.2.3.4', "ipv6")
        # test ipv6 case
        assert_equal(CTRexPktBuilder._decode_ip_addr("fdf8:f53e:61e4::18:1:3333:1:1", "ipv6"),
                     'P\x01\x00\x00\x00\x00\r\xb8\x00\x0133\x00\x01\x00\x01')
        assert_raises(CTRexPktBuilder.IPAddressError, CTRexPktBuilder._decode_ip_addr,
                      '2001::DB8:1:2fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b:1', "ipv6")

    def test_decode_mac_addr(self):
        """_decode_mac_addr(): ':' and '-' separators accepted; other formats raise."""
        assert_equal(CTRexPktBuilder._decode_mac_addr('00:de:34:ef:2e:f4'), '\x00\xde4\xef.\xf4')
        assert_equal(CTRexPktBuilder._decode_mac_addr('00-de-55-ef-2e-f4'), '\x00\xdeU\xef.\xf4')
        # too many octets
        assert_raises(CTRexPktBuilder.MACAddressError, CTRexPktBuilder._decode_mac_addr,
                      '00:de:34:ef:2e:f4:f4')
        # not a MAC at all
        assert_raises(CTRexPktBuilder.MACAddressError, CTRexPktBuilder._decode_mac_addr,
                      '1.2.3.4')
        # space separator is not supported
        assert_raises(CTRexPktBuilder.MACAddressError, CTRexPktBuilder._decode_mac_addr,
                      '00 de 34 ef 2e f4 f4')

    def test_gen_layer_name(self):
        """_gen_layer_name(): returns the next free '<base>_<index>' layer name."""
        pkt = CTRexPktBuilder()
        assert_equal(pkt._gen_layer_name("eth"), "eth_1")
        pkt._pkt_by_hdr = {'eth':None} # mock header pointer data
        # a bare 'eth' entry does not consume the 'eth_1' name
        assert_equal(pkt._gen_layer_name("eth"), "eth_1")
        pkt._pkt_by_hdr.update({'eth_1':None}) # more mock header pointer data
        assert_equal(pkt._gen_layer_name("eth"), "eth_2")

    def test_set_layer_attr_basic(self):
        """set_layer_attr / set_layer_bit_attr: value assignment, bit OR-ing and error cases."""
        pkt = CTRexPktBuilder()
        pkt._pkt_by_hdr['ip'] = dpkt.ip.IP()
        # case 1 - test full value assignment
        pkt.set_layer_attr('ip', 'src', '\x01\x02\x03\x04')
        assert_equal(pkt._pkt_by_hdr['ip'].src, '\x01\x02\x03\x04')
        # case 2 - test bit assignment (DF | MF flags accumulate)
        pkt.set_layer_bit_attr('ip', 'off', dpkt.ip.IP_DF)
        pkt.set_layer_bit_attr('ip', 'off', dpkt.ip.IP_MF)
        assert_equal(bin(pkt._pkt_by_hdr['ip'].off), '0b110000000000000')
        # case 3 - test assignment of not-exist attribute
        assert_raises(ValueError, pkt.set_layer_bit_attr, 'ip', 'src_dst', 0)
        # case 4.1 - test assignment of data attribute - without dpkt.Packet object
        assert_raises(ValueError, pkt.set_layer_bit_attr, 'ip', 'data', "Not a dpkt.Packet object")
        # case 4.2 - test assignment of data attribute - with dpkt.Packet object - tested under CTRexPktBuilder_Test class
        # tcp = dpkt.tcp.TCP()
        self.print_packet(pkt._pkt_by_hdr['ip'])
        # pkt.set_layer_attr('ip', 'data', tcp)
        # case 5 - test assignment of not-exist layer
        assert_raises(KeyError, pkt.set_layer_bit_attr, 'no_such_layer', 'src', 0)

    def tearDown(self):
        pass
class CTRexPktBuilder_Test(pkt_bld_general_test.CGeneralPktBld_Test):
    """Functional tests for CTRexPktBuilder packet assembly and VM ranges.

    setUp() creates a fresh builder with an Ethernet layer named "l2";
    individual tests stack higher layers on top of it.
    """

    def setUp(self):
        self.pkt_bld = CTRexPktBuilder()
        self.pkt_bld.add_pkt_layer("l2", dpkt.ethernet.Ethernet())
        self.pp = pprint.PrettyPrinter(indent=4)

    def test_add_pkt_layer(self):
        """add_pkt_layer(): layers register in order; duplicate names raise."""
        ip = dpkt.ip.IP(src='\x01\x02\x03\x04', dst='\x05\x06\x07\x08', p=1)
        self.pkt_bld.add_pkt_layer("l3", ip)
        tcp = dpkt.tcp.TCP(sport = 8080)
        self.pkt_bld.add_pkt_layer("l4_tcp", tcp)
        assert_equal(len(self.pkt_bld._pkt_by_hdr), 3)
        assert_equal(self.pkt_bld._pkt_by_hdr.keys(), ['l2', 'l3', 'l4_tcp'])
        self.print_packet(self.pkt_bld._packet)
        # re-using an existing layer name must fail
        assert_raises(ValueError, self.pkt_bld.add_pkt_layer, 'l2', dpkt.ethernet.Ethernet())

    def test_set_ip_layer_addr(self):
        """set_ip_layer_addr(): sets decoded IPv4 bytes; rejects non-IP layers."""
        ip = dpkt.ip.IP()
        self.pkt_bld.add_pkt_layer("l3", ip)
        self.pkt_bld.set_ip_layer_addr("l3", "src", "1.2.3.4")
        self.print_packet(self.pkt_bld._packet)
        assert_equal(self.pkt_bld._pkt_by_hdr['l3'].src, '\x01\x02\x03\x04')
        # check that only IP layer is using this function
        assert_raises(ValueError, self.pkt_bld.set_ip_layer_addr, 'l2', "src", "1.2.3.4")

    def test_calc_offset(self):
        """_calc_offset(): IP src after a 14-byte Ethernet header -> (14, 26)."""
        ip = dpkt.ip.IP()
        self.pkt_bld.add_pkt_layer("l3", ip)
        assert_equal(self.pkt_bld._calc_offset("l3", "src", 4), (14, 14+12))

    def test_set_ipv6_layer_addr(self):
        """set_ipv6_layer_addr(): sets decoded IPv6 bytes; rejects non-IPv6 layers."""
        ip6 = dpkt.ip6.IP6()
        self.pkt_bld.add_pkt_layer("l3", ip6)
        self.pkt_bld.set_ipv6_layer_addr("l3", "src", "fdf8:f53e:61e4::18:1:3333:1:1")
        self.print_packet(self.pkt_bld._packet)
        assert_equal(self.pkt_bld._pkt_by_hdr['l3'].src, 'P\x01\x00\x00\x00\x00\r\xb8\x00\x0133\x00\x01\x00\x01')
        # check that only IP layer is using this function
        assert_raises(ValueError, self.pkt_bld.set_ipv6_layer_addr, 'l2', "src", "fdf8:f53e:61e4::18:1:3333:1:1")

    def test_set_eth_layer_addr(self):
        """set_eth_layer_addr(): sets decoded MAC bytes; rejects non-Ethernet layers."""
        ip = dpkt.ip.IP()
        self.pkt_bld.add_pkt_layer("l3", ip)
        self.pkt_bld.set_eth_layer_addr("l2", "src", "00:de:34:ef:2e:f4")
        self.print_packet(self.pkt_bld._packet)
        assert_equal(self.pkt_bld._pkt_by_hdr['l2'].src, '\x00\xde4\xef.\xf4')
        # check that only IP layer is using this function
        assert_raises(ValueError, self.pkt_bld.set_eth_layer_addr, 'l3', "src", "\x00\xde4\xef.\xf4")

    def test_set_layer_attr(self):
        # extend the set_layer_attr_basic test by handling the following case:
        # replace some header data with another layer, causing other layers to disconnect
        # this also tests the _reevaluate_packet method
        ip = dpkt.ip.IP(src='\x01\x02\x03\x04', dst='\x05\x06\x07\x08', p=1)
        self.pkt_bld.add_pkt_layer("l3_ip", ip)
        tcp = dpkt.tcp.TCP(sport = 8080)
        self.pkt_bld.add_pkt_layer("l4_tcp", tcp)
        # sanity: try changing data attr with non-dpkt.Packet instance
        assert_raises(ValueError, self.pkt_bld.set_layer_attr, 'l2', 'data', "HelloWorld")
        # now, add different L3 layer instead of existting one, L4 would disconnect
        old_layer_count = len(self.pkt_bld._pkt_by_hdr)
        new_ip = dpkt.ip.IP(src='\x05\x06\x07\x08', dst='\x01\x02\x03\x04')
        print "\nBefore disconnecting layers:"
        print "============================",
        self.print_packet(self.pkt_bld._packet)
        self.pkt_bld.set_layer_attr('l2', 'data', new_ip)
        print "\nAfter disconnecting layers:"
        print "===========================",
        self.print_packet(self.pkt_bld._packet)
        assert_not_equal(old_layer_count, len(self.pkt_bld._pkt_by_hdr))
        assert_equal(len(self.pkt_bld._pkt_by_hdr), 1)   # only Eth layer appears

    def test_set_pkt_payload(self):
        """set_pkt_payload(): attaches to the last layer; empty packets raise."""
        payload = "HelloWorld"
        # test for setting a payload to an empty packet
        empty_pkt = CTRexPktBuilder()
        assert_raises(AttributeError, empty_pkt.set_pkt_payload, payload)
        # add content to packet
        ip = dpkt.ip.IP(src='\x01\x02\x03\x04', dst='\x05\x06\x07\x08', p=1)
        self.pkt_bld.add_pkt_layer("l3_ip", ip)
        tcp = dpkt.tcp.TCP(sport = 8080)
        self.pkt_bld.add_pkt_layer("l4_tcp", tcp)
        # now, set a payload for the packet
        self.pkt_bld.set_pkt_payload(payload)
        self.print_packet(self.pkt_bld._packet)
        assert_equal(self.pkt_bld._pkt_by_hdr['l4_tcp'].data, payload)

    def test_load_packet(self):
        """load_packet(): rebuilds layer registry with auto-generated names."""
        # add content to packet
        ip = dpkt.ip.IP(src='\x01\x02\x03\x04', dst='\x05\x06\x07\x08', p=1)
        self.pkt_bld.add_pkt_layer("l3_ip", ip)
        tcp = dpkt.tcp.TCP(sport = 8080)
        self.pkt_bld.add_pkt_layer("l4_tcp", tcp)
        self.pkt_bld.set_pkt_payload("HelloWorld")
        new_pkt = CTRexPktBuilder()
        new_pkt.load_packet(self.pkt_bld._packet)
        self.print_packet(new_pkt._packet)
        assert_equal(len(new_pkt._pkt_by_hdr), 4)
        # loaded layers get generated names, not the originals
        assert_equal(new_pkt._pkt_by_hdr.keys(),
                     ['ip_1',
                      'tcp_1',
                      'pkt_final_payload',
                      'ethernet_1'
                      ]
                     )
        assert_equal(new_pkt._pkt_by_hdr['pkt_final_payload'], "HelloWorld")

    def test_get_packet(self):
        """get_packet(): get_ptr=True returns the live object, default returns a copy."""
        # get a pointer to the packet
        assert(self.pkt_bld.get_packet(get_ptr=True) is self.pkt_bld._packet)
        # get a copy of the packet
        assert(not(self.pkt_bld.get_packet() is self.pkt_bld._packet))

    def test_get_layer(self):
        """get_layer(): missing layer -> None; existing layer returned as a copy."""
        assert_equal(self.pkt_bld.get_layer('no_such_layer'), None)
        assert(not(self.pkt_bld.get_layer('l2') is self.pkt_bld._packet))
        # NOTE(review): asserting a 2-tuple is always truthy, so the next
        # line verifies nothing; it was probably meant to be
        # assert_equal(type(...).__name__, "ethernet") - fix separately.
        assert(type(self.pkt_bld.get_layer('l2')).__name__, "ethernet")

    def test_dump_to_pcap(self):
        """dump_pkt_to_pcap(): writes a pcap file; bad paths and empty packets raise."""
        # set Ethernet layer attributes
        self.pkt_bld.set_eth_layer_addr("l2", "src", "00:15:17:a7:75:a3")
        self.pkt_bld.set_eth_layer_addr("l2", "dst", "e0:5f:b9:69:e9:22")
        self.pkt_bld.set_layer_attr("l2", "type", dpkt.ethernet.ETH_TYPE_IP)
        # set IP layer attributes
        self.pkt_bld.add_pkt_layer("l3_ip", dpkt.ip.IP())
        self.pkt_bld.set_ip_layer_addr("l3_ip", "src", "192.168.127.12")
        self.pkt_bld.set_ip_layer_addr("l3_ip", "dst", "192.168.127.12")
        self.pkt_bld.set_layer_attr("l3_ip", "p", dpkt.ip.IP_PROTO_TCP)
        # set TCP layer attributes
        self.pkt_bld.add_pkt_layer("l4_tcp", dpkt.tcp.TCP())
        self.pkt_bld.set_layer_attr("l4_tcp", "sport", 13311)
        self.pkt_bld.set_layer_attr("l4_tcp", "dport", 80)
        self.pkt_bld.set_layer_attr("l4_tcp", "flags", 0)
        self.pkt_bld.set_layer_attr("l4_tcp", "win", 32768)
        self.pkt_bld.set_layer_attr("l4_tcp", "seq", 0)
        # set packet payload, for example HTTP GET request
        self.pkt_bld.set_pkt_payload('GET /10k_60k HTTP/1.1\r\nHost: 192.168.3.11\r\nConnection: Keep-Alive\r\nUser-Agent: Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)\r\nAccept: */*\r\nAccept-Language: en-us\r\nAccept-Encoding: gzip, deflate, compress\r\n\r\n')
        # finally, set IP header len with relation to payload data
        self.pkt_bld.set_layer_attr("l3_ip", "len", len(self.pkt_bld.get_layer('l3_ip')))
        filepath = "unit_tests/functional_tests/test.pcap"
        self.pkt_bld.dump_pkt_to_pcap(filepath)
        assert os.path.isfile(filepath)
        # remove pcap after creation - masked for now
        # os.remove(filepath)
        filepath = "/not/a/valid/path/test.pcap"
        assert_raises(IOError, self.pkt_bld.dump_pkt_to_pcap, filepath)
        # check that dump is not available for empty packet
        new_pkt = CTRexPktBuilder()
        assert_raises(CTRexPktBuilder.EmptyPacketError, new_pkt.dump_pkt_to_pcap, filepath)

    def test_dump_pkt(self):
        """dump_pkt(): byte-level dump of an Eth/IP/ICMP-echo packet."""
        # check that dump is not available for empty packet
        new_pkt = CTRexPktBuilder()
        assert_raises(CTRexPktBuilder.EmptyPacketError, new_pkt.dump_pkt)
        # set Ethernet layer attributes
        self.pkt_bld.set_eth_layer_addr("l2", "src", "00:15:17:a7:75:a3")
        self.pkt_bld.set_eth_layer_addr("l2", "dst", "e0:5f:b9:69:e9:22")
        self.pkt_bld.set_layer_attr("l2", "type", dpkt.ethernet.ETH_TYPE_IP)
        # set IP layer attributes
        self.pkt_bld.add_pkt_layer("l3_ip", dpkt.ip.IP())
        self.pkt_bld.set_ip_layer_addr("l3_ip", "src", "192.168.127.12")
        self.pkt_bld.set_ip_layer_addr("l3_ip", "dst", "192.168.127.12")
        self.pkt_bld.set_layer_attr("l3_ip", "p", dpkt.ip.IP_PROTO_ICMP)
        # set ICMP layer attributes
        self.pkt_bld.add_pkt_layer("icmp", dpkt.icmp.ICMP())
        self.pkt_bld.set_layer_attr("icmp", "type", dpkt.icmp.ICMP_ECHO)
        # set Echo(ICMP) layer attributes
        self.pkt_bld.add_pkt_layer("icmp_echo", dpkt.icmp.ICMP.Echo())
        self.pkt_bld.set_layer_attr("icmp_echo", "id", 24528)
        self.pkt_bld.set_layer_attr("icmp_echo", "seq", 11482)
        self.pkt_bld.set_pkt_payload('hello world')
        # finally, set IP header len with relation to payload data
        self.pkt_bld.set_layer_attr("l3_ip", "len", len(self.pkt_bld.get_layer('l3_ip')))
        self.print_packet(self.pkt_bld.get_packet())
        assert_equal(self.pkt_bld.dump_pkt(), {
            'binary': [224, 95, 185, 105, 233, 34, 0, 21, 23, 167, 117, 163, 8, 0, 69, 0, 0, 39, 0, 0, 0, 0, 64, 1, 79, 201, 21, 0, 0, 2, 22, 0, 0, 12, 8, 0, 217, 134, 95, 208, 44, 218, 104, 101, 108, 108, 111, 32, 119, 111, 114, 108, 100],
            'meta': '',
        })

    def test_set_vm_ip_range_ipv4(self):
        """set_vm_ip_range() on IPv4 src: flow_var + write + checksum fix."""
        # set some mock packet
        ip = dpkt.ip.IP()
        self.pkt_bld.add_pkt_layer("l3", ip)
        self.pkt_bld.add_pkt_layer("l4_tcp", dpkt.tcp.TCP())
        self.pkt_bld.set_pkt_payload("HelloWorld")
        self.pkt_bld.set_vm_ip_range("l3", "src",
                                     "10.0.0.1", "10.0.0.1", "10.0.0.255", 1,
                                     "inc")
        # self.pkt_bld.set_vm_custom_range(layer_name="l3",
        #                                  hdr_field="tos",
        #                                  init_val="10", start_val="10", end_val="200", add_val=2, val_size=1,
        #                                  operation="inc")
        print ''
        self.pp.pprint(self.pkt_bld.vm.dump())
        # 167772161 == 0x0A000001 == "10.0.0.1"
        assert_equal(self.pkt_bld.vm.dump(),
                     {'instructions': [{'init_value': '167772161',
                                        'max_value': '167772415',
                                        'min_value': '167772161',
                                        'name': 'l3__src',
                                        'op': 'inc',
                                        'size': 4,
                                        'type': 'flow_var'},
                                       {'add_value': 1,
                                        'is_big_endian': False,
                                        'name': 'l3__src',
                                        'pkt_offset': 26,
                                        'type': 'write_flow_var'},
                                       {'pkt_offset': 14, 'type': 'fix_checksum_ipv4'}],
                      'split_by_var': ''}
                     )

    def test_set_vm_ip_range_ipv4_no_checksum(self):
        """Same as ipv4 case but add_checksum_inst=False drops the checksum fix."""
        # set some mock packet
        ip = dpkt.ip.IP()
        self.pkt_bld.add_pkt_layer("l3", ip)
        self.pkt_bld.add_pkt_layer("l4_tcp", dpkt.tcp.TCP())
        self.pkt_bld.set_pkt_payload("HelloWorld")
        self.pkt_bld.set_vm_ip_range(ip_layer_name="l3",
                                     ip_field="src",
                                     ip_init="10.0.0.1", ip_start="10.0.0.1", ip_end="10.0.0.255",
                                     add_value=1,
                                     operation="inc",
                                     add_checksum_inst=False)
        print ''
        self.pp.pprint(self.pkt_bld.vm.dump())
        assert_equal(self.pkt_bld.vm.dump(),
                     {'instructions': [{'init_value': '167772161',
                                        'max_value': '167772415',
                                        'min_value': '167772161',
                                        'name': 'l3__src',
                                        'op': 'inc',
                                        'size': 4,
                                        'type': 'flow_var'},
                                       {'add_value': 1,
                                        'is_big_endian': False,
                                        'name': 'l3__src',
                                        'pkt_offset': 26,
                                        'type': 'write_flow_var'}],
                      'split_by_var': ''}
                     )

    def test_set_vm_ip_range_ipv6(self):
        """set_vm_ip_range(ip_type="ipv6"): 4-byte var written at IPv6 src offset."""
        # set some mock packet
        ip6 = dpkt.ip6.IP6()
        self.pkt_bld.add_pkt_layer("l3", ip6)
        self.pkt_bld.add_pkt_layer("l4_tcp", dpkt.tcp.TCP())
        self.pkt_bld.set_pkt_payload("HelloWorld")
        self.pkt_bld.set_vm_ip_range(ip_layer_name="l3",
                                     ip_field="src",
                                     ip_init="fdf8:f53e:61e4::18:3333:1:1", ip_start="fdf8:f53e:61e4::18:3333:1:1", ip_end="fdf8:f53e:61e4::18:3333:1:F",
                                     add_value=1,
                                     operation="inc",
                                     ip_type="ipv6")
        print ''
        self.pp.pprint(self.pkt_bld.vm.dump())
        assert_equal(self.pkt_bld.vm.dump(),
                     {'instructions': [{'init_value': '65537',
                                        'max_value': '65551',
                                        'min_value': '65537',
                                        'name': 'l3__src',
                                        'op': 'inc',
                                        'size': 4,
                                        'type': 'flow_var'},
                                       {'add_value': 1,
                                        'is_big_endian': False,
                                        'name': 'l3__src',
                                        'pkt_offset': 34,
                                        'type': 'write_flow_var'}],
                      'split_by_var': ''}
                     )

    def test_set_vm_eth_range(self):
        # TODO: not implemented yet - empty placeholder so the case is listed
        pass

    def test_set_vm_custom_range(self):
        """set_vm_custom_range() over the IP 'tos' field (1-byte var at offset 15)."""
        # set some mock packet
        ip = dpkt.ip.IP()
        self.pkt_bld.add_pkt_layer("l3", ip)
        self.pkt_bld.add_pkt_layer("l4_tcp", dpkt.tcp.TCP())
        self.pkt_bld.set_pkt_payload("HelloWorld")
        self.pkt_bld.set_vm_custom_range(layer_name="l3",
                                         hdr_field="tos",
                                         init_val=10, start_val=10, end_val=200, add_val=2, val_size=1,
                                         operation="inc")
        print ''
        self.pp.pprint(self.pkt_bld.vm.dump())
        assert_equal(self.pkt_bld.vm.dump(),
                     {'instructions': [{'init_value': '10',
                                        'max_value': '200',
                                        'min_value': '10',
                                        'name': 'l3__tos',
                                        'op': 'inc',
                                        'size': 1,
                                        'type': 'flow_var'},
                                       {'add_value': 2,
                                        'is_big_endian': False,
                                        'name': 'l3__tos',
                                        'pkt_offset': 15,
                                        'type': 'write_flow_var'},
                                       {'pkt_offset': 14, 'type': 'fix_checksum_ipv4'}],
                      'split_by_var': ''}
                     )

    def test_various_ranges(self):
        """Combined ip-range + custom-range program: vars first, writes, then one checksum fix."""
        # set some mock packet
        ip = dpkt.ip.IP()
        self.pkt_bld.add_pkt_layer("l3", ip)
        self.pkt_bld.add_pkt_layer("l4_tcp", dpkt.tcp.TCP())
        self.pkt_bld.set_pkt_payload("HelloWorld")
        self.pkt_bld.set_vm_ip_range("l3", "src",
                                     "10.0.0.1", "10.0.0.1", "10.0.0.255", 1,
                                     "inc")
        self.pkt_bld.set_vm_custom_range(layer_name="l3",
                                         hdr_field="tos",
                                         init_val=10, start_val=10, end_val=200, add_val=2, val_size=1,
                                         operation="inc")
        print ''
        self.pp.pprint(self.pkt_bld.vm.dump())
        assert_equal(self.pkt_bld.vm.dump(),
                     {'instructions': [{'init_value': '167772161',
                                        'max_value': '167772415',
                                        'min_value': '167772161',
                                        'name': 'l3__src',
                                        'op': 'inc',
                                        'size': 4,
                                        'type': 'flow_var'},
                                       {'init_value': '10',
                                        'max_value': '200',
                                        'min_value': '10',
                                        'name': 'l3__tos',
                                        'op': 'inc',
                                        'size': 1,
                                        'type': 'flow_var'},
                                       {'add_value': 2,
                                        'is_big_endian': False,
                                        'name': 'l3__tos',
                                        'pkt_offset': 15,
                                        'type': 'write_flow_var'},
                                       {'add_value': 1,
                                        'is_big_endian': False,
                                        'name': 'l3__src',
                                        'pkt_offset': 26,
                                        'type': 'write_flow_var'},
                                       {'pkt_offset': 14, 'type': 'fix_checksum_ipv4'}],
                      'split_by_var': ''}
                     )

    def tearDown(self):
        pass
if __name__ == "__main__":
    # module is intended to be collected by a test runner (nose);
    # nothing to execute when run directly
    pass
mock packet # set some mock packet # set some mock packet # set some mock packet | 2.040391 | 2 |
examples/datasets/moons.py | haihabi/NormFlowPy | 0 | 6617296 | from sklearn.datasets import make_moons
def generate_moons_dataset(dataset_size):
x,y= make_moons(n_samples=dataset_size, shuffle=True, noise=0.05)
return x.astype("float32"),y.astype("float32")
| from sklearn.datasets import make_moons
def generate_moons_dataset(dataset_size):
x,y= make_moons(n_samples=dataset_size, shuffle=True, noise=0.05)
return x.astype("float32"),y.astype("float32")
| none | 1 | 2.789444 | 3 | |
_apicheck/apicheck/core/cli.py | sundayayandele/apicheck | 2 | 6617297 | def cli_log_level(parser):
"""This functions adds log level option to the CLI"""
parser.add_argument('--log-level',
dest="log_level",
default="INFO",
choices=("DEBUG", "INFO", "WARNING", "ERROR",
"CRITICAL"),
help="define log level")
def cli_db(parser):
"""This functions adds the db configuration to the CLI"""
parser.add_argument('-C', '--connection-string',
dest="db_connection_string",
required=True,
help="database connection string")
| def cli_log_level(parser):
"""This functions adds log level option to the CLI"""
parser.add_argument('--log-level',
dest="log_level",
default="INFO",
choices=("DEBUG", "INFO", "WARNING", "ERROR",
"CRITICAL"),
help="define log level")
def cli_db(parser):
"""This functions adds the db configuration to the CLI"""
parser.add_argument('-C', '--connection-string',
dest="db_connection_string",
required=True,
help="database connection string")
| en | 0.469018 | This functions adds log level option to the CLI This functions adds the db configuration to the CLI | 2.741122 | 3 |
src/immuni.py | MarcoBuster/immuni-tek-sender | 5 | 6617298 | <filename>src/immuni.py
import requests
import io
import zipfile
from src.protobuild.exposurekey_pb2 import *
class ImmuniAPI:
BASE_URL = "https://get.immuni.gov.it/v1/keys/"
def __init__(self, base_url=BASE_URL):
self.base_url = base_url
def _request(self, path, raw=False):
r = requests.get(self.base_url + str(path))
return r.json() if not raw else r.content
def index(self):
return self._request("index")
def get_batch(self, batch_id):
compressed_file = self._request(batch_id, raw=True)
if b"Batch not found." in compressed_file:
return False
stream = io.BytesIO(compressed_file)
input_zip = zipfile.ZipFile(stream)
raw_teke = {name: input_zip.read(name) for name in input_zip.namelist()}['export.bin'][16:]
return TemporaryExposureKeyExport().FromString(raw_teke)
| <filename>src/immuni.py
import requests
import io
import zipfile
from src.protobuild.exposurekey_pb2 import *
class ImmuniAPI:
BASE_URL = "https://get.immuni.gov.it/v1/keys/"
def __init__(self, base_url=BASE_URL):
self.base_url = base_url
def _request(self, path, raw=False):
r = requests.get(self.base_url + str(path))
return r.json() if not raw else r.content
def index(self):
return self._request("index")
def get_batch(self, batch_id):
compressed_file = self._request(batch_id, raw=True)
if b"Batch not found." in compressed_file:
return False
stream = io.BytesIO(compressed_file)
input_zip = zipfile.ZipFile(stream)
raw_teke = {name: input_zip.read(name) for name in input_zip.namelist()}['export.bin'][16:]
return TemporaryExposureKeyExport().FromString(raw_teke)
| none | 1 | 2.527822 | 3 | |
rucola_markdown.py | lecnim/rucola-markdown | 0 | 6617299 | <filename>rucola_markdown.py
import os
import markdown
# Extra:
from markdown.extensions.extra import ExtraExtension
from markdown.extensions.abbr import AbbrExtension
from markdown.extensions.attr_list import AttrListExtension
from markdown.extensions.def_list import DefListExtension
from markdown.extensions.fenced_code import FencedCodeExtension
from markdown.extensions.footnotes import FootnoteExtension
from markdown.extensions.tables import TableExtension
from markdown.extensions.smart_strong import SmartEmphasisExtension
# Other extensions:
from markdown.extensions.admonition import AdmonitionExtension
from markdown.extensions.codehilite import CodeHiliteExtension
from markdown.extensions.meta import MetaExtension
from markdown.extensions.nl2br import Nl2BrExtension
from markdown.extensions.sane_lists import SaneListExtension
from markdown.extensions.smarty import SmartyExtension
from markdown.extensions.toc import TocExtension
from markdown.extensions.wikilinks import WikiLinkExtension
EXTS = {
'extra': ExtraExtension,
'abbr': AbbrExtension,
'attr_list': AttrListExtension,
'def_list': DefListExtension,
'fenced_code': FencedCodeExtension,
'footnotes': FootnoteExtension,
'tables': TableExtension,
'smart_strong': SmartEmphasisExtension,
'admonition': AdmonitionExtension,
'codehilite': CodeHiliteExtension,
'meta': MetaExtension,
'nl2br': Nl2BrExtension,
'sane_lists': SaneListExtension,
'smarty': SmartyExtension,
'toc': TocExtension,
'wikilinks': WikiLinkExtension
}
def _get_extensions(extensions):
"""Returns a list of markdown extensions instances, ready to be passed to
the markdown.markdown() ``extensions`` parameter.
"""
e = []
for key in extensions:
if key in EXTS:
value = extensions[key]
if isinstance(value, dict):
e.append(EXTS[key](**value))
elif value:
e.append(EXTS[key]())
else:
pass
else:
raise KeyError('Markdown: extension not found: ' + key)
return e
def render_markdown(source, **extensions):
# TODO: add output_format option
e = _get_extensions(extensions)
if not isinstance(source, str):
source = source['content']
return markdown.markdown(source, output_format='html5', extensions=e)
class Markdown:
# TODO: .md or .markdown
def __init__(self, pattern='**/*.md', **extensions):
self.pattern = pattern
self.exts = extensions
def __call__(self, app):
e = _get_extensions(self.exts)
for f in app.find(self.pattern):
f.content = markdown.markdown(
f.content,
output_format='html5',
extensions=e)
f.path = os.path.splitext(f.path)[0] + '.html'
| <filename>rucola_markdown.py
import os
import markdown
# Extra:
from markdown.extensions.extra import ExtraExtension
from markdown.extensions.abbr import AbbrExtension
from markdown.extensions.attr_list import AttrListExtension
from markdown.extensions.def_list import DefListExtension
from markdown.extensions.fenced_code import FencedCodeExtension
from markdown.extensions.footnotes import FootnoteExtension
from markdown.extensions.tables import TableExtension
from markdown.extensions.smart_strong import SmartEmphasisExtension
# Other extensions:
from markdown.extensions.admonition import AdmonitionExtension
from markdown.extensions.codehilite import CodeHiliteExtension
from markdown.extensions.meta import MetaExtension
from markdown.extensions.nl2br import Nl2BrExtension
from markdown.extensions.sane_lists import SaneListExtension
from markdown.extensions.smarty import SmartyExtension
from markdown.extensions.toc import TocExtension
from markdown.extensions.wikilinks import WikiLinkExtension
EXTS = {
'extra': ExtraExtension,
'abbr': AbbrExtension,
'attr_list': AttrListExtension,
'def_list': DefListExtension,
'fenced_code': FencedCodeExtension,
'footnotes': FootnoteExtension,
'tables': TableExtension,
'smart_strong': SmartEmphasisExtension,
'admonition': AdmonitionExtension,
'codehilite': CodeHiliteExtension,
'meta': MetaExtension,
'nl2br': Nl2BrExtension,
'sane_lists': SaneListExtension,
'smarty': SmartyExtension,
'toc': TocExtension,
'wikilinks': WikiLinkExtension
}
def _get_extensions(extensions):
"""Returns a list of markdown extensions instances, ready to be passed to
the markdown.markdown() ``extensions`` parameter.
"""
e = []
for key in extensions:
if key in EXTS:
value = extensions[key]
if isinstance(value, dict):
e.append(EXTS[key](**value))
elif value:
e.append(EXTS[key]())
else:
pass
else:
raise KeyError('Markdown: extension not found: ' + key)
return e
def render_markdown(source, **extensions):
# TODO: add output_format option
e = _get_extensions(extensions)
if not isinstance(source, str):
source = source['content']
return markdown.markdown(source, output_format='html5', extensions=e)
class Markdown:
# TODO: .md or .markdown
def __init__(self, pattern='**/*.md', **extensions):
self.pattern = pattern
self.exts = extensions
def __call__(self, app):
e = _get_extensions(self.exts)
for f in app.find(self.pattern):
f.content = markdown.markdown(
f.content,
output_format='html5',
extensions=e)
f.path = os.path.splitext(f.path)[0] + '.html'
| en | 0.322412 | # Extra: # Other extensions: Returns a list of markdown extensions instances, ready to be passed to the markdown.markdown() ``extensions`` parameter. # TODO: add output_format option # TODO: .md or .markdown | 2.228047 | 2 |
pymontecarlo_gui/options/beam/base.py | pymontecarlo/pymontecarlo-gui | 0 | 6617300 | """"""
# Standard library modules.
import abc
from collections import namedtuple
import itertools
# Third party modules.
from qtpy import QtCore, QtGui, QtWidgets
import numpy as np
# Local modules.
from pymontecarlo.options.beam.base import BeamBase
from pymontecarlo.options.particle import Particle
from pymontecarlo.util.tolerance import tolerance_to_decimals
from pymontecarlo_gui.widgets.field import (
MultiValueFieldBase,
FieldBase,
WidgetFieldBase,
FieldChooser,
)
from pymontecarlo_gui.widgets.lineedit import (
ColoredMultiFloatLineEdit,
ColoredFloatLineEdit,
)
from pymontecarlo_gui.options.base import ToleranceMixin
# Globals and constants variables.
class EnergyField(MultiValueFieldBase):
    """Field editing one or more beam energies, displayed in keV,
    exposed to callers in eV."""

    def __init__(self):
        super().__init__()

        # Widgets
        decimals = tolerance_to_decimals(BeamBase.ENERGY_TOLERANCE_eV) + 3
        widget = ColoredMultiFloatLineEdit()
        widget.setRange(0, 1000, decimals)
        widget.setValues([20.0])
        self._widget = widget

        # Signals
        self._widget.valuesChanged.connect(self.fieldChanged)

    def title(self):
        return "Energies [keV]"

    def widget(self):
        return self._widget

    def energiesEV(self):
        # Widget stores keV; convert to eV for callers.
        return np.array(self._widget.values()) * 1e3

    def setEnergiesEV(self, energies_eV):
        # Convert the incoming eV values to keV before display.
        self._widget.setValues(np.array(energies_eV) / 1e3)
class ParticleField(FieldBase):
    """Combo-box field selecting the beam particle (electron by default)."""

    def __init__(self):
        super().__init__()

        # Widgets
        combobox = QtWidgets.QComboBox()
        for particle in Particle:
            combobox.addItem(particle.name, particle)
        combobox.setCurrentIndex(combobox.findData(Particle.ELECTRON))
        self._widget = combobox

        # Signals
        self._widget.currentIndexChanged.connect(self.fieldChanged)

    def title(self):
        return "Particle"

    def widget(self):
        return self._widget

    def particle(self):
        return self._widget.currentData()

    def setParticle(self, particle):
        self._widget.setCurrentIndex(self._widget.findData(particle))
# Beam position on the sample plane; both coordinates are in meters.
Position = namedtuple("Position", ("x_m", "y_m"))
class CoordinateField(FieldBase, ToleranceMixin):
    """Single coordinate editor; displays nanometers, exposes meters."""

    def __init__(self, title):
        # The title must exist before the base class builds the field.
        self._title = "{} [nm]".format(title)
        super().__init__()

        # Widgets
        widget = ColoredFloatLineEdit()
        widget.setValue(0.0)
        self._widget = widget

        # Signals
        self._widget.valueChanged.connect(self.fieldChanged)

    def title(self):
        return self._title

    def widget(self):
        return self._widget

    def setToleranceMeter(self, tolerance_m):
        super().setToleranceMeter(tolerance_m)
        # Show just enough decimals for the tolerance expressed in nm.
        decimals = tolerance_to_decimals(tolerance_m * 1e9)
        self._widget.setRange(float("-inf"), float("inf"), decimals)

    def coordinateMeter(self):
        # Widget stores nm; convert to meters for callers.
        return self._widget.value() / 1e9

    def setCoordinateMeter(self, value_m):
        self._widget.setValue(value_m * 1e9)
class StepField(FieldBase):
    """Field editing a number of steps (range 2-500, default 5).

    The underlying line edit stores floats; :meth:`step` coerces the value
    to ``int`` so callers that forward it to ``numpy.linspace(num=...)``
    always receive an integer count (newer numpy rejects float ``num``).
    """

    def __init__(self, title="Number of steps"):
        self._title = title
        super().__init__()

        # Widgets
        self._widget = ColoredFloatLineEdit()
        self._widget.setRange(2, 500, 0)  # 0 decimals: whole numbers only
        self._widget.setValue(5)

        # Signals
        self._widget.valueChanged.connect(self.fieldChanged)

    def title(self):
        return self._title

    def widget(self):
        return self._widget

    def step(self):
        # BUGFIX: the widget returns a float; convert explicitly so the
        # value is usable as an integer count (e.g. numpy.linspace num=).
        return int(self._widget.value())

    def setStep(self, step):
        self._widget.setValue(step)
class PositionField(WidgetFieldBase, ToleranceMixin):
    """Abstract base for fields that produce a list of beam positions."""

    def __init__(self):
        super().__init__()

    def setToleranceMeter(self, tolerance_m):
        # CONSISTENCY FIX: also store the tolerance on this field (every
        # other ToleranceMixin subclass in this module calls super()),
        # then propagate it to each child field that supports it.
        super().setToleranceMeter(tolerance_m)
        for field in self.fields():
            if hasattr(field, "setToleranceMeter"):
                field.setToleranceMeter(tolerance_m)

    @abc.abstractmethod
    def positions(self):
        """Return a list of :class:`Position` tuples (coordinates in meters)."""
        return []
class SinglePositionField(PositionField):
    """Position field describing exactly one (x, y) point."""

    def __init__(self):
        super().__init__()

        self.field_x = CoordinateField("x")
        self.field_y = CoordinateField("y")
        self.addLabelField(self.field_x)
        self.addLabelField(self.field_y)

    def title(self):
        return "Single position"

    def positions(self):
        return [
            Position(self.field_x.coordinateMeter(),
                     self.field_y.coordinateMeter())
        ]
class LineScanPositionField(PositionField):
    """Shared start/stop/steps widgets for the line-scan position fields."""

    def __init__(self):
        super().__init__()

        self.field_start = CoordinateField("Start")
        self.field_start.setCoordinateMeter(-5e-6)

        self.field_stop = CoordinateField("Stop")
        self.field_stop.setCoordinateMeter(5e-6)

        self.field_step = StepField()

        # Registration order defines the display order: start, stop, steps.
        for field in (self.field_start, self.field_stop, self.field_step):
            self.addLabelField(field)
class LineScanXPositionField(LineScanPositionField):
    """Line scan along the X axis (y fixed at 0)."""

    def title(self):
        return "Line scan along X axis"

    def positions(self):
        xs_m = np.linspace(
            self.field_start.coordinateMeter(),
            self.field_stop.coordinateMeter(),
            self.field_step.step(),
            endpoint=True,
        )
        return [Position(x_m, 0.0) for x_m in xs_m]
class LineScanYPositionField(LineScanPositionField):
    """Line scan along the Y axis (x fixed at 0)."""

    def title(self):
        return "Line scan along Y axis"

    def positions(self):
        ys_m = np.linspace(
            self.field_start.coordinateMeter(),
            self.field_stop.coordinateMeter(),
            self.field_step.step(),
            endpoint=True,
        )
        return [Position(0.0, y_m) for y_m in ys_m]
class GridPositionField(PositionField):
    """Position field generating a rectangular grid of (x, y) points."""

    def __init__(self):
        super().__init__()

        # X extent (meters) and number of grid columns.
        self.field_x_start = CoordinateField("Start X")
        self.field_x_start.setCoordinateMeter(-1e-6)
        self.addLabelField(self.field_x_start)
        self.field_x_stop = CoordinateField("Stop X")
        self.field_x_stop.setCoordinateMeter(1e-6)
        self.addLabelField(self.field_x_stop)
        self.field_x_step = StepField("Number of steps X")
        self.addLabelField(self.field_x_step)

        # Y extent (meters) and number of grid rows.
        self.field_y_start = CoordinateField("Start Y")
        self.field_y_start.setCoordinateMeter(-1e-6)
        self.addLabelField(self.field_y_start)
        self.field_y_stop = CoordinateField("Stop Y")
        self.field_y_stop.setCoordinateMeter(1e-6)
        self.addLabelField(self.field_y_stop)
        self.field_y_step = StepField("Number of steps Y")
        self.addLabelField(self.field_y_step)

    def title(self):
        return "Grid"

    def positions(self):
        x_start_m = self.field_x_start.coordinateMeter()
        x_stop_m = self.field_x_stop.coordinateMeter()
        x_num = self.field_x_step.step()
        xs_m = np.linspace(x_start_m, x_stop_m, x_num, endpoint=True)

        y_start_m = self.field_y_start.coordinateMeter()
        y_stop_m = self.field_y_stop.coordinateMeter()
        y_num = self.field_y_step.step()
        ys_m = np.linspace(y_start_m, y_stop_m, y_num, endpoint=True)

        # Cartesian product of the two axes -> one Position per grid node.
        return [Position(x_m, y_m) for x_m, y_m in itertools.product(xs_m, ys_m)]
class PositionsModel(QtCore.QAbstractTableModel, ToleranceMixin):
    """Two-column table model of unique beam positions.

    Positions are stored in meters and rendered in nanometers; the
    display precision follows the configured tolerance.
    """

    def __init__(self):
        super().__init__()
        self._positions = []

    def rowCount(self, parent=None):
        return len(self._positions)

    def columnCount(self, parent=None):
        return 2  # x and y

    def data(self, index, role=QtCore.Qt.DisplayRole):
        if not index.isValid():
            return None

        row = index.row()
        column = index.column()
        position = self._positions[row]

        if role == QtCore.Qt.DisplayRole:
            if column == 0:
                value_nm = position.x_m * 1e9
            elif column == 1:
                value_nm = position.y_m * 1e9
            else:
                return None

            tolerance_m = self.toleranceMeter()
            if tolerance_m is not None:
                # Decimals needed once the value is shown in nm; clamp at
                # 0 because a negative precision is a ValueError in format().
                precision = max(tolerance_to_decimals(tolerance_m) - 9, 0)
                return "{0:.{precision}f}".format(value_nm, precision=precision)
            # BUGFIX: the original fell through to a format call that
            # referenced ``precision`` even when no tolerance was set,
            # raising NameError.
            return "{0:g}".format(value_nm)

        if role == QtCore.Qt.UserRole:
            return position

        if role == QtCore.Qt.TextAlignmentRole:
            return QtCore.Qt.AlignCenter

        return None

    def headerData(self, section, orientation, role):
        if role == QtCore.Qt.DisplayRole:
            if orientation == QtCore.Qt.Horizontal:
                if section == 0:
                    return "X [nm]"
                elif section == 1:
                    return "Y [nm]"
            elif orientation == QtCore.Qt.Vertical:
                # Rows are numbered from 1 for display.
                return str(section + 1)

    def flags(self, index):
        return super().flags(index)

    def _add_position(self, position):
        """Append *position* if not already present; return True if added."""
        if position in self._positions:
            return False
        self._positions.append(position)
        return True

    def addPosition(self, position):
        added = self._add_position(position)
        if added:
            self.modelReset.emit()
        return added

    def addPositions(self, positions):
        if not positions:
            return False

        added = False
        for position in positions:
            added |= self._add_position(position)

        if added:
            self.modelReset.emit()
        return added

    def removePosition(self, position):
        if position not in self._positions:
            return False

        self._positions.remove(position)
        self.modelReset.emit()
        return True

    def clearPositions(self):
        self._positions.clear()
        self.modelReset.emit()

    def hasPositions(self):
        return bool(self._positions)

    def position(self, row):
        return self._positions[row]

    def positions(self):
        return tuple(self._positions)

    def setPositions(self, positions):
        self.clearPositions()
        for x, y in positions:
            # BUGFIX: the original called ``self._add_position(x, y)`` with
            # two arguments, but the helper takes a single Position
            # (TypeError at runtime).
            self._add_position(Position(x, y))
        self.modelReset.emit()

    def setToleranceMeter(self, tolerance_m):
        super().setToleranceMeter(tolerance_m)
        # Re-render all cells with the new display precision.
        self.modelReset.emit()
class PositionsWidget(QtWidgets.QWidget, ToleranceMixin):
    """Widget combining a position-field chooser, an "Add" button and a
    table accumulating the chosen beam positions.

    Emits :attr:`positionsChanged` whenever the set of positions changes.
    """

    positionsChanged = QtCore.Signal()

    def __init__(self, parent=None):
        super().__init__(parent)

        # Variables
        model = PositionsModel()
        model.addPosition(Position(0.0, 0.0))  # default: the origin

        # Actions
        self.action_remove = QtWidgets.QAction("Remove")
        self.action_remove.setIcon(QtGui.QIcon.fromTheme("list-remove"))
        self.action_remove.setToolTip("Remove position")
        self.action_remove.setEnabled(False)
        self.action_clear = QtWidgets.QAction("Clear")
        self.action_clear.setIcon(QtGui.QIcon.fromTheme("edit-clear"))
        self.action_clear.setToolTip("Remove all positions")
        self.action_clear.setEnabled(False)

        # Widgets
        self.chooser = FieldChooser()
        self.button_add = QtWidgets.QPushButton("Add position(s)")
        self.button_add.setIcon(QtGui.QIcon.fromTheme("list-add"))
        self.button_add.setMaximumWidth(self.button_add.sizeHint().width())
        self.table_positions = QtWidgets.QTableView()
        self.table_positions.setModel(model)
        self.table_positions.setSelectionBehavior(QtWidgets.QTableView.SelectRows)
        header = self.table_positions.horizontalHeader()
        for column in range(model.columnCount()):
            header.setSectionResizeMode(column, QtWidgets.QHeaderView.Stretch)
        self.toolbar = QtWidgets.QToolBar()
        self.toolbar.addAction(self.action_remove)
        self.toolbar.addAction(self.action_clear)

        # Layouts
        layout = QtWidgets.QVBoxLayout()
        layout.setContentsMargins(0, 0, 0, 0)
        layout.addWidget(self.chooser)
        layout.addWidget(self.button_add, alignment=QtCore.Qt.AlignRight)
        layout.addWidget(self.table_positions)
        layout.addWidget(self.toolbar, alignment=QtCore.Qt.AlignRight)
        self.setLayout(layout)

        # Signals: model/selection changes refresh action enabled states
        # and re-emit positionsChanged for this widget's listeners.
        self.action_remove.triggered.connect(self._on_remove_triggered)
        self.action_clear.triggered.connect(self._on_clear_triggered)
        self.button_add.clicked.connect(self._on_add_clicked)
        model.dataChanged.connect(self._on_positions_changed)
        model.dataChanged.connect(self.positionsChanged)
        model.modelReset.connect(self._on_positions_changed)
        model.modelReset.connect(self.positionsChanged)
        self.table_positions.selectionModel().selectionChanged.connect(
            self._on_positions_changed
        )

    def _on_remove_triggered(self):
        """Remove every currently selected row from the model."""
        selection_model = self.table_positions.selectionModel()
        if not selection_model.hasSelection():
            return

        indexes = selection_model.selectedIndexes()
        model = self.table_positions.model()
        # Iterate highest row first so earlier removals do not shift
        # the indices of rows still to be removed.
        for row in reversed(sorted(set(index.row() for index in indexes))):
            model.removePosition(model.position(row))

    def _on_clear_triggered(self):
        model = self.table_positions.model()
        model.clearPositions()

    def _on_add_clicked(self):
        # Add all positions produced by the currently selected field.
        field = self.chooser.currentField()
        if field is None:
            return
        positions = field.positions()
        self.table_positions.model().addPositions(positions)

    def _on_positions_changed(self):
        # Keep the toolbar actions in sync with table/selection state.
        model = self.table_positions.model()
        has_rows = model.hasPositions()

        selection_model = self.table_positions.selectionModel()
        has_selection = selection_model.hasSelection()

        self.action_remove.setEnabled(has_rows and has_selection)
        self.action_clear.setEnabled(has_rows)

    def _on_field_changed(self):
        # Only allow "Add" when the current field's inputs validate.
        field = self.chooser.currentField()
        if field is None:
            return
        self.button_add.setEnabled(field.isValid())

    def setToleranceMeter(self, tolerance_m):
        super().setToleranceMeter(tolerance_m)
        # Propagate to all registered position fields and to the model.
        for field in self.chooser.fields():
            field.setToleranceMeter(tolerance_m)
        self.table_positions.model().setToleranceMeter(tolerance_m)

    def registerPositionField(self, field):
        """Add *field* to the chooser and track its validity."""
        self.chooser.addField(field)
        field.fieldChanged.connect(self._on_field_changed)

    def positions(self):
        return self.table_positions.model().positions()
class PositionsField(FieldBase, ToleranceMixin):
    """Field wrapping :class:`PositionsWidget` for use in a field form."""

    def __init__(self):
        super().__init__()

        # Widgets
        self._widget = PositionsWidget()

        # Signals
        self._widget.positionsChanged.connect(self.fieldChanged)

    def title(self):
        return "Positions"

    def widget(self):
        return self._widget

    def registerPositionField(self, field):
        # Propagate the current tolerance before handing the field over.
        field.setToleranceMeter(self.toleranceMeter())
        self._widget.registerPositionField(field)

    def positions(self):
        return self._widget.positions()

    def setToleranceMeter(self, tolerance_m):
        super().setToleranceMeter(tolerance_m)
        self._widget.setToleranceMeter(tolerance_m)
class BeamFieldBase(WidgetFieldBase):
    """Base class for beam configuration fields."""

    def isValid(self):
        # Valid only if the widget contents validate AND at least one
        # beam can be constructed from them.
        return super().isValid() and bool(self.beams())

    @abc.abstractmethod
    def beams(self):
        """
        Returns a :class:`list` of :class:`BeamBase`.
        """
        return []
| """"""
# Standard library modules.
import abc
from collections import namedtuple
import itertools
# Third party modules.
from qtpy import QtCore, QtGui, QtWidgets
import numpy as np
# Local modules.
from pymontecarlo.options.beam.base import BeamBase
from pymontecarlo.options.particle import Particle
from pymontecarlo.util.tolerance import tolerance_to_decimals
from pymontecarlo_gui.widgets.field import (
MultiValueFieldBase,
FieldBase,
WidgetFieldBase,
FieldChooser,
)
from pymontecarlo_gui.widgets.lineedit import (
ColoredMultiFloatLineEdit,
ColoredFloatLineEdit,
)
from pymontecarlo_gui.options.base import ToleranceMixin
# Globals and constants variables.
class EnergyField(MultiValueFieldBase):
    """Field editing one or more beam energies, displayed in keV,
    exposed to callers in eV."""

    def __init__(self):
        super().__init__()

        # Widgets
        decimals = tolerance_to_decimals(BeamBase.ENERGY_TOLERANCE_eV) + 3
        widget = ColoredMultiFloatLineEdit()
        widget.setRange(0, 1000, decimals)
        widget.setValues([20.0])
        self._widget = widget

        # Signals
        self._widget.valuesChanged.connect(self.fieldChanged)

    def title(self):
        return "Energies [keV]"

    def widget(self):
        return self._widget

    def energiesEV(self):
        # Widget stores keV; convert to eV for callers.
        return np.array(self._widget.values()) * 1e3

    def setEnergiesEV(self, energies_eV):
        # Convert the incoming eV values to keV before display.
        self._widget.setValues(np.array(energies_eV) / 1e3)
class ParticleField(FieldBase):
    """Combo-box field selecting the beam particle (electron by default)."""

    def __init__(self):
        super().__init__()

        # Widgets
        combobox = QtWidgets.QComboBox()
        for particle in Particle:
            combobox.addItem(particle.name, particle)
        combobox.setCurrentIndex(combobox.findData(Particle.ELECTRON))
        self._widget = combobox

        # Signals
        self._widget.currentIndexChanged.connect(self.fieldChanged)

    def title(self):
        return "Particle"

    def widget(self):
        return self._widget

    def particle(self):
        return self._widget.currentData()

    def setParticle(self, particle):
        self._widget.setCurrentIndex(self._widget.findData(particle))
# Beam position on the sample plane; both coordinates are in meters.
Position = namedtuple("Position", ("x_m", "y_m"))
class CoordinateField(FieldBase, ToleranceMixin):
    """Single coordinate editor; displays nanometers, exposes meters."""

    def __init__(self, title):
        # The title must exist before the base class builds the field.
        self._title = "{} [nm]".format(title)
        super().__init__()

        # Widgets
        widget = ColoredFloatLineEdit()
        widget.setValue(0.0)
        self._widget = widget

        # Signals
        self._widget.valueChanged.connect(self.fieldChanged)

    def title(self):
        return self._title

    def widget(self):
        return self._widget

    def setToleranceMeter(self, tolerance_m):
        super().setToleranceMeter(tolerance_m)
        # Show just enough decimals for the tolerance expressed in nm.
        decimals = tolerance_to_decimals(tolerance_m * 1e9)
        self._widget.setRange(float("-inf"), float("inf"), decimals)

    def coordinateMeter(self):
        # Widget stores nm; convert to meters for callers.
        return self._widget.value() / 1e9

    def setCoordinateMeter(self, value_m):
        self._widget.setValue(value_m * 1e9)
class StepField(FieldBase):
    """Field editing a number of steps (range 2-500, default 5).

    The underlying line edit stores floats; :meth:`step` coerces the value
    to ``int`` so callers that forward it to ``numpy.linspace(num=...)``
    always receive an integer count (newer numpy rejects float ``num``).
    """

    def __init__(self, title="Number of steps"):
        self._title = title
        super().__init__()

        # Widgets
        self._widget = ColoredFloatLineEdit()
        self._widget.setRange(2, 500, 0)  # 0 decimals: whole numbers only
        self._widget.setValue(5)

        # Signals
        self._widget.valueChanged.connect(self.fieldChanged)

    def title(self):
        return self._title

    def widget(self):
        return self._widget

    def step(self):
        # BUGFIX: the widget returns a float; convert explicitly so the
        # value is usable as an integer count (e.g. numpy.linspace num=).
        return int(self._widget.value())

    def setStep(self, step):
        self._widget.setValue(step)
class PositionField(WidgetFieldBase, ToleranceMixin):
    """Abstract base for fields that produce a list of beam positions."""

    def __init__(self):
        super().__init__()

    def setToleranceMeter(self, tolerance_m):
        # CONSISTENCY FIX: also store the tolerance on this field (every
        # other ToleranceMixin subclass in this module calls super()),
        # then propagate it to each child field that supports it.
        super().setToleranceMeter(tolerance_m)
        for field in self.fields():
            if hasattr(field, "setToleranceMeter"):
                field.setToleranceMeter(tolerance_m)

    @abc.abstractmethod
    def positions(self):
        """Return a list of :class:`Position` tuples (coordinates in meters)."""
        return []
class SinglePositionField(PositionField):
    """Position field describing exactly one (x, y) point."""

    def __init__(self):
        super().__init__()

        self.field_x = CoordinateField("x")
        self.field_y = CoordinateField("y")
        self.addLabelField(self.field_x)
        self.addLabelField(self.field_y)

    def title(self):
        return "Single position"

    def positions(self):
        return [
            Position(self.field_x.coordinateMeter(),
                     self.field_y.coordinateMeter())
        ]
class LineScanPositionField(PositionField):
    """Shared start/stop/steps widgets for the line-scan position fields."""

    def __init__(self):
        super().__init__()

        self.field_start = CoordinateField("Start")
        self.field_start.setCoordinateMeter(-5e-6)

        self.field_stop = CoordinateField("Stop")
        self.field_stop.setCoordinateMeter(5e-6)

        self.field_step = StepField()

        # Registration order defines the display order: start, stop, steps.
        for field in (self.field_start, self.field_stop, self.field_step):
            self.addLabelField(field)
class LineScanXPositionField(LineScanPositionField):
    """Line scan along the X axis (y fixed at 0)."""

    def title(self):
        return "Line scan along X axis"

    def positions(self):
        xs_m = np.linspace(
            self.field_start.coordinateMeter(),
            self.field_stop.coordinateMeter(),
            self.field_step.step(),
            endpoint=True,
        )
        return [Position(x_m, 0.0) for x_m in xs_m]
class LineScanYPositionField(LineScanPositionField):
    """Line scan along the Y axis (x fixed at 0)."""

    def title(self):
        return "Line scan along Y axis"

    def positions(self):
        ys_m = np.linspace(
            self.field_start.coordinateMeter(),
            self.field_stop.coordinateMeter(),
            self.field_step.step(),
            endpoint=True,
        )
        return [Position(0.0, y_m) for y_m in ys_m]
class GridPositionField(PositionField):
    """Position field generating a rectangular grid of (x, y) points."""

    def __init__(self):
        super().__init__()

        # X extent (meters) and number of grid columns.
        self.field_x_start = CoordinateField("Start X")
        self.field_x_start.setCoordinateMeter(-1e-6)
        self.addLabelField(self.field_x_start)
        self.field_x_stop = CoordinateField("Stop X")
        self.field_x_stop.setCoordinateMeter(1e-6)
        self.addLabelField(self.field_x_stop)
        self.field_x_step = StepField("Number of steps X")
        self.addLabelField(self.field_x_step)

        # Y extent (meters) and number of grid rows.
        self.field_y_start = CoordinateField("Start Y")
        self.field_y_start.setCoordinateMeter(-1e-6)
        self.addLabelField(self.field_y_start)
        self.field_y_stop = CoordinateField("Stop Y")
        self.field_y_stop.setCoordinateMeter(1e-6)
        self.addLabelField(self.field_y_stop)
        self.field_y_step = StepField("Number of steps Y")
        self.addLabelField(self.field_y_step)

    def title(self):
        return "Grid"

    def positions(self):
        x_start_m = self.field_x_start.coordinateMeter()
        x_stop_m = self.field_x_stop.coordinateMeter()
        x_num = self.field_x_step.step()
        xs_m = np.linspace(x_start_m, x_stop_m, x_num, endpoint=True)

        y_start_m = self.field_y_start.coordinateMeter()
        y_stop_m = self.field_y_stop.coordinateMeter()
        y_num = self.field_y_step.step()
        ys_m = np.linspace(y_start_m, y_stop_m, y_num, endpoint=True)

        # Cartesian product of the two axes -> one Position per grid node.
        return [Position(x_m, y_m) for x_m, y_m in itertools.product(xs_m, ys_m)]
class PositionsModel(QtCore.QAbstractTableModel, ToleranceMixin):
    """Two-column table model of unique beam positions.

    Positions are stored in meters and rendered in nanometers; the
    display precision follows the configured tolerance.
    """

    def __init__(self):
        super().__init__()
        self._positions = []

    def rowCount(self, parent=None):
        return len(self._positions)

    def columnCount(self, parent=None):
        return 2  # x and y

    def data(self, index, role=QtCore.Qt.DisplayRole):
        if not index.isValid():
            return None

        row = index.row()
        column = index.column()
        position = self._positions[row]

        if role == QtCore.Qt.DisplayRole:
            if column == 0:
                value_nm = position.x_m * 1e9
            elif column == 1:
                value_nm = position.y_m * 1e9
            else:
                return None

            tolerance_m = self.toleranceMeter()
            if tolerance_m is not None:
                # Decimals needed once the value is shown in nm; clamp at
                # 0 because a negative precision is a ValueError in format().
                precision = max(tolerance_to_decimals(tolerance_m) - 9, 0)
                return "{0:.{precision}f}".format(value_nm, precision=precision)
            # BUGFIX: the original fell through to a format call that
            # referenced ``precision`` even when no tolerance was set,
            # raising NameError.
            return "{0:g}".format(value_nm)

        if role == QtCore.Qt.UserRole:
            return position

        if role == QtCore.Qt.TextAlignmentRole:
            return QtCore.Qt.AlignCenter

        return None

    def headerData(self, section, orientation, role):
        if role == QtCore.Qt.DisplayRole:
            if orientation == QtCore.Qt.Horizontal:
                if section == 0:
                    return "X [nm]"
                elif section == 1:
                    return "Y [nm]"
            elif orientation == QtCore.Qt.Vertical:
                # Rows are numbered from 1 for display.
                return str(section + 1)

    def flags(self, index):
        return super().flags(index)

    def _add_position(self, position):
        """Append *position* if not already present; return True if added."""
        if position in self._positions:
            return False
        self._positions.append(position)
        return True

    def addPosition(self, position):
        added = self._add_position(position)
        if added:
            self.modelReset.emit()
        return added

    def addPositions(self, positions):
        if not positions:
            return False

        added = False
        for position in positions:
            added |= self._add_position(position)

        if added:
            self.modelReset.emit()
        return added

    def removePosition(self, position):
        if position not in self._positions:
            return False

        self._positions.remove(position)
        self.modelReset.emit()
        return True

    def clearPositions(self):
        self._positions.clear()
        self.modelReset.emit()

    def hasPositions(self):
        return bool(self._positions)

    def position(self, row):
        return self._positions[row]

    def positions(self):
        return tuple(self._positions)

    def setPositions(self, positions):
        self.clearPositions()
        for x, y in positions:
            # BUGFIX: the original called ``self._add_position(x, y)`` with
            # two arguments, but the helper takes a single Position
            # (TypeError at runtime).
            self._add_position(Position(x, y))
        self.modelReset.emit()

    def setToleranceMeter(self, tolerance_m):
        super().setToleranceMeter(tolerance_m)
        # Re-render all cells with the new display precision.
        self.modelReset.emit()
class PositionsWidget(QtWidgets.QWidget, ToleranceMixin):
    """Widget combining a position-field chooser, an "Add" button and a
    table accumulating the chosen beam positions.

    Emits :attr:`positionsChanged` whenever the set of positions changes.
    """

    positionsChanged = QtCore.Signal()

    def __init__(self, parent=None):
        super().__init__(parent)

        # Variables
        model = PositionsModel()
        model.addPosition(Position(0.0, 0.0))  # default: the origin

        # Actions
        self.action_remove = QtWidgets.QAction("Remove")
        self.action_remove.setIcon(QtGui.QIcon.fromTheme("list-remove"))
        self.action_remove.setToolTip("Remove position")
        self.action_remove.setEnabled(False)
        self.action_clear = QtWidgets.QAction("Clear")
        self.action_clear.setIcon(QtGui.QIcon.fromTheme("edit-clear"))
        self.action_clear.setToolTip("Remove all positions")
        self.action_clear.setEnabled(False)

        # Widgets
        self.chooser = FieldChooser()
        self.button_add = QtWidgets.QPushButton("Add position(s)")
        self.button_add.setIcon(QtGui.QIcon.fromTheme("list-add"))
        self.button_add.setMaximumWidth(self.button_add.sizeHint().width())
        self.table_positions = QtWidgets.QTableView()
        self.table_positions.setModel(model)
        self.table_positions.setSelectionBehavior(QtWidgets.QTableView.SelectRows)
        header = self.table_positions.horizontalHeader()
        for column in range(model.columnCount()):
            header.setSectionResizeMode(column, QtWidgets.QHeaderView.Stretch)
        self.toolbar = QtWidgets.QToolBar()
        self.toolbar.addAction(self.action_remove)
        self.toolbar.addAction(self.action_clear)

        # Layouts
        layout = QtWidgets.QVBoxLayout()
        layout.setContentsMargins(0, 0, 0, 0)
        layout.addWidget(self.chooser)
        layout.addWidget(self.button_add, alignment=QtCore.Qt.AlignRight)
        layout.addWidget(self.table_positions)
        layout.addWidget(self.toolbar, alignment=QtCore.Qt.AlignRight)
        self.setLayout(layout)

        # Signals: model/selection changes refresh action enabled states
        # and re-emit positionsChanged for this widget's listeners.
        self.action_remove.triggered.connect(self._on_remove_triggered)
        self.action_clear.triggered.connect(self._on_clear_triggered)
        self.button_add.clicked.connect(self._on_add_clicked)
        model.dataChanged.connect(self._on_positions_changed)
        model.dataChanged.connect(self.positionsChanged)
        model.modelReset.connect(self._on_positions_changed)
        model.modelReset.connect(self.positionsChanged)
        self.table_positions.selectionModel().selectionChanged.connect(
            self._on_positions_changed
        )

    def _on_remove_triggered(self):
        """Remove every currently selected row from the model."""
        selection_model = self.table_positions.selectionModel()
        if not selection_model.hasSelection():
            return

        indexes = selection_model.selectedIndexes()
        model = self.table_positions.model()
        # Iterate highest row first so earlier removals do not shift
        # the indices of rows still to be removed.
        for row in reversed(sorted(set(index.row() for index in indexes))):
            model.removePosition(model.position(row))

    def _on_clear_triggered(self):
        model = self.table_positions.model()
        model.clearPositions()

    def _on_add_clicked(self):
        # Add all positions produced by the currently selected field.
        field = self.chooser.currentField()
        if field is None:
            return
        positions = field.positions()
        self.table_positions.model().addPositions(positions)

    def _on_positions_changed(self):
        # Keep the toolbar actions in sync with table/selection state.
        model = self.table_positions.model()
        has_rows = model.hasPositions()

        selection_model = self.table_positions.selectionModel()
        has_selection = selection_model.hasSelection()

        self.action_remove.setEnabled(has_rows and has_selection)
        self.action_clear.setEnabled(has_rows)

    def _on_field_changed(self):
        # Only allow "Add" when the current field's inputs validate.
        field = self.chooser.currentField()
        if field is None:
            return
        self.button_add.setEnabled(field.isValid())

    def setToleranceMeter(self, tolerance_m):
        super().setToleranceMeter(tolerance_m)
        # Propagate to all registered position fields and to the model.
        for field in self.chooser.fields():
            field.setToleranceMeter(tolerance_m)
        self.table_positions.model().setToleranceMeter(tolerance_m)

    def registerPositionField(self, field):
        """Add *field* to the chooser and track its validity."""
        self.chooser.addField(field)
        field.fieldChanged.connect(self._on_field_changed)

    def positions(self):
        return self.table_positions.model().positions()
class PositionsField(FieldBase, ToleranceMixin):
    """Field wrapping :class:`PositionsWidget` for use in a field form."""

    def __init__(self):
        super().__init__()

        # Widgets
        self._widget = PositionsWidget()

        # Signals
        self._widget.positionsChanged.connect(self.fieldChanged)

    def title(self):
        return "Positions"

    def widget(self):
        return self._widget

    def registerPositionField(self, field):
        # Propagate the current tolerance before handing the field over.
        field.setToleranceMeter(self.toleranceMeter())
        self._widget.registerPositionField(field)

    def positions(self):
        return self._widget.positions()

    def setToleranceMeter(self, tolerance_m):
        super().setToleranceMeter(tolerance_m)
        self._widget.setToleranceMeter(tolerance_m)
class BeamFieldBase(WidgetFieldBase):
    """Base class for beam configuration fields."""

    def isValid(self):
        # Valid only if the widget contents validate AND at least one
        # beam can be constructed from them.
        return super().isValid() and bool(self.beams())

    @abc.abstractmethod
    def beams(self):
        """
        Returns a :class:`list` of :class:`BeamBase`.
        """
        return []
| en | 0.478178 | # Standard library modules. # Third party modules. # Local modules. # Globals and constants variables. # Widgets # Signals # Widgets # Signals # Widgets # Signals # Widgets # Signals # Variables # Actions # Widgets # Layouts # Signals # Widgets # Signals Returns a :class:`list` of :class:`BeamBase`. | 2.169554 | 2 |
views/web/dustWeb/viz/VizGauge.py | twatteynelinear/dustlink_sierra | 4 | 6617301 | import logging
class NullHandler(logging.Handler):
    """Handler that silently discards every record.

    NOTE(review): stdlib ``logging.NullHandler`` (Python 2.7+) provides
    the same behavior and could replace this class.
    """

    def emit(self, record):
        # Intentionally a no-op.
        pass
# Module logger: errors only, with a discarding handler attached —
# presumably so importing this library never triggers "no handlers could
# be found" warnings unless the application configures logging itself.
log = logging.getLogger('VizGauge')
log.setLevel(logging.ERROR)
log.addHandler(NullHandler())
import VizGoogleCharts
class VizGauge(VizGoogleCharts.VizGoogleCharts):
    """Google Charts gauge visualization."""

    #======================== header ==========================================

    # Static header template loading the Google Charts 'gauge' package with
    # fixed geometry and warning bands (red above 90, yellow 75-90 — units
    # are gauge-scale values).
    templateHeader = VizGoogleCharts.VizGoogleCharts.getHeaderTemplate(
        PACKAGE       = 'gauge',
        VISUALIZATION = 'Gauge',
        EXTRAOPTIONS  = {
            'height'     : 250,
            'width'      : 750,
            'redFrom'    : 90,
            'redTo'      : 100,
            'yellowFrom' : 75,
            'yellowTo'   : 90,
            'minorTicks' : 5,
        }
    )
| import logging
class NullHandler(logging.Handler):
    """Handler that silently discards every record.

    NOTE(review): stdlib ``logging.NullHandler`` (Python 2.7+) provides
    the same behavior and could replace this class.
    """

    def emit(self, record):
        # Intentionally a no-op.
        pass
# Module logger: errors only, with a discarding handler attached —
# presumably so importing this library never triggers "no handlers could
# be found" warnings unless the application configures logging itself.
log = logging.getLogger('VizGauge')
log.setLevel(logging.ERROR)
log.addHandler(NullHandler())
import VizGoogleCharts
class VizGauge(VizGoogleCharts.VizGoogleCharts):
    """Google Charts gauge visualization."""

    #======================== header ==========================================

    # Static header template loading the Google Charts 'gauge' package with
    # fixed geometry and warning bands (red above 90, yellow 75-90 — units
    # are gauge-scale values).
    templateHeader = VizGoogleCharts.VizGoogleCharts.getHeaderTemplate(
        PACKAGE       = 'gauge',
        VISUALIZATION = 'Gauge',
        EXTRAOPTIONS  = {
            'height'     : 250,
            'width'      : 750,
            'redFrom'    : 90,
            'redTo'      : 100,
            'yellowFrom' : 75,
            'yellowTo'   : 90,
            'minorTicks' : 5,
        }
    )
| fr | 0.304081 | #======================== header ========================================== | 2.365335 | 2 |
# Simple demo script: define two numbers and print the first one.
num = 2000
num10 = 50000
print(num)
# Simple demo script: define two numbers and print the first one.
num = 2000
num10 = 50000
print(num)
| none | 1 | 2.442589 | 2 | |
caa_monkeypatch.py | mamisano/canarytokens | 0 | 6617303 | # We need to monkey patch support for CAA Records because it is a newish DNS record type
# that is required by the lets encrypt process. Basically we need to respond to CAA record
# requests with anything besides a SERVFAIL, hence we respond with an NXDomain response.
def monkey_patch_caa_support():
    """Install CAA (RR type 257) support into twisted.names at runtime.

    Order matters: the DNS record type must be registered before the
    common and resolve modules are patched.
    """
    for patch in (patchDNSModule, patchCommonModule, patchResolveModule):
        patch()
def patchDNSModule():
    """Register the CAA record type (code 257) with twisted.names.dns."""
    import twisted.names.dns as dns

    dns.CAA = 257
    dns.QUERY_TYPES[dns.CAA] = "CAA"
def patchCommonModule():
    """Teach ResolverBase to dispatch CAA queries through lookupCAA()."""
    import twisted.names.common
    # ROBUSTNESS: import the dns submodule explicitly instead of relying
    # on it being reachable as a package attribute only because another
    # function happened to import it first.
    import twisted.names.dns

    twisted.names.common.typeToMethod[twisted.names.dns.CAA] = 'lookupCAA'

    def lookupCAA(self, name, timeout=None):
        # Mirror the other lookup* helpers: issue an IN/CAA query.
        return self._lookup(name, twisted.names.dns.IN, twisted.names.dns.CAA, timeout)

    twisted.names.common.ResolverBase.lookupCAA = lookupCAA
def patchResolveModule():
    """Give ResolverChain a lookupCAA() that tries each resolver in turn."""
    import twisted.names.resolve
    from twisted.internet import defer
    from twisted.names import error

    def lookupCAA(self, name, timeout=None):
        # No resolvers configured: fail immediately.
        if not self.resolvers:
            return defer.fail(error.DomainError())

        # Query the first resolver; on failure fall through to the next.
        first, *others = self.resolvers
        d = first.lookupCAA(name, timeout)
        for resolver in others:
            d = d.addErrback(
                twisted.names.resolve.FailureHandler(resolver.lookupCAA, name, timeout)
            )
        return d

    twisted.names.resolve.ResolverChain.lookupCAA = lookupCAA
# that is required by the lets encrypt process. Basically we need to respond to CAA record
# requests with anything besides a SERVFAIL, hence we respond with an NXDomain response.
def monkey_patch_caa_support():
    """Install CAA (RR type 257) support into twisted.names at runtime.

    Order matters: the DNS record type must be registered before the
    common and resolve modules are patched.
    """
    for patch in (patchDNSModule, patchCommonModule, patchResolveModule):
        patch()
def patchDNSModule():
    """Register the CAA record type (code 257) with twisted.names.dns."""
    import twisted.names.dns as dns

    dns.CAA = 257
    dns.QUERY_TYPES[dns.CAA] = "CAA"
def patchCommonModule():
    """Teach ResolverBase to dispatch CAA queries through lookupCAA()."""
    import twisted.names.common
    # ROBUSTNESS: import the dns submodule explicitly instead of relying
    # on it being reachable as a package attribute only because another
    # function happened to import it first.
    import twisted.names.dns

    twisted.names.common.typeToMethod[twisted.names.dns.CAA] = 'lookupCAA'

    def lookupCAA(self, name, timeout=None):
        # Mirror the other lookup* helpers: issue an IN/CAA query.
        return self._lookup(name, twisted.names.dns.IN, twisted.names.dns.CAA, timeout)

    twisted.names.common.ResolverBase.lookupCAA = lookupCAA
def patchResolveModule():
    """Give ResolverChain a lookupCAA() that tries each resolver in turn."""
    import twisted.names.resolve
    from twisted.internet import defer
    from twisted.names import error

    def lookupCAA(self, name, timeout=None):
        # No resolvers configured: fail immediately.
        if not self.resolvers:
            return defer.fail(error.DomainError())

        # Query the first resolver; on failure fall through to the next.
        first, *others = self.resolvers
        d = first.lookupCAA(name, timeout)
        for resolver in others:
            d = d.addErrback(
                twisted.names.resolve.FailureHandler(resolver.lookupCAA, name, timeout)
            )
        return d

    twisted.names.resolve.ResolverChain.lookupCAA = lookupCAA
tests/test_hwflow/conf.py | Xilinx/roast-xilinx | 1 | 6617304 | <gh_stars>1-10
#
# Copyright (c) 2020 Xilinx, Inc. All rights reserved.
# SPDX-License-Identifier: MIT
#
ROOT = ""
buildDir = "{ROOT}/tests/test_hwflow/_tmp"
# Settings to test hwflow package version 4.1
hwflow_ver = "2.0"
version = "2020.2"
build = "2020.2_INT_0810_1"
VIVADO = "/proj/xbuilds/{build}/installs/lin64/Vivado/{version}/bin/vivado"
# Design script and outputs
design_name = "versal_3bram"
design_script = "{ROOT}/tests/test_hwflow/{design_name}.py"
artifacts = [
"outputs",
"@design.runs/impl_1/gen_files",
"@design.runs/impl_1/static_files",
"@design.runs/impl_1/@design_wrapper.pdi.bif",
"main.tcl",
"config_bd.tcl",
"vivado.log",
]
deploy_dir = "{buildDir}/hwflow_images/{version}/{build}/{design_name}"
# LSF settings
lsf_mode = False
lsf_options = "-Is"
lsf_queue = "long"
lsf_osver = "ws7"
lsf_mem = "65536"
lsf_xsjbsub = ""
# Hardware flow repo settings
hwflow_url = "<EMAIL>:SET-HW/hwflow2_0.git"
hwflow_branch = "master"
| #
# Copyright (c) 2020 Xilinx, Inc. All rights reserved.
# SPDX-License-Identifier: MIT
#
ROOT = ""
buildDir = "{ROOT}/tests/test_hwflow/_tmp"
# Settings to test hwflow package version 4.1
hwflow_ver = "2.0"
version = "2020.2"
build = "2020.2_INT_0810_1"
VIVADO = "/proj/xbuilds/{build}/installs/lin64/Vivado/{version}/bin/vivado"
# Design script and outputs
design_name = "versal_3bram"
design_script = "{ROOT}/tests/test_hwflow/{design_name}.py"
artifacts = [
"outputs",
"@design.runs/impl_1/gen_files",
"@design.runs/impl_1/static_files",
"@design.runs/impl_1/@design_wrapper.pdi.bif",
"main.tcl",
"config_bd.tcl",
"vivado.log",
]
deploy_dir = "{buildDir}/hwflow_images/{version}/{build}/{design_name}"
# LSF settings
lsf_mode = False
lsf_options = "-Is"
lsf_queue = "long"
lsf_osver = "ws7"
lsf_mem = "65536"
lsf_xsjbsub = ""
# Hardware flow repo settings
hwflow_url = "<EMAIL>:SET-HW/hwflow2_0.git"
hwflow_branch = "master" | en | 0.589692 | # # Copyright (c) 2020 Xilinx, Inc. All rights reserved. # SPDX-License-Identifier: MIT # # Settings to test hwflow package version 4.1 # Design script and outputs # LSF settings # Hardware flow repo settings | 1.266617 | 1 |
pychron/pipeline/plot/overlays/ideogram_inset_overlay.py | ASUPychron/pychron | 31 | 6617305 | <reponame>ASUPychron/pychron
# ===============================================================================
# Copyright 2014 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# ============= standard library imports ========================
# ============= local library imports ==========================
from __future__ import absolute_import
from chaco.lineplot import LinePlot
from chaco.scatterplot import ScatterPlot
from pychron.graph.error_bar_overlay import ErrorBarOverlay
from pychron.pipeline.plot.overlays.base_inset import BaseInset
GOLDEN_RATIO = 1.618
class BaseIdeogramInset(BaseInset):
    """Mixin adding x/y range accessors shared by the ideogram insets.

    The ``value_range``/``index_range`` attributes are supplied by the
    chaco plot class the concrete subclass mixes this into.
    """

    def set_y_limits(self, y1, y2):
        """Set the low/high bounds of the value (y) range."""
        rng = self.value_range
        rng.low = y1
        rng.high = y2

    def get_y_limits(self):
        """Return ``(low, high)`` of the value (y) range."""
        return self.value_range.low, self.value_range.high

    def set_x_limits(self, x1, x2):
        """Set the low/high bounds of the index (x) range."""
        rng = self.index_range
        rng.low = x1
        rng.high = x2

    def get_x_limits(self):
        """Return ``(low, high)`` of the index (x) range."""
        return self.index_range.low, self.index_range.high
try:

    class IdeogramInset(BaseIdeogramInset, LinePlot):
        """Line-plot inset for an ideogram panel with y tick labels hidden."""

        def __init__(self, *args, **kw):
            # Read the flag before delegating, in case BaseInset consumes
            # the kwarg -- TODO confirm against BaseInset.__init__.
            self.border_visible = kw.get("border_visible", True)
            BaseInset.__init__(self, *args, **kw)
            LinePlot.__init__(self)
            # Blank out y tick labels and hide tick marks on the inset.
            self.y_axis.trait_set(tick_label_formatter=lambda x: "", tick_visible=False)
            # self.set_limits()

except TypeError:
    # documentation auto doc hack: when the chaco base classes are mocked
    # during doc builds, the class definition itself raises TypeError, so
    # fall back to an empty stub.
    class IdeogramInset:
        pass
try:

    class IdeogramPointsInset(BaseIdeogramInset, ScatterPlot):
        """Scatter-plot inset showing individual points with x error bars."""

        def __init__(self, *args, **kw):
            BaseInset.__init__(self, *args, **kw)
            ScatterPlot.__init__(self)

            self.border_visible = kw.get("border_visible", True)
            self.marker = "circle"
            # self.color = 'red'
            self.marker_size = 1.5

            # `visible_axes` presumably comes from BaseInset -- TODO confirm.
            if not self.visible_axes:
                self.x_axis.visible = False
                self.y_axis.visible = False

            # self.set_limits()

            # 1-sigma horizontal error bars without end caps.
            nsigma = 1
            orientation = "x"
            line_width = 1
            visible = True

            ebo = ErrorBarOverlay(
                component=self,
                orientation=orientation,
                nsigma=nsigma,
                line_width=line_width,
                use_end_caps=False,
                visible=visible,
            )
            self.overlays.append(ebo)

except TypeError:
    # documentation auto doc hack: mocked chaco base classes make the class
    # definition raise TypeError during doc builds; use an empty stub.
    class IdeogramPointsInset:
        pass
# ============= EOF =============================================
| # ===============================================================================
# Copyright 2014 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# ============= standard library imports ========================
# ============= local library imports ==========================
from __future__ import absolute_import
from chaco.lineplot import LinePlot
from chaco.scatterplot import ScatterPlot
from pychron.graph.error_bar_overlay import ErrorBarOverlay
from pychron.pipeline.plot.overlays.base_inset import BaseInset
GOLDEN_RATIO = 1.618
class BaseIdeogramInset(BaseInset):
# def set_limits(self):
# l, h = self.value.get_bounds()
# self.value_range.low = 0
# self.value_range.high = h + 1
# l, h = self.index.get_bounds()
# pad = (h - l) * 0.1
# self.index_range.low -= pad
# self.index_range.high += pad
def set_y_limits(self, y1, y2):
self.value_range.low = y1
self.value_range.high = y2
def get_y_limits(self):
v = self.value_range
return v.low, v.high
def set_x_limits(self, x1, x2):
self.index_range.low = x1
self.index_range.high = x2
def get_x_limits(self):
r = self.index_range
return r.low, r.high
try:
class IdeogramInset(BaseIdeogramInset, LinePlot):
def __init__(self, *args, **kw):
self.border_visible = kw.get("border_visible", True)
BaseInset.__init__(self, *args, **kw)
LinePlot.__init__(self)
self.y_axis.trait_set(tick_label_formatter=lambda x: "", tick_visible=False)
# self.set_limits()
except TypeError:
# documentation auto doc hack
class IdeogramInset:
pass
try:
class IdeogramPointsInset(BaseIdeogramInset, ScatterPlot):
def __init__(self, *args, **kw):
BaseInset.__init__(self, *args, **kw)
ScatterPlot.__init__(self)
self.border_visible = kw.get("border_visible", True)
self.marker = "circle"
# self.color = 'red'
self.marker_size = 1.5
if not self.visible_axes:
self.x_axis.visible = False
self.y_axis.visible = False
# self.set_limits()
nsigma = 1
orientation = "x"
line_width = 1
visible = True
ebo = ErrorBarOverlay(
component=self,
orientation=orientation,
nsigma=nsigma,
line_width=line_width,
use_end_caps=False,
visible=visible,
)
self.overlays.append(ebo)
except TypeError:
# documentation auto doc hack
class IdeogramPointsInset:
pass
# ============= EOF ============================================= | en | 0.543247 | # =============================================================================== # Copyright 2014 <NAME> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # =============================================================================== # ============= enthought library imports ======================= # ============= standard library imports ======================== # ============= local library imports ========================== # def set_limits(self): # l, h = self.value.get_bounds() # self.value_range.low = 0 # self.value_range.high = h + 1 # l, h = self.index.get_bounds() # pad = (h - l) * 0.1 # self.index_range.low -= pad # self.index_range.high += pad # self.set_limits() # documentation auto doc hack # self.color = 'red' # self.set_limits() # documentation auto doc hack # ============= EOF ============================================= | 1.555115 | 2 |
python/dspace-metadata-to-bibtex.py | waingram/citation-parsing | 0 | 6617306 | <filename>python/dspace-metadata-to-bibtex.py
#!/usr/bin/env python3
"""
Convert DSpace metadata to BibTex
"""
import warnings
import xml.etree.ElementTree as ET
from pathlib import Path
from bibtexparser.bibdatabase import BibDatabase
from bibtexparser.bwriter import BibTexWriter
__author__ = "<NAME>"
__version__ = "0.1.0"
__license__ = "BSD3"
def _dc_value(doc, element, qualifier=None):
    """Return the text of the first matching <dcvalue> node, or None.

    `doc` is a parsed XML root; `element`/`qualifier` select the Dublin
    Core field.  A missing node and a node with empty text both yield
    None, so callers can treat "absent" and "blank" uniformly.
    """
    xpath = ".//dcvalue[@element='%s']" % element
    if qualifier is not None:
        xpath += "[@qualifier='%s']" % qualifier
    node = doc.find(xpath)
    return None if node is None else node.text


def extract_bibtex(item_path):
    """Build a BibTeX entry dict from a DSpace item directory.

    `item_path` is a Path containing metadata_thesis.xml and
    dublin_core.xml.  Returns an empty dict (after warning) when the item
    has no thesis degree level, i.e. it is not an ETD.
    """
    bibtex = {}

    thesis_doc = ET.parse((item_path / 'metadata_thesis.xml').as_posix()).getroot()
    level = _dc_value(thesis_doc, 'degree', 'level')
    if level is None:
        warnings.warn('%s is not an ETD' % item_path)
        return bibtex
    if level == 'masters' or level == 'undergraduate':
        bibtex['ENTRYTYPE'] = 'mastersthesis'
    elif level == 'doctoral':
        bibtex['ENTRYTYPE'] = 'phdthesis'

    doc = ET.parse((item_path / 'dublin_core.xml').as_posix()).getroot()

    author_ln = ''
    author = _dc_value(doc, 'contributor', 'author')
    if author is not None:
        # "Last, First ..." -> last name without the trailing comma.
        author_ln = author.split()[0].replace(',', '')
        bibtex['author'] = author

    title_fw = ''
    title = _dc_value(doc, 'title')
    if title is not None:
        title_fw = title.split()[0]
        bibtex['title'] = title

    school = _dc_value(doc, 'publisher')
    if school is not None:
        bibtex['school'] = school

    # here use 'URL' but could use 'howpublished' or something else
    uri = _dc_value(doc, 'identifier', 'uri')
    if uri is not None:
        bibtex['URL'] = uri

    year = ''
    issued = _dc_value(doc, 'date', 'issued')
    if issued is not None:
        year = issued.split('-')[0]
        bibtex['year'] = year

    # Citation key: <lastname><year><first-title-word>, lower-cased.
    bibtex['ID'] = (author_ln + year + title_fw).lower()
    return bibtex
def main():
    """Collect BibTeX entries for all thesis/dissertation items under the
    current directory and write them to ./vtedts.bib."""
    root = Path('.')
    output_path = './vtedts.bib'

    db = BibDatabase()
    # Theses first, then dissertations, matching the original ordering.
    for subdir in ('thesis', 'dissertation'):
        for item in (root / subdir).iterdir():
            if item.is_dir():
                db.entries.append(extract_bibtex(item))

    with open(output_path, 'w') as bibfile:
        bibfile.write(BibTexWriter().write(db))


if __name__ == "__main__":
    main()
| <filename>python/dspace-metadata-to-bibtex.py
#!/usr/bin/env python3
"""
Convert DSpace metadata to BibTex
"""
import warnings
import xml.etree.ElementTree as ET
from pathlib import Path
from bibtexparser.bibdatabase import BibDatabase
from bibtexparser.bwriter import BibTexWriter
__author__ = "<NAME>"
__version__ = "0.1.0"
__license__ = "BSD3"
def extract_bibtex(item_path):
bibtex = {}
# print("Processing file %s" % item_path.absolute())
thesis_metadata_file = item_path / 'metadata_thesis.xml'
tree = ET.parse(thesis_metadata_file.as_posix())
doc = tree.getroot()
md_node = doc.find(".//dcvalue[@element='degree'][@qualifier='level']")
if md_node is not None:
md_value = md_node.text
if md_value == 'masters' or md_value == 'undergraduate':
bibtex['ENTRYTYPE'] = 'mastersthesis'
elif md_value == 'doctoral':
bibtex['ENTRYTYPE'] = 'phdthesis'
else:
warnings.warn('%s is not an ETD' % item_path)
return bibtex
metadata_file = item_path / 'dublin_core.xml'
tree = ET.parse(metadata_file.as_posix())
doc = tree.getroot()
author_ln = ''
md_node = doc.find(".//dcvalue[@element='contributor'][@qualifier='author']")
if md_node is not None:
md_value = md_node.text
author_ln = md_value.split()[0].replace(',', '')
bibtex['author'] = md_value
title_fw = ''
md_node = doc.find(".//dcvalue[@element='title']")
if md_node is not None:
md_value = md_node.text
title_fw = md_value.split()[0]
bibtex['title'] = md_value
md_node = doc.find(".//dcvalue[@element='publisher']")
if md_node is not None:
md_value = md_node.text
bibtex['school'] = md_value
# here use 'URL' but could use 'howpublished' or something else
md_node = doc.find(".//dcvalue[@element='identifier'][@qualifier='uri']")
if md_node is not None:
md_value = md_node.text
bibtex['URL'] = md_value
year = ''
md_node = doc.find(".//dcvalue[@element='date'][@qualifier='issued']")
if md_node is not None:
md_value = md_node.text
year = md_value.split('-')[0]
bibtex['year'] = year
bibtex['ID'] = (author_ln + year + title_fw).lower()
return bibtex
def main():
""" """
path_files = '.'
bibtex_file = './vtedts.bib'
data = Path(path_files)
theses = [t for t in (data / 'thesis').iterdir() if t.is_dir()]
dissertations = [d for d in (data / 'dissertation').iterdir() if d.is_dir()]
db = BibDatabase()
for t in theses:
db.entries.append(extract_bibtex(t))
for d in dissertations:
db.entries.append(extract_bibtex(d))
writer = BibTexWriter()
with open(bibtex_file, 'w') as bibfile:
bibfile.write(writer.write(db))
if __name__ == "__main__":
""" """
main()
| en | 0.476559 | #!/usr/bin/env python3 Convert DSpace metadata to BibTex # print("Processing file %s" % item_path.absolute()) # here use 'URL' but could use 'howpublished' or something else | 2.637393 | 3 |
Python/AlgorithmImplementation/GridSearch/GridSearch.py | zseen/hackerrank-challenges | 0 | 6617307 | <reponame>zseen/hackerrank-challenges
import math
import os
import random
import re
import sys
def gridSearch(grid, pattern):
    """Return True if `pattern` occurs as a contiguous sub-grid of `grid`.

    Both arguments are lists of equal-width strings (rows of digits in the
    HackerRank problem).  Overlapping in-row matches are found with a
    zero-width lookahead, and every candidate start row is examined --
    the previous version gave up after the first candidate row.
    """
    gridHeight = len(grid)
    patternHeight = len(pattern)

    def _row_matches(patternRow, gridRow):
        # Column indices at which patternRow starts inside gridRow.
        # re.escape keeps the search literal even if a row were to contain
        # regex metacharacters.
        return {m.start()
                for m in re.finditer('(?=' + re.escape(patternRow) + ')', gridRow)}

    for startRow in range(gridHeight - patternHeight + 1):
        # Column offsets at which every pattern row seen so far matches.
        candidates = _row_matches(pattern[0], grid[startRow])
        for offset in range(1, patternHeight):
            if not candidates:
                break
            candidates &= _row_matches(pattern[offset], grid[startRow + offset])
        if candidates:
            # All pattern rows matched at a common column offset.
            return True
    return False
def getAllSubstringStartIndices(string, subString):
    """Return the set of indices where ``string`` begins inside ``subString``.

    A zero-width lookahead is used so that overlapping occurrences are all
    reported.
    """
    lookahead = '(?=' + string + ')'
    return {match.start() for match in re.finditer(lookahead, subString)}
def printGridSearch(grid, pattern):
    """Run gridSearch and print the verdict in HackerRank's YES/NO format."""
    print("YES" if gridSearch(grid, pattern) else "NO")
if __name__ == '__main__':
    # Redirect stdin so the script can be replayed against a saved test case.
    sys.stdin = open("TestCase5.txt")

    testCasesNum = int(input())

    for t_itr in range(testCasesNum):
        # Grid dimensions: rows, columns.
        RC = input().split()

        rowsInGrid = int(RC[0])

        columnsInGrid = int(RC[1])

        # Read the grid, one row string per line.
        gridList = []

        for _ in range(rowsInGrid):
            G_item = input()
            gridList.append(G_item)

        # Pattern dimensions: rows, columns.
        rc = input().split()

        linesNumInGrid = int(rc[0])

        digitsInGrid = int(rc[1])

        # Read the pattern, one row string per line.
        patternList = []

        for _ in range(linesNumInGrid):
            P_item = input()
            patternList.append(P_item)

        printGridSearch(gridList, patternList)
| import math
import os
import random
import re
import sys
def gridSearch(grid, pattern):
gridHeight = len(grid)
patternHeight = len(pattern)
for patternStartRowIndex in range(gridHeight - patternHeight + 1):
patternInGridStartIndexSet = getAllSubstringStartIndices(pattern[0], grid[patternStartRowIndex])
for patternRowOffset in range(len(pattern)):
patternInGridStartIndexSet = patternInGridStartIndexSet.intersection(getAllSubstringStartIndices(pattern[patternRowOffset], grid[patternStartRowIndex + patternRowOffset]))
if len(patternInGridStartIndexSet) == 0:
break
if len(patternInGridStartIndexSet) >= 1:
return True
else:
return False
def getAllSubstringStartIndices(string, subString):
patternInGridStartIndexSet = set(x.start() for x in re.finditer('(?=' + string + ')', subString))
return patternInGridStartIndexSet
def printGridSearch(grid, pattern):
result = gridSearch(grid, pattern)
if result:
print("YES")
else:
print("NO")
if __name__ == '__main__':
sys.stdin = open("TestCase5.txt")
testCasesNum = int(input())
for t_itr in range(testCasesNum):
RC = input().split()
rowsInGrid = int(RC[0])
columnsInGrid = int(RC[1])
gridList = []
for _ in range(rowsInGrid):
G_item = input()
gridList.append(G_item)
rc = input().split()
linesNumInGrid = int(rc[0])
digitsInGrid = int(rc[1])
patternList = []
for _ in range(linesNumInGrid):
P_item = input()
patternList.append(P_item)
printGridSearch(gridList, patternList) | none | 1 | 3.29413 | 3 | |
lib/solutions/CHK/checkout_solution.py | DPNT-Sourcecode/CHK-cblt01 | 0 | 6617308 | <gh_stars>0
# noinspection PyUnusedLocal
# skus = unicode string
def checkout(skus):
    """Price a basket for the supermarket checkout kata.

    `skus` is a string of single-letter product codes.  Returns the total
    price, or -1 if any character is not a known SKU.

    Offer types handled:
      * multi-buy price breaks (e.g. 3A for 130, 5A for 200),
      * "buy N, get one of the same item free" (F, U),
      * "buy N of X, get one Y free" (E->B, N->M, R->Q),
      * group offer: any 3 of S/T/X/Y/Z for 45.

    Fixes over the previous version: the per-product loop iterated over an
    anonymised '<KEY>' placeholder (a runtime KeyError); the F/U branch
    tested a stale `val` from the previous iteration; the unused
    `prod_list` local is gone.
    """
    product_dict = {
        'A': 50,
        'B': 30,
        'C': 20,
        'D': 15,
        'E': 40,
        'F': 10,
        'G': 20,
        'H': 10,
        'I': 35,
        'J': 60,
        'K': 70,
        'L': 90,
        'M': 15,
        'N': 40,
        'O': 10,
        'P': 50,
        'Q': 30,
        'R': 50,
        'S': 20,
        'T': 20,
        'U': 40,
        'V': 50,
        'W': 20,
        'X': 17,
        'Y': 20,
        'Z': 21,
    }
    # (size, price) multi-buy offers; the largest bundle is applied first.
    special_offers1 = {
        'A': ((3, 130), (5, 200)),
        'B': ((2, 45),),
        'H': ((5, 45), (10, 80)),
        'K': ((2, 120),),
        'P': ((5, 200),),
        'Q': ((3, 80),),
        'V': ((2, 90), (3, 130)),
    }
    # key: (paid, free) -- e.g. every 3 F's, only 2 are paid for.
    special_offers2 = {
        'F': (2, 1),
        'U': (3, 1)
    }
    # key: (needed, freebie) -- every `needed` of key earns one free `freebie`.
    special_offers3 = {
        'E': (2, 'B'),
        'N': (3, 'M'),
        'R': (3, 'Q')
    }

    product_amounts = {sku: 0 for sku in product_dict}
    for sku in skus:
        if sku not in product_amounts:
            return -1  # unknown SKU invalidates the whole basket
        product_amounts[sku] += 1

    cost = 0
    # E/N/R come first so their freebies are deducted before B/M/Q are
    # priced.  S, T, X, Y, Z are excluded: the group offer prices them.
    for key in 'ENRABCDFGHIJKLMOPQUVW':
        count = product_amounts[key]
        if key in special_offers1:
            if count >= 0:
                # Apply the largest bundle first, then smaller ones, then
                # charge any remainder at the single-item price.
                for size, price in reversed(special_offers1[key]):
                    cost += (count // size) * price
                    count %= size
                cost += count * product_dict[key]
        elif key in special_offers2:
            if count >= 0:
                paid, free = special_offers2[key]
                bundle = paid + free
                cost += (count // bundle) * paid * product_dict[key]
                cost += (count % bundle) * product_dict[key]
        elif key in special_offers3:
            needed, freebie = special_offers3[key]
            # May drive the freebie's count negative; the `count >= 0`
            # guards elsewhere then charge nothing for that product.
            product_amounts[freebie] -= count // needed
            cost += count * product_dict[key]
        else:
            if count >= 0:
                cost += count * product_dict[key]

    # Group offer: any 3 of S/T/X/Y/Z for 45.
    group_total = sum(product_amounts[key] for key in 'STXYZ')
    cost += (group_total // 3) * 45
    rest = group_total % 3
    # Charge the leftovers cheapest-first (X=17) so the 45-for-3 groups
    # absorb the most expensive items, favouring the customer.
    for key in 'XSTYZ':
        have = product_amounts[key]
        if rest <= have:
            cost += product_dict[key] * rest
            break
        cost += product_dict[key] * have
        rest -= have
    return cost
| # noinspection PyUnusedLocal
# skus = unicode string
def checkout(skus):
product_dict = {
'A': 50,
'B': 30,
'C': 20,
'D': 15,
'E': 40,
'F': 10,
'G': 20,
'H': 10,
'I': 35,
'J': 60,
'K': 70,
'L': 90,
'M': 15,
'N': 40,
'O': 10,
'P': 50,
'Q': 30,
'R': 50,
'S': 20,
'T': 20,
'U': 40,
'V': 50,
'W': 20,
'X': 17,
'Y': 20,
'Z': 21,
}
special_offers1 = {
'A': ((3,130),(5,200)),
'B': ((2,45),),
'H': ((5,45),(10,80)),
'K': ((2,120),),
'P': ((5,200),),
'Q': ((3,80),),
'V': ((2,90),(3,130)),
}
special_offers2 = {
'F': (2,1),
'U': (3,1)
}
special_offers3 = {
'E': (2,'B'),
'N': (3,'M'),
'R': (3,'Q')
}
cost = 0
product_amounts = {
'A': 0,
'B': 0,
'C': 0,
'D': 0,
'E': 0,
'F': 0,
'G': 0,
'H': 0,
'I': 0,
'J': 0,
'K': 0,
'L': 0,
'M': 0,
'N': 0,
'O': 0,
'P': 0,
'Q': 0,
'R': 0,
'S': 0,
'T': 0,
'U': 0,
'V': 0,
'W': 0,
'X': 0,
'Y': 0,
'Z': 0,
}
prod_list = ['A','C','D','E','B','F']
for i in range(len(skus)):
curr = skus[i]
if curr not in product_dict:
return -1
if curr not in product_amounts:
product_amounts[curr] = 1
else:
product_amounts[curr] += 1
for key in '<KEY>':
if key in special_offers1:
val = product_amounts[key]
if val>=0:
offers = special_offers1[key]
#print(offers)
for i in range(len(offers)-1,-1,-1):
cost += (val // offers[i][0])*offers[i][1]
val = val % offers[i][0]
cost += val*product_dict[key]
elif key in special_offers2:
if val>=0:
val = product_amounts[key]
offers = special_offers2[key]
cost += (val//(offers[0]+1)) * offers[0]*product_dict[key]
val = val%(offers[0]+1)
cost += val*product_dict[key]
elif key in special_offers3:
key_to_reduce = special_offers3[key][1]
multiple = special_offers3[key][0]
product_amounts[key_to_reduce] -= product_amounts[key]//multiple
cost += product_amounts[key] * product_dict[key]
else:
val = product_amounts[key]
if val>=0:
cost += val*product_dict[key]
special_offer4_num = 0
for key in 'ZYTSX':
special_offer4_num += product_amounts[key]
cost += (special_offer4_num // 3) * 45
rest = special_offer4_num % 3
#while rest > 0:
for key in 'XSTYZ':
curr = product_amounts[key]
if rest<=curr:
cost += product_dict[key] * rest
break
else:
cost += product_dict[key] * curr
rest -= curr
return cost | en | 0.14101 | # noinspection PyUnusedLocal # skus = unicode string #print(offers) #while rest > 0: | 2.216909 | 2 |
test_function.py | alex/linehaul-cloud-function | 8 | 6617309 | from pathlib import Path
from importlib import reload
import pretend
import pytest
import main
# Fixed fake configuration values shared by the parametrized tests below.
GCP_PROJECT = "my-gcp-project"
BIGQUERY_DATASET = "my-bigquery-dataset"
BIGQUERY_SIMPLE_TABLE = "my-simple-table"
BIGQUERY_DOWNLOAD_TABLE = "my-download-table"
RESULT_BUCKET = "my-result-bucket"
@pytest.mark.parametrize(
"bigquery_dataset, expected_from_string_calls",
[
(
"my-bigquery-dataset",
[pretend.call("my-bigquery-dataset", default_project=GCP_PROJECT)],
),
(
"my-bigquery-dataset some-other-dataset",
[
pretend.call("my-bigquery-dataset", default_project=GCP_PROJECT),
pretend.call("some-other-dataset", default_project=GCP_PROJECT),
],
),
],
)
@pytest.mark.parametrize(
"log_filename, table_name, expected",
[
(
"downloads-2021-01-07-20-55-2021-01-07T20-55-00.000-B8Hs_G6d6xN61En2ypwk.log.gz",
BIGQUERY_DOWNLOAD_TABLE,
b'{"timestamp": "2021-01-07 20:54:54 +00:00", "url": "/packages/f7/12/ec3f2e203afa394a149911729357aa48affc59c20e2c1c8297a60f33f133/threadpoolctl-2.1.0-py3-none-any.whl", "project": "threadpoolctl", "file": {"filename": "threadpoolctl-2.1.0-py3-none-any.whl", "project": "threadpoolctl", "version": "2.1.0", "type": "bdist_wheel"}, "tls_protocol": "TLSv1.2", "tls_cipher": "ECDHE-RSA-AES128-GCM-SHA256", "country_code": "US", "details": {"installer": {"name": "pip", "version": "20.1.1"}, "python": "3.7.9", "implementation": {"name": "CPython", "version": "3.7.9"}, "distro": {"name": "Debian GNU/Linux", "version": "9", "id": "stretch", "libc": {"lib": "glibc", "version": "2.24"}}, "system": {"name": "Linux", "release": "4.15.0-112-generic"}, "cpu": "x86_64", "openssl_version": "OpenSSL 1.1.0l 10 Sep 2019", "setuptools_version": "47.1.0", "ci": null}}\n'
b'{"timestamp": "2021-01-07 20:54:54 +00:00", "url": "/packages/cd/f9/8fad70a3bd011a6be7c5c6067278f006a25341eb39d901fbda307e26804c/django_crum-0.7.9-py2.py3-none-any.whl", "project": "django-crum", "file": {"filename": "django_crum-0.7.9-py2.py3-none-any.whl", "project": "django-crum", "version": "0.7.9", "type": "bdist_wheel"}, "tls_protocol": "TLSv1.2", "tls_cipher": "ECDHE-RSA-AES128-GCM-SHA256", "country_code": "US", "details": {"installer": {"name": "pip", "version": "20.0.2"}, "python": "3.8.5", "implementation": {"name": "CPython", "version": "3.8.5"}, "distro": {"name": "Ubuntu", "version": "16.04", "id": "xenial", "libc": {"lib": "glibc", "version": "2.23"}}, "system": {"name": "Linux", "release": "4.4.0-1113-aws"}, "cpu": "x86_64", "openssl_version": "OpenSSL 1.0.2g 1 Mar 2016", "setuptools_version": "44.1.0", "ci": null}}\n',
),
(
"simple-2021-01-07-20-55-2021-01-07T20-55-00.000-3wuB00t9tqgbGLFI2fSI.log.gz",
BIGQUERY_SIMPLE_TABLE,
b'{"timestamp": "2021-01-07 20:54:52 +00:00", "url": "/simple/azureml-model-management-sdk/", "project": "azureml-model-management-sdk", "tls_protocol": "TLSv1.3", "tls_cipher": "AES256-GCM", "country_code": "US", "details": {"installer": {"name": "pip", "version": "20.0.2"}, "python": "3.7.5", "implementation": {"name": "CPython", "version": "3.7.5"}, "distro": {"name": "Ubuntu", "version": "18.04", "id": "bionic", "libc": {"lib": "glibc", "version": "2.27"}}, "system": {"name": "Linux", "release": "4.15.0-1092-azure"}, "cpu": "x86_64", "openssl_version": "OpenSSL 1.1.1 11 Sep 2018", "setuptools_version": "45.2.0", "ci": null}}\n'
b'{"timestamp": "2021-01-07 20:54:52 +00:00", "url": "/simple/pyrsistent/", "project": "pyrsistent", "tls_protocol": "TLSv1.3", "tls_cipher": "AES256-GCM", "country_code": "US", "details": {"installer": {"name": "pip", "version": "20.0.2"}, "python": "3.8.5", "implementation": {"name": "CPython", "version": "3.8.5"}, "distro": {"name": "Ubuntu", "version": "20.04", "id": "focal", "libc": {"lib": "glibc", "version": "2.31"}}, "system": {"name": "Linux", "release": "5.4.72-flatcar"}, "cpu": "x86_64", "openssl_version": "OpenSSL 1.1.1f 31 Mar 2020", "setuptools_version": "45.2.0", "ci": true}}\n',
),
],
)
def test_function(
    monkeypatch,
    log_filename,
    table_name,
    expected,
    bigquery_dataset,
    expected_from_string_calls,
):
    """End-to-end check of main.process_fastly_log with stubbed GCP clients.

    The google-cloud storage/bigquery clients are replaced with `pretend`
    stubs, a fixture log is served in place of a GCS download, and the
    NDJSON payload handed to BigQuery is captured and compared against
    `expected`.  Parametrized over dataset lists and log fixtures.
    """
    monkeypatch.setenv("GCP_PROJECT", GCP_PROJECT)
    monkeypatch.setenv("BIGQUERY_DATASET", bigquery_dataset)
    monkeypatch.setenv("BIGQUERY_SIMPLE_TABLE", BIGQUERY_SIMPLE_TABLE)
    monkeypatch.setenv("BIGQUERY_DOWNLOAD_TABLE", BIGQUERY_DOWNLOAD_TABLE)
    monkeypatch.setenv("RESULT_BUCKET", RESULT_BUCKET)
    # Reload so module-level configuration in `main` re-reads the env vars.
    reload(main)

    def _download_to_file(file_handler):
        # Serve the on-disk fixture log instead of downloading from GCS.
        with open(Path(".") / "fixtures" / log_filename, "rb") as f:
            file_handler.write(f.read())

    blob_stub = pretend.stub(
        download_to_file=_download_to_file, delete=pretend.call_recorder(lambda: None),
    )
    bucket_stub = pretend.stub(get_blob=pretend.call_recorder(lambda a: blob_stub),)
    storage_client_stub = pretend.stub(
        bucket=pretend.call_recorder(lambda a: bucket_stub),
    )
    monkeypatch.setattr(
        main, "storage", pretend.stub(Client=lambda: storage_client_stub)
    )

    table_stub = pretend.stub()
    dataset_stub = pretend.stub(table=pretend.call_recorder(lambda a: table_stub))
    load_job_stub = pretend.stub(
        result=pretend.call_recorder(lambda: None), output_rows=pretend.stub(),
    )

    def _load_table_from_file(fh, *a, **kw):
        # Capture the NDJSON that would be uploaded to BigQuery so it can
        # be compared against `expected` below.
        fh.flush()
        with open(fh.name, "rb") as f:
            load_job_stub._result = f.read()
        return load_job_stub

    bigquery_client_stub = pretend.stub(
        load_table_from_file=pretend.call_recorder(_load_table_from_file),
    )
    job_config_stub = pretend.stub()
    dataset_reference_stub = pretend.stub(
        from_string=pretend.call_recorder(lambda *a, **kw: dataset_stub)
    )
    monkeypatch.setattr(
        main,
        "bigquery",
        pretend.stub(
            Client=lambda: bigquery_client_stub,
            LoadJobConfig=lambda: job_config_stub,
            SourceFormat=pretend.stub(NEWLINE_DELIMITED_JSON=pretend.stub()),
            dataset=pretend.stub(DatasetReference=dataset_reference_stub),
        ),
    )

    # Minimal GCS event payload for the function under test.
    data = {
        "name": log_filename,
        "bucket": "my-bucket",
    }
    context = pretend.stub()

    main.process_fastly_log(data, context)

    # The source bucket is read once; the result bucket once per dataset.
    assert storage_client_stub.bucket.calls == [pretend.call("my-bucket")] + [
        pretend.call(RESULT_BUCKET),
    ] * len(expected_from_string_calls)
    assert bucket_stub.get_blob.calls == [pretend.call(log_filename)]
    assert dataset_reference_stub.from_string.calls == expected_from_string_calls
    assert bigquery_client_stub.load_table_from_file.calls == [
        pretend.call(
            bigquery_client_stub.load_table_from_file.calls[0].args[0],  # shh
            table_stub,
            job_id_prefix="linehaul_file_downloads",
            location="US",
            job_config=job_config_stub,
            rewind=True,
        )
    ] * len(expected_from_string_calls)
    assert dataset_stub.table.calls == [pretend.call(table_name)] * len(
        expected_from_string_calls
    )
    # The processed log must be deleted from the source bucket exactly once.
    assert blob_stub.delete.calls == [pretend.call()]
    assert load_job_stub.result.calls == [pretend.call()] * len(
        expected_from_string_calls
    )
    assert load_job_stub._result == expected
| from pathlib import Path
from importlib import reload
import pretend
import pytest
import main
GCP_PROJECT = "my-gcp-project"
BIGQUERY_DATASET = "my-bigquery-dataset"
BIGQUERY_SIMPLE_TABLE = "my-simple-table"
BIGQUERY_DOWNLOAD_TABLE = "my-download-table"
RESULT_BUCKET = "my-result-bucket"
@pytest.mark.parametrize(
"bigquery_dataset, expected_from_string_calls",
[
(
"my-bigquery-dataset",
[pretend.call("my-bigquery-dataset", default_project=GCP_PROJECT)],
),
(
"my-bigquery-dataset some-other-dataset",
[
pretend.call("my-bigquery-dataset", default_project=GCP_PROJECT),
pretend.call("some-other-dataset", default_project=GCP_PROJECT),
],
),
],
)
@pytest.mark.parametrize(
"log_filename, table_name, expected",
[
(
"downloads-2021-01-07-20-55-2021-01-07T20-55-00.000-B8Hs_G6d6xN61En2ypwk.log.gz",
BIGQUERY_DOWNLOAD_TABLE,
b'{"timestamp": "2021-01-07 20:54:54 +00:00", "url": "/packages/f7/12/ec3f2e203afa394a149911729357aa48affc59c20e2c1c8297a60f33f133/threadpoolctl-2.1.0-py3-none-any.whl", "project": "threadpoolctl", "file": {"filename": "threadpoolctl-2.1.0-py3-none-any.whl", "project": "threadpoolctl", "version": "2.1.0", "type": "bdist_wheel"}, "tls_protocol": "TLSv1.2", "tls_cipher": "ECDHE-RSA-AES128-GCM-SHA256", "country_code": "US", "details": {"installer": {"name": "pip", "version": "20.1.1"}, "python": "3.7.9", "implementation": {"name": "CPython", "version": "3.7.9"}, "distro": {"name": "Debian GNU/Linux", "version": "9", "id": "stretch", "libc": {"lib": "glibc", "version": "2.24"}}, "system": {"name": "Linux", "release": "4.15.0-112-generic"}, "cpu": "x86_64", "openssl_version": "OpenSSL 1.1.0l 10 Sep 2019", "setuptools_version": "47.1.0", "ci": null}}\n'
b'{"timestamp": "2021-01-07 20:54:54 +00:00", "url": "/packages/cd/f9/8fad70a3bd011a6be7c5c6067278f006a25341eb39d901fbda307e26804c/django_crum-0.7.9-py2.py3-none-any.whl", "project": "django-crum", "file": {"filename": "django_crum-0.7.9-py2.py3-none-any.whl", "project": "django-crum", "version": "0.7.9", "type": "bdist_wheel"}, "tls_protocol": "TLSv1.2", "tls_cipher": "ECDHE-RSA-AES128-GCM-SHA256", "country_code": "US", "details": {"installer": {"name": "pip", "version": "20.0.2"}, "python": "3.8.5", "implementation": {"name": "CPython", "version": "3.8.5"}, "distro": {"name": "Ubuntu", "version": "16.04", "id": "xenial", "libc": {"lib": "glibc", "version": "2.23"}}, "system": {"name": "Linux", "release": "4.4.0-1113-aws"}, "cpu": "x86_64", "openssl_version": "OpenSSL 1.0.2g 1 Mar 2016", "setuptools_version": "44.1.0", "ci": null}}\n',
),
(
"simple-2021-01-07-20-55-2021-01-07T20-55-00.000-3wuB00t9tqgbGLFI2fSI.log.gz",
BIGQUERY_SIMPLE_TABLE,
b'{"timestamp": "2021-01-07 20:54:52 +00:00", "url": "/simple/azureml-model-management-sdk/", "project": "azureml-model-management-sdk", "tls_protocol": "TLSv1.3", "tls_cipher": "AES256-GCM", "country_code": "US", "details": {"installer": {"name": "pip", "version": "20.0.2"}, "python": "3.7.5", "implementation": {"name": "CPython", "version": "3.7.5"}, "distro": {"name": "Ubuntu", "version": "18.04", "id": "bionic", "libc": {"lib": "glibc", "version": "2.27"}}, "system": {"name": "Linux", "release": "4.15.0-1092-azure"}, "cpu": "x86_64", "openssl_version": "OpenSSL 1.1.1 11 Sep 2018", "setuptools_version": "45.2.0", "ci": null}}\n'
b'{"timestamp": "2021-01-07 20:54:52 +00:00", "url": "/simple/pyrsistent/", "project": "pyrsistent", "tls_protocol": "TLSv1.3", "tls_cipher": "AES256-GCM", "country_code": "US", "details": {"installer": {"name": "pip", "version": "20.0.2"}, "python": "3.8.5", "implementation": {"name": "CPython", "version": "3.8.5"}, "distro": {"name": "Ubuntu", "version": "20.04", "id": "focal", "libc": {"lib": "glibc", "version": "2.31"}}, "system": {"name": "Linux", "release": "5.4.72-flatcar"}, "cpu": "x86_64", "openssl_version": "OpenSSL 1.1.1f 31 Mar 2020", "setuptools_version": "45.2.0", "ci": true}}\n',
),
],
)
def test_function(
monkeypatch,
log_filename,
table_name,
expected,
bigquery_dataset,
expected_from_string_calls,
):
monkeypatch.setenv("GCP_PROJECT", GCP_PROJECT)
monkeypatch.setenv("BIGQUERY_DATASET", bigquery_dataset)
monkeypatch.setenv("BIGQUERY_SIMPLE_TABLE", BIGQUERY_SIMPLE_TABLE)
monkeypatch.setenv("BIGQUERY_DOWNLOAD_TABLE", BIGQUERY_DOWNLOAD_TABLE)
monkeypatch.setenv("RESULT_BUCKET", RESULT_BUCKET)
reload(main)
def _download_to_file(file_handler):
with open(Path(".") / "fixtures" / log_filename, "rb") as f:
file_handler.write(f.read())
blob_stub = pretend.stub(
download_to_file=_download_to_file, delete=pretend.call_recorder(lambda: None),
)
bucket_stub = pretend.stub(get_blob=pretend.call_recorder(lambda a: blob_stub),)
storage_client_stub = pretend.stub(
bucket=pretend.call_recorder(lambda a: bucket_stub),
)
monkeypatch.setattr(
main, "storage", pretend.stub(Client=lambda: storage_client_stub)
)
table_stub = pretend.stub()
dataset_stub = pretend.stub(table=pretend.call_recorder(lambda a: table_stub))
load_job_stub = pretend.stub(
result=pretend.call_recorder(lambda: None), output_rows=pretend.stub(),
)
def _load_table_from_file(fh, *a, **kw):
fh.flush()
with open(fh.name, "rb") as f:
load_job_stub._result = f.read()
return load_job_stub
bigquery_client_stub = pretend.stub(
load_table_from_file=pretend.call_recorder(_load_table_from_file),
)
job_config_stub = pretend.stub()
dataset_reference_stub = pretend.stub(
from_string=pretend.call_recorder(lambda *a, **kw: dataset_stub)
)
monkeypatch.setattr(
main,
"bigquery",
pretend.stub(
Client=lambda: bigquery_client_stub,
LoadJobConfig=lambda: job_config_stub,
SourceFormat=pretend.stub(NEWLINE_DELIMITED_JSON=pretend.stub()),
dataset=pretend.stub(DatasetReference=dataset_reference_stub),
),
)
data = {
"name": log_filename,
"bucket": "my-bucket",
}
context = pretend.stub()
main.process_fastly_log(data, context)
assert storage_client_stub.bucket.calls == [pretend.call("my-bucket")] + [
pretend.call(RESULT_BUCKET),
] * len(expected_from_string_calls)
assert bucket_stub.get_blob.calls == [pretend.call(log_filename)]
assert dataset_reference_stub.from_string.calls == expected_from_string_calls
assert bigquery_client_stub.load_table_from_file.calls == [
pretend.call(
bigquery_client_stub.load_table_from_file.calls[0].args[0], # shh
table_stub,
job_id_prefix="linehaul_file_downloads",
location="US",
job_config=job_config_stub,
rewind=True,
)
] * len(expected_from_string_calls)
assert dataset_stub.table.calls == [pretend.call(table_name)] * len(
expected_from_string_calls
)
assert blob_stub.delete.calls == [pretend.call()]
assert load_job_stub.result.calls == [pretend.call()] * len(
expected_from_string_calls
)
assert load_job_stub._result == expected
| none | 1 | 1.968711 | 2 | |
Chapter 1/tower_sample.py | indrag49/Computational-Stat-Mech | 19 | 6617310 | import random
def tower_sample(Pi):
    """Sample a 1-based index k with probability Pi[k-1] / sum(Pi).

    Tower sampling: stack the weights into a cumulative "tower" and see in
    which segment a uniform draw over the total height lands.

    Parameters
    ----------
    Pi : sequence of non-negative weights (need not be normalized).

    Returns
    -------
    int
        Index in 1..len(Pi) (1-based, matching the original interface), or
        None if every weight is zero.
    """
    Upsilon = random.uniform(0, sum(Pi))
    cumulative = 0.0
    for k, p in enumerate(Pi, start=1):
        cumulative += p
        # '<=' (not '<') so a draw landing exactly on a segment boundary is
        # still assigned; the original strict comparisons fell through and
        # returned None in that case (random.uniform can return the low
        # endpoint, so Upsilon == 0 is possible).  Require p > 0 so a
        # zero-weight segment can never be selected.
        if p > 0 and Upsilon <= cumulative:
            return k
    # Guard against floating-point round-off leaving Upsilon a hair above
    # the final cumulative sum: fall back to the last positive-weight index.
    for k in range(len(Pi), 0, -1):
        if Pi[k - 1] > 0:
            return k
    return None
| import random
def tower_sample(Pi):
L=[0]
K=len(Pi)
for l in range(1, K+1): L+=[L[l-1]+Pi[l-1], ]
Upsilon=random.uniform(0, L[-1])
for k in range(1, len(L)+1):
if (Upsilon>L[k-1] and Upsilon<L[k]):
return k
| none | 1 | 3.430047 | 3 | |
vmoe/train/train_state_test.py | google-research/vmoe | 205 | 6617311 | # Copyright 2022 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for train_state."""
from absl.testing import absltest
from vmoe import partitioning
from vmoe.train import train_state
PartitionSpec = partitioning.PartitionSpec
class TrainStateTreeAxisResourcesTest(absltest.TestCase):
    """Tests mapping axis-resource regexes onto a TrainState pytree."""

    def test_train_state(self):
        # Minimal TrainState: the apply_fn/tx lambdas are stand-ins, only
        # the pytree structure matters for axis-resource matching.
        params = {'a': 1, 'b': 2, 'c': 3}
        rngs = {'dropout': None}
        state = train_state.TrainState.create(
            apply_fn=lambda x: x,
            params=params,
            tx=lambda x: x,
            rngs=rngs)
        # Regexes match leaves by their path; unmatched leaves ('b') fall
        # back to an empty PartitionSpec.
        output = partitioning.tree_axis_resources_from_regexes(
            tree=state, axis_resources_regexes=[
                ('.*/a$', ('expert',)),
                ('.*/c$', (('expert', 'width'),)),
            ])
        # Fix: the original asserted against `state.TrainState`, an
        # attribute lookup on the *instance*, which raises AttributeError.
        # The class lives on the imported module `train_state`.
        self.assertIsInstance(output, train_state.TrainState)
        self.assertEqual(output.params['a'], PartitionSpec('expert'))
        self.assertEqual(output.params['b'], PartitionSpec())
        self.assertEqual(output.params['c'], PartitionSpec(('expert', 'width')))
# Allow running this test module directly: `python train_state_test.py`.
if __name__ == '__main__':
    absltest.main()
| # Copyright 2022 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for train_state."""
from absl.testing import absltest
from vmoe import partitioning
from vmoe.train import train_state
PartitionSpec = partitioning.PartitionSpec
class TrainStateTreeAxisResourcesTest(absltest.TestCase):
def test_train_state(self):
params = {'a': 1, 'b': 2, 'c': 3}
rngs = {'dropout': None}
state = train_state.TrainState.create(
apply_fn=lambda x: x,
params=params,
tx=lambda x: x,
rngs=rngs)
output = partitioning.tree_axis_resources_from_regexes(
tree=state, axis_resources_regexes=[
('.*/a$', ('expert',)),
('.*/c$', (('expert', 'width'),)),
])
self.assertIsInstance(output, state.TrainState)
self.assertEqual(output.params['a'], PartitionSpec('expert'))
self.assertEqual(output.params['b'], PartitionSpec())
self.assertEqual(output.params['c'], PartitionSpec(('expert', 'width')))
if __name__ == '__main__':
absltest.main()
| en | 0.854934 | # Copyright 2022 Google LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Tests for train_state. | 2.046946 | 2 |
common/chrome_uninstall_form.py | CoderRushil/fyle-www-tests | 0 | 6617312 | from time import sleep
import logging
logger = logging.getLogger(__name__)
def submit_chrome_uninstall_form(browser, email=None, feedback=None):
    """Fill in and submit the Chrome-extension uninstall feedback form.

    Arguments left as None are simply not typed in, which lets callers
    exercise the form's client-side validation.
    """
    form_xpath = "//form[contains(@id, 'send-feedback')]"
    # Scroll the form into view before interacting with it.
    browser.find(xpath=form_xpath, scroll=True)
    fields = [
        (email, form_xpath + "//input[@name='email']"),
        (feedback, form_xpath + "//textarea[@name='extension_feedback']"),
    ]
    for value, field_xpath in fields:
        if value:
            browser.input(xpath=field_xpath, keys=value)
    browser.click(xpath=form_xpath + "//button[text()='Send Feedback']")
    # Give the page a moment to process the submission.
    sleep(2)
def assert_required_fields(browser):
    """Submitting an empty form must flag both email and feedback as required."""
    submit_chrome_uninstall_form(browser)
    email_label = browser.find(
        xpath="//label[@for='feedback-email'][@class='error']")
    feedback_label = browser.find(
        xpath="//label[@for='uninstall-description'][@class='error']")
    assert email_label and email_label.is_displayed(), 'No error displayed for missing email'
    assert feedback_label and feedback_label.is_displayed(), 'No error displayed for missing feedback'
def assert_invalid_email(browser):
    """An email that fails format validation must show an inline error label."""
    submit_chrome_uninstall_form(browser, email="test")
    error_label = browser.find(
        xpath="//label[@for='feedback-email'][@class='error']")
    assert error_label and error_label.is_displayed(), 'No error displayed for invalid email'
def assert_non_business_email(browser):
    """A personal (non-business) email address must be rejected with an error."""
    submit_chrome_uninstall_form(browser, email="<EMAIL>", feedback="test feedback")
    error_label = browser.find(
        xpath="//label[@for='feedback-email'][@class='error email-error']")
    assert error_label and error_label.is_displayed(), 'No error displayed for non business email'
def assert_success_chrome_uninstall_form(browser):
    """A valid submission must surface the thank-you confirmation message."""
    submit_chrome_uninstall_form(browser, email="<EMAIL>", feedback="test feedback")
    # Scroll back up so the confirmation paragraph can be located on screen.
    browser.scroll_up_or_down(-100)
    confirmation = browser.find(
        xpath="//p[contains(@class, 'feedback-submit')]", scroll=True)
    assert confirmation and confirmation.is_displayed(), 'Thank you message is not displayed'
| from time import sleep
import logging
logger = logging.getLogger(__name__)
def submit_chrome_uninstall_form(browser, email=None, feedback=None):
browser.find(xpath="//form[contains(@id, 'send-feedback')]", scroll=True)
if email:
browser.input(xpath="//form[contains(@id, 'send-feedback')]//input[@name='email']", keys=email)
if feedback:
browser.input(xpath="//form[contains(@id, 'send-feedback')]//textarea[@name='extension_feedback']", keys=feedback)
browser.click(xpath="//form[contains(@id, 'send-feedback')]//button[text()='Send Feedback']")
sleep(2)
def assert_required_fields(browser):
submit_chrome_uninstall_form(browser)
email_error = browser.find(xpath="//label[@for='feedback-email'][@class='error']")
feedback_error = browser.find(xpath="//label[@for='uninstall-description'][@class='error']")
assert email_error and email_error.is_displayed(), 'No error displayed for missing email'
assert feedback_error and feedback_error.is_displayed(), 'No error displayed for missing feedback'
def assert_invalid_email(browser):
submit_chrome_uninstall_form(browser, email="test")
email_error = browser.find(xpath="//label[@for='feedback-email'][@class='error']")
assert email_error and email_error.is_displayed(), 'No error displayed for invalid email'
def assert_non_business_email(browser):
submit_chrome_uninstall_form(browser, email="<EMAIL>", feedback="test feedback")
email_error = browser.find(xpath="//label[@for='feedback-email'][@class='error email-error']")
assert email_error and email_error.is_displayed(), 'No error displayed for non business email'
def assert_success_chrome_uninstall_form(browser):
submit_chrome_uninstall_form(browser, email="<EMAIL>", feedback="test feedback")
browser.scroll_up_or_down(-100)
ty_message = browser.find(xpath="//p[contains(@class, 'feedback-submit')]", scroll=True)
assert ty_message and ty_message.is_displayed(), 'Thank you message is not displayed'
| none | 1 | 2.56945 | 3 | |
mpas_analysis/ocean/compute_transects_subtask.py | ytakano3/MPAS-Analysis | 43 | 6617313 | # This software is open source software available under the BSD-3 license.
#
# Copyright (c) 2020 Triad National Security, LLC. All rights reserved.
# Copyright (c) 2020 Lawrence Livermore National Security, LLC. All rights
# reserved.
# Copyright (c) 2020 UT-Battelle, LLC. All rights reserved.
#
# Additional copyright and license information can be found in the LICENSE file
# distributed with this code, or at
# https://raw.githubusercontent.com/MPAS-Dev/MPAS-Analysis/master/LICENSE
from __future__ import absolute_import, division, print_function, \
unicode_literals
import numpy
import xarray as xr
import os
from collections import OrderedDict
from pyremap import PointCollectionDescriptor
from mpas_analysis.shared.climatology import RemapMpasClimatologySubtask
from mpas_analysis.shared.io.utility import build_config_full_path, \
make_directories
from mpas_analysis.shared.io import write_netcdf
from mpas_analysis.ocean.utility import compute_zmid
from mpas_analysis.shared.interpolation import interp_1d
class ComputeTransectsSubtask(RemapMpasClimatologySubtask):  # {{{
    """
    A subtask for remapping climatologies to transect points

    Attributes
    ----------
    obsDatasets : TransectsObservations
        A dictionary of observational datasets

    verticalComparisonGridName : {'obs', 'mpas'} or any str
        The vertical grid name on which to compare MPAS data with
        observations. 'obs' indicates the locations of the original
        observations; 'mpas' is the vertical locations of MPAS points,
        remapped to the observation latitude/longitude. If any other,
        string, verticalComparisonGrid should be a 1D numpy array and this
        name should be a useful (and unique) description of that grid.

    verticalComparisonGrid : 1D numpy array
        The vertical grid on which to compare MPAS data with observations
        if ``verticalComparisonGridName`` is not 'obs' or 'mpas'. The
        values should be elevations (in m, typically negative).

    transectNumber : ``xarray.DataArray``
        For each point in the point collection after remapping, the index of
        the transect it belongs to (so that remapped results can be separated
        back into individual transects for plotting)

    transectCollectionName : str
        A name that describes the collection of transects (e.g. the name
        of the collection of observations) used to name the
        destination "mesh" for regridding

    collectionDescriptor : ``PointCollectionDescriptor``
        The mesh descriptor for the collection of all points in all transects,
        used for remapping

    zMid : ``xarray.DataArray``
        Vertical coordinate at the center of layers, used to interpolate to
        reference depths
    """
    # Authors
    # -------
    # <NAME>

    def __init__(self, mpasClimatologyTask, parentTask, climatologyName,
                 transectCollectionName, variableList, seasons, obsDatasets,
                 verticalComparisonGridName='obs', verticalComparisonGrid=None,
                 subtaskName='remapTransects'):
        # {{{
        '''
        Construct the analysis task and adds it as a subtask of the
        ``parentTask``.

        Parameters
        ----------
        mpasClimatologyTask : ``MpasClimatologyTask``
            The task that produced a climatology to be remapped and plotted
            as a transect

        parentTask : ``AnalysisTask``
            The parent task, used to get the ``taskName``, ``config`` and
            ``componentName``

        climatologyName : str
            A name that describes the climatology (e.g. a short version of
            the important field(s) in the climatology) used to name the
            subdirectories for each stage of the climatology

        transectCollectionName : str
            A name that describes the collection of transects (e.g. the name
            of the collection of observations) used to name the
            destination "mesh" for regridding

        variableList : list of str
            A list of variable names in ``timeSeriesStatsMonthly`` to be
            included in the climatologies

        seasons : list of str
            A list of seasons (keys in ``shared.constants.monthDictionary``)
            to be computed or ['none'] (not ``None``) if only monthly
            climatologies are needed.

        obsDatasets : TransectsObservations
            A dictionary of observational datasets

        verticalComparisonGridName : {'obs', 'mpas'} or any str, optional
            The vertical grid name on which to compare MPAS data with
            observations. 'obs' indicates the locations of the original
            observations; 'mpas' is the vertical locations of MPAS points,
            remapped to the observation latitude/longitude. If any other,
            string, verticalComparisonGrid should be a 1D numpy array and this
            name should be a useful (and unique) description of that grid.

        verticalComparisonGrid : 1D numpy array, optional
            The vertical grid on which to compare MPAS data with observations
            if ``verticalComparisonGridName`` is not 'obs' or 'mpas'. The
            values should be elevations (in m, typically negative).

        subtaskName : str, optional
            The name of the subtask
        '''
        # Authors
        # -------
        # <NAME>

        # call the constructor from the base class
        # (RemapMpasClimatologySubtask)
        super(ComputeTransectsSubtask, self).__init__(
            mpasClimatologyTask, parentTask,
            climatologyName=climatologyName, variableList=variableList,
            seasons=seasons, subtaskName=subtaskName)

        self.obsDatasets = obsDatasets
        self.transectCollectionName = transectCollectionName
        self.verticalComparisonGridName = verticalComparisonGridName
        self.verticalComparisonGrid = verticalComparisonGrid
        # }}}

    def setup_and_check(self):  # {{{
        '''
        Creates a PointCollectionDescriptor describing all the points in the
        transects to remap to. Keeps track of which transects index each point
        belongs to.

        Raises
        ------
        IOError :
            If a restart file is not available from which to read mesh
            information or if no history files are available from which to
            compute the climatology in the desired time range.
        '''
        # Authors
        # -------
        # <NAME>

        # Flatten all transects into one point collection, remembering for
        # each point which transect it came from so results can be split
        # back apart after remapping.
        transectNumber = []
        lats = []
        lons = []
        x = []
        obsDatasets = self.obsDatasets.get_observations()
        datasets = list(obsDatasets.values())
        for transectIndex, ds in enumerate(datasets):
            localLats = list(ds.lat.values)
            localLons = list(ds.lon.values)
            localX = list(ds.x.values)
            # one transect index per point along this transect
            localIndices = [transectIndex for lat in localLats]
            lats.extend(localLats)
            lons.extend(localLons)
            x.extend(localX)
            transectNumber.extend(localIndices)

        # NOTE(review): ('nPoints') is a plain string, not a 1-tuple;
        # xr.DataArray.from_dict appears to accept it, but ('nPoints',)
        # would be clearer — confirm before changing.
        self.transectNumber = xr.DataArray.from_dict(
            {'dims': ('nPoints'),
             'data': transectNumber})

        self.x = xr.DataArray.from_dict(
            {'dims': ('nPoints'),
             'data': x})

        self.collectionDescriptor = PointCollectionDescriptor(
            lats, lons, collectionName=self.transectCollectionName,
            units='degrees', outDimension='nPoints')

        self.add_comparison_grid_descriptor(self.transectCollectionName,
                                            self.collectionDescriptor)

        # then, call setup_and_check from the base class
        # (RemapMpasClimatologySubtask)
        super(ComputeTransectsSubtask, self).setup_and_check()

        for transectName in obsDatasets:
            obsDatasets[transectName].close()

    def run_task(self):  # {{{
        '''
        Compute climatologies of melt rates from E3SM/MPAS output

        This function has been overridden to compute ``zMid`` based on data
        from a restart file for later use in vertically interpolating to
        reference depths.
        '''
        # Authors
        # -------
        # <NAME>

        # first, compute zMid and cell mask from the restart file
        with xr.open_dataset(self.restartFileName) as ds:
            ds = ds[['maxLevelCell', 'bottomDepth', 'layerThickness']]
            ds = ds.isel(Time=0)

            # maxLevelCell is 1-based in the restart file; convert to a
            # 0-based index for masking
            self.maxLevelCell = ds.maxLevelCell - 1

            zMid = compute_zmid(ds.bottomDepth, ds.maxLevelCell,
                                ds.layerThickness)

            self.zMid = \
                xr.DataArray.from_dict({'dims': ('nCells', 'nVertLevels'),
                                        'data': zMid})
            # NOTE(review): redundant — the 'with' context manager already
            # closes the dataset; harmless.
            ds.close()

        # then, call run from the base class (RemapMpasClimatologySubtask),
        # which will perform the horizontal remapping
        super(ComputeTransectsSubtask, self).run_task()

        obsDatasets = self.obsDatasets.get_observations()

        self.logger.info('Interpolating each transect vertically...')
        # finally, vertically interpolate and write out each transect
        for season in self.seasons:
            remappedFileName = self.get_remapped_file_name(
                season, comparisonGridName=self.transectCollectionName)

            with xr.open_dataset(remappedFileName) as ds:
                transectNames = list(obsDatasets.keys())
                for transectIndex, transectName in enumerate(transectNames):
                    self.logger.info('  {}'.format(transectName))
                    dsObs = obsDatasets[transectName]
                    outFileName = self.get_remapped_file_name(
                        season, comparisonGridName=transectName)
                    outObsFileName = self.obsDatasets.get_out_file_name(
                        transectName, self.verticalComparisonGridName)
                    self._vertical_interp(ds, transectIndex, dsObs,
                                          outFileName, outObsFileName)
                ds.close()

        for transectName in obsDatasets:
            obsDatasets[transectName].close()
        # }}}

    def customize_masked_climatology(self, climatology, season):  # {{{
        '''
        Add zMid to the climatologys

        Parameters
        ----------
        climatology : ``xarray.Dataset`` object
            the climatology data set

        season : str
            The name of the season to be masked

        Returns
        -------
        climatology : ``xarray.Dataset`` object
            the modified climatology data set
        '''
        # Authors
        # -------
        # <NAME>

        # mask out cells below the sea floor (level index >= maxLevelCell)
        zIndex = xr.DataArray.from_dict(
            {'dims': ('nVertLevels',),
             'data': numpy.arange(climatology.sizes['nVertLevels'])})

        cellMask = zIndex < self.maxLevelCell

        for variableName in self.variableList:
            climatology[variableName] = \
                climatology[variableName].where(cellMask)

        climatology['zMid'] = self.zMid

        climatology = climatology.transpose('nVertLevels', 'nCells')

        return climatology  # }}}

    def customize_remapped_climatology(self, climatology, comparisonGridNames,
                                       season):  # {{{
        '''
        Add the transect index to the data set

        Parameters
        ----------
        climatology : ``xarray.Dataset```
            The MPAS climatology data set that has been remapped

        comparisonGridNames : {'latlon', 'antarctic'}
            The name of the comparison grid to use for remapping.

        season : str
            The name of the season to be masked

        Returns
        -------
        climatology : ``xarray.Dataset```
            The same data set with any custom fields added or modifications
            made
        '''
        # Authors
        # -------
        # <NAME>

        # attach per-point transect bookkeeping so individual transects can
        # be extracted after remapping
        climatology['transectNumber'] = self.transectNumber

        climatology['x'] = self.x

        if 'nCells' in climatology.dims:
            climatology = climatology.rename({'nCells': 'nPoints'})

        dims = ['nPoints', 'nVertLevels']
        if 'nv' in climatology.dims:
            dims.append('nv')
        climatology = climatology.transpose(*dims)

        return climatology  # }}}

    def _vertical_interp(self, ds, transectIndex, dsObs, outFileName,
                         outObsFileName):
        '''
        Vertically interpolate a transect and write it to a unique file

        Parameters
        ----------
        ds : ``xarray.Dataset``
            The data set containing all transects before vertical interpolation

        transectIndex : int
            The index of the transect to extract

        dsObs : ``xarray.Dataset``
            The obs dataset used if verticalComparisonGridName is 'obs'

        outFileName : str
            The name of the file to which the resulting data set should be
            written

        outObsFileName : str
            The name of the file to which the resulting obs data set should be
            written if it is interpolated
        '''
        # Authors
        # -------
        # <NAME>

        # skip work already done on a previous run
        if os.path.exists(outFileName):
            return

        # extract just this transect's points from the point collection
        ds = ds.where(ds.transectNumber == transectIndex, drop=True)

        # choose the output vertical coordinate
        if self.verticalComparisonGridName == 'mpas':
            z = ds.zMid
            z = z.rename({'nVertLevels': 'nzOut'})
        elif self.verticalComparisonGridName == 'obs':
            z = dsObs.z
            z = z.rename({'nz': 'nzOut'})
        else:
            # a defined vertical grid
            z = (('nzOut', ), self.verticalComparisonGrid)

        if self.verticalComparisonGridName == 'mpas':
            # MPAS levels are already the target grid; just rename
            ds = ds.rename({'zMid': 'z', 'nVertLevels': 'nz'})
        else:
            ds['z'] = z
            # remap each variable
            ds = interp_1d(ds, inInterpDim='nVertLevels', inInterpCoord='zMid',
                           outInterpDim='nzOut', outInterpCoord='z')
            ds = ds.rename({'nzOut': 'nz'})

        # the observations also need to be interpolated when the target grid
        # is not their native vertical grid
        if self.verticalComparisonGridName != 'obs' and 'nz' in dsObs.dims:
            dsObs['zOut'] = z

            # remap each variable
            dsObs = interp_1d(dsObs, inInterpDim='nz', inInterpCoord='z',
                              outInterpDim='nzOut', outInterpCoord='zOut')
            dsObs = dsObs.rename({'nzOut': 'nz'})
            write_netcdf(dsObs, outObsFileName)

        # NOTE(review): assumes the remapped dataset always contains
        # 'validMask' (added by the remapper) — drop_vars raises otherwise;
        # confirm against RemapMpasClimatologySubtask.
        ds = ds.drop_vars(['validMask', 'transectNumber'])

        write_netcdf(ds, outFileName)  # }}}
    # }}}
class TransectsObservations(object):  # {{{
    """
    A class for loading and manipulating transect observations

    Attributes
    ----------
    config : ``MpasAnalysisConfigParser``
        Configuration options

    obsFileNames : OrderedDict
        The names of transects and the file names of the corresponding
        observations for a transect

    horizontalResolution : str
        'obs' for the obs as they are or a size in km if subdivision is
        desired.

    transectCollectionName : str
        A name that describes the collection of transects (e.g. the name
        of the collection of observations) used to name the
        destination "mesh" for regridding

    obsDatasets : OrderedDict
        A dictionary of observational datasets
    """
    # Authors
    # -------
    # <NAME>

    def __init__(self, config, obsFileNames, horizontalResolution,
                 transectCollectionName):  # {{{
        '''
        Construct the object, setting the observations dictionary to None.

        Parameters
        ----------
        config : ``MpasAnalysisConfigParser``
            Configuration options

        obsFileNames : OrderedDict
            The names of transects and the file names of the corresponding
            observations for a transect

        horizontalResolution : str
            'obs' for the obs as they are or a size in km if subdivision is
            desired.

        transectCollectionName : str
            A name that describes the collection of transects (e.g. the name
            of the collection of observations) used to name the
            destination "mesh" for regridding
        '''
        # Authors
        # -------
        # <NAME>

        self.obsDatasets = None
        self.config = config
        self.obsFileNames = obsFileNames
        # anything other than the sentinel 'obs' is a subdivision size in km
        if horizontalResolution != 'obs':
            horizontalResolution = float(horizontalResolution)
        self.horizontalResolution = horizontalResolution
        self.transectCollectionName = transectCollectionName

    def get_observations(self):
        # {{{
        '''
        Read in and set up the observations.

        Returns
        -------
        obsDatasets : OrderedDict
            The observational dataset
        '''
        # Authors
        # -------
        # <NAME>

        obsDatasets = OrderedDict()
        for name in self.obsFileNames:
            outFileName = self.get_out_file_name(name)
            if os.path.exists(outFileName):
                # use the cached, pre-processed transect from a previous run
                dsObs = xr.open_dataset(outFileName)
                dsObs.load()
            else:
                dsObs = self.build_observational_dataset(
                    self.obsFileNames[name], name)

                dsObs.load()
                # make sure lat and lon are coordinates
                for coord in ['lon', 'lat']:
                    dsObs.coords[coord] = dsObs[coord]

                if self.horizontalResolution == 'obs':
                    dsObs = self._add_distance(dsObs)
                else:
                    dsObs = self._subdivide_observations(dsObs)
                # cache the processed transect for future runs
                write_netcdf(dsObs, outFileName)
            obsDatasets[name] = dsObs

        return obsDatasets  # }}}

    def build_observational_dataset(self, fileName, transectName):  # {{{
        '''
        read in the data sets for observations, and possibly rename some
        variables and dimensions

        Parameters
        ----------
        fileName : str
            observation file name

        transectName : str
            transect name

        Returns
        -------
        dsObs : ``xarray.Dataset``
            The observational dataset
        '''
        # Authors
        # -------
        # <NAME>-Davis

        dsObs = xr.open_dataset(fileName)

        # observations are expected to have horizontal dimension nPoints and
        # vertical dimension nz, as well as horizontal coordinates lat and lon
        # and vertical coordinate z.  Override this function if these need to
        # be renamed from the observations file.

        return dsObs  # }}}

    def get_out_file_name(self, transectName,
                          verticalComparisonGridName='obs'):  # {{{
        '''
        Given config options and the name of a transect, returns the full
        path of the file where the processed version of that transect's
        observations is cached.

        Parameters
        ----------
        transectName : str
            The name of the transect

        verticalComparisonGridName : {'obs', 'mpas'} or any str, optional
            The vertical grid name on which to compare MPAS data with
            observations. 'obs' indicates the locations of the original
            observations; 'mpas' is the vertical locations of MPAS points,
            remapped to the observation latitude/longitude. If any other,
            string, verticalComparisonGrid should be a 1D numpy array and this
            name should be a useful (and unique) description of that grid.

        Returns
        -------
        fileName : str
            The path to the file for the given transect and vertical grid.
        '''
        # Authors
        # -------
        # <NAME>

        config = self.config

        remappedDirectory = build_config_full_path(
            config=config, section='output',
            relativePathOption='remappedClimSubdirectory',
            relativePathSection='oceanObservations')

        make_directories(remappedDirectory)

        # only non-native vertical grids get an extra suffix in the name
        if verticalComparisonGridName == 'obs':
            fileName = '{}/{}_{}.nc'.format(
                remappedDirectory, self.transectCollectionName, transectName)
        else:
            fileName = '{}/{}_{}_{}.nc'.format(
                remappedDirectory, self.transectCollectionName, transectName,
                verticalComparisonGridName)
        return fileName  # }}}

    def _add_distance(self, dsObs):  # {{{
        '''
        Add the cumulative along-transect distance ``x`` (in km) to the
        observations without subdividing them.
        '''
        lat = dsObs.lat.values
        lon = dsObs.lon.values

        # compute the great circle distance between these points
        dxIn = self._haversine(lon[0:-1], lat[0:-1], lon[1:], lat[1:])

        xIn = numpy.zeros(lat.shape)
        xIn[1:] = numpy.cumsum(dxIn)

        dsObs['x'] = (('nPoints',), xIn)
        return dsObs  # }}}

    def _subdivide_observations(self, dsObs):  # {{{
        '''
        Subdivide each segment of the transect so the horizontal resolution
        approximately matches the requested resolution
        '''
        lat = dsObs.lat.values
        lon = dsObs.lon.values

        # compute the great circle distance between these points
        dxIn = self._haversine(lon[0:-1], lat[0:-1], lon[1:], lat[1:])

        # number of subdivisions per segment, rounded to nearest, at least 1
        nSegments = numpy.maximum(
            (dxIn / self.horizontalResolution + 0.5).astype(int), 1)

        xIn = numpy.zeros(lat.shape)
        xIn[1:] = numpy.cumsum(dxIn)

        # build fractional indices for the subdivided points, then map them
        # back to distances along the transect
        outIndex = []
        for index in range(len(xIn) - 1):
            n = nSegments[index]
            outIndex.extend(index + numpy.arange(0, n) / n)
        outIndex.append(len(xIn) - 1)

        xOut = numpy.interp(outIndex, numpy.arange(len(xIn)), xIn)

        dsObs['xIn'] = (('nPoints',), xIn)
        dsObs['xOut'] = (('nPointsOut',), xOut)

        # interpolate fields without and with vertical dimension
        dsObs = interp_1d(dsObs, inInterpDim='nPoints',
                          inInterpCoord='xIn', outInterpDim='nPointsOut',
                          outInterpCoord='xOut')
        dsObs = dsObs.drop_vars(['xIn'])
        dsObs = dsObs.rename({'nPointsOut': 'nPoints', 'xOut': 'x'})
        return dsObs  # }}}

    def _haversine(self, lon1, lat1, lon2, lat2):  # {{{
        """
        Calculate the great circle distance in km between two points on the
        earth (specified in decimal degrees). Based on
        https://stackoverflow.com/a/4913653
        """
        # convert decimal degrees to radians
        lon1 = numpy.deg2rad(lon1)
        lat1 = numpy.deg2rad(lat1)
        lon2 = numpy.deg2rad(lon2)
        lat2 = numpy.deg2rad(lat2)

        # haversine formula
        dlon = lon2 - lon1
        dlat = lat2 - lat1
        a = numpy.sin(dlat / 2.)**2 + numpy.cos(lat1) * numpy.cos(lat2) * \
            numpy.sin(dlon / 2.)**2
        c = 2 * numpy.arcsin(numpy.sqrt(a))
        r = 6371  # Radius of earth in kilometers. Use 3956 for miles
        return c * r  # }}}
    # }}}
# vim: foldmethod=marker ai ts=4 sts=4 et sw=4 ft=python
| # This software is open source software available under the BSD-3 license.
#
# Copyright (c) 2020 Triad National Security, LLC. All rights reserved.
# Copyright (c) 2020 Lawrence Livermore National Security, LLC. All rights
# reserved.
# Copyright (c) 2020 UT-Battelle, LLC. All rights reserved.
#
# Additional copyright and license information can be found in the LICENSE file
# distributed with this code, or at
# https://raw.githubusercontent.com/MPAS-Dev/MPAS-Analysis/master/LICENSE
from __future__ import absolute_import, division, print_function, \
unicode_literals
import numpy
import xarray as xr
import os
from collections import OrderedDict
from pyremap import PointCollectionDescriptor
from mpas_analysis.shared.climatology import RemapMpasClimatologySubtask
from mpas_analysis.shared.io.utility import build_config_full_path, \
make_directories
from mpas_analysis.shared.io import write_netcdf
from mpas_analysis.ocean.utility import compute_zmid
from mpas_analysis.shared.interpolation import interp_1d
class ComputeTransectsSubtask(RemapMpasClimatologySubtask): # {{{
"""
A subtask for remapping climatologies to transect points
Attributes
----------
obsDatasets : TransectsObservations
A dictionary of observational datasets
verticalComparisonGridName : {'obs', 'mpas'} or any str
The vertical grid name on which to compare MPAS data with
observations. 'obs' indicates the locations of the original
observations; 'mpas' is the vertical locations of MPAS points,
remapped to the observation latitude/longitude. If any other,
string, verticalComparisonGrid should be a 1D numpy array and this
name should be a useful (and unique) description of that grid.
verticalComparisonGrid : 1D numpy array
The vertical grid on which to compare MPAS data with observations
if ``verticalComparisonGridName`` is not 'obs' or 'mpas'. The
values should be elevations (in m, typically negative).
transectNumber : ``xarray.DataArray``
For each point in the point collection after remapping, the index of
the transect it belongs to (so that remapped results can be separated
back into individual transects for plotting)
transectCollectionName : str
A name that describes the collection of transects (e.g. the name
of the collection of observations) used to name the
destination "mesh" for regridding
collectionDescriptor : ``PointCollectionDescriptor``
The mesh descriptor for the collection of all points in all transects,
used for remapping
zMid : ``xarray.DataArray``
Vertical coordinate at the center of layers, used to interpolate to
reference depths
"""
# Authors
# -------
# <NAME>
def __init__(self, mpasClimatologyTask, parentTask, climatologyName,
transectCollectionName, variableList, seasons, obsDatasets,
verticalComparisonGridName='obs', verticalComparisonGrid=None,
subtaskName='remapTransects'):
# {{{
'''
Construct the analysis task and adds it as a subtask of the
``parentTask``.
Parameters
----------
mpasClimatologyTask : ``MpasClimatologyTask``
The task that produced a climatology to be remapped and plotted
as a transect
parentTask : ``AnalysisTask``
The parent task, used to get the ``taskName``, ``config`` and
``componentName``
climatologyName : str
A name that describes the climatology (e.g. a short version of
the important field(s) in the climatology) used to name the
subdirectories for each stage of the climatology
transectCollectionName : str
A name that describes the collection of transects (e.g. the name
of the collection of observations) used to name the
destination "mesh" for regridding
variableList : list of str
A list of variable names in ``timeSeriesStatsMonthly`` to be
included in the climatologies
seasons : list of str
A list of seasons (keys in ``shared.constants.monthDictionary``)
to be computed or ['none'] (not ``None``) if only monthly
climatologies are needed.
obsDatasets : TransectsObservations
A dictionary of observational datasets
verticalComparisonGridName : {'obs', 'mpas'} or any str, optional
The vertical grid name on which to compare MPAS data with
observations. 'obs' indicates the locations of the original
observations; 'mpas' is the vertical locations of MPAS points,
remapped to the observation latitude/longitude. If any other,
string, verticalComparisonGrid should be a 1D numpy array and this
name should be a useful (and unique) description of that grid.
verticalComparisonGrid : 1D numpy array, optional
The vertical grid on which to compare MPAS data with observations
if ``verticalComparisonGridName`` is not 'obs' or 'mpas'. The
values should be elevations (in m, typically negative).
subtaskName : str, optional
The name of the subtask
'''
# Authors
# -------
# <NAME>
# call the constructor from the base class
# (RemapMpasClimatologySubtask)
super(ComputeTransectsSubtask, self).__init__(
mpasClimatologyTask, parentTask,
climatologyName=climatologyName, variableList=variableList,
seasons=seasons, subtaskName=subtaskName)
self.obsDatasets = obsDatasets
self.transectCollectionName = transectCollectionName
self.verticalComparisonGridName = verticalComparisonGridName
self.verticalComparisonGrid = verticalComparisonGrid
# }}}
def setup_and_check(self): # {{{
'''
Creates a PointCollectionDescriptor describing all the points in the
transects to remap to. Keeps track of which transects index each point
belongs to.
Raises
------
IOError :
If a restart file is not available from which to read mesh
information or if no history files are available from which to
compute the climatology in the desired time range.
'''
# Authors
# -------
# <NAME>
transectNumber = []
lats = []
lons = []
x = []
obsDatasets = self.obsDatasets.get_observations()
datasets = list(obsDatasets.values())
for transectIndex, ds in enumerate(datasets):
localLats = list(ds.lat.values)
localLons = list(ds.lon.values)
localX = list(ds.x.values)
localIndices = [transectIndex for lat in localLats]
lats.extend(localLats)
lons.extend(localLons)
x.extend(localX)
transectNumber.extend(localIndices)
self.transectNumber = xr.DataArray.from_dict(
{'dims': ('nPoints'),
'data': transectNumber})
self.x = xr.DataArray.from_dict(
{'dims': ('nPoints'),
'data': x})
self.collectionDescriptor = PointCollectionDescriptor(
lats, lons, collectionName=self.transectCollectionName,
units='degrees', outDimension='nPoints')
self.add_comparison_grid_descriptor(self.transectCollectionName,
self.collectionDescriptor)
# then, call setup_and_check from the base class
# (RemapMpasClimatologySubtask)
super(ComputeTransectsSubtask, self).setup_and_check()
for transectName in obsDatasets:
obsDatasets[transectName].close()
def run_task(self): # {{{
'''
Compute climatologies of melt rates from E3SM/MPAS output
This function has been overridden to compute ``zMid`` based on data
from a restart file for later use in vertically interpolating to
reference depths.
'''
# Authors
# -------
# <NAME>
# first, compute zMid and cell mask from the restart file
with xr.open_dataset(self.restartFileName) as ds:
ds = ds[['maxLevelCell', 'bottomDepth', 'layerThickness']]
ds = ds.isel(Time=0)
self.maxLevelCell = ds.maxLevelCell - 1
zMid = compute_zmid(ds.bottomDepth, ds.maxLevelCell,
ds.layerThickness)
self.zMid = \
xr.DataArray.from_dict({'dims': ('nCells', 'nVertLevels'),
'data': zMid})
ds.close()
# then, call run from the base class (RemapMpasClimatologySubtask),
# which will perform the horizontal remapping
super(ComputeTransectsSubtask, self).run_task()
obsDatasets = self.obsDatasets.get_observations()
self.logger.info('Interpolating each transect vertically...')
# finally, vertically interpolate and write out each transect
for season in self.seasons:
remappedFileName = self.get_remapped_file_name(
season, comparisonGridName=self.transectCollectionName)
with xr.open_dataset(remappedFileName) as ds:
transectNames = list(obsDatasets.keys())
for transectIndex, transectName in enumerate(transectNames):
self.logger.info(' {}'.format(transectName))
dsObs = obsDatasets[transectName]
outFileName = self.get_remapped_file_name(
season, comparisonGridName=transectName)
outObsFileName = self.obsDatasets.get_out_file_name(
transectName, self.verticalComparisonGridName)
self._vertical_interp(ds, transectIndex, dsObs,
outFileName, outObsFileName)
ds.close()
for transectName in obsDatasets:
obsDatasets[transectName].close()
# }}}
def customize_masked_climatology(self, climatology, season): # {{{
'''
Add zMid to the climatologys
Parameters
----------
climatology : ``xarray.Dataset`` object
the climatology data set
season : str
The name of the season to be masked
Returns
-------
climatology : ``xarray.Dataset`` object
the modified climatology data set
'''
# Authors
# -------
# <NAME>
zIndex = xr.DataArray.from_dict(
{'dims': ('nVertLevels',),
'data': numpy.arange(climatology.sizes['nVertLevels'])})
cellMask = zIndex < self.maxLevelCell
for variableName in self.variableList:
climatology[variableName] = \
climatology[variableName].where(cellMask)
climatology['zMid'] = self.zMid
climatology = climatology.transpose('nVertLevels', 'nCells')
return climatology # }}}
def customize_remapped_climatology(self, climatology, comparisonGridNames,
season): # {{{
'''
Add the transect index to the data set
Parameters
----------
climatology : ``xarray.Dataset```
The MPAS climatology data set that has been remapped
comparisonGridNames : {'latlon', 'antarctic'}
The name of the comparison grid to use for remapping.
season : str
The name of the season to be masked
Returns
-------
climatology : ``xarray.Dataset```
The same data set with any custom fields added or modifications
made
'''
# Authors
# -------
# <NAME>
climatology['transectNumber'] = self.transectNumber
climatology['x'] = self.x
if 'nCells' in climatology.dims:
climatology = climatology.rename({'nCells': 'nPoints'})
dims = ['nPoints', 'nVertLevels']
if 'nv' in climatology.dims:
dims.append('nv')
climatology = climatology.transpose(*dims)
return climatology # }}}
def _vertical_interp(self, ds, transectIndex, dsObs, outFileName,
outObsFileName):
'''
Vertically interpolate a transect and write it to a unique file
Parameters
----------
ds : ``xarray.Dataset``
The data set containing all transects before vertical interpolation
transectIndex : int
The index of the transect to extract
dsObs : ``xarray.Dataset``
The obs dataset used if verticalComparisonGridName is 'obs'
outFileName : str
The name of the file to which the resulting data set should be
written
outObsFileName : str
The name of the file to which the resulting obs data set should be
written if it is interpolated
'''
# Authors
# -------
# <NAME>
if os.path.exists(outFileName):
return
ds = ds.where(ds.transectNumber == transectIndex, drop=True)
if self.verticalComparisonGridName == 'mpas':
z = ds.zMid
z = z.rename({'nVertLevels': 'nzOut'})
elif self.verticalComparisonGridName == 'obs':
z = dsObs.z
z = z.rename({'nz': 'nzOut'})
else:
# a defined vertical grid
z = (('nzOut', ), self.verticalComparisonGrid)
if self.verticalComparisonGridName == 'mpas':
ds = ds.rename({'zMid': 'z', 'nVertLevels': 'nz'})
else:
ds['z'] = z
# remap each variable
ds = interp_1d(ds, inInterpDim='nVertLevels', inInterpCoord='zMid',
outInterpDim='nzOut', outInterpCoord='z')
ds = ds.rename({'nzOut': 'nz'})
if self.verticalComparisonGridName != 'obs' and 'nz' in dsObs.dims:
dsObs['zOut'] = z
# remap each variable
dsObs = interp_1d(dsObs, inInterpDim='nz', inInterpCoord='z',
outInterpDim='nzOut', outInterpCoord='zOut')
dsObs = dsObs.rename({'nzOut': 'nz'})
write_netcdf(dsObs, outObsFileName)
ds = ds.drop_vars(['validMask', 'transectNumber'])
write_netcdf(ds, outFileName) # }}}
# }}}
class TransectsObservations(object): # {{{
"""
A class for loading and manipulating transect observations
Attributes
----------
config : ``MpasAnalysisConfigParser``
Configuration options
obsFileNames : OrderedDict
The names of transects and the file names of the corresponding
observations for a transect
horizontalResolution : str
'obs' for the obs as they are or a size in km if subdivision is
desired.
transectCollectionName : str
A name that describes the collection of transects (e.g. the name
of the collection of observations) used to name the
destination "mesh" for regridding
obsDatasets : OrderedDict
A dictionary of observational datasets
"""
# Authors
# -------
# <NAME>
def __init__(self, config, obsFileNames, horizontalResolution,
transectCollectionName): # {{{
'''
Construct the object, setting the observations dictionary to None.
Parameters
----------
config : ``MpasAnalysisConfigParser``
Configuration options
obsFileNames : OrderedDict
The names of transects and the file names of the corresponding
observations for a transect
horizontalResolution : str
'obs' for the obs as they are or a size in km if subdivision is
desired.
transectCollectionName : str
A name that describes the collection of transects (e.g. the name
of the collection of observations) used to name the
destination "mesh" for regridding
'''
# Authors
# -------
# <NAME>
self.obsDatasets = None
self.config = config
self.obsFileNames = obsFileNames
if horizontalResolution != 'obs':
horizontalResolution = float(horizontalResolution)
self.horizontalResolution = horizontalResolution
self.transectCollectionName = transectCollectionName
def get_observations(self):
# {{{
'''
Read in and set up the observations.
Returns
-------
obsDatasets : OrderedDict
The observational dataset
'''
# Authors
# -------
# <NAME>
obsDatasets = OrderedDict()
for name in self.obsFileNames:
outFileName = self.get_out_file_name(name)
if os.path.exists(outFileName):
dsObs = xr.open_dataset(outFileName)
dsObs.load()
else:
dsObs = self.build_observational_dataset(
self.obsFileNames[name], name)
dsObs.load()
# make sure lat and lon are coordinates
for coord in ['lon', 'lat']:
dsObs.coords[coord] = dsObs[coord]
if self.horizontalResolution == 'obs':
dsObs = self._add_distance(dsObs)
else:
dsObs = self._subdivide_observations(dsObs)
write_netcdf(dsObs, outFileName)
obsDatasets[name] = dsObs
return obsDatasets # }}}
def build_observational_dataset(self, fileName, transectName): # {{{
'''
read in the data sets for observations, and possibly rename some
variables and dimensions
Parameters
----------
fileName : str
observation file name
transectName : str
transect name
Returns
-------
dsObs : ``xarray.Dataset``
The observational dataset
'''
# Authors
# -------
# <NAME>-Davis
dsObs = xr.open_dataset(fileName)
# observations are expected to have horizontal dimension nPoints and
# vertical dimension nz, as well as horizontal coordinates lat and lon
# and vertical coordinate z. Override this function if these need to
# be renamed from the observations file.
return dsObs # }}}
def get_out_file_name(self, transectName,
verticalComparisonGridName='obs'): # {{{
'''
Given config options, the name of a field and a string identifying the
months in a seasonal climatology, returns the full path for MPAS
climatology files before and after remapping.
Parameters
----------
transectName : str
The name of the transect
verticalComparisonGridName : {'obs', 'mpas'} or any str, optional
The vertical grid name on which to compare MPAS data with
observations. 'obs' indicates the locations of the original
observations; 'mpas' is the vertical locations of MPAS points,
remapped to the observation latitude/longitude. If any other,
string, verticalComparisonGrid should be a 1D numpy array and this
name should be a useful (and unique) description of that grid.
Returns
-------
fileName : str
The path to the climatology file for the specified season.
'''
# Authors
# -------
# <NAME>
config = self.config
remappedDirectory = build_config_full_path(
config=config, section='output',
relativePathOption='remappedClimSubdirectory',
relativePathSection='oceanObservations')
make_directories(remappedDirectory)
if verticalComparisonGridName == 'obs':
fileName = '{}/{}_{}.nc'.format(
remappedDirectory, self.transectCollectionName, transectName)
else:
fileName = '{}/{}_{}_{}.nc'.format(
remappedDirectory, self.transectCollectionName, transectName,
verticalComparisonGridName)
return fileName # }}}
def _add_distance(self, dsObs): # {{{
'''
Subdivide each segment of the transect so the horizontal resolution
approximately matches the requested resolution
'''
lat = dsObs.lat.values
lon = dsObs.lon.values
# compute the great circle distance between these points
dxIn = self._haversine(lon[0:-1], lat[0:-1], lon[1:], lat[1:])
xIn = numpy.zeros(lat.shape)
xIn[1:] = numpy.cumsum(dxIn)
dsObs['x'] = (('nPoints',), xIn)
return dsObs # }}}
def _subdivide_observations(self, dsObs): # {{{
'''
Subdivide each segment of the transect so the horizontal resolution
approximately matches the requested resolution
'''
lat = dsObs.lat.values
lon = dsObs.lon.values
# compute the great circle distance between these points
dxIn = self._haversine(lon[0:-1], lat[0:-1], lon[1:], lat[1:])
nSegments = numpy.maximum(
(dxIn / self.horizontalResolution + 0.5).astype(int), 1)
xIn = numpy.zeros(lat.shape)
xIn[1:] = numpy.cumsum(dxIn)
outIndex = []
for index in range(len(xIn) - 1):
n = nSegments[index]
outIndex.extend(index + numpy.arange(0, n) / n)
outIndex.append(len(xIn) - 1)
xOut = numpy.interp(outIndex, numpy.arange(len(xIn)), xIn)
dsObs['xIn'] = (('nPoints',), xIn)
dsObs['xOut'] = (('nPointsOut',), xOut)
# interpolate fields without and with vertical dimension
dsObs = interp_1d(dsObs, inInterpDim='nPoints',
inInterpCoord='xIn', outInterpDim='nPointsOut',
outInterpCoord='xOut')
dsObs = dsObs.drop_vars(['xIn'])
dsObs = dsObs.rename({'nPointsOut': 'nPoints', 'xOut': 'x'})
return dsObs # }}}
def _haversine(self, lon1, lat1, lon2, lat2): # {{{
"""
Calculate the great circle distance in km between two points on the
earth (specified in decimal degrees). Based on
https://stackoverflow.com/a/4913653
"""
# convert decimal degrees to radians
lon1 = numpy.deg2rad(lon1)
lat1 = numpy.deg2rad(lat1)
lon2 = numpy.deg2rad(lon2)
lat2 = numpy.deg2rad(lat2)
# haversine formula
dlon = lon2 - lon1
dlat = lat2 - lat1
a = numpy.sin(dlat / 2.)**2 + numpy.cos(lat1) * numpy.cos(lat2) * \
numpy.sin(dlon / 2.)**2
c = 2 * numpy.arcsin(numpy.sqrt(a))
r = 6371 # Radius of earth in kilometers. Use 3956 for miles
return c * r # }}}
# }}}
# vim: foldmethod=marker ai ts=4 sts=4 et sw=4 ft=python
| en | 0.710345 | # This software is open source software available under the BSD-3 license. # # Copyright (c) 2020 Triad National Security, LLC. All rights reserved. # Copyright (c) 2020 Lawrence Livermore National Security, LLC. All rights # reserved. # Copyright (c) 2020 UT-Battelle, LLC. All rights reserved. # # Additional copyright and license information can be found in the LICENSE file # distributed with this code, or at # https://raw.githubusercontent.com/MPAS-Dev/MPAS-Analysis/master/LICENSE # {{{ A subtask for remapping climatologies to transect points Attributes ---------- obsDatasets : TransectsObservations A dictionary of observational datasets verticalComparisonGridName : {'obs', 'mpas'} or any str The vertical grid name on which to compare MPAS data with observations. 'obs' indicates the locations of the original observations; 'mpas' is the vertical locations of MPAS points, remapped to the observation latitude/longitude. If any other, string, verticalComparisonGrid should be a 1D numpy array and this name should be a useful (and unique) description of that grid. verticalComparisonGrid : 1D numpy array The vertical grid on which to compare MPAS data with observations if ``verticalComparisonGridName`` is not 'obs' or 'mpas'. The values should be elevations (in m, typically negative). transectNumber : ``xarray.DataArray`` For each point in the point collection after remapping, the index of the transect it belongs to (so that remapped results can be separated back into individual transects for plotting) transectCollectionName : str A name that describes the collection of transects (e.g. 
the name of the collection of observations) used to name the destination "mesh" for regridding collectionDescriptor : ``PointCollectionDescriptor`` The mesh descriptor for the collection of all points in all transects, used for remapping zMid : ``xarray.DataArray`` Vertical coordinate at the center of layers, used to interpolate to reference depths # Authors # ------- # <NAME> # {{{ Construct the analysis task and adds it as a subtask of the ``parentTask``. Parameters ---------- mpasClimatologyTask : ``MpasClimatologyTask`` The task that produced a climatology to be remapped and plotted as a transect parentTask : ``AnalysisTask`` The parent task, used to get the ``taskName``, ``config`` and ``componentName`` climatologyName : str A name that describes the climatology (e.g. a short version of the important field(s) in the climatology) used to name the subdirectories for each stage of the climatology transectCollectionName : str A name that describes the collection of transects (e.g. the name of the collection of observations) used to name the destination "mesh" for regridding variableList : list of str A list of variable names in ``timeSeriesStatsMonthly`` to be included in the climatologies seasons : list of str A list of seasons (keys in ``shared.constants.monthDictionary``) to be computed or ['none'] (not ``None``) if only monthly climatologies are needed. obsDatasets : TransectsObservations A dictionary of observational datasets verticalComparisonGridName : {'obs', 'mpas'} or any str, optional The vertical grid name on which to compare MPAS data with observations. 'obs' indicates the locations of the original observations; 'mpas' is the vertical locations of MPAS points, remapped to the observation latitude/longitude. If any other, string, verticalComparisonGrid should be a 1D numpy array and this name should be a useful (and unique) description of that grid. 
verticalComparisonGrid : 1D numpy array, optional The vertical grid on which to compare MPAS data with observations if ``verticalComparisonGridName`` is not 'obs' or 'mpas'. The values should be elevations (in m, typically negative). subtaskName : str, optional The name of the subtask # Authors # ------- # <NAME> # call the constructor from the base class # (RemapMpasClimatologySubtask) # }}} # {{{ Creates a PointCollectionDescriptor describing all the points in the transects to remap to. Keeps track of which transects index each point belongs to. Raises ------ IOError : If a restart file is not available from which to read mesh information or if no history files are available from which to compute the climatology in the desired time range. # Authors # ------- # <NAME> # then, call setup_and_check from the base class # (RemapMpasClimatologySubtask) # {{{ Compute climatologies of melt rates from E3SM/MPAS output This function has been overridden to compute ``zMid`` based on data from a restart file for later use in vertically interpolating to reference depths. # Authors # ------- # <NAME> # first, compute zMid and cell mask from the restart file # then, call run from the base class (RemapMpasClimatologySubtask), # which will perform the horizontal remapping # finally, vertically interpolate and write out each transect # }}} # {{{ Add zMid to the climatologys Parameters ---------- climatology : ``xarray.Dataset`` object the climatology data set season : str The name of the season to be masked Returns ------- climatology : ``xarray.Dataset`` object the modified climatology data set # Authors # ------- # <NAME> # }}} # {{{ Add the transect index to the data set Parameters ---------- climatology : ``xarray.Dataset``` The MPAS climatology data set that has been remapped comparisonGridNames : {'latlon', 'antarctic'} The name of the comparison grid to use for remapping. 
season : str The name of the season to be masked Returns ------- climatology : ``xarray.Dataset``` The same data set with any custom fields added or modifications made # Authors # ------- # <NAME> # }}} Vertically interpolate a transect and write it to a unique file Parameters ---------- ds : ``xarray.Dataset`` The data set containing all transects before vertical interpolation transectIndex : int The index of the transect to extract dsObs : ``xarray.Dataset`` The obs dataset used if verticalComparisonGridName is 'obs' outFileName : str The name of the file to which the resulting data set should be written outObsFileName : str The name of the file to which the resulting obs data set should be written if it is interpolated # Authors # ------- # <NAME> # a defined vertical grid # remap each variable # remap each variable # }}} # }}} # {{{ A class for loading and manipulating transect observations Attributes ---------- config : ``MpasAnalysisConfigParser`` Configuration options obsFileNames : OrderedDict The names of transects and the file names of the corresponding observations for a transect horizontalResolution : str 'obs' for the obs as they are or a size in km if subdivision is desired. transectCollectionName : str A name that describes the collection of transects (e.g. the name of the collection of observations) used to name the destination "mesh" for regridding obsDatasets : OrderedDict A dictionary of observational datasets # Authors # ------- # <NAME> # {{{ Construct the object, setting the observations dictionary to None. Parameters ---------- config : ``MpasAnalysisConfigParser`` Configuration options obsFileNames : OrderedDict The names of transects and the file names of the corresponding observations for a transect horizontalResolution : str 'obs' for the obs as they are or a size in km if subdivision is desired. transectCollectionName : str A name that describes the collection of transects (e.g. 
the name of the collection of observations) used to name the destination "mesh" for regridding # Authors # ------- # <NAME> # {{{ Read in and set up the observations. Returns ------- obsDatasets : OrderedDict The observational dataset # Authors # ------- # <NAME> # make sure lat and lon are coordinates # }}} # {{{ read in the data sets for observations, and possibly rename some variables and dimensions Parameters ---------- fileName : str observation file name transectName : str transect name Returns ------- dsObs : ``xarray.Dataset`` The observational dataset # Authors # ------- # <NAME>-Davis # observations are expected to have horizontal dimension nPoints and # vertical dimension nz, as well as horizontal coordinates lat and lon # and vertical coordinate z. Override this function if these need to # be renamed from the observations file. # }}} # {{{ Given config options, the name of a field and a string identifying the months in a seasonal climatology, returns the full path for MPAS climatology files before and after remapping. Parameters ---------- transectName : str The name of the transect verticalComparisonGridName : {'obs', 'mpas'} or any str, optional The vertical grid name on which to compare MPAS data with observations. 'obs' indicates the locations of the original observations; 'mpas' is the vertical locations of MPAS points, remapped to the observation latitude/longitude. If any other, string, verticalComparisonGrid should be a 1D numpy array and this name should be a useful (and unique) description of that grid. Returns ------- fileName : str The path to the climatology file for the specified season. 
# Authors # ------- # <NAME> # }}} # {{{ Subdivide each segment of the transect so the horizontal resolution approximately matches the requested resolution # compute the great circle distance between these points # }}} # {{{ Subdivide each segment of the transect so the horizontal resolution approximately matches the requested resolution # compute the great circle distance between these points # interpolate fields without and with vertical dimension # }}} # {{{ Calculate the great circle distance in km between two points on the earth (specified in decimal degrees). Based on https://stackoverflow.com/a/4913653 # convert decimal degrees to radians # haversine formula # Radius of earth in kilometers. Use 3956 for miles # }}} # }}} # vim: foldmethod=marker ai ts=4 sts=4 et sw=4 ft=python | 1.83236 | 2 |
players/doraemon.py | shin-sforzando/PAC2020-RPS | 0 | 6617314 | from hand import Hand
from player import Player
class Doraemon(Player):
def next_hand(self):
return Hand.G
| from hand import Hand
from player import Player
class Doraemon(Player):
def next_hand(self):
return Hand.G
| none | 1 | 2.658276 | 3 | |
app.py | basakanayurt/DetoxAI | 0 | 6617315 | import streamlit as st
from detoxai.content_detector import *
st.title('Detect Toxicity in posts')
user_input = st.text_area("please put the text to be scanned" , 'type here')
st.subheader('Results')
max_len = 256
tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased', do_lower_case=True, add_special_tokens=True,
max_length=max_len, pad_to_max_length=True)
tokenizer.convert_ids_to_tokens(tokenizer(user_input)['input_ids'])
if st.checkbox("Show Tokens"):
st.json(tokenizer.convert_ids_to_tokens(tokenizer(user_input)['input_ids']))
if st.button('Analyze'):
# 'Starting the prediction...'
print(user_input)
model = AllToxicity()
data = model.predict([user_input])
print (data[["selfharm_pred","selfharm_prob"]])
print(data[["hatespeech_pred","hatespeech_prob"]])
print(data[["spam_pred","spam_prob"]])
print(data[["prediction", "probability"]])
if data['prediction'][0] == 0:
st.write("The post does not have any toxic content")
else:
st.write(data['prediction'][0], " detected with ", np.round(data['probability'][0]*100, 2) , ' % probability' )
| import streamlit as st
from detoxai.content_detector import *
st.title('Detect Toxicity in posts')
user_input = st.text_area("please put the text to be scanned" , 'type here')
st.subheader('Results')
max_len = 256
tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased', do_lower_case=True, add_special_tokens=True,
max_length=max_len, pad_to_max_length=True)
tokenizer.convert_ids_to_tokens(tokenizer(user_input)['input_ids'])
if st.checkbox("Show Tokens"):
st.json(tokenizer.convert_ids_to_tokens(tokenizer(user_input)['input_ids']))
if st.button('Analyze'):
# 'Starting the prediction...'
print(user_input)
model = AllToxicity()
data = model.predict([user_input])
print (data[["selfharm_pred","selfharm_prob"]])
print(data[["hatespeech_pred","hatespeech_prob"]])
print(data[["spam_pred","spam_prob"]])
print(data[["prediction", "probability"]])
if data['prediction'][0] == 0:
st.write("The post does not have any toxic content")
else:
st.write(data['prediction'][0], " detected with ", np.round(data['probability'][0]*100, 2) , ' % probability' )
| en | 0.582298 | # 'Starting the prediction...' | 2.790682 | 3 |
2021/advent2021_7.py | aatango/Advent-of-Code | 0 | 6617316 | """Advent of Code 2021, day 7: The Treachery of Whales
It's the naive approach (ie. search for the optinal solution),
while slow, it still executes within a reasonable timeframe.
"""
import sys
def main(input_stream: tuple[int]) -> int:
"""
Determine the horizontal position that the crabs can align to using the least fuel possible.
How much fuel must they spend to align to that position?
"""
# start with a large enough value for spent fuel, we will optimise this
spent_fuel: int = sys.maxsize
min_pos = min(input_stream)
max_pos = max(input_stream)
for test_pos in range(min_pos, max_pos + 1):
needed_fuel: int = 0
for pos in input_stream:
needed_fuel += abs(test_pos - pos)
if needed_fuel < spent_fuel:
spent_fuel = needed_fuel
return spent_fuel
def part_two(input_stream: tuple[int]) -> int:
"""
Determine the horizontal position that the crabs can align to using the least fuel possible.
How much fuel must they spend to align to that position?
Similar to main problem, but fuel costs is now the summation of the required movement steps.
"""
spent_fuel: int = sys.maxsize
min_pos = min(input_stream)
max_pos = max(input_stream)
for test_pos in range(min_pos, max_pos + 1):
needed_fuel: int = 0
for pos in input_stream:
movement = abs(test_pos - pos)
needed_fuel += sum(range(movement + 1))
if needed_fuel < spent_fuel:
spent_fuel = needed_fuel
return spent_fuel
if __name__ == "__main__":
with open("../../input", "r") as file:
INPUT_FILE = tuple(
[int(pos) for pos in file.read()[:-1].split(",")]
)
print("a: ", main(INPUT_FILE))
print("b: ", part_two(INPUT_FILE))
| """Advent of Code 2021, day 7: The Treachery of Whales
It's the naive approach (ie. search for the optinal solution),
while slow, it still executes within a reasonable timeframe.
"""
import sys
def main(input_stream: tuple[int]) -> int:
"""
Determine the horizontal position that the crabs can align to using the least fuel possible.
How much fuel must they spend to align to that position?
"""
# start with a large enough value for spent fuel, we will optimise this
spent_fuel: int = sys.maxsize
min_pos = min(input_stream)
max_pos = max(input_stream)
for test_pos in range(min_pos, max_pos + 1):
needed_fuel: int = 0
for pos in input_stream:
needed_fuel += abs(test_pos - pos)
if needed_fuel < spent_fuel:
spent_fuel = needed_fuel
return spent_fuel
def part_two(input_stream: tuple[int]) -> int:
"""
Determine the horizontal position that the crabs can align to using the least fuel possible.
How much fuel must they spend to align to that position?
Similar to main problem, but fuel costs is now the summation of the required movement steps.
"""
spent_fuel: int = sys.maxsize
min_pos = min(input_stream)
max_pos = max(input_stream)
for test_pos in range(min_pos, max_pos + 1):
needed_fuel: int = 0
for pos in input_stream:
movement = abs(test_pos - pos)
needed_fuel += sum(range(movement + 1))
if needed_fuel < spent_fuel:
spent_fuel = needed_fuel
return spent_fuel
if __name__ == "__main__":
with open("../../input", "r") as file:
INPUT_FILE = tuple(
[int(pos) for pos in file.read()[:-1].split(",")]
)
print("a: ", main(INPUT_FILE))
print("b: ", part_two(INPUT_FILE))
| en | 0.910358 | Advent of Code 2021, day 7: The Treachery of Whales It's the naive approach (ie. search for the optinal solution), while slow, it still executes within a reasonable timeframe. Determine the horizontal position that the crabs can align to using the least fuel possible. How much fuel must they spend to align to that position? # start with a large enough value for spent fuel, we will optimise this Determine the horizontal position that the crabs can align to using the least fuel possible. How much fuel must they spend to align to that position? Similar to main problem, but fuel costs is now the summation of the required movement steps. | 3.929722 | 4 |
json_syntax/action_v1.py | orsinium-forks/json-syntax | 9 | 6617317 | <filename>json_syntax/action_v1.py
from .errors import ErrorContext, err_ctx
from datetime import date, datetime, time, timedelta
from decimal import InvalidOperation
import math
import re
def check_parse_error(value, parser, error):
try:
parser(value)
except error:
return False
else:
return True
def check_isinst(value, typ):
return isinstance(value, typ)
def check_has_type(value, typ):
return type(value) == typ
def convert_decimal_str(value):
result = str(value)
if result == "sNaN":
raise InvalidOperation("Won't save signalling NaN")
return result
def convert_float(value):
value = float(value)
if math.isfinite(value):
return value
elif math.isnan(value):
return "NaN"
elif value < 0.0:
return "-Infinity"
else:
return "Infinity"
def check_float(value):
return (
isinstance(value, (int, float))
or isinstance(value, str)
and value.lower()
in ("nan", "inf", "infinity" "-inf", "-infinity", "+inf", "+infinity")
)
def convert_enum_str(value, typ):
return typ(value).name
def convert_none(value):
if value is not None:
raise ValueError("Expected None")
return None
def check_str_enum(value, typ):
try:
typ[value]
except (KeyError, TypeError):
return False
else:
return True
def convert_str_enum(value, typ):
return typ[value]
def pass_faux_enum(value, typ):
typ[value]
return value
if hasattr(datetime, "fromisoformat"):
convert_date = date.fromisoformat
convert_datetime = datetime.fromisoformat
convert_time = time.fromisoformat
else:
from dateutil.parser import isoparser
instance = isoparser(sep="T")
convert_date = instance.parse_isodate
convert_datetime = instance.isoparse
convert_time = instance.parse_isotime
del instance
def convert_timedelta_str(dur):
"Barebones support for storing a timedelta as an ISO8601 duration."
micro = ".{:06d}".format(dur.microseconds) if dur.microseconds else ""
return "P{:d}DT{:d}{}S".format(dur.days, dur.seconds, micro)
_iso8601_duration = re.compile(
r"^P(?!$)([-+]?\d+(?:[.,]\d+)?Y)?"
r"([-+]?\d+(?:[.,]\d+)?M)?"
r"([-+]?\d+(?:[.,]\d+)?W)?"
r"([-+]?\d+(?:[.,]\d+)?D)?"
r"(?:(T)(?=[0-9+-])"
r"([-+]?\d+(?:[.,]\d+)?H)?"
r"([-+]?\d+(?:[.,]\d+)?M)?"
r"([-+]?\d+(?:[.,]\d+)?S)?)?$"
)
_duration_args = {
"PW": "weeks",
"PD": "days",
"TH": "hours",
"TM": "minutes",
"TS": "seconds",
}
def convert_str_timedelta(dur):
if not isinstance(dur, str):
raise ValueError("Value was not a string.")
match = _iso8601_duration.match(dur.upper().replace(",", "."))
section = "P"
if not match:
raise ValueError("Value was not an ISO8601 duration.")
args = {}
for elem in match.groups():
if elem is None:
continue
if elem == "T":
section = "T"
continue
part = section + elem[-1]
value = float(elem[:-1])
if not value:
continue
if part in ("PY", "PM"):
raise ValueError("Year and month durations not supported")
args[_duration_args[part]] = value
return timedelta(**args)
def convert_optional(value, inner):
if value is None:
return None
return inner(value)
def check_optional(value, inner):
return value is None or inner(value)
def convert_collection(value, inner, con):
return con(
err_ctx("[{}]".format(i), lambda: inner(val)) for i, val in enumerate(value)
)
def check_collection(value, inner, con):
return isinstance(value, con) and all(
err_ctx("[{}]".format(i), lambda: inner(val)) for i, val in enumerate(value)
)
def convert_mapping(value, key, val, con):
return con(err_ctx(k, lambda: (key(k), val(v))) for k, v in value.items())
def check_mapping(value, key, val, con):
return isinstance(value, con) and all(
err_ctx(k, lambda: key(k) and val(v)) for k, v in value.items()
)
def convert_dict_to_attrs(value, pre_hook, inner_map, con):
value = pre_hook(value)
args = {}
for attr in inner_map:
with ErrorContext("[{!r}]".format(attr.name)):
try:
arg = value[attr.name]
except KeyError:
if attr.is_required:
raise KeyError("Missing key {!r}".format(attr.name)) from None
else:
args[attr.init_name] = attr.inner(arg)
return con(**args)
def convert_dict_to_dict(value, inner_map, con):
args = {}
for attr in inner_map:
with ErrorContext("[{!r}]".format(attr.name)):
try:
arg = value[attr.name]
except KeyError:
if attr.is_required:
raise KeyError("Missing key {!r}".format(attr.name)) from None
else:
args[attr.name] = attr.inner(arg)
return con(args)
def check_dict(value, inner_map, pre_hook):
value = pre_hook(value)
if not isinstance(value, dict):
return False
for attr in inner_map:
with ErrorContext("[{!r}]".format(attr.name)):
try:
arg = value[attr.name]
except KeyError:
if attr.is_required:
return False
else:
if not attr.inner(arg):
return False
return True
def convert_attrs_to_dict(value, post_hook, inner_map):
out = {}
for attr in inner_map:
with ErrorContext("." + attr.name):
field = getattr(value, attr.name)
if not attr.is_required and field == attr.default:
continue
out[attr.name] = attr.inner(field)
if post_hook is not None:
out = getattr(value, post_hook)(out)
return out
def convert_tuple_as_list(value, inner, con):
return con(
err_ctx("[{}]".format(i), lambda: cvt(val))
for i, (val, cvt) in enumerate(zip(value, inner))
)
def check_tuple_as_list(value, inner, con):
return (
isinstance(value, con)
and len(value) == len(inner)
and all(
err_ctx("[{}]".format(i), lambda: chk(val))
for i, (val, chk) in enumerate(zip(value, inner))
)
)
def check_union(value, steps):
    """True when any alternative's check accepts *value*."""
    for step, name in steps:
        if err_ctx(name, lambda: step(value)):
            return True
    return False
def convert_union(value, steps, typename):
for check, convert, name in steps:
with ErrorContext(name):
if check(value):
return convert(value)
raise ValueError("Expected value of type {} got {!r}".format(typename, value))
| <filename>json_syntax/action_v1.py
from .errors import ErrorContext, err_ctx
from datetime import date, datetime, time, timedelta
from decimal import InvalidOperation
import math
import re
def check_parse_error(value, parser, error):
    """Return True when *parser* accepts *value* without raising *error*.

    Exceptions other than *error* propagate to the caller unchanged.
    """
    try:
        parser(value)
        return True
    except error:
        return False
def check_isinst(value, typ):
    """Instance check that, like ``isinstance``, accepts subclasses of *typ*."""
    return isinstance(value, typ)
def check_has_type(value, typ):
    """Exact type check; unlike ``isinstance`` it rejects subclasses
    (e.g. a ``bool`` does not pass for ``int`` here)."""
    return type(value) == typ
def convert_decimal_str(value):
    """Serialize a Decimal as a string, refusing signaling NaNs.

    Signaling NaNs would raise on round-trip, so they are rejected here.
    The previous check compared against the exact string "sNaN" and missed
    negative ("-sNaN") and payload-carrying ("sNaN123") forms.
    """
    result = str(value)
    if "sNaN" in result:
        raise InvalidOperation("Won't save signalling NaN")
    return result
def convert_float(value):
    """Coerce *value* to float; non-finite values become their JSON-friendly
    string spellings ("NaN", "Infinity", "-Infinity")."""
    number = float(value)
    if math.isnan(number):
        return "NaN"
    if math.isinf(number):
        return "-Infinity" if number < 0.0 else "Infinity"
    return number
def check_float(value):
    """Check that *value* can be read as a JSON float.

    Accepts real numbers directly, plus the special string spellings used by
    ``convert_float`` for non-finite values. The original tuple was missing a
    comma, so implicit string concatenation fused "infinity" and "-inf" into
    the single bogus entry "infinity-inf", rejecting both valid spellings.
    """
    return isinstance(value, (int, float)) or (
        isinstance(value, str)
        and value.lower()
        in ("nan", "inf", "infinity", "-inf", "-infinity", "+inf", "+infinity")
    )
def convert_enum_str(value, typ):
    """Coerce *value* into enum *typ* and return the member's name string."""
    return typ(value).name
def convert_none(value):
    """Pass None through; any other value raises ValueError."""
    if value is None:
        return None
    raise ValueError("Expected None")
def check_str_enum(value, typ):
    """Return True if *value* names a member of enum *typ*."""
    try:
        typ[value]
        return True
    except (KeyError, TypeError):
        return False
def convert_str_enum(value, typ):
    """Look up the member of enum *typ* named *value* (KeyError if absent)."""
    return typ[value]
def pass_faux_enum(value, typ):
    """Validate that *value* names a member of *typ*, then return it unchanged."""
    typ[value]  # raises KeyError if the name is unknown
    return value
# Prefer the stdlib ISO-8601 parsers when available (Python 3.7+); otherwise
# fall back to python-dateutil's isoparser on older interpreters.
if hasattr(datetime, "fromisoformat"):
    convert_date = date.fromisoformat
    convert_datetime = datetime.fromisoformat
    convert_time = time.fromisoformat
else:
    from dateutil.parser import isoparser

    instance = isoparser(sep="T")
    convert_date = instance.parse_isodate
    convert_datetime = instance.isoparse
    convert_time = instance.parse_isotime
    del instance  # only the bound methods are needed at module level
def convert_timedelta_str(dur):
    """Barebones support for storing a timedelta as an ISO8601 duration."""
    if dur.microseconds:
        fraction = ".{:06d}".format(dur.microseconds)
    else:
        fraction = ""
    return "P{days:d}DT{secs:d}{frac}S".format(
        days=dur.days, secs=dur.seconds, frac=fraction
    )
# Regex for ISO8601 durations such as "P1DT2H3M4.5S". The capture groups are,
# in order: years, months, weeks, days, the literal "T" separator, hours,
# minutes, seconds. Fractions may use "." or ","; a leading sign is tolerated.
_iso8601_duration = re.compile(
    r"^P(?!$)([-+]?\d+(?:[.,]\d+)?Y)?"
    r"([-+]?\d+(?:[.,]\d+)?M)?"
    r"([-+]?\d+(?:[.,]\d+)?W)?"
    r"([-+]?\d+(?:[.,]\d+)?D)?"
    r"(?:(T)(?=[0-9+-])"
    r"([-+]?\d+(?:[.,]\d+)?H)?"
    r"([-+]?\d+(?:[.,]\d+)?M)?"
    r"([-+]?\d+(?:[.,]\d+)?S)?)?$"
)
# Maps a (section, designator) pair to a timedelta keyword; "M" is ambiguous
# in ISO8601, so the section prefix distinguishes months ("PM", unsupported)
# from minutes ("TM").
_duration_args = {
    "PW": "weeks",
    "PD": "days",
    "TH": "hours",
    "TM": "minutes",
    "TS": "seconds",
}
def convert_str_timedelta(dur):
    """Parse an ISO8601 duration string into a ``timedelta``.

    Raises ValueError for non-strings, strings that don't match the duration
    grammar, and year/month components (which have no fixed length).
    """
    if not isinstance(dur, str):
        raise ValueError("Value was not a string.")
    # Normalize: ISO8601 designators are case-insensitive and "," is a legal
    # decimal separator.
    match = _iso8601_duration.match(dur.upper().replace(",", "."))
    section = "P"
    if not match:
        raise ValueError("Value was not an ISO8601 duration.")
    args = {}
    for elem in match.groups():
        if elem is None:
            continue
        if elem == "T":
            # From here on, "M" means minutes rather than months.
            section = "T"
            continue
        # elem is like "12D": trailing designator letter, numeric prefix.
        part = section + elem[-1]
        value = float(elem[:-1])
        if not value:
            continue
        if part in ("PY", "PM"):
            raise ValueError("Year and month durations not supported")
        args[_duration_args[part]] = value
    return timedelta(**args)
def convert_optional(value, inner):
    """Run converter *inner* over *value* unless it is None."""
    if value is None:
        return None
    return inner(value)
def check_optional(value, inner):
    """Accept None outright; anything else must satisfy the *inner* check."""
    return True if value is None else inner(value)
def convert_collection(value, inner, con):
    """Convert each element with *inner* and pour the results into container *con*.

    Element errors are annotated with their index via ``err_ctx``.
    """
    def _convert(index, item):
        return err_ctx("[{}]".format(index), lambda: inner(item))

    return con(_convert(i, item) for i, item in enumerate(value))
def check_collection(value, inner, con):
    """True when *value* is a *con* instance and every element passes *inner*."""
    if not isinstance(value, con):
        return False
    return all(
        err_ctx("[{}]".format(i), lambda: inner(item))
        for i, item in enumerate(value)
    )
def convert_mapping(value, key, val, con):
    """Convert each (k, v) pair with the *key*/*val* converters into container *con*."""
    converted_pairs = (
        err_ctx(k, lambda: (key(k), val(v))) for k, v in value.items()
    )
    return con(converted_pairs)
def check_mapping(value, key, val, con):
    """True when *value* is a *con* and every pair passes the *key* and *val* checks."""
    if not isinstance(value, con):
        return False
    return all(
        err_ctx(k, lambda: key(k) and val(v)) for k, v in value.items()
    )
def convert_dict_to_attrs(value, pre_hook, inner_map, con):
    """Build an instance of *con* from a plain dict.

    Runs *pre_hook* on the raw value, converts each attribute described in
    *inner_map*, then invokes the constructor with keyword arguments.
    """
    value = pre_hook(value)
    args = {}
    for attr in inner_map:
        with ErrorContext("[{!r}]".format(attr.name)):
            try:
                arg = value[attr.name]
            except KeyError:
                # A missing key is only fatal for required attributes;
                # optional ones fall back to the constructor's default.
                if attr.is_required:
                    raise KeyError("Missing key {!r}".format(attr.name)) from None
            else:
                args[attr.init_name] = attr.inner(arg)
    return con(**args)
def convert_dict_to_dict(value, inner_map, con):
    """Convert a dict's known keys per *inner_map* and wrap the result in *con*.

    Unlike ``convert_dict_to_attrs``, *con* receives the converted dict as a
    single positional argument, and keys keep their external names.
    """
    args = {}
    for attr in inner_map:
        with ErrorContext("[{!r}]".format(attr.name)):
            try:
                arg = value[attr.name]
            except KeyError:
                # Missing optional keys are simply omitted from the output.
                if attr.is_required:
                    raise KeyError("Missing key {!r}".format(attr.name)) from None
            else:
                args[attr.name] = attr.inner(arg)
    return con(args)
def check_dict(value, inner_map, pre_hook):
    """Check that *value* (after *pre_hook*) is a dict whose keys satisfy *inner_map*.

    Required keys must be present and pass their inner check; optional keys
    may be absent, but if present must pass. Extra keys are ignored.
    """
    value = pre_hook(value)
    if not isinstance(value, dict):
        return False
    for attr in inner_map:
        with ErrorContext("[{!r}]".format(attr.name)):
            try:
                arg = value[attr.name]
            except KeyError:
                # Only required keys cause failure when absent.
                if attr.is_required:
                    return False
            else:
                if not attr.inner(arg):
                    return False
    return True
def convert_attrs_to_dict(value, post_hook, inner_map):
    """Serialize an attrs-style object to a dict using the converters in *inner_map*.

    Optional attributes still equal to their default are omitted. When
    *post_hook* names a method on *value*, the finished dict is passed
    through it before being returned.
    """
    out = {}
    for attr in inner_map:
        with ErrorContext("." + attr.name):
            field = getattr(value, attr.name)
            # Skip optional fields left at their default to keep output small.
            if not attr.is_required and field == attr.default:
                continue
            out[attr.name] = attr.inner(field)
    if post_hook is not None:
        out = getattr(value, post_hook)(out)
    return out
def convert_tuple_as_list(value, inner, con):
    """Convert a heterogeneous tuple: element i is converted by the i-th entry of *inner*."""
    def _convert(index, item, cvt):
        return err_ctx("[{}]".format(index), lambda: cvt(item))

    return con(
        _convert(i, item, cvt)
        for i, (item, cvt) in enumerate(zip(value, inner))
    )
def check_tuple_as_list(value, inner, con):
    """True when *value* is a *con* of the expected length and each slot passes
    its positional check from *inner*."""
    if not isinstance(value, con) or len(value) != len(inner):
        return False
    return all(
        err_ctx("[{}]".format(i), lambda: chk(item))
        for i, (item, chk) in enumerate(zip(value, inner))
    )
def check_union(value, steps):
    """True when any of the union's alternatives accepts *value*."""
    for step, name in steps:
        if err_ctx(name, lambda: step(value)):
            return True
    return False
def convert_union(value, steps, typename):
    """Convert *value* via the first union alternative whose check accepts it.

    *steps* is an iterable of (check, convert, name) triples tried in order;
    ValueError is raised when no alternative matches.
    """
    for check, convert, name in steps:
        with ErrorContext(name):
            if check(value):
                return convert(value)
    raise ValueError("Expected value of type {} got {!r}".format(typename, value))
| none | 1 | 2.541722 | 3 | |
insights/parsers/etcd_conf.py | skateman/insights-core | 1 | 6617318 | """
EtcdConf - file ``/etc/etcd/etcd.conf``
=======================================
"""
from insights.core import IniConfigFile
from insights.core.plugins import parser
from insights.core.filters import add_filter
from insights.specs import Specs
add_filter(Specs.etcd_conf, ["["])
@parser(Specs.etcd_conf)
class EtcdConf(IniConfigFile):
    """
    The EtcdConf class parses the file ``/etc/etcd/etcd.conf``. The
    etcd.conf is in the standard 'ini' format and is read by the base
    parser class `IniConfigFile`.

    Typical contents of the file look like::

        [member]
        ETCD_NAME=f05-h19-000-1029p.rdu2.scalelab.redhat.com
        ETCD_LISTEN_PEER_URLS=https://10.1.40.235:2380
        ETCD_DATA_DIR=/var/lib/etcd/
        ETCD_HEARTBEAT_INTERVAL=500
        ETCD_ELECTION_TIMEOUT=2500
        ETCD_LISTEN_CLIENT_URLS=https://10.1.40.235:2379

        [auth]
        ETCD_AUTH_TOKEN=simple

    Examples:
        >>> type(conf)
        <class 'insights.parsers.etcd_conf.EtcdConf'>
        >>> conf.get('auth', 'ETCD_AUTH_TOKEN') == 'simple'
        True
        >>> conf.has_option('member', 'ETCD_NAME')
        True
    """
    # No overrides needed: all parsing behaviour is inherited from
    # IniConfigFile; this subclass exists to bind the etcd_conf spec.
    pass
| """
EtcdConf - file ``/etc/etcd/etcd.conf``
=======================================
"""
from insights.core import IniConfigFile
from insights.core.plugins import parser
from insights.core.filters import add_filter
from insights.specs import Specs
add_filter(Specs.etcd_conf, ["["])
@parser(Specs.etcd_conf)
class EtcdConf(IniConfigFile):
"""
The EtcdConf class parses the file ``/etc/etcd/etcd.conf``. The
etcd.conf is in the standard 'ini' format and is read by the base
parser class `IniConfigFile`.
Typical contents of the file look like::
[member]
ETCD_NAME=f05-h19-000-1029p.rdu2.scalelab.redhat.com
ETCD_LISTEN_PEER_URLS=https://10.1.40.235:2380
ETCD_DATA_DIR=/var/lib/etcd/
ETCD_HEARTBEAT_INTERVAL=500
ETCD_ELECTION_TIMEOUT=2500
ETCD_LISTEN_CLIENT_URLS=https://10.1.40.235:2379
[auth]
ETCD_AUTH_TOKEN=simple
Examples:
>>> type(conf)
<class 'insights.parsers.etcd_conf.EtcdConf'>
>>> conf.get('auth', 'ETCD_AUTH_TOKEN') == 'simple'
True
>>> conf.has_option('member', 'ETCD_NAME')
True
"""
pass
| en | 0.50212 | EtcdConf - file ``/etc/etcd/etcd.conf`` ======================================= The EtcdConf class parses the file ``/etc/etcd/etcd.conf``. The etcd.conf is in the standard 'ini' format and is read by the base parser class `IniConfigFile`. Typical contents of the file look like:: [member] ETCD_NAME=f05-h19-000-1029p.rdu2.scalelab.redhat.com ETCD_LISTEN_PEER_URLS=https://10.1.40.235:2380 ETCD_DATA_DIR=/var/lib/etcd/ ETCD_HEARTBEAT_INTERVAL=500 ETCD_ELECTION_TIMEOUT=2500 ETCD_LISTEN_CLIENT_URLS=https://10.1.40.235:2379 [auth] ETCD_AUTH_TOKEN=simple Examples: >>> type(conf) <class 'insights.parsers.etcd_conf.EtcdConf'> >>> conf.get('auth', 'ETCD_AUTH_TOKEN') == 'simple' True >>> conf.has_option('member', 'ETCD_NAME') True | 2.320992 | 2 |
shift_detector/utils/errors.py | hpi-bp1819-naumann/shift-detector | 3 | 6617319 | class InsufficientDataError(Exception):
def __init__(self, actual_size, expected_size, message=None):
if message is None:
message = 'The input data is insufficient for the column type heuristics to work. ' \
'Only {actual} row(s) were passed. Please pass at least {expected} rows.'\
.format(actual=actual_size, expected=expected_size)
super().__init__(message)
self.actual_size = actual_size
self.expected_size = expected_size
class UnknownMetadataReturnColumnTypeError(Exception):
    """Raised when a metadata type reports an unrecognized return column type."""

    def __init__(self, mdtype, message=None):
        if message is None:
            message = (
                'Return column type {type} of {metadata} is unknown. '
                'Should be numerical or categorical.'
            ).format(type=mdtype.metadata_return_type(), metadata=mdtype)
        super().__init__(message)
        self.mdtype = mdtype
| class InsufficientDataError(Exception):
def __init__(self, actual_size, expected_size, message=None):
if message is None:
message = 'The input data is insufficient for the column type heuristics to work. ' \
'Only {actual} row(s) were passed. Please pass at least {expected} rows.'\
.format(actual=actual_size, expected=expected_size)
super().__init__(message)
self.actual_size = actual_size
self.expected_size = expected_size
class UnknownMetadataReturnColumnTypeError(Exception):
def __init__(self, mdtype, message=None):
if message is None:
message = 'Return column type {type} of {metadata} is unknown. Should be numerical or categorical.'\
.format(type=mdtype.metadata_return_type(), metadata=mdtype)
super().__init__(message)
self.mdtype = mdtype
| none | 1 | 2.935443 | 3 | |
visionapi.py | deepakkumar1984/ml-api | 60 | 6617320 | <reponame>deepakkumar1984/ml-api<filename>visionapi.py
"""
Routes and views for the flask application.
"""
from flask import jsonify
from flask import request
from datetime import datetime
from Interface import app, utility, logmgr, projectmgr, constants, modelcache
from vis import objcls, objdet, cvmgr
@app.route('/api/vis/create', methods=['POST'])
def visioncreate():
    """Create (or upsert) a vision service from the posted JSON definition."""
    code, message = 200, "Success"
    try:
        payload = request.get_json()
        projectmgr.UpsertService(payload["servicename"], constants.ServiceTypes.Vision, payload)
    except Exception as err:
        code, message = 500, str(err)
    return jsonify({"statuscode": code, "message": message})
@app.route('/api/vis/update/<name>', methods=['POST'])
def visionupdate(name):
    """Validate that the named vision service exists, then upsert it from the posted JSON."""
    code, message = 200, "Success"
    try:
        payload = request.get_json()
        projectmgr.ValidateServiceExists(name, constants.ServiceTypes.Vision)
        projectmgr.UpsertService(name, constants.ServiceTypes.Vision, payload)
    except Exception as err:
        code, message = 500, str(err)
    return jsonify({"statuscode": code, "message": message})
@app.route('/api/vis/delete/<name>', methods=['POST'])
def visiondelete(name):
    """Delete the named vision service after confirming it exists."""
    code, message = 200, "Success"
    try:
        projectmgr.ValidateServiceExists(name, constants.ServiceTypes.Vision)
        projectmgr.DeleteService(name, constants.ServiceTypes.Vision)
    except Exception as err:
        code, message = 500, str(err)
    return jsonify({"statuscode": code, "message": message})
@app.route('/api/vis/predict/<name>', methods=['POST'])
def visionpredict(name):
    """Run the named vision service (classify / detect / face / OCR) on an image.

    The request JSON must contain "imagepath"; the service definition decides
    which pipeline runs. Models are fetched from the cache and loaded lazily.
    """
    message = "Success"
    code = 200
    # Initialize up front: previously `result` was assigned only inside the
    # try block, so an early failure (e.g. unknown service name) raised a
    # NameError at the final jsonify call instead of returning the 500 payload.
    result = {}
    start = datetime.utcnow()
    try:
        data = request.get_json()
        projectmgr.ValidateServiceExists(name, constants.ServiceTypes.Vision)
        servicejson = utility.getServiceJson(name, constants.ServiceTypes.Vision)
        imagepath = data['imagepath']
        if servicejson["type"] == "cls":
            target_x = servicejson['options']['target_size_x']
            target_y = servicejson['options']['target_size_y']
            model_name = servicejson['options']['model']
            model = modelcache.get(constants.ServiceTypes.Vision, name)
            if model is None:
                model = objcls.loadModel(model_name, target_x, target_y)
                modelcache.store(constants.ServiceTypes.Vision, name, model)
            result = objcls.predict(imagepath, target_x, target_y, model_name, model)
        elif servicejson["type"] == "det":
            model_name = servicejson['options']['model']
            isgpu = servicejson['options']['gpu']
            # Detection models are cached by model name, not service name.
            model = modelcache.get(constants.ServiceTypes.Vision, model_name)
            if model is None:
                model = objdet.loadModel(model_name, 10, isgpu)
                modelcache.store(constants.ServiceTypes.Vision, model_name, model)
            result = objdet.predict(imagepath, model)
        elif servicejson["type"] == "face":
            result = cvmgr.detectfaces(imagepath)
        elif servicejson["type"] == "ocr":
            preprocess = "thresh"
            if "preprocess" in servicejson["options"]:
                preprocess = servicejson["options"]["preprocess"]
            result = cvmgr.extracttext(imagepath, preprocess)
        logmgr.LogPredSuccess(name, constants.ServiceTypes.Vision, start)
    except Exception as e:
        code = 500
        message = str(e)
        logmgr.LogPredError(name, constants.ServiceTypes.Vision, start, message)
    return jsonify({"statuscode": code, "message": message, "result": result})
@app.route('/api/vis/download/<name>', methods=['POST'])
def downloadmodels(name):
    """Pre-download / warm the named classification model (224x224 input)."""
    objcls.loadModel(name, 224, 224)
    # Flask view functions must return a response; the original returned
    # None, which raises a TypeError at request time.
    return jsonify({"statuscode": 200, "message": "Success"})
| """
Routes and views for the flask application.
"""
from flask import jsonify
from flask import request
from datetime import datetime
from Interface import app, utility, logmgr, projectmgr, constants, modelcache
from vis import objcls, objdet, cvmgr
@app.route('/api/vis/create', methods=['POST'])
def visioncreate():
message = "Success"
code = 200
try:
rjson = request.get_json()
name = rjson["servicename"]
projectmgr.UpsertService(name, constants.ServiceTypes.Vision, rjson)
except Exception as e:
code = 500
message = str(e)
return jsonify({"statuscode": code, "message": message})
@app.route('/api/vis/update/<name>', methods=['POST'])
def visionupdate(name):
message = "Success"
code = 200
try:
rjson = request.get_json()
projectmgr.ValidateServiceExists(name, constants.ServiceTypes.Vision)
projectmgr.UpsertService(name, constants.ServiceTypes.Vision, rjson)
except Exception as e:
code = 500
message = str(e)
return jsonify({"statuscode": code, "message": message})
@app.route('/api/vis/delete/<name>', methods=['POST'])
def visiondelete(name):
message = "Success"
code = 200
try:
projectmgr.ValidateServiceExists(name, constants.ServiceTypes.Vision)
projectmgr.DeleteService(name, constants.ServiceTypes.Vision)
except Exception as e:
code = 500
message = str(e)
return jsonify({"statuscode": code, "message": message})
@app.route('/api/vis/predict/<name>', methods=['POST'])
def visionpredict(name):
message = "Success"
code = 200
start = datetime.utcnow()
try:
data = request.get_json()
projectmgr.ValidateServiceExists(name, constants.ServiceTypes.Vision)
servicejson = utility.getServiceJson(name, constants.ServiceTypes.Vision)
result = {}
imagepath = data['imagepath']
if servicejson["type"] == "cls":
target_x = servicejson['options']['target_size_x']
target_y = servicejson['options']['target_size_y']
model_name = servicejson['options']['model']
model = modelcache.get(constants.ServiceTypes.Vision, name)
if model is None:
model = objcls.loadModel(model_name, target_x, target_y)
modelcache.store(constants.ServiceTypes.Vision, name, model)
result = objcls.predict(imagepath, target_x, target_y, model_name, model)
elif servicejson["type"] == "det":
model_name = servicejson['options']['model']
isgpu = servicejson['options']['gpu']
model = modelcache.get(constants.ServiceTypes.Vision, model_name)
if model is None:
model = objdet.loadModel(model_name, 10, isgpu)
modelcache.store(constants.ServiceTypes.Vision, model_name, model)
result = objdet.predict(imagepath, model)
elif servicejson["type"] == "face":
result = cvmgr.detectfaces(imagepath)
elif servicejson["type"] == "ocr":
preprocess = "thresh"
if "preprocess" in servicejson["options"]:
preprocess = servicejson["options"]["preprocess"]
result = cvmgr.extracttext(imagepath, preprocess)
logmgr.LogPredSuccess(name, constants.ServiceTypes.Vision, start)
except Exception as e:
code = 500
message = str(e)
logmgr.LogPredError(name, constants.ServiceTypes.Vision, start, message)
return jsonify({"statuscode": code, "message": message, "result": result})
@app.route('/api/vis/download/<name>', methods=['POST'])
def downloadmodels(name):
objcls.loadModel(name, 224, 224) | en | 0.907939 | Routes and views for the flask application. | 2.628695 | 3 |
tests/mnist/mnist_dense.py | imandr/gradnet | 0 | 6617321 | <reponame>imandr/gradnet
from gradnet import Input, Model
from gradnet.layers import Dense
from gradnet.activations import get_activation
from gradnet.optimizers import get_optimizer
from gradnet.losses import Loss
from gradnet.metrics import get_metric
import numpy as np
class Callback(object):
    """Prints training progress every *print_every* samples."""

    def __init__(self, print_every=5000, alpha=0.1):
        # `alpha` is accepted but currently unused (presumably a smoothing
        # factor for the running averages -- TODO confirm before relying on it).
        self.RunningLoss = self.RunningAccuracy = None
        self.NextPrint = self.PrintEvery = print_every

    def train_batch_end(self, nsamples, loss_values, metrics):
        """Report loss/accuracy once the sample count crosses the next threshold."""
        if nsamples < self.NextPrint:
            return
        print("nsamples:", nsamples, " cce loss:", loss_values["cce"], " accuracy:", metrics[0])
        self.NextPrint += self.PrintEvery
from tensorflow.keras.datasets import mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
def one_hot(labels, n):
    """Return a (len(labels), n) float array with 1.0 in column ``labels[i]`` of row i.

    Labels outside ``range(n)`` simply leave their rows all-zero.
    """
    encoded = np.zeros((len(labels), n))
    for cls in range(n):
        encoded[labels == cls, cls] = 1.0
    return encoded
x_train = (x_train/256.0).reshape((-1, 28*28))
x_test = (x_test/256.0).reshape((-1, 28*28))
n_train = len(x_train)
y_train = one_hot(y_train, 10)
y_test = one_hot(y_test, 10)
np.set_printoptions(precision=4, suppress=True)
sgd = get_optimizer("SGD", learning_rate=0.01, momentum=0.5)
adam = get_optimizer("adam")
#ad = get_optimizer("ad", learning_rate=0.1)
accuracy = get_metric("accuracy")
def create_model():
    """Build a 784 -> 1024(relu) -> 10(softmax) classifier with a CCE loss attached."""
    pixels = Input((28*28,))
    hidden = Dense(1024, name="dense1", activation="relu")(pixels)
    probs = Dense(10, activation="softmax", name="top")(hidden)
    net = Model([pixels], [probs])
    net.add_loss(Loss("cce", probs), name="cce")
    return net
model = create_model()
model.compile(adam, metrics=[accuracy])
mbsize = 100
for epoch in range(10):
#print("main: x:", x_train[:3], " y:", y_train[:3])
model.fit(x_train, y_train, batch_size=mbsize, metrics=[accuracy], callbacks=[Callback()])
y = model.compute(x_test)
y_ = y_test
acc = accuracy(y_test, y[0])
print("test accuracy:", acc, " losses:", model.LossValues)
| from gradnet import Input, Model
from gradnet.layers import Dense
from gradnet.activations import get_activation
from gradnet.optimizers import get_optimizer
from gradnet.losses import Loss
from gradnet.metrics import get_metric
import numpy as np
class Callback(object):
def __init__(self, print_every=5000, alpha=0.1):
self.RunningLoss = self.RunningAccuracy = None
self.NextPrint = self.PrintEvery = print_every
def train_batch_end(self, nsamples, loss_values, metrics):
if nsamples >= self.NextPrint:
print("nsamples:", nsamples, " cce loss:", loss_values["cce"], " accuracy:", metrics[0])
self.NextPrint += self.PrintEvery
from tensorflow.keras.datasets import mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
def one_hot(labels, n):
out = np.zeros((len(labels), n))
for i in range(n):
out[labels==i, i] = 1.0
return out
x_train = (x_train/256.0).reshape((-1, 28*28))
x_test = (x_test/256.0).reshape((-1, 28*28))
n_train = len(x_train)
y_train = one_hot(y_train, 10)
y_test = one_hot(y_test, 10)
np.set_printoptions(precision=4, suppress=True)
sgd = get_optimizer("SGD", learning_rate=0.01, momentum=0.5)
adam = get_optimizer("adam")
#ad = get_optimizer("ad", learning_rate=0.1)
accuracy = get_metric("accuracy")
def create_model():
inp = Input((28*28,))
dense1 = Dense(1024, name="dense1", activation="relu")(inp)
probs = Dense(10, activation="softmax", name="top")(dense1)
model = Model([inp], [probs])
model.add_loss(Loss("cce", probs), name="cce")
return model
model = create_model()
model.compile(adam, metrics=[accuracy])
mbsize = 100
for epoch in range(10):
#print("main: x:", x_train[:3], " y:", y_train[:3])
model.fit(x_train, y_train, batch_size=mbsize, metrics=[accuracy], callbacks=[Callback()])
y = model.compute(x_test)
y_ = y_test
acc = accuracy(y_test, y[0])
print("test accuracy:", acc, " losses:", model.LossValues) | en | 0.421366 | #ad = get_optimizer("ad", learning_rate=0.1) #print("main: x:", x_train[:3], " y:", y_train[:3]) | 2.610367 | 3 |
src/Cheese/cheeseRepository.py | KubaBoi/CheeseFramework | 2 | 6617322 | #cheese
from datetime import datetime
import inspect
import re
from Cheese.metadata import Metadata
from Cheese.cheeseModel import CheeseModel
from Cheese.resourceManager import ResMan
from Cheese.database import Database
class CheeseRepository:
"""
```CheeseRepository``` is static class for communication with database
"""
testing = False
@classmethod
def model(cls) -> CheeseModel:
"""
return ```CheeseModel``` with ```Primary key```, ```modelName``` and ```scheme```
"""
repository = Metadata.getRepositoryFromClass(cls.__name__)
modelName = Metadata.getModel(repository)
scheme = Metadata.getScheme(repository)
model = CheeseModel(modelName, scheme)
for sch in scheme:
if (sch == "id"):
setattr(model, sch, cls.findNewId())
else:
setattr(model, sch, "")
return model
@classmethod
def className(cls) -> str:
"""
return string with name of class
"""
return cls.__name__
@classmethod
def findAll(cls) -> list:
"""
return whole table of database as list of ```CheeseModel```
"""
return CheeseRepository.query(cls.__name__)
@classmethod
def find(cls, primaryKey) -> CheeseModel:
"""
return one ```CheeseModel``` by ```Primary key```
"""
return CheeseRepository.query(cls.__name__, primaryKey=primaryKey)
@classmethod
def findBy(cls, columnName, value) -> list:
"""
return list of ```CheeseModel```
```columnName``` name of column for filtering
```value``` value of ```column```
example:
```
columnName = "age"
value = 15
->
SQL: "... WHERE age = 15 ..."
```
"""
return CheeseRepository.query(cls.__name__, columnName="columnName-" + columnName, value=value)
@classmethod
def findOneBy(cls, columnName, value) -> CheeseModel:
"""
return one ```CheeseModel``` by ```columnName```
```columnName``` name of column for filtering
```value``` value of ```column```
example:
```
columnName = "age"
value = 15
->
SQL: "... WHERE age = 15 ..."
```
"""
return CheeseRepository.query(cls.__name__, columnName="columnName-" + columnName, value=value)
@classmethod
def findNewId(cls) -> int:
"""
find new available ```Primary key```
"""
return CheeseRepository.query(cls.__name__)+1
@classmethod
def save(cls, obj) -> bool:
"""
creates new row in database
```obj``` is ```CheeseModel``` object
"""
return CheeseRepository.query(cls.__name__, obj=obj)
@classmethod
def update(cls, obj) -> bool:
"""
updates row in database
```obj``` is ```CheeseModel``` object
"""
return CheeseRepository.query(cls.__name__, obj=obj)
@classmethod
def delete(cls, obj) -> bool:
"""
deletes row from database
```obj``` is ```CheeseModel``` object
"""
return CheeseRepository.query(cls.__name__, obj=obj)
# STATIC METHODS
@staticmethod
def startTesting(mockManager):
"""
sets repository testing enviroment
```mockManager``` is instance of ```MockManager``` used by testing
"""
CheeseRepository.mockManager = mockManager
CheeseRepository.testing = True
@staticmethod
def stopTesting():
"""
stop repository testing enviroment
"""
CheeseRepository.testing = False
@staticmethod
def query(userRepository="", **kwargs):
"""
Access point to database. Returns database output.
```userRepository``` is string name of used repository
```**kwargs``` is ```dict``` of arguments for SQL request
"""
if (userRepository == ""):
userRepository = CheeseRepository.findUserRepository()
repository = Metadata.getRepository(userRepository)
else:
repository = Metadata.getRepositoryFromClass(userRepository)
methodName = CheeseRepository.findUserMethod()
if (CheeseRepository.testing):
return CheeseRepository.mockManager.returnMock(userRepository, methodName, kwargs)
method = Metadata.getMethod(repository, methodName)
query = False
if ("QUERY" in method):
preparedSql = method["QUERY"]
query = True
else:
preparedSql = method["COMMIT"]
variables = CheeseRepository.getVariables(preparedSql)
for key, value in kwargs.items():
arg = CheeseRepository.getTypeOf(value, variables, key, repository["DBSCHEME"])
if (type(arg) is list):
for a in arg:
index = 0
while True:
index = preparedSql.find(":", index)
if (index == -1): break
newIndex = index+1
argName = ""
while (re.search(r"[; )]", preparedSql[newIndex]) == None):
newIndex += 1
if (newIndex >= len(preparedSql)):
newIndex -= 1
argName = preparedSql[index:newIndex]
break
argName = preparedSql[index:newIndex]
if (argName[1:] == a[1]):
break
index += 1
preparedSql = preparedSql[0:index] + a[0] + preparedSql[newIndex:]
else:
preparedSql = preparedSql.replace(f":{key}", arg)
preparedSql = preparedSql.replace("*", Metadata.getRawScheme(repository))
if (query):
return CheeseRepository.queryType(preparedSql, method, repository)
else:
return CheeseRepository.commitType(preparedSql)
    @staticmethod
    def queryType(preparedSql, method, repository):
        """Run a SELECT-style statement and shape the result per method["RETURN"].

        RETURN kinds: "raw" -> rows as fetched, "num" -> int of the first
        cell (0 for NULL), "bool" -> truthiness of the first cell,
        "one" -> a single CheeseModel (or None), "array" -> list of
        CheeseModels, one per row.
        """
        db = Database()
        response = db.query(preparedSql)
        db.done()
        if (method["RETURN"] == "raw"):
            return response
        elif (method["RETURN"] == "num"):
            if (response[0][0] == None): return 0
            return int(response[0][0])
        elif (method["RETURN"] == "bool"):
            return bool(int(response[0][0]))
        elif (method["RETURN"] == "one"):
            return CheeseRepository.toModel(repository, response)
        elif (method["RETURN"] == "array"):
            array = []
            for item in response:
                array.append(CheeseRepository.toModel(repository, [item]))
            return array
    @staticmethod
    def commitType(preparedSql):
        """Execute a data-modifying statement and report success.

        NOTE(review): always returns True -- failures are assumed to surface
        as exceptions from Database.commit; confirm that contract.
        """
        db = Database()
        db.commit(preparedSql)
        db.done()
        return True
    @staticmethod
    def toModel(repository, data):
        """Map the first row of a DB result set onto a fresh CheeseModel.

        Returns None when *data* is empty; extra rows are ignored.
        """
        if (len(data) == 0):
            return None
        modelName = Metadata.getModel(repository)
        scheme = Metadata.getScheme(repository)
        model = CheeseModel(modelName, scheme)
        model.toModel(data[0])
        return model
# finds name of user-made repository
@staticmethod
def findUserRepository():
curframe = inspect.currentframe()
calframe = inspect.getouterframes(curframe, 2)
userRepository = ResMan.getFileName(calframe[2].filename).replace(".py", "")
return userRepository
# finds name of method from user-made repository
@staticmethod
def findUserMethod():
curframe = inspect.currentframe()
calframe = inspect.getouterframes(curframe, 2)
userMethod = ResMan.getFileName(calframe[2][3])
return userMethod
# creates array of variables from sql
# ascii from 48-57 (numbers) and 65-90 (big letters) and 97-122 (small letters)
# or 46 (.) or 95 (_)
    @staticmethod
    def getVariables(sql):
        """Extract the ``:name`` placeholders from a prepared SQL string.

        A variable starts at ':' and consumes ASCII letters, digits, '.' and
        '_'. Names are returned in order of appearance, without the colon.
        """
        variables = []
        newVar = None
        for ch in sql:
            ordCh = ord(ch)
            if (ch == ":" and newVar == None):
                # ':' opens a new variable (only when not already inside one).
                newVar = ""
            elif (((ordCh >= 48 and ordCh <= 57) or
                (ordCh >= 65 and ordCh <= 90) or
                (ordCh >= 97 and ordCh <= 122) or
                ordCh == 46 or ordCh == 95)
                and newVar != None):
                newVar += ch
            elif (newVar != None):
                # Any other character terminates the current variable.
                variables.append(newVar)
                newVar = None
        if (newVar != None):
            # The SQL ended while still inside a variable.
            variables.append(newVar)
        return variables
# convert arguments
@staticmethod
def getTypeOf(arg, variables=None, key=None, scheme=None):
if (arg == None):
return "NULL"
elif (type(arg) is str):
if (len(arg) == 0): return "''"
if (arg[-1] != "\'"
and arg[-1] != ")"
and not arg.endswith("DESC")
and not arg.endswith("ASC")):
if (arg.startswith("columnName-")):
return arg.replace("columnName-", "")
else:
arg = arg.replace("'", "''")
return f"\'{arg}\'"
else:
return str(arg)
elif (type(arg) is list):
return "(" + ",".join(CheeseRepository.getTypeOf(arg)) + ")"
elif (type(arg) is datetime):
return "'" + datetime.strftime(arg, "%Y-%m-%dT%H:%M:%S") + "'"
elif (isinstance(arg, CheeseModel)):
ret = []
for var in variables:
spl = var.split(".")
if (spl[0] == key):
if (len(spl) >= 2):
ret.append((CheeseRepository.getTypeOf(getattr(arg, spl[1])), var))
else:
schemeArr = scheme.replace(")", "").replace("(", "").split(",")
newScheme = "("
for attr in schemeArr:
attr = attr.strip()
newScheme += CheeseRepository.getTypeOf(getattr(arg, attr)) + ","
newScheme = newScheme[:-1] + ")"
ret.append((newScheme, var))
return ret
else:
return str(arg)
| #cheese
from datetime import datetime
import inspect
import re
from Cheese.metadata import Metadata
from Cheese.cheeseModel import CheeseModel
from Cheese.resourceManager import ResMan
from Cheese.database import Database
class CheeseRepository:
"""
```CheeseRepository``` is static class for communication with database
"""
testing = False
@classmethod
def model(cls) -> CheeseModel:
"""
return ```CheeseModel``` with ```Primary key```, ```modelName``` and ```scheme```
"""
repository = Metadata.getRepositoryFromClass(cls.__name__)
modelName = Metadata.getModel(repository)
scheme = Metadata.getScheme(repository)
model = CheeseModel(modelName, scheme)
for sch in scheme:
if (sch == "id"):
setattr(model, sch, cls.findNewId())
else:
setattr(model, sch, "")
return model
@classmethod
def className(cls) -> str:
"""
return string with name of class
"""
return cls.__name__
@classmethod
def findAll(cls) -> list:
"""
return whole table of database as list of ```CheeseModel```
"""
return CheeseRepository.query(cls.__name__)
@classmethod
def find(cls, primaryKey) -> CheeseModel:
"""
return one ```CheeseModel``` by ```Primary key```
"""
return CheeseRepository.query(cls.__name__, primaryKey=primaryKey)
@classmethod
def findBy(cls, columnName, value) -> list:
"""
return list of ```CheeseModel```
```columnName``` name of column for filtering
```value``` value of ```column```
example:
```
columnName = "age"
value = 15
->
SQL: "... WHERE age = 15 ..."
```
"""
return CheeseRepository.query(cls.__name__, columnName="columnName-" + columnName, value=value)
@classmethod
def findOneBy(cls, columnName, value) -> CheeseModel:
"""
return one ```CheeseModel``` by ```columnName```
```columnName``` name of column for filtering
```value``` value of ```column```
example:
```
columnName = "age"
value = 15
->
SQL: "... WHERE age = 15 ..."
```
"""
return CheeseRepository.query(cls.__name__, columnName="columnName-" + columnName, value=value)
@classmethod
def findNewId(cls) -> int:
"""
find new available ```Primary key```
"""
return CheeseRepository.query(cls.__name__)+1
@classmethod
def save(cls, obj) -> bool:
"""
creates new row in database
```obj``` is ```CheeseModel``` object
"""
return CheeseRepository.query(cls.__name__, obj=obj)
@classmethod
def update(cls, obj) -> bool:
"""
updates row in database
```obj``` is ```CheeseModel``` object
"""
return CheeseRepository.query(cls.__name__, obj=obj)
@classmethod
def delete(cls, obj) -> bool:
"""
deletes row from database
```obj``` is ```CheeseModel``` object
"""
return CheeseRepository.query(cls.__name__, obj=obj)
# STATIC METHODS
@staticmethod
def startTesting(mockManager):
"""
sets repository testing enviroment
```mockManager``` is instance of ```MockManager``` used by testing
"""
CheeseRepository.mockManager = mockManager
CheeseRepository.testing = True
@staticmethod
def stopTesting():
"""
stop repository testing enviroment
"""
CheeseRepository.testing = False
@staticmethod
def query(userRepository="", **kwargs):
"""
Access point to database. Returns database output.
```userRepository``` is string name of used repository
```**kwargs``` is ```dict``` of arguments for SQL request
"""
if (userRepository == ""):
userRepository = CheeseRepository.findUserRepository()
repository = Metadata.getRepository(userRepository)
else:
repository = Metadata.getRepositoryFromClass(userRepository)
methodName = CheeseRepository.findUserMethod()
if (CheeseRepository.testing):
return CheeseRepository.mockManager.returnMock(userRepository, methodName, kwargs)
method = Metadata.getMethod(repository, methodName)
query = False
if ("QUERY" in method):
preparedSql = method["QUERY"]
query = True
else:
preparedSql = method["COMMIT"]
variables = CheeseRepository.getVariables(preparedSql)
for key, value in kwargs.items():
arg = CheeseRepository.getTypeOf(value, variables, key, repository["DBSCHEME"])
if (type(arg) is list):
for a in arg:
index = 0
while True:
index = preparedSql.find(":", index)
if (index == -1): break
newIndex = index+1
argName = ""
while (re.search(r"[; )]", preparedSql[newIndex]) == None):
newIndex += 1
if (newIndex >= len(preparedSql)):
newIndex -= 1
argName = preparedSql[index:newIndex]
break
argName = preparedSql[index:newIndex]
if (argName[1:] == a[1]):
break
index += 1
preparedSql = preparedSql[0:index] + a[0] + preparedSql[newIndex:]
else:
preparedSql = preparedSql.replace(f":{key}", arg)
preparedSql = preparedSql.replace("*", Metadata.getRawScheme(repository))
if (query):
return CheeseRepository.queryType(preparedSql, method, repository)
else:
return CheeseRepository.commitType(preparedSql)
@staticmethod
def queryType(preparedSql, method, repository):
"""
"""
db = Database()
response = db.query(preparedSql)
db.done()
if (method["RETURN"] == "raw"):
return response
elif (method["RETURN"] == "num"):
if (response[0][0] == None): return 0
return int(response[0][0])
elif (method["RETURN"] == "bool"):
return bool(int(response[0][0]))
elif (method["RETURN"] == "one"):
return CheeseRepository.toModel(repository, response)
elif (method["RETURN"] == "array"):
array = []
for item in response:
array.append(CheeseRepository.toModel(repository, [item]))
return array
@staticmethod
def commitType(preparedSql):
db = Database()
db.commit(preparedSql)
db.done()
return True
@staticmethod
def toModel(repository, data):
if (len(data) == 0):
return None
modelName = Metadata.getModel(repository)
scheme = Metadata.getScheme(repository)
model = CheeseModel(modelName, scheme)
model.toModel(data[0])
return model
# finds name of user-made repository
@staticmethod
def findUserRepository():
curframe = inspect.currentframe()
calframe = inspect.getouterframes(curframe, 2)
userRepository = ResMan.getFileName(calframe[2].filename).replace(".py", "")
return userRepository
# finds name of method from user-made repository
@staticmethod
def findUserMethod():
curframe = inspect.currentframe()
calframe = inspect.getouterframes(curframe, 2)
userMethod = ResMan.getFileName(calframe[2][3])
return userMethod
# creates array of variables from sql
# ascii from 48-57 (numbers) and 65-90 (big letters) and 97-122 (small letters)
# or 46 (.) or 95 (_)
@staticmethod
def getVariables(sql):
variables = []
newVar = None
for ch in sql:
ordCh = ord(ch)
if (ch == ":" and newVar == None):
newVar = ""
elif (((ordCh >= 48 and ordCh <= 57) or
(ordCh >= 65 and ordCh <= 90) or
(ordCh >= 97 and ordCh <= 122) or
ordCh == 46 or ordCh == 95)
and newVar != None):
newVar += ch
elif (newVar != None):
variables.append(newVar)
newVar = None
if (newVar != None):
variables.append(newVar)
return variables
# convert arguments
@staticmethod
def getTypeOf(arg, variables=None, key=None, scheme=None):
if (arg == None):
return "NULL"
elif (type(arg) is str):
if (len(arg) == 0): return "''"
if (arg[-1] != "\'"
and arg[-1] != ")"
and not arg.endswith("DESC")
and not arg.endswith("ASC")):
if (arg.startswith("columnName-")):
return arg.replace("columnName-", "")
else:
arg = arg.replace("'", "''")
return f"\'{arg}\'"
else:
return str(arg)
elif (type(arg) is list):
return "(" + ",".join(CheeseRepository.getTypeOf(arg)) + ")"
elif (type(arg) is datetime):
return "'" + datetime.strftime(arg, "%Y-%m-%dT%H:%M:%S") + "'"
elif (isinstance(arg, CheeseModel)):
ret = []
for var in variables:
spl = var.split(".")
if (spl[0] == key):
if (len(spl) >= 2):
ret.append((CheeseRepository.getTypeOf(getattr(arg, spl[1])), var))
else:
schemeArr = scheme.replace(")", "").replace("(", "").split(",")
newScheme = "("
for attr in schemeArr:
attr = attr.strip()
newScheme += CheeseRepository.getTypeOf(getattr(arg, attr)) + ","
newScheme = newScheme[:-1] + ")"
ret.append((newScheme, var))
return ret
else:
return str(arg)
| en | 0.693615 | #cheese ```CheeseRepository``` is static class for communication with database return ```CheeseModel``` with ```Primary key```, ```modelName``` and ```scheme``` return string with name of class return whole table of database as list of ```CheeseModel``` return one ```CheeseModel``` by ```Primary key``` return list of ```CheeseModel``` ```columnName``` name of column for filtering ```value``` value of ```column``` example: ``` columnName = "age" value = 15 -> SQL: "... WHERE age = 15 ..." ``` return one ```CheeseModel``` by ```columnName``` ```columnName``` name of column for filtering ```value``` value of ```column``` example: ``` columnName = "age" value = 15 -> SQL: "... WHERE age = 15 ..." ``` find new available ```Primary key``` creates new row in database ```obj``` is ```CheeseModel``` object updates row in database ```obj``` is ```CheeseModel``` object deletes row from database ```obj``` is ```CheeseModel``` object # STATIC METHODS sets repository testing enviroment ```mockManager``` is instance of ```MockManager``` used by testing stop repository testing enviroment Access point to database. Returns database output. ```userRepository``` is string name of used repository ```**kwargs``` is ```dict``` of arguments for SQL request # finds name of user-made repository # finds name of method from user-made repository # creates array of variables from sql # ascii from 48-57 (numbers) and 65-90 (big letters) and 97-122 (small letters) # or 46 (.) or 95 (_) # convert arguments | 2.690421 | 3 |
bin/train-interestingness.py | ngi-nix/poliscoops | 6 | 6617323 | <gh_stars>1-10
#!/usr/bin/env python
import sys
import os
import re
from pprint import pprint
import glob
import pickle
import requests
from sklearn import svm
sys.path.insert(0, '.')
from ocd_ml.interestingness import featurize, class_labels
def get_data_from_permalink(permalink):
    """Fetch the PoliFLW document behind *permalink* from the public API.

    The document id is taken as the last path component of the permalink.
    Returns the decoded JSON dict, or {} when the request/decoding fails.
    """
    poliflw_id = permalink.strip().split('/')[-1]
    try:
        result = requests.get(
            'https://api.poliflw.nl/v0/combined_index/%s' % (poliflw_id,),
            verify=False).json()
    except Exception as e:
        # Fix: `print e, ...` was a Python 2 print statement and is a
        # SyntaxError under Python 3; use the print() function instead.
        print(e, e.__class__.__name__)
        result = {}
    return result
def load_data_from_file(file_path):
    """Read permalinks (one per line) from *file_path* and fetch each document."""
    with open(file_path) as in_file:
        return [get_data_from_permalink(line) for line in in_file.readlines()]
def main(argv):
    """Train an SVM "interestingness" classifier from labelled permalink files.

    Each ``*.txt`` file under ocd_backend/data/interestingness/ is one class;
    its lines are permalinks whose documents are fetched and featurized.
    The fitted model is pickled to ``interestingness.model``.
    Returns 0 on success (used as the process exit status).
    """
    class_files = glob.glob('ocd_backend/data/interestingness/*.txt')
    classes = {}
    train_data = []
    train_labels = []
    for class_path in class_files:
        # class name == file name without the .txt suffix
        class_name = class_path.split('/')[-1].replace('.txt', '')
        ids = load_data_from_file(class_path)
        classes[class_name] = ids
        train_data += [featurize(x) for x in ids]
        # one label (the class index) per fetched document
        train_labels += [class_labels.index(class_name) for _ in ids]
    pprint(train_data)
    clf = svm.SVC(gamma='scale')
    clf.fit(train_data, train_labels)
    # Fix: close the model file deterministically instead of leaking the
    # handle returned by a bare open() call.
    with open('interestingness.model', 'wb') as model_file:
        pickle.dump(clf, model_file)
    return 0
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| #!/usr/bin/env python
import sys
import os
import re
from pprint import pprint
import glob
import pickle
import requests
from sklearn import svm
sys.path.insert(0, '.')
from ocd_ml.interestingness import featurize, class_labels
def get_data_from_permalink(permalink):
poliflw_id = permalink.strip().split('/')[-1]
try:
result = requests.get(
'https://api.poliflw.nl/v0/combined_index/%s' % (poliflw_id,),
verify=False).json()
except Exception as e:
print e, e.__class__.__name__
result = {}
return result
def load_data_from_file(file_path):
ids = []
with open(file_path) as in_file:
ids = [get_data_from_permalink(x) for x in in_file.readlines()]
return ids
def main(argv):
class_files = glob.glob('ocd_backend/data/interestingness/*.txt')
classes = {}
train_data = []
train_labels = []
for class_path in class_files:
class_name = class_path.split('/')[-1].replace('.txt', '')
ids = load_data_from_file(class_path)
classes[class_name] = ids
train_data += [featurize(x) for x in ids]
train_labels += [class_labels.index(class_name) for x in ids]
pprint(train_data)
clf = svm.SVC(gamma='scale')
clf.fit(train_data, train_labels)
pickle.dump(clf, open('interestingness.model', 'wb'))
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv)) | ru | 0.26433 | #!/usr/bin/env python | 2.553653 | 3 |
problemsets/Codeforces/Python/B1191.py | juarezpaulino/coderemite | 0 | 6617324 | <reponame>juarezpaulino/coderemite
"""
*
* Author: <NAME>(coderemite)
* Email: <EMAIL>
*
"""
# Codeforces 1191B-style mahjong hand: three tiles, each "<digit><suit>".
# Print the minimum number of tiles to draw to own either a triplet (three
# identical tiles) or a run (three consecutive digits in the same suit).
a=sorted(input().split())
# Draws needed for a triplet: with three tiles this is distinct_count - 1
# (3 - max multiplicity).
r=len({*a})-1
# Re-sort by (suit, digit) so same-suit tiles sit adjacent in digit order.
a=sorted(a,key=lambda x:(x[1],x[0]))
v,s=[*zip(*a)]  # v: digit characters, s: suit characters
c=1  # length of the current same-suit consecutive run
for i in range(1,3):
    if s[i]==s[i-1] and int(v[i-1])+1==int(v[i]):
        # run extends by one tile; a run of length c needs 3-c more draws
        c+=1
        r=min(r,3-c)
    elif s[i]==s[i-1] and int(v[i-1])+2==int(v[i]):
        # same suit with a gap of one digit: a single draw completes the run
        r=min(r,1)
    else:c=1
print(r)
| """
*
* Author: <NAME>(coderemite)
* Email: <EMAIL>
*
"""
a=sorted(input().split())
r=len({*a})-1
a=sorted(a,key=lambda x:(x[1],x[0]))
v,s=[*zip(*a)]
c=1
for i in range(1,3):
if s[i]==s[i-1] and int(v[i-1])+1==int(v[i]):
c+=1
r=min(r,3-c)
elif s[i]==s[i-1] and int(v[i-1])+2==int(v[i]):
r=min(r,1)
else:c=1
print(r) | en | 0.307447 | * * Author: <NAME>(coderemite) * Email: <EMAIL> * | 3.051099 | 3 |
clusterman/run.py | akshaysharma096/clusterman | 281 | 6617325 | <reponame>akshaysharma096/clusterman
# Copyright 2019 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from clusterman.args import parse_args
from clusterman.config import setup_config
from clusterman.util import setup_logging
def main(argv=None):
    """CLI entry point: parse arguments, initialise logging/config, dispatch.

    *argv* defaults to ``sys.argv[1:]``.  The selected subcommand's
    entrypoint is invoked with the parsed args; any exception is reported
    and the process exits with status 1.
    """
    if argv is None:
        argv = sys.argv[1:]
    args = parse_args(argv, "Cluster scaling and management for Mesos and Kubernetes")
    setup_logging(args.log_level)
    setup_config(args)
    try:
        args.entrypoint(args)
    except Exception as e:  # top-level boundary: report, then fail
        print(f"Exception of type {e.__class__.__name__} occured")
        if e.args:
            for arg in e.args:
                print(arg)
        # Fix: use sys.exit() -- the builtin exit() is installed by the
        # `site` module for interactive use and is not guaranteed to exist
        # (e.g. under `python -S` or in frozen binaries).
        sys.exit(1)
if __name__ == "__main__":
main()
| # Copyright 2019 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from clusterman.args import parse_args
from clusterman.config import setup_config
from clusterman.util import setup_logging
def main(argv=None):
if argv is None:
argv = sys.argv[1:]
args = parse_args(argv, "Cluster scaling and management for Mesos and Kubernetes")
setup_logging(args.log_level)
setup_config(args)
try:
args.entrypoint(args)
except Exception as e:
print(f"Exception of type {e.__class__.__name__} occured")
if e.args:
for arg in e.args:
print(arg)
exit(1)
if __name__ == "__main__":
main() | en | 0.854643 | # Copyright 2019 Yelp Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. | 1.695482 | 2 |
asltam/io/__init__.py | Eldohrim/Project_2021_HAX712X | 3 | 6617326 | import os
# Raw-GitHub URLs of the project's bundled CSV datasets
# (prices, pairwise distances, geographic coordinates).
url_price = 'https://raw.githubusercontent.com/Eldohrim/Project_2021_HAX712X/main/asltam/data/price_dataf2.csv'
url_dist = 'https://raw.githubusercontent.com/Eldohrim/Project_2021_HAX712X/main/asltam/data/data_dist.csv'
url_geo = 'https://raw.githubusercontent.com/Eldohrim/Project_2021_HAX712X/main/asltam/data/data_geo2.csv'
# Local download targets, resolved against the *current working directory*
# at import time.  NOTE(review): cwd-dependent paths -- confirm callers run
# from the intended directory before importing this module.
path_price = os.path.join(os.getcwd(), 'data_price.csv')
path_dist = os.path.join(os.getcwd(), 'data_dist.csv')
path_geo = os.path.join(os.getcwd(), 'data_geo2.csv')
| import os
url_price = 'https://raw.githubusercontent.com/Eldohrim/Project_2021_HAX712X/main/asltam/data/price_dataf2.csv'
url_dist = 'https://raw.githubusercontent.com/Eldohrim/Project_2021_HAX712X/main/asltam/data/data_dist.csv'
url_geo = 'https://raw.githubusercontent.com/Eldohrim/Project_2021_HAX712X/main/asltam/data/data_geo2.csv'
path_price = os.path.join(os.getcwd(), 'data_price.csv')
path_dist = os.path.join(os.getcwd(), 'data_dist.csv')
path_geo = os.path.join(os.getcwd(), 'data_geo2.csv')
| none | 1 | 2.085375 | 2 | |
myhealthapp/filters.py | SORARAwo4649/HamatteruProject | 1 | 6617327 | <reponame>SORARAwo4649/HamatteruProject
import django_filters
from .models import List
class ListFilter(django_filters.FilterSet):
    """FilterSet over List records (sleep-tracking entries).

    Bug fix: the class previously subclassed the ``django_filters`` *module*
    itself (``class ListFilter(django_filters):``), which raises TypeError at
    class-creation time; filter sets must derive from
    ``django_filters.FilterSet``.

    Input tweaks reference: https://blog.narito.ninja/detail/83/
    """
    class Meta:
        model = List
        fields = (
            "date",
            "go_to_bed",
            "wakeup",
            "short_comment",
            "sleep_quality",
            "staff_comment"
        )
| import django_filters
from .models import List
class ListFilter(django_filters):
"""
入力の遊び
https://blog.narito.ninja/detail/83/
"""
class Meta:
model = List
fields = (
"date",
"go_to_bed",
"wakeup",
"short_comment",
"sleep_quality",
"staff_comment"
) | en | 0.370185 | 入力の遊び https://blog.narito.ninja/detail/83/ | 2.149475 | 2 |
hackerrank/Algorithms/GraphTheory/dijkstrashortreach/dijkstrashortreach.py | everyevery/programming_study | 0 | 6617328 | # HACKERRANK: Dijkstra: Shortest Reach 2
# https://www.hackerrank.com/challenges/dijkstrashortreach
import heapq
from collections import namedtuple
Item = namedtuple('Item', ['r', 'n'])
def solve(t, n, m, es, s):
    """Dijkstra shortest paths from node *s* over the adjacency sets *es*.

    ``es[node]`` holds ``Item(r, n)`` edges (r = weight, n = neighbour);
    nodes are 1-based.  Returns the distances to every node except *s*
    itself, in node order, with -1 for unreachable nodes.  *t* and *m* are
    accepted for interface compatibility but unused (as before).

    Fixes: heap entries are keyed by the accumulated distance rather than
    the last edge weight, stale entries are skipped, and the stray debug
    ``print`` of each popped entry (which corrupted the judged output) is
    removed.
    """
    dist = [-1] * n
    dist[s - 1] = 0
    heap = [(0, s)]
    while heap:
        d, node = heapq.heappop(heap)
        if d > dist[node - 1]:
            # stale entry: a shorter path to `node` was already settled
            continue
        for edge in es[node]:
            cand = d + edge.r
            if dist[edge.n - 1] == -1 or cand < dist[edge.n - 1]:
                dist[edge.n - 1] = cand
                heapq.heappush(heap, (cand, edge.n))
    return dist[:s - 1] + dist[s:]
def main():
    """Read one test case from stdin, run Dijkstra and print the distances.

    Input: "N M", then M lines "u v w" (undirected weighted edges), then
    the source node S.  Edges are stored as Item(weight, neighbour) sets
    indexed by 1-based node id.  Note: solve's first argument is the
    module-global T assigned in the __main__ guard below; solve itself
    never reads it.
    """
    N, M = (int(v) for v in input().split())
    ES = [set() for _ in range(N+1)]  # ES[0] unused; nodes are 1-based
    for _ in range(M):
        start, end, r = (int(v) for v in input().split())
        # undirected graph: insert the edge in both directions
        ES[start].add(Item(r, end))
        ES[end].add(Item(r, start))
    S = int(input())
    print(" ".join(map(lambda x: str(x), solve(T, N, M, ES, S))))
if __name__ == "__main__":
    # T independent test cases, one call to main() each
    T = int(input())
    for _ in range(T):
        main()
| # HACKERRANK: Dijkstra: Shortest Reach 2
# https://www.hackerrank.com/challenges/dijkstrashortreach
import heapq
from collections import namedtuple
Item = namedtuple('Item', ['r', 'n'])
def solve(t, n, m, es, s):
dist = [-1] * (n)
dist[s-1] = 0
bq = [Item(0, s)]
heapq.heapify(bq)
while 0 < len(bq):
current = heapq.heappop(bq)
print(current)
for item in es[current.n]:
if dist[item.n-1] == -1 or dist[item.n-1] > dist[current.n-1] + item.r:
dist[item.n-1] = dist[current.n-1] + item.r
heapq.heappush(bq, Item(item.r, item.n))
return(dist[:(s-1)]+dist[s:])
def main():
N, M = (int(v) for v in input().split())
ES = [set() for _ in range(N+1)]
for _ in range(M):
start, end, r = (int(v) for v in input().split())
ES[start].add(Item(r, end))
ES[end].add(Item(r, start))
S = int(input())
print(" ".join(map(lambda x: str(x), solve(T, N, M, ES, S))))
if __name__ == "__main__":
T = int(input())
for _ in range(T):
main()
| en | 0.385574 | # HACKERRANK: Dijkstra: Shortest Reach 2 # https://www.hackerrank.com/challenges/dijkstrashortreach | 3.592067 | 4 |
pysrc/db.py | uuosio/python-contracts-dev-kit | 1 | 6617329 | import json
import struct
import chain
# Secondary-index kind tags (mirror the chain.db_idx_* primitive families).
idx64 = 0
idx128 = 1
idx256 = 2
idx_double = 3
idx_long_double = 4
# Primary-key width selectors used by ChainDB.
primary_type_i64 = 0
primary_type_i256 = 1
# Name represent as str or int
Name = str
class ChainDB(object):
    """Thin wrapper around one on-chain table accessed via the `chain` API.

    The constructor binds either the i64 or i256 family of database
    primitives once, so every other method is key-width agnostic.
    """
    def __init__(self, primary_type: int, code: Name, scope: Name, table: Name, data_type):
        self.code = code
        self.scope = scope
        self.table = table
        self.data_type = data_type
        self.primary_type = primary_type
        # Bind the whole primitive family in one shot.
        if primary_type == primary_type_i64:
            ops = (chain.db_find_i64, chain.db_get_i64, chain.db_store_i64,
                   chain.db_update_i64, chain.db_remove_i64,
                   chain.db_lowerbound_i64, chain.db_upperbound_i64)
        else:
            ops = (chain.db_find_i256, chain.db_get_i256, chain.db_store_i256,
                   chain.db_update_i256, chain.db_remove_i256,
                   chain.db_lowerbound_i256, chain.db_upperbound_i256)
        (self._db_find, self._db_get, self._db_store, self._db_update,
         self._db_remove, self._lowerbound, self._upperbound) = ops
    def find(self, primary_key: int):
        """Return the iterator for *primary_key* (negative when absent)."""
        return self._db_find(self.code, self.scope, self.table, primary_key)
    def get(self, itr: int):
        """Unpack and return the row behind iterator *itr*.

        Raises IndexError for a negative (invalid) iterator.
        """
        if itr < 0:
            raise IndexError
        return self.data_type.unpack(self._db_get(itr))
    def upper_bound(self, primary: int):
        """Iterator to the first row with a primary key above *primary*."""
        return self._upperbound(self.code, self.scope, self.table, primary)
    def lower_bound(self, primary: int):
        """Iterator to the first row with a primary key not below *primary*."""
        return self._lowerbound(self.code, self.scope, self.table, primary)
    def load(self, primary_key: int):
        """Return the unpacked row for *primary_key*, or None when absent."""
        cursor = self.find(primary_key)
        if cursor < 0:
            return None
        return self.get(cursor)
    def store(self, obj):
        """Insert *obj*, or overwrite the row sharing its primary key."""
        pk = obj.get_primary_key()
        cursor = self._db_find(self.code, self.scope, self.table, pk)
        payload = obj.pack()
        if cursor >= 0:
            self._db_update(cursor, obj.payer, payload)
        else:
            self._db_store(self.scope, self.table, obj.payer, pk, payload)
    def remove(self, primary_key: int):
        """Delete the row for *primary_key*; raise IndexError when absent."""
        cursor = self._db_find(self.code, self.scope, self.table, primary_key)
        if cursor < 0:
            raise IndexError
        self._db_remove(cursor)
    def __len__(self):
        """Number of rows currently stored in this table."""
        return chain.db_get_table_row_count(self.code, self.scope, self.table)
    def remove_by_itr(self, itr: int):
        """Delete the row behind an already-resolved iterator."""
        self._db_remove(itr)
class ChainDBKey64(ChainDB):
    """ChainDB specialisation bound to the 64-bit primary-key primitives."""
    def __init__(self, code, scope, table, data_type):
        super().__init__(primary_type_i64, code, scope, table, data_type)
class ChainDBKey256(ChainDB):
    """ChainDB specialisation bound to the 256-bit primary-key primitives."""
    def __init__(self, code, scope, table, data_type):
        super().__init__(primary_type_i256, code, scope, table, data_type)
class MultiIndex:
    """Primary-key table plus one secondary-index table per declared index.

    Each secondary index declared by ``data_type`` lives in its own chain
    table whose id is derived from the primary table id (low 4 bits replaced
    by the index ordinal, see __init__).
    """
    def __init__(self, code, scope, table, data_type):
        self.code = code
        self.scope = scope
        self.table = table
        self.indexes = data_type.get_secondary_indexes()
        self.data_type = data_type
        self.primary_key = 0
        # One table id per secondary index: keep the high bits of the
        # primary table id, encode the index ordinal in the low 4 bits.
        self.idx_tables = []
        for i in range(len(self.indexes)):
            table = (int(self.table) & 0xFFFFFFFFFFFFFFF0) | i
            self.idx_tables.append(table)
    def find(self, primary_key):
        """Return the row iterator for *primary_key* (negative when absent)."""
        return chain.db_find_i64(self.code, self.scope, self.table, primary_key)
    def get(self, itr):
        """Unpack the row behind iterator *itr*; raise IndexError if invalid."""
        if itr < 0:
            raise IndexError
        data = chain.db_get_i64(itr)
        return self.data_type.unpack(data)
    def get_secondary_values(self, primary_key):
        """Return the stored secondary keys for *primary_key*, one per index.

        Returns None as soon as any index lacks an entry for the key.
        """
        values = []
        i = 0
        for idx in self.indexes:
            table = self.idx_tables[i]
            itr, secondary = chain.db_idx_find_primary(idx, self.code, self.scope, table, primary_key)
            if itr < 0:
                return None
            values.append(secondary)
            i += 1
        return values
    def __getitem__(self, primary_key):
        """dict-style read; raises IndexError when the key is absent."""
        itr = self.find(primary_key)
        if itr < 0:
            raise IndexError
        return self.get(itr)
    def __setitem__(self, primary_key, obj):
        """dict-style write; the key must match *obj*'s own primary key."""
        assert primary_key == obj.get_primary_key()
        self.store(obj)
    def load(self, primary_key):
        """Like __getitem__ but returns None instead of raising when absent."""
        itr = self.find(primary_key)
        if itr < 0:
            return None
        return self.get(itr)
    def store(self, obj):
        """Insert or update *obj*'s row and keep all secondary indexes in sync."""
        primary = obj.get_primary_key()
        itr = chain.db_find_i64(self.code, self.scope, self.table, primary)
        if itr < 0:
            # new row: store the payload plus one entry per secondary index
            chain.db_store_i64(self.scope, self.table, obj.payer, primary, obj.pack())
            i = 0
            for idx in self.indexes:
                table = self.idx_tables[i]
                chain.db_idx_store(idx, self.scope, table, obj.payer, primary, obj.get_secondary_values()[i])
                i += 1
        else:
            # existing row: update payload, refresh only the changed indexes
            chain.db_update_i64(itr, obj.payer, obj.pack())
            i = 0
            for idx in self.indexes:
                table = self.idx_tables[i]
                itr, old_secondary = chain.db_idx_find_primary(idx, self.code, self.scope, table, primary)
                secondary = obj.get_secondary_values()[i]
                if not secondary == old_secondary:
                    chain.db_idx_update(idx, itr, obj.payer, secondary)
                i += 1
    def remove(self, primary_key):
        """Delete the row and every secondary entry; raise IndexError when absent."""
        itr = chain.db_find_i64(self.code, self.scope, self.table, primary_key)
        if itr < 0:
            raise IndexError
        chain.db_remove_i64(itr)
        i = 0
        for idx in self.indexes:
            table = self.idx_tables[i]
            itr, _ = chain.db_idx_find_primary(idx, self.code, self.scope, table, primary_key)
            assert itr >= 0
            chain.db_idx_remove(idx, itr)
            i += 1
    def __delitem__(self, primary_key):
        self.remove(primary_key)
    def __contains__(self, primary_key):
        return chain.db_find_i64(self.code, self.scope, self.table, primary_key) >= 0
    def __iter__(self):
        # iteration walks the table backwards from the end iterator
        self.itr = chain.db_end_i64(self.code, self.scope, self.table)
        return self
    def __next__(self):
        if self.itr == -1:
            raise StopIteration
        self.itr, self.primary_key = chain.db_previous_i64(self.itr)
        if self.itr < 0:
            raise StopIteration
        return self.get(self.itr)
    def __len__(self):
        """Row count of the primary table.

        Bug fix: ``row_count /= 2`` produced a float, and ``__len__`` must
        return an int (``len()`` raises TypeError otherwise); use floor
        division.  The halving assumes the raw chain count includes the
        secondary-index rows -- TODO confirm against the chain API.
        """
        row_count = chain.db_get_table_row_count(self.code, self.scope, self.table)
        if self.indexes:
            row_count //= 2
        return row_count
    def get_secondary_index(self, idx):
        # NOTE(review): SecondaryIndex is not defined in this module; it is
        # presumably provided elsewhere in the project -- verify.
        return SecondaryIndex(self, self.indexes[idx], self.data_type)
    def upper_bound(self, primary):
        """Iterator to the first row whose primary key is above *primary*."""
        return chain.db_upperbound_i64(self.code, self.scope, self.table, primary)
    def lower_bound(self, primary):
        """Iterator to the first row whose primary key is not below *primary*."""
        return chain.db_lowerbound_i64(self.code, self.scope, self.table, primary)
    def idx_find(self, index, secondary_key):
        """Find a row via the secondary key of index *index*.

        Bug fix: the lookup previously passed the primary table id; the
        secondary keys live in the per-index table, as in idx_upper_bound /
        idx_lower_bound and the stores above.
        """
        idx = self.indexes[index]
        idx_table = self.idx_tables[index]
        return chain.db_idx_find_secondary(idx, self.code, self.scope, idx_table, secondary_key)
    def idx_upper_bound(self, index, secondary_key):
        """Secondary-key upper bound within index *index*."""
        idx = self.indexes[index]
        idx_table = self.idx_tables[index]
        return chain.db_idx_upperbound(idx, self.code, self.scope, idx_table, secondary_key)
    def idx_lower_bound(self, index, secondary_key):
        """Secondary-key lower bound within index *index*."""
        idx = self.indexes[index]
        idx_table = self.idx_tables[index]
        return chain.db_idx_lowerbound(idx, self.code, self.scope, idx_table, secondary_key)
def _say_hello(msg):
print('++++hello,world')
| import json
import struct
import chain
idx64 = 0
idx128 = 1
idx256 = 2
idx_double = 3
idx_long_double = 4
primary_type_i64 = 0
primary_type_i256 = 1
# Name represent as str or int
Name = str
class ChainDB(object):
def __init__(self, primary_type: int, code: Name, scope: Name, table: Name, data_type):
self.code = code
self.scope = scope
self.table = table
self.data_type = data_type
self.primary_type = primary_type
if primary_type == primary_type_i64:
self._db_find = chain.db_find_i64
self._db_get = chain.db_get_i64
self._db_store = chain.db_store_i64
self._db_update = chain.db_update_i64
self._db_remove = chain.db_remove_i64
self._lowerbound = chain.db_lowerbound_i64
self._upperbound = chain.db_upperbound_i64
else:
self._db_find = chain.db_find_i256
self._db_get = chain.db_get_i256
self._db_store = chain.db_store_i256
self._db_update = chain.db_update_i256
self._db_remove = chain.db_remove_i256
self._lowerbound = chain.db_lowerbound_i256
self._upperbound = chain.db_upperbound_i256
def find(self, primary_key: int):
return self._db_find(self.code, self.scope, self.table, primary_key)
def get(self, itr: int):
'''
'''
if itr < 0:
raise IndexError
data = self._db_get(itr)
return self.data_type.unpack(data)
def upper_bound(self, primary: int):
'''
'''
return self._upperbound(self.code, self.scope, self.table, primary)
def lower_bound(self, primary: int):
return self._lowerbound(self.code, self.scope, self.table, primary)
def load(self, primary_key: int):
itr = self.find(primary_key)
if itr < 0:
return None
return self.get(itr)
def store(self, obj):
primary_key = obj.get_primary_key()
itr = self._db_find(self.code, self.scope, self.table, primary_key)
if itr < 0:
self._db_store(self.scope, self.table, obj.payer, primary_key, obj.pack())
else:
self._db_update(itr, obj.payer, obj.pack())
def remove(self, primary_key: int):
itr = self._db_find(self.code, self.scope, self.table, primary_key)
if itr < 0:
raise IndexError
self._db_remove(itr)
def __len__(self):
return chain.db_get_table_row_count(self.code, self.scope, self.table)
def remove_by_itr(self, itr: int):
self._db_remove(itr)
class ChainDBKey64(ChainDB):
def __init__(self, code, scope, table, data_type):
ChainDB.__init__(self, primary_type_i64, code, scope, table, data_type)
class ChainDBKey256(ChainDB):
def __init__(self, code, scope, table, data_type):
ChainDB.__init__(self, primary_type_i256, code, scope, table, data_type)
class MultiIndex:
def __init__(self, code, scope, table, data_type):
self.code = code
self.scope = scope
self.table = table
self.indexes = data_type.get_secondary_indexes()
self.data_type = data_type
self.primary_key = 0
self.idx_tables = []
for i in range(len(self.indexes)):
table = (int(self.table) & 0xFFFFFFFFFFFFFFF0) | i
self.idx_tables.append(table)
def find(self, primary_key):
return chain.db_find_i64(self.code, self.scope, self.table, primary_key)
def get(self, itr):
if itr < 0:
raise IndexError
data = chain.db_get_i64(itr)
return self.data_type.unpack(data)
def get_secondary_values(self, primary_key):
values = []
i = 0
for idx in self.indexes:
table = self.idx_tables[i]
itr, secondary = chain.db_idx_find_primary(idx, self.code, self.scope, table, primary_key)
if itr < 0:
return None
values.append(secondary)
i += 1
return values
def __getitem__(self, primary_key):
itr = self.find(primary_key)
if itr < 0:
raise IndexError
return self.get(itr)
def __setitem__(self, primary_key, obj):
assert primary_key == obj.get_primary_key()
self.store(obj)
def load(self, primary_key):
itr = self.find(primary_key)
if itr < 0:
return None
return self.get(itr)
def store(self, obj):
primary = obj.get_primary_key()
itr = chain.db_find_i64(self.code, self.scope, self.table, primary)
if itr < 0:
chain.db_store_i64(self.scope, self.table, obj.payer, primary, obj.pack())
i = 0
for idx in self.indexes:
table = self.idx_tables[i]
chain.db_idx_store(idx, self.scope, table, obj.payer, primary, obj.get_secondary_values()[i])
i += 1
else:
chain.db_update_i64(itr, obj.payer, obj.pack())
i = 0
for idx in self.indexes:
table = self.idx_tables[i]
itr, old_secondary = chain.db_idx_find_primary(idx, self.code, self.scope, table, primary)
secondary = obj.get_secondary_values()[i]
if not secondary == old_secondary:
chain.db_idx_update(idx, itr, obj.payer, secondary)
i += 1
def remove(self, primary_key):
itr = chain.db_find_i64(self.code, self.scope, self.table, primary_key)
if itr < 0:
raise IndexError
chain.db_remove_i64(itr)
i = 0
for idx in self.indexes:
table = self.idx_tables[i]
itr, _ = chain.db_idx_find_primary(idx, self.code, self.scope, table, primary_key)
assert itr >= 0
chain.db_idx_remove(idx, itr)
i += 1
def __delitem__(self, primary_key):
self.remove(primary_key)
def __contains__(self, primary_key):
return chain.db_find_i64(self.code, self.scope, self.table, primary_key) >= 0
def __iter__(self):
self.itr = chain.db_end_i64(self.code, self.scope, self.table)
return self
def __next__(self):
if self.itr == -1:
raise StopIteration
self.itr, self.primary_key = chain.db_previous_i64(self.itr)
if self.itr < 0:
raise StopIteration
return self.get(self.itr)
def __len__(self):
row_count = chain.db_get_table_row_count(self.code, self.scope, self.table)
if self.indexes:
row_count /= 2
return row_count
def get_secondary_index(self, idx):
return SecondaryIndex(self, self.indexes[idx], self.data_type)
def upper_bound(self, primary):
return chain.db_upperbound_i64(self.code, self.scope, self.table, primary)
def lower_bound(self, primary):
return chain.db_lowerbound_i64(self.code, self.scope, self.table, primary)
def idx_find(self, index, secondary_key):
idx = self.indexes[index]
return chain.db_idx_find_secondary(idx, self.code, self.scope, self.table, secondary_key)
def idx_upper_bound(self, index, secondary_key):
idx = self.indexes[index]
idx_table = self.idx_tables[index]
return chain.db_idx_upperbound(idx, self.code, self.scope, idx_table, secondary_key)
def idx_lower_bound(self, index, secondary_key):
idx = self.indexes[index]
idx_table = self.idx_tables[index]
return chain.db_idx_lowerbound(idx, self.code, self.scope, idx_table, secondary_key)
def _say_hello(msg):
print('++++hello,world')
| en | 0.93353 | # Name represent as str or int | 2.652933 | 3 |
bin/run_ashlar.py | bioinfonerd/mcmicro | 0 | 6617330 | <reponame>bioinfonerd/mcmicro<gh_stars>0
#library
from __future__ import print_function
import csv
from subprocess import call
try:
import pathlib
except ImportError:
import pathlib2 as pathlib
import argparse
import os
import datetime
import sys
import glob
#function
def text_to_bool(text):
    """Interpret *text* as a boolean flag.

    True for (case-insensitive) '1', 'yes', 'y', 'true', 't'; False for
    everything else, including empty values.

    Bug fix: the original tested membership in the *string*
    '1,yes,y,true,t', so any substring of it ('e', 'es', ',', ...) counted
    as True; membership is now against a set of whole tokens.
    """
    return str(text).lower() in {'1', 'yes', 'y', 'true', 't'}
def path_to_date(path):
    """Return *path*'s last-modification time as seconds since the epoch."""
    return os.path.getmtime(str(path))
#expand the number of microscope raw files types searched for
def microscope_check(current_sample):
    """Detect which raw-image format is present under <current_sample>/raw_files.

    Returns the matching file extension ('.ome.tiff' for exemplar data,
    '.rcpnl' for RareCyte) and prints which one was found, or returns the
    sentinel string 'notfound' when neither format is present.
    """
    raw_glob = str(current_sample) + '/raw_files/*'
    if glob.glob(raw_glob + '.ome.tiff'):
        print('Exemplar Dataset Used')
        return '.ome.tiff'
    if glob.glob(raw_glob + '.rcpnl'):
        print('Rarecyte Microscope')
        return '.rcpnl'
    # neither format found: still return a string sentinel
    return 'notfound'
#local testing
#sys.argv=['tmp'] #local testing
#sys.argv.append(os.path.normpath('/home/bionerd/Dropbox/@<NAME>/CyCif/git/mcmicro/example_data/image_1'))
#sys.argv.append('/n/groups/lsp/cycif/cycif_pipeline_testing_space/mcmicro/environments/ashlar/bin/ashlar')
#sys.argv.append(str(['-m','30','--filter-sigma','0']))
# ---- command-line inputs -------------------------------------------------
# argv[1]: sample directory, argv[2]: path to the ashlar executable,
# argv[3:]: a stringified list of extra ashlar flags, e.g. "['-m','30']".
path_exp = pathlib.Path('/'.join([str(sys.argv[1])]))
ashlar_path = pathlib.Path(str(sys.argv[2]))
#parameters = eval(str(sys.argv[3])) #converting string to list (assumption is list to begin with)
parameters = ''.join(sys.argv[3:])
# strip the surrounding brackets and split on commas to recover the list
parameters = parameters[1:-1].split(',')
print('Data Path passed:',path_exp)
print('Ashlar Path passed:',ashlar_path)
print('Paramters passed',str(parameters))
# ---- locate raw files ----------------------------------------------------
# NOTE(review): microscope_check() is called twice, so its format message is
# printed twice; kept as-is to preserve the script's console output.
raw_file = ''.join(['*' + microscope_check(path_exp)])
file_type = microscope_check(path_exp)
raw_dir = path_exp / 'raw_files'
files_exp = sorted(raw_dir.glob(raw_file))
print('Processing files in', str(raw_dir))
print(datetime.datetime.now())
print()
# Illumination-profile file names mirror the raw names with -ffp / -dfp
# suffixes under illumination_profiles/.
ffp_list = []
dfp_list = []
for j in files_exp:
    ffp_file_name = j.name.replace(file_type, '-ffp.tif')
    dfp_file_name = j.name.replace(file_type, '-dfp.tif')
    illumination_dir = path_exp / 'illumination_profiles'
    ffp_list.append(str(illumination_dir / ffp_file_name))
    dfp_list.append(str(illumination_dir / dfp_file_name))
print('Run ashlar')
print(datetime.datetime.now())
print()
out_dir = path_exp / 'registration'
test_sample = out_dir / '.ome.tif'
# skip registration when the output already exists
if not test_sample.exists():
    input_files = ' '.join([str(f) for f in files_exp])
    # build the ashlar command line
    command = 'python ' + str(ashlar_path) + ' ' + input_files + ' ' + ' '.join(parameters) + ' -o ' + str(out_dir)
    #if text_to_bool(exp['Pyramid']): #[TODO] add to parameter yaml
    command += ' --pyramid -f ' + path_exp.name + '.ome.tif'
    #if text_to_bool(exp['Correction']): [TODO] add to parameter yaml
    ffps = ' '.join(ffp_list)
    dfps = ' '.join(dfp_list)
    command += ' --ffp ' + ffps + ' --dfp ' + dfps
    # log the ordering of raw / ffp / dfp inputs and the final command
    print([i for i in files_exp])
    print([i for i in ffp_list])
    print([i for i in dfp_list])
    print(command)
    # NOTE(review): shell=True with a string built from argv and file names;
    # acceptable in a trusted pipeline, but a list + shell=False is safer.
    call(command, shell=True)
    print(datetime.datetime.now())
else:
    # Fix: test_sample is a pathlib.Path; 'str' + Path raised TypeError.
    # Also restored the missing space before 'exists'.
    print('Sample ' + str(test_sample) + ' exists')
| #library
from __future__ import print_function
import csv
from subprocess import call
try:
import pathlib
except ImportError:
import pathlib2 as pathlib
import argparse
import os
import datetime
import sys
import glob
#function
def text_to_bool(text):
    """Interpret *text* as a boolean flag.

    Falsy/empty string forms map to False; otherwise the value is True only
    when its lowercased string form is exactly one of '1', 'yes', 'y',
    'true', 't'.

    Bug fix: the original tested membership in the single string
    '1,yes,y,true,t', so any substring of it (e.g. 'e', 'es', ',') was
    accepted as True.
    """
    value = str(text)
    if not value:
        return False
    return value.lower() in ('1', 'yes', 'y', 'true', 't')
def path_to_date(path):
    """Return the filesystem modification timestamp (epoch seconds) of *path*."""
    target = str(path)
    return os.path.getmtime(target)
#expand the number of microscope raw files types searched for
def microscope_check(current_sample):
    """Detect which supported raw-file type is present under
    <current_sample>/raw_files/.

    Checks each known microscope file extension in priority order, prints a
    short description of the first one found, and returns its extension.
    Returns the string 'notfound' when none match (callers rely on always
    receiving a string).

    The original repeated the glob/print/return pattern per type; this
    version drives it from a table so new microscopes are one-line additions.
    """
    known_types = (
        ('.ome.tiff', 'Exemplar Dataset Used'),
        ('.rcpnl', 'Rarecyte Microscope'),
    )
    for extension, description in known_types:
        pattern = str(current_sample) + '/raw_files/*' + extension
        if glob.glob(pattern):
            print(description)
            return extension
    # if neither found, still needs to return a string
    return 'notfound'
#local testing
#sys.argv=['tmp'] #local testing
#sys.argv.append(os.path.normpath('/home/bionerd/Dropbox/@<NAME>/CyCif/git/mcmicro/example_data/image_1'))
#sys.argv.append('/n/groups/lsp/cycif/cycif_pipeline_testing_space/mcmicro/environments/ashlar/bin/ashlar')
#sys.argv.append(str(['-m','30','--filter-sigma','0']))

# Command-line inputs: <sample directory> <ashlar executable> <parameter list>
path_exp = pathlib.Path(str(sys.argv[1]))
ashlar_path = pathlib.Path(str(sys.argv[2]))
#parameters = eval(str(sys.argv[3])) #converting string to list (assumption is list to begin with)
# Parameters arrive as the string form of a list, e.g. "['-m','30']";
# strip the surrounding brackets and split on commas instead of eval().
parameters = ''.join(sys.argv[3:])
parameters = parameters[1:-1].split(',')
print('Data Path passed:', path_exp)
print('Ashlar Path passed:', ashlar_path)
print('Parameters passed', str(parameters))  # fixed 'Paramters' typo

# global variables
# Detect the raw-file type once (the original called microscope_check()
# twice, printing the detection message twice).
file_type = microscope_check(path_exp)
raw_file = '*' + file_type
raw_dir = path_exp / 'raw_files'
files_exp = sorted(raw_dir.glob(raw_file))
print('Processing files in', str(raw_dir))
print(datetime.datetime.now())
print()

# Build the matching flat-field (ffp) / dark-field (dfp) illumination
# profile paths for every raw image.
ffp_list = []
dfp_list = []
for j in files_exp:
    # print('\r  ' + 'Generating ffp and dfp for ' + j.name)
    ffp_file_name = j.name.replace(file_type, '-ffp.tif')
    dfp_file_name = j.name.replace(file_type, '-dfp.tif')
    illumination_dir = path_exp / 'illumination_profiles'
    ffp_list.append(str(illumination_dir / ffp_file_name))
    dfp_list.append(str(illumination_dir / dfp_file_name))

print('Run ashlar')
print(datetime.datetime.now())
print()
out_dir = path_exp / 'registration'
test_sample = out_dir / '.ome.tif'
# test if already run, if not run
if not test_sample.exists():
    input_files = ' '.join([str(f) for f in files_exp])
    #command for run
    command = 'python ' + str(ashlar_path) + ' ' + input_files + ' ' + ' '.join(parameters) + ' -o ' + str(out_dir)
    #if text_to_bool(exp['Pyramid']): #[TODO] add to parameter yaml
    command += ' --pyramid -f ' + path_exp.name + '.ome.tif'
    #if text_to_bool(exp['Correction']): [TODO] add to parameter yaml
    ffps = ' '.join(ffp_list)
    dfps = ' '.join(dfp_list)
    command += ' --ffp ' + ffps + ' --dfp ' + dfps
    #save the list order for rcpnl, ffp, and dfp
    print(list(files_exp))
    print(list(ffp_list))
    print(list(dfp_list))
    #save the command passed to ashlar
    print(command)
    call(command, shell=True)
    print(datetime.datetime.now())
else:
    # Fixed: the original concatenated str + pathlib.Path, which raises
    # TypeError; also added the missing space before 'exists'.
    print('Sample ' + str(test_sample) + ' exists')
scood/k_means.py | Jingkang50/ICCV21_SCOOD | 34 | 6617331 | import time
import faiss
import numpy as np
def preprocess_features(npdata, pca=256):
    """PCA-reduce, whiten and L2-normalize a feature matrix.

    Args:
        npdata (np.ndarray, N x ndim): raw features.
        pca (int): output dimensionality.

    Returns:
        np.ndarray of shape N x pca: whitened features with unit-length rows.
    """
    n_dims = npdata.shape[1]
    features = npdata.astype("float32")

    # PCA whitening via Faiss (eigen_power=-0.5 performs the whitening).
    transform = faiss.PCAMatrix(n_dims, pca, eigen_power=-0.5)
    transform.train(features)
    assert transform.is_trained
    features = transform.apply_py(features)

    # Scale every row to unit Euclidean length.
    norms = np.linalg.norm(features, axis=1, keepdims=True)
    return features / norms
def run_kmeans(x, nmb_clusters, verbose=False):
    """Run k-means on a single GPU via Faiss.

    Args:
        x: data matrix of shape (n_data, d); Faiss expects float32.
        nmb_clusters (int): number of clusters.
        verbose: accepted for API symmetry but currently unused here.

    Returns:
        np.ndarray of shape (n_data,): index of the nearest centroid for
        each input row.
    """
    n_data, d = x.shape  # n_data is unused; only the dimensionality d is needed
    # faiss implementation of k-means
    clus = faiss.Clustering(d, nmb_clusters)
    # Change faiss seed at each k-means so that the randomly picked
    # initialization centroids do not correspond to the same feature ids
    # from an epoch to another.
    clus.seed = np.random.randint(1234)
    clus.niter = 20
    clus.max_points_per_centroid = 10000000  # large cap effectively disables subsampling
    res = faiss.StandardGpuResources()
    flat_config = faiss.GpuIndexFlatConfig()
    flat_config.useFloat16 = False
    # NOTE(review): pins the index to GPU 0 — assumes a CUDA device is
    # available at that slot; confirm for multi-GPU / CPU-only deployments.
    flat_config.device = 0
    index = faiss.GpuIndexFlatL2(res, d, flat_config)
    # perform the training
    clus.train(x, index)
    # Assign each row to its single nearest centroid.
    _, I = index.search(x, 1)
    return I.reshape(
        -1,
    )
class KMeans(object):
    """PCA-whitening preprocessing followed by Faiss GPU k-means."""

    def __init__(self, k, pca_dim):
        # k: number of clusters; pca_dim: PCA output dimensionality used
        # by preprocess_features().
        self.k = k
        self.pca_dim = pca_dim

    def cluster(self, data, verbose=True):
        """Performs k-means clustering.

        Args:
            data (np.array N * dim): data to cluster.

        Returns:
            np.ndarray of shape (N,): cluster id per input row.
        """
        # PCA-reducing, whitening and L2-normalization
        xb = preprocess_features(data, pca=self.pca_dim)
        if np.isnan(xb).any():
            # Whitening produced NaNs; fall back to a plain L2-normalized
            # copy of the raw data.
            row_sums = np.linalg.norm(data, axis=1)
            data_norm = data / row_sums[:, np.newaxis]
            if np.isnan(data_norm).any():
                # NOTE(review): this branch clusters data_norm even though it
                # STILL contains NaNs, while a clean data_norm falls through
                # to clustering the raw data below — the condition looks
                # inverted; confirm intent before changing.
                I = run_kmeans(data_norm, self.k, verbose)
            else:
                I = run_kmeans(data, self.k, verbose)
        else:
            # cluster the data
            I = run_kmeans(xb, self.k, verbose)
        return I
| import time
import faiss
import numpy as np
def preprocess_features(npdata, pca=256):
"""Preprocess an array of features.
Args:
npdata (np.array N * ndim): features to preprocess
pca (int): dim of output
Returns:
np.array of dim N * pca: data PCA-reduced, whitened and L2-normalized
"""
_, ndim = npdata.shape
npdata = npdata.astype("float32")
# Apply PCA-whitening with Faiss
mat = faiss.PCAMatrix(ndim, pca, eigen_power=-0.5)
mat.train(npdata)
assert mat.is_trained
npdata = mat.apply_py(npdata)
# L2 normalization
row_sums = np.linalg.norm(npdata, axis=1)
npdata = npdata / row_sums[:, np.newaxis]
return npdata
def run_kmeans(x, nmb_clusters, verbose=False):
"""Runs kmeans on 1 GPU.
Args:
x: data
nmb_clusters (int): number of clusters
Returns:
list: ids of data in each cluster
"""
n_data, d = x.shape
# faiss implementation of k-means
clus = faiss.Clustering(d, nmb_clusters)
# Change faiss seed at each k-means so that the randomly picked
# initialization centroids do not correspond to the same feature ids
# from an epoch to another.
clus.seed = np.random.randint(1234)
clus.niter = 20
clus.max_points_per_centroid = 10000000
res = faiss.StandardGpuResources()
flat_config = faiss.GpuIndexFlatConfig()
flat_config.useFloat16 = False
flat_config.device = 0
index = faiss.GpuIndexFlatL2(res, d, flat_config)
# perform the training
clus.train(x, index)
_, I = index.search(x, 1)
return I.reshape(
-1,
)
class KMeans(object):
def __init__(self, k, pca_dim):
self.k = k
self.pca_dim = pca_dim
def cluster(self, data, verbose=True):
"""Performs k-means clustering.
Args:
x_data (np.array N * dim): data to cluster
"""
# PCA-reducing, whitening and L2-normalization
xb = preprocess_features(data, pca=self.pca_dim)
if np.isnan(xb).any():
row_sums = np.linalg.norm(data, axis=1)
data_norm = data / row_sums[:, np.newaxis]
if np.isnan(data_norm).any():
I = run_kmeans(data_norm, self.k, verbose)
else:
I = run_kmeans(data, self.k, verbose)
else:
# cluster the data
I = run_kmeans(xb, self.k, verbose)
return I
| en | 0.658334 | Preprocess an array of features. Args: npdata (np.array N * ndim): features to preprocess pca (int): dim of output Returns: np.array of dim N * pca: data PCA-reduced, whitened and L2-normalized # Apply PCA-whitening with Faiss # L2 normalization Runs kmeans on 1 GPU. Args: x: data nmb_clusters (int): number of clusters Returns: list: ids of data in each cluster # faiss implementation of k-means # Change faiss seed at each k-means so that the randomly picked # initialization centroids do not correspond to the same feature ids # from an epoch to another. # perform the training Performs k-means clustering. Args: x_data (np.array N * dim): data to cluster # PCA-reducing, whitening and L2-normalization # cluster the data | 2.742094 | 3 |
push_sampledata.py | SQuadrillion/ProjectIDEA-QUEUE | 0 | 6617332 | import redis
from datetime import datetime, timedelta
song1 = '{"id": 1, "song": {"song_name": "adrenaline!!!", "artist_name": "TrySail", "artwork_url": "https://images-na.ssl-images-amazon.com/images/I/61jI%2BP82JVL.jpg", "time": 130, "music_url": "http://amachamusic.chagasi.com/mp3/kasumisou.mp3"}, "usename": "MMAtsushi"}'
song2 = '{"id": 2, "song": {"song_name": "adrenaline!!!", "artist_name": "TrySail", "artwork_url": "https://images-na.ssl-images-amazon.com/images/I/61jI%2BP82JVL.jpg", "time": 127, "music_url": "http://www.ne.jp/asahi/music/myuu/wave/asibue.mp3"}, "usename": "MMAtsushi"}'
r = redis.Redis(host='127.0.0.1')
r.set(datetime.today(), song1)
r.set(datetime.today() + timedelta(seconds=10), song2) | import redis
from datetime import datetime, timedelta
song1 = '{"id": 1, "song": {"song_name": "adrenaline!!!", "artist_name": "TrySail", "artwork_url": "https://images-na.ssl-images-amazon.com/images/I/61jI%2BP82JVL.jpg", "time": 130, "music_url": "http://amachamusic.chagasi.com/mp3/kasumisou.mp3"}, "usename": "MMAtsushi"}'
song2 = '{"id": 2, "song": {"song_name": "adrenaline!!!", "artist_name": "TrySail", "artwork_url": "https://images-na.ssl-images-amazon.com/images/I/61jI%2BP82JVL.jpg", "time": 127, "music_url": "http://www.ne.jp/asahi/music/myuu/wave/asibue.mp3"}, "usename": "MMAtsushi"}'
r = redis.Redis(host='127.0.0.1')
r.set(datetime.today(), song1)
r.set(datetime.today() + timedelta(seconds=10), song2) | none | 1 | 2.635669 | 3 | |
core-python-organizing-larger-programs/nesting_modules_with_packages/main.py | hassonor/core-python | 1 | 6617333 | <reponame>hassonor/core-python
import urllib
from urllib import request
# Example: both a package (urllib) and a submodule (urllib.request) print
# as module objects — packages are themselves modules.
print(type(urllib))
print(type(urllib.request))
| import urllib
from urllib import request
# example of class modules
print(type(urllib))
print(type(urllib.request)) | en | 0.345563 | # example of class modules | 2.102202 | 2 |
tests/backends/test_pyevm.py | kclowes/eth-tester | 1 | 6617334 | from __future__ import unicode_literals
import pytest
from eth.vm.forks import (
BerlinVM,
FrontierVM,
LondonVM,
)
from eth_utils import to_wei
from eth_tester import EthereumTester, PyEVMBackend
from eth_tester.backends.pyevm.main import (
generate_genesis_state_for_keys,
get_default_account_keys,
get_default_genesis_params,
)
from eth_tester.backends.pyevm.utils import is_supported_pyevm_version_available
from eth_tester.exceptions import ValidationError
from eth_tester.utils.backend_testing import BaseTestBackendDirect, SIMPLE_TRANSACTION
ZERO_ADDRESS_HEX = "0x0000000000000000000000000000000000000000"
MNEMONIC = "test test test test test test test test test test test junk"
@pytest.fixture
def eth_tester():
    """Provide an EthereumTester wired to a default PyEVM backend,
    skipping when no supported PyEVM is installed."""
    if not is_supported_pyevm_version_available():
        pytest.skip("PyEVM is not available")
    return EthereumTester(backend=PyEVMBackend())
def test_custom_virtual_machines():
    """A backend built with a custom VM schedule must activate each VM at
    its configured block number (Frontier until block 3, London after)."""
    if not is_supported_pyevm_version_available():
        pytest.skip("PyEVM is not available")
    backend = PyEVMBackend(vm_configuration=(
        (0, FrontierVM),
        (3, LondonVM),
    ))
    # This should be a FrontierVM block
    VM_at_2 = backend.chain.get_vm_class_for_block_number(2)
    # This should be a LondonVM block
    VM_at_3 = backend.chain.get_vm_class_for_block_number(3)
    assert issubclass(VM_at_2, FrontierVM)
    assert not issubclass(VM_at_2, LondonVM)
    assert issubclass(VM_at_3, LondonVM)
    # Right now, just test that EthereumTester doesn't crash
    # Maybe some more sophisticated test to make sure the VMs are set correctly?
    # We should to make sure the VM config translates all the way to the main
    # tester, maybe with a custom VM that hard-codes some block value? that can
    # be found with tester.get_block_by_number()?
    EthereumTester(backend=backend)
def test_berlin_configuration():
    """Berlin (pre-London) blocks must not expose base_fee_per_gas."""
    if not is_supported_pyevm_version_available():
        pytest.skip("PyEVM is not available")
    vm_configuration = ((0, BerlinVM),)
    # Consistency fix: reuse the module-level MNEMONIC constant instead of
    # re-declaring the identical phrase in a local variable.
    backend = PyEVMBackend.from_mnemonic(
        MNEMONIC, vm_configuration=vm_configuration
    )
    # Berlin blocks shouldn't have base_fee_per_gas,
    # since London was the fork that introduced base_fee_per_gas
    with pytest.raises(KeyError):
        backend.get_block_by_number(0)['base_fee_per_gas']
    EthereumTester(backend=backend)
def test_london_configuration():
    """A London genesis block must expose the default base fee (1 gwei)."""
    if not is_supported_pyevm_version_available():
        pytest.skip("PyEVM is not available")
    vm_config = ((0, LondonVM),)
    backend = PyEVMBackend(vm_configuration=vm_config)
    genesis = backend.get_block_by_number(0)
    assert genesis['base_fee_per_gas'] == 1000000000
    EthereumTester(backend=backend)
class TestPyEVMBackendDirect(BaseTestBackendDirect):
    """Direct tests of PyEVMBackend construction helpers — custom genesis
    state, genesis parameters, mnemonic-derived accounts — plus one
    transaction-validation case, layered on the shared BaseTestBackendDirect
    suite."""

    def test_generate_custom_genesis_state(self):
        """Genesis-state helpers honor the account quantity and state
        overrides, and reject unknown override keys with ValueError."""
        state_overrides = {"balance": to_wei(900000, "ether")}
        invalid_overrides = {"gato": "con botas"}
        # Test creating a specific number of accounts
        account_keys = get_default_account_keys(quantity=2)
        assert len(account_keys) == 2
        account_keys = get_default_account_keys(quantity=10)
        assert len(account_keys) == 10
        # Test the underlying state merging functionality
        genesis_state = generate_genesis_state_for_keys(
            account_keys=account_keys, overrides=state_overrides
        )
        assert len(genesis_state) == len(account_keys) == 10
        for _public_address, account_state in genesis_state.items():
            assert account_state["balance"] == state_overrides["balance"]
            assert account_state["code"] == b""
        # Only existing default genesis state keys can be overridden
        with pytest.raises(ValueError):
            _invalid_genesis_state = generate_genesis_state_for_keys(
                account_keys=account_keys, overrides=invalid_overrides
            )
        # Use staticmethod state overriding
        genesis_state = PyEVMBackend.generate_genesis_state(
            overrides=state_overrides, num_accounts=3
        )
        assert len(genesis_state) == 3
        for _public_address, account_state in genesis_state.items():
            assert account_state["balance"] == state_overrides["balance"]
            assert account_state["code"] == b""
        # Only existing default genesis state keys can be overridden
        with pytest.raises(ValueError):
            _invalid_genesis_state = PyEVMBackend.generate_genesis_state(
                overrides=invalid_overrides
            )

    def test_override_genesis_state(self):
        """A backend built from an overridden genesis state exposes the
        overridden balances both directly and through EthereumTester."""
        state_overrides = {"balance": to_wei(900000, "ether")}
        test_accounts = 3
        # Initialize PyEVM backend with custom genesis state
        genesis_state = PyEVMBackend.generate_genesis_state(
            overrides=state_overrides, num_accounts=test_accounts
        )
        # Test the correct number of accounts are created with the specified balance override
        pyevm_backend = PyEVMBackend(genesis_state=genesis_state)
        assert len(pyevm_backend.account_keys) == test_accounts
        for private_key in pyevm_backend.account_keys:
            account = private_key.public_key.to_canonical_address()
            balance = pyevm_backend.get_balance(account=account)
            assert balance == state_overrides["balance"]
        # Test integration with EthereumTester
        tester = EthereumTester(backend=pyevm_backend)
        for private_key in pyevm_backend.account_keys:
            account = private_key.public_key.to_checksum_address()
            balance = tester.get_balance(account=account)
            assert balance == state_overrides["balance"]

    def test_from_mnemonic(self):
        """Accounts derived from the well-known test mnemonic come out in a
        deterministic order with the requested balance."""
        # Initialize PyEVM backend using MNEMONIC, num_accounts,
        # and state overrides (balance)
        num_accounts = 3
        balance = to_wei(15, "ether")  # Give each account 15 Eth
        pyevm_backend = PyEVMBackend.from_mnemonic(
            MNEMONIC, num_accounts=num_accounts, genesis_state_overrides={"balance": balance}
        )
        # Each of these accounts stems from the MNEMONIC
        expected_accounts = [
            "0x1e59ce931b4cfea3fe4b875411e280e173cb7a9c",
            "0xc89d42189f0450c2b2c3c61f58ec5d628176a1e7",
            "<KEY>"
        ]
        # Test integration with EthereumTester
        tester = EthereumTester(backend=pyevm_backend)
        actual_accounts = tester.get_accounts()
        assert len(actual_accounts) == num_accounts
        for i in range(0, num_accounts):
            actual = actual_accounts[i]
            expected = expected_accounts[i]
            assert actual.lower() == expected.lower()
            assert tester.get_balance(account=actual) == balance

    def test_generate_custom_genesis_parameters(self):
        """Genesis-parameter helpers merge overrides and reject unknown
        keys with ValueError."""
        # Establish parameter overrides, for example a custom genesis gas limit
        param_overrides = {"gas_limit": 4750000}
        # Test the underlying default parameter merging functionality
        genesis_params = get_default_genesis_params(overrides=param_overrides)
        assert genesis_params["gas_limit"] == param_overrides["gas_limit"]
        # Use the the staticmethod to generate custom genesis parameters
        genesis_params = PyEVMBackend.generate_genesis_params(param_overrides)
        assert genesis_params["gas_limit"] == param_overrides["gas_limit"]
        # Only existing default genesis parameter keys can be overridden
        invalid_overrides = {"gato": "con botas"}
        with pytest.raises(ValueError):
            _invalid_genesis_params = PyEVMBackend.generate_genesis_params(
                overrides=invalid_overrides
            )

    def test_override_genesis_parameters(self):
        """An overridden genesis gas limit shows up on block 0 and carries
        into block 1, both directly and via EthereumTester."""
        # Establish a custom gas limit
        param_overrides = {"gas_limit": 4750000}
        block_one_gas_limit = param_overrides['gas_limit']
        # Initialize PyEVM backend with custom genesis parameters
        genesis_params = PyEVMBackend.generate_genesis_params(
            overrides=param_overrides
        )
        pyevm_backend = PyEVMBackend(genesis_parameters=genesis_params)
        genesis_block = pyevm_backend.get_block_by_number(0)
        assert genesis_block["gas_limit"] == param_overrides["gas_limit"]
        genesis_block = pyevm_backend.get_block_by_number(1)
        assert genesis_block["gas_limit"] == block_one_gas_limit
        # Integrate with EthereumTester
        tester = EthereumTester(backend=pyevm_backend)
        genesis_block = tester.get_block_by_number(0)
        assert genesis_block["gas_limit"] == param_overrides["gas_limit"]
        genesis_block = tester.get_block_by_number(1)
        assert genesis_block["gas_limit"] == block_one_gas_limit

    def test_send_transaction_invalid_from(self, eth_tester):
        """Sending from the zero address must fail validation with a clear
        'no valid from' error."""
        accounts = eth_tester.get_accounts()
        assert accounts, "No accounts available for transaction sending"
        with pytest.raises(ValidationError, match=r'No valid "from" key was provided'):
            self._send_and_check_transaction(
                eth_tester, SIMPLE_TRANSACTION, ZERO_ADDRESS_HEX
            )
| from __future__ import unicode_literals
import pytest
from eth.vm.forks import (
BerlinVM,
FrontierVM,
LondonVM,
)
from eth_utils import to_wei
from eth_tester import EthereumTester, PyEVMBackend
from eth_tester.backends.pyevm.main import (
generate_genesis_state_for_keys,
get_default_account_keys,
get_default_genesis_params,
)
from eth_tester.backends.pyevm.utils import is_supported_pyevm_version_available
from eth_tester.exceptions import ValidationError
from eth_tester.utils.backend_testing import BaseTestBackendDirect, SIMPLE_TRANSACTION
ZERO_ADDRESS_HEX = "0x0000000000000000000000000000000000000000"
MNEMONIC = "test test test test test test test test test test test junk"
@pytest.fixture
def eth_tester():
if not is_supported_pyevm_version_available():
pytest.skip("PyEVM is not available")
backend = PyEVMBackend()
return EthereumTester(backend=backend)
def test_custom_virtual_machines():
if not is_supported_pyevm_version_available():
pytest.skip("PyEVM is not available")
backend = PyEVMBackend(vm_configuration=(
(0, FrontierVM),
(3, LondonVM),
))
# This should be a FrontierVM block
VM_at_2 = backend.chain.get_vm_class_for_block_number(2)
# This should be a LondonVM block
VM_at_3 = backend.chain.get_vm_class_for_block_number(3)
assert issubclass(VM_at_2, FrontierVM)
assert not issubclass(VM_at_2, LondonVM)
assert issubclass(VM_at_3, LondonVM)
# Right now, just test that EthereumTester doesn't crash
# Maybe some more sophisticated test to make sure the VMs are set correctly?
# We should to make sure the VM config translates all the way to the main
# tester, maybe with a custom VM that hard-codes some block value? that can
# be found with tester.get_block_by_number()?
EthereumTester(backend=backend)
def test_berlin_configuration():
if not is_supported_pyevm_version_available():
pytest.skip("PyEVM is not available")
mnemonic = "test test test test test test test test test test test junk"
vm_configuration = ((0, BerlinVM),)
backend = PyEVMBackend.from_mnemonic(
mnemonic, vm_configuration=vm_configuration
)
# Berlin blocks shouldn't have base_fee_per_gas,
# since London was the fork that introduced base_fee_per_gas
with pytest.raises(KeyError):
backend.get_block_by_number(0)['base_fee_per_gas']
EthereumTester(backend=backend)
def test_london_configuration():
if not is_supported_pyevm_version_available():
pytest.skip("PyEVM is not available")
backend = PyEVMBackend(vm_configuration=(
(0, LondonVM),
))
assert backend.get_block_by_number(0)['base_fee_per_gas'] == 1000000000
EthereumTester(backend=backend)
class TestPyEVMBackendDirect(BaseTestBackendDirect):
def test_generate_custom_genesis_state(self):
state_overrides = {"balance": to_wei(900000, "ether")}
invalid_overrides = {"gato": "con botas"}
# Test creating a specific number of accounts
account_keys = get_default_account_keys(quantity=2)
assert len(account_keys) == 2
account_keys = get_default_account_keys(quantity=10)
assert len(account_keys) == 10
# Test the underlying state merging functionality
genesis_state = generate_genesis_state_for_keys(
account_keys=account_keys, overrides=state_overrides
)
assert len(genesis_state) == len(account_keys) == 10
for _public_address, account_state in genesis_state.items():
assert account_state["balance"] == state_overrides["balance"]
assert account_state["code"] == b""
# Only existing default genesis state keys can be overridden
with pytest.raises(ValueError):
_invalid_genesis_state = generate_genesis_state_for_keys(
account_keys=account_keys, overrides=invalid_overrides
)
# Use staticmethod state overriding
genesis_state = PyEVMBackend.generate_genesis_state(
overrides=state_overrides, num_accounts=3
)
assert len(genesis_state) == 3
for _public_address, account_state in genesis_state.items():
assert account_state["balance"] == state_overrides["balance"]
assert account_state["code"] == b""
# Only existing default genesis state keys can be overridden
with pytest.raises(ValueError):
_invalid_genesis_state = PyEVMBackend.generate_genesis_state(
overrides=invalid_overrides
)
def test_override_genesis_state(self):
state_overrides = {"balance": to_wei(900000, "ether")}
test_accounts = 3
# Initialize PyEVM backend with custom genesis state
genesis_state = PyEVMBackend.generate_genesis_state(
overrides=state_overrides, num_accounts=test_accounts
)
# Test the correct number of accounts are created with the specified balance override
pyevm_backend = PyEVMBackend(genesis_state=genesis_state)
assert len(pyevm_backend.account_keys) == test_accounts
for private_key in pyevm_backend.account_keys:
account = private_key.public_key.to_canonical_address()
balance = pyevm_backend.get_balance(account=account)
assert balance == state_overrides["balance"]
# Test integration with EthereumTester
tester = EthereumTester(backend=pyevm_backend)
for private_key in pyevm_backend.account_keys:
account = private_key.public_key.to_checksum_address()
balance = tester.get_balance(account=account)
assert balance == state_overrides["balance"]
def test_from_mnemonic(self):
# Initialize PyEVM backend using MNEMONIC, num_accounts,
# and state overrides (balance)
num_accounts = 3
balance = to_wei(15, "ether") # Give each account 15 Eth
pyevm_backend = PyEVMBackend.from_mnemonic(
MNEMONIC, num_accounts=num_accounts, genesis_state_overrides={"balance": balance}
)
# Each of these accounts stems from the MNEMONIC
expected_accounts = [
"0x1e59ce931b4cfea3fe4b875411e280e173cb7a9c",
"0xc89d42189f0450c2b2c3c61f58ec5d628176a1e7",
"<KEY>"
]
# Test integration with EthereumTester
tester = EthereumTester(backend=pyevm_backend)
actual_accounts = tester.get_accounts()
assert len(actual_accounts) == num_accounts
for i in range(0, num_accounts):
actual = actual_accounts[i]
expected = expected_accounts[i]
assert actual.lower() == expected.lower()
assert tester.get_balance(account=actual) == balance
def test_generate_custom_genesis_parameters(self):
# Establish parameter overrides, for example a custom genesis gas limit
param_overrides = {"gas_limit": 4750000}
# Test the underlying default parameter merging functionality
genesis_params = get_default_genesis_params(overrides=param_overrides)
assert genesis_params["gas_limit"] == param_overrides["gas_limit"]
# Use the the staticmethod to generate custom genesis parameters
genesis_params = PyEVMBackend.generate_genesis_params(param_overrides)
assert genesis_params["gas_limit"] == param_overrides["gas_limit"]
# Only existing default genesis parameter keys can be overridden
invalid_overrides = {"gato": "con botas"}
with pytest.raises(ValueError):
_invalid_genesis_params = PyEVMBackend.generate_genesis_params(
overrides=invalid_overrides
)
def test_override_genesis_parameters(self):
# Establish a custom gas limit
param_overrides = {"gas_limit": 4750000}
block_one_gas_limit = param_overrides['gas_limit']
# Initialize PyEVM backend with custom genesis parameters
genesis_params = PyEVMBackend.generate_genesis_params(
overrides=param_overrides
)
pyevm_backend = PyEVMBackend(genesis_parameters=genesis_params)
genesis_block = pyevm_backend.get_block_by_number(0)
assert genesis_block["gas_limit"] == param_overrides["gas_limit"]
genesis_block = pyevm_backend.get_block_by_number(1)
assert genesis_block["gas_limit"] == block_one_gas_limit
# Integrate with EthereumTester
tester = EthereumTester(backend=pyevm_backend)
genesis_block = tester.get_block_by_number(0)
assert genesis_block["gas_limit"] == param_overrides["gas_limit"]
genesis_block = tester.get_block_by_number(1)
assert genesis_block["gas_limit"] == block_one_gas_limit
def test_send_transaction_invalid_from(self, eth_tester):
accounts = eth_tester.get_accounts()
assert accounts, "No accounts available for transaction sending"
with pytest.raises(ValidationError, match=r'No valid "from" key was provided'):
self._send_and_check_transaction(
eth_tester, SIMPLE_TRANSACTION, ZERO_ADDRESS_HEX
)
| en | 0.723458 | # This should be a FrontierVM block # This should be a LondonVM block # Right now, just test that EthereumTester doesn't crash # Maybe some more sophisticated test to make sure the VMs are set correctly? # We should to make sure the VM config translates all the way to the main # tester, maybe with a custom VM that hard-codes some block value? that can # be found with tester.get_block_by_number()? # Berlin blocks shouldn't have base_fee_per_gas, # since London was the fork that introduced base_fee_per_gas # Test creating a specific number of accounts # Test the underlying state merging functionality # Only existing default genesis state keys can be overridden # Use staticmethod state overriding # Only existing default genesis state keys can be overridden # Initialize PyEVM backend with custom genesis state # Test the correct number of accounts are created with the specified balance override # Test integration with EthereumTester # Initialize PyEVM backend using MNEMONIC, num_accounts, # and state overrides (balance) # Give each account 15 Eth # Each of these accounts stems from the MNEMONIC # Test integration with EthereumTester # Establish parameter overrides, for example a custom genesis gas limit # Test the underlying default parameter merging functionality # Use the the staticmethod to generate custom genesis parameters # Only existing default genesis parameter keys can be overridden # Establish a custom gas limit # Initialize PyEVM backend with custom genesis parameters # Integrate with EthereumTester | 2.071572 | 2 |
python-scripts/simsetup.py | Craigspaz/gmu-research | 2 | 6617335 | import random
import sys
import argparse
###############################################################################
# Simple script used to setup simulation scenarios either within a text file #
# or through the command line. #
# #
# Author: <NAME> #
###############################################################################
def parse_command():
    """Parse command-line options and dispatch to read_file (when an input
    file is given via -i) or write_file (fully random generation).

    NOTE(review): the __name__ guard INSIDE the function means calling
    parse_command() from an importing module silently does nothing, because
    __name__ is then the module's name — the guard likely belongs around the
    call site instead.
    """
    if __name__ == '__main__':
        parser = argparse.ArgumentParser(description='Generate new branch file.')
        parser.add_argument('-b', type=int, nargs='?', default=31,
            help='set number of branches for this simulation. [Default is 31.]')
        # NOTE(review): -l, -w and -f are parsed but write_file never uses
        # their values (it draws randint(0,1)/uniform regardless) — confirm
        # whether these probabilities were meant to weight the draws.
        parser.add_argument('-l', type=float, nargs='?', default=.05,
            help='set the probabilty for random selection of hardened lines for this simulation. [Default is randint(0, 1)]')
        parser.add_argument('-w', type=float, nargs='?', default=.05,
            help='set the probability for random selection of line located in weather zone for this simulation. [Default is randint(0, 1)]')
        parser.add_argument('-f', type=float, nargs='?', default=.05,
            help='set the probability for random selection of failure rates for each branch for this simulation. [Default is uniform(0, 1)]')
        parser.add_argument('-fr', type=float, nargs='+', default=[0, 1],
            help='set the range for random number generation of failure rates. [Default is range(0, 1)')
        parser.add_argument('-o', nargs='?', default="data.txt",
            help='set the output file. [Default is data.txt]')
        parser.add_argument('-i', nargs='?',
            help='set the input file. [Default is input.txt]')
        # parser.add_argument('--gen', nargs='?',
        #                     help='generate a new simulation file')
        args = parser.parse_args(sys.argv[1:])
        # -i selects file-driven generation; otherwise fully random output.
        if(args.i):
            read_file(args)
        else:
            write_file(args)
def read_file(args):
    """Generate a branch file driven by a three-line input file.

    The input file (args.i) supplies: line 1, comma-separated weather-zone
    positions; line 2, the maximum number of hardened lines; line 3, the
    failure-rate probability applied to every branch. One row per branch
    ("<weather> <harden> <rate> ") is written to args.o and echoed, followed
    by a summary.
    """
    with open(args.i, "r") as source:
        zones = [int(token) for token in source.readline().split(',')]
        max_harden = int(source.readline())
        failure_rate_prob = float(source.readline())

    hardened = 0        # hardened lines assigned so far (capped at max_harden)
    non_zone_seen = 0   # counter compared against the zone position list
    next_zone = 0       # index of the next pending entry in zones

    with open(args.o, "w") as sink:
        for _ in range(args.b):
            # Randomly harden lines until the cap is reached.
            line_harden_val = 0
            if hardened < max_harden:
                line_harden_val = random.randint(0, 1)
                hardened += line_harden_val
            # Mark the branch as in a weather zone when the counter matches
            # the next configured zone position.
            if next_zone < len(zones) and non_zone_seen == zones[next_zone]:
                weather_zone_val = 1
                next_zone += 1
            else:
                weather_zone_val = 0
                non_zone_seen += 1
            row = str(weather_zone_val) + ' ' + str(line_harden_val) + ' ' + "{0:.2f} ".format(failure_rate_prob)
            sink.write(row + '\n')
            print(row)

    print("\nFile " + args.o + " has been generated successfully!\n")
    print("Failure Rate: " + str(failure_rate_prob))
    print("Number of Branches: " + str(args.b))
    print("Weather Zones: " + str(zones))
    print("Max Hardened Lines: " + str(max_harden))
def write_file(args):
    """Generate a fully random branch file.

    Writes args.b rows of "<weather> <harden> <rate> " to args.o, echoing
    each row, where weather/harden are uniform coin flips and rate is drawn
    uniformly from the [args.fr[0], args.fr[1]] range.
    """
    line_harden_prob = args.l    # parsed by the CLI but not used in the draws
    weather_zone_prob = args.w   # parsed by the CLI but not used in the draws
    fmin, fmax = args.fr[0], args.fr[1]

    with open(args.o, "w+") as sink:
        for _ in range(args.b):
            harden = random.randint(0, 1)
            weather = random.randint(0, 1)
            rate = random.uniform(fmin, fmax)
            row = str(weather) + ' ' + str(harden) + ' ' + "{0:.2f} ".format(rate)
            sink.write(row + '\n')
            print(row)

    print("\nFile " + args.o + " has been generated successfully!\n")
    print("Number of Branches: " + str(args.b) + "\n")
def calculate():
    """Print the sum of the integers 0 through 99 (i.e. 4950)."""
    print(sum(range(100)))
parse_command()
| import random
import sys
import argparse
###############################################################################
# Simple script used to setup simulation scenarios either within a text file #
# or through the command line. #
# #
# Author: <NAME> #
###############################################################################
def parse_command():
    """Parse command-line options and dispatch to read_file/write_file.

    The body is wrapped in an ``if __name__ == '__main__'`` guard, so this
    function is a no-op when the module is imported; parsing and file
    generation only happen when the script is run directly.  With
    ``-i <file>`` the scenario is read from a description file
    (read_file); otherwise a fully random scenario is generated
    (write_file).
    """
    if __name__ == '__main__':
        parser = argparse.ArgumentParser(description='Generate new branch file.')
        parser.add_argument('-b', type=int, nargs='?', default=31,
                            help='set number of branches for this simulation. [Default is 31.]')
        # Fixed typo "probabilty" -> "probability" in the help text below.
        parser.add_argument('-l', type=float, nargs='?', default=.05,
                            help='set the probability for random selection of hardened lines for this simulation. [Default is randint(0, 1)]')
        parser.add_argument('-w', type=float, nargs='?', default=.05,
                            help='set the probability for random selection of line located in weather zone for this simulation. [Default is randint(0, 1)]')
        parser.add_argument('-f', type=float, nargs='?', default=.05,
                            help='set the probability for random selection of failure rates for each branch for this simulation. [Default is uniform(0, 1)]')
        # Fixed missing closing bracket in the help text below.
        parser.add_argument('-fr', type=float, nargs='+', default=[0, 1],
                            help='set the range for random number generation of failure rates. [Default is range(0, 1)]')
        parser.add_argument('-o', nargs='?', default="data.txt",
                            help='set the output file. [Default is data.txt]')
        parser.add_argument('-i', nargs='?',
                            help='set the input file. [Default is input.txt]')
        # parser.add_argument('--gen', nargs='?',
        #                     help='generate a new simulation file')
        args = parser.parse_args(sys.argv[1:])
        # An input file means "read scenario"; otherwise generate randomly.
        if args.i:
            read_file(args)
        else:
            write_file(args)
def read_file(args):
    """Generate a branch file from a scenario description file.

    The input file (``args.i``) is expected to contain three lines:
      1. comma-separated indices of branches located in a weather zone
      2. the maximum number of lines that may be hardened
      3. the failure-rate probability applied to every branch

    One line per branch (``args.b`` total) is written to ``args.o`` in the
    form ``<weather_zone> <hardened> <failure_rate>``.
    """
    indata = args.i
    outdata = args.o
    zones = []
    max_harden = 0
    failure_rate_prob = 0
    num_branches = args.b
    with open(indata, "r") as f:
        # Line 1: weather-zone branch indices, e.g. "5,12,20"
        zones = list(map(int, f.readline().split(',')))
        # Line 2: cap on the number of hardened lines
        max_harden = int(f.readline())
        # Line 3: shared failure-rate probability for all branches
        failure_rate_prob = float(f.readline())
    harden_count = 0
    zone_count = 0
    zone_pointer = 0
    with open(outdata, "w") as f:
        for i in range(num_branches):
            # Randomly harden lines until the cap is reached.
            if harden_count < max_harden:
                line_harden_val = random.randint(0, 1)
                if(line_harden_val == 1):
                    harden_count += 1
            else:
                line_harden_val = 0
            # Flag this branch as in a weather zone when its running index
            # matches the next entry of the zones list.
            if zone_pointer < len(zones) and zone_count == zones[zone_pointer]:
                weather_zone_val = 1
                zone_pointer += 1
            else:
                weather_zone_val = 0
            zone_count += 1
            f.write(str(weather_zone_val) + ' ' + str(line_harden_val) + ' ' + "{0:.2f} ".format(failure_rate_prob) + '\n')
            print(str(weather_zone_val) + ' ' + str(line_harden_val) + ' ' + "{0:.2f} ".format(failure_rate_prob))
    print("\nFile " + outdata + " has been generated successfully!\n")
    print("Failure Rate: " + str(failure_rate_prob))
    print("Number of Branches: " + str(num_branches))
    print("Weather Zones: " + str(zones))
    print("Max Hardened Lines: " + str(max_harden))
def write_file(args):
    """Write a fully random branch file to ``args.o``.

    Each of the ``args.b`` output lines gets an independently random
    weather-zone flag, hardened-line flag, and a failure rate drawn
    uniformly from the range given by ``args.fr``.
    """
    fn = args.o
    num_branches = args.b
    # NOTE(review): these two probabilities are read but never used below;
    # both flags are drawn with randint(0, 1) regardless -- confirm intent.
    line_harden_prob = args.l
    weather_zone_prob = args.w
    failure_rate_prob = 0
    fmin = args.fr[0]
    fmax = args.fr[1]
    with open(fn, "w+") as f:
        for i in range(num_branches):
            line_harden_val = random.randint(0, 1)
            weather_zone_val = random.randint(0, 1)
            failure_rate_prob = random.uniform(fmin, fmax)
            f.write(str(weather_zone_val) + ' ' + str(line_harden_val) + ' ' + "{0:.2f} ".format(failure_rate_prob) + '\n')
            print(str(weather_zone_val) + ' ' + str(line_harden_val) + ' ' + "{0:.2f} ".format(failure_rate_prob))
    print("\nFile " + fn + " has been generated successfully!\n")
    print("Number of Branches: " + str(num_branches) + "\n")
def calculate():
    """Print the sum of the integers 0 through 99 (4950).

    Not referenced elsewhere in the visible script.
    """
    answer = 0
    for i in range(100):
        answer += i
    print(answer)
parse_command()
| de | 0.414015 | ############################################################################### # Simple script used to setup simulation scenarios either within a text file # # or through the command line. # # # # Author: <NAME> # ############################################################################### # parser.add_argument('--gen', nargs='?', # help='generate a new simulation file') | 3.077368 | 3 |
qst_win.py | HomiGrotas/EZ-Educational-Game | 1 | 6617336 | import pygame
import random
from windows import Window
from pygame.locals import VIDEORESIZE # to able the changing of the window
__author__ = "<NAME>"
###############################################################################
# making a questions window - inherits abilities from the normal window #
# it has the ability to get input from the user #
###############################################################################
class QstWin(Window):
    """Questions window: a Window that also accepts typed keyboard input.

    Displays a randomly pooled question plus a label echoing the user's
    typed answer; when the typed text matches the current answer, a new
    question is pooled.
    """

    def __init__(self, english_img, hebrew_img, language_app, play_music, width, height, questions, answers, level):
        """Create the window and set up the answer-input and question labels.

        questions/answers are parallel sequences (answers[i] belongs to
        questions[i]); level is stored for the caller's use.
        """
        super().__init__(english_img, hebrew_img, language_app, play_music, width, height)
        self.make_return_button()
        self.input = ''  # text the user has typed so far
        # Index 4 of the feature lists is the user-input label.
        self.featuresLst.insert(4, "label")
        self.featuresArgs.insert(4, ("Enter answer here", 3, 3.5))
        self.questions = questions
        self.answers = answers
        self.answer = None  # answer of the currently shown question
        self.level = level
        # Extra label slot for the question text (its args are added by
        # pool_random_question and removed via pop(5) in run()).
        self.featuresLst.append("label")

    def pool_random_question(self):
        """Pick a random question/answer pair and queue the question label.

        Chooses a random index, remembers the matching answer in
        self.answer, and appends the question's label arguments to
        featuresArgs (featuresLst already contains its "label" entry).
        """
        ind = random.randint(0, len(self.questions)-1)
        qst = self.questions[ind]
        self.answer = self.answers[ind]
        self.featuresArgs.append((qst, 3, 4.5))

    def run(self, buttons_loc, func_to_call):
        """Main event loop; unlike Window's loop it also captures typed text.

        buttons_loc  -- relative button rectangles; the exit button's
                        rectangle is appended here.
        func_to_call -- callables matched by index to buttons_loc, invoked
                        when a button that leaves this window is clicked.
        """
        # makes a clock (caps the loop at 60 FPS below)
        clock = pygame.time.Clock()
        # adding the location of the exit button
        buttons_loc.append((0.92, 1, 0.9, 1))
        print("run function-- working")
        run = True
        while run:
            clock.tick(60)
            # update the screen
            pygame.display.update()
            # for event in the events of the game:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    run = False
                    pygame.quit()
                elif event.type == pygame.KEYDOWN:
                    print(self.featuresLst, self.featuresArgs)
                    need_resize = False
                    key_name = pygame.key.name(event.key)
                    # Single-character keys are appended verbatim;
                    # backspace and space are handled specially.
                    if len(key_name) == 1:
                        self.input += key_name
                        need_resize = True
                    elif key_name == 'backspace':
                        self.input = self.input[:-1]
                        need_resize = True
                    elif key_name == 'space':
                        self.input += ' '
                        need_resize = True
                    # if a key that is needed was clicked
                    if need_resize:
                        # Correct answer: drop the old question label, pool
                        # a fresh question, and clear the typed input.
                        if self.input == self.answer:
                            self.featuresArgs.pop(5)
                            self.pool_random_question()
                            self.input = ''
                        # Refresh the input label (index 4) and redraw.
                        self.featuresArgs.pop(4)
                        self.featuresArgs.insert(4, (self.input, 3, 3.5))
                        self.resize((self.curW, self.curH))
                # if there is a change in the size
                elif event.type == VIDEORESIZE:
                    self.resize(event.size)
                elif event.type == pygame.MOUSEMOTION:
                    self.mouse_motion(buttons_loc)
                # if the user clicked on something with the left side of the mouse
                elif event.type == pygame.MOUSEBUTTONDOWN:
                    if event.button == 1:
                        # Set the x, y positions of the mouse click
                        x, y = event.pos
                        # checks if the loop needs to stop and
                        # gets the number of the function to run (it doesn't mean that the function will be called)
                        run, ind = self.check_button(buttons_loc, x, y)
                        # Last rectangle is the exit button appended above.
                        if ind == len(buttons_loc) - 1:
                            run = False
                            pygame.quit()
                        # if the app should go to another window
                        elif not run:
                            func_to_call[ind](self.curW, self.curH, self.language, self.play_music)
                        # if the loop needs to continue
                        else:
                            # checks if it's one of the none-exit buttons
                            self.check_button_without_exit(x, y)
                        print(f'Clicked on screen: ({x},{y})')
        print("Window was closed")
| import pygame
import random
from windows import Window
from pygame.locals import VIDEORESIZE # to able the changing of the window
__author__ = "<NAME>"
###############################################################################
# making a questions window - inherits abilities from the normal window #
# it has the ability to get input from the user #
###############################################################################
class QstWin(Window):
    """Questions window: a Window that also accepts typed keyboard input.

    Displays a randomly pooled question plus a label echoing the user's
    typed answer; when the typed text matches the current answer, a new
    question is pooled.
    """

    def __init__(self, english_img, hebrew_img, language_app, play_music, width, height, questions, answers, level):
        """Create the window and set up the answer-input and question labels.

        questions/answers are parallel sequences (answers[i] belongs to
        questions[i]); level is stored for the caller's use.
        """
        super().__init__(english_img, hebrew_img, language_app, play_music, width, height)
        self.make_return_button()
        self.input = ''  # text the user has typed so far
        # Index 4 of the feature lists is the user-input label.
        self.featuresLst.insert(4, "label")
        self.featuresArgs.insert(4, ("Enter answer here", 3, 3.5))
        self.questions = questions
        self.answers = answers
        self.answer = None  # answer of the currently shown question
        self.level = level
        # Extra label slot for the question text (its args are added by
        # pool_random_question and removed via pop(5) in run()).
        self.featuresLst.append("label")

    def pool_random_question(self):
        """Pick a random question/answer pair and queue the question label.

        Chooses a random index, remembers the matching answer in
        self.answer, and appends the question's label arguments to
        featuresArgs (featuresLst already contains its "label" entry).
        """
        ind = random.randint(0, len(self.questions)-1)
        qst = self.questions[ind]
        self.answer = self.answers[ind]
        self.featuresArgs.append((qst, 3, 4.5))

    def run(self, buttons_loc, func_to_call):
        """Main event loop; unlike Window's loop it also captures typed text.

        buttons_loc  -- relative button rectangles; the exit button's
                        rectangle is appended here.
        func_to_call -- callables matched by index to buttons_loc, invoked
                        when a button that leaves this window is clicked.
        """
        # makes a clock (caps the loop at 60 FPS below)
        clock = pygame.time.Clock()
        # adding the location of the exit button
        buttons_loc.append((0.92, 1, 0.9, 1))
        print("run function-- working")
        run = True
        while run:
            clock.tick(60)
            # update the screen
            pygame.display.update()
            # for event in the events of the game:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    run = False
                    pygame.quit()
                elif event.type == pygame.KEYDOWN:
                    print(self.featuresLst, self.featuresArgs)
                    need_resize = False
                    key_name = pygame.key.name(event.key)
                    # Single-character keys are appended verbatim;
                    # backspace and space are handled specially.
                    if len(key_name) == 1:
                        self.input += key_name
                        need_resize = True
                    elif key_name == 'backspace':
                        self.input = self.input[:-1]
                        need_resize = True
                    elif key_name == 'space':
                        self.input += ' '
                        need_resize = True
                    # if a key that is needed was clicked
                    if need_resize:
                        # Correct answer: drop the old question label, pool
                        # a fresh question, and clear the typed input.
                        if self.input == self.answer:
                            self.featuresArgs.pop(5)
                            self.pool_random_question()
                            self.input = ''
                        # Refresh the input label (index 4) and redraw.
                        self.featuresArgs.pop(4)
                        self.featuresArgs.insert(4, (self.input, 3, 3.5))
                        self.resize((self.curW, self.curH))
                # if there is a change in the size
                elif event.type == VIDEORESIZE:
                    self.resize(event.size)
                elif event.type == pygame.MOUSEMOTION:
                    self.mouse_motion(buttons_loc)
                # if the user clicked on something with the left side of the mouse
                elif event.type == pygame.MOUSEBUTTONDOWN:
                    if event.button == 1:
                        # Set the x, y positions of the mouse click
                        x, y = event.pos
                        # checks if the loop needs to stop and
                        # gets the number of the function to run (it doesn't mean that the function will be called)
                        run, ind = self.check_button(buttons_loc, x, y)
                        # Last rectangle is the exit button appended above.
                        if ind == len(buttons_loc) - 1:
                            run = False
                            pygame.quit()
                        # if the app should go to another window
                        elif not run:
                            func_to_call[ind](self.curW, self.curH, self.language, self.play_music)
                        # if the loop needs to continue
                        else:
                            # checks if it's one of the none-exit buttons
                            self.check_button_without_exit(x, y)
                        print(f'Clicked on screen: ({x},{y})')
        print("Window was closed")
| en | 0.600313 | # to able the changing of the window ############################################################################### # making a questions window - inherits abilities from the normal window # # it has the ability to get input from the user # ############################################################################### ################################################################# # pool_random_question # # does: grill index and by it makes a question and answer # # add the question to the featuresArgs for painting label # # (it's already in featuresLst) # ################################################################# ############################################################## # run function # # main loop- the difference between this function to the # # regular one is here you can get input # # it also check the input # ############################################################## # makes a clock # adding the location of the exit button # update the screen # for event in the events of the game: # if a key that is needed was clicked # if there is a change in the size # if the user clicked on something with the left side of the mouse # Set the x, y positions of the mouse click # checks if the loop needs to stop and checks # gets the number of the function to run (it doesn't mean that the function will be called) # if the app should go to another window # if the loop needs to continue # checks if it's one of the none-exit buttons | 3.503245 | 4 |
teardown/__init__.py | nefischer/aws-account-teardown | 0 | 6617337 | """Module providing methods to tear down an AWS service's resources.
"""
| """Module providing methods to tear an AWS service's resources
"""
| en | 0.80526 | Module providing methods to tear an AWS service's resources | 0.979277 | 1 |
Tests/Python/PandasTests/PandasIndexingTests.py | sucrose0413/Algoloop | 0 | 6617338 | <filename>Tests/Python/PandasTests/PandasIndexingTests.py<gh_stars>0
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Research")
AddReference("QuantConnect.Common")
from AlgorithmImports import *
class PandasIndexingTests():
    """Regression test for indexing a pandas history DataFrame with a
    list of QuantConnect Symbol objects (research/QuantBook API)."""

    def __init__(self):
        # QuantBook provides notebook-style access to historical data.
        self.qb = QuantBook()
        self.qb.SetStartDate(2020, 1, 1)
        self.qb.SetEndDate(2020, 1, 4)
        self.symbol = self.qb.AddEquity("SPY", Resolution.Daily).Symbol

    def test_indexing_dataframe_with_list(self):
        """Request 30 daily SPY bars and index the close-price frame with a
        list of Symbol objects; returns True if no exception is raised."""
        symbols = [self.symbol]
        self.history = self.qb.History(symbols, 30)
        # unstack(level=0) pivots the symbol index level into columns.
        self.history = self.history['close'].unstack(level=0).dropna()
        test = self.history[[self.symbol]]
        return True
| <filename>Tests/Python/PandasTests/PandasIndexingTests.py<gh_stars>0
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Research")
AddReference("QuantConnect.Common")
from AlgorithmImports import *
class PandasIndexingTests():
    """Regression test for indexing a pandas history DataFrame with a
    list of QuantConnect Symbol objects (research/QuantBook API)."""

    def __init__(self):
        # QuantBook provides notebook-style access to historical data.
        self.qb = QuantBook()
        self.qb.SetStartDate(2020, 1, 1)
        self.qb.SetEndDate(2020, 1, 4)
        self.symbol = self.qb.AddEquity("SPY", Resolution.Daily).Symbol

    def test_indexing_dataframe_with_list(self):
        """Request 30 daily SPY bars and index the close-price frame with a
        list of Symbol objects; returns True if no exception is raised."""
        symbols = [self.symbol]
        self.history = self.qb.History(symbols, 30)
        # unstack(level=0) pivots the symbol index level into columns.
        self.history = self.history['close'].unstack(level=0).dropna()
        test = self.history[[self.symbol]]
        return True
| en | 0.840437 | # QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals. # Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. | 2.256192 | 2 |
generate_range_data.py | robberwick/robostats_mcl | 0 | 6617339 | <gh_stars>0
# Map exists which all robot particles operate in
# Particles each have a motion model and a measurement model
# Need to sample:
# Motion model for particle (given location of particle, map)
# Motion model (in this case) comes from log + noise.
# Measurement model for particle (given location, map)
# True measurements come from log
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import copy
from scipy.spatial import distance
import base64
from IPython.display import HTML
import montecarlo_localization as mcl
#load_ext autoreload
#autoreload 2
#%matplotlib inline
plt.style.use('ggplot')
#%%time
global_map = mcl.values_only_occupancy_map('data/map/wean.dat.gz')
def cache_map_ranges(theta_bins=120, map_width=800, map_height=800):
    """Pre-compute and cache the expected range reading for every map cell.

    For each (x, y) grid cell and each of ``theta_bins`` ray directions,
    casts a ray through the global occupancy map and records the distance
    to the nearest wall.  The array of shape
    (map_width, map_height, theta_bins) is saved to
    ``./data/range_array_<theta_bins>bin.npy`` and also returned.

    Map coordinates are multiplied by 10 before ray casting to match the
    occupancy map's resolution (as in the original code).  The map
    dimensions, previously hard-coded to 800x800, are now parameters with
    backward-compatible defaults.
    """
    range_array = np.zeros((map_width, map_height, theta_bins))
    # NOTE(review): linspace includes both endpoints, so the first and last
    # bins are cast at theta = 0 and theta = 2*pi (the same direction);
    # endpoint=False would tile the circle evenly.  Left unchanged because
    # existing cached files were built with this binning -- confirm intent.
    ray_thetas = np.linspace(0, 2 * np.pi, num=theta_bins)
    # Iterate cells directly instead of materializing a 640k-tuple list.
    for yidx in range(map_height):
        for xidx in range(map_width):
            for idx, theta in enumerate(ray_thetas):
                _, _, dist = mcl.raycast_bresenham(xidx * 10, yidx * 10, theta,
                                                   global_map, freespace_min_val=0.7)
                range_array[xidx, yidx, idx] = dist
    np.save('./data/range_array_{}bin'.format(theta_bins), range_array,
            allow_pickle=False)
    return range_array
raw_array = cache_map_ranges(theta_bins=40) # Takes ~9 minutes with theta_bins=120 on core i5 laptop
| # Map exists which all robot particles operate in
# Particles each have a motion model and a measurement model
# Need to sample:
# Motion model for particle (given location of particle, map)
# Motion model (in this case) comes from log + noise.
# Measurement model for particle (given location, map)
# True measurements come from log
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import copy
from scipy.spatial import distance
import base64
from IPython.display import HTML
import montecarlo_localization as mcl
#load_ext autoreload
#autoreload 2
#%matplotlib inline
plt.style.use('ggplot')
#%%time
global_map = mcl.values_only_occupancy_map('data/map/wean.dat.gz')
def cache_map_ranges(theta_bins=120):
    """Pre-compute and cache the expected range reading for every map cell.

    For each (x, y) cell of the 800x800 grid and each of ``theta_bins``
    ray directions, casts a ray through the global occupancy map and
    records the distance to the nearest wall.  The resulting array of
    shape (800, 800, theta_bins) is saved to
    ``./data/range_array_<theta_bins>bin.npy`` and also returned.
    """
    # NOTE(review): these two slice widths are computed but never used.
    slice_theta_rad = 2*np.pi/theta_bins
    slice_theta_deg = 360/theta_bins
    map_width, map_height = 800, 800
    coord_list = [(xidx, yidx) for yidx in range(map_height)
                  for xidx in range(map_width)]
    range_array = np.zeros([map_width,map_height,theta_bins])
    # NOTE(review): linspace includes both endpoints, so the first and last
    # bins are cast at theta = 0 and theta = 2*pi (the same direction);
    # values are radians despite the variable name.
    raycast_degree_values = np.linspace(0,2*np.pi, num=theta_bins)
    # pre-calculate (cache) expected distance to wall for each theta bin, at each map location
    for xidx,yidx in coord_list:
        for idx, theta in enumerate(raycast_degree_values):
            # Grid coordinates scaled by 10 to match the map's resolution.
            _,_,dist = mcl.raycast_bresenham(xidx*10, yidx*10, theta, global_map, freespace_min_val=0.7)
            range_array[xidx,yidx,idx] = dist
    np.save('./data/range_array_{}bin'.format(theta_bins), range_array, allow_pickle=False)
    return range_array
raw_array = cache_map_ranges(theta_bins=40) # Takes ~9 minutes with theta_bins=120 on core i5 laptop | en | 0.801681 | # Map exists which all robot particles operate in # Particles each have a motion model and a measurement model # Need to sample: # Motion model for particle (given location of particle, map) # Motion model (in this case) comes from log + noise. # Measurement model for particle (given location, map) # True measurements come from log #load_ext autoreload #autoreload 2 #%matplotlib inline #%%time # pre-calculate (cache) expected distance to wall for each theta bin, at each map location # Takes ~9 minutes with theta_bins=120 on core i5 laptop | 2.119261 | 2 |
backend/pollaris/app/views/side_effects.py | Elizabeth-Warren/pollaris | 78 | 6617340 | import json
import logging
import pprint
import urllib
import requests
from django.conf import settings
from django.http import JsonResponse
from jinja2 import Template
from ew_common.email_service import EmailService
from ew_common.input_validation import extract_phone_number, normalize_name
from ew_common.mobile_commons import create_or_update_mobile_commons_profile, send_sms
from pollaris.app.views.utils import SearchStatus, bad_request
DEFAULT_LANGUAGE = "en-US"
# URL for sending
BSD_WEB_POLL_TOOL_SUBMIT_URL = (
"https://bsd-signup-proxy"
)
BSD_SUBMIT_HEADERS = {"Origin": "https://elizabethwarren.com"}
VOTING_TYPE_EARLY_VOTE = "early_vote_locations"
VOTING_TYPE_DAY_OF = "polling_locations"
VOTING_TYPE_DROPBOX_LOCATIONS = "dropbox_locations"
VOTING_TYPE_MAIL = "vote_by_mail"
VOTING_TYPES = [
VOTING_TYPE_EARLY_VOTE,
VOTING_TYPE_DAY_OF,
VOTING_TYPE_DROPBOX_LOCATIONS,
VOTING_TYPE_MAIL,
]
# If recipient email address has +draftdraftdraft in it, we'll append
# '_draft' to the mailing template name.
VOTING_LOCATION_MAILING_RECIPIENT_DRAFT_SUFFIX = "+draftdraftdraft"
VOTING_LOCATION_MAILING_TEMPLATE_DRAFT_SUFFIX = "_draft"
VOTING_LOCATION_MAILING_CONFIGURATION_SET_NAME = "organizing_emails"
VOTING_LOCATION_MAILING_APPLICATION_NAME = "pollaris"
VOTING_LOCATION_MAILING_BY_LANGUAGE_STATE = {
# These templates are editable in our Contentful Emails space
"en-US": {
"IA": {
"template_name": "voting_location_IA",
"from_email": "<NAME> <<EMAIL>>",
"reply_to_email": "Iowa for Warren <<EMAIL>>",
},
"NH": {
"template_name": "voting_location_generic",
"from_email": "New Hampshire for Warren <<EMAIL>>",
"reply_to_email": "New Hampshire for Warren <<EMAIL>>",
},
"NV": {
# NV has early vote and day-of caucuses, so it needs its own
# template.
"template_name": "voting_location_NV",
"from_email": "<NAME> <<EMAIL>>",
"reply_to_email": "<NAME> <<EMAIL>>",
},
"default": {
"template_name": "voting_location_generic",
"from_email": "ElizabethWarren.com <<EMAIL>>",
"reply_to_email": "ElizabethWarren.com <<EMAIL>>",
},
},
"es-MX": {
"IA": {
"template_name": "voting_location_IA",
"from_email": "Iowa for Warren <<EMAIL>>",
"reply_to_email": "Iowa for Warren <<EMAIL>>",
},
"NH": {
"template_name": "voting_location_generic",
"from_email": "New Hampshire for Warren <<EMAIL>>",
"reply_to_email": "New Hampshire for Warren <<EMAIL>>",
},
"NV": {
"template_name": "voting_location_NV_es",
"from_email": "<NAME> <<EMAIL>>",
"reply_to_email": "<NAME> <<EMAIL>>",
},
"default": {
"template_name": "voting_location_generic_es",
"from_email": "ElizabethWarren.com <<EMAIL>>",
"reply_to_email": "ElizabethWarren.com <<EMAIL>>",
},
},
}
MOBILE_COMMONS_CAMPAIGN_ID = 189358
VOTING_LOCATION_SMS_BY_LANGUAGE_STATE = {
"en-US": {
"IA": {
"default": {
"opt_in_path_id": 290677,
"message": "Your caucus location:\n{{name_of_location}}\n{{polling_address}}\n{{polling_city}}, {{polling_state}} {{polling_zip}}\n{{time_of_event}}",
}
},
"NH": {
"default": {
"opt_in_path_id": 291258,
"message": "Your polling location:\n{{name_of_location}}\n{{polling_address}}\n{{polling_city}}, {{polling_state}} {{polling_zip}}\n{{time_of_event}}",
}
},
"NV": {
VOTING_TYPE_EARLY_VOTE: {
"opt_in_path_id": 292302,
"message": "Your early vote location:\n{{name_of_location}}\n{{polling_address}}\n{{polling_city}}, {{polling_state}} {{polling_zip}}\n{{time_of_event}}",
},
"default": {
"opt_in_path_id": 292305,
"message": "Your caucus location:\n{{name_of_location}}\n{{polling_address}}\n{{polling_city}}, {{polling_state}} {{polling_zip}}\n{{time_of_event}}",
},
},
"default": {
VOTING_TYPE_EARLY_VOTE: {
"opt_in_path_id": 294228,
"message": "Your early vote location:\n{{name_of_location}}\n{{polling_address}}\n{{polling_city}}, {{polling_state}} {{polling_zip}}\n{{time_of_event}}",
},
VOTING_TYPE_MAIL: {
"opt_in_path_id": 294243,
"message": "Learn more about how to vote: {{voter_education_url}}",
},
"default": {
"opt_in_path_id": 294225,
"message": "Your polling location:\n{{name_of_location}}\n{{polling_address}}\n{{polling_city}}, {{polling_state}} {{polling_zip}}\n{{time_of_event}}",
},
},
},
"es-MX": {
"IA": {
"default": {
"opt_in_path_id": 290677,
"message": "Tu localizacion de caucus:\n{{name_of_location}}\n{{polling_address}}\n{{polling_city}}, {{polling_state}} {{polling_zip}}\n{{time_of_event}}",
}
},
"NH": {
"default": {
"opt_in_path_id": 291258,
"message": "Tu localizacion de votacion:\n{{name_of_location}}\n{{polling_address}}\n{{polling_city}}, {{polling_state}} {{polling_zip}}\n{{time_of_event}}",
}
},
"NV": {
VOTING_TYPE_EARLY_VOTE: {
"opt_in_path_id": 292302,
"message": "Tu localizacion de votacion temprana:\n{{name_of_location}}\n{{polling_address}}\n{{polling_city}}, {{polling_state}} {{polling_zip}}\n{{time_of_event}}",
},
"default": {
"opt_in_path_id": 292305,
"message": "Tu localizacion de caucus:\n{{name_of_location}}\n{{polling_address}}\n{{polling_city}}, {{polling_state}} {{polling_zip}}\n{{time_of_event}}",
},
},
"default": {
VOTING_TYPE_EARLY_VOTE: {
"opt_in_path_id": 294228,
"message": "Tu localizacion de votacion temprana:\n{{name_of_location}}\n{{polling_address}}\n{{polling_city}}, {{polling_state}} {{polling_zip}}\n{{time_of_event}}",
},
VOTING_TYPE_MAIL: {
"opt_in_path_id": 294243,
"message": "Learn more about how to vote: {{voter_education_url}}",
},
"default": {
"opt_in_path_id": 294225,
"message": "Tu localizacion de votacion:\n{{name_of_location}}\n{{polling_address}}\n{{polling_city}}, {{polling_state}} {{polling_zip}}\n{{time_of_event}}",
},
},
},
}
FORM_DATA_BSD_FIELDS = {
"custom-16063": "van_precinct_id",
"custom-16064": "name_of_location",
"custom-16065": "time_of_event",
"custom-16066": "polling_address",
"custom-16067": "polling_city",
"custom-16068": "polling_state",
"custom-16069": "polling_zip",
"custom-16616": "language",
"custom-17131": "preferred_voting_type",
"custom-17132": "results_url",
"custom-17240": "pollaris_search_id",
"custom-18152": "voting_period_start",
"custom-18123": "voting_period_end",
"custom-18207": "voter_education_url",
}
def after_search(request):
    """Django view: forward a post-search form submission and report status.

    Hands the raw request body to
    handle_bsd_web_poll_tool_form_submission and maps its (success,
    message) outcome to either a success JsonResponse or a bad_request.
    """
    payload = request.body
    logging.info(f"After search request: {payload}")
    ok, detail = handle_bsd_web_poll_tool_form_submission(payload)
    logging.info(f"Got success: {ok}; message: {detail}")
    if ok:
        return JsonResponse({"status": "success"})
    return bad_request(message=detail, code=SearchStatus.BAD_REQUEST)
def handle_bsd_web_poll_tool_form_submission(request_body):
    """Process a BSD web poll-tool form submission end to end.

    Steps: forward the raw submission to BSD, decode the urlencoded form
    (mapping BSD custom-field IDs to readable names), then send the voter
    a confirmation email and SMS with their voting location.

    Returns a ``(success: bool, message: str)`` tuple.
    """
    logging.info(f"handle_bsd_web_poll_tool_form_submission received: {request_body}")
    request_json = json.loads(request_body)
    if not request_json:
        return False, f"Couldn't parse incoming JSON: {request_body}"
    request_form = request_json.get("form", "")
    if not request_form:
        return False, f"Couldn't parse form data: {request_body}"
    # First, submit form as-is to BSD.
    try:
        bsd_succeeded = send_voting_location_bsd(request_json)
    except Exception as e:
        logging.exception(e)
        bsd_succeeded = False
    if not bsd_succeeded:
        logging.error(
            "General failure in submitting to BSD"
        )
        return False, "Failure in submitting to BSD"
    # Translate BSD custom-field IDs (e.g. "custom-16064") into readable
    # keys; parse_qs yields value lists, so keep the first value of each.
    fields = {}
    for k, v in urllib.parse.parse_qs(request_form).items():
        if k in FORM_DATA_BSD_FIELDS:
            fields[FORM_DATA_BSD_FIELDS[k]] = v[0]
        else:
            fields[k] = v[0]
    if "state_cd" not in fields:
        return False, f"Form data doesn't include state_cd: {request_body}"
    fields.setdefault("preferred_voting_type", VOTING_TYPE_DAY_OF)
    # SES templates offer very little in the way of logical conditions;
    # so we create a true/false variable for each possible preferred
    # voting type, which allows tailoring SES template to each voting
    # type.
    for voting_type in VOTING_TYPES:
        fields[voting_type] = fields["preferred_voting_type"] == voting_type
    email = fields.get("email")
    phone = fields.get("phone")
    try:
        send_voting_location_email(email, fields)
    except Exception as e:
        logging.exception(e)
        return False, "Failure in sending voting location email"
    try:
        send_voting_location_sms(email, phone, fields)
    except Exception as e:
        logging.exception(e)
        return False, "Failure in sending voting location SMS"
    return True, "Success"
def send_voting_location_bsd(request_json):
    """POST the original form submission to the BSD signup proxy.

    Returns True when the proxy's JSON body reports statusCode 202
    (accepted); network errors propagate to the caller, which catches
    them broadly.
    """
    resp = requests.post(
        BSD_WEB_POLL_TOOL_SUBMIT_URL, headers=BSD_SUBMIT_HEADERS, json=request_json
    )
    # Use logging, consistent with the rest of this module, instead of print.
    logging.debug("Got resp from BSD proxy: %s", resp.text)
    return resp.json().get("statusCode") == 202
def send_voting_location_email(email, fields):
    """Send the voter a confirmation email with their voting location.

    Selects the SES template and from/reply-to addresses by the voter's
    language and state, falling back to English and the per-language
    "default" entry, then renders the template with a copy of ``fields``.
    Silently returns when no state is present.
    """
    state = fields["state_cd"]
    if not state:
        logging.info("Not sending email with voting location; no state")
        return
    language = fields.get("language", DEFAULT_LANGUAGE)
    details_for_language = VOTING_LOCATION_MAILING_BY_LANGUAGE_STATE.get(
        language, VOTING_LOCATION_MAILING_BY_LANGUAGE_STATE[DEFAULT_LANGUAGE]
    )
    details = details_for_language.get(state, details_for_language["default"])
    # We'll prepare a payload for SES rendering that is a superset of
    # 'fields'.
    ses_fields = fields.copy()
    # Set "transactional" in payload so the footer doesn't render
    # unsubscribe links.
    ses_fields["transactional"] = True
    template_name = details["template_name"]
    # Recipients with "+draftdraftdraft" in the address get the draft
    # variant of the template (used for previewing mailings).
    if VOTING_LOCATION_MAILING_RECIPIENT_DRAFT_SUFFIX in email:
        template_name += VOTING_LOCATION_MAILING_TEMPLATE_DRAFT_SUFFIX
    email_service = EmailService()
    send_email_args = {
        "template_name": template_name,
        "from_email": details["from_email"],
        "recipient": email,
        "reply_to_email": details["reply_to_email"],
        "configuration_set_name": VOTING_LOCATION_MAILING_CONFIGURATION_SET_NAME,
        "payload": ses_fields,
        "application_name": VOTING_LOCATION_MAILING_APPLICATION_NAME,
    }
    logging.info("SES send_email args:")
    logging.info(pprint.pformat(send_email_args))
    email_service.send_email(**send_email_args)
def send_voting_location_sms(email, phone, fields):
    """Opts-in to SMS and sends caucus/polling location address text message.
    We do two Mobile Commons API calls:
    1. Create or update profile, with a state-specific opt-in path.
    If the number is new to our national Mobile Commons campaign,
    this will subscribe the number to our national campaign and trigger a text message like:
    <NAME>: Thanks for confirming your caucus location! We'll follow-up when it's time to make your voice heard for Elizabeth.
    HELP4INFO/STOP2Quit/Msg&DataRatesMayApply
    If the number is already on our list, this will trigger nothing and no message.
    2. Send a message with caucus/polling location name and address.
    """
    phone = extract_phone_number(phone)
    if not phone:
        logging.info("Not sending SMS with voting location; no phone number")
        return
    state = fields["state_cd"]
    if not state:
        logging.info("Not sending SMS with voting location; no state")
        return
    # Resolve opt-in path and message template by language -> state ->
    # preferred voting type, falling back to "default" at each level.
    language = fields.get("language", DEFAULT_LANGUAGE)
    details_for_language = VOTING_LOCATION_SMS_BY_LANGUAGE_STATE.get(
        language, VOTING_LOCATION_SMS_BY_LANGUAGE_STATE[DEFAULT_LANGUAGE]
    )
    details_for_state = details_for_language.get(state, details_for_language["default"])
    details = details_for_state.get(
        fields["preferred_voting_type"], details_for_state["default"]
    )
    first_name, last_name = normalize_name(
        fields.get("firstname", ""), fields.get("lastname", "")
    )
    profile_payload = {
        "phone_number": phone,
        "email": fields.get("email", ""),
        "postal_code": fields.get("zip", ""),
        "first_name": first_name,
        "last_name": last_name,
        "street1": fields.get("addr1", ""),
        "city": fields.get("city", ""),
        "state": state,
        "country": "US",
        "polling_location_name": fields.get("name_of_location"),
        "polling_address": fields.get("polling_address"),
        "polling_city": fields.get("polling_city"),
        "polling_state": fields.get("polling_state"),
        "polling_zip": fields.get("polling_zip"),
        "polling_time": fields.get("time_of_event"),
        "polling_precinct_id": fields.get("van_precinct_id"),
        "opt_in_path_id": details.get("opt_in_path_id"),
    }
    # Don't upload null or empty fields.
    keys_to_delete = [k for k, v in profile_payload.items() if not v]
    for k in keys_to_delete:
        del profile_payload[k]
    resp = create_or_update_mobile_commons_profile(
        settings.MOBILE_COMMONS_USERNAME,
        settings.MOBILE_COMMONS_PASSWORD,
        profile_payload,
    )
    logging.debug(f"Response from mobile commons profile creation: {resp.text}")
    # Render the voting-location details into the SMS body and send it.
    message_template = details["message"]
    message = Template(message_template).render(**fields)
    resp = send_sms(
        settings.MOBILE_COMMONS_USERNAME,
        settings.MOBILE_COMMONS_PASSWORD,
        MOBILE_COMMONS_CAMPAIGN_ID,
        phone,
        message,
    )
    logging.debug(f"Response from mobile commons send: {resp.text}")
| import json
import logging
import pprint
import urllib
import requests
from django.conf import settings
from django.http import JsonResponse
from jinja2 import Template
from ew_common.email_service import EmailService
from ew_common.input_validation import extract_phone_number, normalize_name
from ew_common.mobile_commons import create_or_update_mobile_commons_profile, send_sms
from pollaris.app.views.utils import SearchStatus, bad_request
DEFAULT_LANGUAGE = "en-US"
# URL for sending
BSD_WEB_POLL_TOOL_SUBMIT_URL = (
"https://bsd-signup-proxy"
)
BSD_SUBMIT_HEADERS = {"Origin": "https://elizabethwarren.com"}
VOTING_TYPE_EARLY_VOTE = "early_vote_locations"
VOTING_TYPE_DAY_OF = "polling_locations"
VOTING_TYPE_DROPBOX_LOCATIONS = "dropbox_locations"
VOTING_TYPE_MAIL = "vote_by_mail"
VOTING_TYPES = [
VOTING_TYPE_EARLY_VOTE,
VOTING_TYPE_DAY_OF,
VOTING_TYPE_DROPBOX_LOCATIONS,
VOTING_TYPE_MAIL,
]
# If recipient email address has +draftdraftdraft in it, we'll append
# '_draft' to the mailing template name.
VOTING_LOCATION_MAILING_RECIPIENT_DRAFT_SUFFIX = "+draftdraftdraft"
VOTING_LOCATION_MAILING_TEMPLATE_DRAFT_SUFFIX = "_draft"
VOTING_LOCATION_MAILING_CONFIGURATION_SET_NAME = "organizing_emails"
VOTING_LOCATION_MAILING_APPLICATION_NAME = "pollaris"
VOTING_LOCATION_MAILING_BY_LANGUAGE_STATE = {
# These templates are editable in our Contentful Emails space
"en-US": {
"IA": {
"template_name": "voting_location_IA",
"from_email": "<NAME> <<EMAIL>>",
"reply_to_email": "Iowa for Warren <<EMAIL>>",
},
"NH": {
"template_name": "voting_location_generic",
"from_email": "New Hampshire for Warren <<EMAIL>>",
"reply_to_email": "New Hampshire for Warren <<EMAIL>>",
},
"NV": {
# NV has early vote and day-of caucuses, so it needs its own
# template.
"template_name": "voting_location_NV",
"from_email": "<NAME> <<EMAIL>>",
"reply_to_email": "<NAME> <<EMAIL>>",
},
"default": {
"template_name": "voting_location_generic",
"from_email": "ElizabethWarren.com <<EMAIL>>",
"reply_to_email": "ElizabethWarren.com <<EMAIL>>",
},
},
"es-MX": {
"IA": {
"template_name": "voting_location_IA",
"from_email": "Iowa for Warren <<EMAIL>>",
"reply_to_email": "Iowa for Warren <<EMAIL>>",
},
"NH": {
"template_name": "voting_location_generic",
"from_email": "New Hampshire for Warren <<EMAIL>>",
"reply_to_email": "New Hampshire for Warren <<EMAIL>>",
},
"NV": {
"template_name": "voting_location_NV_es",
"from_email": "<NAME> <<EMAIL>>",
"reply_to_email": "<NAME> <<EMAIL>>",
},
"default": {
"template_name": "voting_location_generic_es",
"from_email": "ElizabethWarren.com <<EMAIL>>",
"reply_to_email": "ElizabethWarren.com <<EMAIL>>",
},
},
}
MOBILE_COMMONS_CAMPAIGN_ID = 189358
VOTING_LOCATION_SMS_BY_LANGUAGE_STATE = {
"en-US": {
"IA": {
"default": {
"opt_in_path_id": 290677,
"message": "Your caucus location:\n{{name_of_location}}\n{{polling_address}}\n{{polling_city}}, {{polling_state}} {{polling_zip}}\n{{time_of_event}}",
}
},
"NH": {
"default": {
"opt_in_path_id": 291258,
"message": "Your polling location:\n{{name_of_location}}\n{{polling_address}}\n{{polling_city}}, {{polling_state}} {{polling_zip}}\n{{time_of_event}}",
}
},
"NV": {
VOTING_TYPE_EARLY_VOTE: {
"opt_in_path_id": 292302,
"message": "Your early vote location:\n{{name_of_location}}\n{{polling_address}}\n{{polling_city}}, {{polling_state}} {{polling_zip}}\n{{time_of_event}}",
},
"default": {
"opt_in_path_id": 292305,
"message": "Your caucus location:\n{{name_of_location}}\n{{polling_address}}\n{{polling_city}}, {{polling_state}} {{polling_zip}}\n{{time_of_event}}",
},
},
"default": {
VOTING_TYPE_EARLY_VOTE: {
"opt_in_path_id": 294228,
"message": "Your early vote location:\n{{name_of_location}}\n{{polling_address}}\n{{polling_city}}, {{polling_state}} {{polling_zip}}\n{{time_of_event}}",
},
VOTING_TYPE_MAIL: {
"opt_in_path_id": 294243,
"message": "Learn more about how to vote: {{voter_education_url}}",
},
"default": {
"opt_in_path_id": 294225,
"message": "Your polling location:\n{{name_of_location}}\n{{polling_address}}\n{{polling_city}}, {{polling_state}} {{polling_zip}}\n{{time_of_event}}",
},
},
},
"es-MX": {
"IA": {
"default": {
"opt_in_path_id": 290677,
"message": "Tu localizacion de caucus:\n{{name_of_location}}\n{{polling_address}}\n{{polling_city}}, {{polling_state}} {{polling_zip}}\n{{time_of_event}}",
}
},
"NH": {
"default": {
"opt_in_path_id": 291258,
"message": "Tu localizacion de votacion:\n{{name_of_location}}\n{{polling_address}}\n{{polling_city}}, {{polling_state}} {{polling_zip}}\n{{time_of_event}}",
}
},
"NV": {
VOTING_TYPE_EARLY_VOTE: {
"opt_in_path_id": 292302,
"message": "Tu localizacion de votacion temprana:\n{{name_of_location}}\n{{polling_address}}\n{{polling_city}}, {{polling_state}} {{polling_zip}}\n{{time_of_event}}",
},
"default": {
"opt_in_path_id": 292305,
"message": "Tu localizacion de caucus:\n{{name_of_location}}\n{{polling_address}}\n{{polling_city}}, {{polling_state}} {{polling_zip}}\n{{time_of_event}}",
},
},
"default": {
VOTING_TYPE_EARLY_VOTE: {
"opt_in_path_id": 294228,
"message": "Tu localizacion de votacion temprana:\n{{name_of_location}}\n{{polling_address}}\n{{polling_city}}, {{polling_state}} {{polling_zip}}\n{{time_of_event}}",
},
VOTING_TYPE_MAIL: {
"opt_in_path_id": 294243,
"message": "Learn more about how to vote: {{voter_education_url}}",
},
"default": {
"opt_in_path_id": 294225,
"message": "Tu localizacion de votacion:\n{{name_of_location}}\n{{polling_address}}\n{{polling_city}}, {{polling_state}} {{polling_zip}}\n{{time_of_event}}",
},
},
},
}
FORM_DATA_BSD_FIELDS = {
"custom-16063": "van_precinct_id",
"custom-16064": "name_of_location",
"custom-16065": "time_of_event",
"custom-16066": "polling_address",
"custom-16067": "polling_city",
"custom-16068": "polling_state",
"custom-16069": "polling_zip",
"custom-16616": "language",
"custom-17131": "preferred_voting_type",
"custom-17132": "results_url",
"custom-17240": "pollaris_search_id",
"custom-18152": "voting_period_start",
"custom-18123": "voting_period_end",
"custom-18207": "voter_education_url",
}
def after_search(request):
request_body = request.body
logging.info(f"After search request: {request_body}")
success, message = handle_bsd_web_poll_tool_form_submission(request_body)
logging.info(f"Got success: {success}; message: {message}")
if not success:
return bad_request(message=message, code=SearchStatus.BAD_REQUEST)
resp_body = {"status": "success"}
return JsonResponse(resp_body)
def handle_bsd_web_poll_tool_form_submission(request_body):
logging.info(f"handle_bsd_web_poll_tool_form_submission received: {request_body}")
request_json = json.loads(request_body)
if not request_json:
return False, f"Couldn't parse incoming JSON: {request_body}"
request_form = request_json.get("form", "")
if not request_form:
return False, f"Couldn't parse form data: {request_body}"
# First, submit form as-is to BSD.
try:
bsd_succeeded = send_voting_location_bsd(request_json)
except Exception as e:
logging.exception(e)
bsd_succeeded = False
if not bsd_succeeded:
logging.error(
"General failure in submitting to BSD"
)
return False, "Failure in submitting to BSD"
fields = {}
for k, v in urllib.parse.parse_qs(request_form).items():
if k in FORM_DATA_BSD_FIELDS:
fields[FORM_DATA_BSD_FIELDS[k]] = v[0]
else:
fields[k] = v[0]
if "state_cd" not in fields:
return False, f"Form data doesn't include state_cd: {request_body}"
fields.setdefault("preferred_voting_type", VOTING_TYPE_DAY_OF)
# SES templates offer very little in the way of logical conditions;
# so we create a true/false variable for each possible preferred
# voting type, which allows tailoring SES template to each voting
# type.
for voting_type in VOTING_TYPES:
fields[voting_type] = fields["preferred_voting_type"] == voting_type
email = fields.get("email")
phone = fields.get("phone")
try:
send_voting_location_email(email, fields)
except Exception as e:
logging.exception(e)
return False, "Failure in sending voting location email"
try:
send_voting_location_sms(email, phone, fields)
except Exception as e:
logging.exception(e)
return False, "Failure in sending voting location SMS"
return True, "Success"
def send_voting_location_bsd(request_json):
resp = requests.post(
BSD_WEB_POLL_TOOL_SUBMIT_URL, headers=BSD_SUBMIT_HEADERS, json=request_json
)
print("Got resp from BSD proxy", resp.text)
return resp.json().get("statusCode") == 202
def send_voting_location_email(email, fields):
state = fields["state_cd"]
if not state:
logging.info("Not sending email with voting location; no state")
return
language = fields.get("language", DEFAULT_LANGUAGE)
details_for_language = VOTING_LOCATION_MAILING_BY_LANGUAGE_STATE.get(
language, VOTING_LOCATION_MAILING_BY_LANGUAGE_STATE[DEFAULT_LANGUAGE]
)
details = details_for_language.get(state, details_for_language["default"])
# We'll prepare a payload for SES rendering that is a superset of
# 'fields'.
ses_fields = fields.copy()
# Set "transactional" in payload so the footer doesn't render
# unsubscribe links.
ses_fields["transactional"] = True
template_name = details["template_name"]
if VOTING_LOCATION_MAILING_RECIPIENT_DRAFT_SUFFIX in email:
template_name += VOTING_LOCATION_MAILING_TEMPLATE_DRAFT_SUFFIX
email_service = EmailService()
send_email_args = {
"template_name": template_name,
"from_email": details["from_email"],
"recipient": email,
"reply_to_email": details["reply_to_email"],
"configuration_set_name": VOTING_LOCATION_MAILING_CONFIGURATION_SET_NAME,
"payload": ses_fields,
"application_name": VOTING_LOCATION_MAILING_APPLICATION_NAME,
}
logging.info("SES send_email args:")
logging.info(pprint.pformat(send_email_args))
email_service.send_email(**send_email_args)
def send_voting_location_sms(email, phone, fields):
"""Opts-in to SMS and sends caucus/polling location address text message.
We do two Mobile Commons API calls:
1. Create or update profile, with a state-specific opt-in path.
If the number is new to our national Mobile Commons campaign,
this will subscribe the number to our national campaign and trigger a text message like:
<NAME>: Thanks for confirming your caucus location! We'll follow-up when it's time to make your voice heard for Elizabeth.
HELP4INFO/STOP2Quit/Msg&DataRatesMayApply
If the number if already on our list, this will trigger nothing and no message.
2. Send a message with caucus/polling location name and address.
"""
phone = extract_phone_number(phone)
if not phone:
logging.info("Not sending SMS with voting location; no phone number")
return
state = fields["state_cd"]
if not state:
logging.info("Not sending SMS with voting location; no state")
return
language = fields.get("language", DEFAULT_LANGUAGE)
details_for_language = VOTING_LOCATION_SMS_BY_LANGUAGE_STATE.get(
language, VOTING_LOCATION_SMS_BY_LANGUAGE_STATE[DEFAULT_LANGUAGE]
)
details_for_state = details_for_language.get(state, details_for_language["default"])
details = details_for_state.get(
fields["preferred_voting_type"], details_for_state["default"]
)
first_name, last_name = normalize_name(
fields.get("firstname", ""), fields.get("lastname", "")
)
profile_payload = {
"phone_number": phone,
"email": fields.get("email", ""),
"postal_code": fields.get("zip", ""),
"first_name": first_name,
"last_name": last_name,
"street1": fields.get("addr1", ""),
"city": fields.get("city", ""),
"state": state,
"country": "US",
"polling_location_name": fields.get("name_of_location"),
"polling_address": fields.get("polling_address"),
"polling_city": fields.get("polling_city"),
"polling_state": fields.get("polling_state"),
"polling_zip": fields.get("polling_zip"),
"polling_time": fields.get("time_of_event"),
"polling_precinct_id": fields.get("van_precinct_id"),
"opt_in_path_id": details.get("opt_in_path_id"),
}
# Don't upload null or empty fields.
keys_to_delete = [k for k, v in profile_payload.items() if not v]
for k in keys_to_delete:
del profile_payload[k]
resp = create_or_update_mobile_commons_profile(
settings.MOBILE_COMMONS_USERNAME,
settings.MOBILE_COMMONS_PASSWORD,
profile_payload,
)
logging.debug(f"Response from mobile commons profile creation: {resp.text}")
message_template = details["message"]
message = Template(message_template).render(**fields)
resp = send_sms(
settings.MOBILE_COMMONS_USERNAME,
settings.MOBILE_COMMONS_PASSWORD,
MOBILE_COMMONS_CAMPAIGN_ID,
phone,
message,
)
logging.debug(f"Response from mobile commons send: {resp.text}")
| en | 0.815776 | # URL for sending # If recipient email address has +draftdraftdraft in it, we'll append # '_draft' to the mailing template name. # These templates are editable in our Contentful Emails space # NV has early vote and day-of caucuses, so it needs its own # template. # First, submit form as-is to BSD. # SES templates offer very little in the way of logical conditions; # so we create a true/false variable for each possible preferred # voting type, which allows tailoring SES template to each voting # type. # We'll prepare a payload for SES rendering that is a superset of # 'fields'. # Set "transactional" in payload so the footer doesn't render # unsubscribe links. Opts-in to SMS and sends caucus/polling location address text message. We do two Mobile Commons API calls: 1. Create or update profile, with a state-specific opt-in path. If the number is new to our national Mobile Commons campaign, this will subscribe the number to our national campaign and trigger a text message like: <NAME>: Thanks for confirming your caucus location! We'll follow-up when it's time to make your voice heard for Elizabeth. HELP4INFO/STOP2Quit/Msg&DataRatesMayApply If the number if already on our list, this will trigger nothing and no message. 2. Send a message with caucus/polling location name and address. # Don't upload null or empty fields. | 1.908617 | 2 |
atomic/analytic/handlers/stabilize.py | usc-psychsim/atomic_domain_definitions | 0 | 6617341 | <reponame>usc-psychsim/atomic_domain_definitions
from ..models.jags.jag import Jag
def handle_triage(jag_instance: Jag, data):
victim_id = data['victim_id']
if victim_id != jag_instance.inputs.get('victim-id'):
return
# print(f"stabilize::handle_triage {jag_instance.short_string()} {data}")
player_id = data['participant_id']
elapsed_ms = data['elapsed_milliseconds']
triage_state = data['triage_state']
is_addressing = jag_instance.is_addressing(player_id)
if triage_state == "IN_PROGRESS":
jag_instance.update_addressing(player_id, player_id, 1.0, elapsed_ms)
# if is_addressing than we received two messages and can ignore
if triage_state == "SUCCESSFUL":
if is_addressing: # normal messaging
jag_instance.update_addressing(player_id, player_id, 0.0, elapsed_ms)
jag_instance.update_completion_status(player_id, True, elapsed_ms)
else: # must have missed a message
print(f"{victim_id} missed an in progress message but done with {jag_instance.short_string()}")
jag_instance.update_addressing(player_id, player_id, 1.0, elapsed_ms)
jag_instance.update_addressing(player_id, player_id, 0.0, elapsed_ms)
jag_instance.update_completion_status(player_id, True, elapsed_ms)
if triage_state == "UNSUCCESSFUL":
if is_addressing: # normal messaging
jag_instance.update_addressing(player_id, player_id, 0.5, elapsed_ms)
else: # must have missed a message
print(f"{victim_id} missed an in progress message but still not done with {jag_instance.short_string()}")
jag_instance.update_addressing(player_id, player_id, 1.0, elapsed_ms)
jag_instance.update_addressing(player_id, player_id, 0.5, elapsed_ms)
| from ..models.jags.jag import Jag
def handle_triage(jag_instance: Jag, data):
victim_id = data['victim_id']
if victim_id != jag_instance.inputs.get('victim-id'):
return
# print(f"stabilize::handle_triage {jag_instance.short_string()} {data}")
player_id = data['participant_id']
elapsed_ms = data['elapsed_milliseconds']
triage_state = data['triage_state']
is_addressing = jag_instance.is_addressing(player_id)
if triage_state == "IN_PROGRESS":
jag_instance.update_addressing(player_id, player_id, 1.0, elapsed_ms)
# if is_addressing than we received two messages and can ignore
if triage_state == "SUCCESSFUL":
if is_addressing: # normal messaging
jag_instance.update_addressing(player_id, player_id, 0.0, elapsed_ms)
jag_instance.update_completion_status(player_id, True, elapsed_ms)
else: # must have missed a message
print(f"{victim_id} missed an in progress message but done with {jag_instance.short_string()}")
jag_instance.update_addressing(player_id, player_id, 1.0, elapsed_ms)
jag_instance.update_addressing(player_id, player_id, 0.0, elapsed_ms)
jag_instance.update_completion_status(player_id, True, elapsed_ms)
if triage_state == "UNSUCCESSFUL":
if is_addressing: # normal messaging
jag_instance.update_addressing(player_id, player_id, 0.5, elapsed_ms)
else: # must have missed a message
print(f"{victim_id} missed an in progress message but still not done with {jag_instance.short_string()}")
jag_instance.update_addressing(player_id, player_id, 1.0, elapsed_ms)
jag_instance.update_addressing(player_id, player_id, 0.5, elapsed_ms) | en | 0.75437 | # print(f"stabilize::handle_triage {jag_instance.short_string()} {data}") # if is_addressing than we received two messages and can ignore # normal messaging # must have missed a message # normal messaging # must have missed a message | 2.36592 | 2 |
python/dxa/get_year_deltas.py | portfolioscout/py4fi | 15 | 6617342 | <reponame>portfolioscout/py4fi
#
# DX Library Frame
# get_year_deltas.py
#
import numpy as np
def get_year_deltas(date_list, day_count=365.):
''' Return vector of floats with day deltas in years.
Initial value normalized to zero.
Parameters
==========
date_list : list or array
collection of datetime objects
day_count : float
number of days for a year
(to account for different conventions)
Results
=======
delta_list : array
year fractions
'''
start = date_list[0]
delta_list = [(date - start).days / day_count
for date in date_list]
return np.array(delta_list) | #
# DX Library Frame
# get_year_deltas.py
#
import numpy as np
def get_year_deltas(date_list, day_count=365.):
''' Return vector of floats with day deltas in years.
Initial value normalized to zero.
Parameters
==========
date_list : list or array
collection of datetime objects
day_count : float
number of days for a year
(to account for different conventions)
Results
=======
delta_list : array
year fractions
'''
start = date_list[0]
delta_list = [(date - start).days / day_count
for date in date_list]
return np.array(delta_list) | en | 0.599425 | # # DX Library Frame # get_year_deltas.py # Return vector of floats with day deltas in years. Initial value normalized to zero. Parameters ========== date_list : list or array collection of datetime objects day_count : float number of days for a year (to account for different conventions) Results ======= delta_list : array year fractions | 3.361606 | 3 |
flowmanager.py | aymeniq/flowmanager | 0 | 6617343 | # Copyright (c) 2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ryu.base import app_manager
from ryu.app.wsgi import WSGIApplication
from ryu.controller import dpset
# these are needed for the events
from ryu.controller import ofp_event
from ryu.controller.handler import HANDSHAKE_DISPATCHER
from ryu.controller.handler import CONFIG_DISPATCHER
from ryu.controller.handler import MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.lib import ofctl_v1_3
from ryu.lib import ofctl_utils
from ryu import utils
# for packet content
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import ether_types
# for topology discovery
#from ryu.topology import event, switches
from ryu.topology.api import get_all_switch, get_all_link, get_all_host
from webapi import WebApi
import os, logging
from logging.handlers import WatchedFileHandler
class FlowManager(app_manager.RyuApp):
    """Ryu application exposing a web front-end (WebApi) for inspecting
    and editing the flow, group, and meter tables of connected
    OpenFlow 1.3 switches.

    Statistics replies arrive asynchronously, so pending requests are
    tracked in ``self.waiters`` and completed by ``stats_reply_handler``
    via ryu's ``ofctl_v1_3`` helper functions.
    """
    #OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
    _CONTEXTS = {'wsgi': WSGIApplication,
                 'dpset': dpset.DPSet}

    # Reserved OpenFlow port numbers keyed by symbolic name
    # (the OFPP_* values of the OpenFlow 1.3 specification).
    port_id = {
        "IN_PORT": 0xfffffff8,
        "TABLE": 0xfffffff9,
        "NORMAL": 0xfffffffa,
        "FLOOD": 0xfffffffb,
        "ALL": 0xfffffffc,
        "CONTROLLER": 0xfffffffd,
        "LOCAL": 0xfffffffe,
        "ANY": 0xffffffff
    }

    logname = 'flwmgr'      # name of the application logger
    logfile = 'flwmgr.log'  # log file written here and read back by read_logs()

    def __init__(self, *args, **kwargs):
        super(FlowManager, self).__init__(*args, **kwargs)
        wsgi = kwargs['wsgi']
        self.dpset = kwargs['dpset']
        # Load the match-field and action catalogs shipped next to this
        # module; the web UI uses them to build its forms.
        dirname = os.path.dirname(__file__)
        self.lists = {}
        self.lists['actions'] = self.read_files(
            'actions', os.path.join(dirname, 'data/actions.txt'))
        self.lists['matches'] = self.read_files(
            'matches', os.path.join(dirname, 'data/matches.txt'))
        # {dpid: {xid: (lock, [msgs])}} -- outstanding stats requests,
        # completed by stats_reply_handler.
        self.waiters = {}
        self.ofctl = ofctl_v1_3
        # Data exchanged with WebApi
        wsgi.register(WebApi,
                      {"webctl": self,
                       "dpset": self.dpset,
                       "lists": self.lists,
                       "waiters": self.waiters})
        # Setup logging
        self.logger = self.get_logger(self.logname, self.logfile, 'INFO', 0)

    def get_logger(self, logname, logfile, loglevel, propagate):
        """Create and return a logger writing tab-separated records.

        The file is truncated on startup (mode 'w'); its records are
        split back into fields by read_logs().
        """
        # TODO: simplify
        logger = logging.getLogger(logname)
        logger_handler = WatchedFileHandler(logfile, mode='w')
        # removed \t%(name)-6s
        log_fmt = '%(asctime)s\t%(levelname)-8s\t%(message)s'
        logger_handler.setFormatter(
            logging.Formatter(log_fmt, '%b %d %H:%M:%S'))
        logger.addHandler(logger_handler)
        logger.propagate = propagate
        logger.setLevel(loglevel)
        return logger

    def get_switches(self):
        """Return all connected datapaths."""
        return self.dpset.get_all()

    def get_switch_desc(self, dpid):
        """Return description stats of switch *dpid*, or None if absent."""
        dp = self.dpset.get(dpid)
        if dp:
            return self.ofctl.get_desc_stats(dp, self.waiters, to_user=True)
        return None

    def get_port_desc(self, dpid):
        """Return port descriptions of switch *dpid*, or None if absent."""
        dp = self.dpset.get(dpid)
        if dp:
            return self.ofctl.get_port_desc(dp, self.waiters)
        return None

    def get_port_stat(self, dpid):
        """Return port statistics of switch *dpid*, or None if absent."""
        dp = self.dpset.get(dpid)
        if dp:
            return self.ofctl.get_port_stats(dp, self.waiters, port=None,
                                             to_user=True)
        return None

    def get_flow_summary(self, dpid):
        """Return aggregate flow stats of switch *dpid*, or None if absent."""
        dp = self.dpset.get(dpid)
        if dp:
            return self.ofctl.get_aggregate_flow_stats(dp, self.waiters)
        return None

    def get_table_stat(self, dpid):
        """Return table statistics of switch *dpid*, or None if absent."""
        dp = self.dpset.get(dpid)
        if dp:
            return self.ofctl.get_table_stats(dp, self.waiters)
        return None

    def read_logs(self):
        """Return the log file content as a list of tab-split records."""
        items = []
        with open(self.logfile, 'r') as my_file:
            for line in my_file:
                items.append(line.split('\t'))
        return items

    def read_files(self, key, filename):
        """Read a tab-separated text file into a dict.

        Each line is split on tabs and stored under its first field.
        Used to load the match-field and action catalogs.  *key* is
        unused and kept only for interface compatibility.
        """
        items = {}
        with open(filename, 'r') as my_file:
            for line in my_file:
                lst = line.split('\t')
                items[lst[0]] = lst
        return items

    def get_actions(self, parser, set):
        """Translate a list of one-entry {ACTION_NAME: value} dicts (as
        produced by the web forms) into ryu OFPAction* objects.

        Raises Exception for unsupported action names.
        """
        actions = []
        # form action name -> (OFPAction class, value keyword);
        # a keyword of None means the action takes no argument.
        aDict = {
            'SET_FIELD': (parser.OFPActionSetField, 'field'),
            'COPY_TTL_OUT': (parser.OFPActionCopyTtlOut, None),
            'COPY_TTL_IN': (parser.OFPActionCopyTtlIn, None),
            'POP_PBB': (parser.OFPActionPopPbb, None),
            'PUSH_PBB': (parser.OFPActionPushPbb, 'ethertype'),
            'POP_MPLS': (parser.OFPActionPopMpls, 'ethertype'),
            'PUSH_MPLS': (parser.OFPActionPushMpls, 'ethertype'),
            'POP_VLAN': (parser.OFPActionPopVlan, None),
            'PUSH_VLAN': (parser.OFPActionPushVlan, 'ethertype'),
            'DEC_MPLS_TTL': (parser.OFPActionDecMplsTtl, None),
            'SET_MPLS_TTL': (parser.OFPActionSetMplsTtl, 'mpls_ttl'),
            'DEC_NW_TTL': (parser.OFPActionDecNwTtl, None),
            'SET_NW_TTL': (parser.OFPActionSetNwTtl, 'nw_ttl'),
            'SET_QUEUE': (parser.OFPActionSetQueue, 'queue_id'),
            'GROUP': (parser.OFPActionGroup, 'group_id'),
            'OUTPUT': (parser.OFPActionOutput, 'port'),
        }
        for action in set:
            # There should be only one key per action dict.
            # next(iter(...)) rather than .keys()[0], which raises
            # TypeError on Python 3 (dict views are not indexable).
            key = next(iter(action))
            value = action[key]
            if key in aDict:
                f = aDict[key][0]  # the action class
                if aDict[key][1]:  # check if the action needs a value
                    kwargs = {}
                    if aDict[key][1] == 'field':
                        # value looks like "field_name=field_value"
                        x = value.split('=')
                        kwargs = {x[0]: x[1]}
                    elif aDict[key][1] == 'port':
                        # accept symbolic port names (e.g. "CONTROLLER")
                        x = value.upper()
                        val = self.port_id[x] if x in self.port_id else int(x)
                        kwargs = {aDict[key][1]: val}
                    else:
                        kwargs = {aDict[key][1]: int(value)}
                    actions.append(f(**kwargs))
                else:
                    actions.append(f())
            else:
                raise Exception("Action {} not supported!".format(key))
        return actions

    def process_flow_message(self, d):
        """Send flow form data *d* to the switch to update flow tables.

        Returns a human-readable status string for the web UI.
        """
        dp = self.dpset.get(d["dpid"])
        if not dp:
            return "Datapath does not exist!"
        ofproto = dp.ofproto
        parser = dp.ofproto_parser
        command = {
            'add': ofproto.OFPFC_ADD,
            'mod': ofproto.OFPFC_MODIFY,
            'modst': ofproto.OFPFC_MODIFY_STRICT,
            'del': ofproto.OFPFC_DELETE,
            'delst': ofproto.OFPFC_DELETE_STRICT,
        }
        # Initialize arguments for the flow mod message
        msg_kwargs = {
            'datapath': dp,
            'command': command.get(d["operation"], ofproto.OFPFC_ADD),
            'buffer_id': ofproto.OFP_NO_BUFFER,
        }
        try:
            msg_kwargs['table_id'] = d['table_id']
            # Match fields
            mf = d["match"]
            match = parser.OFPMatch(**mf)
            msg_kwargs['match'] = match
            msg_kwargs['hard_timeout'] = d['hard_timeout']
            msg_kwargs['idle_timeout'] = d['idle_timeout']
            msg_kwargs['priority'] = d['priority']
            msg_kwargs['cookie'] = d['cookie']
            msg_kwargs['cookie_mask'] = d['cookie_mask']
            # out_port/out_group constrain the delete commands;
            # a negative form value means "any".
            msg_kwargs['out_port'] = d['out_port'] if d['out_port'] >= 0 else ofproto.OFPP_ANY
            msg_kwargs['out_group'] = d['out_group'] if d['out_group'] >= 0 else ofproto.OFPG_ANY
            # instructions
            inst = []
            # Goto meter
            if d["meter_id"]:
                inst += [parser.OFPInstructionMeter(d["meter_id"])]
            # Apply Actions
            if d["apply"]:
                applyActions = self.get_actions(parser, d["apply"])
                inst += [parser.OFPInstructionActions(
                    ofproto.OFPIT_APPLY_ACTIONS, applyActions)]
            # Clear Actions
            if d["clearactions"]:
                inst += [parser.OFPInstructionActions(
                    ofproto.OFPIT_CLEAR_ACTIONS, [])]
            # Write Actions
            if d["write"]:
                # write-actions must be unique, so the form sends a dict;
                # convert it to the list-of-dicts shape get_actions expects.
                toList = [{k: d["write"][k]} for k in d["write"]]
                writeActions = self.get_actions(parser, toList)
                inst += [parser.OFPInstructionActions(
                    ofproto.OFPIT_WRITE_ACTIONS, writeActions)]
            # Write Metadata
            if d["metadata"]:
                inst += [parser.OFPInstructionWriteMetadata(
                    d["metadata"], d["metadata_mask"])]
            # Goto Table
            if d["goto"]:
                inst += [parser.OFPInstructionGotoTable(table_id=d["goto"])]
            if inst:
                msg_kwargs['instructions'] = inst
            # Flags (OFPFF_* bits of the flow-mod message)
            flags = 0
            flags += 0x01 if d['SEND_FLOW_REM'] else 0
            flags += 0x02 if d['CHECK_OVERLAP'] else 0
            flags += 0x04 if d['RESET_COUNTS'] else 0
            flags += 0x08 if d['NO_PKT_COUNTS'] else 0
            flags += 0x10 if d['NO_BYT_COUNTS'] else 0
            msg_kwargs['flags'] = flags
        except KeyError as e:
            # NOTE: format the exception itself; Python 3 exceptions
            # have no .message attribute.
            return "Value for '{}' is not found!".format(e)
        except Exception as e:
            return repr(e)
        # ryu/ryu/ofproto/ofproto_v1_3_parser.py
        msg = parser.OFPFlowMod(**msg_kwargs)
        try:
            dp.send_msg(msg)  # ryu/ryu/controller/controller.py
        except Exception as e:
            return repr(e)
        return "Message sent successfully."

    def process_group_message(self, d):
        """Send group form data *d* to the switch to update group tables.

        Returns a human-readable status string for the web UI.
        """
        dp = self.dpset.get(d["dpid"])
        if not dp:
            return "Datapath does not exist!"
        ofproto = dp.ofproto
        parser = dp.ofproto_parser
        command = {
            'add': ofproto.OFPGC_ADD,
            'mod': ofproto.OFPGC_MODIFY,
            'del': ofproto.OFPGC_DELETE,
        }
        cmd = command.get(d["operation"], ofproto.OFPGC_ADD)
        type_convert = {'ALL': dp.ofproto.OFPGT_ALL,
                        'SELECT': dp.ofproto.OFPGT_SELECT,
                        'INDIRECT': dp.ofproto.OFPGT_INDIRECT,
                        'FF': dp.ofproto.OFPGT_FF}
        gtype = type_convert.get(d["type"])
        group_id = d["group_id"]
        buckets = []
        for bucket in d["buckets"]:
            # weight / watch_port / watch_group are optional per bucket
            weight = bucket.get('weight', 0)
            watch_port = bucket.get('watch_port', ofproto.OFPP_ANY)
            watch_group = bucket.get('watch_group', dp.ofproto.OFPG_ANY)
            actions = []
            if bucket['actions']:
                actions = self.get_actions(parser, bucket['actions'])
            buckets.append(dp.ofproto_parser.OFPBucket(
                weight, watch_port, watch_group, actions))
        group_mod = parser.OFPGroupMod(
            dp, cmd, gtype, group_id, buckets)
        try:
            dp.send_msg(group_mod)  # ryu/ryu/controller/controller.py
        except Exception as e:
            return repr(e)
        return "Message sent successfully."

    def process_meter_message(self, d):
        """Send meter form data *d* to the switch to update the meter table.

        Returns a human-readable status string for the web UI.
        """
        dp = self.dpset.get(d["dpid"])
        if not dp:
            return "Datapath does not exist!"
        ofproto = dp.ofproto
        parser = dp.ofproto_parser
        command = {
            'add': ofproto.OFPMC_ADD,
            'mod': ofproto.OFPMC_MODIFY,
            'del': ofproto.OFPMC_DELETE,
        }
        cmd = command.get(d["operation"], ofproto.OFPMC_ADD)
        meter_id = d["meter_id"]
        # Flags (OFPMF_* bits of the meter-mod message)
        flags = 0
        flags += 0x01 if d['OFPMF_KBPS'] else 0
        flags += 0x02 if d['OFPMF_PKTPS'] else 0
        flags += 0x04 if d['OFPMF_BURST'] else 0
        flags += 0x08 if d['OFPMF_STATS'] else 0
        bands = []
        for band in d["bands"]:
            # each band is [type, rate, burst_size, (prec_level)]
            if band[0] == 'DROP':
                bands += [parser.OFPMeterBandDrop(rate=band[1],
                                                  burst_size=band[2])]
            elif band[0] == 'DSCP_REMARK':
                bands += [parser.OFPMeterBandDscpRemark(rate=band[1],
                                                        burst_size=band[2],
                                                        prec_level=band[3])]
        self.logger.debug('MeterMod: dp=%s cmd=%s flags=%s meter_id=%s bands=%s',
                          dp, cmd, flags, meter_id, bands)
        meter_mod = parser.OFPMeterMod(dp, cmd, flags, meter_id, bands)
        try:
            dp.send_msg(meter_mod)
        except Exception as e:
            return repr(e)
        return "Message sent successfully."

    def get_stats(self, req, dpid):
        """Return stats of switch *dpid* for the requested table.

        *req* is one of "flows", "groups", or "meters"; any other value
        yields None.  *dpid* may be a decimal or prefixed (0x...) string.
        """
        dp = self.dpset.get(int(str(dpid), 0))
        if req == "flows":
            return self.ofctl.get_flow_stats(dp, self.waiters)
        elif req == "groups":
            return {"desc": self.ofctl.get_group_desc(dp, self.waiters),
                    "stats": self.ofctl.get_group_stats(dp, self.waiters)}
        elif req == "meters":
            return {"desc": self.ofctl.get_meter_config(dp, self.waiters),
                    "stats": self.ofctl.get_meter_stats(dp, self.waiters)}
        return None

    def get_packet_summary(self, content):
        """Return a short '(src, dst, type)' summary of raw packet bytes."""
        pkt = packet.Packet(content)
        eth = pkt.get_protocols(ethernet.ethernet)[0]
        return '(src={}, dst={}, type=0x{:04x})'.format(
            eth.src, eth.dst, eth.ethertype)

    ##### Event Handlers #######################################

    @set_ev_cls([  # ofp_event.EventOFPStatsReply,
        ofp_event.EventOFPDescStatsReply,
        ofp_event.EventOFPFlowStatsReply,
        ofp_event.EventOFPAggregateStatsReply,
        ofp_event.EventOFPTableStatsReply,
        # ofp_event.EventOFPTableFeaturesStatsReply,
        ofp_event.EventOFPPortStatsReply,
        # ofp_event.EventOFPQueueStatsReply,
        # ofp_event.EventOFPQueueDescStatsReply,
        ofp_event.EventOFPMeterStatsReply,
        # ofp_event.EventOFPMeterFeaturesStatsReply,
        ofp_event.EventOFPMeterConfigStatsReply,
        ofp_event.EventOFPGroupStatsReply,
        # ofp_event.EventOFPGroupFeaturesStatsReply,
        ofp_event.EventOFPGroupDescStatsReply,
        ofp_event.EventOFPPortDescStatsReply,
        # ofp_event.EventOFPPacketIn,
    ], MAIN_DISPATCHER)
    def stats_reply_handler(self, ev):
        """Collect multipart stats replies and wake the waiting request.

        Replies are appended to the message list registered in
        self.waiters under (dpid, xid); once the last part arrives
        (OFPMPF_REPLY_MORE cleared) the waiter's lock is released.
        """
        msg = ev.msg
        dp = msg.datapath
        if dp.id not in self.waiters:
            return
        if msg.xid not in self.waiters[dp.id]:
            return
        lock, msgs = self.waiters[dp.id][msg.xid]
        msgs.append(msg)
        flags = dp.ofproto.OFPMPF_REPLY_MORE
        if msg.flags & flags:
            return  # more parts of this reply are still in flight
        del self.waiters[dp.id][msg.xid]
        lock.set()

    @set_ev_cls(ofp_event.EventOFPFlowRemoved, MAIN_DISPATCHER)
    def flow_removed_handler(self, ev):
        """Log flow-removed notifications from the switch."""
        msg = ev.msg
        dp = msg.datapath
        ofp = dp.ofproto
        if msg.reason == ofp.OFPRR_IDLE_TIMEOUT:
            reason = 'IDLE TIMEOUT'
        elif msg.reason == ofp.OFPRR_HARD_TIMEOUT:
            reason = 'HARD TIMEOUT'
        elif msg.reason == ofp.OFPRR_DELETE:
            reason = 'DELETE'
        elif msg.reason == ofp.OFPRR_GROUP_DELETE:
            reason = 'GROUP DELETE'
        else:
            reason = 'unknown'
        self.logger.info('FlowRemoved\t'
                         'cookie=%d priority=%d reason=%s table_id=%d '
                         'duration_sec=%d duration_nsec=%d '
                         'idle_timeout=%d hard_timeout=%d '
                         'packet_count=%d byte_count=%d match.fields=%s',
                         msg.cookie, msg.priority, reason, msg.table_id,
                         msg.duration_sec, msg.duration_nsec,
                         msg.idle_timeout, msg.hard_timeout,
                         msg.packet_count, msg.byte_count, msg.match)

    @set_ev_cls(ofp_event.EventOFPErrorMsg,
                [HANDSHAKE_DISPATCHER, CONFIG_DISPATCHER, MAIN_DISPATCHER])
    def error_msg_handler(self, ev):
        """Log OpenFlow error messages from the switch."""
        msg = ev.msg
        self.logger.error('ErrorMsg\ttype=0x%02x code=0x%02x '
                          'message=%s',
                          msg.type, msg.code, utils.hex_array(msg.data))

    @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
    def packet_in_handler(self, ev):
        """Log packet-in events with a short packet summary."""
        msg = ev.msg
        dp = msg.datapath
        ofp = dp.ofproto
        if msg.reason == ofp.OFPR_NO_MATCH:
            reason = 'NO MATCH'
        elif msg.reason == ofp.OFPR_ACTION:
            reason = 'ACTION'
        elif msg.reason == ofp.OFPR_INVALID_TTL:
            reason = 'INVALID TTL'
        else:
            reason = 'UNKNOWN'
        self.logger.info('PacketIn\t'
                         'buffer_id=%x total_len=%d reason=%s '
                         'table_id=%d cookie=%d match=%s summary=%s',
                         msg.buffer_id, msg.total_len, reason,
                         msg.table_id, msg.cookie, msg.match,
                         self.get_packet_summary(msg.data))

    # @set_ev_cls(event.EventSwitchEnter)
    def get_topology_data(self):
        """Return the discovered topology as dicts of switches, links,
        and hosts (ryu topology API objects serialized via to_dict)."""
        switch_list = get_all_switch(self)
        switches = [switch.to_dict() for switch in switch_list]
        links_list = get_all_link(self)
        links = [link.to_dict() for link in links_list]
        host_list = get_all_host(self)
        hosts = [h.to_dict() for h in host_list]
        return {"switches": switches, "links": links, "hosts": hosts}
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ryu.base import app_manager
from ryu.app.wsgi import WSGIApplication
from ryu.controller import dpset
# these are needed for the events
from ryu.controller import ofp_event
from ryu.controller.handler import HANDSHAKE_DISPATCHER
from ryu.controller.handler import CONFIG_DISPATCHER
from ryu.controller.handler import MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.lib import ofctl_v1_3
from ryu.lib import ofctl_utils
from ryu import utils
# for packet content
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import ether_types
# for topology discovery
#from ryu.topology import event, switches
from ryu.topology.api import get_all_switch, get_all_link, get_all_host
from webapi import WebApi
import os, logging
from logging.handlers import WatchedFileHandler
class FlowManager(app_manager.RyuApp):
#OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
_CONTEXTS = {'wsgi': WSGIApplication,
'dpset': dpset.DPSet}
port_id = {
"IN_PORT": 0xfffffff8,
"TABLE": 0xfffffff9,
"NORMAL": 0xfffffffa,
"FLOOD": 0xfffffffb,
"ALL": 0xfffffffc,
"CONTROLLER": 0xfffffffd,
"LOCAL": 0xfffffffe,
"ANY": 0xffffffff
}
logname = 'flwmgr'
logfile = 'flwmgr.log'
def __init__(self, *args, **kwargs):
super(FlowManager, self).__init__(*args, **kwargs)
wsgi = kwargs['wsgi']
self.dpset = kwargs['dpset']
# get this file's path
dirname = os.path.dirname(__file__)
self.lists = {}
self.lists['actions'] = self.read_files(
'actions', os.path.join(dirname, 'data/actions.txt'))
self.lists['matches'] = self.read_files(
'matches', os.path.join(dirname, 'data/matches.txt'))
self.waiters = {}
self.ofctl = ofctl_v1_3
# Data exchanged with WebApi
wsgi.register(WebApi,
{"webctl": self,
"dpset": self.dpset,
"lists": self.lists,
"waiters": self.waiters})
# Setup logging
self.logger = self.get_logger(self.logname, self.logfile, 'INFO', 0)
def get_logger(self, logname, logfile, loglevel, propagate):
"""Create and return a logger object."""
# TODO: simplify
logger = logging.getLogger(logname)
logger_handler = WatchedFileHandler(logfile, mode='w')
# removed \t%(name)-6s
log_fmt = '%(asctime)s\t%(levelname)-8s\t%(message)s'
logger_handler.setFormatter(
logging.Formatter(log_fmt, '%b %d %H:%M:%S'))
logger.addHandler(logger_handler)
logger.propagate = propagate
logger.setLevel(loglevel)
return logger
def get_switches(self):
"""Return switches."""
return self.dpset.get_all()
def get_switch_desc(self, dpid):
dp = self.dpset.get(dpid)
if dp:
return self.ofctl.get_desc_stats(dp, self.waiters, to_user=True)
else:
return None
def get_port_desc(self, dpid):
dp = self.dpset.get(dpid)
if dp:
return self.ofctl.get_port_desc(dp, self.waiters)
else:
return None
def get_port_stat(self, dpid):
dp = self.dpset.get(dpid)
if dp:
return self.ofctl.get_port_stats(dp, self.waiters, port=None, to_user=True)
return None
def get_flow_summary(self, dpid):
dp = self.dpset.get(dpid)
if dp:
return self.ofctl.get_aggregate_flow_stats(dp, self.waiters)
return None
def get_table_stat(self, dpid):
dp = self.dpset.get(dpid)
if dp:
return self.ofctl.get_table_stats(dp, self.waiters)
else:
return None
def read_logs(self):
items = []
with open(self.logfile, 'r') as my_file:
while True:
line = my_file.readline()
if not line:
break
lst = line.split('\t')
items.append(lst)
#items.append(line)
return items
def read_files(self, key, filename):
"""Reads tab-seperated text files.
Used to read files that contain data about match fields and actions.
"""
items = {}
with open(filename, 'r') as my_file:
while True:
line = my_file.readline()
if not line:
break
lst = line.split('\t')
items[lst[0]] = lst
return items
def get_actions(self, parser, set):
actions = []
aDict = {
'SET_FIELD': (parser.OFPActionSetField, 'field'),
'COPY_TTL_OUT': (parser.OFPActionCopyTtlOut, None),
'COPY_TTL_IN': (parser.OFPActionCopyTtlIn, None),
'POP_PBB': (parser.OFPActionPopPbb, None),
'PUSH_PBB': (parser.OFPActionPushPbb, 'ethertype'),
'POP_MPLS': (parser.OFPActionPopMpls, 'ethertype'),
'PUSH_MPLS': (parser.OFPActionPushMpls, 'ethertype'),
'POP_VLAN': (parser.OFPActionPopVlan, None),
'PUSH_VLAN': (parser.OFPActionPushVlan, 'ethertype'),
'DEC_MPLS_TTL': (parser.OFPActionDecMplsTtl, None),
'SET_MPLS_TTL': (parser.OFPActionSetMplsTtl, 'mpls_ttl'),
'DEC_NW_TTL': (parser.OFPActionDecNwTtl, None),
'SET_NW_TTL': (parser.OFPActionSetNwTtl, 'nw_ttl'),
'SET_QUEUE': (parser.OFPActionSetQueue, 'queue_id'),
'GROUP': (parser.OFPActionGroup, 'group_id'),
'OUTPUT': (parser.OFPActionOutput, 'port'),
}
for action in set:
key = action.keys()[0] #There should be only one key
value = action[key]
if key in aDict:
f = aDict[key][0] # the action
if aDict[key][1]: # check if the action needs a value
kwargs = {}
if aDict[key][1] == 'field':
x = value.split('=')
kwargs = {x[0]: x[1]}
elif aDict[key][1] == 'port':
x = value.upper()
val = self.port_id[x] if x in self.port_id else int(x)
kwargs = {aDict[key][1]: val}
else:
kwargs = {aDict[key][1]: int(value)}
actions.append(f(**kwargs))
else:
actions.append(f())
else:
raise Exception("Action {} not supported!".format(key))
return actions
def process_flow_message(self, d):
"""Sends flow form data to the switch to update flow tables.
"""
dp = self.dpset.get(d["dpid"])
if not dp:
return "Datapatch does not exist!"
ofproto = dp.ofproto
parser = dp.ofproto_parser
command = {
'add': ofproto.OFPFC_ADD,
'mod': ofproto.OFPFC_MODIFY,
'modst': ofproto.OFPFC_MODIFY_STRICT,
'del': ofproto.OFPFC_DELETE,
'delst': ofproto.OFPFC_DELETE_STRICT,
}
# Initialize arguments for the flow mod message
msg_kwargs = {
'datapath': dp,
'command': command.get(d["operation"], ofproto.OFPFC_ADD),
'buffer_id': ofproto.OFP_NO_BUFFER,
}
try:
msg_kwargs['table_id'] = d['table_id']
# Match fields
mf = d["match"]
match = parser.OFPMatch(**mf)
# print(match.to_jsondict())
msg_kwargs['match'] = match
# if d['hard_timeout'] else 0
msg_kwargs['hard_timeout'] = d['hard_timeout']
# if d['idle_timeout'] else 0
msg_kwargs['idle_timeout'] = d['idle_timeout']
msg_kwargs['priority'] = d['priority'] # if d['priority'] else 0
msg_kwargs['cookie'] = d['cookie'] # if d['cookie'] else 0
# if d['cookie_mask'] else 0
msg_kwargs['cookie_mask'] = d['cookie_mask']
# d['out_port'] # for the delete command
msg_kwargs['out_port'] = d['out_port'] if d['out_port'] >= 0 else ofproto.OFPP_ANY
# d['out_group'] # for the delete command
msg_kwargs['out_group'] = d['out_group'] if d['out_group'] >= 0 else ofproto.OFPG_ANY
# instructions
inst = []
# Goto meter
if d["meter_id"]:
inst += [parser.OFPInstructionMeter(d["meter_id"])]
# Apply Actions
if d["apply"]:
applyActions = self.get_actions(parser, d["apply"])
inst += [parser.OFPInstructionActions(
ofproto.OFPIT_APPLY_ACTIONS, applyActions)]
# Clear Actions
if d["clearactions"]:
inst += [parser.OFPInstructionActions(
ofproto.OFPIT_CLEAR_ACTIONS, [])]
# Write Actions
if d["write"]:
# bc actions must be unique they are in dict
# from dict to list
toList = [{k:d["write"][k]} for k in d["write"]]
#print(toList)
writeActions = self.get_actions(parser, toList)
inst += [parser.OFPInstructionActions(
ofproto.OFPIT_WRITE_ACTIONS, writeActions)]
# Write Metadata
if d["metadata"]:
inst += [parser.OFPInstructionWriteMetadata(
d["metadata"], d["metadata_mask"])]
# Goto Table Metadata
if d["goto"]:
inst += [parser.OFPInstructionGotoTable(table_id=d["goto"])]
if inst:
msg_kwargs['instructions'] = inst
# Flags
flags = 0
flags += 0x01 if d['SEND_FLOW_REM'] else 0
flags += 0x02 if d['CHECK_OVERLAP'] else 0
flags += 0x04 if d['RESET_COUNTS'] else 0
flags += 0x08 if d['NO_PKT_COUNTS'] else 0
flags += 0x10 if d['NO_BYT_COUNTS'] else 0
msg_kwargs['flags'] = flags
except Exception as e:
return "Value for '{}' is not found!".format(e.message)
# ryu/ryu/ofproto/ofproto_v1_3_parser.py
msg = parser.OFPFlowMod(**msg_kwargs)
try:
dp.send_msg(msg) # ryu/ryu/controller/controller.py
except KeyError as e:
return e.__repr__()
except Exception as e:
return e.__repr__()
return "Message sent successfully."
def process_group_message(self, d):
"""Sends group form data to the switch to update group tables.
"""
dp = self.dpset.get(d["dpid"])
if not dp:
return "Datapatch does not exist!"
ofproto = dp.ofproto
parser = dp.ofproto_parser
command = {
'add': ofproto.OFPGC_ADD,
'mod': ofproto.OFPGC_MODIFY,
'del': ofproto.OFPGC_DELETE,
}
cmd = command.get(d["operation"], ofproto.OFPGC_ADD)
type_convert = {'ALL': dp.ofproto.OFPGT_ALL,
'SELECT': dp.ofproto.OFPGT_SELECT,
'INDIRECT': dp.ofproto.OFPGT_INDIRECT,
'FF': dp.ofproto.OFPGT_FF}
gtype = type_convert.get(d["type"])
group_id = d["group_id"]
buckets = []
for bucket in d["buckets"]:
#print("bucket", bucket)
weight = bucket.get('weight', 0)
watch_port = bucket.get('watch_port', ofproto.OFPP_ANY)
watch_group = bucket.get('watch_group', dp.ofproto.OFPG_ANY)
actions = []
if bucket['actions']:
actions = self.get_actions(parser, bucket['actions'])
buckets.append(dp.ofproto_parser.OFPBucket(
weight, watch_port, watch_group, actions))
#print(dp, cmd, gtype, group_id, buckets)
group_mod = parser.OFPGroupMod(
dp, cmd, gtype, group_id, buckets)
try:
dp.send_msg(group_mod) # ryu/ryu/controller/controller.py
except KeyError as e:
return e.__repr__()
except Exception as e:
return e.__repr__()
return "Message sent successfully."
def process_meter_message(self, d):
"""Sends meter form data to the switch to update meter table.
"""
dp = self.dpset.get(d["dpid"])
if not dp:
return "Datapatch does not exist!"
ofproto = dp.ofproto
parser = dp.ofproto_parser
command = {
'add': ofproto.OFPMC_ADD,
'mod': ofproto.OFPMC_MODIFY,
'del': ofproto.OFPMC_DELETE,
}
cmd = command.get(d["operation"], ofproto.OFPMC_ADD)
meter_id = d["meter_id"]
# Flags
flags = 0
flags += 0x01 if d['OFPMF_KBPS'] else 0
flags += 0x02 if d['OFPMF_PKTPS'] else 0
flags += 0x04 if d['OFPMF_BURST'] else 0
flags += 0x08 if d['OFPMF_STATS'] else 0
bands = []
for band in d["bands"]:
#mtype = type_convert.get(band[0])
if band[0] == 'DROP':
bands += [parser.OFPMeterBandDrop(rate=band[1],
burst_size=band[2])]
elif band[0] == 'DSCP_REMARK':
bands += [parser.OFPMeterBandDscpRemark(rate=band[1],
burst_size=band[2], prec_level=band[3])]
print(dp, cmd, flags, meter_id, bands)
meter_mod = parser.OFPMeterMod(dp, cmd, flags, meter_id, bands)
try:
dp.send_msg(meter_mod)
except KeyError as e:
return e.__repr__()
except Exception as e:
return e.__repr__()
return "Message sent successfully."
# def get_flow_stats(self, req, dpid): # unused
# flow = {} # no filters
# dp = self.dpset.get(int(str(dpid), 0))
# return self.ofctl.get_flow_stats(dp, self.waiters, flow)
def get_stats(self, req, dpid):
dp = self.dpset.get(int(str(dpid), 0))
if req == "flows":
return self.ofctl.get_flow_stats(dp, self.waiters)
elif req == "groups":
return {"desc": self.ofctl.get_group_desc(dp, self.waiters),
"stats": self.ofctl.get_group_stats(dp, self.waiters)}
elif req == "meters":
return {"desc": self.ofctl.get_meter_config(dp, self.waiters),
"stats": self.ofctl.get_meter_stats(dp, self.waiters)}
def get_packet_summary(self, content):
pkt = packet.Packet(content)
eth = pkt.get_protocols(ethernet.ethernet)[0]
ethtype = eth.ethertype
dst = eth.dst
src = eth.src
return '(src={}, dst={}, type=0x{:04x})'.format(src, dst, ethtype)
##### Event Handlers #######################################
@set_ev_cls([ # ofp_event.EventOFPStatsReply,
ofp_event.EventOFPDescStatsReply,
ofp_event.EventOFPFlowStatsReply,
ofp_event.EventOFPAggregateStatsReply,
ofp_event.EventOFPTableStatsReply,
# ofp_event.EventOFPTableFeaturesStatsReply,
ofp_event.EventOFPPortStatsReply,
# ofp_event.EventOFPQueueStatsReply,
# ofp_event.EventOFPQueueDescStatsReply,
ofp_event.EventOFPMeterStatsReply,
# ofp_event.EventOFPMeterFeaturesStatsReply,
ofp_event.EventOFPMeterConfigStatsReply,
ofp_event.EventOFPGroupStatsReply,
# ofp_event.EventOFPGroupFeaturesStatsReply,
ofp_event.EventOFPGroupDescStatsReply,
ofp_event.EventOFPPortDescStatsReply,
# ofp_event.EventOFPPacketIn,
], MAIN_DISPATCHER)
def stats_reply_handler(self, ev):
msg = ev.msg
dp = msg.datapath
if dp.id not in self.waiters:
return
if msg.xid not in self.waiters[dp.id]:
return
lock, msgs = self.waiters[dp.id][msg.xid]
msgs.append(msg)
flags = dp.ofproto.OFPMPF_REPLY_MORE
if msg.flags & flags:
return
del self.waiters[dp.id][msg.xid]
lock.set()
# self.messages.append(msg)
@set_ev_cls(ofp_event.EventOFPFlowRemoved, MAIN_DISPATCHER)
def flow_removed_handler(self, ev):
msg = ev.msg
dp = msg.datapath
ofp = dp.ofproto
if msg.reason == ofp.OFPRR_IDLE_TIMEOUT:
reason = 'IDLE TIMEOUT'
elif msg.reason == ofp.OFPRR_HARD_TIMEOUT:
reason = 'HARD TIMEOUT'
elif msg.reason == ofp.OFPRR_DELETE:
reason = 'DELETE'
elif msg.reason == ofp.OFPRR_GROUP_DELETE:
reason = 'GROUP DELETE'
else:
reason = 'unknown'
self.logger.info('FlowRemoved\t'
'cookie=%d priority=%d reason=%s table_id=%d '
'duration_sec=%d duration_nsec=%d '
'idle_timeout=%d hard_timeout=%d '
'packet_count=%d byte_count=%d match.fields=%s',
msg.cookie, msg.priority, reason, msg.table_id,
msg.duration_sec, msg.duration_nsec,
msg.idle_timeout, msg.hard_timeout,
msg.packet_count, msg.byte_count, msg.match)
@set_ev_cls(ofp_event.EventOFPErrorMsg,
[HANDSHAKE_DISPATCHER, CONFIG_DISPATCHER, MAIN_DISPATCHER])
def error_msg_handler(self, ev):
msg = ev.msg
self.logger.error('ErrorMsg\ttype=0x%02x code=0x%02x '
'message=%s',
msg.type, msg.code, utils.hex_array(msg.data))
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def packet_in_handler(self, ev):
msg = ev.msg
dp = msg.datapath
ofp = dp.ofproto
if msg.reason == ofp.OFPR_NO_MATCH:
reason = 'NO MATCH'
elif msg.reason == ofp.OFPR_ACTION:
reason = 'ACTION'
elif msg.reason == ofp.OFPR_INVALID_TTL:
reason = 'INVALID TTL'
else:
reason = 'UNKNOWN'
self.logger.info('PacketIn\t'
'buffer_id=%x total_len=%d reason=%s '
'table_id=%d cookie=%d match=%s summary=%s',
msg.buffer_id, msg.total_len, reason,
msg.table_id, msg.cookie, msg.match,
#utils.hex_array(msg.data))
self.get_packet_summary(msg.data))
# @set_ev_cls(event.EventSwitchEnter)
def get_topology_data(self):
"""Get Topology Data
"""
switch_list = get_all_switch(self)
switches = [switch.to_dict() for switch in switch_list]
links_list = get_all_link(self)
links = [link.to_dict() for link in links_list]
host_list = get_all_host(self)
hosts = [h.to_dict() for h in host_list]
return {"switches": switches, "links":links, "hosts": hosts} | en | 0.601297 | # Copyright (c) 2018 <NAME> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # these are needed for the events # for packet content # for topology discovery #from ryu.topology import event, switches #OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION] # get this file's path # Data exchanged with WebApi # Setup logging Create and return a logger object. # TODO: simplify # removed \t%(name)-6s Return switches. #items.append(line) Reads tab-seperated text files. Used to read files that contain data about match fields and actions. #There should be only one key # the action # check if the action needs a value Sends flow form data to the switch to update flow tables. # Initialize arguments for the flow mod message # Match fields # print(match.to_jsondict()) # if d['hard_timeout'] else 0 # if d['idle_timeout'] else 0 # if d['priority'] else 0 # if d['cookie'] else 0 # if d['cookie_mask'] else 0 # d['out_port'] # for the delete command # d['out_group'] # for the delete command # instructions # Goto meter # Apply Actions # Clear Actions # Write Actions # bc actions must be unique they are in dict # from dict to list #print(toList) # Write Metadata # Goto Table Metadata # Flags # ryu/ryu/ofproto/ofproto_v1_3_parser.py # ryu/ryu/controller/controller.py Sends group form data to the switch to update group tables. 
#print("bucket", bucket) #print(dp, cmd, gtype, group_id, buckets) # ryu/ryu/controller/controller.py Sends meter form data to the switch to update meter table. # Flags #mtype = type_convert.get(band[0]) # def get_flow_stats(self, req, dpid): # unused # flow = {} # no filters # dp = self.dpset.get(int(str(dpid), 0)) # return self.ofctl.get_flow_stats(dp, self.waiters, flow) ##### Event Handlers ####################################### # ofp_event.EventOFPStatsReply, # ofp_event.EventOFPTableFeaturesStatsReply, # ofp_event.EventOFPQueueStatsReply, # ofp_event.EventOFPQueueDescStatsReply, # ofp_event.EventOFPMeterFeaturesStatsReply, # ofp_event.EventOFPGroupFeaturesStatsReply, # ofp_event.EventOFPPacketIn, # self.messages.append(msg) #utils.hex_array(msg.data)) # @set_ev_cls(event.EventSwitchEnter) Get Topology Data | 1.419673 | 1 |
tests/test_orthogonal_procrustes.py | PSSF23/graspologic | 148 | 6617344 | # Copyright (c) Microsoft Corporation and contributors.
# Licensed under the MIT License.
import unittest
import numpy as np
from graspologic.align import OrthogonalProcrustes
class TestOrthogonalProcrustes(unittest.TestCase):
def test_bad_datasets(self):
X = np.arange(6).reshape(6, 1)
Y = np.arange(6).reshape(6, 1)
Y_wrong_d = np.arange(12).reshape(6, 2)
Y_wrong_n = np.arange(12).reshape(12, 1)
# check passing weird stuff as input (caught by us)
with self.assertRaises(TypeError):
aligner = OrthogonalProcrustes()
aligner.fit("hello there", Y)
with self.assertRaises(TypeError):
aligner = OrthogonalProcrustes()
aligner.fit(X, "hello there")
with self.assertRaises(TypeError):
aligner = OrthogonalProcrustes()
aligner.fit({"hello": "there"}, Y)
with self.assertRaises(TypeError):
aligner = OrthogonalProcrustes()
aligner.fit(X, {"hello": "there"})
# check passing arrays of weird ndims (caught by check_array)
with self.assertRaises(ValueError):
aligner = OrthogonalProcrustes()
aligner.fit(X, Y.reshape(3, 2, 1))
with self.assertRaises(ValueError):
aligner = OrthogonalProcrustes()
aligner.fit(X.reshape(3, 2, 1), Y)
# check passing arrays with different dimensions (caught by us)
with self.assertRaises(ValueError):
aligner = OrthogonalProcrustes()
aligner.fit(X, Y_wrong_d)
# check passing arrays with different number of vertices (caught by us)
with self.assertRaises(ValueError):
aligner = OrthogonalProcrustes()
aligner.fit(X, Y_wrong_n)
# check passing array with wrong dimensions to transform (caught by us)
with self.assertRaises(ValueError):
aligner = OrthogonalProcrustes()
aligner.fit(X, Y)
aligner.transform(Y_wrong_d)
# passing array with different number of vertices to fit is okay
aligner = OrthogonalProcrustes()
aligner.fit(X, Y)
aligner.transform(Y_wrong_n)
def test_identity(self):
Y = np.array([[1234, 19], [6798, 18], [9876, 17], [4321, 16]])
aligner = OrthogonalProcrustes()
aligner.fit(Y, Y)
assert np.all(np.isclose(aligner.Q_, np.eye(2)))
def test_two_datasets(self):
# A very simple example with a true existing solution
# X: Y:
# | |
# | 2
# | 1 |
# 2 | |
# | |
# | |
# ------+------ -1----+----3-
# | |
# | |
# | 4 |
# 3 | |
# | 4
# | |
#
# solution is
# _ _ _ _
# | | | |
# | 3 / 5 - 4 / 5 | | -1 0 |
# | | times | |
# | 3 / 5 3 / 5 | | 0 1 |
# |_ _| |_ _|
# because it is just rotation times reflection
X = np.array([[3, 4], [-4, 3], [-3, -4], [4, -3]])
Y = np.array([[-5, 0], [0, 5], [5, 0], [0, -5]])
Q_answer = np.array([[-0.6, -0.8], [-0.8, 0.6]])
X_answer = X.copy() @ Q_answer
# first, do fit and transform separately
aligner_1 = OrthogonalProcrustes()
aligner_1.fit(X, Y)
Q_test_1 = aligner_1.Q_
X_test_1 = aligner_1.transform(X)
self.assertTrue(np.isclose(0, aligner_1.score_))
self.assertTrue(np.all(np.isclose(Q_test_1, Q_answer)))
self.assertTrue(np.all(np.isclose(X_test_1, X_answer)))
# now, do fit_transform
aligner_2 = OrthogonalProcrustes()
X_test_2 = aligner_2.fit_transform(X, Y)
Q_test_2 = aligner_2.Q_
self.assertTrue(np.isclose(0, aligner_2.score_))
self.assertTrue(np.all(np.isclose(Q_test_2, Q_answer)))
self.assertTrue(np.all(np.isclose(X_test_2, X_answer)))
if __name__ == "__main__":
unittest.main()
| # Copyright (c) Microsoft Corporation and contributors.
# Licensed under the MIT License.
import unittest
import numpy as np
from graspologic.align import OrthogonalProcrustes
class TestOrthogonalProcrustes(unittest.TestCase):
def test_bad_datasets(self):
X = np.arange(6).reshape(6, 1)
Y = np.arange(6).reshape(6, 1)
Y_wrong_d = np.arange(12).reshape(6, 2)
Y_wrong_n = np.arange(12).reshape(12, 1)
# check passing weird stuff as input (caught by us)
with self.assertRaises(TypeError):
aligner = OrthogonalProcrustes()
aligner.fit("hello there", Y)
with self.assertRaises(TypeError):
aligner = OrthogonalProcrustes()
aligner.fit(X, "hello there")
with self.assertRaises(TypeError):
aligner = OrthogonalProcrustes()
aligner.fit({"hello": "there"}, Y)
with self.assertRaises(TypeError):
aligner = OrthogonalProcrustes()
aligner.fit(X, {"hello": "there"})
# check passing arrays of weird ndims (caught by check_array)
with self.assertRaises(ValueError):
aligner = OrthogonalProcrustes()
aligner.fit(X, Y.reshape(3, 2, 1))
with self.assertRaises(ValueError):
aligner = OrthogonalProcrustes()
aligner.fit(X.reshape(3, 2, 1), Y)
# check passing arrays with different dimensions (caught by us)
with self.assertRaises(ValueError):
aligner = OrthogonalProcrustes()
aligner.fit(X, Y_wrong_d)
# check passing arrays with different number of vertices (caught by us)
with self.assertRaises(ValueError):
aligner = OrthogonalProcrustes()
aligner.fit(X, Y_wrong_n)
# check passing array with wrong dimensions to transform (caught by us)
with self.assertRaises(ValueError):
aligner = OrthogonalProcrustes()
aligner.fit(X, Y)
aligner.transform(Y_wrong_d)
# passing array with different number of vertices to fit is okay
aligner = OrthogonalProcrustes()
aligner.fit(X, Y)
aligner.transform(Y_wrong_n)
def test_identity(self):
Y = np.array([[1234, 19], [6798, 18], [9876, 17], [4321, 16]])
aligner = OrthogonalProcrustes()
aligner.fit(Y, Y)
assert np.all(np.isclose(aligner.Q_, np.eye(2)))
def test_two_datasets(self):
# A very simple example with a true existing solution
# X: Y:
# | |
# | 2
# | 1 |
# 2 | |
# | |
# | |
# ------+------ -1----+----3-
# | |
# | |
# | 4 |
# 3 | |
# | 4
# | |
#
# solution is
# _ _ _ _
# | | | |
# | 3 / 5 - 4 / 5 | | -1 0 |
# | | times | |
# | 3 / 5 3 / 5 | | 0 1 |
# |_ _| |_ _|
# because it is just rotation times reflection
X = np.array([[3, 4], [-4, 3], [-3, -4], [4, -3]])
Y = np.array([[-5, 0], [0, 5], [5, 0], [0, -5]])
Q_answer = np.array([[-0.6, -0.8], [-0.8, 0.6]])
X_answer = X.copy() @ Q_answer
# first, do fit and transform separately
aligner_1 = OrthogonalProcrustes()
aligner_1.fit(X, Y)
Q_test_1 = aligner_1.Q_
X_test_1 = aligner_1.transform(X)
self.assertTrue(np.isclose(0, aligner_1.score_))
self.assertTrue(np.all(np.isclose(Q_test_1, Q_answer)))
self.assertTrue(np.all(np.isclose(X_test_1, X_answer)))
# now, do fit_transform
aligner_2 = OrthogonalProcrustes()
X_test_2 = aligner_2.fit_transform(X, Y)
Q_test_2 = aligner_2.Q_
self.assertTrue(np.isclose(0, aligner_2.score_))
self.assertTrue(np.all(np.isclose(Q_test_2, Q_answer)))
self.assertTrue(np.all(np.isclose(X_test_2, X_answer)))
if __name__ == "__main__":
unittest.main()
| en | 0.852461 | # Copyright (c) Microsoft Corporation and contributors. # Licensed under the MIT License. # check passing weird stuff as input (caught by us) # check passing arrays of weird ndims (caught by check_array) # check passing arrays with different dimensions (caught by us) # check passing arrays with different number of vertices (caught by us) # check passing array with wrong dimensions to transform (caught by us) # passing array with different number of vertices to fit is okay # A very simple example with a true existing solution # X: Y: # | | # | 2 # | 1 | # 2 | | # | | # | | # ------+------ -1----+----3- # | | # | | # | 4 | # 3 | | # | 4 # | | # # solution is # _ _ _ _ # | | | | # | 3 / 5 - 4 / 5 | | -1 0 | # | | times | | # | 3 / 5 3 / 5 | | 0 1 | # |_ _| |_ _| # because it is just rotation times reflection # first, do fit and transform separately # now, do fit_transform | 2.851093 | 3 |
eval.py | Kdc23/video-caption-pytorch | 3 | 6617345 | <filename>eval.py<gh_stars>1-10
import json
import os
import opts
import torch
from torch import nn
from torch.autograd import Variable
from torch.utils.data import DataLoader
from models import EncoderRNN, DecoderRNN, S2VTAttModel, S2VTModel
from dataloader import VideoDataset
import misc.utils as utils
from misc.cocoeval import suppress_stdout_stderr, COCOScorer
from pandas.io.json import json_normalize
def convert_data_to_coco_scorer_format(data_frame):
gts = {}
for row in zip(data_frame["caption"], data_frame["video_id"]):
if row[1] in gts:
gts[row[1]].append(
{'image_id': row[1], 'cap_id': len(gts[row[1]]), 'caption': row[0]})
else:
gts[row[1]] = []
gts[row[1]].append(
{'image_id': row[1], 'cap_id': len(gts[row[1]]), 'caption': row[0]})
return gts
def test(model, crit, dataset, vocab, opt):
loader = DataLoader(dataset, batch_size=opt.batch_size, shuffle=True)
scorer = COCOScorer()
gt_dataframe = json_normalize(json.load(open(opt.input_json))['sentences'])
gts = convert_data_to_coco_scorer_format(gt_dataframe)
results = []
samples = {}
for data in loader:
# forward the model to get loss
fc_feats = Variable(data['fc_feats']).cuda()
labels = Variable(data['labels']).long().cuda()
with torch.no_grad():
# forward the model to also get generated samples for each image
seq_probs, seq_preds = model(
fc_feats, labels, teacher_forcing_ratio=0)
print(seq_preds)
sents = utils.decode_sequence(vocab, seq_preds)
for k, sent in enumerate(sents):
video_id = 'video' + str(data['ix'][k])
samples[video_id] = [{'image_id': video_id, 'caption': sent}]
with suppress_stdout_stderr():
valid_score = scorer.score(gts, samples, samples.keys())
results.append(valid_score)
print(valid_score)
if not os.path.exists(opt.results_path):
os.makedirs(opt.results_path)
with open(os.path.join(opt.results_path, "scores.txt"), 'a') as scores_table:
scores_table.write(json.dumps(results[0]) + "\n")
with open(os.path.join(opt.results_path, opt.model.split("/")[-1].split('.')[0] + ".json"), 'w') as prediction_results:
json.dump({"predictions": samples, "scores": valid_score},
prediction_results)
def main(opt):
dataset = VideoDataset(opt, 'test')
opt.vocab_size = dataset.get_vocab_size()
opt.seq_length = dataset.seq_length
if opt.model == 'S2VTModel':
model = S2VTModel(opt.vocab_size, opt.seq_length, opt.dim_hidden, opt.dim_word,
rnn_dropout_p=opt.rnn_dropout_p).cuda()
elif opt.model == "S2VTAttModel":
encoder = EncoderRNN(opt.dim_vid, opt.dim_hidden)
decoder = DecoderRNN(opt.vocab_size, opt.seq_length, opt.dim_hidden, opt.dim_word,
rnn_dropout_p=0.2)
model = S2VTAttModel(encoder, decoder).cuda()
model = nn.DataParallel(model)
# Setup the model
model.load_state_dict(torch.load(opt.saved_model))
model.eval()
crit = utils.LanguageModelCriterion()
test(model, crit, dataset, dataset.get_vocab(), opt)
if __name__ == '__main__':
opt = opts.parse_opt()
os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpu
main(opt)
| <filename>eval.py<gh_stars>1-10
import json
import os
import opts
import torch
from torch import nn
from torch.autograd import Variable
from torch.utils.data import DataLoader
from models import EncoderRNN, DecoderRNN, S2VTAttModel, S2VTModel
from dataloader import VideoDataset
import misc.utils as utils
from misc.cocoeval import suppress_stdout_stderr, COCOScorer
from pandas.io.json import json_normalize
def convert_data_to_coco_scorer_format(data_frame):
gts = {}
for row in zip(data_frame["caption"], data_frame["video_id"]):
if row[1] in gts:
gts[row[1]].append(
{'image_id': row[1], 'cap_id': len(gts[row[1]]), 'caption': row[0]})
else:
gts[row[1]] = []
gts[row[1]].append(
{'image_id': row[1], 'cap_id': len(gts[row[1]]), 'caption': row[0]})
return gts
def test(model, crit, dataset, vocab, opt):
loader = DataLoader(dataset, batch_size=opt.batch_size, shuffle=True)
scorer = COCOScorer()
gt_dataframe = json_normalize(json.load(open(opt.input_json))['sentences'])
gts = convert_data_to_coco_scorer_format(gt_dataframe)
results = []
samples = {}
for data in loader:
# forward the model to get loss
fc_feats = Variable(data['fc_feats']).cuda()
labels = Variable(data['labels']).long().cuda()
with torch.no_grad():
# forward the model to also get generated samples for each image
seq_probs, seq_preds = model(
fc_feats, labels, teacher_forcing_ratio=0)
print(seq_preds)
sents = utils.decode_sequence(vocab, seq_preds)
for k, sent in enumerate(sents):
video_id = 'video' + str(data['ix'][k])
samples[video_id] = [{'image_id': video_id, 'caption': sent}]
with suppress_stdout_stderr():
valid_score = scorer.score(gts, samples, samples.keys())
results.append(valid_score)
print(valid_score)
if not os.path.exists(opt.results_path):
os.makedirs(opt.results_path)
with open(os.path.join(opt.results_path, "scores.txt"), 'a') as scores_table:
scores_table.write(json.dumps(results[0]) + "\n")
with open(os.path.join(opt.results_path, opt.model.split("/")[-1].split('.')[0] + ".json"), 'w') as prediction_results:
json.dump({"predictions": samples, "scores": valid_score},
prediction_results)
def main(opt):
dataset = VideoDataset(opt, 'test')
opt.vocab_size = dataset.get_vocab_size()
opt.seq_length = dataset.seq_length
if opt.model == 'S2VTModel':
model = S2VTModel(opt.vocab_size, opt.seq_length, opt.dim_hidden, opt.dim_word,
rnn_dropout_p=opt.rnn_dropout_p).cuda()
elif opt.model == "S2VTAttModel":
encoder = EncoderRNN(opt.dim_vid, opt.dim_hidden)
decoder = DecoderRNN(opt.vocab_size, opt.seq_length, opt.dim_hidden, opt.dim_word,
rnn_dropout_p=0.2)
model = S2VTAttModel(encoder, decoder).cuda()
model = nn.DataParallel(model)
# Setup the model
model.load_state_dict(torch.load(opt.saved_model))
model.eval()
crit = utils.LanguageModelCriterion()
test(model, crit, dataset, dataset.get_vocab(), opt)
if __name__ == '__main__':
opt = opts.parse_opt()
os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpu
main(opt)
| en | 0.940421 | # forward the model to get loss # forward the model to also get generated samples for each image # Setup the model | 2.13059 | 2 |
pmaf/pipe/specs/_base.py | mmtechslv/PhyloMAF | 1 | 6617346 | from ._metakit import SpecificationBackboneMetabase
from typing import Any, Union
class SpecificationBase(SpecificationBackboneMetabase):
    """Base behavior shared by pipe specifications."""

    def __repr__(self):
        """Return a compact human-readable summary of this specification."""
        status = "Active" if self.state == 1 else "Inactive"
        return "<{}:[{}], Steps:[{}], Inlet:[{}], Outlet:[{}]>".format(
            self.__class__.__name__,
            status,
            str(len(self.steps)),
            self.inlet.__name__,
            self.outlet.__name__,
        )

    def fetch(self, data: Any, *args: Any, **kwargs: Any):
        """Fetch the data through the pipe specification.

        Parameters
        ----------
        data
            Data that will be passed through the pipe.
            Type of `data` depends on the :term:`spec` indicated in :meth:`.inlet`.
        *args
            Compatibility
        **kwargs
            Compatibility

        Returns
        -------
        Results of the pipe with type matching :meth:`.outlet`
        """
        product = None
        pending_args = (data, *args)
        pending_kwargs = kwargs
        # Run every registered step in order, feeding each step's product
        # (plus any extra positional state it returned) into the next step.
        for _name, step_method, _outlet, _description in self.steps:
            product, extra_args, pending_kwargs = step_method(*pending_args, **pending_kwargs)
            pending_args = (product, *extra_args)
        if not isinstance(product, self.outlet):
            raise RuntimeError("Process was not completed according to specification.")
        return product
| from ._metakit import SpecificationBackboneMetabase
from typing import Any, Union
class SpecificationBase(SpecificationBackboneMetabase):
    """Base behavior shared by pipe specifications."""

    def __repr__(self):
        """Return a compact human-readable summary of this specification."""
        status = "Active" if self.state == 1 else "Inactive"
        return "<{}:[{}], Steps:[{}], Inlet:[{}], Outlet:[{}]>".format(
            self.__class__.__name__,
            status,
            str(len(self.steps)),
            self.inlet.__name__,
            self.outlet.__name__,
        )

    def fetch(self, data: Any, *args: Any, **kwargs: Any):
        """Fetch the data through the pipe specification.

        Parameters
        ----------
        data
            Data that will be passed through the pipe.
            Type of `data` depends on the :term:`spec` indicated in :meth:`.inlet`.
        *args
            Compatibility
        **kwargs
            Compatibility

        Returns
        -------
        Results of the pipe with type matching :meth:`.outlet`
        """
        product = None
        pending_args = (data, *args)
        pending_kwargs = kwargs
        # Run every registered step in order, feeding each step's product
        # (plus any extra positional state it returned) into the next step.
        for _name, step_method, _outlet, _description in self.steps:
            product, extra_args, pending_kwargs = step_method(*pending_args, **pending_kwargs)
            pending_args = (product, *extra_args)
        if not isinstance(product, self.outlet):
            raise RuntimeError("Process was not completed according to specification.")
        return product
| en | 0.62409 | Fetch the data through the pipe specification. Parameters ---------- data Data that will be passed through the pipe. Type of `data` depends on the :term:`spec` indicated in :meth:`.inlet`. *args Compatibility **kwargs Compatibility Returns ------- Results of the pipe with type matching :meth:`.outlet` | 2.192008 | 2 |
core/tests/portability.py | tskisner/spt3g_software | 6 | 6617347 | <reponame>tskisner/spt3g_software
#!/usr/bin/env python
from spt3g import core
import os, sys
# Test that we can read files written on a variety of platforms. Pass a path
# to generate test data for whatever this platform is.
testpath = os.path.join(os.environ['SPT3G_SOFTWARE_PATH'], 'core/tests/portability')

# Reference frame: exercise STL-backed container types (vectors, maps)
# whose on-disk layout we don't fully control, mapping a few primitives.
f = core.G3Frame()
f['Five'] = 5
v = core.G3VectorDouble([2.6, 7.2])
f['Vec'] = v
v = core.G3VectorInt([17, 42, 87])
f['VecInt'] = v
m = core.G3MapDouble()
m['Six'] = 6
m['GoingOnSixteen'] = 15.9
f['Map'] = m

# Generation mode: dump the reference frame to the given path and stop.
if len(sys.argv) > 1:
    core.G3Writer(sys.argv[1])(f)
    sys.exit(0)

# For now, we test files from big-endian (PPC64) and little-endian (amd64)
# 64-bit systems. Should include some 32-bit ones.
for sample in ('test-be.g3', 'test-le.g3', 'test-le-v2.g3'):
    print(sample)
    testdata = core.G3Reader(os.path.join(testpath, sample))(None)[0]
    assert testdata['Five'] == f['Five']
    assert len(testdata['Vec']) == len(f['Vec'])
    for got, want in zip(testdata['Vec'], f['Vec']):
        assert got == want
    assert len(testdata['VecInt']) == len(f['VecInt'])
    for got, want in zip(testdata['VecInt'], f['VecInt']):
        assert got == want
    assert len(testdata['Map']) == len(f['Map'])
    assert testdata['Map'].keys() == f['Map'].keys()
    for key in testdata['Map'].keys():
        assert testdata['Map'][key] == f['Map'][key]
| #!/usr/bin/env python
from spt3g import core
import os, sys
# Test that we can read files written on a variety of platforms. Pass a path
# to generate test data for whatever this platform is.
testpath = os.path.join(os.environ['SPT3G_SOFTWARE_PATH'], 'core/tests/portability')
# Test data. Exercise some complicated things (STL bits) that we don't
# necessarily have control over, mapping a few primitive types.
f = core.G3Frame()
f['Five'] = 5
v = core.G3VectorDouble([2.6, 7.2])
f['Vec'] = v
v = core.G3VectorInt([17, 42, 87])
f['VecInt'] = v
m = core.G3MapDouble()
m['Six'] = 6
m['GoingOnSixteen'] = 15.9
f['Map'] = m
# Generation mode: dump the reference frame to the given path and exit
# instead of running the read-back checks.
if len(sys.argv) > 1:
core.G3Writer(sys.argv[1])(f)
sys.exit(0)
# For now, we test files from big-endian (PPC64) and little-endian (amd64)
# 64-bit systems. Should include some 32-bit ones.
# Each sample file must round-trip to a frame equal to the reference above.
for test in ['test-be.g3', 'test-le.g3', 'test-le-v2.g3']:
print(test)
testdata = core.G3Reader(os.path.join(testpath, test))(None)[0]
assert(testdata['Five'] == f['Five'])
assert(len(testdata['Vec']) == len(f['Vec']))
for i in range(len(testdata['Vec'])):
assert(testdata['Vec'][i] == f['Vec'][i])
assert(len(testdata['VecInt']) == len(f['VecInt']))
for i in range(len(testdata['VecInt'])):
assert(testdata['VecInt'][i] == f['VecInt'][i])
assert(len(testdata['Map']) == len(f['Map']))
assert(testdata['Map'].keys() == f['Map'].keys())
for i in testdata['Map'].keys():
assert(testdata['Map'][i] == f['Map'][i]) | en | 0.894662 | #!/usr/bin/env python # Test that we can read files written on a variety of platforms. Pass a path # to generate test data for whatever this platform is. # Test data. Exercise some complicated things (STL bits) that we don't # necessarily have control over, mapping a few primitive types. # For now, we test files from big-endian (PPC64) and little-endian (amd64) # 64-bit systems. Should include some 32-bit ones. | 2.3246 | 2 |
src/bawr/tool.py | schoenemann/bawr | 40 | 6617348 | <filename>src/bawr/tool.py
# Copyright 2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import argparse
from bawr import config_parser as cp
# Tool version string, interpolated into the banner output.
VERSION = '0.0.3'
def banner():
# Print the BAWR ASCII-art logo, tool version and license notice to stdout.
# NOTE(review): the lines below are part of one raw string literal, so no
# comments may be placed between them without changing the printed output.
print(r'''
______ ___ _ _______
| ___ \/ _ \| | | | ___ \
| |_/ / /_\ \ | | | |_/ /
| ___ \ _ | |/\| | /
| |_/ / | | \ /\ / |\ \
\____/\_| |_/\/ \/\_| \_|
version {}
(c) Copyright 2021 <NAME>
Licensed under MIT License.
'''.format(VERSION))
def main(args):
    """Show the start-up banner, then parse the configuration.

    ``args`` is the argparse namespace produced by command_line_arguments().
    """
    banner()
    cfg = cp.Parser(args.cfg, {'src_dir': args.src, 'out_dir': args.out})
def command_line_arguments():
    """Define and parse the tool's command-line interface.

    Returns
    -------
    argparse.Namespace
        Parsed options: ``out`` (output directory, default "build"),
        ``src`` (source directory, default ".") and ``cfg`` (config file,
        default "config.py").
    """
    arg_parser = argparse.ArgumentParser()
    # Declare each flag with its help text and fallback value.
    for flag, description, fallback in (
        ("--out", "Output directory path", "build"),
        ("--src", "Source directory path", "."),
        ("--cfg", "Config file", "config.py"),
    ):
        arg_parser.add_argument(flag, help=description, default=fallback)
    return arg_parser.parse_args()
if __name__ == '__main__':
    # Run the CLI when executed as a script.
    main(command_line_arguments())
| <filename>src/bawr/tool.py
# Copyright 2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import argparse
from bawr import config_parser as cp
# Tool version string, interpolated into the banner output.
VERSION = '0.0.3'
def banner():
# Print the BAWR ASCII-art logo, tool version and license notice to stdout.
# NOTE(review): the lines below are part of one raw string literal, so no
# comments may be placed between them without changing the printed output.
print(r'''
______ ___ _ _______
| ___ \/ _ \| | | | ___ \
| |_/ / /_\ \ | | | |_/ /
| ___ \ _ | |/\| | /
| |_/ / | | \ /\ / |\ \
\____/\_| |_/\/ \/\_| \_|
version {}
(c) Copyright 2021 <NAME>
Licensed under MIT License.
'''.format(VERSION))
def main(args):
    """Show the start-up banner, then parse the configuration.

    ``args`` is the argparse namespace produced by command_line_arguments().
    """
    banner()
    cfg = cp.Parser(args.cfg, {'src_dir': args.src, 'out_dir': args.out})
def command_line_arguments():
    """Define and parse the tool's command-line interface.

    Returns
    -------
    argparse.Namespace
        Parsed options: ``out`` (output directory, default "build"),
        ``src`` (source directory, default ".") and ``cfg`` (config file,
        default "config.py").
    """
    arg_parser = argparse.ArgumentParser()
    # Declare each flag with its help text and fallback value.
    for flag, description, fallback in (
        ("--out", "Output directory path", "build"),
        ("--src", "Source directory path", "."),
        ("--cfg", "Config file", "config.py"),
    ):
        arg_parser.add_argument(flag, help=description, default=fallback)
    return arg_parser.parse_args()
if __name__ == '__main__':
    # Run the CLI when executed as a script.
    main(command_line_arguments())
| en | 0.739748 | # Copyright 2021 <NAME> # # Permission is hereby granted, free of charge, to any person obtaining a copy of # this software and associated documentation files (the "Software"), to deal in # the Software without restriction, including without limitation the rights to use, # copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the # Software, and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, # INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A # PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ______ ___ _ _______ | ___ \/ _ \| | | | ___ \ | |_/ / /_\ \ | | | |_/ / | ___ \ _ | |/\| | / | |_/ / | | \ /\ / |\ \ \____/\_| |_/\/ \/\_| \_| version {} (c) Copyright 2021 <NAME> Licensed under MIT License. | 2.120949 | 2 |
tests/test_lib.py | ddalu5/logs-analyzer | 1 | 6617349 | import os
from unittest import TestCase
from logs_analyzer.lib import *
class TestLib(TestCase):
# Unit tests for logs_analyzer.lib: date-filter construction, raw log
# filtering, request parsing for web/auth logs, and the LogsAnalyzer facade.
# NOTE(review): tests read fixture files under logs-samples/ relative to the
# repository root — confirm those samples ship with the test suite.
def test_get_date_filter(self):
# Date filters for nginx/apache2 use the '[DD/Mon/YYYY:HH:MM' access-log
# prefix; '*' wildcards collapse the filter to a coarser time span.
nginx_settings = get_service_settings('nginx')
self.assertEqual(get_date_filter(nginx_settings, 13, 13, 16, 1, 1989),
'[16/Jan/1989:13:13', "get_date_filter#1")
self.assertEqual(get_date_filter(nginx_settings, '*', '*', 16, 1, 1989),
'[16/Jan/1989', "get_date_filter#2")
self.assertEqual(get_date_filter(nginx_settings, '*'), datetime.now().strftime("[%d/%b/%Y:%H"),
"get_date_filter#3")
apache2_settings = get_service_settings('apache2')
self.assertEqual(get_date_filter(apache2_settings, 13, 13, 16, 1, 1989),
'[16/Jan/1989:13:13', "get_date_filter#4")
self.assertEqual(get_date_filter(apache2_settings, '*', '*', 16, 1, 1989),
'[16/Jan/1989', "get_date_filter#5")
self.assertEqual(get_date_filter(apache2_settings, '*'), datetime.now().strftime("[%d/%b/%Y:%H"),
"get_date_filter#6")
# Auth (syslog) filters use the 'Mon DD HH:MM:' prefix and carry no year.
auth_settings = get_service_settings('auth')
self.assertEqual(get_date_filter(auth_settings, 13, 13, 16, 1),
'Jan 16 13:13:', "get_date_filter#7")
self.assertEqual(get_date_filter(auth_settings, '*', '*', 16, 1),
'Jan 16 ', "get_date_filter#8")
def test_filter_data(self):
# Chained filtering (IP filter, then date filter) must yield the expected
# line counts from the sample logs; omitting both file and data raises.
nginx_settings = get_service_settings('nginx')
date_filter = get_date_filter(nginx_settings, '*', '*', 27, 4, 2016)
base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
file_name = os.path.join(base_dir, 'logs-samples/nginx1.sample')
data = filter_data('192.168.5', filepath=file_name)
data = filter_data(date_filter, data=data)
self.assertEqual(len(data.split("\n")), 28, "filter_data#1")
self.assertRaises(Exception, filter_data, log_filter='192.168.5')
apache2_settings = get_service_settings('apache2')
date_filter = get_date_filter(apache2_settings, 27, 11, 4, 5, 2016)
file_name = os.path.join(base_dir, 'logs-samples/apache1.sample')
data = filter_data('127.0.0.1', filepath=file_name)
data = filter_data(date_filter, data=data)
self.assertEqual(len(data.split("\n")), 34, "filter_data#2")
self.assertRaises(Exception, filter_data, log_filter='127.0.0.1')
auth_settings = get_service_settings('auth')
date_filter = get_date_filter(auth_settings, '*', 22, 4, 5)
file_name = os.path.join(base_dir, 'logs-samples/auth.sample')
data = filter_data('192.168.3.11', filepath=file_name)
data = filter_data(date_filter, data=data)
self.assertEqual(len(data.split("\n")), 19, "filter_data#3")
# is_reverse=True excludes matching lines instead of selecting them.
data = filter_data('192.168.3.11', filepath=file_name, is_reverse=True)
self.assertFalse('192.168.3.11' in data, "filter_data#4")
def test_get_web_requests(self):
# Parse filtered access-log lines into request dicts; passing the date
# pattern/keys additionally normalizes DATETIME to ISO format.
nginx_settings = get_service_settings('nginx')
base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
file_name = os.path.join(base_dir, 'logs-samples/nginx1.sample')
data = filter_data('172.16.58.3', filepath=file_name)
requests = get_web_requests(data, nginx_settings['request_model'])
self.assertEqual(len(requests), 2, "get_web_requests#1")
self.assertTrue('daedalu5' in requests[0].values(), "get_web_requests#2")
requests = get_web_requests(data, nginx_settings['request_model'],
nginx_settings['date_pattern'], nginx_settings['date_keys'])
self.assertEqual(requests[0]['DATETIME'], '2016-04-24 06:26:37', "get_web_requests#3")
apache2_settings = get_service_settings('apache2')
file_name = os.path.join(base_dir, 'logs-samples/apache1.sample')
data = filter_data('127.0.1.1', filepath=file_name)
requests = get_web_requests(data, apache2_settings['request_model'])
self.assertEqual(len(requests), 1, "get_web_requests#4")
self.assertTrue('daedalu5' in requests[0].values(), "get_web_requests#5")
requests = get_web_requests(data, apache2_settings['request_model'],
nginx_settings['date_pattern'], nginx_settings['date_keys'])
self.assertEqual(requests[0]['DATETIME'], '2016-05-04 11:31:39', "get_web_requests#3")
def test_get_auth_requests(self):
# Parse auth-log lines; invalid-user/invalid-password attempts must be
# classified and, with date pattern/keys, DATETIME gets a synthesized year.
auth_settings = get_service_settings('auth')
base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
date_filter = get_date_filter(auth_settings, '*', 22, 4, 5)
file_name = os.path.join(base_dir, 'logs-samples/auth.sample')
data = filter_data('192.168.3.11', filepath=file_name)
data = filter_data(date_filter, data=data)
requests = get_auth_requests(data, auth_settings['request_model'])
self.assertEqual(len(requests), 18, "get_auth_requests#1")
self.assertEqual(requests[17]['INVALID_PASS_USER'], 'root', "get_auth_requests#2")
self.assertEqual(requests[15]['INVALID_USER'], 'admin', "get_auth_requests#3")
requests = get_auth_requests(data, auth_settings['request_model'],
auth_settings['date_pattern'], auth_settings['date_keys'])
self.assertEqual(requests[0]['DATETIME'][4:], '-05-04 22:00:32', "get_auth_requests#4")
def test_logsanalyzer(self):
# The LogsAnalyzer facade must reproduce the results of the free
# functions when given the same filters.
base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
auth_logfile = os.path.join(base_dir, 'logs-samples/auth.sample')
nginx_logfile = os.path.join(base_dir, 'logs-samples/nginx1.sample')
auth_logsanalyzer = LogsAnalyzer('auth', filepath=auth_logfile)
nginx_logsanalyzer = LogsAnalyzer('nginx', filepath=nginx_logfile)
auth_logsanalyzer.add_filter('192.168.3.11')
auth_logsanalyzer.add_date_filter(minute='*', hour=22, day=4, month=5)
requests = auth_logsanalyzer.get_requests()
self.assertEqual(len(requests), 18, "LogsAnalyzer#1")
nginx_logsanalyzer.add_filter('172.16.58.3')
requests = nginx_logsanalyzer.get_requests()
self.assertEqual(len(requests), 2, "LogsAnalyzer#2")
| import os
from unittest import TestCase
from logs_analyzer.lib import *
class TestLib(TestCase):
# Unit tests for logs_analyzer.lib: date-filter construction, raw log
# filtering, request parsing for web/auth logs, and the LogsAnalyzer facade.
# NOTE(review): tests read fixture files under logs-samples/ relative to the
# repository root — confirm those samples ship with the test suite.
def test_get_date_filter(self):
# Date filters for nginx/apache2 use the '[DD/Mon/YYYY:HH:MM' access-log
# prefix; '*' wildcards collapse the filter to a coarser time span.
nginx_settings = get_service_settings('nginx')
self.assertEqual(get_date_filter(nginx_settings, 13, 13, 16, 1, 1989),
'[16/Jan/1989:13:13', "get_date_filter#1")
self.assertEqual(get_date_filter(nginx_settings, '*', '*', 16, 1, 1989),
'[16/Jan/1989', "get_date_filter#2")
self.assertEqual(get_date_filter(nginx_settings, '*'), datetime.now().strftime("[%d/%b/%Y:%H"),
"get_date_filter#3")
apache2_settings = get_service_settings('apache2')
self.assertEqual(get_date_filter(apache2_settings, 13, 13, 16, 1, 1989),
'[16/Jan/1989:13:13', "get_date_filter#4")
self.assertEqual(get_date_filter(apache2_settings, '*', '*', 16, 1, 1989),
'[16/Jan/1989', "get_date_filter#5")
self.assertEqual(get_date_filter(apache2_settings, '*'), datetime.now().strftime("[%d/%b/%Y:%H"),
"get_date_filter#6")
# Auth (syslog) filters use the 'Mon DD HH:MM:' prefix and carry no year.
auth_settings = get_service_settings('auth')
self.assertEqual(get_date_filter(auth_settings, 13, 13, 16, 1),
'Jan 16 13:13:', "get_date_filter#7")
self.assertEqual(get_date_filter(auth_settings, '*', '*', 16, 1),
'Jan 16 ', "get_date_filter#8")
def test_filter_data(self):
# Chained filtering (IP filter, then date filter) must yield the expected
# line counts from the sample logs; omitting both file and data raises.
nginx_settings = get_service_settings('nginx')
date_filter = get_date_filter(nginx_settings, '*', '*', 27, 4, 2016)
base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
file_name = os.path.join(base_dir, 'logs-samples/nginx1.sample')
data = filter_data('192.168.5', filepath=file_name)
data = filter_data(date_filter, data=data)
self.assertEqual(len(data.split("\n")), 28, "filter_data#1")
self.assertRaises(Exception, filter_data, log_filter='192.168.5')
apache2_settings = get_service_settings('apache2')
date_filter = get_date_filter(apache2_settings, 27, 11, 4, 5, 2016)
file_name = os.path.join(base_dir, 'logs-samples/apache1.sample')
data = filter_data('127.0.0.1', filepath=file_name)
data = filter_data(date_filter, data=data)
self.assertEqual(len(data.split("\n")), 34, "filter_data#2")
self.assertRaises(Exception, filter_data, log_filter='127.0.0.1')
auth_settings = get_service_settings('auth')
date_filter = get_date_filter(auth_settings, '*', 22, 4, 5)
file_name = os.path.join(base_dir, 'logs-samples/auth.sample')
data = filter_data('192.168.3.11', filepath=file_name)
data = filter_data(date_filter, data=data)
self.assertEqual(len(data.split("\n")), 19, "filter_data#3")
# is_reverse=True excludes matching lines instead of selecting them.
data = filter_data('192.168.3.11', filepath=file_name, is_reverse=True)
self.assertFalse('192.168.3.11' in data, "filter_data#4")
def test_get_web_requests(self):
# Parse filtered access-log lines into request dicts; passing the date
# pattern/keys additionally normalizes DATETIME to ISO format.
nginx_settings = get_service_settings('nginx')
base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
file_name = os.path.join(base_dir, 'logs-samples/nginx1.sample')
data = filter_data('172.16.58.3', filepath=file_name)
requests = get_web_requests(data, nginx_settings['request_model'])
self.assertEqual(len(requests), 2, "get_web_requests#1")
self.assertTrue('daedalu5' in requests[0].values(), "get_web_requests#2")
requests = get_web_requests(data, nginx_settings['request_model'],
nginx_settings['date_pattern'], nginx_settings['date_keys'])
self.assertEqual(requests[0]['DATETIME'], '2016-04-24 06:26:37', "get_web_requests#3")
apache2_settings = get_service_settings('apache2')
file_name = os.path.join(base_dir, 'logs-samples/apache1.sample')
data = filter_data('127.0.1.1', filepath=file_name)
requests = get_web_requests(data, apache2_settings['request_model'])
self.assertEqual(len(requests), 1, "get_web_requests#4")
self.assertTrue('daedalu5' in requests[0].values(), "get_web_requests#5")
requests = get_web_requests(data, apache2_settings['request_model'],
nginx_settings['date_pattern'], nginx_settings['date_keys'])
self.assertEqual(requests[0]['DATETIME'], '2016-05-04 11:31:39', "get_web_requests#3")
def test_get_auth_requests(self):
# Parse auth-log lines; invalid-user/invalid-password attempts must be
# classified and, with date pattern/keys, DATETIME gets a synthesized year.
auth_settings = get_service_settings('auth')
base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
date_filter = get_date_filter(auth_settings, '*', 22, 4, 5)
file_name = os.path.join(base_dir, 'logs-samples/auth.sample')
data = filter_data('192.168.3.11', filepath=file_name)
data = filter_data(date_filter, data=data)
requests = get_auth_requests(data, auth_settings['request_model'])
self.assertEqual(len(requests), 18, "get_auth_requests#1")
self.assertEqual(requests[17]['INVALID_PASS_USER'], 'root', "get_auth_requests#2")
self.assertEqual(requests[15]['INVALID_USER'], 'admin', "get_auth_requests#3")
requests = get_auth_requests(data, auth_settings['request_model'],
auth_settings['date_pattern'], auth_settings['date_keys'])
self.assertEqual(requests[0]['DATETIME'][4:], '-05-04 22:00:32', "get_auth_requests#4")
def test_logsanalyzer(self):
# The LogsAnalyzer facade must reproduce the results of the free
# functions when given the same filters.
base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
auth_logfile = os.path.join(base_dir, 'logs-samples/auth.sample')
nginx_logfile = os.path.join(base_dir, 'logs-samples/nginx1.sample')
auth_logsanalyzer = LogsAnalyzer('auth', filepath=auth_logfile)
nginx_logsanalyzer = LogsAnalyzer('nginx', filepath=nginx_logfile)
auth_logsanalyzer.add_filter('192.168.3.11')
auth_logsanalyzer.add_date_filter(minute='*', hour=22, day=4, month=5)
requests = auth_logsanalyzer.get_requests()
self.assertEqual(len(requests), 18, "LogsAnalyzer#1")
nginx_logsanalyzer.add_filter('172.16.58.3')
requests = nginx_logsanalyzer.get_requests()
self.assertEqual(len(requests), 2, "LogsAnalyzer#2")
| es | 0.538709 | #1") #2") #3") #4") #5") #6") #7") #8") #1") #2") #3") #4") #1") #2") #3") #4") #5") #3") #1") #2") #3") #4") #1") #2") | 2.799565 | 3 |
pants/test/core/test_sendfile.py | ecdavis/pants | 20 | 6617350 | ###############################################################################
#
# Copyright 2012 Pants Developers (see AUTHORS.txt)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###############################################################################
import os
import socket
import unittest
import pants
from pants.test._pants_util import *
class FileSender(pants.Stream):
    """Stream connection that pushes the contents of data.txt to its peer."""

    def on_connect(self):
        data_path = os.path.dirname(__file__) + "/data.txt"
        with open(data_path, 'r') as payload:
            # The file is flushed here to get around an awkward issue
            # that was only happening with the unit test: sendfile() was
            # blocking for some strange reason.
            self.write_file(payload, flush=True)
class TestSendfile(PantsTestCase):
    """End-to-end check that write_file() delivers data.txt over a socket."""

    def setUp(self):
        # Serve data.txt on localhost:4040 via FileSender connections.
        self.server = pants.Server(ConnectionClass=FileSender).listen(('127.0.0.1', 4040))
        PantsTestCase.setUp(self)

    def test_sendfile(self):
        # Read the expected payload straight from the fixture file.
        with open(os.path.dirname(__file__) + "/data.txt", 'r') as fixture:
            expected_data = ''.join(fixture.readlines())
        # Connect as a plain blocking client and compare what arrives.
        client = socket.socket()
        client.settimeout(1.0)
        client.connect(('127.0.0.1', 4040))
        received = client.recv(1024)
        self.assertEqual(received, expected_data)
        client.close()

    def tearDown(self):
        PantsTestCase.tearDown(self)
        self.server.close()
| ###############################################################################
#
# Copyright 2012 Pants Developers (see AUTHORS.txt)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###############################################################################
import os
import socket
import unittest
import pants
from pants.test._pants_util import *
class FileSender(pants.Stream):
    """Stream connection that pushes the contents of data.txt to its peer."""

    def on_connect(self):
        data_path = os.path.dirname(__file__) + "/data.txt"
        with open(data_path, 'r') as payload:
            # The file is flushed here to get around an awkward issue
            # that was only happening with the unit test: sendfile() was
            # blocking for some strange reason.
            self.write_file(payload, flush=True)
class TestSendfile(PantsTestCase):
    """End-to-end check that write_file() delivers data.txt over a socket."""

    def setUp(self):
        # Serve data.txt on localhost:4040 via FileSender connections.
        self.server = pants.Server(ConnectionClass=FileSender).listen(('127.0.0.1', 4040))
        PantsTestCase.setUp(self)

    def test_sendfile(self):
        # Read the expected payload straight from the fixture file.
        with open(os.path.dirname(__file__) + "/data.txt", 'r') as fixture:
            expected_data = ''.join(fixture.readlines())
        # Connect as a plain blocking client and compare what arrives.
        client = socket.socket()
        client.settimeout(1.0)
        client.connect(('127.0.0.1', 4040))
        received = client.recv(1024)
        self.assertEqual(received, expected_data)
        client.close()

    def tearDown(self):
        PantsTestCase.tearDown(self)
        self.server.close()
| en | 0.739814 | ############################################################################### # # Copyright 2012 Pants Developers (see AUTHORS.txt) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ############################################################################### # The file is flushed here to get around an awkward issue # that was only happening with the unit test. sendfile() was # blocking for some strange reason. | 2.165691 | 2 |