text string | size int64 | token_count int64 |
|---|---|---|
version https://git-lfs.github.com/spec/v1
oid sha256:9ab6c6191360e63c1b4c9b5659aef348a743c9e078be68190917369e4e9563e8
size 60
| 127 | 92 |
def test_con_feed_list():
    """UI test: log in to the locally served Conduit app, open the fourth nav
    tab and dump all article-preview titles to ``list_of_feed.txt``.

    NOTE(review): uses the Selenium 3 ``find_element_by_xpath`` API, the
    deprecated ``Options.headless`` attribute and the positional driver-path
    ``webdriver.Chrome(...)`` constructor — all removed in Selenium 4.
    Confirm the pinned selenium version before upgrading.
    """
    from selenium import webdriver
    import time
    from selenium.webdriver.chrome.options import Options
    from webdriver_manager.chrome import ChromeDriverManager
    opt = Options()
    opt.headless = True  # run Chrome without a visible window
    driver = webdriver.Chrome(ChromeDriverManager().install(), options=opt)
    driver.get('http://localhost:1667/#/login')
    # Log in with the test user's credentials.
    driver.find_element_by_xpath('//fieldset//input[@placeholder="Email"]').send_keys('testuser1@example.com')
    driver.find_element_by_xpath('//fieldset//input[@placeholder="Password"]').send_keys('Abcd123$')
    driver.find_element_by_xpath('//form/button').click()
    time.sleep(4)  # fixed waits; presumably enough for login/navigation — TODO: prefer explicit waits
    driver.find_element_by_xpath('//div[@class="container"]//ul/li[4]/a').click()
    time.sleep(3)
    # Collect the article titles.
    my_articles = driver.find_elements_by_xpath('//div[@class="article-preview"]//h1')
    list_of_feed = []
    for row in my_articles:
        list_of_feed.append(row.text + '\n')
    print(list_of_feed)
    # Save the results to a file (append mode keeps earlier runs).
    with open('list_of_feed.txt', 'a') as x:
        for i in list_of_feed:
            x.write(i)
    driver.close()
| 1,167 | 394 |
import numpy as np
from PIL import Image
from d3dshot.capture_outputs.pytorch_capture_output import PytorchCaptureOutput
class PytorchFloatCaptureOutput(PytorchCaptureOutput):
    """Capture output variant that yields float frames scaled to [0, 1]."""

    def process(self, pointer, pitch, size, width, height, region, rotation):
        """Return the parent class's frame normalized from [0, 255] to [0.0, 1.0]."""
        frame = super().process(pointer, pitch, size, width, height, region, rotation)
        return frame / 255.0

    def to_pil(self, frame):
        """Convert a normalized float frame back into an 8-bit PIL image."""
        scaled = frame * 255.0
        return Image.fromarray(np.array(scaled, dtype=np.uint8))
| 476 | 156 |
from rest_framework.test import APITestCase
from authentication.models import User
class TestModel(APITestCase):
    """Unit tests for the custom User manager's creation helpers."""

    def test_creates_user(self):
        created = User.objects.create_user('username', 'user@gmail.com', 'password1!')
        self.assertIsInstance(created, User)

    def test_creates_superuser(self):
        created = User.objects.create_superuser('username', 'user@gmail.com', 'password1!')
        self.assertIsInstance(created, User)

    def test_raises_error_when_no_username_is_supplied(self):
        with self.assertRaises(ValueError):
            User.objects.create_user(username='', email='user@gmail.com', password='password1!')

    def test_raises_error_when_no_email_is_supplied(self):
        with self.assertRaises(ValueError):
            User.objects.create_user(username='username', email='', password='password1!')

    def test_creates_superuser_with_is_staff_status(self):
        with self.assertRaisesMessage(ValueError, 'Superuser must have is_staff=True.'):
            User.objects.create_superuser(username='username', email='user@gmail.com', password='password1!', is_staff=False)

    def test_creates_superuser_with_is_superuser_status(self):
        with self.assertRaisesMessage(ValueError, 'Superuser must have is_superuser=True.'):
            User.objects.create_superuser(username='username', email='user@gmail.com', password='password1!', is_superuser=False)
# Contains the domain models of the application
from flask_login import UserMixin
from . import db, login_manager
class Genre(db.Model):
    """A movie genre; each genre groups many Movie rows (one-to-many)."""
    __tablename__ = 'genre'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(250), unique=True)
    movies = db.relationship('Movie', back_populates='genre')

    @property
    def serialize(self):
        """Return a JSON-serializable dict of this genre and its movies.

        NOTE(review): the genre's ``name`` is not included in the payload —
        confirm that consumers only need ``id`` and the movie list.
        """
        return {
            'id': self.id,
            'movies': [m.serialize for m in self.movies]
        }
class Movie(db.Model):
    """A movie belonging to one Genre (linked by the genre's unique name)."""
    __tablename__ = 'movie'
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(250), nullable=False)
    description = db.Column(db.Text)
    image_url = db.Column(db.String(250))
    # Stored as a 4-character string rather than an integer column.
    year = db.Column(db.String(4))
    # Foreign key targets genre.name (not genre.id).
    genre_name = db.Column(db.String(250), db.ForeignKey('genre.name'))
    genre = db.relationship('Genre', back_populates='movies')

    @property
    def serialize(self):
        """Return a JSON-serializable dict of this movie.

        NOTE(review): the key 'image-url' is hyphenated while the attribute is
        ``image_url`` — existing API consumers may depend on the hyphenated
        spelling, so it is left unchanged.
        """
        return {
            'id': self.id,
            'title': self.title,
            'description': self.description,
            'image-url': self.image_url,
            'year': self.year
        }
class User(UserMixin, db.Model):
    """Application user identified solely by a social-login provider id."""
    __tablename__ = 'user'
    id = db.Column(db.Integer, primary_key=True)
    # Unique id issued by the external (social) auth provider.
    social_id = db.Column(db.String(64), nullable=False, unique=True)
@login_manager.user_loader
def load_user(id):
    """Flask-Login callback: reload a User from the session-stored id string."""
    return User.query.get(int(id))
| 1,395 | 488 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: drops the ``name`` column from PaymentPackage.

    Irreversible — the dropped values cannot be reconstructed, so
    ``backwards`` raises instead of restoring the column.
    """

    def forwards(self, orm):
        # Deleting field 'PaymentPackage.name'
        db.delete_column('payment_paymentpackage', 'name')

    def backwards(self, orm):
        # User chose to not deal with backwards NULL issues for 'PaymentPackage.name'
        raise RuntimeError("Cannot reverse this migration. 'PaymentPackage.name' and its values cannot be restored.")

    # Frozen ORM state after this migration (South bookkeeping).
    models = {
        'payment.paymentpackage': {
            'Meta': {'object_name': 'PaymentPackage'},
            'code': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
            'conference_price': ('django.db.models.fields.IntegerField', [], {}),
            'early': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        }
    }

    complete_apps = ['payment']
| 1,046 | 298 |
import threading
from tkinter import (
Tk,
Frame,
Label,
Button,
Entry,
Message,
CENTER,
Menu,
Menubutton,
StringVar,
RAISED,
messagebox,
)
from eth_wallet.configuration import (
Configuration,
)
from eth_wallet.api import (
WalletAPI,
)
from eth_wallet.ui.page import (
Page
)
class NewWalletPage(Page):
    """Page that creates a new wallet from a user-supplied passphrase and
    shows the mnemonic restore sentence before continuing to the home page."""

    def __init__(self, *args, **kwargs):
        Page.__init__(self, *args, **kwargs)
        self.configuration = None
        self.api = WalletAPI()
        self.wallet = None
        # Masked passphrase entry.
        lbl_pswd = Label(self,
                         text='Passphrase:',
                         width=60,
                         font=(None, 20))
        lbl_pswd.pack()
        entry_password = Entry(self,
                               show="*",
                               font=(None, 20),
                               justify=CENTER)
        entry_password.pack()
        # "Generate" creates the wallet; the same button later becomes "Continue".
        btn_create_wallet = Button(self,
                                   text="Generate",
                                   width=60,
                                   font=(None, 16),
                                   command=lambda: self.create_wallet(btn_create_wallet,
                                                                      entry_password.get()))
        btn_create_wallet.pack()

    def create_wallet(self, btn_create_wallet, password):
        """
        Create new wallet and display its mnemonic restore sentence.
        :param btn_create_wallet: generate button whose text/command are repurposed
        :param password: passphrase from the user
        :return:
        """
        self.configuration = Configuration().load_configuration()
        self.wallet = self.api.new_wallet(self.configuration, password)
        lbl_remember_words = Label(self,
                                   text='Restore sentence:',
                                   width=60)
        lbl_remember_words.pack()
        lbl_mnemonic = Message(self,
                               text=self.wallet.get_mnemonic(),
                               justify=CENTER,
                               borderwidth=10,
                               background='light blue')
        lbl_mnemonic.pack()
        # Reuse the generate button as the navigation button.
        btn_create_wallet.configure(text="Continue",
                                    command=self.navigate_home_page)

    def navigate_home_page(self):
        """
        Navigate to home page
        :return:
        """
        info_page = HomePage(self)
        info_page.place(in_=self, x=0, y=0, relwidth=1, relheight=1)
        info_page.show()
class TransactionPage(Page):
    """Page for sending an ETH or ERC-20 transaction.

    Shows the balance for the currently selected token, plus entries for the
    destination address, amount and passphrase.
    """

    def __init__(self, *args, **kwargs):
        Page.__init__(self, *args, **kwargs)
        self.configuration = Configuration().load_configuration()
        self.api = WalletAPI()
        self.tokens = self.api.list_tokens(self.configuration)
        self.eth_balance, _ = self.api.get_balance(self.configuration)

        def change_token(token):
            """Refresh the displayed balance for the selected token."""
            if token == 'ETH':
                self.eth_balance, _ = self.api.get_balance(self.configuration)
            else:
                self.eth_balance, _ = self.api.get_balance(self.configuration, token)
            balance.set(str(self.eth_balance) + ' ' + token)

        token_symbol = StringVar()
        token_symbol.set('ETH')
        balance = StringVar()
        balance.set(str(self.eth_balance) + ' ' + token_symbol.get())
        # Token selector drop-down.
        mb = Menubutton(self,
                        width=60,
                        textvariable=token_symbol,
                        relief=RAISED)
        # BUG FIX: the original called mb.grid() before mb.pack(). Mixing the
        # grid and pack geometry managers inside the same master raises a
        # TclError; every other widget on this page uses pack, so only pack
        # is used here.
        mb.menu = Menu(mb, tearoff=0)
        mb["menu"] = mb.menu
        mb.menu.add_radiobutton(label="ETH",
                                variable=token_symbol,
                                value='ETH',
                                command=lambda: change_token(token_symbol.get()))
        for token in self.tokens:
            mb.menu.add_radiobutton(label=token,
                                    variable=token_symbol,
                                    value=token,
                                    command=lambda: change_token(token_symbol.get()))
        mb.pack()
        label = Label(self,
                      textvariable=balance,
                      width=60,
                      font=(None, 30))
        label.pack()
        lbl_address = Label(self,
                            text="To address:",
                            width=60,
                            font=(None, 20))
        lbl_address.pack()
        entry_address = Entry(self,
                              font=(None, 20),
                              width=60,
                              justify=CENTER)
        entry_address.pack()
        lbl_amount = Label(self,
                           text="Amount:",
                           width=60,
                           font=(None, 20))
        lbl_amount.pack()
        entry_amount = Entry(self,
                             font=(None, 20),
                             width=60,
                             justify=CENTER)
        entry_amount.pack()
        lbl_passphrase = Label(self,
                               text="Passphrase:",
                               width=60,
                               font=(None, 20))
        lbl_passphrase.pack()
        entry_passphrase = Entry(self,
                                 font=(None, 20),
                                 width=60,
                                 justify=CENTER)
        entry_passphrase.pack()
        btn_send = Button(self,
                          text="Send",
                          width=60,
                          font=(None, 16),
                          command=lambda: self.send_transaction(entry_address.get(),
                                                                entry_amount.get(),
                                                                entry_passphrase.get(),
                                                                token_symbol.get()))
        btn_send.pack()
        btn_back = Button(self,
                          text="Back",
                          width=60,
                          font=(None, 16),
                          command=self.navigate_home_page)
        btn_back.pack()

    def navigate_home_page(self):
        """
        Navigate to home page
        :return:
        """
        info_page = HomePage(self)
        info_page.place(in_=self, x=0, y=0, relwidth=1, relheight=1)
        info_page.show()

    def send_transaction(self, to, value, password, token):
        """
        Send transaction on a background thread so the UI stays responsive.
        :param to: destination address
        :param value: amount to send
        :param password: wallet passphrase
        :param token: token symbol; 'ETH' selects a plain ether transfer
        :return:
        """
        if token == 'ETH':
            tx_thread = TransactionThread(configuration=self.configuration,
                                          password=password,
                                          to=to,
                                          value=value,
                                          token=None)
        else:
            tx_thread = TransactionThread(configuration=self.configuration,
                                          password=password,
                                          to=to,
                                          value=value,
                                          token=token)
        tx_thread.start()
class TransactionThread(threading.Thread):
    """Worker thread that submits an ETH or ERC-20 transaction and pops an
    info dialog with the mining fee once it completes.

    NOTE(review): ``messagebox.showinfo`` is called from this worker thread;
    Tkinter is not thread-safe, so GUI calls should normally be marshalled to
    the main thread (e.g. via ``widget.after``) — confirm this behaves on the
    target platform.
    """

    def __init__(self, configuration, password, to, value, token=None):
        threading.Thread.__init__(self)
        self.api = WalletAPI()
        self.configuration = configuration
        self.password = password
        self.to = to
        self.value = value
        # None means plain ETH transfer; otherwise an ERC-20 token symbol.
        self.token = token

    def run(self):
        if self.token is None:
            # send ETH transaction
            tx_hash, tx_cost_eth = self.api.send_transaction(self.configuration,
                                                             self.password,
                                                             self.to,
                                                             self.value)
        else:
            # send erc20 transaction
            tx_hash, tx_cost_eth = self.api.send_transaction(self.configuration,
                                                             self.password,
                                                             self.to,
                                                             self.value,
                                                             self.token)
        messagebox.showinfo("Transaction mined!",
                            "Transaction was mined for " + str(tx_cost_eth) + "ETH fee.")
class AddTokenPage(Page):
    """Page for registering a new ERC-20 token contract (symbol + address)."""

    def __init__(self, *args, **kwargs):
        Page.__init__(self, *args, **kwargs)
        self.configuration = Configuration().load_configuration()
        self.api = WalletAPI()
        lbl_symbol = Label(self,
                           text="Contract's symbol:",
                           width=60,
                           font=(None, 20))
        lbl_symbol.pack()
        entry_symbol = Entry(self,
                             font=(None, 20),
                             width=60,
                             justify=CENTER)
        entry_symbol.pack()
        lbl_address = Label(self,
                            text="Contract's address:",
                            width=60,
                            font=(None, 20))
        lbl_address.pack()
        entry_address = Entry(self,
                              font=(None, 20),
                              width=60,
                              justify=CENTER)
        entry_address.pack()
        # NOTE: both buttons reuse the same local name btn_back; harmless since
        # each is packed before the name is rebound.
        btn_back = Button(self,
                          text="Add",
                          font=(None, 16),
                          width=60,
                          command=lambda: self.add_token(entry_symbol.get(), entry_address.get()))
        btn_back.pack()
        btn_back = Button(self,
                          text="Back",
                          font=(None, 16),
                          width=60,
                          command=self.navigate_home_page)
        btn_back.pack()

    def navigate_home_page(self):
        """
        Navigate to home page
        :return:
        """
        info_page = HomePage(self)
        info_page.place(in_=self, x=0, y=0, relwidth=1, relheight=1)
        info_page.show()

    def add_token(self, symbol, contract):
        """
        Add new token and navigate to home page
        :param symbol: token symbol
        :param contract: contract's address
        :return:
        """
        self.api.add_contract(self.configuration, symbol, contract)
        info_page = HomePage(self)
        info_page.place(in_=self, x=0, y=0, relwidth=1, relheight=1)
        info_page.show()
class HomePage(Page):
    """Main wallet page: shows the address and balance of the selected token
    and offers refresh / copy-address / send-transaction actions."""

    def __init__(self, *args, **kwargs):
        Page.__init__(self, *args, **kwargs)
        self.configuration = Configuration().load_configuration()
        self.api = WalletAPI()
        self.tokens = self.api.list_tokens(self.configuration)
        self.eth_balance, self.address = self.api.get_balance(self.configuration)

        def refresh():
            """Re-query the balance for the currently selected token."""
            change_token(token_symbol.get())

        def change_token(token):
            """Update balance/address display for the chosen token."""
            if token == 'ETH':
                self.eth_balance, self.address = self.api.get_balance(self.configuration)
            else:
                self.eth_balance, self.address = self.api.get_balance(self.configuration, token)
            balance.set(str(self.eth_balance) + ' ' + token)

        token_symbol = StringVar()
        token_symbol.set('ETH')
        balance = StringVar()
        balance.set(str(self.eth_balance) + ' ' + token_symbol.get())
        # Token selector drop-down.
        mb = Menubutton(self,
                        width=60,
                        textvariable=token_symbol,
                        relief=RAISED)
        # BUG FIX: the original called mb.grid() before mb.pack(). Mixing the
        # grid and pack geometry managers inside the same master raises a
        # TclError; every other widget on this page uses pack, so only pack
        # is used here.
        mb.menu = Menu(mb, tearoff=0)
        mb["menu"] = mb.menu
        mb.menu.add_radiobutton(label="ETH",
                                variable=token_symbol,
                                value='ETH',
                                command=lambda: change_token(token_symbol.get()))
        for token in self.tokens:
            mb.menu.add_radiobutton(label=token,
                                    variable=token_symbol,
                                    value=token,
                                    command=lambda: change_token(token_symbol.get()))
        mb.menu.add_radiobutton(label="Add new token ...",
                                command=self.navigate_add_token_page)
        mb.pack()
        label_address_lbl = Label(self,
                                  text='Address:',
                                  width=60,
                                  font=(None, 10, "bold"))
        label_address_lbl.pack()
        label_address = Label(self,
                              text=self.address,
                              width=60,
                              font=(None, 10))
        label_address.pack()
        label_balance = Label(self,
                              textvariable=balance,
                              width=60,
                              font=(None, 30))
        label_balance.pack()
        btn_refresh = Button(self,
                             text="Refresh",
                             command=refresh,
                             width=60,
                             font=(None, 16))
        btn_refresh.pack()
        btn_copy_address = Button(self,
                                  text="Copy address",
                                  command=self.copy_address,
                                  width=60,
                                  font=(None, 16))
        btn_copy_address.pack()
        btn_send_transaction = Button(self,
                                      text="Send Transaction",
                                      command=self.navigate_transaction_page,
                                      width=60,
                                      font=(None, 16))
        btn_send_transaction.pack()

    def navigate_transaction_page(self):
        """
        Navigate to transaction page
        :return:
        """
        transaction_page = TransactionPage(self)
        transaction_page.place(in_=self, x=0, y=0, relwidth=1, relheight=1)
        transaction_page.show()

    def navigate_add_token_page(self):
        """
        Navigate to the add-token page
        :return:
        """
        add_token_page = AddTokenPage(self)
        add_token_page.place(in_=self, x=0, y=0, relwidth=1, relheight=1)
        add_token_page.show()

    def copy_address(self):
        """Put the wallet address on the clipboard."""
        self.clipboard_clear()  # clear clipboard contents
        self.clipboard_append(self.address)  # append new value to clipboard
class MainView(Frame):
    """Root frame: shows the home page when a wallet already exists,
    otherwise the wallet-creation page."""

    def __init__(self, *args, **kwargs):
        Frame.__init__(self, *args, **kwargs)
        self.configuration = Configuration()
        self.api = WalletAPI()
        self.wallet = None
        # Pick the initial page based on whether a configuration exists.
        page_cls = HomePage if self.configuration.is_configuration() else NewWalletPage
        screen = page_cls(self)
        container = Frame(self)
        container.pack(side="top", fill="both", expand=True)
        screen.place(in_=container, x=0, y=0, relwidth=1, relheight=1)
        screen.show()
if __name__ == "__main__":
    # Build the Tk root window and start the wallet UI event loop.
    root = Tk()
    root.title("Ethereum wallet")
    main = MainView(root)
    main.pack(side="top", fill="both", expand=True)
    root.wm_geometry("300x400")  # initial window size
    root.mainloop()
| 15,529 | 4,073 |
from abc import abstractmethod, ABCMeta
import pickle
import keras
# from self.keras_module.models import Model
# from self.keras_module.layers import InputLayer
# from self.keras_module.layers import Dense, Conv2D
from palmnet.core.palminizable import Palminizable
from palmnet.utils import get_idx_last_layer_of_class, get_idx_first_layer_of_class
from skluc.utils import log_memory_usage, logger
from collections import defaultdict
import pathlib
class LayerReplacer(metaclass=ABCMeta):
    """Abstract base for rebuilding a keras model with some layers replaced by
    compressed equivalents.

    Subclasses decide how Dense/Conv2D layers are factorized by implementing
    ``_apply_replacement``, ``_replace_dense``, ``_replace_conv2D`` and
    ``_set_weights_to_layer``. The fit/transform split mirrors sklearn:
    ``fit`` computes the per-layer compressions, ``transform`` rebuilds the
    model graph using them.
    """

    def __init__(self, keep_last_layer=False, keep_first_layer=False, dct_name_compression=None, path_checkpoint_file=None, only_dense=False, keras_module=keras, multi_step=False):
        # keras_module is injected so tf.keras or standalone keras can be used.
        self.keras_module = keras_module
        self.keep_last_layer = keep_last_layer
        self.keep_first_layer = keep_first_layer
        self.only_dense = only_dense
        # Maps layer name -> compression result (None when layer untouched).
        self.dct_name_compression = dct_name_compression if dct_name_compression is not None else dict()
        self.path_checkpoint_file = path_checkpoint_file  # type: pathlib.Path
        # Bookkeeping: which layers were replaced and the old<->new name maps.
        self.dct_bool_replaced_layers = defaultdict(lambda: False)
        self.dct_old_name_new_name = defaultdict(lambda: None)
        self.dct_new_name_old_name = defaultdict(lambda: None)
        self._init_layer_classes()  # set the classes to be recognised to replace
        self.multi_step = multi_step

    def __refresh_and_apply_layer_to_input(self, layer, layer_inputs):
        """Clone `layer` from its config, apply it to `layer_inputs`, and copy
        the original weights onto the clone. Returns (output_tensor, clone)."""
        new_fresh_layer = layer.__class__(**layer.get_config())
        old_layer_weights = layer.get_weights()
        x = new_fresh_layer(layer_inputs)
        new_fresh_layer.set_weights(old_layer_weights)
        return x, new_fresh_layer

    @abstractmethod
    def _apply_replacement(self, layer):
        """Compute and return the compression data for one layer (or None)."""
        pass

    def load_dct_name_compression(self):
        """Load the layer-name -> compression dict from the checkpoint file."""
        with open(str(self.path_checkpoint_file), 'rb') as rb_file:
            self.dct_name_compression = pickle.load(rb_file)
        # Backward compatibility: older checkpoints stored a Palminizable object.
        if type(self.dct_name_compression) == Palminizable:
            self.dct_name_compression = self.dct_name_compression.sparsely_factorized_layers

    def save_dct_name_compression(self):
        """Persist the compression dict to the checkpoint file, if configured."""
        if self.path_checkpoint_file is None:
            return
        with open(str(self.path_checkpoint_file), 'wb') as wb_file:
            pickle.dump(self.dct_name_compression, wb_file)

    def fit_transform(self, model):
        """Convenience: fit on `model` then return the transformed model."""
        self.fit(model)
        return self.transform(model)

    def fit_one_layer(self, layer):
        """Compute (and checkpoint) the compression for one layer, skipping
        layers already present in the dict."""
        if layer.name not in self.dct_name_compression:
            dct_replacement = self._apply_replacement(layer)
            # should return dict in most case but need to be backward compatible with older implementation of PALM
            self.dct_name_compression[layer.name] = dct_replacement
            self.save_dct_name_compression()
        else:
            logger.warning("skip layer {} because already in dict".format(layer.name))

    def fit(self, model):
        """Compute compressions for every layer of `model`."""
        for layer in model.layers:
            self.fit_one_layer(layer)

    def _init_layer_classes(self):
        # The layer classes recognised for replacement; taken from the injected
        # keras module so isinstance checks match the model's own classes.
        self.dense_layer_class = self.keras_module.layers.Dense
        self.conv_layer_class = self.keras_module.layers.Conv2D

    def transform_one_layer(self, layer, idx_layer, layer_inputs):
        """Rebuild one layer onto `layer_inputs`, replacing it with its
        compressed version when available and allowed; returns the output
        tensor of the (possibly replaced) layer."""
        if not self.multi_step:
            sparse_factorization = self.dct_name_compression[layer.name]
        else:
            try:
                sparse_factorization = self.dct_name_compression[layer.name]
            except KeyError:
                # In multi-step mode replaced layers are stored under
                # "<old>_-_<new>" composite names; fall back to a prefix match.
                # print(layer.name)
                # print(self.dct_name_compression.keys())
                # print([k for k, v in self.dct_name_compression.items() if k.startswith(layer.name + "_-_")])
                sparse_factorization = next(v for k, v in self.dct_name_compression.items() if k.startswith(layer.name + "_-_"))
                # exit()
        # adapted to the palminized case... not very clean but OK
        bool_find_modif = (sparse_factorization != None and sparse_factorization != (None, None))
        logger.info('Prepare layer {}'.format(layer.name))
        # Decide whether this layer must be kept untouched.
        bool_only_dense = not isinstance(layer, self.keras_module.layers.Dense) and self.only_dense
        bool_last_layer = idx_layer == self.idx_last_dense_layer and self.keep_last_layer
        bool_first_layer = idx_layer == self.idx_first_conv_layer and self.keep_first_layer
        keep_this_layer = bool_only_dense or bool_last_layer or bool_first_layer
        if bool_find_modif and not keep_this_layer:
            # if there is a replacement available and not (it is the last layer and we want to keep it as is)
            # create new layer
            if isinstance(layer, self.dense_layer_class):
                logger.debug("Dense layer treatment")
                replacing_layer, replacing_weights, bool_modified = self._replace_dense(layer, sparse_factorization)
            elif isinstance(layer, self.conv_layer_class):
                logger.debug("Conv2D layer treatment")
                replacing_layer, replacing_weights, bool_modified = self._replace_conv2D(layer, sparse_factorization)
            else:
                raise ValueError("Unsupported layer class")
            if bool_modified:  # then replace layer with compressed layer
                # Record the replacement under a composite "<old>_-_<new>" name.
                try:
                    replacing_layer.name = '{}_-_{}'.format(layer.name, replacing_layer.name)
                except AttributeError:
                    # Some keras versions expose name as a read-only property.
                    logger.warning("Found layer with property name unsettable. try _name instead.")
                    replacing_layer._name = '{}_-_{}'.format(layer.name, replacing_layer.name)
                x = replacing_layer(layer_inputs)
                self.dct_old_name_new_name[layer.name] = replacing_layer.name
                self.dct_new_name_old_name[replacing_layer.name] = layer.name
                self.dct_bool_replaced_layers[layer.name] = True
                self._set_weights_to_layer(replacing_layer, replacing_weights)
                logger.info('Layer {} modified into {}'.format(layer.name, replacing_layer.name))
            else:
                x, new_fresh_layer = self.__refresh_and_apply_layer_to_input(layer, layer_inputs)
                logger.info('Layer {} unmodified'.format(new_fresh_layer.name))
        else:
            x, new_fresh_layer = self.__refresh_and_apply_layer_to_input(layer, layer_inputs)
            # x = layer(layer_inputs)
            logger.info('Layer {} unmodified'.format(new_fresh_layer.name))
        return x

    def prepare_transform(self, model):
        """Normalize `model`, build the inter-layer wiring dict and compute
        the indices of the layers to keep; returns (model, network_dict)."""
        if not isinstance(model.layers[0], self.keras_module.layers.InputLayer):
            model = self.keras_module.models.Model(inputs=model.input, outputs=model.output)
        # else:
        #     # this is important because we also want the InputLayer to be reinitialized
        #     input_shape = model.input_shape
        #     model.layers.pop(0)
        #     newInput = self.keras_module.layers.Input(shape=input_shape[1:])
        #     newOutput = model(newInput)
        #     model2 = self.keras_module.models.Model(input=newInput, output=newOutput)
        network_dict = {'input_layers_of': defaultdict(lambda: []), 'new_output_tensor_of': defaultdict(lambda: [])}
        input_shape = model.input_shape
        newInput = self.keras_module.layers.Input(shape=input_shape[1:])
        # Set the output tensor of the input layer
        network_dict['new_output_tensor_of'].update(
            {model.layers[0].name: newInput})
        for i, layer in enumerate(model.layers):
            # each layer is set as `input` layer of all its outbound layers
            for node in layer._outbound_nodes:
                outbound_layer_name = node.outbound_layer.name
                network_dict['input_layers_of'][outbound_layer_name].append(layer.name)
        # Indices of the last Dense / first Conv2D layers (only meaningful when
        # the corresponding keep_* flag is set); shifted by one because
        # transform() iterates model.layers[1:].
        self.idx_last_dense_layer = get_idx_last_layer_of_class(model, self.keras_module.layers.Dense) if self.keep_last_layer else -1
        self.idx_last_dense_layer -= 1
        self.idx_first_conv_layer = get_idx_first_layer_of_class(model, self.keras_module.layers.Conv2D) if self.keep_first_layer else -1
        self.idx_first_conv_layer -= 1
        return model, network_dict

    def transform(self, model):
        """Rebuild `model` layer by layer, substituting compressed layers, and
        return the new keras Model."""
        model, network_dict = self.prepare_transform(model)
        for i, layer in enumerate(model.layers[1:]):
            log_memory_usage("Before layer {}".format(layer.name))
            # get all layers input
            layer_inputs = [network_dict['new_output_tensor_of'][curr_layer_input] for curr_layer_input in network_dict['input_layers_of'][layer.name]]
            if len(layer_inputs) == 1:
                layer_inputs = layer_inputs[0]
            x = self.transform_one_layer(layer, i, layer_inputs)
            network_dict['new_output_tensor_of'].update({layer.name: x})
        # model = self.keras_module.models.Model(inputs=model.inputs, outputs=x)
        model = self.keras_module.models.Model(inputs=network_dict['new_output_tensor_of'][model.layers[0].name], outputs=x)
        return model

    def have_been_replaced(self, layer_name):
        """True if the layer called `layer_name` was replaced by transform()."""
        return self.dct_bool_replaced_layers[layer_name]

    def get_replaced_layer_name(self, new_layer_name):
        """Old layer name for a replacement layer's name (None if unknown)."""
        return self.dct_new_name_old_name[new_layer_name]

    def get_replacing_layer_name(self, old_layer_name):
        """Replacement layer's name for an old layer name (None if unknown)."""
        return self.dct_old_name_new_name[old_layer_name]

    @abstractmethod
    def _replace_conv2D(self, layer, dct_compression):
        """
        Implementation of this method should return the triplet:
        replacing_weights: list of np.ndarray
        replacing_layer: self.keras_module.layers.Layer
        bool_replaced: tells if the layer should be replaced
        :param layer:
        :param dct_compression:
        :return:
        """
        pass

    @abstractmethod
    def _replace_dense(self, layer, dct_compression):
        """
        Implementation of this method should return the triplet:
        replacing_weights: list of np.ndarray
        replacing_layer: self.keras_module.layers.Layer
        bool_replaced: tells if the layer should be replaced
        :param layer:
        :param dct_compression:
        :return:
        """
        pass

    @abstractmethod
    def _set_weights_to_layer(self, replacing_layer, replacing_weights):
        """Assign the replacement weights onto the freshly built layer."""
        pass
| 10,273 | 3,135 |
'''
Created on Jun 29, 2017
@author: yglazner
'''
from kivy.uix.widget import Widget
from kivy.properties import *
from kivy.uix.boxlayout import BoxLayout
from kivy.base import runTouchApp
from kivy.uix.slider import Slider
# No-op expression; presumably here to keep the Slider import from being
# flagged as unused (Kivy kv rules may rely on the import side effect) —
# TODO confirm before removing.
Slider
class EmotionFeedBack(Widget):
    '''
    EmotionFeedBack - a widget that lets the user express its emotion by swiping up or down
    '''
    # Current emotion level in [0, 1]; starts at neutral.
    level = NumericProperty(0.5)
    orientation = OptionProperty('horizontal', options=(
        'horizontal', 'vertical'))
    sources = ListProperty([])

    def __init__(self, sources=None, **kw):
        '''
        :param sources: optional list of image sources; a fresh list is used
            when omitted. (BUG FIX: the original used a mutable default
            argument ``sources=[]``, shared across all instances.)
        '''
        self.sources = [] if sources is None else sources
        super(EmotionFeedBack, self).__init__(**kw)

    @property
    def vertical(self):
        """True when the widget tracks vertical swipes."""
        return self.orientation == 'vertical'

    def on_touch_down(self, touch):
        # Remember where the touch started so moves can be measured from it.
        touch.ud['pos'] = touch.pos
        return super(EmotionFeedBack, self).on_touch_down(touch)

    def on_touch_move(self, touch):
        # Translate swipe distance along the active axis into a level change,
        # scaled by the widget size, then clamp to [0, 1].
        sx, sy = touch.ud['pos']
        touch.ud['pos'] = x, y = touch.pos
        ts, t = (sy, y) if self.vertical else (sx, x)
        size = self.height if self.vertical else self.width
        change = (t - ts) * 20.0 / size
        self.level += change
        self.level = max(min(self.level, 1.0), 0)
        print(self.level)  # debug output
        return super(EmotionFeedBack, self).on_touch_move(touch)
if __name__ == '__main__':
    # Quick manual test: run the widget standalone in a Kivy window.
    runTouchApp(EmotionFeedBack())
import unittest
from .results import NavInstruction, Position, Ship
class TestNavInstruction(unittest.TestCase):
    """Tests for parsing a NavInstruction from its string form."""

    def test_nav_instruction_from_str(self):
        parsed = NavInstruction.from_str('F10')
        self.assertEqual('F', parsed.direction)
        self.assertEqual(10, parsed.magnitude)
class TestPosition(unittest.TestCase):
    """Tests for the Position value object and its movement arithmetic."""

    def _get_position(self):
        """Fresh Position fixture shared by several tests."""
        return Position(3, 5)

    def test_init(self):
        self.assertEqual(Position(3, 5), self._get_position())

    def test_init_default(self):
        origin = Position()
        self.assertEqual(0, origin.x)
        self.assertEqual(0, origin.y)

    def test_move_sequence(self):
        pos = self._get_position()
        for step in (NavInstruction('N', 3), NavInstruction('W', 8)):
            pos.move(step)
        self.assertEqual(Position(-5, 8), pos)

    def test_position_addition(self):
        self.assertEqual(Position(4, 6), Position(1, 2) + Position(3, 4))
class TestShipNavigation(unittest.TestCase):
    """End-to-end navigation tests for the Ship model."""

    @property
    def nav_instructions(self):
        """The example route, parsed into NavInstruction objects."""
        raw = ('F10', 'N3', 'F7', 'R90', 'F11')
        return [NavInstruction.from_str(item) for item in raw]

    def test_ship_navigation(self):
        vessel = Ship()
        vessel.navigate(self.nav_instructions)
        self.assertEqual(Position(17, -8), vessel.position)

    def test_ship_waypoint_navigation(self):
        vessel = Ship(wpt_nav=True, init_wpt=Position(10, 1))
        vessel.navigate(self.nav_instructions)
        self.assertEqual(Position(214, -72), vessel.position)
| 1,793 | 621 |
import sys
import numpy as np
A = int(sys.argv[2])  # start of the tree threshold range (per-mille units)
B = int(sys.argv[3])  # end of the tree threshold range (per-mille units)
N = int(sys.argv[1])  # simulation repeat number
threshold_list = [i / 1000.0 for i in range(A, B)]

# Compare the clustering result with the nwk-tree grouping at every threshold.
for threshold in threshold_list:
    branch_length_threshold = float(threshold)
    # Read the individuals grouped by the nwk tree; every second line lists
    # the members of one group, ';'-separated with a trailing separator.
    group_divide_name = "simu_%s_grouped_by_nwk" % str(branch_length_threshold)
    indi_divided = []
    line_num = 0
    indi_num = 0
    with open(group_divide_name, "r") as group_divide:
        for line in group_divide:
            line_num += 1
            if (line_num % 2) == 0:
                indi_list = line.strip().split(";")
                indi_list.pop()  # drop the empty field after the trailing ';'
                indi_num += len(indi_list)
                indi_divided.append(indi_list)
    # Cross-tabulate each cluster (my method) against each nwk group.
    group_cluster_name = "simu%s_clustering.star_cluster_result.txt" % str(N)
    divide_evaluate_name = "simu%s_clustering_%s_evaluate.txt" % (str(N), str(branch_length_threshold))
    with open(group_cluster_name, "r") as group_cluster, \
            open(divide_evaluate_name, "w") as divide_evaluate:
        n = 0
        for line2 in group_cluster:
            n += 1
            col = line2.strip().split("\t")
            output = [col[0], col[1]]
            indi_list2 = col[2].split(";")
            if n == 1:
                # Header row: one column per nwk group.
                # BUG FIX: the last column header used float division and was
                # written as e.g. "7.0"; integer division keeps all headers
                # as plain integers.
                header_cols = [str(j) for j in range(1, line_num // 2 + 1)]
                divide_evaluate.write("group\tindi_no\t" + "\t".join(header_cols) + "\n")
            for group in indi_divided:
                overlap = len(set(indi_list2).intersection(set(group)))
                output.append(str(overlap))
            divide_evaluate.write("\t".join(output) + "\n")

# Summarise the per-threshold comparison files into one final table.
method_evaluate_final_name = "simu%s_method_evaluate_final.txt" % str(N)
with open(method_evaluate_final_name, "w") as method_evaluate_final:
    col_name_write = False
    for threshold in threshold_list:
        branch_length_threshold = float(threshold)
        divide_evaluate_name = "simu%s_clustering_%s_evaluate.txt" % (str(N), str(branch_length_threshold))
        with open(divide_evaluate_name, "r") as divide_evaluate:
            group_divided_evaluation = [line.strip().split("\t") for line in divide_evaluate]
        group_divided_evaluation = np.array(group_divided_evaluation)  # table of group overlaps
        nrow = int(group_divided_evaluation.shape[0])
        # Write the two header rows once: each group label/size appears twice,
        # once for the row-wise rate and once for the column-wise rate.
        if not col_name_write:
            group_serial_number_2 = []
            for serial in group_divided_evaluation[1:, 0]:
                group_serial_number_2.extend((serial, serial))
            group_indi_number_2 = []
            for size in group_divided_evaluation[1:, 1]:
                group_indi_number_2.extend((size, size))
            method_evaluate_final.write("\t".join(group_serial_number_2) + "\n")
            method_evaluate_final.write("\t".join(group_indi_number_2) + "\n")
            col_name_write = True
        line_rate = []
        for j in range(1, nrow):
            # Inclusion rate along the row: best-matching nwk group's share.
            row_data = [float(k) for k in group_divided_evaluation[j, 2:]]
            row_data_max = max(row_data)
            row_data_pos = row_data.index(row_data_max)
            row_data_rate = "%.4f" % (row_data_max / sum(row_data))
            # Inclusion rate along the matching column.
            col_data = [float(k) for k in group_divided_evaluation[1:, row_data_pos + 2]]
            col_data_max = max(col_data)
            col_data_rate = "%.4f" % (col_data_max / sum(col_data))
            line_rate.append(row_data_rate)
            line_rate.append(col_data_rate)
        method_evaluate_final.write("\t".join(line_rate) + "\n")
| 4,507 | 1,569 |
from celery import Celery
# Celery application wired to a Redis broker at host "redis".
app = Celery(__name__, broker="redis://redis//")
# Optional per-task routing, kept for reference:
# app.conf.task_routes = {
# 'worker_a.pulse': {'queue': 'worker_a'}
# }
@app.task
def pulse(i):
    """Celery task: print a pulse message and return the input offset by 1000."""
    message = f"Pulse: {i} ({__name__})"
    print(message)
    return i + 1000
| 236 | 102 |
# Main file which solve equation
import processor
# Interactively read a linear system (A·x = b) and print its solution.
coefficient_rows = int(input("Please enter coefficient matrix row numbers: "))
coefficient_columns = int(input("Please enter coefficient matrix column numbers: "))
print("Please enter coefficients in one row then another one: ")
# One input line per matrix row, whitespace-separated integers.
coefficient_matrix = [
    [int(value) for value in input().split()]
    for _ in range(coefficient_rows)
]
variables_numbers = int(input("Please enter variables numbers: "))
variables_matrix = [
    input(f"Please enter {index + 1} variable's symbol: ")
    for index in range(variables_numbers)
]
# Constants are stored as a column vector (list of 1-element rows).
constants_matrix = [
    [int(input(f"Please enter {index + 1} constant: "))]
    for index in range(coefficient_rows)
]
print('-' * 10)
print("Answer: ")
answer = processor.equation_solver(coefficient_matrix, constants_matrix)
processor.answer_printer(variables_matrix, answer)
| 960 | 279 |
# Copyright 2019-2020 Not Just A Toy Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import re
import uuid
import base64
import binascii
import datetime
import typing as ty
import rfc3987
import rfc3339
from strict_rfc3339 import rfc3339_to_timestamp, InvalidRFC3339Error
from falcon_heavy.utils import force_str, force_bytes
from .base import AbstractConvertible, BaseType, ValidationResult, Messages
from .primitive import StringType, IntegerType
from .enums import ConvertibleEntity
from .exceptions import SchemaError
from .errors import Error
from .path import Path
from .utils import is_file_like
# Public API of this module.
__all__ = (
    'DateType',
    'DateTimeType',
    'RegexType',
    'URIType',
    'EmailType',
    'Int32Type',
    'Int64Type',
    'UUIDType',
    'ByteType',
    'BinaryType',
)
class DateType(AbstractConvertible[ty.Union[str, datetime.date]]):
    """Date type.

    Converts an RFC3339 full-date string into a python ``date`` object on
    request conversion, and serializes ``date`` objects back to ISO strings
    on response conversion.

    :param subtype: basic string converter applied before date parsing
    """

    MESSAGES: ty.ClassVar[Messages] = {
        'format': "Is not a valid RFC3339 full-date"
    }

    __slots__ = ('subtype', )

    def __init__(self, subtype: StringType, **kwargs: ty.Any) -> None:
        super().__init__(**kwargs)
        self.subtype = subtype

    def convert(
            self,
            value: ty.Any,
            path: Path,
            *args: ty.Any,
            entity: ty.Optional[ConvertibleEntity] = None,
            **context: ty.Any
    ) -> ty.Optional[ty.Union[str, datetime.date]]:
        # Serialize date objects for responses. `isinstance` already rules
        # out None, so the former `value is not None` check was redundant.
        if entity == ConvertibleEntity.RESPONSE and isinstance(value, datetime.date):
            value = value.isoformat()

        result = self.subtype.convert(value, path, *args, entity=entity, **context)

        if entity == ConvertibleEntity.RESPONSE:
            return result

        if result is None:
            return None

        try:
            # RFC3339 full-date: YYYY-MM-DD.
            return datetime.datetime.strptime(value, '%Y-%m-%d').date()
        except ValueError:
            raise SchemaError(Error(path, self.messages['format']))
class DateTimeType(AbstractConvertible[ty.Union[str, datetime.datetime]]):
    """Datetime type.

    Converts an RFC3339 date-time string into a python ``datetime`` object
    on request conversion, and serializes ``datetime`` objects back to
    RFC3339 strings on response conversion.

    :param subtype: basic string converter applied before parsing
    """

    MESSAGES: ty.ClassVar[Messages] = {
        'format': "Is not a valid RFC3339 date-time"
    }

    __slots__ = ('subtype', )

    def __init__(self, subtype: StringType, **kwargs: ty.Any) -> None:
        super().__init__(**kwargs)
        self.subtype = subtype

    def convert(
            self,
            value: ty.Any,
            path: Path,
            *args: ty.Any,
            entity: ty.Optional[ConvertibleEntity] = None,
            **context: ty.Any
    ) -> ty.Optional[ty.Union[str, datetime.datetime]]:
        # Serialize datetime objects for responses. `isinstance` already
        # rules out None, so the former `value is not None` check was redundant.
        if entity == ConvertibleEntity.RESPONSE and isinstance(value, datetime.datetime):
            value = rfc3339.rfc3339(value)

        result = self.subtype.convert(value, path, *args, entity=entity, **context)

        if entity == ConvertibleEntity.RESPONSE:
            return result

        if result is None:
            return None

        try:
            # Parse via POSIX timestamp; yields a naive local datetime.
            return datetime.datetime.fromtimestamp(rfc3339_to_timestamp(value))
        except InvalidRFC3339Error:
            raise SchemaError(Error(path, self.messages['format']))
class RegexType(AbstractConvertible[ty.Union[str, ty.Pattern]]):
    """Regex type.

    Compiles a request string into a python regular expression object;
    responses pass straight through the basic string converter.

    :param subtype: basic converter
    """

    MESSAGES: ty.ClassVar[Messages] = {
        'format': "Is not a valid regular expression"
    }

    __slots__ = ('subtype', )

    def __init__(self, subtype: StringType, **kwargs: ty.Any) -> None:
        super(RegexType, self).__init__(**kwargs)
        self.subtype = subtype

    def convert(
            self,
            value: ty.Any,
            path: Path,
            *args: ty.Any,
            entity: ty.Optional[ConvertibleEntity] = None,
            **context: ty.Any
    ) -> ty.Optional[ty.Union[str, ty.Pattern]]:
        converted = self.subtype.convert(value, path, *args, entity=entity, **context)
        # Responses (and missing values) are returned unchanged.
        if entity == ConvertibleEntity.RESPONSE or converted is None:
            return converted
        try:
            return re.compile(converted)
        except (TypeError, re.error):
            raise SchemaError(Error(path, self.messages['format']))
class URIType(StringType):
    """String type validated as a URI per RFC3987."""

    MESSAGES: ty.ClassVar[Messages] = {
        'format': "Is not a valid URI according to RFC3987"
    }

    __slots__ = ()

    def validate_format(self, value: str, *args: ty.Any, **context: ty.Any) -> ValidationResult:
        # rfc3987 raises ValueError on malformed URIs.
        try:
            rfc3987.parse(value, rule='URI')
        except ValueError:
            return self.messages['format']
        else:
            return None
# Pragmatic local@domain.tld check; considerably looser than full RFC5322.
EMAIL_PATTERN = re.compile(r'^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$')
class EmailType(StringType):
    """String type validated as an email address."""

    MESSAGES: ty.ClassVar[Messages] = {
        'format': "Is not a valid email address according to RFC5322"
    }

    __slots__ = ()

    def validate_format(self, value: str, *args: ty.Any, **context: ty.Any) -> ValidationResult:
        if EMAIL_PATTERN.match(value) is None:
            return self.messages['format']
        return None
class Int32Type(IntegerType):
    """Integer type constrained to the signed 32-bit range."""

    MESSAGES: ty.ClassVar[Messages] = {
        'format': "Is not a valid Int32"
    }

    __slots__ = ()

    def validate_format(self, value: int, *args: ty.Any, **context: ty.Any) -> ValidationResult:
        # Signed 32-bit bounds: [-2**31, 2**31 - 1].
        if -2147483648 <= value <= 2147483647:
            return None
        return self.messages['format']
class Int64Type(IntegerType):
    """Integer type constrained to the signed 64-bit range."""

    MESSAGES: ty.ClassVar[Messages] = {
        'format': "Is not a valid Int64"
    }

    __slots__ = ()

    def validate_format(self, value: int, *args: ty.Any, **context: ty.Any) -> ValidationResult:
        # Signed 64-bit bounds: [-2**63, 2**63 - 1].
        if -9223372036854775808 <= value <= 9223372036854775807:
            return None
        return self.messages['format']
# Lowercase hyphenated UUID with an RFC4122 version nibble.
# BUGFIX: the version class was `[1345]`, which rejected valid version-2
# (DCE security) UUIDs; RFC4122 defines versions 1 through 5.
UUID_PATTERN = re.compile(
    '^'
    '[a-f0-9]{8}-'
    '[a-f0-9]{4}-'
    '[1-5][a-f0-9]{3}-'
    '[a-f0-9]{4}'
    '-[a-f0-9]{12}'
    '$'
)
class UUIDType(StringType):
    """String type validated as a UUID."""

    MESSAGES: ty.ClassVar[Messages] = {
        'format': "Is not a valid UUID"
    }

    __slots__ = ()

    def _cast(
            self,
            value: ty.Any,
            path: Path,
            *args: ty.Any,
            entity: ty.Optional[ConvertibleEntity] = None,
            **context: ty.Any
    ) -> ty.Any:
        # Serialize UUID objects for responses; everything else passes through.
        if isinstance(value, uuid.UUID) and entity == ConvertibleEntity.RESPONSE:
            return str(value)
        return value

    def validate_format(self, value: str, *args: ty.Any, **context: ty.Any) -> ValidationResult:
        if UUID_PATTERN.match(value) is None:
            return self.messages['format']
        return None
class ByteType(AbstractConvertible[ty.Union[str, ty.BinaryIO]]):
    """Byte type.

    Base64-encodes outgoing response payloads and decodes incoming request
    payloads into a binary stream.

    :param subtype: basic converter
    """

    MESSAGES: ty.ClassVar[Messages] = {
        'format': "Is not base64 encoded"
    }

    __slots__ = ('subtype', )

    def __init__(self, subtype: StringType, **kwargs: ty.Any) -> None:
        super(ByteType, self).__init__(**kwargs)
        self.subtype = subtype

    def convert(
            self,
            value: ty.Any,
            path: Path,
            *args: ty.Any,
            entity: ty.Optional[ConvertibleEntity] = None,
            **context: ty.Any
    ) -> ty.Optional[ty.Union[str, ty.BinaryIO]]:
        # Responses: encode raw payloads to an ASCII base64 string first.
        if value is not None and entity == ConvertibleEntity.RESPONSE:
            value = force_str(base64.b64encode(force_bytes(value)), encoding='ascii')

        converted = self.subtype.convert(value, path, *args, entity=entity, **context)

        # Requests: decode the validated base64 string into a stream.
        if converted is not None and entity == ConvertibleEntity.REQUEST:
            try:
                return io.BytesIO(base64.b64decode(converted, validate=True))
            except binascii.Error:
                raise SchemaError(Error(path, self.messages['format']))

        return converted
class BinaryType(BaseType[ty.IO]):
    """Binary type: accepts only file-like objects, passed through as-is."""
    MESSAGES: ty.ClassVar[Messages] = {
        'type': "Must be a file-like object"
    }
    __slots__ = ()
    def _check_type(self, value: ty.Any, path: Path, *args: ty.Any, **context: ty.Any) -> bool:
        # Duck-typing check delegated to the shared utility helper.
        return is_file_like(value)
| 9,000 | 2,959 |
# 3x3 matrix stored as nine single-element lists, in row-major order.
valores = [[], [], [],
           [], [], [],
           [], [], []]
# Counters: current value, row/column cursors, flat position, sum of even
# values, third-column sum; `maior` is initialized but never used.
num = linha = coluna = pos = soma = soma_ter_col = maior = 0
titulo = 'Matriz 3x3'
print(titulo.center(50, '='))
for v in range(9):  # the 9 matrix values
    num = int(input(f'Linha[{linha}] Coluna[{coluna}]: '))
    valores[pos].append(num)
    pos += 1
    coluna += 1
    if coluna == 3:  # after the 3rd column
        coluna = 0  # back to the 1st column
        linha += 1  # and move down to the next row
    if num % 2 == 0:
        soma += num
    if coluna == 0:
        # Third column lives at flat positions 2, 5 and 8; this concatenates
        # the three single-element lists — the actual sum is taken below.
        soma_ter_col = valores[2] + valores[5] + valores[8]
print('_'*30)
print(f' {  valores[0] } { valores[1] } { valores[2] } ')
print(f' { valores[3] } { valores[4] } { valores[5] } ')
print(f' { valores[6] } { valores[7] } { valores[8] } ')
print('_'*30)
print(f'-> Soma de todos os pares: {soma}')
print(f'-> Soma da 3ª coluna: {sum(soma_ter_col)}')
print(f'-> Maior nº da 2ª linha: {max(valores[3:6])}')  # largest value of the 2nd row
| 1,008 | 457 |
from datetime import datetime
from airflow.models import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.providers.apache.spark.operators.spark_submit import SparkSubmitOperator
# Spark standalone master the job is submitted to.
spark_master = "spark://spark-master:7077"
# Directory containing the text input read by the word-count job.
input_path = "/usr/local/spark/data/ebook"

now = datetime.now()

with DAG(
        dag_id='spark_word_count',
        schedule_interval=None,  # manual trigger only
        start_date=datetime(now.year, now.month, now.day),
        catchup=False,
        tags=['spark']
) as dag:
    # No-op boundary markers around the Spark task.
    start = DummyOperator(task_id="start")

    spark_job = SparkSubmitOperator(
        application='/usr/local/spark/jobs/word_count_job.py',
        conn_id='spark_local',
        task_id='word_count',
        verbose=1,
        conf={"spark.master": spark_master, "job.local.dir": "/usr/local/spark/data/"},
        application_args=[input_path]
    )

    end = DummyOperator(task_id="end")

    # Pipeline: start -> word_count -> end
    start >> spark_job >> end
| 911 | 302 |
# coding=utf-8
from __future__ import unicode_literals
from .. import Provider as PhoneNumberProvider
class Provider(PhoneNumberProvider):
    """Nigerian phone-number provider.

    Generates numbers in national (0xxx...) and international (234/+234)
    formats using real Nigerian mobile network prefixes.
    """

    formats = (
        # National & Mobile dialing
        '0{{area_code}}#######',
        '0{{area_code}} ### ####',
        '0{{area_code}}-###-####',
        # International parenthesis
        '234{{area_code}}#######',
        '234 {{area_code}} ### ####',
        '234-{{area_code}}-###-####',
        '+234{{area_code}}#######',
        '+234 {{area_code}} ### ####',
        '+234-{{area_code}}-###-####',
    )

    # https://en.wikipedia.org/wiki/Telephone_numbers_in_Nigeria
    mobile_codes = [
        # MTN
        '803',
        '703',
        '903',
        '806',
        '706',
        '813',
        '814',
        '816',
        '810',
        '906',
        '704',
        # Airtel
        '802', '902', '701', '808', '708', '812', '901', '907',
        # Glo
        # BUGFIX: '905' was listed twice, which biased random selection
        # towards that prefix; the duplicate entry has been removed.
        '805', '705', '905', '807', '815', '811',
        # 9Mobile
        '809', '909', '817', '818', '908',
        # Ntel
        '804',
        # Smile
        '702',
        # Multilinks
        '709',
        # Starcomms
        '819',
        # Zoom
        '707',
    ]

    def area_code(self):
        """Return a random Nigerian mobile network prefix."""
        return self.numerify(self.random_element(self.mobile_codes))

    def phone_number(self):
        """Return a fully formatted Nigerian phone number."""
        pattern = self.random_element(self.formats)
        return self.numerify(self.generator.parse(pattern))
| 1,446 | 574 |
import datetime
"""
STATS GENERATOR FOR TURN BASED OR ACTION RPG (ROLE PLAYING GAMES)
By: ROHMAN WIDIYANTO
GitHub: http://github.com/rohwid/
All component or object defined separately, here's the reason:
- Levels: Because sometimes the characters won't start from 1st level.
- Magic Point: Because sometimes the games doesn't need it (ex: action RPG).
- Number of Weaknesses: Same reason with Magic Point.
- Generate data container: Generate data container dynamically.
Notes:
- Anything which contain "show" in the function was used for debug or
check the values.
"""
from EnemyDataContainer import Enemy
def all_enemies():
    """Configure and generate the full enemy roster (names, levels, HP/MP,
    types, elemental weaknesses and stats) and render the distribution charts.
    """
    # Initialize with ENEMIES NUMBER and MAX LEVELS
    numbers_enemy = 400
    max_level = 80
    enemies = Enemy(numbers_enemy, max_level)

    # [RANGE ENEMIES NAME]
    # Set "enemy_name" to a string to auto-generate numbered names, or to a
    # list (e.g. ['alpha', 'beta', 'charlie']) for manual naming.
    enemy_name = 'Enemy'
    enemies.range_enemy_name(enemy_name, 'Name', auto='yes')

    # [RANGE ENEMIES LEVELS]
    min_level = 1
    levels_class = ['Easy', 'Medium', 'High']
    # Show Graph and Debug
    graph = True
    debug = False
    # Show Title
    graph_title = 'Enemy Level Distribution'
    title = True
    enemies.range_levels(min_level, levels_class, 'Levels', debug, scale=len(levels_class))
    enemies.show_range_levels(graph_title, graph, title, debug)

    # [RANGE ENEMIES HP]
    min_hp = 40
    max_hp = 520
    enemies.range_health_points(min_hp, max_hp, 'HP')

    # [RANGE ENEMIES MP]
    min_mp = 20
    max_mp = 490
    enemies.range_magic_points(min_mp, max_mp, 'MP')

    # [RANGE ENEMIES TYPE]
    enemy_type = ['Mixed', 'Hard Magic', 'Soft Magic', 'Hard Strength', 'Soft Strength']
    # Show Graph and Debug
    graph = True
    debug = False
    # Show Title
    # BUGFIX: this chart shows the TYPE distribution but reused the levels
    # title 'Enemy Level Distribution' (copy-paste error).
    graph_title = 'Enemy Type Distribution'
    title = True
    # Distribution percentage (distribute_percent) example:
    # distribute_percent = [40, 10, 20, 10, 20]
    distribute_percent = [34, 13, 20, 13, 20]
    enemies.range_enemy_type(enemy_type, distribute_percent, 'Type', debug)
    enemies.show_range_enemy_type(graph_title, graph, title, debug)

    # [RANGE ENEMIES WEAKNESSES]
    # Character element damage impact codes:
    #   0: Normal damage.  1: Repel against (no damage).  2: The damage weaknesses.
    element_name = ['Phys', 'Water', 'Wind', 'Earth', 'Fire']
    damage_name = ['Normal', 'Repel', 'Weak']
    # Show Graph and Debug
    graph = True
    debug = False
    # Show Title
    graph_title = 'Enemy Element Distribution'
    title = True
    # Override this function when have different weaknesses concept!
    enemies.range_element_weak(element_name, damage_name)
    enemies.show_element_weak(graph_title, graph, title, debug)

    # [RANGE ENEMIES STATS]
    stats_name = ['Strength', 'Magic', 'Endurance', 'Speed', 'Luck']
    basic_max_stats = [50, 60, 40, 55, 45]
    basic_min_stats = [2, 2, 2, 2, 2]
    # Show Graph and Debug
    graph = True
    debug = False
    # Show Title
    graph_title = 'Enemy Stats Distribution'
    title = True
    enemies.range_stats(stats_name, basic_min_stats, basic_max_stats, debug)
    enemies.show_range_stats(graph_title, graph, title, debug)

    # Parse All Data to The Tables
    enemies.generate_enemy()
if __name__ == '__main__':
    # Time the full generation run for a quick performance readout.
    started_at = datetime.datetime.now()
    all_enemies()
    print('\nTime to run this program: ', datetime.datetime.now() - started_at)
| 3,794 | 1,396 |
# TODO 1. Prompt user by asking “What would you like? (espresso/latte/cappuccino):” \ a. Check the user’s input to
# decide what to do next.\ b. The prompt should show every time action has completed, e.g. once the drink is
# dispensed. The prompt should show again to serve the next customer.
# TODO 2. Turn off the Coffee Machine by entering “off” to the prompt.
# a. For maintainers of the coffee machine, they can use “off” as the secret word to turn off
# the machine. Your code should end execution when this happens.
# TODO 3. Print report.
# a. When the user enters “report” to the prompt, a report should be generated that shows
# the current resource values. e.g.
# Water: 100ml
# Milk: 50ml
# Coffee: 76g
# Money: $2.5
# TODO 4. Check resources sufficient?
# a. When the user chooses a drink, the program should check if there are enough
# resources to make that drink.
# b. E.g. if Latte requires 200ml water but there is only 100ml left in the machine. It should
# not continue to make the drink but print: “Sorry there is not enough water.”
# c. The same should happen if another resource is depleted, e.g. milk or coffee.
# TODO 5. Process coins.
# a. If there are sufficient resources to make the drink selected, then the program should
# prompt the user to insert coins.
# b. Remember that quarters = $0.25, dimes = $0.10, nickels = $0.05, pennies = $0.01
# c. Calculate the monetary value of the coins inserted. E.g. 1 quarter, 2 dimes, 1 nickel, 2
# pennies = 0.25 + 0.1 x 2 + 0.05 + 0.01 x 2 = $0.52
# TODO 6. Check transaction successful?
# a. Check that the user has inserted enough money to purchase the drink they selected.
# E.g Latte cost $2.50, but they only inserted $0.52 then after counting the coins the
# program should say “Sorry that's not enough money. Money refunded.”.
# b. But if the user has inserted enough money, then the cost of the drink gets added to the
# machine as the profit and this will be reflected the next time “report” is triggered. E.g.
# Water: 100ml
# Milk: 50ml
# Coffee: 76g
# Money: $2.5
# c. If the user has inserted too much money, the machine should offer change.
# E.g. “Here is $2.45 dollars in change.” The change should be rounded to 2 decimal
# places.
# TODO 7. Make Coffee.
# a. If the transaction is successful and there are enough resources to make the drink the
# user selected, then the ingredients to make the drink should be deducted from the
# coffee machine resources.
# E.g. report before purchasing latte:
# Water: 300ml
# Milk: 200ml
# Coffee: 100g
# Money: $0
# Report after purchasing latte:
# Water: 100ml
# Milk: 50ml
# Coffee: 76g
# Money: $2.5
# b. Once all resources have been deducted, tell the user “Here is your latte. Enjoy!”. If
# latte was their choice of drink.
| 2,880 | 1,012 |
class RestWriter(object):
    """Render a report as reStructuredText sections into a file object."""

    def __init__(self, file, report):
        self.file = file
        self.report = report

    def write(self, restsection):
        """Write every (title, table) pair, underlining each title with the
        first character of ``restsection``."""
        assert len(restsection) >= 1
        underline_char = restsection[0]
        for heading, table in self.report:
            self.write_header(heading, underline_char, 80)
            self.file.write('\n')
            self.file.write(str(table))

    def write_header(self, title, char, width = 80):
        """Emit a blank-line-separated title underlined with ``char``; the
        underline is at least ``width`` characters long."""
        out = self.file
        out.write('\n\n')
        out.write("%s\n" % title)
        out.write(char * max(len(title), width))
        out.write('\n')
| 590 | 194 |
from collections import defaultdict
from functools import partial
import logging
from typing import (
Any,
Callable,
DefaultDict,
Dict,
List,
NamedTuple,
Sequence,
Tuple,
Union,
cast,
)
try:
from numpy.typing import ArrayLike
except ImportError:
ArrayLike = Any
import numpy as np
from xarray.backends.locks import SerializableLock
from . import UNDEFINED, _Variable, WgribError
from .wgrib2 import MemoryBuffer, wgrib, free_files
from .inventory import MetaData
from .template import Template
logger = logging.getLogger(__name__)

# wgrib2 returns C float arrays
DTYPE = np.dtype("float32")

# Indices into a variable's non-geographic ("header") dimensions.
HeaderIndices = Tuple[int, ...]
FileIndex = DefaultDict[str, Dict[HeaderIndices, str]]  # file -> Dict
FileIndices = DefaultDict[str, FileIndex]  # variable name -> FileIndex

# Lock available to serialize wgrib2 invocations across workers
# (SerializableLock survives pickling for distributed schedulers).
WGRIB2_LOCK = SerializableLock()
class Dataset(NamedTuple):
    """Minimal dataset container: dimension sizes, variables and attributes."""
    dims: Dict[str, int]
    vars: Dict[str, _Variable]
    attrs: Dict[str, Any]
# FIXME: might use https://github.com/roebel/py_find_1st
def find_1st(array, value):
    """Return the first index where ``array`` equals ``value``.

    Raises IndexError when no element matches — callers rely on that.
    """
    matches = np.nonzero(array == value)
    return matches[0][0]
def build_file_index(
    items: Sequence[MetaData],
    template: Template,
) -> FileIndices:
    """Map each template variable to per-file record offsets.

    For every inventory item matching the template, locate its forecast
    time and level along the template coordinates, and store the item's
    offset under variable name -> file -> header indices.  Items whose
    variable, forecast time or level is absent from the template are
    skipped with an info log.
    """
    file_indices: FileIndices = defaultdict(cast(Callable, partial(defaultdict, dict)))
    for item in (i for i in items if template.item_match(i)):
        varname = template.item_to_varname(item)
        try:
            specs = template.var_specs[varname]
        except KeyError:
            logger.info("Variable {!s} not found in template, skipping".format(varname))
            continue
        time_coord = specs.time_coord
        level_coord = specs.level_coord
        # Forecast lead time relative to the reference time.
        fcst_time = item.end_ft - item.reftime
        header_indices: Tuple[int, ...] = ()
        found = True
        if time_coord in specs.dims:
            try:
                i = find_1st(template.coords[time_coord].data, fcst_time)
                header_indices = (i,)
            except IndexError:
                found = False
        else:
            # Scalar time coordinate: it must equal the item's lead time.
            if template.coords[time_coord].data != fcst_time:
                found = False
        if not found:
            logger.info(
                "Variable {:s} forecast time {!r} not found in template, "
                "skipping".format(varname, fcst_time)
            )
            continue
        if level_coord in specs.dims:
            try:
                i = find_1st(template.coords[level_coord].data, item.level_value)
                header_indices += (i,)
            except IndexError:
                logger.info(
                    "Variable {:s} level {!r} not found in template, "
                    "skipping".format(varname, item.level_value)
                )
                continue
        file_indices[varname][item.file][header_indices] = item.offset
    return file_indices
def expand_item(item: Sequence[Any], shape: Tuple[int, ...]) -> Tuple[List[Any], ...]:
    """Normalize every header index in ``item`` to an explicit list.

    Slices are expanded against the matching dimension size, scalars become
    one-element lists, and numpy arrays are converted to plain lists.
    """
    expanded: List[List[Any]] = []
    for index, size in zip(item, shape):
        if isinstance(index, list):
            expanded.append(index)
        elif isinstance(index, np.ndarray):
            expanded.append(index.tolist())
        elif isinstance(index, slice):
            start = index.start or 0
            stop = index.stop or size
            step = index.step or 1
            expanded.append(list(range(start, stop, step)))
        elif isinstance(index, int):
            expanded.append([index])
        else:
            raise TypeError("Unsupported index type {!r}".format(type(index)))
    return tuple(expanded)
class OnDiskArray:
    """Lazily indexable array backed by GRIB2 records decoded via wgrib2.

    Leading dimensions are "header" dimensions (time, level, ...) resolved
    through ``file_index``; the trailing ``geo_ndim`` dimensions are the
    horizontal grid axes.
    """

    def __init__(
        self,
        varname: str,
        file_index: FileIndex,
        shape: Sequence[int],
        template: Template,
    ) -> None:
        self.varname = varname
        self.file_index = file_index  # file -> header indices -> record offset
        self.shape = tuple(shape)
        # Number of trailing geographic (grid) dimensions.
        self.geo_ndim = len(template.grid.dims)
        # Grid points per 2-D field; sizes the binary records wgrib2 emits.
        self.npts = np.prod(shape[-self.geo_ndim :])
        self.missing_value = UNDEFINED  # wgrib2 missing value
        self.dtype = DTYPE

    def __getitem__(self, item: Tuple[Any, ...]) -> ArrayLike:
        """Decode the selected records with wgrib2 and assemble the array.

        ``item`` must be a full-rank tuple index.  Records absent from the
        files remain NaN; wgrib2's missing value is converted to NaN too.
        """
        assert isinstance(item, tuple), "Item type must be tuple not {!r}".format(
            type(item)
        )
        assert len(item) == len(self.shape), "Item len must be {!r} not {!r}".format(
            len(self.shape), len(item)
        )
        # Expand the header part of the index to explicit per-axis lists.
        header_item = expand_item(item[: -self.geo_ndim], self.shape)
        array_field_shape = (
            tuple(len(i) for i in header_item) + self.shape[-self.geo_ndim :]
        )
        array_field = np.full(array_field_shape, fill_value=np.nan, dtype=DTYPE)
        # Bytes per 2-D field in wgrib2's -bin (no-header) output.
        datasize = self.npts * array_field.dtype.itemsize
        for file, index in self.file_index.items():
            # Faster, longer code
            def _get_array_indexes():
                # Yield (output-array position, record offset) for every
                # record of this file selected by the index.
                for header_indices, offset in index.items():
                    try:
                        afi = [
                            it.index(ix) for it, ix in zip(header_item, header_indices)
                        ]
                        yield afi, offset
                    except ValueError:
                        # Record not selected by this index.
                        continue
            try:
                seq_of_array_field_indexes, offsets = zip(*_get_array_indexes())
            except ValueError:
                # zip(*empty) raises: no selected record lives in this file.
                continue
            inventory = MemoryBuffer()
            inventory.set("\n".join(offsets))
            output = MemoryBuffer()
            # One wgrib2 call decodes all selected records to raw floats.
            args = [
                file,
                "-rewind_init",
                file,
                "-i_file",
                inventory,
                "-rewind_init",
                inventory,
                "-inv",
                "/dev/null",
                "-no_header",
                "-bin",
                output,
            ]
            try:
                wgrib(*args)
                values = output.get("b")
            except WgribError as e:
                logger.error("wgrib2 error: {:s}".format(str(e)))
                output.close()
                continue
            finally:
                # NOTE(review): output.close() can run twice on the error
                # path — presumably MemoryBuffer.close is idempotent; confirm.
                inventory.close()
                output.close()
                free_files(file)
            # Records come back concatenated in request order; copy each
            # datasize-sized chunk into its slot of the output array.
            for pos, array_field_indexes in zip(
                range(0, len(values), datasize), seq_of_array_field_indexes
            ):
                chunk = np.frombuffer(values[pos : pos + datasize], dtype=DTYPE)
                array_field.__getitem__(tuple(array_field_indexes)).flat[:] = chunk
            # Slow, shorter code
            # for header_indices, offset in index.items():
            #     try:
            #         array_field_indexes = [
            #             it.index(ix) for it, ix in zip(header_item, header_indices)
            #         ]
            #     except ValueError:
            #         continue
            #     output = MemoryBuffer()
            #     args = [
            #         path,
            #         "-rewind_init",
            #         path,
            #         "-d",
            #         offset,
            #         "-inv",
            #         "/dev/null",
            #         "-no_header",
            #         "-bin",
            #         output,
            #     ]
            #     #print('=========== calling wgrib', path, header_indices, offset)
            #     try:
            #         wgrib(*args)
            #         values = output.get("a")
            #         array_field.__getitem__(tuple(array_field_indexes)).flat[:] = values
            #     except WgribError as e:
            #         logger.error("wgrib2 error: {!r}".format(e))
            #         output.close()
            #         continue
            #     finally:
            #         output.close()
            #         free_files(path)
        # Apply the geographic part of the index and map missing to NaN.
        array = array_field[(Ellipsis,) + item[-self.geo_ndim :]]
        array[array == self.missing_value] = np.nan
        # Squeeze axes that were indexed with plain ints (numpy semantics).
        for i, it in reversed(list(enumerate(item[: -self.geo_ndim]))):
            if isinstance(it, int):
                array = array[(slice(None, None, None),) * i + (0,)]
        return array
def open_dataset(
    items: Sequence[MetaData],
    template: Template,
) -> Union[Dataset, None]:
    """Assemble a lazy :class:`Dataset` from inventory ``items``.

    Variables are backed by :class:`OnDiskArray`; template coordinates,
    the scalar ``reftime`` and the grid-projection variable are added, and
    a CF-style ``coordinates`` attribute is composed.
    """
    dimensions: Dict[str, int] = {}
    variables: Dict[str, _Variable] = {}
    file_indices = build_file_index(items, template)
    if not file_indices:
        logger.warning("No matching data found")
        # NOTE: returns an empty Dataset here, never None, despite the hint.
        return Dataset(dimensions, variables, {})
    for name, file_index in file_indices.items():
        var_specs = template.var_specs[name]
        data = OnDiskArray(name, file_index, var_specs.shape, template)
        variables[name] = _Variable(var_specs.dims, data, var_specs.attrs)
        dimensions.update({k: v for k, v in zip(var_specs.dims, var_specs.shape)})
    variables.update(template.coords)
    variables["reftime"] = _Variable(
        # reftime is the same for all items
        (),
        np.array(items[0].reftime),
        {"standard_name": "reference_time"},
    )
    # Projection variable
    variables[template.grid.cfname] = _Variable((), np.array(0), template.grid.attrs)
    attrs = template.attrs.copy()
    attrs["coordinates"] = " ".join(
        tuple(template.coords.keys()) + ("reftime", template.grid.cfname)
    )
    return Dataset(dimensions, variables, attrs)
| 9,005 | 2,705 |
import json
import time
from tools import logger as log
import strategies
def _find_interactive_skill(listener, skill_id):
    """Scan the current map's elements for an interactive skill.

    Returns (element_id, skill_instance_uid) of the LAST matching element
    (matches the original scan order), or (None, None) when absent.
    """
    element_id, skill_uid = None, None
    for element in listener.game_state['map_elements']:
        for skill in element.get('enabledSkills', ()):
            if skill.get('skillId') == skill_id:
                element_id = element['elementId']
                skill_uid = skill['skillInstanceUid']
    return element_id, skill_uid


def _send_order(logger, orders_queue, order):
    """Log an order and push it on the bot API queue."""
    logger.info('Sending order to bot API: {}'.format(order))
    orders_queue.put((json.dumps(order),))


def _wait_until(listener, predicate, timeout):
    """Poll the game state until ``predicate(game_state)`` holds or timeout.

    The 'pos' key guards every check, as in the original polling loops.
    Returns (elapsed_seconds, success).
    """
    start = time.time()
    while time.time() - start < timeout:
        game_state = listener.game_state
        if 'pos' in game_state.keys() and predicate(game_state):
            return time.time() - start, True
        time.sleep(0.05)
    return time.time() - start, False


def _failure(strategy, logger, execution_time, reason):
    """Fill a failure report into the strategy and release the logger."""
    strategy['report'] = {
        'success': False,
        'details': {'Execution time': execution_time, 'Reason': reason}
    }
    log.close_logger(logger)
    return strategy


def go_to_incarnam(**kwargs):
    """
    A strategy to go from Astrub to Incarnam trough the portal
    :param kwargs: strategy, listener, and orders_queue
    :return: the input strategy with a report
    """
    strategy = kwargs['strategy']
    listener = kwargs['listener']
    orders_queue = kwargs['orders_queue']
    assets = kwargs['assets']  # kept for interface parity with other strategies
    logger = log.get_logger(__name__, strategy['bot'])
    global_start = time.time()
    # Hoisted: the original recomputed this before every wait loop.
    timeout = strategy.get('timeout', 10)
    door_skill_id = 184

    # Enter the portal room
    element_id, skill_uid = _find_interactive_skill(listener, door_skill_id)
    if element_id is None or skill_uid is None:
        # Logger.warn is deprecated; use warning().
        logger.warning('Failed entering the portal room in {}s'.format(0))
        return _failure(strategy, logger, 0, 'Could not find skill UID or element id')
    _send_order(logger, orders_queue, {
        'command': 'use_interactive',
        'parameters': {
            'element_id': element_id,
            'skill_uid': skill_uid
        }
    })
    execution_time, ok = _wait_until(
        listener, lambda gs: gs['map_id'] == 192416776, timeout
    )
    if not ok:
        logger.warning('Failed entering the portal room in {}s'.format(execution_time))
        return _failure(strategy, logger, execution_time, 'Timeout')
    logger.info('Entered the portal room in {}s'.format(execution_time))

    # Go to cell 468 (where the portal sits)
    _send_order(logger, orders_queue, {
        'command': 'move',
        'parameters': {
            "isUsingNewMovementSystem": False,
            "cells": [[True, False, 0, 0, True, 0] for _ in range(560)],
            "target_cell": 468
        }
    })
    execution_time, ok = _wait_until(
        listener,
        lambda gs: 'worldmap' in gs.keys() and gs['cell'] == 468,
        timeout
    )
    if not ok:
        logger.warning('Failed going to cell 468 in {}s'.format(execution_time))
        return _failure(strategy, logger, execution_time, 'Timeout')

    # Use the portal
    element_id, skill_uid = _find_interactive_skill(listener, door_skill_id)
    if element_id is None or skill_uid is None:
        logger.warning('Failed entering the portal room in {}s'.format(0))
        return _failure(strategy, logger, 0, 'Could not find skill UID or element id')
    _send_order(logger, orders_queue, {
        'command': 'use_interactive',
        'parameters': {
            'element_id': element_id,
            'skill_uid': skill_uid
        }
    })
    execution_time, ok = _wait_until(
        listener, lambda gs: gs['worldmap'] == 2, timeout
    )
    if not ok:
        logger.warning('Failed going through the portal from Incarnam to Astrub in {}s'.format(execution_time))
        return _failure(strategy, logger, execution_time, 'Timeout')

    execution_time = time.time() - global_start
    logger.info('Went from Astrub to Incarnam in {}s'.format(execution_time))
    strategy['report'] = {
        'success': True,
        'details': {'Execution time': execution_time}
    }
    log.close_logger(logger)
    return strategy
| 5,486 | 1,612 |
from . import get_deck_id
| 26 | 10 |
import h5py,os
from transformers import T5Tokenizer, T5Model, T5ForConditionalGeneration
if __name__ == "__main__":
    # Example prompts: sentences tagged with the "amrgraphize:" task prefix
    # and terminated with the T5 end-of-sequence token.
    snt_0 = "amrgraphize: establish model in Industrial Innovation </s>"
    snt_1 = "amrgraphize: raise standard to in excess of CITY_1 's 1 magnitude could leave authority with some breathing space for explanation , and alleviate public anger . </s>"
    snt_2 = "amrgraphize: 1 . from among they , pick-out 10 for submission to a assessment committee to assess . </s>"
    # Reference AMR linearizations for snt_1 / snt_2, for visual comparison.
    amr_1 = "possible and leave-13 raise standard in-excess-of seismic-quantity 1 earthquake CITY_1 have authority space breathe explain authority some alleviate raise anger public"
    amr_2 = "pick-out 1 thing 10 submit committee assess assess committee thing they"

    ## Load finetuned t5 model
    finetuned_t5 = "../t5-small-amrtrained_4"
    t5 = T5ForConditionalGeneration.from_pretrained(finetuned_t5)

    ## Load t5 tokenizer
    t5_tokenizer = T5Tokenizer.from_pretrained("../t5-vocab")

    # Select which example to run.
    snt = snt_2
    amr = amr_2

    input_ids = t5_tokenizer.encode(snt, return_tensors="pt")
    outputs = t5.generate(input_ids=input_ids, max_length=1000)
    # Decode each generated sequence back to text (special tokens kept).
    pred = [
        t5_tokenizer.decode(
            output#, skip_special_tokens=False, clean_up_tokenization_spaces=False
        )
        for output in outputs
    ]
    print("snt: ", snt)
    print("amr: ", amr)
    print("pred: ", pred)
    print("outputs: ", outputs)
    #print("t5: ", t5.config)
| 1,473 | 518 |
import numpy as np
import pandas as pd
from multiprocessing import Pool, freeze_support, cpu_count
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import MinMaxScaler
from sklearn.neighbors import KernelDensity
from sklearn.model_selection import StratifiedKFold
from anomatools.models import SSDO
from sklearn.ensemble import IsolationForest
import collections, functools
from anomatools.models import SSDO
import pyximport #import cython such that CAPe can work!
pyximport.install()
from CAPe import * #this imports CAPe functions
from TIcE import * #this imports TIcE functions
from Kernel_MPE_grad_threshold import * #this imports km1 and km2 functions
def _f1_from_counts(counts):
    """F1 score from a summed [tn, fp, fn, tp] confusion-matrix cell array."""
    tn, fp, fn, tp = counts
    return (2 * tp) / (2 * tp + fp + fn)


def get_f1scores_wdiff_priors(data, y, real_cont, tmp_cont = 0.1, k = 5, ntimes = 10, name_ds = '', case = 2,
                              n_splits = 5, n_iter = 3, n_jobs = cpu_count()):
    """ This function gets the F1 scores of SSDO when using different contamination factors according to all the methods
    used in the paper (CAPe). It builds dataframes with the final results and saves them to csv files (remark: change
    the path at the bottom or remove the line). The 5 compared methods for estimating the class prior in a PU dataset
    are: CAPe, TIcE, km1, km2 and the naive baseline with the real contamination factor.
    Parameters
    ----------
    data : np.array of shape (n_samples, n_features). It is the entire dataset.
    y : np.array of shape (n_samples,) containing all the labels.
    real_cont : float regarding the REAL expected percentage of anomalies in the training set.
    tmp_cont : float regarding the starting expected percentage of anomalies in the training set. Default=0.1.
    k : int regarding the number of new labels required. Default = 5.
    ntimes : int regarding the number of iterations for getting new k labels. Default = 10 (at least 50 examples).
    name_ds : string containing the name of the dataset to output a meaningful csv file. Default empty string.
    case : int getting 0 when the user's uncertainty is not present (PO), 2 when dealing with it (IO). Default=0.
    n_splits : int regarding the number of splits inside the crossvalidation. Default = 5.
    n_iter : int regarding the number of iterations of the whole method. Default = 3.
    n_jobs : int regarding the number of jobs to run in parallel. Default = maximum number of jobs.
    Returns
    ----------
    F1_results : dataframe containing the F1 results. The first column is the labels acquired (k); the others contain
                 the F1 score for each method and each number of labels.
    prior_results : dataframe containing the prior results. The first column is the labels acquired (k); the others
                    contain the prior estimates for each method and each number of labels.
    """
    F1_results = pd.DataFrame(data = k*np.arange(1, ntimes+1,1), columns = ['k'])
    F1_columns = ['F1_CAPe', 'F1_TIcE', 'F1_km1', 'F1_km2', 'F1_real']
    for col in F1_columns:
        F1_results[col] = np.zeros(ntimes, dtype = float)
    # Element-wise prior accumulators (shape (ntimes,)). Each receives one
    # addition per fold per outer iteration: n_iter * n_splits in total.
    CAPe_prior = np.zeros(ntimes, dtype = float)
    TIcE_prior = np.zeros(ntimes, dtype = float)
    km1_prior = np.zeros(ntimes, dtype = float)
    km2_prior = np.zeros(ntimes, dtype = float)
    real_prior = np.zeros(ntimes, dtype = float)
    for num in np.arange(1, n_iter+1):
        skf = StratifiedKFold(n_splits=n_splits, random_state=331, shuffle=True)
        F1_CAPe = []
        F1_TIcE = []
        F1_km1 = []
        F1_km2 = []
        F1_real = []
        for train_index, test_index in skf.split(data, y):
            X_train, X_test = data[train_index], data[test_index]
            y_train, y_test = y[train_index], y[test_index]
            real_anomalies = np.where(y_train == 1)[0]
            f1cape, prior_cape = evaluate_CAPe(X_train, X_test, y_test, real_anomalies, k, ntimes, tmp_cont, case)
            F1_CAPe.append(f1cape)
            CAPe_prior += prior_cape
            f1tice, prior_tice = evaluate_TIcE(X_train, X_test, y_test, real_anomalies, k, ntimes, tmp_cont, case)
            F1_TIcE.append(f1tice)
            TIcE_prior += prior_tice
            f1km1, f1km2, prior_km1, prior_km2 = evaluate_km1km2(X_train, X_test, y_test, real_anomalies, k, ntimes,
                                                                tmp_cont, case)
            F1_km1.append(f1km1)
            F1_km2.append(f1km2)
            km1_prior += prior_km1
            km2_prior += prior_km2
            f1real, prior_real = evaluate_realF1(X_train, X_test, y_test, real_cont, real_anomalies, k, ntimes, case)
            F1_real.append(f1real)
            real_prior += prior_real
            print('Done crossval for iter num:', num)
        # Merge the per-fold [tn, fp, fn, tp] dicts by element-wise sum, then
        # turn the pooled counts into one F1 score per label budget.
        FinalF1CAPe = dict(functools.reduce(lambda x, y: x.update(y) or x, F1_CAPe, collections.Counter()))
        FinalF1TIcE = dict(functools.reduce(lambda x, y: x.update(y) or x, F1_TIcE, collections.Counter()))
        FinalF1km1 = dict(functools.reduce(lambda x, y: x.update(y) or x, F1_km1, collections.Counter()))
        FinalF1km2 = dict(functools.reduce(lambda x, y: x.update(y) or x, F1_km2, collections.Counter()))
        FinalF1real = dict(functools.reduce(lambda x, y: x.update(y) or x, F1_real, collections.Counter()))
        for j in range(ntimes):
            key = int(k * (j + 1))
            FinalF1CAPe[key] = _f1_from_counts(FinalF1CAPe[key])
            FinalF1TIcE[key] = _f1_from_counts(FinalF1TIcE[key])
            FinalF1km1[key] = _f1_from_counts(FinalF1km1[key])
            FinalF1km2[key] = _f1_from_counts(FinalF1km2[key])
            FinalF1real[key] = _f1_from_counts(FinalF1real[key])
        F1_results['F1_CAPe'] += list(FinalF1CAPe.values())
        F1_results['F1_TIcE'] += list(FinalF1TIcE.values())
        F1_results['F1_km1'] += list(FinalF1km1.values())
        F1_results['F1_km2'] += list(FinalF1km2.values())
        F1_results['F1_real'] += list(FinalF1real.values())
        print('Finished Iteration number', num, 'out of', n_iter)
    prior_results = pd.DataFrame(data = k*np.arange(1, ntimes+1,1), columns = ['k'])
    # BUGFIX: each prior accumulator received n_iter * n_splits additions,
    # but the old code divided by ntimes * n_splits — skewing the averages
    # whenever ntimes != n_iter. Normalize by the actual addition count.
    prior_norm = n_iter * n_splits
    prior_results['CAPe_prior'] = CAPe_prior / prior_norm
    prior_results['TIcE_prior'] = TIcE_prior / prior_norm
    prior_results['km1_prior'] = km1_prior / prior_norm
    prior_results['km2_prior'] = km2_prior / prior_norm
    prior_results['real_prior'] = real_prior / prior_norm
    # F1 columns were accumulated once per outer iteration.
    F1_results[F1_columns] = F1_results[F1_columns] / n_iter
    F1_results.to_csv('F1score_case_'+str(case)+name_ds+'.csv')
    prior_results.to_csv('prior_case_'+str(case)+name_ds+'.csv')
    return F1_results, prior_results
def evaluate_CAPe(X_train, X_test, y_test, real_anomalies = [], k = 5, ntimes = 10, tmp_cont = 0.1,
                  case = 0, n_jobs = cpu_count()):
    """ Evaluating CAPe as provided in the paper. This function 1) gets CAPe's estimation of the class prior and 2) saves the
    confusion matrix's cell values (tn, fp, fn, tp) in order to compute after all the iterations a unique F1 score.
    Parameters
    ----------
    X_train : np.array of shape (n_samples, n_features). It is the training set.
    X_test : np.array of shape (m_samples, n_features). It is the test set.
    y_test : np.array of shape (m_samples,). It contains the test labels.
    real_anomalies : list of shape (n_samples,) containing the index of the real training anomalies. Only needed if case=2.
    k : int regarding the number of new labels required.
    ntimes : int regarding the number of iterations for getting new k labels.
    tmp_cont : float regarding the starting expected percentage of anomalies in the training set. Default=0.1.
    case : int getting 0 when the user's uncertainty is not present (PO), 2 when dealing with it (IO). Default=0.
    n_jobs : int regarding the number of jobs to run in parallel. Default = maximum number of jobs.
    Returns
    ----------
    F1_CAPe : dict containing for each key multiple of k (k, 2*k, 3*k,...,ntimes*k) the array [tn,fp,fn,tp] obtained
        with the estimate of the prior in such case.
    class_priors : array of shape (ntimes,) containing every k new labels the estimate of the class prior.
    """
    n = np.shape(X_train)[0]
    query_list = []
    # FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24; plain int is equivalent.
    labeled_ex = np.zeros(n, dtype=int)
    # FIX: removed the hard-coded "tmp_cont = 0.1" that silently overrode the caller's argument;
    # the parameter default already provides 0.1.
    ker = KernelDensity().fit(X_train)
    # Mean training density, turned into a log10 term as required by CAPe.
    dmu = [np.exp(ker.score(X_train[i:i+1])) for i in range(n)]
    mean_prob_term = math.log(np.mean(dmu), 10) #Take the log density
    F1_CAPe = {}
    class_priors = np.zeros(ntimes, dtype = float)
    for j in range(ntimes):
        prior, labeled_ex, query_list = CAPe(X_train, labeled_ex, query_list, k, real_anomalies, tmp_cont, mean_prob_term,
                                             case, n_jobs)
        class_priors[j] = prior
        # Clamp the prior away from 1.0 so the contamination factor never reaches 0.
        tmp_cont = 1 - min(prior, 0.9999) #update the contamination factor
        # Performance of the classifier after k*(j+1) acquired labels.
        F1_CAPe[int(k*(j+1))] = get_tnfpfntp(X_train, labeled_ex, X_test, y_test, tmp_cont)
    return F1_CAPe, class_priors
def evaluate_TIcE(X_train, X_test, y_test, real_anomalies = [], k = 5, ntimes = 10, tmp_cont = 0.1, case = 0):
    """ This function for evaluating TIcE does 1) query examples until k new labels are acquired, 2) get
    TIcE's estimation of the class prior and 3) save the confusion matrix's cell values (tn, fp, fn, tp) in order to
    compute after all the iterations a unique F1 score.
    Parameters
    ----------
    X_train : np.array of shape (n_samples, n_features). It is the training set.
    X_test : np.array of shape (m_samples, n_features). It is the test set.
    y_test : np.array of shape (m_samples,). It contains the test labels.
    real_anomalies : list of shape (n_samples,) containing the index of the real training anomalies. Only needed if case=2.
    k : int regarding the number of new labels required.
    ntimes : int regarding the number of iterations for getting new k labels.
    tmp_cont : float regarding the starting expected percentage of anomalies in the training set. Default=0.1.
    case : int getting 0 when the user's uncertainty is not present (PO), 2 when dealing with it (IO). Default=0.
    Returns
    ----------
    F1_TIcE : dict containing for each key multiple of k (k, 2*k, 3*k,...,ntimes*k) the array [tn,fp,fn,tp] obtained
        with the estimate of the prior in such case.
    class_priors : array of shape (ntimes,) containing every k new labels the estimate of the class prior.
    """
    n = np.shape(X_train)[0]
    query_list = []
    # FIX: np.int was removed in NumPy >= 1.24; use the builtin int.
    labeled_ex = np.zeros(n, dtype=int)
    # FIX: removed the hard-coded "tmp_cont = 0.1" that overrode the caller's argument.
    F1_TIcE = {}
    class_priors = np.zeros(ntimes, dtype = float)
    scaler = MinMaxScaler()  # TIcE expects features scaled to [0, 1]
    for j in range(ntimes):
        labeled_ex, query_list = query_at_least_k_points(X_train, labeled_ex, real_anomalies, query_list, k, tmp_cont,
                                                         case)
        _, prior = run_from_elsewhere(data = scaler.fit_transform(X_train), labels = labeled_ex) #run TIcE algo and find c
        class_priors[j] = prior
        tmp_cont = 1 - min(prior, 0.9999)
        F1_TIcE[int(k*(j+1))] = get_tnfpfntp(X_train, labeled_ex, X_test, y_test, tmp_cont) #compute the performance of the
        #classifier
    return F1_TIcE, class_priors
def evaluate_km1km2(X_train, X_test, y_test, real_anomalies = [], k = 5, ntimes = 10, tmp_cont = 0.1, case = 0):
    """ This function for evaluating km1 and km2 does 1) query examples until k new labels are acquired, 2) get
    km1's (km2's) estimation of the class prior and 3) save the confusion matrix's cell values (tn, fp, fn, tp) in order to
    compute after all the iterations a unique F1 score. It repeats the whole process twice, once for km1 and once for km2.
    Parameters
    ----------
    X_train : np.array of shape (n_samples, n_features). It is the training set.
    X_test : np.array of shape (m_samples, n_features). It is the test set.
    y_test : np.array of shape (m_samples,). It contains the test labels.
    real_anomalies : list of shape (n_samples,) containing the index of the real training anomalies. Only needed if case=2.
    k : int regarding the number of new labels required.
    ntimes : int regarding the number of iterations for getting new k labels.
    tmp_cont : float regarding the starting expected percentage of anomalies in the training set. Default=0.1.
    case : int getting 0 when the user's uncertainty is not present (PO), 2 when dealing with it (IO). Default=0.
    Returns
    ----------
    F1_km1 : dict containing for each key multiple of k (k, 2*k, 3*k,...,ntimes*k) the array [tn,fp,fn,tp] obtained
        with the estimate of the prior in such case (km1).
    F1_km2 : dict containing for each key multiple of k (k, 2*k, 3*k,...,ntimes*k) the array [tn,fp,fn,tp] obtained
        with the estimate of the prior in such case (km2).
    class_priors_km1: array of shape (ntimes,) containing every k new labels the estimate of the class prior for km1.
    class_priors_km2: array of shape (ntimes,) containing every k new labels the estimate of the class prior for km2.
    """
    n = np.shape(X_train)[0]
    query_list_km1 = []
    query_list_km2 = []
    # FIX: np.int was removed in NumPy >= 1.24; use the builtin int.
    labeled_ex_km1 = np.zeros(n, dtype=int)
    labeled_ex_km2 = np.zeros(n, dtype=int)
    F1_km1 = {}
    F1_km2 = {}
    class_priors_km1 = np.zeros(ntimes, dtype = float)
    class_priors_km2 = np.zeros(ntimes, dtype = float)
    # Start both methods from the caller-supplied contamination (previously hard-coded to 0.1).
    # FIX: the original also kept a second, unused pair of query lists (km1_query_list/km2_query_list).
    km1_tmp_cont = tmp_cont
    km2_tmp_cont = tmp_cont
    for j in range(ntimes):
        # ----------------------------------- km1 -----------------------------------
        # FIX: query with the *updated* km1 contamination; the original always passed the
        # stale starting value tmp_cont, unlike evaluate_CAPe/evaluate_TIcE.
        labeled_ex_km1, query_list_km1 = query_at_least_k_points(X_train, labeled_ex_km1, real_anomalies, query_list_km1, k,
                                                                 km1_tmp_cont, case)
        X_component = np.where(labeled_ex_km1 == 1)[0]
        X_component = X_train[X_component]
        X_mixture = np.where(labeled_ex_km1 == 0)[0]
        X_mixture = X_train[X_mixture]
        prior_km1,_ = wrapper(X_mixture, X_component)
        class_priors_km1[j] = prior_km1
        km1_tmp_cont = 1-prior_km1
        F1_km1[int(k*(j+1))] = get_tnfpfntp(X_train, labeled_ex_km1, X_test, y_test, km1_tmp_cont)
        # ----------------------------------- km2 -----------------------------------
        labeled_ex_km2, query_list_km2 = query_at_least_k_points(X_train, labeled_ex_km2, real_anomalies, query_list_km2, k,
                                                                 km2_tmp_cont, case)
        X_component = np.where(labeled_ex_km2 == 1)[0]
        X_component = X_train[X_component]
        X_mixture = np.where(labeled_ex_km2 == 0)[0]
        X_mixture = X_train[X_mixture]
        _, prior_km2 = wrapper(X_mixture, X_component)
        class_priors_km2[j] = prior_km2
        # BUG FIX: the original assigned km1_tmp_cont here, so km2's contamination factor
        # was never updated and F1_km2 was always computed with the initial 0.1.
        km2_tmp_cont = 1-prior_km2
        F1_km2[int(k*(j+1))] = get_tnfpfntp(X_train, labeled_ex_km2, X_test, y_test, km2_tmp_cont)
    return F1_km1, F1_km2, class_priors_km1, class_priors_km2
def evaluate_realF1(X_train, X_test, y_test, real_cont, real_anomalies = [], k = 5, ntimes = 10, case = 0):
    """ This function for evaluating the performance of SSDO with real contamination factor does 1) query examples until k new
    labels are acquired and 2) save the confusion matrix's cell values (tn, fp, fn, tp) in order to compute after all the
    iterations a unique F1 score. It never updates the contamination factor, as it is the true one.
    Parameters
    ----------
    X_train : np.array of shape (n_samples, n_features). It is the training set.
    X_test : np.array of shape (m_samples, n_features). It is the test set.
    y_test : np.array of shape (m_samples,). It contains the test labels.
    real_cont : float regarding the REAL expected percentage of anomalies in the training set.
    real_anomalies : list of shape (n_samples,) containing the index of the real training anomalies. Only needed if case=2.
    k : int regarding the number of new labels required.
    ntimes : int regarding the number of iterations for getting new k labels.
    case : int getting 0 when the user's uncertainty is not present (PO), 2 when dealing with it (IO). Default=0.
    Returns
    ----------
    F1_real : dict containing for each key multiple of k (k, 2*k, 3*k,...,ntimes*k) the array [tn,fp,fn,tp] obtained
        with the real prior.
    class_priors : array of shape (ntimes,) containing every k new labels the real class prior.
    """
    n = np.shape(X_train)[0]
    query_list = []
    # FIX: np.int was removed in NumPy >= 1.24; use the builtin int.
    labeled_ex = np.zeros(n, dtype=int)
    F1_real = {}
    class_priors = np.zeros(ntimes, dtype = float)
    for j in range(ntimes):
        labeled_ex, query_list = query_at_least_k_points(X_train, labeled_ex, real_anomalies, query_list, k, real_cont, case)
        # The real prior is constant: it is just the complement of the true contamination.
        class_priors[j] = 1 - real_cont
        F1_real[int(k*(j+1))] = get_tnfpfntp(X_train, labeled_ex, X_test, y_test, real_cont)
    return F1_real, class_priors
def get_tnfpfntp(X_train, y_train, X_test, y_test, contamination):
    """ Train SSDO with the given labels and contamination factor and return the test confusion-matrix cells.
    Parameters
    ----------
    X_train : np.array of shape (n_samples, n_features). It is the training set.
    y_train : np.array of shape (n_samples,). It contains the training labels (at the considered step).
    X_test : np.array of shape (m_samples, n_features). It is the test set.
    y_test : np.array of shape (m_samples,). It contains the test labels.
    contamination : float regarding the expected percentage of anomalies in the training set.
    Returns
    ----------
    np.array with the four cells [tn, fp, fn, tp] of the test confusion matrix.
    """
    # Unsupervised prior: inverted IsolationForest scores, shifted to be non-negative.
    iforest = IsolationForest(contamination = contamination, behaviour='new').fit(X_train)
    prior_scores = iforest.decision_function(X_train) * -1
    prior_scores = prior_scores + abs(min(prior_scores))
    ssdo = SSDO(k=3, alpha=2.3, unsupervised_prior='other', contamination = contamination)
    ssdo.fit(X_train, y_train, prior = prior_scores)
    predicted = ssdo.predict(X_test, prior = prior_scores)
    # Map SSDO's output to binary 0/1 labels for the confusion matrix.
    y_pred = [1 if label == 1 else 0 for label in predicted]
    tn, fp, fn, tp = confusion_matrix(y_test, y_pred).ravel()
    return np.array([tn, fp, fn, tp], dtype = int)
def query_at_least_k_points(data, labeled_ex, real_anomalies, query_list, k, tmp_cont, case):
    """ This function queries at least k examples in order to acquire new k labels. The new labels depend on the case and on the
    user's uncertainty. This function does 1) compute the user's uncertainty with respect to the case, 2) as long as k new
    labels are not acquired, train SSDO and query the most informative example to get labeled by the oracle (PO or IO).
    Parameters
    ----------
    data : np.array of shape (n_samples, n_features). It is the data set.
    labeled_ex : list of shape (n_samples,) assuming 1 if the example is labeled, 0 otherwise.
    real_anomalies : list of shape (n_samples,) containing the index of the real anomalies. Only needed if case=2.
    query_list : list of shape (n_samples,) assuming 1 if the example has been queried, 0 otherwise.
    k : int regarding the number of new labels required.
    tmp_cont : float regarding the starting expected percentage of anomalies in the training set. Default=0.1.
    case : int getting 0 when the user's uncertainty is not present (PO), 2 when dealing with it (IO). Default=0.
    Returns
    ----------
    labeled_ex : list of shape (n_samples,) containing both the already labeled examples and the new ones.
    query_list : list of shape (n_samples,) containing both the already queried examples and the new ones.
    """
    n = np.shape(data)[0]
    # Probability that the oracle answers for each point; depends on `case` (PO vs IO).
    user_uncertainty = compute_user_uncertainty(data, real_anomalies, tmp_cont, case)
    # Unsupervised prior: inverted IsolationForest scores shifted to be non-negative.
    prior_detector = IsolationForest(contamination = tmp_cont, behaviour='new', random_state = 331).fit(data)
    train_prior = prior_detector.decision_function(data) * -1
    train_prior = train_prior + abs(min(train_prior))
    detector = SSDO(k=3, alpha=2.3, unsupervised_prior='other', contamination = tmp_cont)
    # Keep querying until k labels are collected or every point has been queried.
    while int(sum(labeled_ex)) < k and len(query_list) < n:
        # NOTE(review): labels are negated before fitting — presumably SSDO encodes
        # "known normal" as -1; confirm against the SSDO implementation.
        detector.fit(data, np.negative(labeled_ex), prior = train_prior)
        score = detector.predict_proba(data, prior = train_prior, method='squash')[:, 0]
        # Distance from the 0.5 decision boundary: small value = most uncertain example.
        score = [abs(x-0.5) for x in score]
        #Sort the data according to their uncertainty score
        # NOTE(review): "i not in query_list" is a linear scan per candidate (O(n^2) overall).
        index = sorted([[x,i] for i,x in enumerate(score) if i not in query_list], reverse = False)
        idx_query_point = index[0][1] #choose the first most uncertain example and query it
        query_list.append(idx_query_point)
        uncertainty_score = user_uncertainty[idx_query_point]
        # Bernoulli draw: the oracle replies with this point's answer probability.
        reply = np.random.binomial(1, uncertainty_score)
        if reply:
            labeled_ex[idx_query_point] = +1 #if the user says that it's normal, than update the label
            #otherwise put it in the ranking but the model will be trained on the same dataset
    return labeled_ex, query_list
| 23,627 | 7,737 |
#!/usr/bin/python3
# Copyright (c) 2021 Walbrix Corporation
# https://github.com/wbrxcorp/genpack/blob/main/LICENSE
import os,re,argparse,subprocess,glob,json,uuid
import importlib.resources
import urllib.request
import initlib,init,util
import qemu
from sudo import sudo,Tee
BASE_URL="http://ftp.iij.ad.jp/pub/linux/gentoo/"
CONTAINER_NAME="genpack-%d" % os.getpid()
def decode_utf8(bin):
    """Decode a UTF-8 encoded bytes object into a str."""
    return str(bin, "utf-8")
def encode_utf8(str):
    """Encode a str into UTF-8 bytes."""
    return bytes(str, "utf-8")
def url_readlines(url):
    """Fetch `url` and return an iterable of its lines, each decoded as UTF-8."""
    request = urllib.request.Request(url)
    with urllib.request.urlopen(request) as response:
        raw_lines = response.readlines()
    return map(decode_utf8, raw_lines)
def get_latest_stage3_tarball_url(base,arch):
    """Resolve the URL of the newest systemd stage3 tarball for `arch` under the
    Gentoo mirror `base` by parsing the autobuilds "latest" index.
    Returns None when no entry is found."""
    if not base.endswith('/'): base += '/'
    # Translate uname-style arch names to Gentoo's naming.
    gentoo_arch = {"x86_64": "amd64", "aarch64": "arm64"}.get(arch, arch)
    autobuilds = base + "releases/" + gentoo_arch + "/autobuilds/"
    for raw in url_readlines(autobuilds + "latest-stage3-" + gentoo_arch + "-systemd.txt"):
        entry = re.sub(r'#.*$', "", raw.strip())
        if entry == "": continue
        # The index line is "<relative-path> <size>"; anything shorter is noise.
        fields = entry.split(" ")
        if len(fields) < 2: continue
        return autobuilds + fields[0]
    return None # not found
def get_content_length(url):
    """Return the Content-Length (int) reported by a HEAD request to `url`,
    or None when the server does not report one."""
    head_request = urllib.request.Request(url, method="HEAD")
    with urllib.request.urlopen(head_request) as response:
        length = response.info().get("Content-Length")
    return int(length) if length is not None else None
def lower_exec(lower_dir, cache_dir, portage_dir, cmdline, nspawn_opts=[]):
    """Run `cmdline` as root inside the stage3 root at `lower_dir` via systemd-nspawn,
    binding the shared cache at /var/cache and the portage tree (read-only) into the container."""
    container_args = ["systemd-nspawn", "-q", "-M", CONTAINER_NAME, "-D", lower_dir,
                      "--bind=%s:/var/cache" % os.path.abspath(cache_dir),
                      "--capability=CAP_MKNOD,CAP_SYS_ADMIN",
                      "--bind-ro=%s:/var/db/repos/gentoo" % os.path.abspath(portage_dir)]
    subprocess.check_call(sudo(container_args + nspawn_opts + cmdline))
def scan_files(dir):
    """Recursively list all files under `dir` (following symlinks).
    Returns (relative_paths, newest_mtime) where paths are relative to `dir`
    and newest_mtime is the largest st_mtime seen (0 if there are no files)."""
    collected = []
    latest_mtime = 0
    prefix_len = len(dir) + 1  # strip "<dir>/" from each walked root
    for root, _subdirs, names in os.walk(dir, followlinks=True):
        for name in names:
            latest_mtime = max(latest_mtime, os.stat(os.path.join(root, name)).st_mtime)
            collected.append(os.path.join(root[prefix_len:], name))
    return (collected, latest_mtime)
def link_files(srcdir, dstdir):
    """Hard-link every file under `srcdir` to the same relative path under `dstdir`,
    replacing existing regular files. Returns the newest source file mtime."""
    relative_paths, latest_mtime = scan_files(srcdir)
    for rel in relative_paths:
        source = os.path.join(srcdir, rel)
        target = os.path.join(dstdir, rel)
        os.makedirs(os.path.dirname(target), exist_ok=True)
        # An existing link/file must be removed first or os.link would fail.
        if os.path.isfile(target): os.unlink(target)
        os.link(source, target)
    return latest_mtime
def sync_files(srcdir, dstdir, exclude=None):
    """Copy every file under `srcdir` into `dstdir` with rsync (run as root, chowned
    to root:root), skipping relative paths that match the `exclude` regex.

    Returns the newest source file mtime.
    FIX: removed the unused `src`/`dst` locals the original recomputed on every iteration.
    """
    files_to_sync, newest_file = scan_files(srcdir)
    for f in files_to_sync:
        if exclude is not None and re.match(exclude, f): continue
        # The "/./" marker makes rsync -R (--relative) reproduce only the part of the
        # path after the dot inside dstdir.
        subprocess.check_call(sudo(["rsync", "-k", "-R", "--chown=root:root", os.path.join(srcdir, ".", f), dstdir]))
    return newest_file
def get_newest_mtime(srcdir):
    """Return the newest modification time among all files under `srcdir`."""
    _files, newest = scan_files(srcdir)
    return newest
def put_resource_file(gentoo_dir, module, filename, dst_filename=None, make_executable=False):
    """Copy a resource file packaged with `module` into the root filesystem at
    `gentoo_dir` (written as root via Tee), optionally making it executable.

    dst_filename : destination path relative to gentoo_dir; defaults to `filename`.
    FIX: importlib.resources.read_binary() is deprecated since Python 3.11 and
    removed in 3.13; prefer the files() API with a fallback for older interpreters.
    """
    dst_path = os.path.join(gentoo_dir, dst_filename if dst_filename is not None else filename)
    try:
        data = importlib.resources.files(module).joinpath(filename).read_bytes()
    except AttributeError:  # Python < 3.9: files() not available
        data = importlib.resources.read_binary(module, filename)
    with Tee(dst_path) as f:
        f.write(data)
    if make_executable: subprocess.check_output(sudo(["chmod", "+x", dst_path]))
def load_json_file(path):
    """Parse the JSON file at `path`; return None when the file does not exist."""
    if not os.path.isfile(path):
        return None
    with open(path) as fp:
        return json.load(fp)
def set_gitignore(workdir):
    """Create workdir/.gitignore ignoring everything ("/*") unless it already exists."""
    gitignore_path = os.path.join(workdir, ".gitignore")
    if os.path.exists(gitignore_path):
        return
    with open(gitignore_path, "w") as f:
        f.write("/*")
def extract_portage(base, workdir):
    """Download (if stale) and unpack the latest portage snapshot into workdir/portage.

    base : Gentoo mirror URL ending (or not) with '/'; the snapshot is fetched from
        base + "snapshots/portage-latest.tar.xz".
    A ".done" marker inside the extracted tree records a completed extraction; a
    half-extracted tree is moved into workdir/trash instead of being deleted in place.
    """
    portage_tarball_url = base + "snapshots/portage-latest.tar.xz"
    portage_tarball = os.path.join(workdir, "portage.tar.xz")
    portage_dir = os.path.join(workdir, "portage")
    trash_dir = os.path.join(workdir, "trash")
    done_file = os.path.join(portage_dir, ".done")
    os.makedirs(workdir, exist_ok=True)
    set_gitignore(workdir)
    # Re-download when missing or when the local size differs from the server's
    # Content-Length (cheap staleness check, no checksum).
    if not os.path.isfile(portage_tarball) or os.path.getsize(portage_tarball) != get_content_length(portage_tarball_url):
        subprocess.check_call(["wget", "-O", portage_tarball, portage_tarball_url])
        # A fresh tarball invalidates the previous extraction.
        if os.path.exists(done_file): os.remove(done_file)
    # An existing tree without the marker is stale/incomplete: move it to trash.
    if os.path.isdir(portage_dir) and not os.path.exists(done_file):
        os.makedirs(trash_dir, exist_ok=True)
        os.rename(portage_dir, os.path.join(trash_dir, str(uuid.uuid4())))
    if not os.path.isdir(portage_dir):
        os.makedirs(portage_dir, exist_ok=True)
        print("Extracting portage...")
        # -p preserves permissions; --strip-components=1 drops the top-level dir.
        subprocess.check_call(sudo(["tar", "xpf", portage_tarball, "--strip-components=1", "-C", portage_dir]))
        # Touch the marker to record a successful extraction.
        with open(done_file, "w") as f:
            pass
def main(base, workdir, arch, sync, bash, artifact, outfile=None, profile=None):
    """Build (or refresh) the Gentoo-based profile root for `arch`/`profile`, then build
    the requested `artifact` on top of it and pack it into a squashfs image.

    base : Gentoo mirror URL. workdir : scratch directory for tarballs/roots/trash.
    sync : run "emerge --sync" and force a rebuild. bash : drop into a shell in the
    profile root before building. outfile : output image path; "-" boots the artifact
    instead of packing. Returns the output file path, or None when nothing is packed.
    """
    artifact_dir = os.path.join(".", "artifacts", artifact)
    build_json = load_json_file(os.path.join(artifact_dir, "build.json"))
    if profile is None:
        profile = "default"
        # build.json may pin the profile when the caller did not choose one.
        if build_json and "profile" in build_json: profile = build_json["profile"]
    stage3_tarball_url = get_latest_stage3_tarball_url(base,arch)
    arch_workdir = os.path.join(workdir, arch)
    os.makedirs(arch_workdir, exist_ok=True)
    set_gitignore(workdir)
    stage3_tarball = os.path.join(arch_workdir, "stage3.tar.xz")
    portage_dir = os.path.join(workdir, "portage")
    profile_workdir = os.path.join(arch_workdir, "profiles", profile)
    cache_dir = os.path.join(profile_workdir, "cache")
    gentoo_dir = os.path.join(profile_workdir, "root")
    repos_dir = os.path.join(gentoo_dir, "var/db/repos/gentoo")
    usr_local_dir = os.path.join(gentoo_dir, "usr/local")
    trash_dir = os.path.join(workdir, "trash")
    # Re-download stage3 when missing or its size differs from the server's.
    if not os.path.isfile(stage3_tarball) or os.path.getsize(stage3_tarball) != get_content_length(stage3_tarball_url):
        subprocess.check_call(["wget", "-O", stage3_tarball, stage3_tarball_url])
    stage3_done_file = os.path.join(gentoo_dir, ".stage3-done")
    stage3_done_file_time = os.stat(stage3_done_file).st_mtime if os.path.isfile(stage3_done_file) else None
    # Re-extract when there is no marker or a newer tarball was downloaded.
    if not stage3_done_file_time or stage3_done_file_time < os.stat(stage3_tarball).st_mtime:
        if os.path.isdir(gentoo_dir):
            # Move the stale root aside instead of deleting it in place.
            os.makedirs(trash_dir, exist_ok=True)
            os.rename(gentoo_dir, os.path.join(trash_dir, str(uuid.uuid4())))
        os.makedirs(repos_dir, exist_ok=True)
        print("Extracting stage3...")
        subprocess.check_call(sudo(["tar", "xpf", stage3_tarball, "--strip-components=1", "-C", gentoo_dir]))
        kernel_config_dir = os.path.join(gentoo_dir, "etc/kernels")
        subprocess.check_call(sudo(["mkdir", "-p", kernel_config_dir]))
        # Let the unprivileged host user edit config/source dirs inside the root.
        subprocess.check_call(sudo(["chmod", "-R", "o+rw",
            os.path.join(gentoo_dir, "etc/portage"), os.path.join(gentoo_dir, "usr/src"),
            os.path.join(gentoo_dir, "var/db/repos"), os.path.join(gentoo_dir, "var/cache"),
            kernel_config_dir, usr_local_dir]))
        # Sandboxing does not work under nspawn; disable it for emerge.
        with open(os.path.join(gentoo_dir, "etc/portage/make.conf"), "a") as f:
            f.write('FEATURES="-sandbox -usersandbox -network-sandbox"\n')
        with open(stage3_done_file, "w") as f:
            pass
    # Overlay the profile's files onto the root; remember the newest source mtime.
    newest_file = link_files(os.path.join(".", "profiles", profile), gentoo_dir)
    # remove irrelevant arch dependent settings
    for i in glob.glob(os.path.join(gentoo_dir, "etc/portage/package.*/arch-*")):
        if not i.endswith("-" + arch): os.unlink(i)
    # move files under /var/cache
    os.makedirs(cache_dir, exist_ok=True)
    subprocess.check_call(sudo(["rsync", "-a", "--remove-source-files", os.path.join(gentoo_dir,"var/cache/"), cache_dir]))
    # Install helper sources/scripts shipped with this tool into the root.
    put_resource_file(gentoo_dir, initlib, "initlib.cpp")
    put_resource_file(gentoo_dir, initlib, "initlib.h")
    put_resource_file(gentoo_dir, initlib, "fat.cpp")
    put_resource_file(gentoo_dir, initlib, "fat.h")
    put_resource_file(gentoo_dir, init, "init.cpp")
    put_resource_file(gentoo_dir, init, "init.h")
    put_resource_file(gentoo_dir, util, "build-kernel.py", "usr/local/sbin/build-kernel", True)
    put_resource_file(gentoo_dir, util, "with-mysql.py", "usr/local/sbin/with-mysql", True)
    put_resource_file(gentoo_dir, util, "download.py", "usr/local/bin/download", True)
    put_resource_file(gentoo_dir, util, "install-system-image", "usr/sbin/install-system-image", True)
    put_resource_file(gentoo_dir, util, "expand-rw-layer", "usr/sbin/expand-rw-layer", True)
    put_resource_file(gentoo_dir, util, "do-with-lvm-snapshot", "usr/sbin/do-with-lvm-snapshot", True)
    put_resource_file(gentoo_dir, util, "rpmbootstrap.py", "usr/sbin/rpmbootstrap", True)
    put_resource_file(gentoo_dir, util, "genbootstrap.py", "usr/sbin/genbootstrap", True)
    put_resource_file(gentoo_dir, util, "genpack-install.cpp", "usr/src/genpack-install.cpp", True)
    if sync: lower_exec(gentoo_dir, cache_dir, portage_dir, ["emerge", "--sync"])
    if bash:
        print("Entering shell... 'exit 1' to abort the process.")
        lower_exec(gentoo_dir, cache_dir, portage_dir, ["bash"])
    done_file = os.path.join(gentoo_dir, ".done")
    done_file_time = os.stat(done_file).st_mtime if os.path.isfile(done_file) else None
    portage_time = os.stat(os.path.join(portage_dir, "metadata/timestamp")).st_mtime
    newest_file = max(newest_file, portage_time)
    # Rebuild the profile root when any input is newer than the last build,
    # after a sync, or when only the profile itself is requested.
    if (not done_file_time or newest_file > done_file_time or sync or artifact == "none"):
        lower_exec(gentoo_dir, cache_dir, portage_dir, ["emerge", "-uDN", "-bk", "--binpkg-respect-use=y",
            "system", "nano", "gentoolkit", "repoman",
            "strace", "vim", "tcpdump", "netkit-telnetd"])
        # Profile-provided post-emerge hook, if any.
        if os.path.isfile(os.path.join(gentoo_dir, "build.sh")):
            lower_exec(gentoo_dir, cache_dir, portage_dir, ["/build.sh"])
        # Rebuild preserved libs, drop orphans, merge config updates, clean caches.
        lower_exec(gentoo_dir, cache_dir, portage_dir, ["sh", "-c", "emerge -bk --binpkg-respect-use=y @preserved-rebuild && emerge --depclean && etc-update --automode -5 && eclean-dist -d && eclean-pkg -d"])
        with open(done_file, "w") as f:
            pass
    if artifact == "none": return None # no build artifact
    elif artifact == "bash":
        lower_exec(gentoo_dir, cache_dir, portage_dir, ["bash"])
        return None
    #else
    ##### building profile done
    ##### build artifact if necessary
    upper_dir = os.path.join(arch_workdir, "artifacts", artifact)
    genpack_packages_file = os.path.join(upper_dir, ".genpack", "packages") # use its timestamp as build date
    # Rebuild the artifact when the profile root, the artifact sources, or the
    # package overlays are newer than the last artifact build.
    if not os.path.exists(genpack_packages_file) or os.stat(genpack_packages_file).st_mtime < max(os.stat(done_file).st_mtime, get_newest_mtime(artifact_dir), get_newest_mtime(os.path.join(".", "packages"))):
        if os.path.isdir(upper_dir):
            os.makedirs(trash_dir, exist_ok=True)
            subprocess.check_call(sudo(["mv", upper_dir, os.path.join(trash_dir, str(uuid.uuid4()))]))
        build_artifact(profile, artifact, gentoo_dir, cache_dir, upper_dir, build_json)
    # final output
    if outfile is None:
        if build_json and "outfile" in build_json: outfile = build_json["outfile"]
        else: outfile = "%s-%s.squashfs" % (artifact, arch)
    if outfile == "-":
        # NOTE(review): "-" appears to mean "boot the built artifact interactively
        # instead of packing it" — confirm against the CLI help.
        subprocess.check_call(sudo(["systemd-nspawn", "-M", CONTAINER_NAME, "-q", "-D", upper_dir, "--network-veth", "-b"]))
        return None
    #else
    # Pack only when the artifact is newer than the existing image.
    if not os.path.isfile(outfile) or os.stat(genpack_packages_file).st_mtime > os.stat(outfile).st_mtime:
        compression = build_json["compression"] if build_json and "compression" in build_json else "gzip"
        pack(upper_dir, outfile, compression)
    return outfile
def build_artifact(profile, artifact, gentoo_dir, cache_dir, upper_dir, build_json):
    """Assemble the artifact filesystem in `upper_dir` from the built profile root.

    Resolves the package list (base set + build.json "packages") and their runtime
    dependencies, copies the owned files plus a fixed set of extras, applies
    per-package and per-artifact overlays/build hooks inside nspawn, enables
    systemd services, and writes the .genpack metadata directory.
    """
    # Base packages every artifact receives; build.json may extend this list.
    artifact_pkgs = ["gentoo-systemd-integration", "util-linux","timezone-data","bash","openssh", "coreutils", "procps", "net-tools",
        "iproute2", "iputils", "dbus", "python", "rsync", "tcpdump", "ca-certificates","e2fsprogs"]
    if build_json and "packages" in build_json:
        if not isinstance(build_json["packages"], list): raise Exception("packages must be list")
        #else
        artifact_pkgs += build_json["packages"]
    pkg_map = collect_packages(gentoo_dir)
    # Expand to the full closure of runtime dependencies (versioned category/name).
    pkgs = scan_pkg_dep(gentoo_dir, pkg_map, artifact_pkgs)
    packages_dir = os.path.join(".", "packages")
    files = process_pkgs(gentoo_dir, packages_dir, pkgs)
    # Kernel/initramfs/modules are not package-owned; add them when present.
    if os.path.isfile(os.path.join(gentoo_dir, "boot/kernel")): files.append("/boot/kernel")
    if os.path.isfile(os.path.join(gentoo_dir, "boot/initramfs")): files.append("/boot/initramfs")
    if os.path.isdir(os.path.join(gentoo_dir, "lib/modules")): files.append("/lib/modules/.")
    # Skeleton directories and essential config/tools ("/x/." means deep copy).
    files += ["/dev/.", "/proc", "/sys", "/root", "/home", "/tmp", "/var/tmp", "/var/run", "/run", "/mnt"]
    files += ["/etc/passwd", "/etc/group", "/etc/shadow", "/etc/profile.env"]
    files += ["/etc/ld.so.conf", "/etc/ld.so.conf.d/."]
    files += ["/usr/lib/locale/locale-archive"]
    files += ["/bin/sh", "/bin/sed", "/usr/bin/awk", "/usr/bin/python", "/usr/bin/vi", "/usr/bin/nano",
        "/bin/tar", "/usr/bin/unzip",
        "/usr/bin/wget", "/usr/bin/curl", "/usr/bin/telnet",
        "/usr/bin/make", "/usr/bin/diff", "/usr/bin/strings", "/usr/bin/strace",
        "/usr/bin/find", "/usr/bin/xargs", "/usr/bin/less"]
    files += ["/sbin/iptables", "/sbin/ip6tables", "/sbin/iptables-restore", "/sbin/ip6tables-restore", "/sbin/iptables-save", "/sbin/ip6tables-save"]
    if build_json and "files" in build_json:
        if not isinstance(build_json["files"], list): raise Exception("files must be list")
        #else
        files += build_json["files"]
    os.makedirs(os.path.dirname(upper_dir), exist_ok=True)
    # upper_dir itself is created root-owned.
    subprocess.check_call(sudo(["mkdir", upper_dir]))
    print("Copying files to artifact dir...")
    copy(gentoo_dir, upper_dir, files)
    copyup_gcc_libs(gentoo_dir, upper_dir)
    remove_root_password(upper_dir)
    make_ld_so_conf_latest(upper_dir)
    create_default_iptables_rules(upper_dir)
    set_locale_to_envvar(upper_dir)
    # per-package setup
    newest_pkg_file = 0
    for pkg in pkgs:
        pkg_wo_ver = strip_ver(pkg)
        package_dir = os.path.join(packages_dir, pkg_wo_ver)
        if not os.path.isdir(package_dir): continue
        #else
        print("Processing package %s..." % pkg_wo_ver)
        # Overlay the package dir, excluding CONTENTS manifests.
        newest_pkg_file = max(newest_pkg_file, sync_files(package_dir, upper_dir, r"^CONTENTS(\.|$)"))
        # An overlaid /pkgbuild hook runs once inside the container and removes itself.
        if os.path.isfile(os.path.join(upper_dir, "pkgbuild")):
            subprocess.check_call(sudo(["systemd-nspawn", "-q", "-M", CONTAINER_NAME, "-D", gentoo_dir, "--overlay=+/:%s:/" % os.path.abspath(upper_dir),
                "--bind=%s:/var/cache" % os.path.abspath(cache_dir),
                "-E", "PROFILE=%s" % profile, "-E", "ARTIFACT=%s" % artifact,
                "--capability=CAP_MKNOD",
                "sh", "-c", "/pkgbuild && rm -f /pkgbuild" ]))
    # enable services
    services = ["sshd","systemd-networkd", "systemd-resolved"]
    if build_json and "services" in build_json:
        if not isinstance(build_json["services"], list): raise Exception("services must be list")
        #else
        services += build_json["services"]
    enable_services(upper_dir, services)
    # artifact specific setup
    artifact_dir = os.path.join(".", "artifacts", artifact)
    # NOTE(review): newest_artifact_file is never used afterwards.
    newest_artifact_file = max(newest_pkg_file, sync_files(artifact_dir, upper_dir))
    if os.path.isfile(os.path.join(upper_dir, "build")):
        print("Building artifact...")
        # Run the artifact's /build script inside the overlaid container.
        subprocess.check_call(sudo(["systemd-nspawn", "-q", "-M", CONTAINER_NAME, "-D", gentoo_dir,
            "--overlay=+/:%s:/" % os.path.abspath(upper_dir),
            "--bind=%s:/var/cache" % os.path.abspath(cache_dir),
            "/build" ]))
    else:
        print("Artifact build script not found.")
    # Drop build-time-only files from the final image.
    subprocess.check_call(sudo(["rm", "-rf", os.path.join(upper_dir, "build"), os.path.join(upper_dir,"build.json"), os.path.join(upper_dir,"usr/src")]))
    # generate metadata
    genpack_metadata_dir = os.path.join(upper_dir, ".genpack")
    subprocess.check_call(sudo(["mkdir", "-p", genpack_metadata_dir]))
    # Temporarily world-writable so the unprivileged process can write metadata.
    subprocess.check_call(sudo(["chmod", "o+rwx", genpack_metadata_dir]))
    with open(os.path.join(genpack_metadata_dir, "profile"), "w") as f:
        f.write(profile)
    with open(os.path.join(genpack_metadata_dir, "artifact"), "w") as f:
        f.write(artifact)
    with open(os.path.join(genpack_metadata_dir, "packages"), "w") as f:
        for pkg in pkgs:
            f.write(pkg + '\n')
    # Hand ownership back to root and restore sane permissions.
    subprocess.check_call(sudo(["chown", "-R", "root.root", genpack_metadata_dir]))
    subprocess.check_call(sudo(["chmod", "755", genpack_metadata_dir]))
def strip_ver(pkgname):
    """Strip a trailing "-rN" revision and the version suffix from a Gentoo
    package name, e.g. "app-editors/vim-8.2.0-r1" -> "app-editors/vim"."""
    pkgname = re.sub(r'-r[0-9]+?$', "", pkgname) # remove rev part
    dash = pkgname.rfind('-')
    if dash < 0:
        return pkgname
    # Only a dash after the category separator that is followed by a digit
    # introduces a version suffix.
    after_dash = pkgname[dash + 1]
    if pkgname.find('/') < dash and '0' <= after_dash <= '9':
        return pkgname[:dash]
    return pkgname
def collect_packages(gentoo_dir):
    """Scan var/db/pkg (installed-package database) under `gentoo_dir` and build a
    lookup map: both "name" and "category/name" (version stripped) map to the list
    of installed "category/name-version" entries."""
    pkg_map = {}
    db_dir = os.path.join(gentoo_dir, "var/db/pkg")
    for category in os.listdir(db_dir):
        cat_dir = os.path.join(db_dir, category)
        if not os.path.isdir(cat_dir): continue
        #else
        for pn in os.listdir(cat_dir):
            if not os.path.isdir(os.path.join(cat_dir, pn)): continue
            #else
            cat_pn = "%s/%s" % (category, pn)
            pn_wo_ver = strip_ver(pn)
            # Register under both the bare name and the category-qualified name.
            pkg_map.setdefault(pn_wo_ver, []).append(cat_pn)
            pkg_map.setdefault("%s/%s" % (category, pn_wo_ver), []).append(cat_pn)
    return pkg_map
def get_package_set(gentoo_dir, set_name):
    """Read the package atoms listed in etc/portage/sets/<set_name> under
    `gentoo_dir`, dropping comments and blank lines."""
    set_path = os.path.join(gentoo_dir, "etc/portage/sets", set_name)
    entries = []
    with open(set_path) as f:
        for raw in f:
            entry = re.sub(r'#.*', "", raw).strip()
            if entry:
                entries.append(entry)
    return entries
def split_rdepend(line):
    """Split the first dependency token off an RDEPEND string.

    Returns (token, rest) where rest is None when nothing follows. A leading
    "|| ( ... )" any-of group (with balanced nested parentheses) is kept
    together as a single token."""
    if line.startswith("|| ( "):
        # Scan past the matching ')' of the any-of group, tracking nesting depth.
        pos = 5
        depth = 0
        while pos < len(line):
            c = line[pos]
            if c == '(':
                depth += 1
            elif c == ')':
                if depth == 0:
                    pos += 1
                    break
                depth -= 1
            pos += 1
        remainder = line[pos:].strip()
        return (line[:pos], None if remainder == "" else remainder)
    #else: plain token — split on the first space only
    parts = line.split(' ', 1)
    return (parts[0], parts[1]) if len(parts) == 2 else (parts[0], None)
def parse_rdepend_line(line, make_optional=False):
    """Extract the set of package names referenced by an RDEPEND string.

    Version operators, slot/use decorations and versions are stripped; blockers
    ("!pkg") are dropped; members of "|| ( ... )" any-of groups are recursed into
    with make_optional=True and returned prefixed with '?'."""
    tokens = []
    remaining = line
    while remaining is not None and remaining.strip() != "":
        token, remaining = split_rdepend(remaining)
        tokens.append(token)
    names = set()
    for token in tokens:
        group = re.match(r"\|\| \( (.+) \)", token)
        if group:
            # Any-of group: every member is optional.
            names |= parse_rdepend_line(group.group(1), True)
            continue
        if token[0] == '!': continue  # blocker, not a dependency
        if token[0] == '~': token = token[1:]
        #else
        # Strip version operator, "[use]" suffix, ":slot" suffix, then the version.
        bare = strip_ver(re.sub(r':.+$', "", re.sub(r'\[.+\]$', "", re.sub(r'^(<=|>=|=|<|>)', "", token))))
        names.add('?' + bare if make_optional else bare)
    return names
def scan_pkg_dep(gentoo_dir, pkg_map, pkgnames, pkgs = None):
    """Recursively resolve `pkgnames` and their runtime deps to installed packages.

    pkg_map : name -> ["category/name-version", ...] as built by collect_packages.
    pkgnames: names; "@set" expands a portage set, "?name" marks the name optional
        (silently skipped when not installed).
    pkgs : accumulator set, shared across recursive calls.
    Returns the set of "category/name-version" strings.
    Raises BaseException when a non-optional package is not installed.
    """
    if pkgs is None: pkgs = set()
    for pkgname in pkgnames:
        # "@set": expand the portage set file and recurse.
        if pkgname[0] == '@':
            scan_pkg_dep(gentoo_dir, pkg_map, get_package_set(gentoo_dir, pkgname[1:]), pkgs)
            continue
        optional = False
        if pkgname[0] == '?':
            optional = True
            pkgname = pkgname[1:]
        if pkgname not in pkg_map:
            if optional: continue
            else: raise BaseException("Package %s not found" % pkgname)
        #else
        for cat_pn in pkg_map[pkgname]:
            # NOTE(review): cat_pn_wo_ver is computed but never used.
            cat_pn_wo_ver = strip_ver(cat_pn)
            if cat_pn in pkgs: continue # already exists
            pkgs.add(cat_pn) # add self
            # Follow the package's recorded runtime dependencies.
            rdepend_file = os.path.join(gentoo_dir, "var/db/pkg", cat_pn, "RDEPEND")
            if os.path.isfile(rdepend_file):
                with open(rdepend_file) as f:
                    line = f.read().strip()
                    if len(line) > 0:
                        rdepend_pkgnames = parse_rdepend_line(line)
                        if len(rdepend_pkgnames) > 0: scan_pkg_dep(gentoo_dir, pkg_map, rdepend_pkgnames, pkgs)
    return pkgs
def is_path_excluded(path):
    """Return True when `path` should be omitted from the artifact:
    runtime/lock dirs, docs, man/info pages, headers, caches, static libraries,
    python test suites and regenerable glib caches."""
    excluded_prefixes = ("/run/", "/var/run/", "/var/lock/", "/usr/share/man/",
                         "/usr/share/doc/", "/usr/share/gtk-doc/", "/usr/share/info/",
                         "/usr/include/", "/var/cache/")
    if path.startswith(excluded_prefixes):
        return True
    excluded_patterns = [r'^/usr/lib/python[0-9\.]+?/test/', r'\.a$',
                         r"\/gschemas.compiled$", r"\/giomodule.cache$"]
    return any(re.search(pattern, path) for pattern in excluded_patterns)
def process_pkgs(gentoo_dir, packages_dir, pkgs):
    """Collect the installed-file lists (CONTENTS) of *pkgs* into one flat list.

    A CONTENTS file placed at packages_dir/<pkg-without-version>/CONTENTS
    overrides the one recorded under var/db/pkg.  Only regular files ("obj")
    and symlinks ("sym") are kept, and excluded paths are dropped.

    Returns:
        list[str]: absolute paths of the files belonging to the packages.
    """
    files = []
    for pkg in pkgs:
        contents_file = os.path.join(gentoo_dir, "var/db/pkg" , pkg, "CONTENTS")
        overridden_contents_file = os.path.join(packages_dir, strip_ver(pkg), "CONTENTS")
        if os.path.isfile(os.path.join(overridden_contents_file)):
            contents_file = overridden_contents_file
        if not os.path.isfile(contents_file): continue
        #else
        with open(contents_file) as f:
            while line := f.readline():
                # strip trailing comments and whitespace
                line = re.sub(r'#.*$', "", line).strip()
                if line == "": continue
                file_to_append = None
                if line.startswith("obj "):
                    # "obj <path> <md5> <mtime>" -> keep just the path
                    file_to_append = re.sub(r' [0-9a-f]+ [0-9]+$', "", line[4:])
                elif line.startswith("sym "):
                    # "sym <path> -> <target>" -> keep just the link path
                    file_to_append = re.sub(r' -> .+$', "", line[4:])
                if file_to_append is not None and not is_path_excluded(file_to_append): files.append(file_to_append)
    return files
def copy(gentoo_dir, upper_dir, files):
    """Copy *files* from gentoo_dir into upper_dir using rsync.

    Entries ending in "/." are deep-copied (recursive rsync); everything
    else is shallow-copied, and the targets of in-tree symlinks are copied
    along with the links so they do not dangle.

    Raises:
        RuntimeError: if either rsync invocation exits non-zero.
        (RuntimeError is an Exception, so callers catching BaseException,
        as the original raised, still work.)
    """
    if not gentoo_dir.endswith('/'): gentoo_dir += '/'
    # files / dirs to shallow copy
    rsync = subprocess.Popen(sudo(["rsync", "-lptgoD", "--keep-dirlinks", "--files-from=-", gentoo_dir, upper_dir]), stdin=subprocess.PIPE)
    for f in files:
        if f.endswith("/."): continue
        f_wo_leading_slash = re.sub(r'^/', "", f)
        rsync.stdin.write(encode_utf8(f_wo_leading_slash + '\n'))
        src_path = os.path.join(gentoo_dir, f_wo_leading_slash)
        if os.path.islink(src_path):
            # also ship the link's target (if it exists in the tree) so the
            # copied symlink resolves inside upper_dir
            link = os.readlink(src_path)
            target = link[1:] if link[0] == '/' else os.path.join(os.path.dirname(f_wo_leading_slash), link)
            if os.path.exists(os.path.join(gentoo_dir, target)):
                rsync.stdin.write(encode_utf8(target + '\n'))
    rsync.stdin.close()
    if rsync.wait() != 0: raise RuntimeError("rsync returned error code.")
    # dirs to deep copy (the unused src_path computation from the original
    # loop was removed)
    rsync = subprocess.Popen(sudo(["rsync", "-ar", "--keep-dirlinks", "--files-from=-", gentoo_dir, upper_dir]), stdin=subprocess.PIPE)
    for f in files:
        if not f.endswith("/."): continue
        rsync.stdin.write(encode_utf8(re.sub(r'^/', "", f) + '\n'))
    rsync.stdin.close()
    if rsync.wait() != 0: raise RuntimeError("rsync returned error code.")
def copyup_gcc_libs(gentoo_dir, upper_dir):
    # Inside a throwaway container with upper_dir overlaid on the tree, touch
    # the gcc runtime libs (so the overlay copies them up) and run ldconfig.
    subprocess.check_call(sudo(["systemd-nspawn", "-q", "-M", CONTAINER_NAME, "-D", gentoo_dir, "--overlay=+/:%s:/" % os.path.abspath(upper_dir), "sh", "-c", "touch -h `gcc --print-file-name=`/*.so.* && ldconfig" ]))
def remove_root_password(root_dir):
    # Replace root's locked password field ("*") with an empty one in
    # etc/shadow, allowing password-less root login in the image.
    subprocess.check_call(sudo(["sed", "-i", r"s/^root:\*:/root::/", os.path.join(root_dir, "etc/shadow") ]))
def make_ld_so_conf_latest(root_dir):
    # Bump etc/ld.so.conf's mtime so the linker cache is considered stale
    # and regenerated on next ldconfig run.
    subprocess.check_call(sudo(["touch", os.path.join(root_dir, "etc/ld.so.conf") ]))
def create_default_iptables_rules(root_dir):
    # Create empty iptables/ip6tables saved-rules files so the firewall
    # services can start without errors on first boot.
    subprocess.check_call(sudo(["touch", os.path.join(root_dir, "var/lib/iptables/rules-save"), os.path.join(root_dir, "var/lib/ip6tables/rules-save")]))
def set_locale_to_envvar(root_dir):
    # Rewrite the static "export LANG=..." line in etc/profile.env so it
    # sources /etc/locale.conf at login instead, making the locale configurable.
    subprocess.check_call(sudo(["sed", "-i", r"s/^export LANG=.\+$/\[ -f \/etc\/locale\.conf \] \&\& . \/etc\/locale.conf \&\& export LANG/", os.path.join(root_dir, "etc/profile.env") ]))
def enable_services(root_dir, services):
    """Enable one or more systemd services inside the chroot at *root_dir*.

    Args:
        root_dir: root filesystem to operate on (via systemd-nspawn).
        services: a service name or a list of service names.
    """
    service_list = services if isinstance(services, list) else [services]
    base_cmd = ["systemd-nspawn", "-q", "-M", CONTAINER_NAME, "-D", root_dir, "systemctl", "enable"]
    subprocess.check_call(sudo(base_cmd + service_list))
def pack(upper_dir, outfile, compression="gzip"):
    """Pack *upper_dir* into a squashfs image at *outfile*.

    Args:
        upper_dir: directory tree to pack.
        outfile: output squashfs path; chowned back to the invoking user.
        compression: one of "gzip" (fast, default), "xz" (smallest) or "lzo".

    Raises:
        ValueError: if *compression* is not a supported type.  (ValueError is
            an Exception, so any caller catching BaseException — which the
            original raised — still catches it.)
    """
    cmdline = ["mksquashfs", upper_dir, outfile, "-noappend", "-no-exports"]
    if compression == "xz": cmdline += ["-comp", "xz", "-b", "1M", "-Xbcj", "x86"]
    elif compression == "gzip": cmdline += ["-Xcompression-level", "1"]
    elif compression == "lzo": cmdline += ["-comp", "lzo"]
    else: raise ValueError("Unknown compression type %s" % compression)
    subprocess.check_call(sudo(cmdline))
    # mksquashfs ran via sudo; hand the image back to the invoking user
    subprocess.check_call(sudo(["chown", "%d:%d" % (os.getuid(), os.getgid()), outfile]))
def clean(workdir, arch, profile=None):
    """Delete downloaded and intermediate build artifacts for *arch* under *workdir*."""
    arch_dir = os.path.join(workdir, arch)
    targets = [
        os.path.join(workdir, "portage.tar.xz"),
        os.path.join(arch_dir, "stage3.tar.xz"),
        os.path.join(arch_dir, "profiles"),
        os.path.join(arch_dir, "artifacts"),
    ]
    subprocess.check_call(sudo(["rm", "-rf"] + targets))
if __name__ == "__main__":
    arch = os.uname().machine
    parser = argparse.ArgumentParser()
    parser.add_argument("--base", default=BASE_URL, help="Base URL contains dirs 'releases' 'snapshots'")
    parser.add_argument("--workdir", default="./work", help="Working directory to use")
    parser.add_argument("-o", "--outfile", default=None, help="Output file")
    parser.add_argument("--sync", action="store_true", default=False, help="Run emerge --sync before build gentoo")
    parser.add_argument("--bash", action="store_true", default=False, help="Enter bash before anything")
    parser.add_argument("--qemu", action="store_true", default=False, help="Run generated rootfs using qemu")
    parser.add_argument("--drm", action="store_true", default=False, help="Enable DRM(virgl) when running qemu")
    parser.add_argument("--data-volume", action="store_true", default=False, help="Create data partition when running qemu")
    parser.add_argument("--system-ini", default=None, help="system.ini file when running qemu")
    parser.add_argument("--profile", default=None, help="Override profile")
    parser.add_argument("artifact", default=[], nargs='*', help="Artifacts to build")
    args = parser.parse_args()
    # Default artifact list: every subdirectory of ./artifacts, else "default".
    artifacts = []
    if len(args.artifact) == 0 and os.path.isdir("./artifacts"):
        for i in os.listdir("./artifacts"):
            if os.path.isdir(os.path.join("./artifacts", i)): artifacts.append(i)
    else:
        artifacts += args.artifact
    if len(artifacts) == 0: artifacts.append("default")
    extract_portage(args.base, args.workdir)
    for artifact in artifacts:
        if artifact != "default" and not os.path.isdir(os.path.join("./artifacts", artifact)):
            raise BaseException("No such artifact: %s" % artifact)
        print("Processing artifact %s..." % artifact)
        # BUGFIX: compare the current artifact, not args.artifact — that is a
        # list (nargs='*') and could never equal the string "clean".
        if artifact == "clean":
            clean(args.workdir, arch, args.profile)
        else:
            outfile = main(args.base, args.workdir, arch, args.sync, args.bash, artifact, args.outfile, args.profile)
            if outfile is not None and args.qemu:
                qemu.run(outfile, os.path.join(args.workdir, "qemu.img"), args.drm, args.data_volume, args.system_ini)
    print("Done.")
    # best-effort removal of the shared trash directory
    trash_dir = os.path.join(args.workdir, "trash")
    if os.path.isdir(trash_dir):
        print("Cleaning up...")
        subprocess.check_call(sudo(["rm", "-rf", trash_dir]))
| 28,470 | 10,263 |
from django import forms
class NovoPedido(forms.Form):
    """Form for placing a new order: a required desired date plus an optional time window."""
    data_realizacao_desejada = forms.DateField()  # desired fulfilment date (required)
    horario_inicio = forms.TimeField(required=False)  # optional window start
    horario_fim = forms.TimeField(required=False)  # optional window end
import numpy as np
import pandas as pd
import pytask
from src.config import SRC
from src.read_and_write import read_config
from src.read_and_write import read_names
def create_matchings_history(names):
    """Build the initial (all-zero) matchings-history matrix.

    Args:
        names: table with an "id" column; its values label the matrix axes.

    Returns:
        pd.DataFrame: square integer zero matrix, indexed and columned by the ids.
    """
    ids = names["id"]
    size = len(ids)
    zeros = np.zeros((size, size), dtype=int)
    return pd.DataFrame(zeros, columns=ids, index=ids)
@pytask.mark.preliminaries
@pytask.mark.produces(SRC / "data" / "matchings_history.csv")
def task_preliminaries(produces):  # noqa: D103
    # Bootstrap the matchings history only when no pre-existing history file
    # is configured; otherwise the configured file is used and nothing is
    # written to *produces*.
    config = read_config()
    p = config["matchings_history_path"]
    if p is None:
        names = read_names()
        matchings_history = create_matchings_history(names)
        matchings_history.to_csv(produces)
| 873 | 286 |
# Smoke-test module: prints its own path and a greeting when imported/executed.
print(__file__, 'Hello!')
from .WolframLanguageData import find_symbol
# Mathematica's members are all module level, so there is no need to import them.
# import .Mathematica
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##########################################################
# Generated by scripts/generate_enhance_variable.py
##########################################################
import typing
from typing import Union
from fate_arch.common import Party
from fate_arch.federation.transfer_variable import Variable
from fate_arch.session import get_latest_opened
class _VariableProtocol(object):
    """Interface mixed into Variable: send/receive against explicit party lists."""
    def remote_parties(self,
                       obj,
                       parties: Union[typing.List[Party], Party],
                       suffix: Union[typing.Any, typing.Tuple] = tuple()):
        # implemented by Variable; sends obj to the given parties
        raise NotImplementedError()
    def get_parties(self,
                    parties: Union[typing.List[Party], Party],
                    suffix: Union[typing.Any, typing.Tuple] = tuple()) -> typing.List:
        # implemented by Variable; fetches one object per party
        raise NotImplementedError()
    @staticmethod
    def roles_to_parties(roles):
        # resolve role names (e.g. ["guest"]) to Party objects of the current session
        party_info = get_latest_opened().parties
        return party_info.roles_to_parties(roles)
# noinspection PyAbstractClass
class _ToArbiter(_VariableProtocol):
    """Generated mixin: send to the arbiter role (all arbiters, or the k-th)."""
    def to_arbiter(self, obj, suffix):
        parties = self.roles_to_parties(["arbiter"])
        self.remote_parties(obj, parties, suffix)
    def to_kth_arbiter(self, obj, k, suffix):
        parties = self.roles_to_parties(["arbiter"])
        assert k < len(parties), f"index {k} out of range [0, {len(parties) - 1}]"
        self.remote_parties(obj, parties[k], suffix)
# noinspection PyAbstractClass
class _FromArbiter(_VariableProtocol):
    """Generated mixin: receive from the arbiter role (all arbiters, or the k-th)."""
    def from_arbiter(self, suffix) -> typing.List:
        parties = self.roles_to_parties(["arbiter"])
        return self.get_parties(parties, suffix)
    def get_kth_arbiter(self, k, suffix) -> typing.Any:
        parties = self.roles_to_parties(["arbiter"])
        assert k < len(parties), f"index {k} out of range [0, {len(parties) - 1}]"
        results = self.get_parties(parties[k], suffix)
        return results[0]
# noinspection PyAbstractClass
class _ToGuest(_VariableProtocol):
    """Generated mixin: send to the guest role (all guests, or the k-th)."""
    def to_guest(self, obj, suffix):
        parties = self.roles_to_parties(["guest"])
        self.remote_parties(obj, parties, suffix)
    def to_kth_guest(self, obj, k, suffix):
        parties = self.roles_to_parties(["guest"])
        assert k < len(parties), f"index {k} out of range [0, {len(parties) - 1}]"
        self.remote_parties(obj, parties[k], suffix)
# noinspection PyAbstractClass
class _FromGuest(_VariableProtocol):
    """Generated mixin: receive from the guest role (all guests, or the k-th)."""
    def from_guest(self, suffix) -> typing.List:
        parties = self.roles_to_parties(["guest"])
        return self.get_parties(parties, suffix)
    def get_kth_guest(self, k, suffix) -> typing.Any:
        parties = self.roles_to_parties(["guest"])
        assert k < len(parties), f"index {k} out of range [0, {len(parties) - 1}]"
        results = self.get_parties(parties[k], suffix)
        return results[0]
# noinspection PyAbstractClass
class _ToHost(_VariableProtocol):
    """Generated mixin: send to the host role (all hosts, or the k-th)."""
    def to_host(self, obj, suffix):
        parties = self.roles_to_parties(["host"])
        self.remote_parties(obj, parties, suffix)
    def to_kth_host(self, obj, k, suffix):
        parties = self.roles_to_parties(["host"])
        assert k < len(parties), f"index {k} out of range [0, {len(parties) - 1}]"
        self.remote_parties(obj, parties[k], suffix)
# noinspection PyAbstractClass
class _FromHost(_VariableProtocol):
    """Generated mixin: receive from the host role (all hosts, or the k-th)."""
    def from_host(self, suffix) -> typing.List:
        parties = self.roles_to_parties(["host"])
        return self.get_parties(parties, suffix)
    def get_kth_host(self, k, suffix) -> typing.Any:
        parties = self.roles_to_parties(["host"])
        assert k < len(parties), f"index {k} out of range [0, {len(parties) - 1}]"
        results = self.get_parties(parties[k], suffix)
        return results[0]
# noinspection PyAbstractClass
class _FromArbiterGuest(_FromArbiter, _FromGuest):
    """Generated mixin: receive from both arbiter and guest.

    NOTE(review): the method is named from_guest_host but targets the
    arbiter+guest roles (generated-code quirk); keep the name for callers.
    """
    def from_guest_host(self, suffix) -> typing.List:
        parties = self.roles_to_parties(["arbiter", "guest"])
        return self.get_parties(parties, suffix)
# noinspection PyAbstractClass
class _ToArbiterGuest(_ToArbiter, _ToGuest):
    """Generated mixin: send to both arbiter and guest.

    NOTE(review): the method is named to_guest_host but targets the
    arbiter+guest roles (generated-code quirk); keep the name for callers.
    """
    def to_guest_host(self, obj, suffix):
        parties = self.roles_to_parties(["arbiter", "guest"])
        return self.remote_parties(obj, parties, suffix)
# noinspection PyAbstractClass
class _FromArbiterHost(_FromArbiter, _FromHost):
    """Generated mixin: receive from both arbiter and host.

    NOTE(review): the method is named from_guest_host but targets the
    arbiter+host roles (generated-code quirk); keep the name for callers.
    """
    def from_guest_host(self, suffix) -> typing.List:
        parties = self.roles_to_parties(["arbiter", "host"])
        return self.get_parties(parties, suffix)
# noinspection PyAbstractClass
class _ToArbiterHost(_ToArbiter, _ToHost):
    """Generated mixin: send to both arbiter and host.

    NOTE(review): the method is named to_guest_host but targets the
    arbiter+host roles (generated-code quirk); keep the name for callers.
    """
    def to_guest_host(self, obj, suffix):
        parties = self.roles_to_parties(["arbiter", "host"])
        return self.remote_parties(obj, parties, suffix)
# noinspection PyAbstractClass
class _FromGuestHost(_FromGuest, _FromHost):
    """Generated mixin: receive from both guest and host."""
    def from_guest_host(self, suffix) -> typing.List:
        parties = self.roles_to_parties(["guest", "host"])
        return self.get_parties(parties, suffix)
# noinspection PyAbstractClass
class _ToGuestHost(_ToGuest, _ToHost):
    """Generated mixin: send to both guest and host."""
    def to_guest_host(self, obj, suffix):
        parties = self.roles_to_parties(["guest", "host"])
        return self.remote_parties(obj, parties, suffix)
class A2GVariable(Variable, _FromArbiter, _ToGuest):
    """Transfer variable: arbiter -> guest."""
    def __init__(self, name):
        super().__init__(name, src=('arbiter',), dst=('guest',))
class A2HVariable(Variable, _FromArbiter, _ToHost):
    """Transfer variable: arbiter -> host."""
    def __init__(self, name):
        super().__init__(name, src=('arbiter',), dst=('host',))
class G2AVariable(Variable, _FromGuest, _ToArbiter):
    """Transfer variable: guest -> arbiter."""
    def __init__(self, name):
        super().__init__(name, src=('guest',), dst=('arbiter',))
class G2HVariable(Variable, _FromGuest, _ToHost):
    """Transfer variable: guest -> host."""
    def __init__(self, name):
        super().__init__(name, src=('guest',), dst=('host',))
class H2AVariable(Variable, _FromHost, _ToArbiter):
    """Transfer variable: host -> arbiter."""
    def __init__(self, name):
        super().__init__(name, src=('host',), dst=('arbiter',))
class H2GVariable(Variable, _FromHost, _ToGuest):
    """Transfer variable: host -> guest."""
    def __init__(self, name):
        super().__init__(name, src=('host',), dst=('guest',))
class A2GHVariable(Variable, _FromArbiter, _ToGuestHost):
    """Transfer variable: arbiter -> guest and host."""
    def __init__(self, name):
        super().__init__(name, src=('arbiter',), dst=('guest', 'host'))
class GH2AVariable(Variable, _FromGuestHost, _ToArbiter):
    """Transfer variable: guest and host -> arbiter."""
    def __init__(self, name):
        super().__init__(name, src=('guest', 'host'), dst=('arbiter',))
class G2AHVariable(Variable, _FromGuest, _ToArbiterHost):
    """Transfer variable: guest -> arbiter and host."""
    def __init__(self, name):
        super().__init__(name, src=('guest',), dst=('arbiter', 'host'))
class AH2GVariable(Variable, _FromArbiterHost, _ToGuest):
    """Transfer variable: arbiter and host -> guest."""
    def __init__(self, name):
        super().__init__(name, src=('arbiter', 'host'), dst=('guest',))
class H2AGVariable(Variable, _FromHost, _ToArbiterGuest):
    """Transfer variable: host -> arbiter and guest."""
    def __init__(self, name):
        super().__init__(name, src=('host',), dst=('arbiter', 'guest'))
class AG2HVariable(Variable, _FromArbiterGuest, _ToHost):
    """Transfer variable: arbiter and guest -> host."""
    def __init__(self, name):
        super().__init__(name, src=('arbiter', 'guest'), dst=('host',))
| 7,752 | 2,635 |
import time
import serial
from Sensor import *
class Arduino:
    """One Arduino board on a serial port, exposing its attached sensors."""
    BaudRate = 9600  # NOTE(review): declared but never passed to serial.Serial — confirm intended

    def __init__(self, specs):
        """specs: dict with 'name', 'port' and a list of sensor spec dicts."""
        self.name = specs['name']
        self.port = specs['port']
        self.ser = serial.Serial(self.port, timeout=2)
        self.sensors = [Sensor.makeSensor(s)for s in specs['sensors']]
        self.on = 0  # command state sent to the board (0/1)

    def read(self):
        """Poll the board once; return {sensor name: parsed reading}."""
        # NOTE(review): getCommand() returns an int; pyserial 3.x expects
        # bytes here — confirm which pyserial version this targets.
        self.ser.write(self.getCommand())
        time.sleep(0.2)  # give the board time to reply
        line = self.ser.readline().rstrip()
        result = {}
        for sensor in self.sensors:
            if len(line) > 0:
                # reply appears to be a space-separated list indexed by
                # sensor pin — confirm against the board firmware
                data = line.split(b' ')
                result[sensor.name] = sensor.read(data[sensor.pin])
        return result

    def getCommand(self):
        # command value for the board: 1 = on, 0 = off
        return 1 if self.on else 0
| 740 | 246 |
import logging
import os
from typing import Dict
from controller.invoker.invoker_task_base import TaskBaseInvoker
from controller.utils import utils
from id_definition.error_codes import CTLResponseCode
from proto import backend_pb2
class TaskCopyInvoker(TaskBaseInvoker):
    """Invoker for a dataset-copy task: validates the source repo, then runs `mir copy`."""
    def task_pre_invoke(self, sandbox_root: str, request: backend_pb2.GeneralReq) -> backend_pb2.GeneralResp:
        """Validate that the request names an existing source user/repo."""
        copy_request = request.req_create_task.copy
        logging.info(f"copy_request: {copy_request}")
        if not (copy_request.src_user_id and copy_request.src_repo_id):
            return utils.make_general_response(code=CTLResponseCode.ARG_VALIDATION_FAILED,
                                               message="Invalid src user and/or repo id")
        src_root = os.path.join(sandbox_root, copy_request.src_user_id, copy_request.src_repo_id)
        if not os.path.isdir(src_root):
            return utils.make_general_response(code=CTLResponseCode.ARG_VALIDATION_FAILED,
                                               message=f"Invalid src root: {src_root}")
        return utils.make_general_response(code=CTLResponseCode.CTR_OK, message="")

    @classmethod
    def subtask_count(cls) -> int:
        # a copy task consists of exactly one subtask
        return 1

    @classmethod
    def subtask_invoke_0(cls, sandbox_root: str, repo_root: str, assets_config: Dict[str, str],
                         request: backend_pb2.GeneralReq, subtask_id: str, subtask_workdir: str,
                         subtask_id_dict: Dict[int, str]) -> backend_pb2.GeneralResp:
        """Run the single copy subtask against the validated source repo."""
        copy_request = request.req_create_task.copy
        src_root = os.path.join(sandbox_root, copy_request.src_user_id, copy_request.src_repo_id)
        copy_response = cls.copying_cmd(repo_root=repo_root,
                                        task_id=subtask_id,
                                        src_root=src_root,
                                        src_dataset_id=copy_request.src_dataset_id,
                                        work_dir=subtask_workdir,
                                        name_strategy_ignore=copy_request.name_strategy_ignore)
        return copy_response

    @staticmethod
    def copying_cmd(repo_root: str, task_id: str, src_root: str, src_dataset_id: str, work_dir: str,
                    name_strategy_ignore: bool) -> backend_pb2.GeneralResp:
        """Build and execute the `mir copy` command line."""
        copying_cmd_str = [
            utils.mir_executable(), 'copy', '--root', repo_root,
            '--src-root', src_root, '--dst-rev', f"{task_id}@{task_id}", '--src-revs',
            f"{src_dataset_id}@{src_dataset_id}", '-w', work_dir
        ]
        if name_strategy_ignore:
            copying_cmd_str.append('--ignore-unknown-types')
        return utils.run_command(copying_cmd_str)
| 2,716 | 832 |
from . import class_
class Cleric(class_.Class):
    """A cleric; its specialization key is derived from the chosen channel divinity."""
    def __init__(self, channel_divinity):
        class_.Class.__init__(self)
        self.spec = f'cleric_{channel_divinity}'
        self.defs['will'] = 2  # clerics get a +2 will defense
"""ask URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path, re_path

from qa.urls import *
from qa.views import test ###only for now
# BUGFIX: the original called url() (and include()) without importing them,
# which raises NameError at import time; re_path from django.urls is the
# direct regex-based replacement in Django 2.0+.
urlpatterns = [
    re_path(r'^admin/', admin.site.urls, name='admin'),
    re_path(r'^login/', test, name='login'),
    re_path(r'^signup/', test, name='signup'),
    re_path(r'^question/', include('qa.urls')),  # good practice: tree-structured urls
    re_path(r'^ask/', test, name='ask'),
    re_path(r'^popular/', test, name='popular'),
    re_path(r'^new/', test, name='new'),
    re_path(r'^$', test),
]
import json
import os
import time
import logging
import boto3
from botocore.exceptions import ClientError
from common import *
from subscription import *
from azure.mgmt.subscription import SubscriptionClient
# Setup Logging: INFO for our own messages, WARNING for the chatty AWS SDK loggers.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logging.getLogger('botocore').setLevel(logging.WARNING)
logging.getLogger('boto3').setLevel(logging.WARNING)
# TODO - Figure out which module Azure uses for logging so its debug messages can be suppressed too.
def handler(event, context):
    """Lambda entry point: inventory Azure subscriptions across all tenants.

    For every tenant in the Azure credentials secret, lists its subscriptions,
    upserts each into the DynamoDB subscription table, and returns the event
    augmented with the list of queryable subscription ids.

    Raises:
        Exception: if credentials cannot be read or no subscriptions are found.
    """
    logger.info("Received event: " + json.dumps(event, sort_keys=True))
    dynamodb = boto3.resource('dynamodb')
    subscription_table = dynamodb.Table(os.environ['SUBSCRIPTION_TABLE'])
    azure_secrets = get_azure_creds(os.environ['AZURE_SECRET_NAME'])
    if azure_secrets is None:
        raise Exception("Unable to extract Azure Credentials. Aborting...")
    collected_subs = []
    for tenant, credential_info in azure_secrets.items():
        azure_creds = ServicePrincipalCredentials(
            client_id=credential_info["application_id"],
            secret=credential_info["key"],
            tenant=credential_info["tenant_id"]
        )
        resource_client = SubscriptionClient(azure_creds)
        for subscription in resource_client.subscriptions.list():
            # Some subscription IDs returned by the API are not queryable; this
            # seems like a bug in the MS API. There may be a better way of
            # determining this...
            queryable = 'false'
            if 'Access to Azure Active Directory' not in subscription.display_name:
                # Keep track of all valid subscriptions
                collected_subs.append(subscription.subscription_id)
                queryable = 'true'
            subscription_dict = {
                "subscription_id": subscription.subscription_id,
                "display_name": subscription.display_name,
                "state": subscription.state,
                # round-trip through JSON to get a plain-dict snapshot
                "SubscriptionClass": json.loads(json.dumps(subscription, default=str)),
                "tenant_id": credential_info["tenant_id"],
                "tenant_name": tenant,
                "queryable": queryable
            }
            # Add subscriptions to DynamoDB subscriptions table.
            create_or_update_subscription(subscription_dict, subscription_table)
    # BUGFIX: collected_subs is always a list, so the original "is None" check
    # could never fire; treat an empty list as the failure condition instead.
    if not collected_subs:
        raise Exception("No Subscriptions found. Aborting...")
    # Return only valid subscription ID's to be sent via SNS by inventory trigger function
    event['subscription_list'] = collected_subs
    return event
def create_or_update_subscription(subscription, subscription_table):
    """Upsert one subscription record into the DynamoDB table.

    Args:
        subscription (dict): record as assembled by handler().
        subscription_table: boto3 DynamoDB Table resource.

    Raises:
        AccountUpdateError: if DynamoDB rejects the update.
    """
    logger.info(u"Adding subscription {}".format(subscription))
    try:
        # NOTE(review): the response is captured but never used — confirm it can be dropped
        response = subscription_table.update_item(
            Key= {'subscription_id': subscription["subscription_id"]},
            UpdateExpression="set display_name=:name, subscription_state=:status, SubscriptionClass=:class_record, tenant_id=:tenant_id, tenant_name=:tenant_name, queryable=:queryable",
            ExpressionAttributeValues={
                ':name': subscription["display_name"],
                ':status': subscription["state"],
                ':class_record': subscription["SubscriptionClass"],
                ':tenant_id': subscription["tenant_id"],
                ':tenant_name': subscription["tenant_name"],
                ':queryable': subscription["queryable"]
            }
        )
    except ClientError as e:
        raise AccountUpdateError(u"Unable to create {}: {}".format(subscription, e))
    except KeyError as e:
        # a malformed record is logged but deliberately does not abort the batch
        logger.critical(f"Subscription {subscription} is missing a key: {e}")
class AccountUpdateError(Exception):
    """Raised when an update to DynamoDB fails."""
| 3,898 | 1,032 |
"""
This stage runs Dismod-AT. Dismod gets called in very similar ways.
Let's look at them in order to narrow down configuration of this
stage.::
dismod_at database init
dismod_at database fit <variables>
dismod_at database fit <variables> <simulate_index>
dismod_at database set option <name> <value>
dismod_at database set <table_out> <source>
dismod_at database set <table_out> <source> <sample_index>
dismod_at database depend
dismod_at database simulate <number_simulate>
dismod_at database sample <method> <number_sample>
dismod_at database predict <source>
So how does the cascade know what the input database is?
We decided it would use the name of the stage as the name of
the database. Can a user call dismod_at through the cascade?
Would they want to? I see no reason for it when you can just
call Dismod. You'd call it within the Cascade when it's a known step,
in which case the variables and sources are decided beforehand.
Therefore, the ``command_list`` below will include the entries
that come after the database.::
command_list = [
["init"],
["fit", "both"],
["predict"]
]
That gives enough freedom to specify the command list when
defining the :class:`cascade_at.sequential_batch.Batch`.
"""
import functools
import logging
import os
import asyncio
CODELOG = logging.getLogger(__name__)
MATHLOG = logging.getLogger(__name__)
def dismod_run(command_list):
    """Build a batch stage that runs Dismod-AT with *command_list*.

    Intended for composing a ``Batch``, e.g.::

        batch = Batch([
            ("settings", import_settings),
            ("init", dismod_run([["init"]])),
            ("fitpredict", dismod_run([["fit", "fixed"], ["predict"]])),
        ])

    Two command-line options influence execution:

    * ``single_use_machine=False`` — when True, nice the Dismod process so
      interactive work on the same machine stays responsive, at little cost.
    * ``subprocess_poll_time=0.5`` — seconds between checks for completion;
      a tenth of a second is perfectly reasonable.

    Args:
        command_list (List[List[str]]): Dismod-AT commands, i.e. the
            arguments that follow the database on the command line.

    Returns:
        A callable stage ``stage(context)`` that runs the commands.
    """
    return functools.partial(dismod_recipe, command_list)
def dismod_recipe(command_list, context):
    """
    Runs Dismod-AT. We generally run Dismod-AT more than once with
    a sequence of commands, so we call these a recipe.

    Args:
        command_list (List[List[str]]): A list of commands for Dismod AT.
        context: A context object from which we do I/O.

    Raises:
        ValueError: if the context supplies no executable.
        FileNotFoundError: if the executable, the singularity image, or
            the database file is missing.
    """
    dismod_executable = context.dismod_executable()
    # These are checks we can do before trying to run Dismod. They
    # don't need to be exhaustive because we'll see if it doesn't run.
    if len(dismod_executable) < 1:
        raise ValueError("There is no dismod executable in context")
    if not dismod_executable[0].exists():
        raise FileNotFoundError(f"Could not find file {dismod_executable}")
    # presumably the command looks like: singularity exec <image> <cmd> ... — confirm
    using_singularity = len(dismod_executable) > 3 and dismod_executable[0].name == "singularity"
    if using_singularity and not dismod_executable[2].exists():
        raise FileNotFoundError(f"Could not find singularity image {dismod_executable[2]}")
    db_file = context.dismod_file()
    if not db_file.exists():
        raise FileNotFoundError(f"Could not find file {db_file}")
    for command in command_list:
        MATHLOG.info("Running dismod_at {} {}".format(db_file, command))
        run_and_watch(
            dismod_executable + [db_file] + command,
            context.params("single_use_machine"),
            context.params("subprocess_poll_time"),
        )
def reduce_process_priority():
    """
    It seems counter-intuitive to ask the process to be slower,
    but reducing the priority of the process makes it livable to run
    in the background on your laptop, and it won't go appreciably
    slower.
    """
    os.nice(19)  # lowest scheduling priority
async def _read_pipe(pipe, result, callback=lambda text: None):
    """Read from a pipe until it closes.

    Modernized from the deprecated ``@asyncio.coroutine`` / ``yield from``
    style (removed in Python 3.11) to a native coroutine; callers that
    ``await`` it or wrap it in ``create_task`` are unaffected.

    Args:
        pipe: The pipe (an asyncio StreamReader) to read from.
        result: a list to accumulate the decoded output into.
        callback: a callable invoked with each chunk of text read.
    """
    while not pipe.at_eof():
        text = await pipe.read(2 ** 16)
        text = text.decode("utf-8")
        result.append(text)
        callback(text)
def run_and_watch(command, single_use_machine, poll_time):
    """
    Runs a command and logs its stdout and stderr while that command
    runs. The point is two-fold, to gather stdout from the running
    program and to turn any faults into exceptions.

    Args:
        command (List[Path|str]): The command and its arguments; each
            element is stringified before execution.
        single_use_machine (bool): Whether this is running on a machine
            where someone is doing interactive work at the same time.
            If so, we reduce process priority.
        poll_time (float): Seconds between completion checks (kept for
            interface compatibility).

    Returns:
        Tuple[str, str]: The output stream and the error stream.
    """
    command = [str(a) for a in command]
    # asyncio.run replaces the deprecated get_event_loop()/run_until_complete
    # pattern: it creates and closes a fresh event loop per invocation.
    return asyncio.run(_async_run_and_watch(command, single_use_machine, poll_time))
async def _async_run_and_watch(command, single_use_machine, poll_time):
    """Spawn *command*, stream its stdout/stderr through the loggers, and
    check its exit code.

    Native-coroutine rewrite of the deprecated ``@asyncio.coroutine``
    generator style (removed in Python 3.11); behavior is unchanged.

    Returns:
        Tuple[str, str]: accumulated stdout and stderr text.

    Raises:
        Exception: if the process cannot start or exits non-zero.
    """
    if single_use_machine:
        pre_execution_function = reduce_process_priority
    else:
        pre_execution_function = None
    try:
        CODELOG.info(f"Forking to {command}")
        sub_process = await asyncio.subprocess.create_subprocess_exec(
            *command, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, preexec_fn=pre_execution_function
        )
    except ValueError as ve:
        raise Exception(f"Dismod called with invalid arguments {ve}") from ve
    except OSError as ose:
        raise Exception(f"Dismod couldn't run due to OS error {ose}") from ose
    out_list = []
    err_list = []
    # drain both pipes concurrently while waiting for the process to exit
    std_out_task = asyncio.ensure_future(_read_pipe(sub_process.stdout, out_list, lambda text: MATHLOG.debug(text)))
    std_err_task = asyncio.ensure_future(_read_pipe(sub_process.stderr, err_list, lambda text: MATHLOG.error(text)))
    await sub_process.wait()
    await std_out_task
    await std_err_task
    if sub_process.returncode != 0:
        msg = (
            f"return code {sub_process.returncode}\n"
            f"stdout {os.linesep.join(out_list)}\n"
            f"stderr {os.linesep.join(err_list)}\n"
        )
        raise Exception("dismod_at failed.\n{}".format(msg))
    # return code is 0: success
    return "".join(out_list), "".join(err_list)
| 7,443 | 2,194 |
import py
import platform
from rpython.translator.platform.arch.s390x import (s390x_cpu_revision,
extract_s390x_cpu_ids)
# Module-level guard: these tests only make sense on s390x hardware.
if platform.machine() != 's390x':
    py.test.skip("s390x tests only")
def test_cpuid_s390x():
    """The CPU model of the machine running the tests must be recognized."""
    detected = s390x_cpu_revision()
    assert detected != 'unknown', 'the model you are running on might be too old'
def test_read_processor_info():
    # Two /proc/cpuinfo-style lines: one with "machine =", one with
    # "version/identification"; tuples are (index, version, identification,
    # machine-as-hex-int), with 0/None defaults for missing fields.
    ids = extract_s390x_cpu_ids("""
    processor 0: machine = 12345
    processor 1: version = FF, identification = AF
    """.splitlines())
    assert ids == [(0, None, None, 0x12345),
                   (1, 'FF', 'AF', 0),
                  ]
| 617 | 234 |
from django.test import TestCase
from django.contrib.auth import get_user_model
class ModelTests(TestCase):
    """Tests for the custom user model manager."""

    def test_create_user_with_email_successful(self):
        """Creating a new user with an email is successful."""
        email = 'rishabhkushwah208@gmail.com'
        password = 'kushwah20801'
        user = get_user_model().objects.create_user(
            email=email,
            password=password
        )
        self.assertEqual(user.email, email)
        self.assertTrue(user.check_password(password))

    def test_new_user_email_normalized(self):
        """The email for a new user is normalized to lower case."""
        email = 'kushwahrishabh0@gmail.com'
        user = get_user_model().objects.create_user(email, 'rishabh123456')
        self.assertEqual(user.email, email.lower())

    def test_new_user_invalid_email(self):
        """Creating a user without an email raises ValueError."""
        # BUGFIX: the original referenced the undefined name "valueError",
        # so this test failed with a NameError instead of asserting the
        # intended ValueError.
        with self.assertRaises(ValueError):
            get_user_model().objects.create_user(None, 'rishabh123456')

    def test_create_new_superuser(self):
        """Superusers are created with both is_superuser and is_staff set."""
        user = get_user_model().objects.create_superuser(
            'rishabhkushwah208@gmail.com',
            'rishabh123456'
        )
        self.assertTrue(user.is_superuser)
        self.assertTrue(user.is_staff)
| 1,155 | 413 |
class Paste:
    """A single paste: id, title, author, timestamp and body text."""

    def __init__(self, paste_id, name, user, date, content):
        self.id = paste_id
        self.name = name
        self.user = user
        self.date = date
        self.content = content

    def __str__(self):
        # e.g. "42 - title - by alice at 2020-01-01 \n body"
        return f"{self.id} - {self.name} - by {self.user} at {self.date} \n {self.content}"
#!/usr/bin/env python3
# Purpose: Sum any number of inputted integers together.
import argparse
def get_args():
    """Build the command-line parser and return the parsed arguments."""
    arg_parser = argparse.ArgumentParser(description='Add numbers')
    arg_parser.add_argument('integers', metavar='INT', type=int, nargs='+',
                            help='Numbers to add')
    return arg_parser.parse_args()
def main():
    """Print the addition as an equation, e.g. "1 + 2 = 3"."""
    args = get_args()
    numbers = args.integers
    lhs = ' + '.join(str(number) for number in numbers)
    print(lhs + ' = ' + str(sum(numbers)))


if __name__ == '__main__':
    main()
import planckStyle as s

# Compare LCDM+A_L chains (plik TT, plik TTTEEE, CamSpec TTTEEE) against
# plain LCDM in a rectangle plot, with a derived deflection-variance parameter.
g = s.getSubplotPlotter()
roots = ['base_Alens_plikHM_TT_lowl_lowE', 'base_Alens_plikHM_TTTEEE_lowl_lowE', 'base_plikHM_TTTEEE_lowl_lowE',
         'base_Alens_CamSpecHM_TTTEEE_lowl_lowE']
for i, root in enumerate(roots):
    samples = g.getSamples(root)
    p = samples.getParams()
    # derived parameter: squared rms deflection, labelled in arcmin^2
    samples.addDerived(p.rmsdeflect ** 2, 'vardeflect', label=r'$\langle |\nabla\phi|^2\rangle\,[{\rm arcmin}^2]$')
    roots[i] = samples
yparams = [u'Alens', u'vardeflect']
xparams = [u'omegabh2', u'omegach2', 'ns', u'H0', u'omegam', u'sigma8']
# A_L = 1 marker on the first row; the first three roots filled, CamSpec dashed
g.rectangle_plot(xparams, yparams, roots=roots, ymarkers=[1, None], filled=[True] * 3 + [False],
                 colors=g.settings.solid_colors[:3] + ['k'], ls=['-'] * 3 + ['--'],
                 legend_labels=[s.planckTT + r' ($\Lambda{\rm CDM}+A_L$)', s.planckall + r' ($\Lambda{\rm CDM}+A_L$)',
                                s.planckall + r' ($\Lambda{\rm CDM}$)'])
g.export()
| 940 | 388 |
from dolfin import *
from numpy import *
import scipy as Sci
import scipy.linalg
from math import pi,sin,cos,sqrt
import scipy.sparse as sps
import scipy.io as save
import scipy
import pdb
# Python 2 / legacy DOLFIN script: solve a mixed curl-curl problem on the
# unit square with N1curl/DG elements and check the L2 error against the
# known exact solution.
parameters['linear_algebra_backend'] = 'uBLAS'
j = 1
n = 2
n =2
# print n
mesh = UnitSquareMesh(n,n)
# mesh = Mesh('untitled.xml')
c = 1
# print "starting assemble"
tic()
parameters['reorder_dofs_serial'] = False
V = FunctionSpace(mesh, "N1curl", 1)
Q = FunctionSpace(mesh, "DG", 0)
parameters['reorder_dofs_serial'] = False
W = V*Q
(u, p) = TrialFunctions(W)
(v, q) = TestFunctions(W)
# f=Expression(("2+x[1]*(1-x[1])","2+x[0]*(1-x[0])"))
# manufactured source matching the exact solution below
f= Expression(("(8*pow(pi,2)+1)*sin(2*pi*x[1])*cos(2*pi*x[0])","-(8*pow(pi,2)+1)*sin(2*pi*x[0])*cos(2*pi*x[1])"))
ue= Expression(("sin(2*pi*x[1])*cos(2*pi*x[0])","-sin(2*pi*x[0])*cos(2*pi*x[1])"))
# ue = Expression(("x[1]*(1-x[1])","x[0]*(1-x[0])"))
u0 = Expression(('0','0'))
def u0_boundary(x, on_boundary):
    # homogeneous Dirichlet condition on the whole boundary
    return on_boundary
bc = DirichletBC(W.sub(0), u0, u0_boundary)
a11 = inner(curl(v),curl(u))*dx-c*inner(u,v)*dx
# NOTE(review): a12/a21 lack the *dx measure unlike a11 — confirm intended
a12 = inner(v,grad(p))
a21 = inner(u,grad(q))
a = a11+a12+a21
b = dolfin.inner(f,v)*dx
A, bb = assemble_system(a, b, bc)
time = toc()
u = Function(V)
print "solve"
set_log_level(PROGRESS)
solver = KrylovSolver("cg","icc")
solver.parameters["relative_tolerance"] = 1e-10
solver.parameters["absolute_tolerance"] = 1e-7
solver.solve(A,u.vector(),bb)
set_log_level(PROGRESS)
# parameters.linear_algebra_backend = "uBLAS"
# AA, bB = assemble_system(a, b)
# print "store matrix"
# rows, cols, values = AA.data()
# # rows1, values1 = bB.data()
# # print AA.data()
# Aa = sps.csr_matrix((values, cols, rows))
# # b = sps.csr_matrix((values1, cols1, rows1))
# # print Aa
# print "save matrix"
# scipy.io.savemat("Ab.mat", {"A": Aa,"b": bB.data()},oned_as='row')
ue= Expression(("sin(2*pi*x[1])*cos(2*pi*x[0])","-sin(2*pi*x[0])*cos(2*pi*x[1])"))
# L2 error of the computed field against the exact solution
err = ue - u
L2normerr = sqrt(assemble(dolfin.inner(err,err)*dx))
print n,L2normerr
# error[j-1,0] = L2normerr
parameters.linear_algebra_backend = "PETSc"
# plot( mesh,interactive=True)
plot(u, interactive=True)
| 2,116 | 1,001 |
import rospy
import sys, time, logging, getch
from cops_and_robots.Map import Map
from std_msgs.msg import String
#<>NOTE: THIS WILL NOT WORK IN PYTHON AND HAS TO BE WRITTEN IN C++
#include <image_transport/image_transport.h>
#include <opencv2/highgui/highgui.hpp>
#include <cv_bridge/cv_bridge.h>
def callback(data):
    """Handle one message from the ``human_sensor`` topic.

    Looks the received command string up in ``keymap`` and executes it, then
    asks the cop robot for its next movement command and queues it.
    NOTE(review): ``keymap`` and ``cop`` are not defined in this file --
    presumably module-level globals created elsewhere; confirm before use.
    """
    rospy.loginfo(rospy.get_caller_id()+"I heard %s",data.data)
    x = data.data
    try:
        keymap[x]()
    except Exception, e:  # Python 2 syntax; unknown commands are only logged
        logging.error('%s is not a viable command',x)
    cmd = cop.move()
    cop.cmd_queue.put(cmd)
def chatter():
    """Run the map_chatter node: listen for human commands, publish battery.

    Subscribes to ``human_sensor`` (String) and republishes the cop's battery
    status on the ``battery`` topic at 1 Hz until ROS shuts down.
    NOTE(review): ``battery`` (the message type) and ``cop`` are not defined
    in this file; they must come from the surrounding package.
    """
    rospy.init_node('map_chatter', anonymous=True)
    #listener
    rospy.Subscriber("human_sensor", String, callback)
    #talker
    pub = rospy.Publisher("battery", battery, queue_size=10)
    r = rospy.Rate(1) #1Hz
    while not rospy.is_shutdown():
        pub.publish(cop.battery_capacity, cop.battery_charge, cop.charging_mode)
        r.sleep()
if __name__ == '__main__':
    # NOTE(review): set_up_fleming is not defined in this file; the result is
    # bound but never used afterwards -- confirm it has required side effects.
    fleming = set_up_fleming()
    chatter()
"""
This module finds diffusion paths through a structure based on a given potential field.
If you use PathFinder algorithm for your research, please consider citing the following work:
Ziqin Rong, Daniil Kitchaev, Pieremanuele Canepa, Wenxuan Huang, Gerbrand Ceder,
The Journal of Chemical Physics 145 (7), 074112
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import numpy as np
import numpy.linalg as la
import scipy.signal
import scipy.stats
from scipy.interpolate import interp1d
import math
import six
from abc import ABCMeta, abstractmethod
from pymatgen.core.structure import Structure
from pymatgen.core.lattice import Lattice
from pymatgen.core.sites import *
from pymatgen.core.periodic_table import *
from pymatgen.io.vasp.inputs import Poscar
from pymatgen.io.vasp.outputs import VolumetricData, Chgcar
__author__ = "Daniil Kitchaev"
__version__ = "1.0"
__maintainer__ = "Daniil Kitchaev, Ziqin Rong"
__email__ = "dkitch@mit.edu, rongzq08@mit.edu"
__status__ = "Development"
__date__ = "March 17, 2015"
class NEBPathfinder:
    """Interpolates a migration path between two endpoint structures, relaxing
    the path for selected sites with an elastic-band method in a static
    potential field (linear interpolation for all other sites)."""

    def __init__(self, start_struct, end_struct, relax_sites, v, n_images=20):
        """
        General pathfinder for interpolating between two structures, where the interpolating path is calculated with
        the elastic band method with respect to the given static potential for sites whose indices are given in
        relax_sites, and is linear otherwise.
        :param start_struct, end_struct: Endpoint structures to interpolate between
        :param relax_sites: List of site indices whose interpolation paths should be relaxed
        :param v: Static potential field (3-D numpy grid) to use for the elastic band relaxation
        :param n_images: Number of interpolation images to generate
        """
        self.__s1 = start_struct
        self.__s2 = end_struct
        self.__relax_sites = relax_sites
        self.__v = v
        self.__n_images = n_images
        self.__images = None
        self.interpolate()

    def interpolate(self):
        """
        Finds a set of n_images from self.__s1 to self.__s2, where for all sites except the ones given in
        self.__relax_sites the interpolation is linear (as in pymatgen.core.structure.interpolate), and for the
        site indices given in self.__relax_sites the path is relaxed by the elastic band method within the static
        potential self.__v. The result is stored in self.__images.
        """
        images = self.__s1.interpolate(self.__s2, nimages=self.__n_images, interpolate_lattices=False)
        for site_i in self.__relax_sites:
            start_f = images[0].sites[site_i].frac_coords
            end_f = images[-1].sites[site_i].frac_coords
            # Relax the path in discrete grid coordinates; dr converts one grid
            # step along each lattice direction into real (Cartesian-length) units.
            path = NEBPathfinder.string_relax(NEBPathfinder.__f2d(start_f, self.__v),
                                              NEBPathfinder.__f2d(end_f, self.__v),
                                              self.__v, n_images=(self.__n_images+1),
                                              dr=[self.__s1.lattice.a/self.__v.shape[0],
                                                  self.__s1.lattice.b/self.__v.shape[1],
                                                  self.__s1.lattice.c/self.__v.shape[2]])
            # Move the relaxing site of every image from its linear position to
            # the corresponding relaxed path point.
            for image_i, image in enumerate(images):
                image.translate_sites(site_i,
                                      NEBPathfinder.__d2f(path[image_i], self.__v) - image.sites[site_i].frac_coords,
                                      frac_coords=True, to_unit_cell=True)
        self.__images = images

    @property
    def images(self):
        """
        Returns a list of structures interpolating between the start and endpoint structures.
        """
        return self.__images

    def plot_images(self, outfile):
        """
        Generates a POSCAR with the calculated diffusion path with respect to the first endpoint.
        :param outfile: Output file for the POSCAR
        """
        # NOTE(review): this appends to the site list obtained from the first
        # image; whether that mutates the image depends on what .sites returns
        # in the installed pymatgen version -- confirm if that matters.
        sum_struct = self.__images[0].sites
        for image in self.__images:
            for site_i in self.__relax_sites:
                sum_struct.append(PeriodicSite(image.sites[site_i].specie, image.sites[site_i].frac_coords,
                                               self.__images[0].lattice, to_unit_cell=True, coords_are_cartesian=False))
        sum_struct = Structure.from_sites(sum_struct, validate_proximity=False)
        p = Poscar(sum_struct)
        p.write_file(outfile)

    @staticmethod
    def string_relax(start, end, V, n_images=25, dr=None, h=3.0, k=0.17, min_iter=100, max_iter=10000, max_tol=5e-6):
        """
        Implements path relaxation via the elastic band method. In general, the method is to define a path by a set of
        points (images) connected with bands with some elasticity constant k. The images then relax along the forces
        found in the potential field V, counterbalanced by the elastic response of the elastic band. In general the
        endpoints of the band can be allowed to relax also to their local minima, but in this calculation they are kept
        fixed.
        :param start, end: Endpoints of the path calculation given in discrete coordinates with respect to the grid in V
        :param V: potential field through which to calculate the path
        :param n_images: number of images used to define the path. In general anywhere from 20 to 40 seems to be good.
        :param dr: Conversion ratio from discrete coordinates to real coordinates for each of the three coordinate vectors
        :param h: Step size for the relaxation. h = 0.1 works reliably, but is slow. h=10 diverges with large gradients
                  but for the types of gradients seen in CHGCARs, works pretty reliably
        :param k: Elastic constant for the band (in real units, not discrete)
        :param min_iter, max_iter: Number of optimization steps the string will take before exiting (even if unconverged)
        :param max_tol: Convergence threshold such that if the string moves by less than max_tol in a step, and at least
                        min_iter steps have passed, the algorithm will terminate. Depends strongly on the size of the
                        gradients in V, but 5e-6 works reasonably well for CHGCARs
        """
        #
        # This code is based on the MATLAB example provided by
        # Prof. Eric Vanden-Eijnden of NYU
        # (http://www.cims.nyu.edu/~eve2/main.htm)
        #
        print("Getting path from {} to {} (coords wrt V grid)".format(start, end))
        # Set parameters
        if not dr:
            dr = np.array([1.0/V.shape[0], 1.0/V.shape[1], 1.0/V.shape[2]])
        else:
            dr = np.array(dr, dtype=float)
        keff = k * dr * n_images
        h0 = h
        # Initialize string as a straight line between the endpoints,
        # reparametrized to equal arc-length spacing.
        g1 = np.linspace(0, 1, n_images)
        s0 = start
        s1 = end
        s = np.array([g * (s1-s0) for g in g1]) + s0
        ds = s - np.roll(s,1,axis=0)
        ds[0] = (ds[0] - ds[0])
        ls = np.cumsum(la.norm(ds, axis=1))
        ls = ls/ls[-1]
        fi = interp1d(ls,s,axis=0)
        s = fi(g1)
        # Evaluate initial distances (for elastic equilibrium)
        ds0_plus = s - np.roll(s,1,axis=0)
        ds0_minus = s - np.roll(s,-1,axis=0)
        ds0_plus[0] = (ds0_plus[0] - ds0_plus[0])
        ds0_minus[-1] = (ds0_minus[-1] - ds0_minus[-1])
        # The potential does not change during the relaxation, so compute its
        # gradient once instead of once per step (was recomputed in the loop).
        dV = np.gradient(V)
        d = V.shape
        # Evolve string
        for step in range(0, max_iter):
            if step > min_iter:
                h = h0 * np.exp(-2.0 * (step - min_iter)/max_iter)  # Gradually decay step size to prevent oscillations
            else:
                h = h0
            # Calculate forces acting on string
            s0 = s
            # Potential gradient at each image, converted to real units.
            # BUG FIX: each Cartesian component must be scaled by its own grid
            # spacing; the original divided all three components by dr[0],
            # which is wrong for non-cubic grids.
            edV = np.array([[dV[0][int(pt[0])%d[0]][int(pt[1])%d[1]][int(pt[2])%d[2]] / dr[0],
                            dV[1][int(pt[0])%d[0]][int(pt[1])%d[1]][int(pt[2])%d[2]] / dr[1],
                            dV[2][int(pt[0])%d[0]][int(pt[1])%d[1]][int(pt[2])%d[2]] / dr[2]] for pt in s])
            #if(step % 100 == 0):
            #    print(edV)
            # Update according to force due to potential and string elasticity
            ds_plus = s - np.roll(s,1,axis=0)
            ds_minus = s - np.roll(s,-1,axis=0)
            ds_plus[0] = (ds_plus[0] - ds_plus[0])
            ds_minus[-1] = (ds_minus[-1] - ds_minus[-1])
            Fpot = edV
            # NOTE(review): la.norm here is the norm of the whole displacement
            # array, not per-segment -- kept as in the original algorithm.
            Fel = keff * (la.norm(ds_plus) - la.norm(ds0_plus)) * (ds_plus / la.norm(ds_plus))
            Fel += keff * (la.norm(ds_minus) - la.norm(ds0_minus)) * (ds_minus / la.norm(ds_minus))
            s = s - h * (Fpot + Fel)
            # Fix endpoints
            s[0] = s0[0]
            s[-1] = s0[-1]
            # Reparametrize string to keep images evenly spaced in arc length
            ds = s - np.roll(s,1,axis=0)
            ds[0] = (ds[0] - ds[0])
            ls = np.cumsum(la.norm(ds, axis=1))
            ls = ls/ls[-1]
            fi = interp1d(ls,s,axis=0)
            s = fi(g1)
            tol = la.norm((s-s0) * dr) / n_images / h
            if (tol > 1e10):
                raise ValueError("Pathfinding failed, path diverged! Consider reducing h to avoid divergence.")
            if (step > min_iter and tol < max_tol):
                print("Converged at step {}".format(step))
                break
            if (step % 100 == 0):
                print ("Step {} - ds = {}".format(step, tol))
        return s

    @staticmethod
    def __f2d(frac_coords, v):
        """
        Converts fractional coordinates to discrete coordinates with respect to the grid size of v
        """
        #frac_coords = frac_coords % 1
        return np.array([int(frac_coords[0]*v.shape[0]),
                         int(frac_coords[1]*v.shape[1]),
                         int(frac_coords[2]*v.shape[2])])

    @staticmethod
    def __d2f(disc_coords, v):
        """
        Converts a point given in discrete coordinates with respect to the grid in v to fractional coordinates.
        """
        return np.array([disc_coords[0]/v.shape[0],
                         disc_coords[1]/v.shape[1],
                         disc_coords[2]/v.shape[2]])
class StaticPotential(six.with_metaclass(ABCMeta)):
    """
    Defines a general static potential for diffusion calculations. Implements grid-rescaling and smearing for the
    potential grid. Also provides a function to normalize the potential from 0 to 1 (recommended).
    """
    def __init__(self, struct, pot):
        # Potential values on a 3-D grid and the periodic structure they are
        # defined on (name-mangled to _StaticPotential__v / __s).
        self.__v = pot
        self.__s = struct
    def get_v(self):
        """
        Returns the potential
        """
        return self.__v
    def normalize(self):
        """
        Sets the potential range 0 to 1.
        """
        self.__v = self.__v - np.amin(self.__v)
        self.__v = self.__v / np.amax(self.__v)
    def rescale_field(self, new_dim):
        """
        Changes the discretization of the potential field by linear interpolation. This is necessary if the potential field
        obtained from DFT is strangely skewed, or is too fine or coarse. Obeys periodic boundary conditions at the edges of
        the cell. Alternatively useful for mixing potentials that originally are on different grids.
        :param new_dim: tuple giving the numpy shape of the new grid
        """
        v_dim = self.__v.shape
        # Pad one plane on each high edge (mode='wrap') so interpolation near
        # the cell boundary sees the periodic image.
        padded_v = np.lib.pad(self.__v, ((0,1), (0,1), (0,1)), mode='wrap')
        # Flatten the padded grid into (point, value) pairs for griddata.
        ogrid_list = np.array([list(c) for c in list(np.ndindex(v_dim[0]+1, v_dim[1]+1, v_dim[2]+1))])
        v_ogrid = padded_v.reshape(((v_dim[0]+1) * (v_dim[1]+1) * (v_dim[2]+1), -1))
        ngrid_a, ngrid_b, ngrid_c = np.mgrid[0 : v_dim[0] : v_dim[0]/new_dim[0],
                                             0 : v_dim[1] : v_dim[1]/new_dim[1],
                                             0 : v_dim[2] : v_dim[2]/new_dim[2]]
        # scipy.interpolate is available as an attribute of scipy here because
        # the module does `from scipy.interpolate import interp1d` at the top.
        v_ngrid = scipy.interpolate.griddata(ogrid_list, v_ogrid, (ngrid_a, ngrid_b, ngrid_c), method='linear').reshape((new_dim[0], new_dim[1], new_dim[2]))
        self.__v = v_ngrid
    def gaussian_smear(self, r):
        """
        Applies an isotropic Gaussian smear of width (standard deviation) r to the potential field. This is necessary to
        avoid finding paths through narrow minima or nodes that may exist in the field (although any potential or
        charge distribution generated from GGA should be relatively smooth anyway). The smearing obeys periodic
        boundary conditions at the edges of the cell.
        :param r: Smearing width in cartesian coordinates, in the same units as the structure lattice vectors
        """
        # Since scaling factor in fractional coords is not isotropic, have to have different radii in 3 directions
        a_lat = self.__s.lattice.a
        b_lat = self.__s.lattice.b
        c_lat = self.__s.lattice.c
        # Conversion factors for discretization of v
        v_dim = self.__v.shape
        r_frac = (r / a_lat, r / b_lat, r / c_lat)
        r_disc = (int(math.ceil(r_frac[0] * v_dim[0])), int(math.ceil(r_frac[1] * v_dim[1])),
                  int(math.ceil(r_frac[2] * v_dim[2])))
        # Apply smearing
        # Gaussian filter: build a kernel of distances (in units of r) from the
        # kernel center, measured with the real lattice metric, then evaluate
        # the standard normal pdf on it and normalize to unit weight.
        gauss_dist = np.zeros((r_disc[0] * 4 + 1, r_disc[1] * 4 + 1, r_disc[2] * 4 + 1))
        for g_a in np.arange(-2.0 * r_disc[0], 2.0 * r_disc[0] + 1, 1.0):
            for g_b in np.arange(-2.0 * r_disc[1], 2.0 * r_disc[1] + 1, 1.0):
                for g_c in np.arange(-2.0 * r_disc[2], 2.0 * r_disc[2] + 1, 1.0):
                    g = np.array([g_a / v_dim[0], g_b / v_dim[1], g_c / v_dim[2]]).T
                    gauss_dist[int(g_a + r_disc[0])][int(g_b + r_disc[1])][int(g_c + r_disc[2])] = la.norm(np.dot(self.__s.lattice.matrix, g))/r
        gauss = scipy.stats.norm.pdf(gauss_dist)
        gauss = gauss/np.sum(gauss, dtype=float)
        # Wrap-pad so the convolution obeys periodic boundary conditions.
        padded_v = np.pad(self.__v, ((r_disc[0], r_disc[0]), (r_disc[1], r_disc[1]), (r_disc[2], r_disc[2])), mode='wrap')
        smeared_v = scipy.signal.convolve(padded_v, gauss, mode='valid')
        self.__v = smeared_v
class ChgcarPotential(StaticPotential):
    """Static potential built from the charge density of a VASP run."""

    def __init__(self, chgcar, smear=False, normalize=True):
        """
        :param chgcar: Chgcar object based on a VASP run of the structure of interest (Chgcar.from_file("CHGCAR"))
        :param smear: Whether or not to apply a Gaussian smearing to the potential
        :param normalize: Whether or not to normalize the potential to range from 0 to 1
        """
        density = chgcar.data['total']
        # The stored density is scaled by the number of grid points; divide it
        # back out to get a per-voxel value.
        grid_points = density.shape[0] * density.shape[1] * density.shape[2]
        StaticPotential.__init__(self, chgcar.structure, density / grid_points)
        if smear:
            self.gaussian_smear(2.0)
        if normalize:
            self.normalize()
class FreeVolumePotential(StaticPotential):
    """Geometric "free volume" potential: the value at each grid point grows
    with proximity to the atoms of the structure, so regions far from any atom
    form low-potential channels."""

    def __init__(self, struct, dim, smear=False, normalize=True):
        """
        :param struct: Unit cell on which to base the potential
        :param dim: Grid size for the potential
        :param smear: Whether or not to apply a Gaussian smearing to the potential
        :param normalize: Whether or not to normalize the potential to range from 0 to 1
        """
        self.__s = struct
        field = FreeVolumePotential.__add_gaussians(struct, dim)
        StaticPotential.__init__(self, struct, field)
        if smear:
            self.gaussian_smear(2.0)
        if normalize:
            self.normalize()

    @staticmethod
    def __add_gaussians(s, dim, r=1.5):
        # For every grid point, store the distance (scaled by r) to the
        # nearest site, then map it through a standard normal pdf so that
        # points close to atoms get high values.
        dist_grid = np.zeros(dim)
        for idx_a in range(int(dim[0])):
            for idx_b in range(int(dim[1])):
                for idx_c in range(int(dim[2])):
                    frac = np.array([idx_a / dim[0], idx_b / dim[1], idx_c / dim[2]])
                    #print(d_f)
                    nearest = min(s.get_sites_in_sphere(frac, s.lattice.a), key=lambda hit: hit[1])[1]
                    dist_grid[idx_a][idx_b][idx_c] = nearest / r
        return scipy.stats.norm.pdf(dist_grid)
class MixedPotential(StaticPotential):
    """Static potential formed as a weighted sum of other StaticPotentials.

    All input potentials are assumed to share the same grid shape and the same
    underlying structure (the structure of the first one is used).
    """

    def __init__(self, potentials, coefficients, smear=False, normalize=True):
        """
        :param potentials: List of objects extending the StaticPotential superclass
        :param coefficients: Mixing weights for the elements of the potentials list
        :param smear: Whether or not to apply a Gaussian smearing to the potential
        :param normalize: Whether or not to normalize the potential to range from 0 to 1
        """
        v = potentials[0].get_v() * coefficients[0]
        # BUG FIX: `potentials[0].__s` is name-mangled to
        # `_MixedPotential__s` inside this class and raises AttributeError,
        # because StaticPotential.__init__ stores the structure under its own
        # mangled name. Access the attribute by its real mangled name.
        s = potentials[0]._StaticPotential__s
        for i in range(1, len(potentials)):
            v += potentials[i].get_v() * coefficients[i]
        StaticPotential.__init__(self, s, v)
        if smear:
            self.gaussian_smear(2.0)
        if normalize:
            self.normalize()
| 17,495 | 5,628 |
from state.state import MepoState, StateDoesNotExistError
from repository.git import GitRepository
from command.init import init as mepo_init
from utilities import shellcmd, colors
from urllib.parse import urlparse
import os
import pathlib
import shutil
import shlex
def run(args):
    """Clone the fixture repository (when a URL is given) and all components.

    Flow: validate the argument combination, optionally clone the fixture repo
    and chdir into it, copy a user-supplied config in, initialize mepo state if
    it does not exist yet, clone every non-fixture component at its pinned
    version, and optionally check a single branch/tag out across all repos.
    """
    # This protects against someone using branch without a URL
    if args.branch and not args.repo_url:
        raise RuntimeError("The branch argument can only be used with a URL")
    if args.allrepos and not args.branch:
        raise RuntimeError("The allrepos option must be used with a branch/tag.")
    # If you pass in a config, with clone, it could be outside the repo.
    # So use the full path
    passed_in_config = False
    if args.config:
        passed_in_config = True
        args.config = os.path.abspath(args.config)
    else:
        # If we don't pass in a config, we need to "reset" the arg to the
        # default name because we pass args to mepo_init
        args.config = 'components.yaml'
    if args.repo_url:
        # Derive the clone directory from the last URL path segment,
        # stripping a trailing ".git" when present.
        p = urlparse(args.repo_url)
        last_url_node = p.path.rsplit('/')[-1]
        url_suffix = pathlib.Path(last_url_node).suffix
        if args.directory:
            local_clone(args.repo_url,args.branch,args.directory)
            os.chdir(args.directory)
        else:
            if url_suffix == '.git':
                git_url_directory = pathlib.Path(last_url_node).stem
            else:
                git_url_directory = last_url_node
            local_clone(args.repo_url,args.branch)
            os.chdir(git_url_directory)
        # Copy the new file into the repo only if we pass it in
        if passed_in_config:
            try:
                shutil.copy(args.config,os.getcwd())
            except shutil.SameFileError as e:
                pass
    # This tries to read the state and if not, calls init,
    # loops back, and reads the state
    while True:
        try:
            allcomps = MepoState.read_state()
        except StateDoesNotExistError:
            mepo_init.run(args)
            continue
        break
    # Column width for aligned per-component output.
    max_namelen = len(max([comp.name for comp in allcomps], key=len))
    for comp in allcomps:
        if not comp.fixture:
            git = GitRepository(comp.remote, comp.local)
            version = comp.version.name
            version = version.replace('origin/','')
            recurse = comp.recurse_submodules
            # We need the type to handle hashes in components.yaml
            type = comp.version.type
            git.clone(version,recurse,type)
            if comp.sparse:
                git.sparsify(comp.sparse)
            print_clone_info(comp, max_namelen)
    if args.allrepos:
        # Check the requested branch/tag out in every component repo.
        for comp in allcomps:
            if not comp.fixture:
                git = GitRepository(comp.remote, comp.local)
                print("Checking out %s in %s" %
                      (colors.YELLOW + args.branch + colors.RESET,
                       colors.RESET + comp.name + colors.RESET))
                git.checkout(args.branch)
def print_clone_info(comp, name_width):
    """Print one aligned line for a cloned component: name, then '(type) version'."""
    version_text = '({}) {}'.format(comp.version.type, comp.version.name)
    print('{:<{width}} | {:<s}'.format(comp.name, version_text, width=name_width))
def local_clone(url, branch=None, directory=None):
    """Quietly `git clone` *url*, optionally at *branch*, into *directory*."""
    pieces = ['git clone ']
    if branch:
        pieces.append('--branch {} '.format(branch))
    pieces.append('--quiet {}'.format(url))
    if directory:
        pieces.append(' "{}"'.format(directory))
    shellcmd.run(shlex.split(''.join(pieces)))
| 3,501 | 1,051 |
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
# Recipient list and Outlook credentials used by sendit(); the placeholder
# strings must be replaced with a real account before running.
mails=['example1@gmail.com', 'example2@outlook.com']
outacc = "__outlook email id__"
outpwd = "__outlook password__"
def sendit(txt, sub):
    """Send a plain-text email to every address in `mails` via Outlook SMTP.

    :param txt: message body (plain text)
    :param sub: subject line
    Uses the module-level `outacc`/`outpwd` credentials over STARTTLS.
    """
    s = smtplib.SMTP(host='smtp-mail.outlook.com', port=587)
    # BUG FIX: the SMTP session was never terminated, leaking the connection;
    # always close it, even when login or sending raises.
    try:
        s.starttls()
        s.login(outacc, outpwd)
        for mail in mails:
            # Build a fresh message per recipient so headers are not reused.
            msg = MIMEMultipart()
            msg['From'] = outacc
            msg['To'] = mail
            msg['Subject'] = sub
            msg.attach(MIMEText(txt, 'plain'))
            s.send_message(msg)
    finally:
        s.quit()
| 564 | 209 |
import os
import sys
import time
from collections import Counter
import ray
@ray.remote
def get_hostname(x):
    """Ray task: return the tuple *x* with this worker's hostname appended.

    The short sleep gives the scheduler a chance to spread tasks over nodes.
    """
    import platform
    import time
    time.sleep(0.01)
    return x + (platform.node(),)
def wait_for_nodes(expected):
    """Block until at least *expected* nodes are part of the Ray cluster."""
    while True:
        joined = len(ray.nodes())
        if joined >= expected:
            break
        print("{} nodes have joined so far, waiting for {} more.".format(
            joined, expected - joined))
        sys.stdout.flush()
        time.sleep(1)
def main():
    """Smoke-test cross-node object transfer: run nested remote tasks and
    print how many results each (inner-host, outer-host) pair produced."""
    wait_for_nodes(3)
    # Check that objects can be transferred from each node to each other node.
    for i in range(10):
        print("Iteration {}".format(i))
        # Nested calls force the outer task to fetch the inner task's result,
        # exercising object transfer between whichever nodes ran them.
        results = [
            get_hostname.remote(get_hostname.remote(())) for _ in range(100)
        ]
        print(Counter(ray.get(results)))
        sys.stdout.flush()
    print("Success!")
    sys.stdout.flush()
if __name__ == "__main__":
    # NOTE: If you know you're running this on the head node, you can just use "localhost" here.
    if "RAY_HEAD_HOST" not in os.environ or os.environ["RAY_HEAD_HOST"] == "":
        raise ValueError("RAY_HEAD_HOST environment variable empty. Is there a ray cluster running?")
    ray_head = os.environ["RAY_HEAD_HOST"]
    ray.init(address=f"{ray_head}:6379")
    main()
| 1,388 | 427 |
import logging
from dataset_builder.exceptions.exceptions import (
BuilderStepError
)
from dataset_builder.adapter import dataset_adapter
from dataset_builder.steps import (
dataset_validator,
dataset_converter,
dataset_transformer,
dataset_enricher,
dataset_mover,
directory_cleaner
)
from dataset_builder.config import log_config
logger = logging.getLogger()
def run_builder(dataset_name: str):
    """Run the full dataset build pipeline for *dataset_name*.

    Steps: validate input files, transform the metadata, enrich and convert
    the data to parquet, then move the results into place. Errors are logged
    (never re-raised) and the working directory is always cleaned up.
    """
    try:
        log_config.setup_logger_for_import_pipeline()
        logger.info(f'Starting to build dataset {dataset_name}')
        data_file_path, metadata_file_path = dataset_validator.run_for_dataset(
            dataset_name
        )
        transformed_metadata_path = dataset_transformer.run(metadata_file_path)
        transformed_metadata = dataset_adapter.get_metadata(
            transformed_metadata_path
        )
        # Enrichment and conversion are driven by the transformed metadata.
        temporality_type = transformed_metadata['temporality']
        temporal_coverage = transformed_metadata['temporalCoverage']
        data_type = transformed_metadata['measureVariable']['dataType']
        enriched_data_path = dataset_enricher.run(
            data_file_path, temporal_coverage, data_type
        )
        parquet_file_path = dataset_converter.run(
            enriched_data_path, temporality_type, data_type
        )
        dataset_mover.run(
            dataset_name, transformed_metadata_path,
            parquet_path=parquet_file_path
        )
        logger.info('Dataset built successfully')  # typo fix: was "sucessfully"
    except BuilderStepError as e:
        # Known pipeline-step failure: message is sufficient context.
        logger.error(str(e))
    except Exception as e:
        logger.error('Unexpected exception when building dataset')
        logger.error(str(e))
    finally:
        directory_cleaner.run(dataset_name)
def run_metadata_builder(dataset_name: str):
    """Run the metadata-only build pipeline for *dataset_name*.

    Validates and transforms the metadata file, then moves it into place.
    Mirrors run_builder's error handling: errors are logged, never re-raised,
    and the working directory is always cleaned up.
    """
    try:
        log_config.setup_logger_for_import_pipeline()
        logger.info(f'Starting to build metadata {dataset_name}')
        metadata_file_path = dataset_validator.run_for_metadata(dataset_name)
        transformed_metadata_path = dataset_transformer.run(metadata_file_path)
        dataset_mover.run(dataset_name, transformed_metadata_path)
        logger.info('Dataset built successfully')  # typo fix: was "sucessfully"
    except BuilderStepError as e:
        logger.error(str(e))
    except Exception as e:
        logger.error('Unexpected exception when building dataset')
        logger.error(str(e))
    finally:
        directory_cleaner.run(dataset_name)
if __name__ == '__main__':
    # Manual entry point for local runs; uncomment the desired pipeline call.
    DATASET_NAME = 'INNTEKT'
    # run_builder(DATASET_NAME)
    # run_metadata_builder(DATASET_NAME)
| 2,563 | 749 |
class Solution:
    """
    @param: nums: A list of integers.
    @return: A list of permutations.
    """
    def permute(self, nums):
        """Return all permutations of *nums* in lexicographic order.

        The input list is sorted in place first; an empty/None input yields
        the single empty permutation.
        """
        if not nums:
            return [[]]
        nums.sort()
        results = []
        self._find_permutations(nums, results, [])
        return results

    def _find_permutations(self, nums, results, current):
        """DFS helper: extend *current* with every still-unused element."""
        # A full-length partial permutation is one complete answer.
        if len(current) == len(nums):
            results.append(list(current))
            return
        for candidate in nums:
            if candidate in current:
                continue
            current.append(candidate)
            self._find_permutations(nums, results, current)
            current.pop()
class Solution2:
    """
    @param: nums: A list of integers.
    @return: A list of permutations.
    """
    def __init__(self):
        # Accumulated permutations; note that results persist across permute()
        # calls on the same instance (matches the original behavior).
        self.results = []
        self.length = 0

    def permute(self, nums):
        """Return all permutations of *nums* (assumed distinct), in DFS order."""
        if not nums or len(nums) == 0:
            self.results.append([])
            return self.results
        self.length = len(nums)
        self.backtracking(0, nums, [])
        return self.results

    def backtracking(self, startIndex, nums, permutation):
        """DFS helper: extend *permutation* with every unused element."""
        if len(permutation) == len(nums):
            self.results.append(permutation[:])
            return
        # BUG FIX: the original used Python 2's `xrange`, a NameError on
        # Python 3; `range` behaves identically here.
        for idx in range(startIndex, self.length):
            if nums[idx] in permutation:
                continue
            # add new element
            permutation.append(nums[idx])
            # explore; recurse from 0 so elements before idx stay available
            self.backtracking(0, nums, permutation)
            # remove element
            permutation.pop()
| 1,801 | 515 |
from string import ascii_lowercase

ALPHABET = set(ascii_lowercase)


def is_pangram(sentence):
    """Return True when *sentence* contains every ASCII letter at least once."""
    return ALPHABET <= set(sentence.lower())
| 220 | 74 |
import sys
sys.path.append('../code')
from tools import load_data_tools as ldt
from tools import emg_tools as et
from AfferentInput import AfferentInput
import matplotlib.pyplot as plt
import numpy as np
import pickle
""" get gait cycles """
# Interactive Python 2 script: plot toe/heel heights from a kinematics CSV,
# let the user click heel-strike and foot-off events on the figure, and
# pickle the selected event times for later simulation-input generation.
kinFile = "./generateForSimInputs/data/human/HuMoD-database/generatedFiles/Subject_B_speed_1.3_31-Oct-2017_KIN.csv"
# kinFile = "./generateForSimInputs/data/human/floatData/generatedFiles/H_JBM_20150109_04TM_NF_01_30-Apr-2018_KIN.csv"
footHeight = ldt.readCsvGeneral(kinFile,1,['time','height_toe','height_cal'],['time','leftToe','leftCalf'])
# Mean sampling period over the whole record.
footHeight['dt'] = (footHeight['time'][-1]-footHeight['time'].min())/footHeight['time'].size
fig1,ax = plt.subplots(1,figsize=(12,6))
# Toe height in blue, calcaneus (heel) height in red, time re-zeroed.
ax.plot(footHeight['time']-footHeight['time'][0],footHeight['height_toe'],'b')
ax.plot(footHeight['time']-footHeight['time'][0],footHeight['height_cal'],'r')
fig1.suptitle("Please select all heel strikes")
plt.draw()
plt.pause(1)
# ginput(n=0): unlimited clicks until the user terminates input (4 min cap);
# only the x (time) coordinate of each click is kept.
temp = fig1.ginput(n=0,timeout=240)
if len(temp)==0: raise(Exception("no heel strikes detected..."))
else: heelStrikes = np.atleast_1d([ float(x) for x,_ in temp])
print heelStrikes
fig1.suptitle("Please select all foot offs")
plt.draw()
plt.pause(1)
temp = fig1.ginput(n=0,timeout=240)
if len(temp)==0: raise(Exception("no foot offs detected..."))
else: footOffs = np.atleast_1d([ float(x) for x,_ in temp])
print footOffs
plt.draw()
gaitCyclesFileName = "./generateForSimInputs/output/humanGaitCyclesB13.p"
# gaitCyclesFileName = "./generateForSimInputs/output/humanGaitCyclesFloat.p"
# Two sequential dumps into one file; readers must unpickle twice, in order.
# NOTE(review): text-mode 'w' is Python 2 style -- 'wb' is needed on Python 3.
with open(gaitCyclesFileName, 'w') as pickle_file:
    pickle.dump(heelStrikes, pickle_file)
    pickle.dump(footOffs, pickle_file)
import asyncio
from asynctest import mock as async_mock, TestCase as AsyncTestCase
from ...core.in_memory import InMemoryProfile
from ...config.error import ArgsParseError
from ...connections.models.conn_record import ConnRecord
from ...storage.base import BaseStorage
from ...storage.record import StorageRecord
from ...version import __version__
from .. import upgrade as test_module
from ..upgrade import UpgradeError
class TestUpgrade(AsyncTestCase):
    async def setUp(self):
        """Create two in-memory profiles and seed the second profile's storage
        with an acapy_version record pinned at v0.7.2."""
        self.session = InMemoryProfile.test_session()
        self.profile = self.session.profile
        self.session_storage = InMemoryProfile.test_session()
        self.profile_storage = self.session_storage.profile
        self.storage = self.session_storage.inject(BaseStorage)
        record = StorageRecord(
            "acapy_version",
            "v0.7.2",
        )
        await self.storage.add_record(record)
    def test_bad_calls(self):
        """An unknown CLI argument makes argparse exit with SystemExit."""
        with self.assertRaises(SystemExit):
            test_module.execute(["bad"])
    async def test_upgrade_storage_from_version_included(self):
        """Upgrade succeeds when both a config path and from_version are given
        (uses the storage-backed profile)."""
        with async_mock.patch.object(
            test_module,
            "wallet_config",
            async_mock.CoroutineMock(
                return_value=(
                    self.profile_storage,
                    async_mock.CoroutineMock(did="public DID", verkey="verkey"),
                )
            ),
        ), async_mock.patch.object(
            ConnRecord,
            "query",
            async_mock.CoroutineMock(return_value=[ConnRecord()]),
        ), async_mock.patch.object(
            ConnRecord, "save", async_mock.CoroutineMock()
        ):
            await test_module.upgrade(
                {
                    "upgrade.config_path": "./aries_cloudagent/commands/default_version_upgrade_config.yml",
                    "upgrade.from_version": "v0.7.2",
                }
            )
    async def test_upgrade_storage_missing_from_version(self):
        """With no settings at all, from_version is read from the stored
        acapy_version record seeded in setUp."""
        with async_mock.patch.object(
            test_module,
            "wallet_config",
            async_mock.CoroutineMock(
                return_value=(
                    self.profile_storage,
                    async_mock.CoroutineMock(did="public DID", verkey="verkey"),
                )
            ),
        ), async_mock.patch.object(
            ConnRecord,
            "query",
            async_mock.CoroutineMock(return_value=[ConnRecord()]),
        ), async_mock.patch.object(
            ConnRecord, "save", async_mock.CoroutineMock()
        ):
            await test_module.upgrade({})
    async def test_upgrade_from_version(self):
        """Upgrade succeeds with only from_version in the settings (default
        config path is used)."""
        with async_mock.patch.object(
            test_module,
            "wallet_config",
            async_mock.CoroutineMock(
                return_value=(
                    self.profile,
                    async_mock.CoroutineMock(did="public DID", verkey="verkey"),
                )
            ),
        ), async_mock.patch.object(
            ConnRecord,
            "query",
            async_mock.CoroutineMock(return_value=[ConnRecord()]),
        ), async_mock.patch.object(
            ConnRecord, "save", async_mock.CoroutineMock()
        ):
            await test_module.upgrade(
                {
                    "upgrade.from_version": "v0.7.2",
                }
            )
    async def test_upgrade_callable(self):
        """Upgrade succeeds when the YAML config (mocked via safe_load) names a
        resave record path and enables update_existing_records."""
        with async_mock.patch.object(
            test_module,
            "wallet_config",
            async_mock.CoroutineMock(
                return_value=(
                    self.profile,
                    async_mock.CoroutineMock(did="public DID", verkey="verkey"),
                )
            ),
        ), async_mock.patch.object(
            test_module.yaml,
            "safe_load",
            async_mock.MagicMock(
                return_value={
                    "v0.7.2": {
                        "resave_records": {
                            "base_record_path": [
                                "aries_cloudagent.connections.models.conn_record.ConnRecord"
                            ]
                        },
                        "update_existing_records": True,
                    },
                }
            ),
        ):
            await test_module.upgrade(
                {
                    "upgrade.from_version": "v0.7.2",
                }
            )
    async def test_upgrade_x_same_version(self):
        """Upgrading when storage already holds the current version raises
        UpgradeError."""
        version_storage_record = await self.storage.find_record(
            type_filter="acapy_version", tag_query={}
        )
        # Overwrite the seeded v0.7.2 record with the running version.
        await self.storage.update_record(version_storage_record, f"v{__version__}", {})
        with async_mock.patch.object(
            test_module,
            "wallet_config",
            async_mock.CoroutineMock(
                return_value=(
                    self.profile_storage,
                    async_mock.CoroutineMock(did="public DID", verkey="verkey"),
                )
            ),
        ):
            with self.assertRaises(UpgradeError):
                await test_module.upgrade(
                    {
                        "upgrade.config_path": "./aries_cloudagent/commands/default_version_upgrade_config.yml",
                    }
                )
    async def test_upgrade_missing_from_version(self):
        """Upgrade succeeds with only a config path; from_version is resolved
        from elsewhere (profile has no stored version record)."""
        with async_mock.patch.object(
            test_module,
            "wallet_config",
            async_mock.CoroutineMock(
                return_value=(
                    self.profile,
                    async_mock.CoroutineMock(did="public DID", verkey="verkey"),
                )
            ),
        ), async_mock.patch.object(
            ConnRecord,
            "query",
            async_mock.CoroutineMock(return_value=[ConnRecord()]),
        ), async_mock.patch.object(
            ConnRecord, "save", async_mock.CoroutineMock()
        ):
            await test_module.upgrade(
                {
                    "upgrade.config_path": "./aries_cloudagent/commands/default_version_upgrade_config.yml",
                }
            )
    async def test_upgrade_x_callable_not_set(self):
        """A version config that enables update_existing_records without a
        registered update function raises UpgradeError."""
        with async_mock.patch.object(
            test_module,
            "wallet_config",
            async_mock.CoroutineMock(
                return_value=(
                    self.profile,
                    async_mock.CoroutineMock(did="public DID", verkey="verkey"),
                )
            ),
        ), async_mock.patch.object(
            test_module.yaml,
            "safe_load",
            async_mock.MagicMock(
                return_value={
                    "v0.7.2": {
                        "resave_records": {
                            "base_record_path": [
                                "aries_cloudagent.connections.models.conn_record.ConnRecord"
                            ]
                        },
                        "update_existing_records": True,
                    },
                    "v0.6.0": {"update_existing_records": True},
                }
            ),
        ):
            with self.assertRaises(UpgradeError) as ctx:
                await test_module.upgrade(
                    {
                        "upgrade.from_version": "v0.6.0",
                    }
                )
            assert "No update_existing_records function specified" in str(ctx.exception)
    async def test_upgrade_x_class_not_found(self):
        """A resave record path naming a class that cannot be imported raises
        UpgradeError ('Unknown Record type')."""
        with async_mock.patch.object(
            test_module,
            "wallet_config",
            async_mock.CoroutineMock(
                return_value=(
                    self.profile,
                    async_mock.CoroutineMock(did="public DID", verkey="verkey"),
                )
            ),
        ), async_mock.patch.object(
            test_module.yaml,
            "safe_load",
            async_mock.MagicMock(
                return_value={
                    "v0.7.2": {
                        "resave_records": {
                            "base_record_path": [
                                "aries_cloudagent.connections.models.conn_record.Invalid"
                            ],
                        }
                    },
                }
            ),
        ):
            with self.assertRaises(UpgradeError) as ctx:
                await test_module.upgrade(
                    {
                        "upgrade.from_version": "v0.7.2",
                    }
                )
            assert "Unknown Record type" in str(ctx.exception)
    async def test_execute(self):
        """execute() parses CLI arguments and drives upgrade() through the
        event loop (the loop itself is mocked so nothing really runs)."""
        with async_mock.patch.object(
            test_module,
            "wallet_config",
            async_mock.CoroutineMock(
                return_value=(
                    self.profile,
                    async_mock.CoroutineMock(did="public DID", verkey="verkey"),
                )
            ),
        ), async_mock.patch.object(
            ConnRecord,
            "query",
            async_mock.CoroutineMock(return_value=[ConnRecord()]),
        ), async_mock.patch.object(
            ConnRecord, "save", async_mock.CoroutineMock()
        ), async_mock.patch.object(
            asyncio, "get_event_loop", async_mock.MagicMock()
        ) as mock_get_event_loop:
            # Stub the loop so run_until_complete is a no-op recorder.
            mock_get_event_loop.return_value = async_mock.MagicMock(
                run_until_complete=async_mock.MagicMock(),
            )
            test_module.execute(
                [
                    "--upgrade-config",
                    "./aries_cloudagent/config/tests/test-acapy-upgrade-config.yaml",
                    "--from-version",
                    "v0.7.2",
                ]
            )
    async def test_upgrade_x_invalid_record_type(self):
        """upgrade() raises UpgradeError when a resave path names a class
        that is not a BaseRecord (ConnectionTarget here)."""
        with async_mock.patch.object(
            test_module,
            "wallet_config",
            async_mock.CoroutineMock(
                return_value=(
                    self.profile,
                    async_mock.CoroutineMock(did="public DID", verkey="verkey"),
                )
            ),
        ), async_mock.patch.object(
            test_module.yaml,
            "safe_load",
            async_mock.MagicMock(
                return_value={
                    "v0.7.2": {
                        "resave_records": {
                            "base_exch_record_path": [
                                "aries_cloudagent.connections.models.connection_target.ConnectionTarget"
                            ],
                        }
                    }
                }
            ),
        ):
            with self.assertRaises(UpgradeError) as ctx:
                await test_module.upgrade(
                    {
                        "upgrade.from_version": "v0.7.2",
                    }
                )
            assert "Only BaseRecord can be resaved" in str(ctx.exception)
async def test_upgrade_x_invalid_config(self):
with async_mock.patch.object(
test_module.yaml,
"safe_load",
async_mock.MagicMock(return_value={}),
):
with self.assertRaises(UpgradeError) as ctx:
await test_module.upgrade({})
assert "No version configs found in" in str(ctx.exception)
async def test_upgrade_x_from_version_not_in_config(self):
with async_mock.patch.object(
test_module,
"wallet_config",
async_mock.CoroutineMock(
return_value=(
self.profile,
async_mock.CoroutineMock(did="public DID", verkey="verkey"),
)
),
):
with self.assertRaises(UpgradeError) as ctx:
await test_module.upgrade(
{
"upgrade.from_version": "v1.2.3",
}
)
assert "No upgrade configuration found for" in str(ctx.exception)
def test_main(self):
with async_mock.patch.object(
test_module, "__name__", "__main__"
) as mock_name, async_mock.patch.object(
test_module, "execute", async_mock.MagicMock()
) as mock_execute:
test_module.main()
mock_execute.assert_called_once
| 12,210 | 3,260 |
# Read a repetition count and print a small ASCII-art animal whose every
# row is repeated `n` times side by side.
n = int(input())
art_rows = [
    ' _~_ ',
    ' (o o) ',
    ' / V \ ',
    '/( _ )\ ',
    ' ^^ ^^ ',
]
for row in art_rows:
    print(row * n)
| 143 | 73 |
#!/usr/bin/env python
# -*- coding:utf8 -*-
# Power by viekie. 2017-05-27 08:35:06
import numpy as np
import Filter
class ConvolutionalLayer(object):
    """A pure-numpy 2D convolutional layer with forward and backward passes.

    The layer owns ``filter_number`` filters, each spanning every input
    channel, and produces an output volume of shape
    ``(filter_number, output_height, output_width)``.
    """

    def __init__(self, input_width, input_height, channel_number,
                 filter_width, filter_height, filter_number, zero_padding,
                 stride, activator, learning_rate):
        # Geometry and hyper-parameters of the layer.
        self.input_width = input_width
        self.input_height = input_height
        self.channel_number = channel_number
        self.filter_width = filter_width
        # Bug fix: the attribute was misspelled "filer_height" while other
        # code read "filter_height", which raised AttributeError.
        self.filter_height = filter_height
        self.filter_number = filter_number
        self.filters = []
        self.zero_padding = zero_padding
        self.stride = stride
        self.activator = activator
        self.learning_rate = learning_rate
        # Spatial size of the output feature map.
        self.output_width = \
            ConvolutionalLayer.calc_output_size(self.input_width,
                                                self.filter_width,
                                                self.zero_padding,
                                                self.stride)
        self.output_height = \
            ConvolutionalLayer.calc_output_size(self.input_height,
                                                self.filter_height,
                                                self.zero_padding,
                                                self.stride)
        self.output_array = \
            np.zeros((self.filter_number, self.output_height,
                      self.output_width))
        # Bug fix: one filter per OUTPUT channel (was range(channel_number)).
        # NOTE(review): assumes the Filter module exposes a Filter class;
        # the original called the imported module object directly -- confirm.
        for i in range(filter_number):
            self.filters.append(Filter.Filter(self.filter_width,
                                              self.filter_height,
                                              self.channel_number))

    @staticmethod
    def calc_output_size(input_size, filter_size, padding_size, stride):
        '''
        Return the output length along one spatial dimension.

        Bug fix: integer division -- ``/`` yields a float in Python 3,
        which breaks ``np.zeros`` shape arguments downstream.
        '''
        return (input_size + 2 * padding_size - filter_size) // stride + 1

    def forward(self, input_array):
        '''
        Run the forward pass; the result lands in ``self.output_array``.
        '''
        self.input_array = input_array
        # Zero-pad the input once, up front.
        self.padding_input_array = self.padding(input_array, self.zero_padding)
        # Bug fix: index the output volume by filter POSITION; the original
        # indexed it with the filter object itself.
        for f_idx, f in enumerate(self.filters):
            self.convolution(self.padding_input_array, f.get_weights(),
                             f.get_bais(), self.stride,
                             self.output_array[f_idx])
        # Bug fix: method was defined as "elements_wise_op" but called as
        # "element_wise_op"; one canonical name (plus alias) is used now.
        self.element_wise_op(self.output_array, self.activator.forward)

    def element_wise_op(self, input_array, op):
        """Apply ``op`` to every element of ``input_array`` in place."""
        for i in np.nditer(input_array, op_flags=['readwrite']):
            # Bug fix: transform the element itself; the original indexed
            # the array with the (float) element value.
            i[...] = op(i)

    # Backwards-compatible alias for the original (misspelled) method name.
    elements_wise_op = element_wise_op

    def convolution(self, input_array, kernel_array, bais,
                    stride, output_array):
        """Cross-correlate ``kernel_array`` over ``input_array`` into
        ``output_array`` and add the scalar bias."""
        # Bug fix: sizes must come from .shape; plain indexing returned
        # rows/values rather than dimensions.
        output_height = output_array.shape[0]
        output_width = output_array.shape[1]
        kernel_width = kernel_array.shape[-1]
        kernel_height = kernel_array.shape[-2]
        for i in range(output_height):
            for j in range(output_width):
                # Bug fix: pass input_array; the original passed the
                # builtin ``input`` function.
                patch = self.get_patch(input_array, i, j, stride,
                                       kernel_height, kernel_width)
                output_array[i][j] = (patch * kernel_array).sum() + bais

    def get_patch(self, input_array, i, j, stride, height, width):
        '''
        Return the receptive field for output position (i, j); handles both
        2D (single-channel) and 3D (multi-channel) inputs.
        '''
        if input_array.ndim == 3:
            # Bug fix: return an ndarray so element-wise multiplication with
            # the kernel works (the original returned a Python list).
            return np.array([self.get_sub_array(input_array[d], i, j,
                                                stride, height, width)
                             for d in range(input_array.shape[0])])
        return self.get_sub_array(input_array, i, j, stride, height, width)

    def get_sub_array(self, input_array, i, j, stride, height, width):
        '''
        Slice a ``height x width`` window starting at (i*stride, j*stride).

        Bug fixes: the required-but-never-supplied ``nd`` parameter is
        removed, and the column slice now actually spans ``width`` columns.
        '''
        row_start = i * stride
        col_start = j * stride
        return input_array[row_start: row_start + height,
                           col_start: col_start + width]

    def padding(self, input_array, zp):
        '''
        Return ``input_array`` zero-padded by ``zp`` on each spatial edge.
        '''
        if zp == 0:
            return input_array
        if input_array.ndim == 3:
            input_depth, input_height, input_width = input_array.shape
            padded = np.zeros((input_depth, input_height + 2 * zp,
                               input_width + 2 * zp))
            # Bug fix: the width index was missing its slice colon, so the
            # original copy was malformed.
            padded[:,
                   zp: zp + input_height,
                   zp: zp + input_width] = input_array
            return padded
        elif input_array.ndim == 2:
            input_height, input_width = input_array.shape
            # Bug fix: the 2D target was allocated WITHOUT the padding
            # margin, so the padded copy could not fit.
            padded = np.zeros((input_height + 2 * zp,
                               input_width + 2 * zp))
            padded[zp: zp + input_height,
                   zp: zp + input_width] = input_array
            return padded

    def bp_sensitivity_map(self, sensitivity_array, activator):
        """Back-propagate ``sensitivity_array`` to this layer's input and
        store the result in ``self.delta_array``."""
        # Re-express the sensitivity map as if the stride were 1.
        expanded_array = \
            self.expand_sensitivity_map(sensitivity_array)
        expanded_width = expanded_array.shape[2]
        # Bug fix: integer division (``/`` is float division in Python 3).
        zp = (self.input_width + self.filter_width - 1 - expanded_width) // 2
        padded_array = self.padding(expanded_array, zp)
        self.delta_array = self.create_delta_array()
        # Each filter contributes to the input gradient; sum contributions.
        for f in range(self.filter_number):
            flt = self.filters[f]
            # Rotate each kernel 180 degrees (convolution vs. correlation).
            # Bug fix: a bare map() is lazy in Python 3; materialize it.
            rotated_weights = np.array([np.rot90(w, 2)
                                        for w in flt.get_weights()])
            delta_array = self.create_delta_array()
            for d in range(delta_array.shape[0]):
                # Bug fix: the original passed arguments in the wrong
                # positions (the per-channel gradient became the bias).
                self.convolution(padded_array[f], rotated_weights[d],
                                 0, 1, delta_array[d])
            self.delta_array += delta_array
        # Multiply by the activation derivative evaluated on the inputs.
        derivative_array = np.array(self.input_array)
        self.element_wise_op(derivative_array, self.activator.backward)
        self.delta_array *= derivative_array

    def expand_sensitivity_map(self, sensitivity_array):
        """Spread a strided sensitivity map onto a stride-1 grid of zeros."""
        depth = sensitivity_array.shape[0]
        expand_width = (self.input_width -
                        self.filter_width + 2 * self.zero_padding + 1)
        # Bug fix: the height must use filter_height, not filter_width.
        expand_height = (self.input_height -
                         self.filter_height + 2 * self.zero_padding + 1)
        expand_array = np.zeros((depth, expand_height, expand_width))
        # Copy each strided entry to its stride-1 position; gaps stay zero.
        for i in range(self.output_height):
            for j in range(self.output_width):
                step_i = i * self.stride
                step_j = j * self.stride
                expand_array[:, step_i, step_j] = sensitivity_array[:, i, j]
        return expand_array

    def create_delta_array(self):
        """Return a zeroed gradient buffer shaped like the layer input."""
        return np.zeros((self.channel_number, self.input_height,
                         self.input_width))

    def bp_gradient(self, sensitivity_array):
        """Accumulate weight and bias gradients for every filter."""
        # Expand the sensitivity map to stride 1 first.
        expand_array = self.expand_sensitivity_map(sensitivity_array)
        for f in range(self.filter_number):
            # Bug fix: the original did ``filter = filter[f]`` (indexing the
            # builtin) instead of reading self.filters.
            flt = self.filters[f]
            for d in range(flt.weights.shape[0]):
                # Convolving the padded input with the expanded sensitivity
                # map yields the weight gradient for this channel.
                # Bug fix: corrected argument order (see bp_sensitivity_map).
                self.convolution(self.padding_input_array[d],
                                 expand_array[f],
                                 0, 1,
                                 flt.weights_gradient[d])
            flt.bias_grad = expand_array[f].sum()

    def update(self):
        '''
        Apply one gradient-descent step to every filter.
        '''
        for flt in self.filters:
            flt.update(self.learning_rate)
| 8,235 | 2,720 |
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) 2021 scart97
__all__ = ["BatchTextTransformer", "TextTransformConfig"]
from dataclasses import dataclass
from typing import List, Optional
import torch
from torch import nn
from torch.nn.utils.rnn import pad_sequence
from thunder.text_processing.tokenizer import BPETokenizer, char_tokenizer
from thunder.text_processing.vocab import SimpleVocab, Vocab
@dataclass
class TextTransformConfig:
    """Configuration to create [`BatchTextTransformer`][thunder.text_processing.transform.BatchTextTransformer]

    Attributes:
        initial_vocab_tokens: List of tokens to create the vocabulary, special tokens should not be included here. required.
        simple_vocab: Controls if the used vocabulary will only have the blank token or more additional special tokens. defaults to `False`.
        sentencepiece_model: Path to sentencepiece .model file, if applicable.
    """

    initial_vocab_tokens: List[str]
    simple_vocab: bool = False
    sentencepiece_model: Optional[str] = None

    @classmethod
    def from_sentencepiece(cls, output_dir: str) -> "TextTransformConfig":
        """Load the data from a folder that contains the `tokenizer.vocab`
        and `tokenizer.model` outputs from sentencepiece.

        Args:
            output_dir : Output directory of the sentencepiece training, that contains the required files.

        Returns:
            Instance of `TextTransformConfig` with the corresponding data loaded.
        """
        reserved_tokens = {"<s>", "</s>", "<pad>", "<unk>"}
        with open(f"{output_dir}/tokenizer.vocab", "r") as vocab_file:
            # First tab-separated column of each line is the piece itself.
            pieces = [line.split("\t")[0] for line in vocab_file]
        # Special tokens are re-added elsewhere, so they are dropped here.
        vocab = [piece for piece in pieces if piece not in reserved_tokens]
        return cls(
            initial_vocab_tokens=vocab,
            sentencepiece_model=f"{output_dir}/tokenizer.model",
        )
class BatchTextTransformer(nn.Module):
    def __init__(self, cfg: TextTransformConfig):
        """This class is the glue code that uses all of the text processing
        functions to encode/decode an entire batch of text at once.

        Args:
            cfg: required config to create instance
        """
        super().__init__()
        # SimpleVocab carries only the blank token; Vocab adds the full
        # set of special tokens.
        self.vocab = (
            SimpleVocab(cfg.initial_vocab_tokens)
            if cfg.simple_vocab
            else Vocab(cfg.initial_vocab_tokens)
        )
        # Subword tokenization when a sentencepiece model file is given,
        # plain character tokenization otherwise.
        self.tokenizer = (
            BPETokenizer(cfg.sentencepiece_model)
            if cfg.sentencepiece_model
            else char_tokenizer
        )

    def encode(self, items: List[str], return_length: bool = True, device=None):
        """Tokenize, add special tokens, numericalize and pad a text batch.

        Args:
            items: batch of raw strings to encode.
            return_length: when True, also return each sequence's
                pre-padding length as a LongTensor.
            device: optional device for the produced tensors.

        Returns:
            The padded (batch, time) tensor, plus lengths if requested.
        """
        tokenized = [self.tokenizer(x) for x in items]
        expanded_tokenized = [self.vocab.add_special_tokens(x) for x in tokenized]
        encoded = [
            self.vocab.numericalize(x).to(device=device) for x in expanded_tokenized
        ]
        # Right-pad every sequence to the longest one with the pad index.
        encoded_batched = pad_sequence(
            encoded, batch_first=True, padding_value=self.vocab.pad_idx
        )
        if return_length:
            lengths = torch.LongTensor([len(it) for it in encoded]).to(device=device)
            return encoded_batched, lengths
        else:
            return encoded_batched

    @torch.jit.export
    def decode_prediction(
        self, predictions: torch.Tensor, remove_repeated: bool = True
    ) -> List[str]:
        """
        Args:
            predictions : Tensor of shape (batch, time)
            remove_repeated: controls if repeated elements without a blank between them will be removed while decoding

        Returns:
            A list of decoded strings, one for each element in the batch.
        """
        out_list: List[str] = []
        for element in predictions:
            # Remove consecutive repeated elements (CTC-style collapse)
            if remove_repeated:
                element = torch.unique_consecutive(element)
            # Map back to string
            out = self.vocab.decode_into_text(element)
            # Join prediction into one string
            out = "".join(out)
            # _ is a special char only present on sentencepiece
            out = out.replace("▁", " ")
            out = self.vocab.remove_special_tokens(out)
            out_list.append(out)
        return out_list
| 4,521 | 1,271 |
import os
from collections import defaultdict
import random
from typing import Dict, List
import pandas as pd
from scipy.stats import stats
from lib2vec.corpus_structure import Corpus
from experiments.predicting_high_rated_books import mcnemar_sig_text, chi_square_test
from lib2vec.vectorization import Vectorizer
from lib2vec.vectorization_utils import Vectorization
import numpy as np
def get_percentage_of_correctly_labeled(vectors, human_assessment_df: pd.DataFrame, doc_id_mapping: Dict[str, str],
                                        facet_mapping: Dict[str, str], use_sum: bool):
    """Score how often similarity from ``vectors`` matches human triplet picks.

    For each survey row the three books are compared pairwise with
    ``Vectorization.facet_sim`` and the strictly most-similar pair is
    checked against the human "Selected Answer Nr."; rows whose Selection
    is "skip" or "unsure" are ignored.

    Returns:
        Tuple of (per-facet accuracy dict including "all_facets",
        per-row 0/1 list, per-facet 0/1 lists).
    """
    correctly_assessed = []
    facet_wise = defaultdict(list)
    # Flip this to score a random guesser instead of the vector model.
    random_baseline = False
    skip_count = 0
    agreement_store = defaultdict(list)
    for _, row in human_assessment_df.iterrows():
        book1 = doc_id_mapping[row["Book 1"]]
        book2 = doc_id_mapping[row["Book 2"]]
        book3 = doc_id_mapping[row["Book 3"]]
        # The summed representation collapses every facet into one slot.
        if use_sum:
            facet = facet_mapping["total"]
        else:
            facet = facet_mapping[row["Facet"]]
        selection = row["Selection"]
        if selection == "skip" or selection == "unsure":
            skip_count += 1
            continue
        if random_baseline:
            if int(row["Selected Answer Nr."]) == random.randint(1, 3):
                correctly_assessed.append(1)
                facet_wise[row["Facet"]].append(1)
            else:
                correctly_assessed.append(0)
                facet_wise[row["Facet"]].append(0)
        else:
            sim_1 = Vectorization.facet_sim(model_vectors=vectors, doc_id_a=book1, doc_id_b=book2, facet_name=facet)
            sim_2 = Vectorization.facet_sim(model_vectors=vectors, doc_id_a=book1, doc_id_b=book3, facet_name=facet)
            sim_3 = Vectorization.facet_sim(model_vectors=vectors, doc_id_a=book2, doc_id_b=book3, facet_name=facet)
            # A prediction counts only when the chosen pair is STRICTLY
            # more similar than both alternatives (ties count as wrong).
            if int(row["Selected Answer Nr."]) == 1 and sim_1 > sim_2 and sim_1 > sim_3:
                correctly_assessed.append(1)
                facet_wise[row["Facet"]].append(1)
                agreement_store["True"].append(row["Agreement"])
            elif int(row["Selected Answer Nr."]) == 2 and sim_2 > sim_1 and sim_2 > sim_3:
                correctly_assessed.append(1)
                facet_wise[row["Facet"]].append(1)
                agreement_store["True"].append(row["Agreement"])
            elif int(row["Selected Answer Nr."]) == 3 and sim_3 > sim_1 and sim_3 > sim_2:
                correctly_assessed.append(1)
                facet_wise[row["Facet"]].append(1)
                agreement_store["True"].append(row["Agreement"])
            else:
                correctly_assessed.append(0)
                agreement_store["False"].append(row["Agreement"])
                facet_wise[row["Facet"]].append(0)
    # Robustness fix: np.mean([]) emits a RuntimeWarning and yields NaN.
    print("False:", np.mean(agreement_store["False"]) if agreement_store["False"] else float("nan"))
    print("True:", np.mean(agreement_store["True"]) if agreement_store["True"] else float("nan"))
    result_scores = {facet: sum(scores) / len(scores) for facet, scores in facet_wise.items()}
    # Robustness fix: avoid ZeroDivisionError when every row was skipped.
    result_scores["all_facets"] = (
        sum(correctly_assessed) / len(correctly_assessed) if correctly_assessed else 0.0
    )
    return result_scores, correctly_assessed, facet_wise
def correlation_for_correctly_labeled(vectors, human_assessment_df: pd.DataFrame, doc_id_mapping: Dict[str, str],
                                      facet_mapping: Dict[str, str], use_sum: bool):
    """Spearman-correlate model-predicted triplet answers with human answers.

    For every usable survey row, the model "answers" with the index of the
    strictly most-similar book pair (or -1 when there is no strict winner).
    A per-facet Spearman rho (absolute value, '*'-prefixed when p < 0.05)
    is returned as a formatted string.
    """
    # reverted_facets = {value: key for key, value in facet_mapping.items()}
    ground_truth = defaultdict(list)
    predicted = defaultdict(list)
    skip_count = 0
    for i, row in human_assessment_df.iterrows():
        book1 = doc_id_mapping[row["Book 1"]]
        book2 = doc_id_mapping[row["Book 2"]]
        book3 = doc_id_mapping[row["Book 3"]]
        # The summed representation collapses every facet into one slot.
        if use_sum:
            facet = facet_mapping["total"]
        else:
            facet = facet_mapping[row["Facet"]]
        selection = row["Selection"]
        if selection == "skip" or selection == "unsure":
            skip_count += 1
            continue
        sim_1 = Vectorization.facet_sim(model_vectors=vectors, doc_id_a=book1, doc_id_b=book2, facet_name=facet)
        sim_2 = Vectorization.facet_sim(model_vectors=vectors, doc_id_a=book1, doc_id_b=book3, facet_name=facet)
        sim_3 = Vectorization.facet_sim(model_vectors=vectors, doc_id_a=book2, doc_id_b=book3, facet_name=facet)
        if sim_1 > sim_2 and sim_1 > sim_3:
            pred_label = 1
        elif sim_2 > sim_1 and sim_2 > sim_3:
            pred_label = 2
        elif sim_3 > sim_1 and sim_3 > sim_2:
            pred_label = 3
        else:
            # No strict winner (ties); -1 marks an undecided prediction.
            print("warning")
            pred_label = -1
        ground_truth[row["Facet"]].append(int(row["Selected Answer Nr."]))
        ground_truth["all_facets"].append(int(row["Selected Answer Nr."]))
        predicted[row["Facet"]].append(pred_label)
        predicted["all_facets"].append(pred_label)
        # print(row["Facet"], "=", sum(facet_wise[row["Facet"]]))
    print(f"{skip_count} times skipped!")
    result_scores = {}
    for facet, ground_truth_labels in ground_truth.items():
        predicted_labels = predicted[facet]
        # NOTE(review): `stats` comes from `from scipy.stats import stats`;
        # scipy.stats.spearmanr is presumably intended -- confirm import.
        corr = stats.spearmanr(ground_truth_labels, predicted_labels)
        # Absolute rho, formatted; significant values get a '*' prefix.
        spearman = str(f'{abs(corr[0]):.3f}')
        if corr[1] < 0.05:
            spearman = f"*{spearman}"
        result_scores[facet] = spearman
    return result_scores
    # result_scores = {facet: sum(scores) / len(scores) for facet, scores in facet_wise.items()}
    # result_scores["all"] = sum(correctly_assessed) / len(correctly_assessed)
    # return result_scores
def load_vectors_from_properties(number_of_subparts, corpus_size, data_set,
                                 filter_mode, vectorization_algorithm):
    """Load document vectors for the given corpus configuration.

    Returns the loaded vectors plus a flag telling the caller whether the
    summed ("_sum") facet representation should be used.
    """
    use_sum = "_sum" in vectorization_algorithm
    vec_path = Vectorization.build_vec_file_name(
        number_of_subparts,
        corpus_size,
        data_set,
        filter_mode,
        vectorization_algorithm,
        "real",
        allow_combination=True,
    )
    vectors, _ = Vectorization.my_load_doc2vec_format(vec_path)
    return vectors, use_sum
def calculate_vectors(data_set_name: str, vec_algorithms: List[str], filters: List[str]):
    """Ensure vector files exist on disk for every (filter, algorithm) pair,
    vectorizing the corpus only where no cached file is found."""
    # try:
    #     corpus = Corpus.fast_load(path=os.path.join('corpora', data_set_name), load_entities=False)
    # except FileNotFoundError:
    #     corpus = DataHandler.load_classic_gutenberg_as_corpus()
    #     Preprocesser.annotate_and_save(corpus, corpus_dir=f"corpora/{data_set_name}")
    #     corpus = Corpus.fast_load(path=os.path.join('corpora', data_set_name), load_entities=False)
    for filter_mode in filters:
        corpus = Corpus.fast_load("all",
                                  "no_limit",
                                  data_set_name,
                                  filter_mode,
                                  "real",
                                  load_entities=False)
        for vectorization_algorithm in vec_algorithms:
            # "_sum" variants reuse the base algorithm's vector file.
            # NOTE(review): use_summation is computed but never used here.
            use_summation = False
            if "_sum" in vectorization_algorithm:
                vectorization_algorithm = vectorization_algorithm.replace("_sum", "")
                use_summation = True
            vec_file_name = Vectorization.build_vec_file_name('all',
                                                              'no_limit',
                                                              data_set_name,
                                                              filter_mode,
                                                              vectorization_algorithm,
                                                              'real')
            # Only vectorize when no cached vector file exists yet.
            if not os.path.isfile(vec_file_name):
                Vectorizer.algorithm(input_str=vectorization_algorithm,
                                     corpus=corpus,
                                     save_path=vec_file_name,
                                     return_vecs=False)
def evaluate(data_set_name: str, vec_algorithms: List[str], filters: List[str]):
    """Benchmark every algorithm against the human triplet survey.

    Computes per-facet accuracies and Spearman correlations, runs McNemar
    significance tests between two hard-coded algorithms, and writes the
    result tables to CSV (also printing them as LaTeX).
    """
    human_assessment_df = pd.read_csv("../results/human_assessment/gutenberg_classic_20/human_assessed_complete.csv")
    print(len(human_assessment_df.index))
    # Drop rows where annotators were unsure.
    human_assessment_df = human_assessment_df.loc[(human_assessment_df['Selection'] != "unsure")]
    # & (human_assessment_df['Answers'] > 1)
    # human_assessment_df = human_assessment_df.loc[(human_assessment_df['Agreement'] > 0.5)
    #                                               # & (human_assessment_df['Answers'] > 1)
    #                                               ]
    print(len(human_assessment_df.index))
    # Maps survey book ids (1-20) to corpus document ids.
    survey_id2doc_id = {1: "cb_17",
                        2: "cb_2",
                        3: "cb_0",
                        4: "cb_1",
                        5: "cb_3",
                        6: "cb_4",
                        7: "cb_5",
                        8: "cb_6",
                        9: "cb_9",
                        10: "cb_11",
                        11: "cb_12",
                        12: "cb_13",
                        13: "cb_14",
                        14: "cb_15",
                        15: "cb_8",
                        16: "cb_7",
                        17: "cb_10",
                        18: "cb_18",
                        19: "cb_19",
                        20: "cb_16",
                        }
    # Survey facet names -> vector facet suffixes ("" = whole document).
    facets = {"location": "loc", "time": "time", "atmosphere": "atm", "content": "cont", "plot": "plot", "total": ""}
    tuples = []
    correlation_tuples = []
    correctness_table = {}
    correctness_table_facet = {}
    for filter in filters:
        for vec_algorithm in vec_algorithms:
            filtered_dataset = f'{data_set_name}_{filter}'
            # corpus = Corpus.fast_load(path=os.path.join('corpora', f''data_set_name), load_entities=False)
            vecs, use_sum = load_vectors_from_properties(number_of_subparts="all",
                                                         corpus_size="no_limit",
                                                         data_set=data_set_name,
                                                         filter_mode=filter,
                                                         vectorization_algorithm=vec_algorithm)
            # NOTE(review): corr_scores/scores are indexed with "total" --
            # this assumes the survey contains rows with Facet == "total".
            corr_scores = correlation_for_correctly_labeled(vectors=vecs, human_assessment_df=human_assessment_df,
                                                            doc_id_mapping=survey_id2doc_id, facet_mapping=facets,
                                                            use_sum=use_sum)
            correlation_tuples.append((filtered_dataset, vec_algorithm, corr_scores["total"],
                                       corr_scores["time"], corr_scores["location"],
                                       corr_scores["plot"], corr_scores["atmosphere"], corr_scores["content"],
                                       corr_scores["all_facets"]))
            scores, cor_ass, facet_wise = get_percentage_of_correctly_labeled(vectors=vecs,
                                                                              human_assessment_df=human_assessment_df,
                                                                              doc_id_mapping=survey_id2doc_id,
                                                                              facet_mapping=facets,
                                                                              use_sum=use_sum)
            correctness_table[vec_algorithm] = cor_ass
            correctness_table_facet[vec_algorithm] = facet_wise
            tuples.append((filtered_dataset, vec_algorithm, scores["total"], scores["time"],
                           scores["location"],
                           scores["plot"],
                           scores["atmosphere"], scores["content"], scores["all_facets"]))
            print((filtered_dataset, vec_algorithm, scores["total"], scores["time"],
                   scores["location"],
                   scores["plot"],
                   scores["atmosphere"], scores["content"], scores["all_facets"]))
    # Pairwise significance between two fixed algorithms; KeyError (one of
    # them not evaluated in this run) silently skips the whole section.
    try:
        algo1 = "bert_pt"
        algo2 = "book2vec_adv_dbow_pca"
        true_true = 0
        true_false = 0
        false_true = 0
        false_false = 0
        # 2x2 contingency table of per-row correctness for both algorithms.
        for e1, e2 in zip(correctness_table[algo1], correctness_table[algo2]):
            if e1 and e2:
                true_true += 1
            elif e1 and not e2:
                true_false += 1
            elif not e1 and e2:
                false_true += 1
            elif not e1 and not e2:
                false_false += 1
            else:
                pass
        table = [[true_true, true_false],
                 [false_true, false_false]]
        print(table)
        print()
        print("Overall")
        mcnemar_sig_text(table)
        # facets = correctness_table_facet[algo1].keys()
        for facet in facets:
            true_true = 0
            true_false = 0
            false_true = 0
            false_false = 0
            for e1, e2 in zip(correctness_table_facet[algo1][facet], correctness_table_facet[algo2][facet]):
                if e1 and e2:
                    true_true += 1
                elif e1 and not e2:
                    true_false += 1
                elif not e1 and e2:
                    false_true += 1
                elif not e1 and not e2:
                    false_false += 1
                else:
                    pass
            table = [[true_true, true_false],
                     [false_true, false_false]]
            print()
            print(table)
            print(facet)
            mcnemar_sig_text(table)
        # NOTE(review): both arguments are algo1 -- algo2 was probably
        # intended here; confirm before relying on this test.
        chi_square_test(correctness_table[algo1], correctness_table[algo1])
    except KeyError:
        pass
    result_df = pd.DataFrame(tuples, columns=["Data set", "Algorithm", "Total", "Time",
                                              "Location", "Plot", "Atmosphere", "Content", "Micro AVG"])
    result_df = result_df.round(3)
    result_df.to_csv("results/human_assessment/performance.csv", index=False)
    print(result_df.to_latex(index=False))
    corr_df = pd.DataFrame(correlation_tuples, columns=["Data set", "Algorithm", "Total", "Time",
                                                        "Location", "Plot", "Atmosphere", "Content", "Micro AVG"])
    corr_df.to_csv("results/human_assessment/correlation_results.csv", index=False)
    print(corr_df.to_latex(index=False))
if __name__ == '__main__':
    data_set = "classic_gutenberg"
    # Algorithms to benchmark against the human survey. The large blocks of
    # commented-out alternative configurations that used to live here were
    # dead code and have been removed; consult version control for them.
    algorithms = [
        "bert_pt",
        "book2vec_dbow_avg",
        "book2vec_dbow_wo_raw_avg",
        "book2vec_dbow_net_only_avg",
        "book2vec_dbow_net_avg",
        "book2vec_advn_dbow_pca",
        "book2vec_advn_dbow_avg",
        "book2vec_advn_window_pca",
        "book2vec_advn_dbow_window_avg",
    ]
    # Corpus filtering modes to evaluate.
    filters = [
        "specific_words_strict",
    ]
    calculate_vectors(data_set, algorithms, filters)
    evaluate(data_set, algorithms, filters)
| 19,801 | 6,535 |
import logging
import pathlib
import pkg_resources
from mopidy import config, ext
__version__ = pkg_resources.get_distribution("Mopidy-SeeedRelays").version
class Extension(ext.Extension):
    """Mopidy extension wiring for the Seeed relay board frontend."""

    dist_name = "Mopidy-SeeedRelays"
    ext_name = "seeedrelays"
    version = __version__

    def get_default_config(self):
        # The default ext.conf ships next to this module.
        conf_path = pathlib.Path(__file__).parent / "ext.conf"
        return config.read(conf_path)

    def get_config_schema(self):
        schema = super().get_config_schema()
        # Declare the extension-specific configuration keys.
        for key, value_type in (
            ("i2c", config.Integer()),
            ("address", config.String()),
            ("relay", config.Integer()),
        ):
            schema[key] = value_type
        return schema

    def setup(self, registry):
        # Imported lazily so loading this module stays cheap.
        from .frontend import SeeedRelaysFrontend

        registry.add("frontend", SeeedRelaysFrontend)
| 759 | 238 |
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Parameter definitions for the Neutronics Plugin.
We hope neutronics plugins that compute flux will use ``mgFlux``, etc.,
which will enable modular construction of apps.
"""
import numpy
from armi.reactor import parameters
from armi.reactor.parameters import ParamLocation
from armi.reactor.blocks import Block
from armi.reactor.reactors import Core
def getNeutronicsParameterDefinitions():
    """Return ParameterDefinitionCollections for each appropriate ArmiObject."""
    definitions = {}
    definitions[Block] = _getNeutronicsBlockParams()
    definitions[Core] = _getNeutronicsCoreParams()
    return definitions
def _getNeutronicsBlockParams():
    """Build the block-level neutronics parameter definitions.

    Fixes applied: ``saveToDB`` now receives a real bool (was the string
    "True"), ``units`` uses ``None`` instead of the string "None" for
    unitless parameters, and a stray space was removed from the
    ``powerGenerated`` units.
    """
    pDefs = parameters.ParameterDefinitionCollection()
    with pDefs.createBuilder() as pb:
        pb.defParam(
            "axMesh",
            units="",
            description="number of neutronics axial mesh points in this block",
            default=None,
            categories=[parameters.Category.retainOnReplacement],
        )

        def mgFlux(self, value):
            # Coerce list-like values into a numpy array on assignment.
            self._p_mgFlux = (
                value
                if value is None or isinstance(value, numpy.ndarray)
                else numpy.array(value)
            )

        pb.defParam(
            "mgFlux",
            setter=mgFlux,
            units="n-cm/s",
            description="multigroup volume-integrated flux",
            location=ParamLocation.VOLUME_INTEGRATED,
            saveToDB=True,
            categories=[
                parameters.Category.fluxQuantities,
                parameters.Category.multiGroupQuantities,
            ],
            default=None,
        )

        pb.defParam(
            "adjMgFlux",
            units="n-cm/s",
            description="multigroup adjoint neutron flux",
            location=ParamLocation.VOLUME_INTEGRATED,
            saveToDB=True,
            categories=[
                parameters.Category.fluxQuantities,
                parameters.Category.multiGroupQuantities,
            ],
            default=None,
        )

        pb.defParam(
            "lastMgFlux",
            units="n-cm/s",
            description="multigroup volume-integrated flux used for averaging the latest and previous depletion step",
            location=ParamLocation.VOLUME_INTEGRATED,
            saveToDB=False,
            categories=[
                parameters.Category.fluxQuantities,
                parameters.Category.multiGroupQuantities,
            ],
            default=None,
        )

        pb.defParam(
            "mgFluxGamma",
            units="g-cm/s",
            description="multigroup gamma flux",
            location=ParamLocation.VOLUME_INTEGRATED,
            saveToDB=True,
            categories=[
                parameters.Category.fluxQuantities,
                parameters.Category.multiGroupQuantities,
            ],
            default=None,
        )

        pb.defParam(
            "mgNeutronVelocity",
            units="cm/s",
            description="multigroup neutron velocity",
            location=ParamLocation.AVERAGE,
            saveToDB=True,
            categories=[parameters.Category.multiGroupQuantities],
            default=None,
        )

    with pDefs.createBuilder(
        default=0.0,
        location=ParamLocation.AVERAGE,
        categories=[parameters.Category.detailedAxialExpansion],
    ) as pb:
        # Neutronics reaction rate params that are not re-derived in mesh conversion
        pb.defParam(
            "rateBalance",
            units="1/cm^3/s",
            description="Numerical balance between particle production and destruction (should be small)",
        )

        pb.defParam(
            "rateExtSrc",
            units="1/cm^3/s",
            description="Rate of production of neutrons from an external source.",
        )

        pb.defParam(
            "rateFisAbs",
            units="1/cm^3/s",
            description="Neutron abs. rate in fissile material",
        )

        pb.defParam(
            "rateFisSrc",
            units="1/cm^3/s",
            description="Fission source rate. This is related to production rate in fissile by a factor of keff",
        )

        pb.defParam(
            "rateLeak",
            units="1/cm^3/s",
            description="Rate that neutrons leak out of this block.",
        )

        pb.defParam(
            "rateParasAbs",
            units="1/cm^3/s",
            description="Rate of parasitic absorption (absorption in non-fertile/fissionable material)",
        )

        pb.defParam(
            "rateProdNet",
            units="1/cm^3/s",
            description="Net production rate of neutrons",
        )

        pb.defParam(
            "rateScatIn",
            units="1/cm^3/s",
            description="Rate neutrons in-scatter in this block",
        )

        pb.defParam(
            "rateScatOut",
            units="1/cm^3/s",
            description="Rate that neutrons out-scatter in this block (removal - absorption)",
        )

        pb.defParam(
            "capturePowerFrac",
            units=None,
            description="Fraction of the power produced through capture in a block.",
            # Bug fix: was the string "True"; saveToDB expects a bool.
            saveToDB=True,
        )

        pb.defParam(
            "fastFluence",
            units="#/cm^2",
            description="Fast spectrum fluence",
            categories=["cumulative"],
        )

        pb.defParam(
            "fastFluencePeak",
            units="#/cm^2",
            description="Fast spectrum fluence with a peaking factor",
        )

        pb.defParam(
            "fluence", units="#/cm^2", description="Fluence", categories=["cumulative"]
        )

        pb.defParam(
            "flux",
            units="n/cm^2/s",
            description="neutron flux",
            categories=[
                parameters.Category.retainOnReplacement,
                parameters.Category.fluxQuantities,
            ],
        )

        pb.defParam(
            "fluxAdj", units="", description="Adjoint flux"  # adjoint flux is unitless
        )

        pb.defParam(
            "pdens", units="W/cm$^3$", description="Average volumetric power density"
        )

        pb.defParam(
            "pdensDecay",
            units="W/cm$^3$",
            description="Decay power density from decaying radionuclides",
        )

        pb.defParam("arealPd", units="MW/m^2", description="Power divided by XY area")

        pb.defParam(
            "arealPdGamma", units="MW/m^2", description="Areal gamma power density"
        )

        pb.defParam("fertileBonus", units=None, description="The fertile bonus")

        pb.defParam(
            "fisDens",
            units="fissions/cm^3/s",
            description="Fission density in a pin (scaled up from homogeneous)",
        )

        pb.defParam(
            "fisDensHom", units="1/cm^3/s", description="Homogenized fissile density"
        )

        pb.defParam(
            "fluxDeltaFromRef",
            # Consistency fix: unitless params use None, not the string "None".
            units=None,
            description="Relative difference between the current flux and the directly-computed perturbed flux.",
        )

        pb.defParam(
            "fluxDirect",
            units="n/cm^2/s",
            description="Flux is computed with a direct method",
        )

        pb.defParam(
            "fluxGamma",
            units="g/cm^2/s",
            description="Gamma scalar flux",
            categories=[
                parameters.Category.retainOnReplacement,
                parameters.Category.fluxQuantities,
            ],
        )

        pb.defParam(
            "fluxPeak",
            units="n/cm^2/s",
            description="Peak neutron flux calculated within the mesh",
        )

        pb.defParam(
            "fluxPertDeltaFromDirect",
            units=None,
            description="Relative difference between the perturbed flux and the directly-computed perturbed flux",
        )

        pb.defParam(
            "fluxPertDeltaFromDirectfluxRefWeighted", units=None, description=""
        )

        pb.defParam(
            "fluxPerturbed", units="1/cm^2/s", description="Flux is computed by MEPT"
        )

        pb.defParam("fluxRef", units="1/cm^2/s", description="Reference flux")

        pb.defParam(
            "kInf",
            units=None,
            description="Neutron production rate in this block/neutron absorption rate in this block. Not truly kinf but a reasonable approximation of reactivity.",
        )

        pb.defParam(
            "medAbsE", units="eV", description="Median neutron absorption energy"
        )

        pb.defParam(
            "medFisE",
            units="eV",
            description="Median energy of neutron causing fission",
        )

        pb.defParam("medFlxE", units="eV", description="Median neutron flux energy")

        pb.defParam(
            "pdensGamma",
            units="W/cm^3",
            description="Average volumetric gamma power density",
        )

        pb.defParam(
            "pdensNeutron",
            units="W/cm^3",
            description="Average volumetric neutron power density",
        )

        pb.defParam("ppdens", units="W/cm^3", description="Peak power density")

        pb.defParam("ppdensGamma", units="W/cm^3", description="Peak gamma density")

    # rx rate params that are derived during mesh conversion.
    # We'd like all things that can be derived from flux and XS to be
    # in this category to minimize numerical diffusion but it is a WIP.
    with pDefs.createBuilder(default=0.0, location=ParamLocation.AVERAGE,) as pb:
        pb.defParam(
            "rateAbs",
            units="1/cm^3/s",
            description="Total absorption rate in this block (fisson + capture).",
        )

        pb.defParam(
            "rateCap",
            units="1/cm^3/s",
            description="Parasitic capture rate in this block.",
        )

        pb.defParam(
            "rateFis", units="1/cm^3/s", description="Fission rate in this block."
        )

        pb.defParam(
            "rateProdFis",
            units="1/cm^3/s",
            description="Production rate of neutrons from fission reactions (nu * fission source / k-eff)",
        )

        pb.defParam(
            "rateProdN2n",
            units="1/cm^3/s",
            description="Production rate of neutrons from n2n reactions.",
        )

    with pDefs.createBuilder(
        default=0.0,
        location=ParamLocation.VOLUME_INTEGRATED,
        categories=[parameters.Category.detailedAxialExpansion],
    ) as pb:
        pb.defParam(
            "powerGenerated",
            # Bug fix: units were " W" with a stray leading space.
            units="W",
            description="Generated power. Different than b.p.power only when gamma transport is activated.",
        )

        pb.defParam("power", units="W", description="Total power")

        pb.defParam("powerDecay", units="W", description="Total decay power")

        pb.defParam("powerGamma", units="W", description="Total gamma power")

        pb.defParam("powerNeutron", units="W", description="Total neutron power")

    return pDefs
def _getNeutronicsCoreParams():
    """Return the collection of core-level neutronics parameter definitions.

    Covers global eigenvalue data, the structured axial mesh, and scalar
    multiplication-factor parameters used by the neutronics solvers.
    """
    pDefs = parameters.ParameterDefinitionCollection()
    with pDefs.createBuilder(categories=[parameters.Category.neutronics]) as pb:
        pb.defParam(
            "eigenvalues",
            units=None,
            description="All available lambda-eigenvalues of reactor.",
            # Will hold a list at runtime; a mutable default is not allowed,
            # so it starts as None.
            default=None,
            location=ParamLocation.AVERAGE,
        )
        pb.defParam(
            "axialMesh",
            units="cm",
            description="Global axial mesh from bottom to top used in structured-mesh neutronics simulations.",
            default=None,
            location=ParamLocation.TOP,
        )
        # The two scalar k-parameters differ only in name and description.
        for paramName, paramDescription in (
            ("kInf", "k-infinity"),
            ("refKeff", "Reference unperturbed keff"),
        ):
            pb.defParam(
                paramName,
                units=None,
                description=paramDescription,
                default=0.0,
                location=ParamLocation.AVERAGE,
            )
    return pDefs
| 12,729 | 3,590 |
from flask import Blueprint, jsonify, request
from flask_cors import cross_origin
# Model imports
from app.model.Persona import Persona
from app.model.Usuario import ManejoUsuarios
# Blueprint grouping every /persona/ endpoint registered below.
persona = Blueprint('persona', __name__, url_prefix="/persona/")
# List every Persona.
@persona.route("list", methods=["POST"])
@cross_origin()
def getPersona():
    """Return all personas, or an invalid-token message when not logged in."""
    # Bail out early when there is no active session.
    if not ManejoUsuarios.VerificarSesion():
        return jsonify({"message":"invalid token"})
    # Fetch the complete persona list from the model layer.
    data = Persona.listaPersonas()
    return jsonify({"response":data,"message":"ok"})
# Get one Persona by id.
@persona.route("", methods=["POST"])
@cross_origin()
def getPersonaById():
    """Return a single persona looked up by the "idPersona" field of the JSON body.

    Responds with "invalid token" when the session is inactive or the body is
    missing/malformed, and with "error" when the model lookup fails.
    """
    # Verify that the session is active before touching the request body.
    login = ManejoUsuarios.VerificarSesion()
    if(login):
        try:
            personaId = request.json["idPersona"]
        except (KeyError, TypeError):
            # Bug fix: was a bare ``except:`` (also traps SystemExit /
            # KeyboardInterrupt). Missing body or key is reported as an
            # invalid token to preserve the existing API contract.
            return jsonify({"message":"invalid token"})
        try:
            data = Persona.GetPersonaById(personaId)
            return jsonify({"response":data,"message":"ok"})
        except Exception:
            # Lookup failed downstream (bad id, DB error); generic error.
            return jsonify({"message":"error"})
    return jsonify({"message":"invalid token"})
# Create a Persona.
@persona.route("create", methods=["POST"])
@cross_origin()
def createPersona():
    """Create a persona from the "nombre" field of the JSON body."""
    # Verify that the session is active before touching the request body.
    login = ManejoUsuarios.VerificarSesion()
    if(login):
        try:
            nombre = request.json["nombre"]
        except (KeyError, TypeError):
            # Bug fix: was a bare ``except:``. Missing body or key is
            # reported as an invalid token to keep the API contract.
            return jsonify({"message":"invalid token"})
        try:
            Persona.createPersona(nombre)
            return jsonify({"message":"ok"})
        except Exception:
            # Creation failed downstream; surface a generic error.
            return jsonify({"message":"error"})
    return jsonify({"message":"invalid token"})
import unittest
# Discover and run the pynetworking test suite when executed as a script.
if __name__ == '__main__':
    from pynetworking.Logging import logger
    import sys
    # 30 == logging.WARNING: silences INFO/DEBUG output during the run.
    logger.setLevel(30)
    test_loader = unittest.TestLoader()
    test_suite = test_loader.discover('pynetworking.tests', pattern='test_*.py')
    # test_suite._tests.pop(1) # Exclude communication tests, because they don't work
    runner = unittest.TextTestRunner(stream=sys.stderr)
    runner.run(test_suite)
| 421 | 137 |
#!/usr/bin/python3
import sys
import random
import math
def sieve(n):
    """Return a list of all primes below n (Sieve of Eratosthenes)."""
    # flags[i] is True while i is still presumed prime; 0 and 1 never are.
    flags = [False, False] + [True] * (n - 2)
    print("Numbers appended")
    # Only candidate divisors up to sqrt(n) need to be processed.
    for candidate in range(int(math.sqrt(n)) + 1):
        if flags[candidate]:
            # Cross off every multiple, starting at candidate squared.
            for multiple in range(candidate ** 2, n, candidate):
                flags[multiple] = False
    print("Filter finished")
    # Indices whose flag survived are the primes.
    return [value for value, is_prime in enumerate(flags) if is_prime]
def main(length):
    """Print one random prime that has exactly ``length + 1`` digits.

    Args:
        length: Digit count minus one; the prime is drawn uniformly from
            primes in [10**length, 10**(length + 1)).
    """
    primes = sieve(10 ** (length + 1))
    print("Finished generating")
    # Filter once instead of rejection-sampling in an unbounded loop; every
    # remaining candidate already has the required digit count and a single
    # random.choice preserves the uniform distribution.
    candidates = [p for p in primes if p >= 10 ** length]
    num = random.choice(candidates)
    print(num)
# Script-style invocation: prints one random 8-digit prime on execution/import.
main(7)
# if __name__ == "__main__":
#     if len(sys.argv) > 1:
#         length = int(sys.argv[1])
#     else:
#         length = 5
#     main(length)
| 947 | 319 |
import sys
import os
import requests
import pytest
from tests.test_resources.helper import get_with_path_return_json
from tests.test_resources.settings import SEARCH_URL
@pytest.mark.smoke
@pytest.mark.integration
def test_fetch_ad_by_id(session):
    """Fetch one ad via /search, then re-fetch it via /ad/{id}.

    Verifies that /ad returns the same ad id that /search reported and that
    the ad payload carries the expected number of top-level fields.
    """
    search_result = get_with_path_return_json(session, '/search', params={'limit': '1'})
    expected_id = search_result['hits'][0]['id']
    ad = get_with_path_return_json(session, path=f"/ad/{expected_id}", params={})
    assert ad['id'] == expected_id
    # A complete ad document is expected to expose 33 top-level keys.
    assert len(ad) == 33
@pytest.mark.integration
def test_fetch_not_found_ad_by_id(session):
    """An id that matches no ad must yield HTTP 404 from the /ad endpoint."""
    bogus_id = '823069282306928230692823069282306928230692'
    response = session.get(f"{SEARCH_URL}/ad/{bogus_id}", params={})
    assert response.status_code == requests.codes.not_found
# Allow running this module directly, restricted to integration-marked tests.
if __name__ == '__main__':
    pytest.main([os.path.realpath(__file__), '-svv', '-ra', '-m integration'])
| 1,136 | 414 |
import unittest
from biggu_container import Container
from biggu_pipeline import Pipeline
class PipelineWithCustomException(Pipeline):
    """Pipeline subclass that overrides exception handling with a canned message."""
    @staticmethod
    def handle_exception(exception):
        # Swallow the original exception and return a fixed string instead.
        return "An error has occurred"
class Foo:
    """Empty marker class used as a pipeline passable in the tests below."""
    pass
class Bar:
    """Terminal pipe: greets the passable and never calls the next stage."""

    def handle(self, name, next):
        # ``next`` is deliberately ignored so the chain stops here.
        greeting = "Hello " + name
        return greeting
class Baz:
    """Terminal pipe: greets the passable, optionally with exclamation marks."""

    def handle(self, name, next, exclamation):
        # Any truthy flag (including the non-empty string 'true' supplied by
        # the 'baz:true' pipe spec) appends the exclamation signs.
        if exclamation:
            suffix = '!!!'
        else:
            suffix = ''
        return "Hello " + name + suffix
class Biz:
    """Pass-through pipe that appends ' biz' before forwarding to the next stage."""

    def handle(self, value, next):
        tagged = value + " biz"
        return next(tagged)
class Buz:
    """Pass-through pipe that appends ' buz' before forwarding to the next stage."""

    def handle(self, value, next):
        tagged = value + " buz"
        return next(tagged)
class TestPipeline(unittest.TestCase):
    """Exercises the fluent Pipeline API: send/through/via/then and carry()."""
    @staticmethod
    def app():
        # Fresh IoC container per test so registrations never leak between tests.
        return Container()
    def test_send(self):
        pipeline = Pipeline(self.app())
        result = pipeline.send(Foo())
        self.assertIsInstance(pipeline.passable(), Foo)
        # send() returns the pipeline itself for chaining.
        self.assertEqual(result, pipeline)
    def test_set_pipes_with_list(self):
        pipeline = Pipeline(self.app())
        foo = Foo()
        bar = Bar()
        result = pipeline.through([foo, bar])
        self.assertIsInstance(pipeline.pipes(), list)
        self.assertEqual(pipeline.pipes(), [foo, bar])
        self.assertEqual(result, pipeline)
    def test_set_pipes_with_multiple_params(self):
        # through() also accepts the pipes as varargs instead of a list.
        pipeline = Pipeline(self.app())
        foo = Foo()
        bar = Bar()
        result = pipeline.through(foo, bar)
        self.assertIsInstance(pipeline.pipes(), list)
        self.assertEqual(pipeline.pipes(), [foo, bar])
        self.assertEqual(result, pipeline)
    def test_change_handler_method(self):
        # via() changes which method name is invoked on each pipe.
        pipeline = Pipeline(self.app())
        result = pipeline.via('custom_method')
        self.assertEqual(pipeline.method(), 'custom_method')
        self.assertEqual(result, pipeline)
    def test_prepare_destination_using_lambda(self):
        pipeline = Pipeline(self.app())
        closure = pipeline.prepare_destination(lambda passable: passable + ' through pipeline')
        result = closure("text")
        self.assertEqual(result, 'text through pipeline')
    def test_using_prepare_destination_method_as_decorator(self):
        pipeline = Pipeline(self.app())
        @pipeline.prepare_destination
        def closure(passable):
            return passable + ' through pipeline as decorator'
        result = closure("closure")
        self.assertEqual(result, 'closure through pipeline as decorator')
    def test_prepare_destination_raise_exception(self):
        # Passing a class (not a string) makes the destination body raise.
        pipeline = Pipeline(self.app())
        @pipeline.prepare_destination
        def closure(passable):
            return passable + ' through pipeline as decorator'
        def raiser():
            closure(Foo)
        self.assertRaises(Exception, raiser)
    def test_extend_for_custom_handle_execption(self):
        # A subclass may convert exceptions into a value instead of raising.
        pipeline = PipelineWithCustomException(self.app())
        closure = pipeline.prepare_destination(lambda passable: passable + ' through pipeline')
        result = closure(Foo())
        self.assertEqual(result, 'An error has occurred')
    def test_parse_pipe_string(self):
        # 'name:a,b' splits into the pipe name and its parameter list.
        pipeline = Pipeline(self.app())
        self.assertEqual(pipeline.parse_pipe_string('auth:user,role'), ['auth', ['user', 'role']])
        self.assertEqual(pipeline.parse_pipe_string('auth:'), ['auth', ['']])
        self.assertEqual(pipeline.parse_pipe_string('auth'), ['auth', []])
        self.assertEqual(pipeline.parse_pipe_string(''), ['', []])
    def test_carry_method_with_class_as_pipe(self):
        pipeline = Pipeline(self.app())
        wrapper= pipeline.carry()
        closure = wrapper([], Bar)
        result = closure("biggu")
        self.assertTrue(callable(wrapper))
        self.assertTrue(callable(closure))
        self.assertEqual(result, 'Hello biggu')
    def test_carry_method_with_string_as_pipe(self):
        # String pipes are resolved out of the container by name.
        app = self.app()
        app.instance('bar', Bar())
        pipeline = Pipeline(app)
        wrapper= pipeline.carry()
        closure = wrapper([], 'bar')
        result = closure("biggu")
        self.assertTrue(callable(wrapper))
        self.assertTrue(callable(closure))
        self.assertEqual(result, 'Hello biggu')
    def test_carry_method_with_string_and_params_as_pipe(self):
        # 'baz:true' passes the string 'true' as the pipe's extra parameter.
        app = self.app()
        app.instance('baz', Baz())
        pipeline = Pipeline(app)
        wrapper= pipeline.carry()
        closure = wrapper([], 'baz:true')
        result = closure("biggu")
        self.assertTrue(callable(wrapper))
        self.assertTrue(callable(closure))
        self.assertEqual(result, 'Hello biggu!!!')
    def test_carry_method_with_lambda_as_pipe(self):
        pipeline = Pipeline(self.app())
        wrapper= pipeline.carry()
        closure = wrapper([], lambda value, next: 'Hello ' + value + " from lambda")
        result = closure("biggu")
        self.assertTrue(callable(wrapper))
        self.assertTrue(callable(closure))
        self.assertEqual(result, 'Hello biggu from lambda')
    def test_pipeline_flow(self):
        # End-to-end: each pipe appends a tag, then the destination appends ' end'.
        pipeline = Pipeline(self.app())
        result = pipeline.send("Hello").through([
            Biz,
            Buz
        ]).then(lambda value: value + " end")
        self.assertEqual(result, "Hello biz buz end")
| 5,300 | 1,520 |
from flask import Flask
app = Flask(__name__)
# Blueprint imports are placed after ``app`` is created — presumably to
# avoid circular imports between the views and this module; TODO confirm.
from fileshare.site.views import site
from fileshare.api.views import api
from fileshare.api_admin.views import api_admin
from fileshare.site_admin.views import site_admin
# Register every blueprint on the single application object.
app.register_blueprint(site)
app.register_blueprint(api)
app.register_blueprint(site_admin)
app.register_blueprint(api_admin)
| 349 | 117 |
# ============LICENSE_START=======================================================
# org.onap.dcae
# ================================================================================
# Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============LICENSE_END=========================================================
#
import pytest
from cloudify.exceptions import NonRecoverableError
import os
from consulif.consulif import ConsulHandle
# No connections are actually made to this host
CONSUL_HOST = "consul" # Should always be a local consul agent on Cloudify Manager
#CONSUL_PORT = '8510'
CONSUL_PORT = '8500'
DBCL_KEY_NAME = "dmaap_dbcl_info" # Consul key containing DMaaP data bus credentials
DBC_SERVICE_NAME= "dmaap_bus_controller" # Name under which the DMaaP bus controller is registered
def test_get_config_service(mockconsul):
    """Smoke test: read the DMaaP config from Consul and assemble the API URL.

    NOTE(review): this test contains no assertions; it only verifies that the
    config read, service lookup, and URL assembly complete without raising
    against the mocked consul. (An unused ``err_msg`` local was removed.)
    """
    _ch = ConsulHandle("http://{0}:{1}".format(CONSUL_HOST, CONSUL_PORT), None, None, None)
    config = _ch.get_config(DBCL_KEY_NAME)
    DMAAP_USER = config['dmaap']['username']
    DMAAP_PASS = config['dmaap']['password']
    DMAAP_OWNER = config['dmaap']['owner']
    if 'protocol' in config['dmaap']:
        DMAAP_PROTOCOL = config['dmaap']['protocol']
    else:
        DMAAP_PROTOCOL = 'https'    # Default to https (service discovery should give us this but doesn't
    if 'path' in config['dmaap']:
        DMAAP_PATH = config['dmaap']['path']
    else:
        DMAAP_PATH = 'webapi'       # Should come from service discovery but Consul doesn't support it
    service_address, service_port = _ch.get_service(DBC_SERVICE_NAME)
    DMAAP_API_URL = '{0}://{1}:{2}/{3}'.format(DMAAP_PROTOCOL, service_address, service_port, DMAAP_PATH)
def test_add_entry(mockconsul):
    """Add two name/value pairs under one Consul key, then remove the key."""
    handle = ConsulHandle("http://{0}:{1}".format(CONSUL_HOST, CONSUL_PORT), None, None, None)
    key = 'DMAAP_TEST'
    # Two writes under the same key exercise the append behaviour.
    for name, value in (
        ('dmaap_test_name', 'dmaap_test_value'),
        ('dmaap_test_name_2', 'dmaap_test_value_2'),
    ):
        handle.add_to_entry(key, name, value)
    # Clean up so repeated runs start from a blank slate.
    handle.delete_entry(key)
| 2,835 | 920 |
from slackclient._channel import Channel
import pytest
def test_channel(channel):
    """The ``channel`` fixture must produce a Channel instance."""
    # isinstance is the idiomatic check; ``type(x) == T`` rejects subclasses.
    assert isinstance(channel, Channel)
def test_channel_eq(channel):
    """Channel equality matches its name, '#'-prefixed name, and id."""
    channel = Channel(
        'test-server',
        'test-channel',
        'C12345678',
    )
    # Every accepted alias compares equal to the channel.
    for alias in ('test-channel', '#test-channel', 'C12345678'):
        assert channel == alias
    # An unrelated string must not compare equal.
    assert (channel == 'foo') is False
def test_channel_is_hashable(channel):
    """A Channel can serve as a dict key and look up its own id."""
    channel = Channel(
        'test-server',
        'test-channel',
        'C12345678',
    )
    lookup = {channel: channel.id}
    assert lookup[channel] == 'C12345678'
    assert (lookup[channel] == 'foo') is False
# Known-failing: sending presumably needs a live server connection — confirm.
@pytest.mark.xfail
def test_channel_send_message(channel):
    channel.send_message('hi')
| 763 | 259 |
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from config import config
Base = declarative_base()
class DatabaseManager:
    """Lazily-initialised holder for a scoped SQLAlchemy session."""

    def __init__(self):
        # No session exists until init_db() is explicitly called.
        self.db_session = None

    def init_db(self):
        """Build the engine from config values, bind a scoped session, create tables."""
        # Assemble the connection URL from the individual config pieces.
        connection_url = '{}://{}:{}@{}:{}/{}'.format(
            config.DB_ENGINE_PREFIX,
            config.DB_USER,
            config.DB_PASSWORD,
            config.DB_HOST,
            config.DB_PORT,
            config.DB_NAME,
        )
        # NOTE(review): convert_unicode was removed in SQLAlchemy 1.4 —
        # confirm the pinned SQLAlchemy version still accepts it.
        engine = create_engine(connection_url, convert_unicode=True)
        self.db_session = scoped_session(sessionmaker(autocommit=False, autoflush=False, bind=engine))
        Base.query = self.db_session.query_property()
        # Imported here so the model classes register on Base before create_all.
        import apps.user.models
        Base.metadata.create_all(bind=engine)

    def get_db_session(self):
        """Return the scoped session, or None when init_db() has not run."""
        return self.db_session


database_manager = DatabaseManager()
| 1,049 | 334 |
import math
import random
import numpy as np
import torchvision.transforms as T
from albumentations import (
Compose, HorizontalFlip, VerticalFlip, CLAHE, RandomRotate90, HueSaturationValue,
RandomBrightness, RandomContrast, RandomGamma, OneOf,
ToFloat, ShiftScaleRotate, GridDistortion, ElasticTransform, JpegCompression, HueSaturationValue,
RGBShift, RandomBrightnessContrast, RandomContrast, Blur, MotionBlur, MedianBlur, GaussNoise,CenterCrop,
IAAAdditiveGaussianNoise,GaussNoise,Cutout,Rotate, Normalize, Crop, RandomCrop, Resize, RGBShift
)
from PIL import Image
import cv2
import os
import matplotlib.pyplot as plt
import copy
class RandomErasing(object):
    """ Randomly selects a rectangle region in an image and erases its pixels.
        'Random Erasing Data Augmentation' by Zhong et al.
        See https://arxiv.org/pdf/1708.04896.pdf
    Args:
         probability: The probability that the Random Erasing operation will be performed.
         sl: Minimum proportion of erased area against input image.
         sh: Maximum proportion of erased area against input image.
         r1: Minimum aspect ratio of erased area.
         mean: Erasing value. NOTE(review): ``mean`` is stored but never used;
             the region is filled with its own per-channel mean (R zeroed) — confirm.
    """

    def __init__(self, probability=0.5, sl=0.02, sh=0.15, r1=0.3, mean=(0.485, 0.456, 0.406)):
        self.probability = probability
        self.mean = mean
        self.sl = sl
        self.sh = sh
        self.r1 = r1

    def __call__(self, img):
        # Work on a copy so the caller's array is never mutated.
        img = copy.deepcopy(img)
        if random.uniform(0, 1) > self.probability:
            return img
        # Up to 100 attempts to find a rectangle that fits inside the image.
        for attempt in range(100):
            area = img.shape[0] * img.shape[1]
            # Sample the target area and the aspect ratio of the rectangle.
            target_area = random.uniform(self.sl, self.sh) * area
            aspect_ratio = random.uniform(self.r1, 1 / self.r1)
            h = int(round(math.sqrt(target_area * aspect_ratio)))
            w = int(round(math.sqrt(target_area / aspect_ratio)))
            if w < img.shape[1] and h < img.shape[0]:
                x1 = random.randint(0, img.shape[0] - h)
                y1 = random.randint(0, img.shape[1] - w)
                image_roi = img[x1:x1 + h, y1:y1 + w, :]
                image_mean = np.mean(image_roi, axis=(0, 1))
                # Zero the R channel of the fill colour.
                image_mean[0] = 0
                if img.shape[2] == 3:
                    img[x1:x1 + h, y1:y1 + w, 0] = image_mean[0]
                    img[x1:x1 + h, y1:y1 + w, 1] = image_mean[1]
                    img[x1:x1 + h, y1:y1 + w, 2] = image_mean[2]
                else:
                    img[x1:x1 + h, y1:y1 + w] = image_mean[0]
                return img
        # No fitting rectangle was found; return the untouched copy.
        return img
class RGB2GRAY(object):
    """Randomly converts an RGB image to grayscale (kept three-channel)."""

    def __init__(self, p=0.5):
        # Probability that the conversion is applied on a given call.
        self.probability = p

    def __call__(self, image):
        # Skip conversion for roughly (1 - p) of the calls.
        if random.uniform(0, 1) > self.probability:
            return image
        gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
        # Stack the single channel three times so the network input shape is unchanged.
        return cv2.merge([gray, gray, gray])
class DataAugmentation(object):
    def __init__(self, erase_prob=None, full_aug=True, gray_prob=None):
        """
        Args:
            erase_prob: probability for RandomErasing; None disables it.
            full_aug: whether to apply the full random augmentation pipeline
                to the whole image.
            gray_prob: probability for RGB->gray conversion; None disables it.
        """
        self.full_aug = full_aug
        self.erase_prob = erase_prob
        self.gray_prob = gray_prob
        if erase_prob is not None:
            self.random_erase = RandomErasing(probability=erase_prob)
        if gray_prob is not None:
            self.rgb2gray = RGB2GRAY(p=gray_prob)

    def __call__(self, image):
        """
        :param image: the input image
        :return: the augmented image
        """
        # Random erasing first.
        if self.erase_prob:
            image = self.random_erase(image)
        # Then grayscale conversion.
        if self.gray_prob:
            image = self.rgb2gray(image)
        if self.full_aug:
            image = self.data_augmentation(image)
        return image

    def data_augmentation(self, original_image):
        """ Apply the random albumentations pipeline to the image.
        Args:
            original_image: the original image
        Return:
            image_aug: the augmented image
        """
        augmentations = Compose([
            HorizontalFlip(p=0.4),
            ShiftScaleRotate(shift_limit=0.07, rotate_limit=0, p=0.4),
            RGBShift(r_shift_limit=10, g_shift_limit=10, b_shift_limit=10, p=0.3),
            # Brightness / contrast adjustments.
            RandomGamma(gamma_limit=(80, 120), p=0.1),
            RandomBrightnessContrast(p=0.1),
            # Blurring variants — at most one is applied.
            OneOf([
                MotionBlur(p=0.1),
                MedianBlur(blur_limit=3, p=0.1),
                Blur(blur_limit=3, p=0.1),
            ], p=0.3),
            OneOf([
                IAAAdditiveGaussianNoise(),
                GaussNoise(),
            ], p=0.2)
        ])
        augmented = augmentations(image=original_image)
        image_aug = augmented['image']
        return image_aug
# Visual smoke test: show each augmented training image one by one.
if __name__ == "__main__":
    image_path = 'data/Uaic/初赛训练集/初赛训练集/train_set'
    # augment = DataAugmentation(erase_flag=True, full_aug=True, gray=True)
    augment = DataAugmentation(erase_prob=0.3, gray_prob=0.3)
    plt.figure()
    for image_name in os.listdir(image_path):
        image = Image.open(os.path.join(image_path, image_name)).convert('RGB')
        image = np.asarray(image)
        augmented = augment(image=image)
        plt.imshow(augmented)
        plt.show()
| 5,411 | 1,963 |
from question_model import Question
from data import question_data
from quiz_brain import QuizBrain
question_bank = []
# Build Question objects from the raw dicts supplied by question_data.
for question in question_data:
    question_bank.append(Question(question['text'], question['answer']))
quiz = QuizBrain(question_bank)
# Drive the quiz until the question bank is exhausted, then report the score.
while quiz.still_has_question():
    quiz.next_question()
print(f"\nYour final score : {quiz.score}\nThanks for playing!")
#! -*- coding:utf-8 -*-
import os
import sys
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import cv2
from Model import Model
from util import get_figs, dump_figs
class FigGenerator(object):
    """Restores a trained Model from a checkpoint and generates output figures."""

    def __init__(self, file_name, z_dim, batch_size):
        """Build the graph and restore weights from ``file_name``."""
        self.batch_size = batch_size
        self.z_dim = z_dim
        self.model = Model(z_dim, batch_size)
        self.model.set_model()
        saver = tf.train.Saver()
        self.sess = tf.Session()
        saver.restore(self.sess, file_name)

    def __call__(self, inputs):
        """Generate one figure per input; ``inputs`` must be a full batch."""
        assert(len(inputs) == self.batch_size)
        # Bug fix: use the instance's own dimensions. The original read the
        # module-level ``batch_size``/``z_dim`` globals that only exist when
        # this file runs as __main__, breaking the class when imported.
        z = np.random.normal(0.0, 1.0, [self.batch_size, self.z_dim]).astype(np.float32)
        return self.model.gen_fig(self.sess, inputs, z)
if __name__ == u'__main__':
    # checkpoint file produced by training
    dump_file = u'./model.dump'
    # data directories
    input_dir = u'train_split/inputs'
    target_dir = u'train_split/targets'
    # generation parameters
    batch_size = 10
    z_dim = 100
    # figure generator restored from the checkpoint
    fig_gen = FigGenerator(dump_file, z_dim, batch_size)
    # load input/target figure pairs
    print('-- get figs--')
    input_figs, target_figs = get_figs(input_dir, target_dir, False, False)
    assert(len(input_figs) == len(target_figs))
    print('num figs = {}'.format(len(input_figs)))
    # pick one batch, then write side-by-side before/after JPEGs;
    # (x + 1.0) * 127.5 maps [-1, 1] pixel values back to [0, 255]
    inputs = input_figs[10: 10 + batch_size]
    input_imgs = cv2.hconcat((inputs + 1.0) * 127.5)
    cv2.imwrite('inputs.jpg', input_imgs)
    outputs = np.asarray(fig_gen(input_figs[10: 10 +batch_size]))
    output_imgs = cv2.hconcat((outputs + 1.0) * 127.5)
    cv2.imwrite('outputs.jpg', output_imgs)
| 1,655 | 642 |
import copy
import random
# Consider using the modules imported above.
class Hat:
    """A hat of coloured balls, e.g. ``Hat(red=3, blue=2)``."""

    def __init__(self, **kwargs) -> None:
        self.set_contents(**kwargs)

    def set_contents(self, **kwargs):
        """Expand colour counts into one list entry per ball: {'red': 2} -> ['red', 'red']."""
        self.contents = [colour for colour, count in kwargs.items() for _ in range(count)]

    def draw(self, quantity):
        """Remove and return ``quantity`` random balls from the hat.

        If ``quantity`` is not smaller than the number of balls available,
        all balls are returned and the hat is left unchanged (matching the
        original behaviour).
        """
        contents = self.contents
        if quantity >= len(contents):
            # Bug fix: return a copy instead of the internal list, so a
            # caller mutating the result cannot corrupt the hat's state.
            return list(contents)
        draws = []
        for _ in range(quantity):
            # pop() removes in place — same distribution as the original
            # slice-around-index approach, without rebuilding the list.
            draws.append(contents.pop(random.randrange(len(contents))))
        return draws
def experiment(hat, expected_balls, num_balls_drawn, num_experiments):
    """Estimate the probability that one draw contains at least the expected balls.

    Args:
        hat: Hat-like object; deep-copied for each trial so draws are independent.
        expected_balls: Mapping colour -> minimum required count in the draw.
        num_balls_drawn: Number of balls drawn per trial.
        num_experiments: Number of independent trials.

    Returns:
        Fraction of trials whose draw met every expectation.
    """
    failures = 0
    for _ in range(num_experiments):
        # Each trial starts from a pristine copy of the hat.
        trial_hat = copy.deepcopy(hat)
        drawn = trial_hat.draw(num_balls_drawn)
        # list.count replaces the hand-rolled tally loop of the original
        # (which also left a dead ``count = 0`` before the trials).
        if any(drawn.count(colour) < minimum
               for colour, minimum in expected_balls.items()):
            failures += 1
    return 1 - failures / num_experiments
| 1,300 | 372 |
# Copyright 2018 luozhouyang
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import tensorflow as tf
# This function is modified from https://github.com/Kyubyong/transformer/blob/master/modules.py
# with Apache License V2
# This function is modified from https://github.com/Kyubyong/transformer/blob/master/modules.py
# with Apache License V2
def positional_encoding(inputs,
                        num_units,
                        scope="positional_encoding"):
    """Positional encoding as described in https://arxiv.org/abs/1706.03762.

    Args:
      inputs: A 2-d tensor with shape [B, L]. B->Batch size, L->Time steps.
        Must have a static (known) shape, since B and L are read via get_shape().
      num_units: The model's dimension
      scope: Variable scope

    Returns:
      A tensor with shape [B,L,D]. D->Model's dimension
    """
    batch_size, time_steps = inputs.get_shape().as_list()
    with tf.variable_scope(scope):
        # position_index[b, t] == t for every batch row.
        position_index = tf.tile(
            tf.expand_dims(tf.range(time_steps), 0), [batch_size, 1])
        # Sinusoid table computed in numpy: PE(pos, i) = pos / 10000^(2i/D).
        position_encoding = np.array([
            [pos / np.power(10000, 2. * i / num_units) for i in range(num_units)]
            for pos in range(time_steps)])
        position_encoding[:, 0::2] = np.sin(position_encoding[:, 0::2])  # dim 2i
        position_encoding[:, 1::2] = np.cos(position_encoding[:, 1::2])  # dim 2i+1
        # Convert to a tensor.
        # NOTE(review): the numpy array is float64, so the lookup result is
        # float64 — confirm callers cast before mixing with float32 embeddings.
        lookup_table = tf.convert_to_tensor(position_encoding)
        outputs = tf.nn.embedding_lookup(lookup_table, position_index)
        return outputs
def layer_norm(inputs, epsilon=1e-8, scope="layer_norm"):
    """Layer normalization.
    norm = gamma * (inputs - mean) / sqrt(variance + epsilon)

    Args:
      inputs: Input tensor, shape is [B,L,D]. B->Batch size, L->Time steps, D->Model's dim
      epsilon: A very small float number to avoid zero division error
      scope: Variable scope or name

    Returns:
      The normalized tensor with shape [B,L,D]
    """
    with tf.variable_scope(scope):
        inputs_shape = inputs.get_shape()
        params_shape = inputs_shape[-1:]
        # Moments over the last (feature) axis only.
        mean, variance = tf.nn.moments(inputs, [-1], keep_dims=True)
        # Learnable shift (beta) and scale (gamma), one per feature.
        beta = tf.Variable(tf.zeros(params_shape))
        gamma = tf.Variable(tf.ones(params_shape))
        normalized = (inputs - mean) / ((variance + epsilon) ** .5)
        outputs = gamma * normalized + beta
        return outputs
def scaled_dot_product_attention(q, k, v, scale=None, mask=None, dropout=0.2):
    """Scaled dot-product attention.

    Args:
      q: Query tensor, with shape [h*B, L, D/h]. h->num_heads
      k: Key tensor, with shape [h*B, L, D/h]
      v: Value tensor, with shape [h*B, L, D/h]
      scale: A scalar, scale factor, sqrt(D)
      mask: Attention mask, with shape [h*B, L, L]
      dropout: A scalar, dropout rate

    Returns:
      An output tensor and a attention tensor
    """
    dot = tf.matmul(q, k, transpose_b=True)  # [h*B,L,L]
    if scale:
        dot = dot * scale
    # Bug fix: ``if mask:`` on a Tensor raises "truth value is ambiguous"
    # in TF1 graph mode; test against None explicitly.
    if mask is not None:
        padding = tf.ones_like(dot) * dot.dtype.min
        dot = tf.where(tf.equal(mask, 0), padding, dot)
    attention = tf.nn.softmax(dot)
    # Bug fix: TF1 tf.nn.dropout takes keep_prob, not a drop rate — passing
    # ``dropout`` directly kept only 20% of the weights at dropout=0.2.
    attention = tf.nn.dropout(attention, keep_prob=1.0 - dropout)
    output = tf.matmul(attention, v)
    return output, attention
def multihead_attention(queries,
                        keys,
                        values,
                        num_heads=8,
                        dropout=0.2,
                        mask=None,
                        scope="multihead_attention"):
    """Multi-head attention mechanism.

    Args:
      queries: Query tensor, with shape [B, L, D]
      keys: Key tensor, with shape [B, L, D]
      values: Value tensor, with shape [B, L, D]
      num_heads: A scalar, number of heads to split
      dropout: A scalar, dropout rate.
      mask: Masking tensor, with shape [h*B, L, L]
      scope: A string, variable scope name.

    Returns:
      An output tensor and a attention tensor
    """
    with tf.variable_scope(scope) as scope:
        model_dim = queries.get_shape()[-1]
        # Learned linear projections for Q, K and V.
        q = tf.layers.dense(
            queries, model_dim, activation=tf.nn.relu)  # (B, L_q, D]
        k = tf.layers.dense(
            keys, model_dim, activation=tf.nn.relu)
        v = tf.layers.dense(
            values, model_dim, activation=tf.nn.relu)
        # Split the model dimension into heads and fold them into the batch.
        q = tf.concat(tf.split(q, num_heads, axis=2), 0)  # [h*B, L_q, D/h]
        k = tf.concat(tf.split(k, num_heads, axis=2), 0)
        v = tf.concat(tf.split(v, num_heads, axis=2), 0)
        scale = (model_dim // num_heads) ** -0.5
        output, attention = scaled_dot_product_attention(
            q, k, v, scale, mask, dropout)
        # Undo the head split, then apply the output projection.
        output = tf.concat(tf.split(output, num_heads, axis=0), 2)
        output = tf.layers.dense(output, model_dim)
        # Bug fix: TF1 tf.nn.dropout takes keep_prob, not a drop rate —
        # passing ``dropout`` directly kept only 20% of activations.
        output = tf.nn.dropout(output, keep_prob=1.0 - dropout)
        # residual
        output += queries
        # layer norm
        output = layer_norm(output)
        return output, attention
def positional_wise_feed_forward_network(inputs,
                                         model_dim=512,
                                         ffn_dim=2048,
                                         dropout=0.2,
                                         scope="ffn"):
    """Positional-wise feed forward network.

    Expands each position to ffn_dim with a ReLU, projects back to
    model_dim, then applies dropout, a residual connection, and layer norm.

    Args:
      inputs: Input tensor with shape [B,L,D]
      model_dim: Model's dimension
      ffn_dim: FFN's inner dimension
      dropout: A scalar, dropout rate
      scope: Variable's scope or name

    Returns:
      An output tensor with shape [B,L,D]
    """
    with tf.variable_scope(scope) as scope:
        # Bug fix: the expansion layer must output ffn_dim and the readout
        # must output model_dim; the original had the two filter sizes
        # swapped, which breaks the residual add (ffn_dim != model_dim).
        params = {"inputs": inputs, "filters": ffn_dim, "kernel_size": 1,
                  "activation": tf.nn.relu, "use_bias": True}
        outputs = tf.layers.conv1d(**params)
        # Readout layer back to the model dimension.
        params = {"inputs": outputs, "filters": model_dim, "kernel_size": 1,
                  "activation": None, "use_bias": True}
        outputs = tf.layers.conv1d(**params)
        outputs = tf.layers.dropout(outputs, dropout)
        # residual and layer norm
        outputs += inputs
        outputs = layer_norm(outputs)
        return outputs
def padding_mask(seq_k, seq_q, num_heads):
    """Padding mask.

    Marks positions whose key feature vector is all zeros (i.e. padding) so
    attention can ignore them; the mask is tiled once per head.

    Args:
      seq_k: Keys tensor with shape [B,L,D]
      seq_q: Queries tensor with shape [B,L,D]
      num_heads: A scalar, number of heads

    Returns:
      A masking tensor with shape [h*B,L,L] (0 where the key is padding)
    """
    mask = tf.sign(tf.abs(tf.reduce_sum(seq_k, axis=-1)))  # [B,L]
    mask = tf.tile(mask, [num_heads, 1])  # [h*B,L]
    mask = tf.tile(tf.expand_dims(mask, 1), [1, tf.shape(seq_q)[1], 1])  # [h*B,L,L]
    return mask
def sequence_mask(seq, num_heads, dtype=tf.float32):
    """Sequence mask to blind feature time steps.

    Builds a lower-triangular [L,L] matrix (position t may only attend to
    positions <= t) and tiles it once per head and batch element.

    Args:
      seq: Input tensor with shape [B,L,D]
      num_heads: A scalar, number of heads
      dtype: Data type

    Returns:
      A maksing tensor with shape [h*B,L,L]
    """
    batch_size = tf.shape(seq)[0]
    length = tf.shape(seq)[1]
    diag = tf.ones(shape=[length, length], dtype=dtype)  # [L,L]
    # Keep only the lower triangle (causal mask).
    tril = tf.linalg.LinearOperatorLowerTriangular(diag).to_dense()  # [L,L]
    mask = tf.tile(tf.expand_dims(tril, 0), [num_heads * batch_size, 1, 1])  # [h*B,L,L]
    return mask
| 7,309 | 2,595 |
# Generated by Django 3.0.6 on 2020-05-18 14:43
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Alter User.groups to a single nullable ForeignKey to auth.Group.

    NOTE(review): this replaces Django's conventional many-to-many groups
    relation with one group per user — confirm that is intentional.
    """

    dependencies = [
        ('auth', '0011_update_proxy_permissions'),
        ('core', '0008_auto_20200518_1442'),
    ]

    operations = [
        migrations.AlterField(
            model_name='user',
            name='groups',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='auth.Group'),
        ),
    ]
| 536 | 197 |
from django.db import models
from typing import List, Tuple
from django.core.exceptions import ValidationError
# Create your models here.
class Sudoku(models.Model):
    """One sudoku: a creation date plus a puzzle field and a solution field."""
    puzzle_creation_date = models.DateField(verbose_name = 'creation date', help_text= 'puzzle creation date', auto_now=True, auto_now_add=False)
    @staticmethod
    def create_from_puzzle_and_solution_lists(sudoku_puzzle_list: List[List[int]], sudoku_solution_list: List[List[int]]) -> None:
        """Persist a sudoku, its puzzle/solution fields, and all 3x3 squares atomically."""
        def sudoku_squere_list_to_model(sudoku_field_model: SudokuField, sudoku_squere_list: List[int]) -> SudokuSquereField:
            # Map the nine cell values of one 3x3 square onto a model row.
            return SudokuSquereField(
                f_0 = sudoku_squere_list[0],
                f_1 = sudoku_squere_list[1],
                f_2 = sudoku_squere_list[2],
                f_3 = sudoku_squere_list[3],
                f_4 = sudoku_squere_list[4],
                f_5 = sudoku_squere_list[5],
                f_6 = sudoku_squere_list[6],
                f_7 = sudoku_squere_list[7],
                f_8 = sudoku_squere_list[8],
                sudoku_field_fk = sudoku_field_model
            )
        def sudoku_list_to_model(sudoku_model: Sudoku, sudoku_list: List[List[int]]) -> Tuple[SudokuField,Tuple[SudokuSquereField]]:
            sudoku_field_model = SudokuField(sudoku_fk = sudoku_model)
            # Bug fix: this must be materialized with tuple(); the original
            # used a generator expression, which the fk-assignment loop below
            # exhausted, so the later save loop iterated nothing and no
            # SudokuSquereField rows were ever saved.
            sudoku_squere_model_tuple = tuple(
                sudoku_squere_list_to_model(sudoku_field_model, squere) for squere in sudoku_list
            )
            return (sudoku_field_model, sudoku_squere_model_tuple)
        sudoku_model = Sudoku()
        sudoku_field_puzzle, sudoku_3x3_puzzle_tuple = sudoku_list_to_model(sudoku_model, sudoku_puzzle_list)
        sudoku_field_puzzle.type = 'p'
        sudoku_field_puzzle.sudoku_fk = sudoku_model
        for sudoku_3x3_field in sudoku_3x3_puzzle_tuple:
            sudoku_3x3_field.sudoku_field_fk = sudoku_field_puzzle
        sudoku_field_solution, sudoku_3x3_solution_tuple = sudoku_list_to_model(sudoku_model, sudoku_solution_list)
        sudoku_field_solution.type = 's'
        sudoku_field_solution.sudoku_fk = sudoku_model
        for sudoku_3x3_field in sudoku_3x3_solution_tuple:
            sudoku_3x3_field.sudoku_field_fk = sudoku_field_solution
        from django.db import transaction
        # All-or-nothing persistence of the whole sudoku graph.
        with transaction.atomic():
            sudoku_model.save()
            sudoku_field_puzzle.save()
            for sudoku_field in sudoku_3x3_puzzle_tuple:
                sudoku_field.save()
            sudoku_field_solution.save()
            for sudoku_field in sudoku_3x3_solution_tuple:
                sudoku_field.save()
    class Meta():
        verbose_name_plural = 'Sudoku'
        verbose_name = 'Sudoku'
        ordering = ['-puzzle_creation_date']
class SudokuField(models.Model):
    """One 9x9 grid of a sudoku: either the puzzle ('p') or the solution ('s')."""
    SUDOKU_TYPES_FIELD_CHOISES = (
        ('p', 'Puzzle'),
        ('s', 'Solution'),
    )
    type = models.CharField(verbose_name = 'puzzle type', max_length=1, choices = SUDOKU_TYPES_FIELD_CHOISES, default=None)
    sudoku_fk = models.ForeignKey(Sudoku, on_delete=models.CASCADE, verbose_name='sudoku_id')
    def save(self, *args, **kwargs) -> None:
        # Validate before every save; clean() raises ValidationError on bad data.
        self.clean()
        return super().save(*args, **kwargs)
    def clean(self) -> None:
        """Reject unknown types, wrong fk types, and duplicate puzzle fields."""
        types = [ field_type[0] for field_type in self.SUDOKU_TYPES_FIELD_CHOISES ]
        if not( self.type in types and type(self.sudoku_fk) == Sudoku ):
            raise ValidationError(' validation error in SudokuField fields ')
        # Only one puzzle field is allowed per sudoku.
        if self.type == 'p' and SudokuField.objects.filter(type = 'p', sudoku_fk = self.sudoku_fk).exists():
            raise ValidationError(' validation error in SudokuField (puzzle already created) ')
        return super().clean()
    class Meta():
        verbose_name_plural = 'Puzzle'
        verbose_name = 'Puzzle'
        ordering = ['-sudoku_fk']
class SudokuSquereField(models.Model):
    """One 3x3 square of a sudoku grid; cell value 0 means an empty cell."""
    SUDOKU_PUZZLE_FIELD_CHOISES = (
        (0, ' '),
        (1, '1'),
        (2, '2'),
        (3, '3'),
        (4, '4'),
        (5, '5'),
        (6, '6'),
        (7, '7'),
        (8, '8'),
        (9, '9'),
    )
    f_0 = models.PositiveSmallIntegerField(choices = SUDOKU_PUZZLE_FIELD_CHOISES, verbose_name='1', )
    f_1 = models.PositiveSmallIntegerField(choices = SUDOKU_PUZZLE_FIELD_CHOISES, verbose_name='2', )
    f_2 = models.PositiveSmallIntegerField(choices = SUDOKU_PUZZLE_FIELD_CHOISES, verbose_name='3', )
    f_3 = models.PositiveSmallIntegerField(choices = SUDOKU_PUZZLE_FIELD_CHOISES, verbose_name='4', )
    f_4 = models.PositiveSmallIntegerField(choices = SUDOKU_PUZZLE_FIELD_CHOISES, verbose_name='5', )
    f_5 = models.PositiveSmallIntegerField(choices = SUDOKU_PUZZLE_FIELD_CHOISES, verbose_name='6', )
    f_6 = models.PositiveSmallIntegerField(choices = SUDOKU_PUZZLE_FIELD_CHOISES, verbose_name='7', )
    f_7 = models.PositiveSmallIntegerField(choices = SUDOKU_PUZZLE_FIELD_CHOISES, verbose_name='8', )
    f_8 = models.PositiveSmallIntegerField(choices = SUDOKU_PUZZLE_FIELD_CHOISES, verbose_name='9', )
    sudoku_field_fk = models.ForeignKey(SudokuField, on_delete=models.CASCADE, verbose_name='puzzle_id')
    def save(self, *args, **kwargs) -> None:
        # Validate before every save; clean() raises ValidationError on bad data.
        self.clean()
        return super().save(*args, **kwargs)
    def clean(self) -> None:
        """Validate all nine cell values and the foreign key type.

        Bug fix: the original checked ``self.type``, an attribute this model
        does not define (copy-paste from SudokuField), so clean() — and
        therefore save() — always failed with AttributeError.
        """
        allowed = [ choice[0] for choice in self.SUDOKU_PUZZLE_FIELD_CHOISES ]
        cell_values = (self.f_0, self.f_1, self.f_2, self.f_3, self.f_4,
                       self.f_5, self.f_6, self.f_7, self.f_8)
        if not( all(value in allowed for value in cell_values) and type(self.sudoku_field_fk) == SudokuField ):
            raise ValidationError(' validation error in SudokuSquereField fields ')
        return super().clean()
    class Meta():
        verbose_name_plural = 'Field 3x3 '
        verbose_name = 'Field 3x3'
        ordering = ['-sudoku_field_fk']
| 5,700 | 2,099 |
"""
Correct FASTQ/FASTA with the corrected sequences from starcode clustering
"""
from collections import defaultdict
import logging
import os
import statistics
from pathlib import Path
from typing import Iterator, Tuple, List, Set, Dict
import dnaio
from tqdm import tqdm
from xopen import xopen
from dbspro.utils import Summary, IUPAC_MAP
logger = logging.getLogger(__name__)
def add_arguments(parser):
    """Register this subcommand's CLI arguments on *parser*."""
    positionals = [
        ("input", "FASTQ/FASTA with uncorrected sequences."),
        ("corrections",
         "Starcode output in format, tab-separate entries: <corrected sequnence>, <read count>, <comma-separated"
         "uncorrected sequences>."),
    ]
    for arg_name, help_text in positionals:
        parser.add_argument(arg_name, type=Path, help=help_text)
    parser.add_argument(
        "-o", "--output-fasta", type=Path,
        help="Output FASTA with corrected sequences.")
    parser.add_argument(
        "-b", "--barcode-pattern", required=True,
        help="IUPAC string with bases forming pattern to match each corrected sequence too.")
def main(args):
    """CLI entry point: forward the parsed arguments to run_correctfastq."""
    run_correctfastq(args.input, args.corrections, args.output_fasta, args.barcode_pattern)
def run_correctfastq(
    uncorrected_file: str,
    corrections_file: str,
    corrected_fasta: str,
    barcode_pattern: str,
):
    """Replace raw read sequences with their corrected cluster sequence.

    Reads whose sequence is absent from the starcode correction map are dropped
    from the output. Per-read counts are accumulated into a Summary.
    """
    logger.info("Starting analysis")
    logger.info(f"Processing file: {corrections_file}")
    summary = Summary()
    if os.stat(corrections_file).st_size == 0:
        # Consistency fix: use the module-level logger, not the root logger.
        logger.warning(f"File {corrections_file} is empty.")
    # One set of allowed bases per pattern position.
    pattern = [IUPAC_MAP[base] for base in barcode_pattern]
    corr_map = get_corrections(corrections_file, pattern, summary)
    logger.info("Correcting sequences and writing to output file.")
    with dnaio.open(uncorrected_file, mode="r") as reader, \
            dnaio.open(corrected_fasta, mode="w") as writer:
        for read in tqdm(reader, desc="Parsing reads"):
            summary["Reads total"] += 1
            if read.sequence in corr_map:
                read.sequence = corr_map[read.sequence]
                writer.write(read)
                summary["Reads corrected"] += 1
            else:
                # No cluster contained this raw sequence -> drop the read.
                summary["Reads without corrected sequence"] += 1
    summary.print_stats(name=__name__)
    logger.info("Finished")
def parse_starcode_file(filename: Path) -> Iterator[Tuple[str, int, List[str]]]:
    """Yield (cluster_seq, read_count, raw_seqs) from a starcode output file.

    Lines that do not have the default three whitespace-separated columns are
    logged and skipped.
    """
    with xopen(filename, "r") as file:
        for line in file:
            try:
                cluster_seq, num_reads, raw_seqs_list = line.split()
            except ValueError:
                # Consistency fix: module-level logger instead of the root logger.
                logger.warning(f"Non-default starcode output line: {line}")
                continue
            raw_seqs = raw_seqs_list.split(",")
            yield cluster_seq, int(num_reads), raw_seqs
def get_corrections(corrections_file: Path, pattern: List[Set[str]], summary: Summary) -> Dict[str, str]:
    """Build a raw-sequence -> corrected-sequence map from starcode clusters.

    Only clusters whose corrected sequence matches *pattern* contribute;
    per-cluster read/sequence statistics are accumulated into *summary*.
    """
    corr_map: Dict[str, str] = {}
    stats = defaultdict(list)
    clusters = parse_starcode_file(corrections_file)
    for cluster_seq, num_reads, raw_seqs in tqdm(clusters, desc="Parsing clusters"):
        summary["Clusters"] += 1
        if not match_pattern(cluster_seq, pattern):
            continue
        summary["Clusters filtered"] += 1
        stats["read"].append(num_reads)
        stats["sequence"].append(len(raw_seqs))
        for raw_seq in raw_seqs:
            corr_map[raw_seq] = cluster_seq
    # Add statistics
    for stat, values in stats.items():
        summary[f"Max {stat}s per cluster"] = max(values)
        summary[f"Mean {stat}s per cluster"] = statistics.mean(values)
        summary[f"Median {stat}s per cluster"] = statistics.median(values)
        summary[f"Clusters with one {stat}"] = sum(v == 1 for v in values)
    return corr_map
def match_pattern(sequence: str, pattern: List[Set[str]]) -> bool:
    """Return True when *sequence* is the same length as *pattern* and every
    base belongs to the allowed set at its position."""
    if len(sequence) != len(pattern):
        return False
    # Generator instead of a materialized list lets all() short-circuit.
    return all(base in allowed_bases for base, allowed_bases in zip(sequence, pattern))
| 3,999 | 1,238 |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
#
import ctypes
from struct import *
my_module_version = "0.50"
my_module_name = "audFileDecode"

# IMA ADPCM step-index adjustment, indexed by the low 3 bits of each nibble code.
aud_ima_index_adjust_table = [-1, -1, -1, -1, 2, 4, 6, 8]
# Standard IMA ADPCM step-size table (89 entries, indexes 0..88).
aud_ima_step_table = [
	7,     8,     9,    10,    11,    12,    13,    14,    16,
	17,    19,    21,    23,    25,    28,    31,    34,    37,
	41,    45,    50,    55,    60,    66,    73,    80,    88,
	97,   107,   118,   130,   143,   157,   173,   190,   209,
	230,   253,   279,   307,   337,   371,   408,   449,   494,
	544,   598,   658,   724,   796,   876,   963,  1060,  1166,
	1282,  1411,  1552,  1707,  1878,  2066,  2272,  2499,  2749,
	3024,  3327,  3660,  4026,  4428,  4871,  5358,  5894,  6484,
	7132,  7845,  8630,  9493, 10442, 11487, 12635, 13899, 15289,
	16818, 18500, 20350, 22385, 24623, 27086, 29794, 32767 ]

# Westwood Studios ADPCM delta tables (2-bit and 4-bit codes).
aud_ws_step_table2 = [-2, -1, 0, 1]
aud_ws_step_table4 = [
	-9, -8, -6, -5, -4, -3, -2, -1,
	0,  1,  2,  3,  4,  5,  6,  8
]

# (const xccTHA::byte* audio_in, short* audio_out, int& index, int& sample, int cs_chunk)
def aud_decode_ima_chunk(audioBufferIn, index, sample, cs_chunk):
	"""Decode cs_chunk IMA ADPCM samples from the nibble stream audioBufferIn.

	index / sample are the decoder state carried between chunks (the C original
	passed them by reference). Returns (decoded_samples, index, sample), where
	decoded_samples is a list of signed 16-bit values.
	"""
	audioBufferOut = []
	for sample_index in range(0, cs_chunk):
		try:
			code = audioBufferIn[sample_index >> 1]
		except IndexError:
			# BUG FIX: was a bare `except:` which also hid unrelated errors.
			# Dummy byte kept as a workaround: the C code reads one byte past
			# the end for odd-sized chunks.
			code = 0xa9
		# Two 4-bit codes per byte: low nibble first, then high nibble.
		code = code >> 4 if (sample_index & 1) else code & 0xf
		step = aud_ima_step_table[index]
		# Reconstruct the delta from the magnitude bits (bits 0..2).
		delta = step >> 3
		if code & 1:
			delta += step >> 2
		if code & 2:
			delta += step >> 1
		if code & 4:
			delta += step
		# Bit 3 is the sign; clamp to the signed 16-bit range.
		if code & 8:
			sample -= delta
			if sample < -32768:
				sample = -32768
		else:
			sample += delta
			if sample > 32767:
				sample = 32767
		audioBufferOut.append(ctypes.c_short(sample).value)
		# Adapt the step index and clamp it to the table bounds.
		index += aud_ima_index_adjust_table[code & 7]
		if index < 0:
			index = 0
		elif index > 88:
			index = 88
	return (audioBufferOut, index, sample)
#
#
#
def aud_decode_clip8(v):
	"""Clamp v to the unsigned 8-bit range [0, 0xff]."""
	return max(0, min(v, 0xff))
#
#
#
# (const xccTHA::byte* r, char* w, int cb_s, int cb_d)
def aud_decode_ws_chunk(inputChunkBuffer, cb_s, cb_d):
	"""Decode one Westwood Studios ADPCM audio chunk (port of a C routine).

	inputChunkBuffer holds cb_s source bytes; cb_d is the expected decoded
	size. Returns a list of decoded 8-bit sample values. When cb_s == cb_d the
	chunk is stored uncompressed and is copied through unchanged.

	NOTE(review): this module is Python 2 code (see the print statement at the
	bottom of the file); ctypes.c_char(...).value yields a str there, and some
	branches below do arithmetic on it -- verify before porting to Python 3.
	"""
	outputChunkBuffer = []
	inpChBuffIter = 0
	outChBuffIter = 0
	if (cb_s == cb_d):
		# Uncompressed chunk: plain byte-for-byte copy.
		# outputChunkBuffer = inputChunkBuffer[:cb_s] # memcpy(w, r, cb_s) # FIX
		for mcp in range(0, cb_s):
			outputChunkBuffer.append(ctypes.c_char(inputChunkBuffer[inpChBuffIter + mcp]).value)
		#binDataOut = struct.pack('b'*len(outputChunkBuffer), *outputChunkBuffer)
		#return binDataOut
		return outputChunkBuffer
	# const xccTHA::byte* s_end = inputChunkBuffer + cb_s; # FIX
	s_end = inpChBuffIter + cb_s
	sample = ctypes.c_int(0x80).value #int sample
	while (inpChBuffIter < s_end):
		inpChBuffIter += 1
		# Each control byte: low 6 bits = run count, top 2 bits = block type.
		# NOTE(review): count is read from the byte at the *incremented* cursor
		# while switchKey reads the byte before it; in the C original both came
		# from the same consumed byte (*r++). Confirm this offset is intended.
		count = ctypes.c_char(inputChunkBuffer[inpChBuffIter] & 0x3f).value # char count
		switchKey = inputChunkBuffer[inpChBuffIter - 1] >> 6 # inputChunkBuffer[-1] # b[-1] is *(b - 1)
		if switchKey == 0:
			# Type 0: 2-bit codes, four samples per source byte (aud_ws_step_table2).
			count += 1
			for iter in range (count, 0, -1):
				inpChBuffIter += 1
				code = ctypes.c_int(inputChunkBuffer[inpChBuffIter]).value # int code
				# assignment in C was right to left so:
				# *(outputChunkBuffer++) = sample = clip8(sample + aud_ws_step_table2[code & 3])
				# is:
				# *(outputChunkBuffer++) = (sample = clip8(sample + aud_ws_step_table2[code & 3]))
				# which is equivalent to these two commands:
				# sample = clip8(sample + aud_ws_step_table2[code & 3])
				# *(outputChunkBuffer++) = sample
				# SO:
				sample = aud_decode_clip8(sample + aud_ws_step_table2[code & 3])
				outputChunkBuffer.append(ctypes.c_char(sample).value)
				outChBuffIter += 1
				sample = aud_decode_clip8(sample + aud_ws_step_table2[code >> 2 & 3])
				outputChunkBuffer.append(ctypes.c_char(sample).value)
				outChBuffIter += 1
				sample = aud_decode_clip8(sample + aud_ws_step_table2[code >> 4 & 3])
				outputChunkBuffer.append(ctypes.c_char(sample).value)
				outChBuffIter += 1
				sample = aud_decode_clip8(sample + aud_ws_step_table2[code >> 6])
				outputChunkBuffer.append(ctypes.c_char(sample).value)
				outChBuffIter += 1
		elif switchKey == 1:
			# Type 1: 4-bit codes, two samples per source byte (aud_ws_step_table4).
			count += 1
			for iter in range (count, 0, -1):
				inpChBuffIter += 1
				code = inputChunkBuffer[inpChBuffIter] # int code
				sample += aud_ws_step_table4[code & 0xf]
				sample = aud_decode_clip8(sample)
				outputChunkBuffer.append(ctypes.c_char(sample).value)
				outChBuffIter += 1
				sample += aud_ws_step_table4[code >> 4]
				sample = aud_decode_clip8(sample)
				outputChunkBuffer.append(ctypes.c_char(sample).value)
				outChBuffIter += 1
		elif switchKey == 2:
			if (count & 0x20):
				# Small signed delta packed into the count bits.
				#sample += static_cast<char>(count << 3) >> 3
				#*(outputChunkBuffer++) = sample
				# NOTE(review): the C cast sign-extends (count << 3) as a signed
				# char before shifting back; this Python expression does not, so
				# negative deltas may differ -- confirm against the C output.
				sample += ((count & 0xFF) << 3 ) >> 3
				outputChunkBuffer.append(ctypes.c_char(sample).value)
				outChBuffIter += 1
			else:
				# Literal run: copy count+1 raw bytes through unchanged.
				count += 1
				# memcpy(outputChunkBuffer, inputChunkBuffer, count) # FIX
				for mcp in range(0, count):
					outputChunkBuffer.append(ctypes.c_char(inputChunkBuffer[inpChBuffIter + mcp]).value)
				inpChBuffIter += count
				outChBuffIter += count
				sample = inputChunkBuffer[inpChBuffIter - 1]
		else:
			# Type 3: repeat the current sample count+1 times.
			count += 1
			# memset(outputChunkBuffer, sample, ++count)
			for mst in range(0, count):
				outputChunkBuffer.append(ctypes.c_char(sample).value)
			outChBuffIter += count;
	# output buffer of chars
	#binDataOut = struct.pack('b'*len(outputChunkBuffer), *outputChunkBuffer)
	#return binDataOut
	return outputChunkBuffer
#
#
#
class audFileDecode:
	"""Stateful IMA ADPCM decoder: keeps the step index and last sample
	between successive chunks."""
	m_index = -1
	m_sample = -1

	def __init__(self, index=0, sample=0):
		self.m_index = index
		self.m_sample = sample

	def index(self):
		"""Current IMA step-table index."""
		return self.m_index

	# (const xccTHA::byte* audio_in, short* audio_out, int cs_chunk)
	def decode_chunk(self, audio_in, cs_chunk):
		"""Decode one chunk and carry the decoder state forward."""
		decoded, self.m_index, self.m_sample = aud_decode_ima_chunk(
			audio_in, self.m_index, self.m_sample, cs_chunk)
		return decoded
if __name__ == '__main__':
	# main()
	# Single-argument parenthesized print is valid under both Python 2 and 3
	# (the original `print "..."` statement was Python-2-only).
	print("Running %s as main module" % (my_module_name))
	decodeInstance = audFileDecode()
else:
	#debug
	#print("Running %s imported from another module" % (my_module_name))
	pass
| 7,204 | 3,430 |
# -*- coding: utf-8 -*-
# **********************************************************************************************************************
# MIT License
# Copyright (c) 2020 School of Environmental Science and Engineering, Shanghai Jiao Tong University
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ----------------------------------------------------------------------------------------------------------------------
# This file is part of the ASCA Algorithm, it is used for spatial point clustering analysis. This model contains mainly
# three parts, they are points trend analysis, point cluster analysis and spatial visualization.
#
# Author: Yuansheng Huang
# Date: 2020-06-18
# Version: V 1.2
# Literature
# ==========
# Yuansheng Huang, Peng Li, Yiliang He: To centralize or to decentralize? A systematic framework for optimizing
# rural wastewater treatment investment
# Clark and Evans, 1954; Gao, 2013
# **********************************************************************************************************************
# General import
import arcpy
import math
import functools
import numpy as np
from scipy.spatial import Delaunay
from functools import cmp_to_key
# --------------------------read out file--------------------------
def getFeatureName(folder):
    """Return the names (with extension) of all feature classes in *folder*."""
    arcpy.env.workspace = folder
    return [feature for feature in arcpy.ListFeatureClasses()]
def readArea(areaShape):
    """
    Read the total area of the study region from a polygon shapefile.

    Input
        areaShape: study-region feature class whose "area"/"AREA" field holds
                   per-row area values (metres, per the original docs).
    Output
        areaValue: sum of all per-row area values.
    """
    areaList = []
    rows = arcpy.SearchCursor(areaShape)
    fields = arcpy.ListFields(areaShape)
    for row in rows:
        for field in fields:
            if field.name == "area" or field.name == "AREA":
                AREA = row.getValue(field.name)
        # NOTE(review): AREA is only assigned when an "area"/"AREA" field
        # exists; otherwise this raises NameError (or repeats the previous
        # row's value). Confirm the field is mandatory in the input schema.
        areaList.append(AREA)
    areaValue = np.sum(areaList)
    return areaValue
def readObstacle(obstacle):
    """
    Read obstacle segment endpoints from a polyline shapefile; the segments are
    later used to delete Delaunay edges that cross an obstacle.

    Input
        obstacle: obstacle shapefile (roads, rivers, watershed lines, ...) merged
                  into one feature class and split at vertices, so every row is a
                  single segment with start/end coordinate fields.
    Output
        obstacleList: [[[S_X, S_Y], [E_X, E_Y]], ...]
    """
    obstacleList, rows, fields = [], arcpy.SearchCursor(obstacle), arcpy.ListFields(obstacle)
    for row in rows:
        for field in fields:
            if field.name == "START_X" or field.name == "X_START":
                S_X = row.getValue(field.name)
            elif field.name == "START_Y" or field.name == "Y_START":
                S_Y = row.getValue(field.name)
            elif field.name == "END_X" or field.name == "X_END":
                E_X = row.getValue(field.name)
            elif field.name == "END_Y" or field.name == "Y_END":
                E_Y = row.getValue(field.name)
        # NOTE(review): if any coordinate field is missing the names below are
        # unbound (NameError) or keep the previous row's values -- confirm all
        # four fields exist in the input schema.
        obstacleList.append([[S_X, S_Y], [E_X, E_Y]])
    if len(obstacleList) == 0:
        raise Exception("EMPTY LIST: YOU GOT AN EMPTY LIST!!! PLEASE CHECK INPUT FILE!")
    return obstacleList
def readSpatialPoint(pointShape):
    """
    Read spatial point records from a point shapefile.

    Input
        pointShape: path to the point shapefile; expects POINT_X / POINT_Y
                    coordinate fields plus attribute fields Q and RASTERVALU (H).
    Output
        pointList: [[ID, X, Y, Q, H], ...] with ID counting from 0 in row order
        spatialRef: spatial reference of the input feature class
    """
    pointList, rows, fields, ID = [], arcpy.SearchCursor(pointShape), arcpy.ListFields(pointShape), 0
    spatialRef = arcpy.Describe(pointShape).spatialReference
    for row in rows:
        for field in fields:
            if field.name == "POINT_X":
                X = row.getValue(field.name)
            if field.name == "POINT_Y":
                Y = row.getValue(field.name)
            if field.name == "Q":
                Q = row.getValue(field.name)
            if field.name == "RASTERVALU":
                H = row.getValue(field.name)
        # NOTE(review): X/Y/Q/H stay unbound (or stale) when a field is absent
        # from the schema -- confirm all four fields are guaranteed.
        pointList.append([ID, X, Y, Q, H])
        ID += 1
    if len(pointList) < 1:
        raise Exception("EMPTY LIST: YOU GOT AN EMPTY LIST, PLEASE CHECK YOUR INPUT FILE!!!")
    return pointList, spatialRef
def checkList(pointlist):
    """
    Remove points whose coordinate/attribute tuple duplicates an earlier point.

    BUG FIX: the original appended the return value of list.extend() (always
    None), so it returned a list of None entries, and it removed IDs by a stale
    index. Keep the first occurrence of each duplicate instead.

    Input
        pointlist: [[ID, X, Y, Q, H], ...]
    Output
        output: pointlist with later coordinate-duplicates removed.
    """
    seen = []
    output = []
    for entry in pointlist:
        attrs = entry[1:]
        if attrs not in seen:
            seen.append(attrs)
            output.append(entry)
    return output
def getNearestDistance(pointList):
    """
    Return, for each point, the 3D distance to its nearest neighbour.

    Input
        pointList: [[ID, X, Y, Q, H], ...]; the vertical component uses H.
    Output
        nearestDistanceList: one minimum distance per point, in input order.
    """
    nearestDistanceList = []
    for i, p in enumerate(pointList):
        distances = []
        for j, q in enumerate(pointList):
            if i == j:
                continue
            planar = math.hypot(p[1] - q[1], p[2] - q[2])
            vertical = p[4] - q[4]
            distances.append(math.hypot(planar, vertical))
        nearestDistanceList.append(min(distances))
    if len(nearestDistanceList) < 1:
        raise Exception("EMPTY LIST: YOU GOT AN EMPTY LIST, PLEASE CHECK YOUR INPUT FILE!!!")
    return nearestDistanceList
def NNI(pointList, distanceList, areaValue):
    """
    Clark & Evans (1954) nearest-neighbour index and its z-test statistic.

    NNI > 1 indicates a dispersed/uniform pattern, NNI < 1 a clustered one.
    Input
        pointList: spatial point list (only its length N is used)
        distanceList: nearest-neighbour distance per point
        areaValue: study-region area
    Output
        indexValue: nearest-neighbour index
        z_test: z statistic for the departure from complete spatial randomness
    """
    N = len(pointList)
    # Expected mean nearest-neighbour distance under CSR: 0.5 * sqrt(A / N).
    ran = 0.5 * math.sqrt(areaValue / N)
    observed = np.sum(distanceList) / N
    # BUG FIX: the standard error is 0.26136 / sqrt(N^2 / A); the original
    # divided sqrt(N^2) by A (wrong grouping) and used the constant 0.26236.
    SE = 0.26136 / math.sqrt(N ** 2 / areaValue)
    indexValue = observed / ran
    z_test = (observed - ran) / SE
    return indexValue, z_test
# ----------------starting cluster------------------
def getDelaunayTriangle(pointList):
"""
获取空间点集points的Delaunay Triangle (DT) 及DT的顶点索引和坐标。
输入参数
pointList: 空间点坐标列表
输出参数
triangleVertexIndex: DT顶点索引列表
triangleVertexCoordinate: DT顶点坐标列表
"""
pointListS = [i[1:3] for i in pointList]
points = np.array(pointListS)
DT = Delaunay(points)
triangleVertexIndex = DT.simplices[:].tolist()
triangleVertexCoordinate = []
for T in triangleVertexIndex:
triangle = []
for v in T:
triangle.append(pointList[v])
triangleVertexCoordinate.append(triangle)
return triangleVertexIndex, triangleVertexCoordinate
def unfoldList(nestedList):
    """Flatten one nesting level of *nestedList*; used throughout the module."""
    flat = []
    for sub in nestedList:
        flat.extend(sub)
    return flat
def clusteredList(pointList, marker):  # todo. Is this function useful
    """
    Pair every marker with each item's list of matches.

    For every item in *pointList* a `matches` list is built containing the item
    once per marker found in it; one [m, matches] pair is then appended for
    EVERY marker (all pairs of one item share the same matches list).
    """
    clusteredList = []
    for item in pointList:
        matches = [item for m in marker if m in item]
        for m in marker:
            clusteredList.append([m, matches])
    return clusteredList
def uniqueListElement(listWithCopyElement):
    """Return the list with later duplicates removed, keeping first-seen order.

    Elements may be unhashable (nested lists), so membership is tested by value.
    """
    deduped = []
    for element in listWithCopyElement:
        if element not in deduped:
            deduped.append(element)
    return deduped
def getEdgeID(indexA, indexB):
    """Build the canonical edge ID "V_<small>_<large>" from two vertex indexes.

    Raises Exception when both indexes refer to the same vertex.
    """
    if indexA == indexB:
        raise Exception("ERROR: Indexes point to the same point!!!")
    low, high = sorted((indexA, indexB))
    return "V_%s_%s" % (low, high)
def getLength(pointA, pointB):
    """3D Euclidean distance between two [ID, X, Y, Q, H] point records.

    The planar distance uses X/Y; the vertical component uses the H field.
    """
    dx = pointA[1] - pointB[1]
    dy = pointA[2] - pointB[2]
    dz = pointA[4] - pointB[4]
    return math.hypot(math.hypot(dx, dy), dz)
def getEdgeLength(triangleVertexIndex, triangleVertexCoordinate):
    """
    Build per-triangle edge records plus a deduplicated flat edge list.

    Each edge record is [edgeID, min_vertex, max_vertex, length3D]; the three
    edges per triangle follow the order (0,1), (0,2), (1,2).
    Input
        triangleVertexIndex, triangleVertexCoordinate: getDelaunayTriangle output.
    Output
        triangleEdgeList: edges grouped per triangle
        edgeList: flattened and deduplicated edges
    """
    triangleEdgeList = []
    for triIdx, triCoord in zip(triangleVertexIndex, triangleVertexCoordinate):
        edges = []
        for a, b in ((0, 1), (0, 2), (1, 2)):
            ia, ib = triIdx[a], triIdx[b]
            edges.append([getEdgeID(ia, ib), min(ia, ib), max(ia, ib),
                          getLength(triCoord[a], triCoord[b])])
        triangleEdgeList.append(edges)
    edgeList = uniqueListElement(unfoldList(triangleEdgeList))
    return triangleEdgeList, edgeList
# --------------------------delete global long edge--------------------------
def getGlobalEdgeStatistic(edgeList):
    """
    Return (mean, sample standard deviation) of all edge lengths.

    Raises ZeroDivisionError when fewer than two edges are given, since the
    sample standard deviation (ddof=1) is undefined there.
    """
    lengths = [edge[-1] for edge in edgeList]
    if len(lengths) < 2:
        raise ZeroDivisionError
    return np.mean(lengths), np.std(lengths, ddof=1)
def getFirstOrderEdges(pointList, edgeList):
    """
    Collect every point's first-order (directly incident) edges and neighbours.

    Output
        firstOrderEdges: per point, [point_index, edge, edge, ...]
        firstOrderPoints: per point, [point_index, neighbour_index, ...]
    """
    firstOrderEdges, firstOrderPoints = [], []
    for point in pointList:
        idx = point[0]
        incident = [edge for edge in edgeList if idx in edge[1:3]]
        endpoints = []
        for edge in incident:
            endpoints.extend(edge[1:3])
        # Deduplicate endpoint indexes and drop the point itself.
        neighbours = list(set(endpoints))
        if idx in neighbours:
            neighbours.remove(idx)
        firstOrderEdges.append([idx] + incident)
        firstOrderPoints.append([idx] + neighbours)
    return firstOrderEdges, firstOrderPoints
def getFirstOrderEdgesMean(firstOrderEdges):  # 20200618, updated
    """
    Mean first-order edge length per point.

    Input entries are [point_index, edge, ...]; output is [[point_index, mean], ...].
    """
    means = []
    for entry in firstOrderEdges:
        lengths = [edge[3] for edge in entry[1:]]
        means.append([entry[0], np.mean(lengths)])
    return means
def getGlobalCutValue(globalEdgeMean, globalEdgeVariation, firstOrderEdgesMean):
    """
    Per-point global cut threshold used to delete global long edges.

    GCV_i = global_mean + 0.5 * (global_mean / first_order_mean_i) * global_variation
    Output: [[point_index, GCV_i], ...]
    """
    return [
        [pointMean[0],
         globalEdgeMean + 0.5 * (globalEdgeMean / pointMean[1]) * globalEdgeVariation]
        for pointMean in firstOrderEdgesMean
    ]
def getGlobalOtherEdge(edgeList, globalCutValueList):
    """
    Remove every global long edge: an edge is dropped when it is incident to a
    point and at least as long as that point's global cut value.

    Iterates the unmodified *edgeList* while removing from a copy, so no
    elements are skipped.
    """
    remaining = edgeList[:]
    for pointIndex, cutValue in globalCutValueList:
        for edge in edgeList:
            if pointIndex in edge[1:3] and edge[3] >= cutValue and edge in remaining:
                remaining.remove(edge)
    return remaining
def aggregation(edgeList):
    """
    Merge edge endpoint pairs into connected point clusters (isolated points
    excluded). Called by cluster().

    Sub-lists that share an element are merged: the union replaces the slot with
    the larger index i and the other slot j is emptied, so each cluster survives
    in exactly one slot.
    Input
        edgeList: edge list after global long edges were removed.
    Output
        indexList: nested list; each sub-list holds the point indexes of one cluster.
    """
    indexListX = [i[1:3] for i in edgeList] # get index
    for i in range(len(indexListX)):
        for j in range(len(indexListX)):
            x = list(set(indexListX[i] + indexListX[j]))
            y = len(indexListX[j]) + len(indexListX[i])
            if i == j:
                # Only pairs with j < i are compared; stop the inner scan at the diagonal.
                break
            elif len(x) < y:
                # Shared element found: keep the union in slot i, empty slot j.
                indexListX[i] = x
                indexListX[j] = []
    indexList = []
    for i in indexListX:
        # Emptied and single-element slots are dropped.
        if len(i) > 1:
            indexList.append(i)
        else:
            continue
    return indexList
def cluster(pointList, indexList, marker):
    """
    Append a cluster label to every point in pointList (mutates in place).

    Isolated points receive "<marker>0"; points of the n-th cluster in
    indexList receive "<marker>n". Returns None.
    Input
        pointList: spatial point list
        indexList: output of aggregation()
        marker: label prefix, e.g. "G".
    """
    clusterPointIndex = [i for j in indexList for i in j]
    for i in pointList:
        # NOTE(review): pointList.index(i) gives the position of the first
        # equal point and is compared against point IDs from indexList -- this
        # assumes positions and IDs coincide and points are unique (see
        # checkList). Confirm for re-labelled lists.
        index = pointList.index(i)
        marker0 = marker + "0"
        if index not in clusterPointIndex:
            i.append(marker0)
        else:
            continue
    for lst in indexList:  # label the remaining (clustered) points
        markerX = marker + str(indexList.index(lst) + 1)
        for i in pointList:
            for ele in lst:
                if ele == i[0]:
                    i.append(markerX)
                else:
                    continue
    return
# --------------------------删除局部长边--------------------------
def getSubgraphEdge(pointList, edgeList, indexList):
    """
    Gather, for each point cluster, its vertex records and incident edges.

    Input
        pointList: labelled point list (positions assumed to match indexes)
        edgeList: edge list after global long / obstacle edges were removed
        indexList: clusters as lists of point indexes (aggregation output)
    Output
        (subgraphVertexList, subgraphEdgeList), both parallel to indexList.
    """
    subgraphVertexList = [[pointList[i] for i in cluster] for cluster in indexList]
    subgraphEdgeList = []
    for cluster in indexList:
        memberEdges = []
        for vertex in cluster:
            for edge in edgeList:
                if vertex in edge[1:3] and edge not in memberEdges:
                    memberEdges.append(edge)
        subgraphEdgeList.append(memberEdges)
    return subgraphVertexList, subgraphEdgeList
def getSecondOrderEdges(subgraphVertexList, subgraphEdgeList):
    """
    Compute each subgraph vertex's second-order neighbourhood edge-length mean:
    the mean length of all edges incident to the vertex's first-order
    neighbours.

    Input
        subgraphVertexList, subgraphEdgeList: getSubgraphEdge() output.
    Output
        subgraphSecondOrderEdgeMean: per subgraph, [[vertex_index, mean], ...].
    """
    length, subgraphSecondOrderEdgeMean = len(subgraphVertexList), []
    for i in range(length):  # iterate over one subgraph at a time
        subgraphVertex, subgraphEdge = subgraphVertexList[i], subgraphEdgeList[i] # vertex and edge of subgraph.
        _, firstOrderPoints = getFirstOrderEdges(subgraphVertex, subgraphEdge)
        # Split [vertex_index, neighbour, ...] records into neighbours and indexes.
        firstOrderPointList = [i[1:] for i in firstOrderPoints]
        indexList = [i[0] for i in firstOrderPoints]
        secondOrderMean = []
        for n in range(len(firstOrderPointList)):
            subgraphSecondOrderEdgeC, index, = [], indexList[n]
            for p in firstOrderPointList[n]:
                for e in subgraphEdge:
                    if p in e[1:3]:
                        subgraphSecondOrderEdgeC.append(e)
            # Edges reached via several neighbours appear multiple times; deduplicate.
            subgraphSecondOrderEdgeU = uniqueListElement(subgraphSecondOrderEdgeC)
            edgeLengthPi = [i[-1] for i in subgraphSecondOrderEdgeU]
            Pi_mean = np.mean(edgeLengthPi)
            secondOrderMean.append([index, Pi_mean])
        subgraphSecondOrderEdgeMean.append(secondOrderMean)
    return subgraphSecondOrderEdgeMean
def getSubgraphEdgeStatistic(subgraphVertexList, subgraphEdgeList):  # updated by Ethan Huang in 20200618
    """
    Mean local edge-length variation per subgraph.

    For every point of a subgraph, the standard deviation of its first-order
    edge lengths is taken (sample std, ddof=1, when it has two or more edges,
    population std otherwise); those values are averaged per subgraph.
    Input
        subgraphVertexList, subgraphEdgeList: getSubgraphEdge() output.
    Output
        subgraphMeanVariation: one mean variation value per subgraph.
    """
    subgraphMeanVariation = []
    for vertices, edges in zip(subgraphVertexList, subgraphEdgeList):
        firstOrderEdges, _ = getFirstOrderEdges(vertices, edges)
        variations = []
        for entry in firstOrderEdges:
            lengths = [edge[-1] for edge in entry[1:]]
            dof = 1 if len(lengths) >= 2 else 0
            variations.append(np.std(lengths, ddof=dof))
        subgraphMeanVariation.append(np.mean(variations))
    return subgraphMeanVariation
def getLocalCutValue(subgraphMeanVariation, subgraphSecondOrderEdgeMean):
    """
    Per-point local cut threshold: second-order mean + 0.5 * subgraph variation.

    Input
        subgraphMeanVariation: getSubgraphEdgeStatistic() output
        subgraphSecondOrderEdgeMean: getSecondOrderEdges() output
    Output
        subgraphLocalCutValueList: per subgraph, [[point_index, cut_value], ...]
    """
    subgraphLocalCutValueList = []
    for meanVariation, secondOrderMeans in zip(subgraphMeanVariation, subgraphSecondOrderEdgeMean):
        subgraphLocalCutValueList.append(
            [[pointIndex, secondMean + 0.5 * meanVariation]  # todo 0.5?
             for pointIndex, secondMean in secondOrderMeans]
        )
    return subgraphLocalCutValueList
def getLocalOtherEdge(edgeList, subgraphLocalCutValueList):
    """
    Remove local long edges: an edge is dropped when it is incident to a point
    and at least as long as that point's local cut value.

    BUG FIX: the original removed elements from `localOtherEdge` while
    iterating the same list, which skips the element following each removal;
    iterate over the unmodified *edgeList* instead (mirrors getGlobalOtherEdge).

    Input
        edgeList: edge list after global long edges were removed
        subgraphLocalCutValueList: getLocalCutValue() output
    Output
        localOtherEdge: edgeList with local long edges removed.
    """
    localOtherEdge = edgeList[:]
    for subgraphCuts in subgraphLocalCutValueList:
        for pointIndex, cutValue in subgraphCuts:
            for edge in edgeList:
                if pointIndex in edge[1:3] and edge[-1] >= cutValue and edge in localOtherEdge:
                    localOtherEdge.remove(edge)
    return localOtherEdge
# --------------------------删除限制长边--------------------------
def deleteRestrictionEdge(edgeList, restritionNumber):
    """
    Drop every edge whose length is greater than or equal to the limit.

    BUG FIX: the original removed elements from the list it was iterating,
    which skips the element following each removal; build the result by
    filtering the original list instead.

    Input
        edgeList: edge list after local long edges were removed
        restritionNumber: numeric length limit
    Output
        edges: edgeList without edges of length >= restritionNumber.
    """
    return [edge for edge in edgeList if edge[3] < restritionNumber]
# --------------------------删除不可达边--------------------------
# 以下函数用于空间叠置分析。基于向量旋转角的二维线段相交判定
# ......................................................................................................................
# This is a 2D line segment intersection decision algorithm, And refer to the following reference:
# https://blog.csdn.net/weixin_42736373/article/details/84587005
# ......................................................................................................................
class IntersectTest(object):
    """
    2D line-segment intersection test based on vector cross products.

    Segments are p1-p2 and q1-q2; after construction, ``self.result`` is 1
    when they intersect and 0 otherwise.
    Reference: https://blog.csdn.net/weixin_42736373/article/details/84587005
    """
    def __init__(self, p1, p2, q1, q2):
        self.result = self.intersectTest(p1, p2, q1, q2)
    def coordiante(self, x1, x2, k):
        # Three-way comparator on coordinate k; used via cmp_to_key for sorting
        # collinear endpoints along the segment direction.
        if x1[k] < x2[k]:
            return -1
        elif x1[k] == x2[k]:
            return 0
        else:
            return 1
    def intersectTest(self, p1, p2, q1, q2):
        """Return 1 if segment p1-p2 intersects segment q1-q2, else 0.

        Solves p1 + t*p = q1 + u*q; an intersection exists when 0 <= t <= 1
        and 0 <= u <= 1.
        NOTE(review): with integer inputs under Python 2 the divisions below
        truncate -- confirm coordinates are floats.
        """
        p = self.subtraction(p2, p1)
        q = self.subtraction(q2, q1)
        denominator = self.crossProduct(p, q)
        t_molecule = self.crossProduct(self.subtraction(q1, p1), q)  # (q1 - p1) × q
        if denominator == 0:
            # p × q == 0: parallel; additionally collinear when t_molecule == 0.
            if t_molecule == 0:
                p_q = [p1, p2, q1, q2]
                if p1 != q1 and p1 != q2 and p2 != q1 and p2 != q2:
                    # Sort the four endpoints along the segments' direction.
                    # NOTE(review): (p2[0] - p1[0]) / (p2[1] - p1[1]) raises
                    # ZeroDivisionError for horizontal segments (p2[1] == p1[1]);
                    # confirm intended inputs exclude that case.
                    p_q = sorted(p_q, key=cmp_to_key
                    (functools.partial(self.coordiante, k=1 if (p2[0] - p1[0]) / (p2[1] - p1[1]) == 0 else 0)))
                    # NOTE(review): both branches below return 1, so disjoint
                    # collinear segments are also reported as intersecting --
                    # confirm whether the first branch should return 0.
                    if p_q[0:2] == [p1, p2] or p_q[0:2] == [p2, p1] or p_q[0:2] == [q1, q2] or p_q[0:2] == [q2, q1]:
                        return 1
                    else:
                        return 1  # intersect
                else:
                    return 1  # intersect (segments share an endpoint)
            else:
                return 0  # parallel
        t = t_molecule / denominator
        if 0 <= t <= 1:
            u_molecule = self.crossProduct(self.subtraction(q1, p1), p)  # (q1 - p1) × p
            u = u_molecule / denominator
            if 0 <= u <= 1:  # intersect
                return 1
            else:
                return 0
        else:
            return 0
    def subtraction(self, a, b):
        # Element-wise vector difference a - b.
        c = []
        for i, j in zip(a, b):
            c.append(i-j)
        return c
    def crossProduct(self, a, b):
        # z-component of the 2D cross product a × b.
        return a[0]*b[1]-a[1]*b[0]
# ......................................................................................................................
def getReachableEdge(edgeList, obstacleList, pointList):
    """
    Drop edges that cross any obstacle segment and return the surviving edges,
    mapped back to their original edge records.

    Input
        edgeList: edge list after restricted long edges were removed
        obstacleList: [[[Sx1, Sy1], [Ex1, Ey1]], ...]
        pointList: point records indexed by the edge endpoint indexes
    Output
        reachableEdge: edge records not intersecting any obstacle.
    """
    segments = [[pointList[e[1]], pointList[e[2]]] for e in edgeList]
    blocked = []
    for obstacle in obstacleList:
        for segment in segments:
            hit = IntersectTest(obstacle[0], obstacle[1], segment[0][1:3], segment[1][1:3]).result
            if hit == 1 and segment not in blocked:
                blocked.append(segment)
    reachableEdge = []
    for segment in segments:
        if segment in blocked:
            continue
        indexA, indexB = segment[0][0], segment[1][0]
        for edge in edgeList:
            if indexA in edge[1:3] and indexB in edge[1:3]:
                reachableEdge.append(edge)
    return reachableEdge
# --------------------------ArcGIS界面的可视化与输出--------------------------
def createShapeFile(pointList, spatialRef, output):
    """
    Create a point shapefile from the point list and assign a spatial reference.

    Input
        pointList: labelled point list; X is element 1, Y is element 2.
        spatialRef: spatial reference for the new geometries
        output: output path/name of the shapefile
    """
    point = arcpy.Point()
    pointGeometryList = []
    for i in range(len(pointList)):
        # The single Point object is reused per iteration; PointGeometry is
        # assumed to copy the coordinates on construction.
        # NOTE(review): confirm that behaviour against the arcpy documentation.
        point.X = pointList[i][1]
        point.Y = pointList[i][2]
        pointGeometry = arcpy.PointGeometry(point, spatialRef)
        pointGeometryList.append(pointGeometry)
    arcpy.CopyFeatures_management(pointGeometryList, output)
    return
def addMarkerFields(fileName, pointList):
    """
    Add the ID and cluster-marker fields to an output shapefile and fill them.

    Rows are assumed to be in the same order as pointList; the last four
    elements of each point are its O/G/L/C cluster labels.
    Input
        fileName: path/name of the shapefile to extend
        pointList: point list after all clustering passes.
    """
    arcpy.AddField_management(fileName, "ID_T", "FLOAT")
    arcpy.AddField_management(fileName, "markerO", "TEXT") # obstacle
    arcpy.AddField_management(fileName, "markerG", "TEXT") # global
    arcpy.AddField_management(fileName, "markerL", "TEXT") # local
    arcpy.AddField_management(fileName, "markerC", "TEXT") # Constraint
    counter, rows = 0, arcpy.UpdateCursor(fileName)
    for row in rows:
        row.setValue("ID_T", pointList[counter][0])
        row.setValue("markerO", pointList[counter][-4])
        row.setValue("markerG", pointList[counter][-3])
        row.setValue("markerL", pointList[counter][-2])
        row.setValue("markerC", pointList[counter][-1])
        rows.updateRow(row)
        counter += 1
    return
def outputWriteToTxt(filePath, name, inList, pointList):
    """
    Write a cluster-result summary to <filePath><name>.txt.

    Input Arguments
        filePath: Path to folder where .txt file is stored (with trailing separator)
        name: Name of .txt file (without extension)
        inList: [number of points, NNI, number of clusters]
        pointList: clustered point list; each point's last element is its label
    """
    outfile = filePath + name + ".txt"
    # Count how many points carry each cluster label.
    labelCounts = {}
    for point in pointList:
        label = point[-1]
        labelCounts[label] = labelCounts.get(label, 0) + 1
    # BUG FIX: use a context manager so the file is always closed, and fix the
    # "Numbet" typo in the user-facing summary line.
    with open(outfile, 'w') as myDocument:
        myDocument.write("=========================================================================================" + "\n")
        myDocument.write("This file summarized the cluster result! " + "\n")
        myDocument.write("=========================================================================================" + "\n")
        myDocument.write("Please notice that 'O0, C0...' represents isolated points! " + "\n")
        myDocument.write("\n")
        myDocument.write("Number of points: " + str(inList[0]) + "\n")
        myDocument.write("NNI: " + str(inList[1]) + "\n")
        myDocument.write("Number of cluster: " + str(inList[2]-1) + "C" + "\n" + "\n")  # isolated points not counted
        myDocument.write("-----------------------------------------------------------------------------------------" + "\n")
        myDocument.write("Details of the clustering results" + "\n")
        myDocument.write("-----------------------------------------------------------------------------------------" + "\n")
        for label, count in labelCounts.items():
            myDocument.write(label + ": " + str(count) + "\n")
    return
| 27,058 | 10,075 |
from unittest.mock import patch, Mock
from mailer import send
import smtplib
def test_send_unittest():
    """Check send() delegates to smtplib.SMTP.sendmail with the right payload."""
    sender = "john.doe@example.com"
    to = "jane.doe@example.com"
    body = "Hello jane!"
    subject = "How are you?"
    with patch("smtplib.SMTP") as smtp_mock:
        smtp_client = smtp_mock.return_value
        smtp_client.sendmail.return_value = {}
        result = send(sender, to, subject, body)
        sendmail_args = smtp_client.sendmail.call_args[0]
        assert smtp_client.sendmail.called
        assert sendmail_args[0] == sender
        assert sendmail_args[1] == to
        assert subject in sendmail_args[2]
        assert body in sendmail_args[2]
        assert result == {}
def test_send(monkeypatch):
    """Check send() delegates to smtplib.SMTP.sendmail, using monkeypatch."""
    sender = "john.doe@example.com"
    to = "jane.doe@example.com"
    body = "Hello jane!"
    subject = "How are you?"
    smtp_factory = Mock()
    monkeypatch.setattr(smtplib, "SMTP", smtp_factory)
    smtp_client = smtp_factory.return_value
    smtp_client.sendmail.return_value = {}
    result = send(sender, to, subject, body)
    sendmail_args = smtp_client.sendmail.call_args[0]
    assert smtp_client.sendmail.called
    assert sendmail_args[0] == sender
    assert sendmail_args[1] == to
    assert subject in sendmail_args[2]
    assert body in sendmail_args[2]
    assert result == {}
| 1,272 | 436 |
{"filter":false,"title":"urls.py","tooltip":"/crawl/urls.py","undoManager":{"mark":41,"position":41,"stack":[[{"group":"doc","deltas":[{"start":{"row":5,"column":42},"end":{"row":6,"column":0},"action":"insert","lines":["",""]},{"start":{"row":6,"column":0},"end":{"row":6,"column":4},"action":"insert","lines":[" "]}]}],[{"group":"doc","deltas":[{"start":{"row":6,"column":4},"end":{"row":6,"column":42},"action":"insert","lines":["url(r'^$', views.index, name='index'),"]}]}],[{"group":"doc","deltas":[{"start":{"row":6,"column":11},"end":{"row":6,"column":12},"action":"insert","lines":["c"]}]}],[{"group":"doc","deltas":[{"start":{"row":6,"column":11},"end":{"row":6,"column":12},"action":"remove","lines":["c"]}]}],[{"group":"doc","deltas":[{"start":{"row":6,"column":11},"end":{"row":6,"column":12},"action":"insert","lines":["s"]}]}],[{"group":"doc","deltas":[{"start":{"row":6,"column":12},"end":{"row":6,"column":13},"action":"insert","lines":["t"]}]}],[{"group":"doc","deltas":[{"start":{"row":6,"column":13},"end":{"row":6,"column":14},"action":"insert","lines":["a"]}]}],[{"group":"doc","deltas":[{"start":{"row":6,"column":14},"end":{"row":6,"column":15},"action":"insert","lines":["t"]}]}],[{"group":"doc","deltas":[{"start":{"row":6,"column":15},"end":{"row":6,"column":16},"action":"insert","lines":["u"]}]}],[{"group":"doc","deltas":[{"start":{"row":6,"column":16},"end":{"row":6,"column":17},"action":"insert","lines":["s"]}]}],[{"group":"doc","deltas":[{"start":{"row":6,"column":27},"end":{"row":6,"column":32},"action":"remove","lines":["index"]},{"start":{"row":6,"column":27},"end":{"row":6,"column":28},"action":"insert","lines":["s"]}]}],[{"group":"doc","deltas":[{"start":{"row":6,"column":28},"end":{"row":6,"column":29},"action":"insert","lines":["t"]}]}],[{"group":"doc","deltas":[{"start":{"row":6,"column":29},"end":{"row":6,"column":30},"action":"insert","lines":["a"]}]}],[{"group":"doc","deltas":[{"start":{"row":6,"column":30},"end":{"row":6,"column":31},"action":"
insert","lines":["t"]}]}],[{"group":"doc","deltas":[{"start":{"row":6,"column":31},"end":{"row":6,"column":32},"action":"insert","lines":["u"]}]}],[{"group":"doc","deltas":[{"start":{"row":6,"column":32},"end":{"row":6,"column":33},"action":"insert","lines":["s"]}]}],[{"group":"doc","deltas":[{"start":{"row":6,"column":41},"end":{"row":6,"column":46},"action":"remove","lines":["index"]},{"start":{"row":6,"column":41},"end":{"row":6,"column":42},"action":"insert","lines":["s"]}]}],[{"group":"doc","deltas":[{"start":{"row":6,"column":42},"end":{"row":6,"column":43},"action":"insert","lines":["t"]}]}],[{"group":"doc","deltas":[{"start":{"row":6,"column":43},"end":{"row":6,"column":44},"action":"insert","lines":["a"]}]}],[{"group":"doc","deltas":[{"start":{"row":6,"column":44},"end":{"row":6,"column":45},"action":"insert","lines":["u"]}]}],[{"group":"doc","deltas":[{"start":{"row":6,"column":45},"end":{"row":6,"column":46},"action":"insert","lines":["t"]}]}],[{"group":"doc","deltas":[{"start":{"row":6,"column":45},"end":{"row":6,"column":46},"action":"remove","lines":["t"]}]}],[{"group":"doc","deltas":[{"start":{"row":6,"column":44},"end":{"row":6,"column":45},"action":"remove","lines":["u"]}]}],[{"group":"doc","deltas":[{"start":{"row":6,"column":44},"end":{"row":6,"column":45},"action":"insert","lines":["t"]}]}],[{"group":"doc","deltas":[{"start":{"row":6,"column":45},"end":{"row":6,"column":46},"action":"insert","lines":["u"]}]}],[{"group":"doc","deltas":[{"start":{"row":6,"column":46},"end":{"row":6,"column":47},"action":"insert","lines":["s"]}]}],[{"group":"doc","deltas":[{"start":{"row":7,"column":0},"end":{"row":8,"column":0},"action":"insert","lines":[" url(r'^status$', views.status, 
name='status'),",""]}]}],[{"group":"doc","deltas":[{"start":{"row":7,"column":11},"end":{"row":7,"column":18},"action":"remove","lines":["status$"]},{"start":{"row":7,"column":11},"end":{"row":7,"column":12},"action":"insert","lines":["s"]}]}],[{"group":"doc","deltas":[{"start":{"row":7,"column":12},"end":{"row":7,"column":13},"action":"insert","lines":["t"]}]}],[{"group":"doc","deltas":[{"start":{"row":7,"column":13},"end":{"row":7,"column":14},"action":"insert","lines":["o"]}]}],[{"group":"doc","deltas":[{"start":{"row":7,"column":14},"end":{"row":7,"column":15},"action":"insert","lines":["p"]}]}],[{"group":"doc","deltas":[{"start":{"row":7,"column":15},"end":{"row":7,"column":16},"action":"insert","lines":["&"]}]}],[{"group":"doc","deltas":[{"start":{"row":7,"column":15},"end":{"row":7,"column":16},"action":"remove","lines":["&"]}]}],[{"group":"doc","deltas":[{"start":{"row":7,"column":15},"end":{"row":7,"column":16},"action":"insert","lines":["$"]}]}],[{"group":"doc","deltas":[{"start":{"row":7,"column":25},"end":{"row":7,"column":31},"action":"remove","lines":["status"]},{"start":{"row":7,"column":25},"end":{"row":7,"column":26},"action":"insert","lines":["s"]}]}],[{"group":"doc","deltas":[{"start":{"row":7,"column":26},"end":{"row":7,"column":27},"action":"insert","lines":["t"]}]}],[{"group":"doc","deltas":[{"start":{"row":7,"column":27},"end":{"row":7,"column":28},"action":"insert","lines":["o"]}]}],[{"group":"doc","deltas":[{"start":{"row":7,"column":28},"end":{"row":7,"column":29},"action":"insert","lines":["p"]}]}],[{"group":"doc","deltas":[{"start":{"row":7,"column":37},"end":{"row":7,"column":43},"action":"remove","lines":["status"]},{"start":{"row":7,"column":37},"end":{"row":7,"column":38},"action":"insert","lines":["s"]}]}],[{"group":"doc","deltas":[{"start":{"row":7,"column":38},"end":{"row":7,"column":39},"action":"insert","lines":["t"]}]}],[{"group":"doc","deltas":[{"start":{"row":7,"column":39},"end":{"row":7,"column":40},"action":"insert","lines"
:["o"]}]}],[{"group":"doc","deltas":[{"start":{"row":7,"column":40},"end":{"row":7,"column":41},"action":"insert","lines":["p"]}]}]]},"ace":{"folds":[],"scrolltop":0,"scrollleft":0,"selection":{"start":{"row":7,"column":41},"end":{"row":7,"column":41},"isBackwards":false},"options":{"guessTabSize":true,"useWrapMode":false,"wrapToView":true},"firstLineState":0},"timestamp":1428186714049,"hash":"9e5d3fcc85eb3cee304ad7fa0120fff49a0b2a85"} | 6,162 | 2,386 |
# coding: utf-8
# In[1]:
import sys
sys.path.append("../")
# In[2]:
from pathlib import Path
from functools import partial
import joblib
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from fastai.text import LanguageModelLoader, LanguageModelData
from fastai.core import T
from fastai.rnn_reg import EmbeddingDropout
from torch.optim import Adam
import torch.nn as nn
import torch
import torch.nn.functional as F
import sentencepiece as spm
# In[3]:
# Load pre-tokenized BPE id arrays (one numpy array per text).
tokens = joblib.load("../data/tokens_bpe.pkl")
# In[4]:
# Filter out empty texts
tokens = [x for x in tokens if x.shape[0] > 0]
# In[5]:
# Set shuffle = False to keep sentences from the same paragraph together
# Two chained splits: 80% train, 10% validation, 10% test.
trn_tokens, val_tokens = train_test_split(tokens, test_size=0.2, shuffle=False)
val_tokens, tst_tokens = train_test_split(val_tokens, test_size=0.5, shuffle=False)
# In[6]:
def get_voc_stats(tokens):
    """Print corpus size and the percentage of unknown (id == 0) tokens.

    Args:
        tokens: iterable of 1-D numpy arrays of token ids.
    """
    total_tokens = np.sum([x.shape[0] for x in tokens])
    # Id 0 is treated as the <unk> token id here.
    unks = np.sum([np.sum(x == 0) for x in tokens])
    # Guard the empty-corpus case, which previously printed 'nan %'
    # and emitted a numpy divide warning.
    unk_pct = unks * 100 / total_tokens if total_tokens else 0.0
    print("Total tokens: %d\nUnknown Percentage: %.2f %%" % (total_tokens, unk_pct))
get_voc_stats(tokens)
# In[7]:
# Back-prop-through-time window and mini-batch size for the LM data loaders.
bptt = 75
batch_size = 64
# Vocabulary size: largest token id seen anywhere in the corpus, plus one.
n_tok = int(np.max([np.max(x) for x in tokens]) + 1)
trn_loader = LanguageModelLoader(
    np.concatenate(trn_tokens), batch_size, bptt)
val_loader = LanguageModelLoader(
    np.concatenate(val_tokens), batch_size, bptt)
tst_loader = LanguageModelLoader(
    np.concatenate(tst_tokens), batch_size, bptt)
# In[8]:
# SentencePiece BPE model that produced the token ids loaded above.
sp = spm.SentencePieceProcessor()
sp.Load("../data/bpe_model.model")
# In[9]:
sp.EncodeAsIds(", 的*")
# In[10]:
np.sum([np.sum(x == 1) for x in tokens]) # <s>
# In[11]:
np.sum([np.sum(x == 2) for x in tokens]) # </s>
# In[12]:
sp.DecodeIds(trn_tokens[0].tolist())
# In[13]:
sp.DecodeIds(trn_tokens[1].tolist())
# In[14]:
from collections import Counter
tmp = []
for i in range(10000):
for j in range(1, trn_tokens[i].shape[0]):
if trn_tokens[i][j] == 0:
tmp.append(trn_tokens[i][j-1])
Counter(tmp).most_common(10)
# In[15]:
from collections import Counter
tmp = []
for i in range(10000):
for j in range(1, trn_tokens[i].shape[0]-1):
if trn_tokens[i][j] == 4:
tmp.append(trn_tokens[i][j+1])
Counter(tmp).most_common(10)
# In[19]:
sp.DecodeIds([4569])
# In[17]:
path = Path("../data/cache/lm_bpe/")
path.mkdir(parents=True, exist_ok=True)
model_data = LanguageModelData(
path, pad_idx=2, n_tok=n_tok, trn_dl=trn_loader, val_dl=val_loader, test_dl=tst_loader
)
# In[18]:
n_tok
# ### QRNN Model
# In[21]:
drops = np.array([0.05, 0.1, 0.05, 0, 0.1])
learner = model_data.get_model(
partial(Adam, betas=(0.8, 0.999)),
emb_sz=300, n_hid=500, n_layers=4,
dropouti=drops[0], dropout=drops[1], wdrop=drops[2],
dropoute=drops[3], dropouth=drops[4], qrnn=True
)
# In[22]:
learner.clip = 25.
learner.lr_find(start_lr=1e-5, end_lr=1, linear=False)
learner.sched.plot()
# In[22]:
lrs = 2e-3
learner.fit(lrs, 1, wds=1e-7, use_clr=(50, 3), cycle_len=10, use_wd_sched=True)
# In[23]:
learner.sched.plot_lr()
# In[43]:
lrs = 5e-4
learner.fit(lrs, 1, wds=1e-7, use_clr=(50, 3), cycle_len=10, use_wd_sched=True)
# In[14]:
learner.sched.plot_loss()
# In[44]:
learner.save("lm_qrnn")
learner.save_encoder("lm_qrnn_enc")
# In[ ]:
learner.load("lm_qrnn")
# ### LSTM
# In[20]:
drops = np.array([0.1, 0.1, 0.05, 0, 0.1])
learner = model_data.get_model(
partial(Adam, betas=(0.8, 0.999)),
emb_sz=300, n_hid=500, n_layers=3,
dropouti=drops[0], dropout=drops[1], wdrop=drops[2],
dropoute=drops[3], dropouth=drops[4], qrnn=False
)
# In[21]:
learner.clip = 25.
learner.lr_find(start_lr=1e-5, end_lr=1, linear=False)
learner.sched.plot()
# In[22]:
lrs = 2e-3
learner.clip = 10.
learner.fit(lrs, 1, wds=1e-7, use_clr=(50, 5), cycle_len=20, use_wd_sched=True)
# In[23]:
learner.sched.plot_lr()
# In[24]:
learner.save("lm_lstm")
learner.save_encoder("lm_lstm_enc")
# In[25]:
tmp_iter = iter(trn_loader)
# In[26]:
next(tmp_iter)[0].shape
# In[27]:
learner.load("lm_lstm")
# ## Test the model
# In[28]:
learner.model.eval()
# ### Next Character Inference
# In[29]:
tokens = sp.EncodeAsIds("德国 是 世界 大国 之 一 , 其 国内 生产 总 值 以 国际 汇率 计")
tokens
# In[30]:
logits, _, _ = learner.model(T(tokens).unsqueeze(1))
logits.shape
# In[32]:
sorted_idx = np.argsort(logits.data.cpu().numpy(), 1)
preds = []
for i in range(1, 4):
preds.append([sp.IdToPiece(x) for x in sorted_idx[:, -i].tolist()])
# preds = list(map(lambda x: itos[x], np.argmax(logits.data.cpu().numpy(), 1)))
pd.DataFrame({"orig": sp.EncodeAsPieces("德国 是 世界 大国 之 一 , 其 国内 生产 总 值 以 国际 汇率 计") + [""],
"pred_1": [""] + preds[0], "pred_2": [""] + preds[1], "pred_3": [""] + preds[2]})
# In[33]:
def eval(texts):
    """Show the model's top-3 next-token predictions for every position.

    NOTE(review): this shadows the built-in ``eval``; renaming it together
    with its call sites would be a good follow-up.

    Returns:
        DataFrame with the original pieces and three prediction columns.
    """
    learner.model[0].reset()
    token_ids = sp.EncodeAsIds(texts)
    logits, _, _ = learner.model(T(token_ids).unsqueeze(1))
    ranked = np.argsort(logits.data.cpu().numpy(), 1)
    # Columns ranked[:, -1], ranked[:, -2], ranked[:, -3] hold the top-3 ids.
    top3 = [[sp.IdToPiece(idx) for idx in ranked[:, -rank].tolist()]
            for rank in range(1, 4)]
    columns = {"orig": sp.EncodeAsPieces(texts) + [""]}
    for rank, pieces in enumerate(top3, start=1):
        columns["pred_%d" % rank] = [""] + pieces
    return pd.DataFrame(columns)
# In[34]:
eval("在 现代 印刷 媒体 , 卡通 是 一 种 通常 有 幽默 色")
# In[35]:
eval("对 中国 与 南洋 发动 全面 的 战争 。 1990 年代 , 中")
# ### Generate Sentence
# In[38]:
import random
def generate_text(tokens, N=25):
    """Sample N tokens from the language model and print the decoded text.

    Args:
        tokens: seed token ids. Fix: the caller's list is no longer mutated
            (the original appended every generated id to it).
        N: number of tokens to generate.
    """
    generated = list(tokens)
    for _ in range(N):
        learner.model[0].reset()
        logits, _, _ = learner.model(T(generated).unsqueeze(1))
        # Probability distribution over the vocabulary at the last position.
        probs = F.softmax(logits).data.cpu().numpy()[-1, :]
        candidates = np.argsort(probs)[::-1]
        while True:
            # Sample from the full distribution; ids 0-2 are presumably the
            # special <unk>/<s>/</s> tokens and are rejected.
            candidate = np.random.choice(candidates, p=probs[candidates])
            if candidate > 2:
                print(probs[candidates][:3], probs[candidate])
                generated.append(int(candidate))
                break
    print(sp.DecodeIds(generated))
generate_text(sp.EncodeAsIds("德国 是 世界 大国 之 一 , 其 国内 生产 总 值 以 国际 汇率 为主 , "))
# In[66]:
generate_text(sp.EncodeAsIds("在 现代 印刷 媒体 , 卡通 是 一种 通常 有 幽默 色 "))
# In[86]:
generate_text(sp.EncodeAsIds("日本 后来 成为 第二次 世界大战 的 轴心国 之一 , 对 中国 与 南洋 发动 全面 的 战争"))
# In[87]:
generate_text(sp.EncodeAsIds("特朗普 政府 以为 加征 关税 会 令 中国 屈服 , 这种 策略 肯定 会 适得其反 , 如果 就业 和 财富"))
| 7,000 | 3,414 |
# Copyright 2018 Flight Lab authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library for badge reader component."""
import threading
import time
from components import base
from protos import controller_pb2
from utils import badger
class BadgeReaderComponent(base.Component):
  """Component to authorize USB badges.

  Events:
    "status_changed": when badge scan is authorized or authorization has expired.
  """

  # Seconds after the last scan before status falls back to UNKNOWN.
  _AUTH_TIMEOUT_SEC = 10

  def __init__(self, proto, *args, **kwargs):
    """Create a BadgeReaderComponent instance.

    Args:
      proto: flightlab.BadgeReader protobuf.
    """
    super(BadgeReaderComponent, self).__init__(proto, *args, **kwargs)
    self._deauth = None
    self._validator = badger.BadgeValidator(self.settings.url,
                                            self.settings.key_param)
    self._reader = badger.BadgeReader(
        usb_vendor_id=self.settings.usb_vendor_id,
        usb_product_id=self.settings.usb_product_id)
    self._reader.on('read_success', self._on_read_success)
    self._reader.start()

  def _on_read_success(self, reader, badge_id):
    """Validate a scanned badge and (re)schedule de-authorization.

    Emits:
      status_changed: with status AUTHORIZED or UNAUTHORIZED.
    """
    self.logger.info("Badge %s Read Successfully", badge_id)
    if self._validator.validate(badge_id):
      self.logger.info("Badge Validated")
      self.settings.status = controller_pb2.Badger.AUTHORIZED
    else:
      self.logger.info("Invalid Badge")
      self.settings.status = controller_pb2.Badger.UNAUTHORIZED
    # The emit was duplicated in both branches; hoisted here.
    self.emit('status_changed', self)
    # Restart the expiry timer so the latest scan gets the full window.
    if self._deauth:
      self._deauth.cancel()
    self._deauth = threading.Timer(self._AUTH_TIMEOUT_SEC, self._deauthorize)
    self._deauth.start()

  def _deauthorize(self):
    """Ensures status is changed to UNKNOWN, which is the default state.

    (Fix: this docstring was previously a stray string statement placed
    after the first line of the method body.)

    Emits:
      status_changed
    """
    self.logger.info("Deauthorizing")
    self.settings.status = controller_pb2.Badger.UNKNOWN
    self.emit('status_changed', self)

  def close(self):
    """Stops the badge reader and cancels any pending de-authorization."""
    if self._deauth:
      self._deauth.cancel()
    super(BadgeReaderComponent, self).close()
| 2,576 | 815 |
from excelcy import ExcelCy
from excelcy.storage import Config
# test_string = 'Android Pay expands to Canada'
# excelcy = ExcelCy()
# excelcy.storage.config = Config(nlp_base='en_core_web_sm', train_iteration=50, train_drop=0.2)
# doc = excelcy.nlp(test_string)
# # showing no ORG
# print([(ent.label_, ent.text) for ent in doc.ents])
# excelcy.storage.source.add(kind='text', value=test_string)
# excelcy.discover()
# excelcy.storage.prepare.add(kind='phrase', value='Android Pay', entity='PRODUCT')
# excelcy.prepare()
# excelcy.train()
# doc = excelcy.nlp(test_string)
# print([(ent.label_, ent.text) for ent in doc.ents])
# FAILED tests/test_excelcy.py::ExcelCyTestCase::test_execute - AssertionError: assert ('$1', 'MONEY') in {('$1 million', 'MONEY'), ('Uber', 'ORG')}
# FAILED tests/test_pipe.py::PipeTestCase::test_execute - AssertionError: assert ('$1', 'MONEY') in {('$1 million', 'MONEY'), ('Uber', 'ORG')}
# FAILED tests/test_readme.py::ReadmeTestCase::test_readme_04 - AssertionError: assert ('China' == 'Himalayas'
# Baseline run with the stock model; per the commented example above,
# 'Android Pay' is presumably not tagged as an entity yet.
excelcy = ExcelCy()
doc = excelcy.nlp('Android Pay expands to Canada')
print([(ent.label_, ent.text) for ent in doc.ents])
# Train from the Excel workbook, then re-check the same sentence.
excelcy = ExcelCy.execute(file_path='tests/data/test_data_03.xlsx')
doc = excelcy.nlp('Android Pay expands to Canada')
print([(ent.label_, ent.text) for ent in doc.ents])
| 1,332 | 493 |
# https://youtu.be/O3HBd0ICJ2M
# defaultdict is the same as normal dictionaries, except a defaultdict
# sets a default value if a key has not been set yet; this is mostly
# for convenience
from collections import defaultdict
def freqQuery(queries):
    """Process 'Frequency Queries' operations, each in O(1).

    Args:
        queries: list of (op, value) pairs:
            op 1 - insert value;
            op 2 - delete one occurrence of value (no-op if absent);
            op 3 - record 1 if some value occurs exactly `value` (> 0) times,
                   else record 0.

    Returns:
        List of 0/1 answers, one per op-3 query.
    """
    val_counts = defaultdict(int)   # value -> number of occurrences
    freq_counts = defaultdict(int)  # occurrence count -> how many values have it
    answers = []
    for op, value in queries:
        if op == 1:
            # Move `value` from its old frequency bucket to the new one.
            # defaultdict makes the original's in/else branching unnecessary.
            freq_counts[val_counts[value]] -= 1
            val_counts[value] += 1
            freq_counts[val_counts[value]] += 1
        elif op == 2:
            # Deleting an absent value is a no-op.
            if val_counts[value] > 0:
                freq_counts[val_counts[value]] -= 1
                val_counts[value] -= 1
                freq_counts[val_counts[value]] += 1
        elif op == 3:
            # O(1) bucket lookup (the original comment claimed O(n), but the
            # dict membership test was already constant time). Queries for
            # frequency 0 are outside the problem's constraints.
            answers.append(1 if freq_counts[value] > 0 else 0)
    return answers
# JS IMPLEMENTATION
# function frequencyQueries(queries) {
# const answers = [];
# // keeps track of the number of occurrences of eacy query value
# const occurrences = {};
# // keeps track of how many values have shown up a certain number of times
# // keys are integers representing frequency and values are the number
# // of values that showcase that frequency
# // for example, if a query specifies a new value, then that value has
# // only shown up once, so we'll increment the value associated with
# // the key of 1 to indicate that there is an additional value that
# // has shown up once
# const frequencies = {};
#
# for (const [op, val] of queries) {
# if (op === 1) {
# // subtract an occurrence of the value's prior frequency
# frequencies[occurrences[val]]--;
# // add the value to our occurrences map
# occurrences[val] = (occurrences[val] || 0) + 1;
# // increment an occurrence of the value's new frequency
# frequencies[occurrences[val]] = (frequencies[occurrences[val]] || 0) + 1;
# } else if (op === 2 && occurrences[val]) {
# // subtract an occurrence of the value's prior frequency
# frequencies[occurrences[val]]--;
# // remove the value from our occurrences map
# occurrences[val]--;
# // increment an occurrence of the value's new frequency
# frequencies[occurrences[val]]++;
# } else if (op === 3) {
# // all we have to do for operation 3 is check if the value
# // associated with the frequency > 0
# answers.push(frequencies[val] > 0 ? 1 : 0);
# }
# }
#
# return answers;
# }
# RUST IMPLEMENTATION
# use std::collections::HashMap;
#
# fn frequency_queries(queries: Vec<(i32, i32)>) -> Vec<i32> {
# let mut val_counts: HashMap<i32, i32> = HashMap::new();
# let mut freq_counts: HashMap<i32, i32> = HashMap::new();
# let mut answers = vec![];
#
# for (i, j) in queries {
# match i {
# 1 => {
# let f = val_counts.entry(j).or_insert(0);
# // decrement j's value in freq_counts
# freq_counts.entry(*f).and_modify(|v| if *v > 0 { *v -= 1 }).or_insert(0);
# // increment j's value in val_counts
# *f += 1;
# // increment j's value in freq_counts
# freq_counts.entry(*f).and_modify(|v| *v += 1).or_insert(1);
# },
# 2 => {
# let f = val_counts.entry(j).or_insert(0);
# // decrement j's value in freq_counts
# freq_counts.entry(*f).and_modify(|v| if *v > 0 { *v -= 1 }).or_insert(0);
# // decrement j's value in val_counts
# if *f > 0 { *f -= 1; }
# // increment j's value in freq_counts
# freq_counts.entry(*f).and_modify(|v| *v += 1).or_insert(1);
# },
# 3 => {
# let fc = freq_counts.entry(j).or_insert(0);
# if *fc > 0 { answers.push(1); } else { answers.push(0); }
# },
# _ => panic!("Got an unexpected query number"),
# }
# }
#
# answers
# }
| 4,978 | 1,581 |
from data import *
from model import *
import time
import math
n_hidden = 128
n_epochs = 100000
print_every = 5000
plot_every = 1000
learning_rate = 0.005 # If you set this too high, it might explode. If too low, it might not learn
rnn = RNN(n_letters, n_hidden, n_categories)
optimizer = torch.optim.SGD(rnn.parameters(), lr=learning_rate)
criterion = nn.NLLLoss()
def train(category_tensor, line_tensor):
    """Run one training step on a single (category, line) example.

    Feeds the line through the RNN one character at a time, then
    back-propagates the NLL loss of the final output.

    Returns:
        (output, loss): the final RNN output and the scalar loss value.
    """
    hidden = rnn.init_hidden()
    optimizer.zero_grad()
    # Iterating the tensor walks its first (sequence) dimension,
    # yielding one character tensor per step.
    for char_tensor in line_tensor:
        output, hidden = rnn(char_tensor, hidden)
    loss = criterion(output, category_tensor)
    loss.backward()
    optimizer.step()
    return output, loss.item()
# Keep track of losses for plotting
current_loss = 0
all_losses = []
def time_since(since):
    """Format the wall-clock time elapsed since `since` as 'Xm Ys'."""
    elapsed = time.time() - since
    minutes, seconds = divmod(elapsed, 60)
    return '%dm %ds' % (minutes, seconds)
start = time.time()
for epoch in range(1, n_epochs + 1):
    # Each "epoch" here is a single randomly drawn (category, name) example.
    category, line, category_tensor, line_tensor = random_training_pair()
    output, loss = train(category_tensor, line_tensor)
    current_loss += loss
    # Print epoch number, loss, name and guess
    if epoch % print_every == 0:
        guess, guess_i = category_from_output(output)
        correct = '✓' if guess == category else '✗ (%s)' % category
        print('%d %d%% (%s) %.4f %s / %s %s' % (
            epoch, epoch / n_epochs * 100, time_since(start), loss, line, guess, correct))
    # Add current loss avg to list of losses
    if epoch % plot_every == 0:
        all_losses.append(current_loss / plot_every)
        current_loss = 0
# Persist the whole model object (architecture + weights).
torch.save(rnn, 'char-rnn-name-classification.pt')
| 1,663 | 616 |
#!/usr/bin/python3
import argparse
import collections
import pathlib
import matplotlib.pyplot as plt
import matplotlib.ticker
import numpy as np
import pandas as pd
from flow_models.elephants.calculate import calculate
from flow_models.lib.data import UNITS
from flow_models.lib.plot import save_figure, matplotlib_config
X_VALUES = ['length', 'size']
METHODS = {'first': '-',
'threshold': '--',
'sampling': ':'}
SIZE = 0.6
FIGSIZE = [SIZE * 11.2, SIZE * 5.66]
def plot_traffic(calculated):
    """Plot traffic coverage vs. flow-table occupancy for each method.

    Produces one figure per reference ('absolute' plus each method in
    METHODS); curves are first interpolated onto a common occupancy grid
    so the methods can be compared point-wise.
    """
    interpolated = {}
    for x_val in X_VALUES:
        # Common grid of inverse-occupancy values, spaced geometrically.
        nidx = 1 / pd.Float64Index(np.geomspace(1, 10000, 5000, endpoint=False))
        for method in METHODS:
            idx = 1 / calculated[method][x_val]['occupancy_mean']
            ddd = calculated[method][x_val].copy().set_index(idx)
            ddd = ddd[~ddd.index.duplicated()]
            ddd = ddd.reindex(ddd.index.union(nidx)).interpolate('slinear').reindex(nidx)
            interpolated.setdefault(method, {})[x_val] = ddd
    for to in ['absolute'] + list(METHODS):
        to_label = '%'
        fig, axes = plt.subplots(1, 2, sharex='all', sharey='all', figsize=[FIGSIZE[0] * 2.132, FIGSIZE[1]])
        for n, x_val in enumerate(X_VALUES):
            ax = axes[n]
            for method in METHODS:
                d = interpolated[method][x_val]['octets_mean']
                if to == 'absolute':
                    r = 1
                else:
                    # Plot coverage relative to the reference method `to`.
                    r = interpolated[to][x_val]['octets_mean']
                    to_label = f'relative to {to}'
                ax.plot(d.index, d / r, 'b' + METHODS[method], lw=2,
                        label=method)
            ax.set_ylabel(f'Traffic coverage [{to_label}]')
            ax.set_xlabel(f'Flow table occupancy (decision by {x_val})')
            ax.tick_params('y', labelleft=True)
            ax.set_xscale('log')
            ax.legend()
        fig.gca().invert_xaxis()
        out = f'traffic_{to}'
        # Bug fix: save_figure() was called twice with identical arguments.
        save_figure(fig, out)
        plt.close(fig)
def plot_usage(calculated, what):
    """Plot flow-table `what` ('occupancy' or 'operations') vs. traffic coverage.

    Also dumps a LaTeX table ('selected.tex') with values at a few selected
    coverage points. One figure is produced per reference ('absolute' plus
    each method in METHODS).
    """
    interpolated = {}
    for x_val in X_VALUES:
        # Common grid: traffic coverage from 50% to 100%.
        nidx = pd.Float64Index(np.linspace(50, 100, 5001, endpoint=True))
        for method in METHODS:
            idx = calculated[method][x_val]['octets_mean']
            ddd = calculated[method][x_val].copy().set_index(idx)
            ddd = ddd[~ddd.index.duplicated()]
            ddd = ddd.reindex(ddd.index.union(nidx)).interpolate('slinear').reindex(nidx)
            interpolated.setdefault(x_val, {})[method] = ddd
    points = [99, 95, 90, 80, 75, 50]
    z = pd.concat({k: pd.concat(v) for k, v in interpolated.items()})
    z = z.unstack([0, 1]).swaplevel(1, 2, axis=1).sort_index(axis=1)[['occupancy_mean', 'operations_mean']]
    z = z.reindex(METHODS, axis=1, level=1)
    # Fix: plain string literal (was an f-string with no placeholders).
    z.loc[points][['occupancy_mean', 'operations_mean']].to_latex('selected.tex',
                                                                 float_format='%.2f',
                                                                 multicolumn_format='c')
    for to in ['absolute'] + list(METHODS):
        to_label = ' reduction [x]'
        fig, axes = plt.subplots(1, 2, sharex='all', sharey='all', figsize=[FIGSIZE[0] * 2.132, FIGSIZE[1]])
        for n, x_val in enumerate(X_VALUES):
            ax = axes[n]
            for method in METHODS:
                d = interpolated[x_val][method]
                if to == 'absolute':
                    # Reference of 1 everywhere -> plot absolute values.
                    r = d.copy()
                    for col in r.columns:
                        r[col].values[:] = 1
                    ax.plot(d.index, d[f'{what}_mean'] / r[f'{what}_mean'], 'k' + METHODS[method], lw=2,
                            label=method)
                else:
                    r = interpolated[x_val][to]
                    to_label = f' [relative to {to}]'
                    # Inverted ratio: reference / method = reduction factor.
                    ax.plot(d.index, r[f'{what}_mean'] / d[f'{what}_mean'], 'k' + METHODS[method], lw=2,
                            label=method)
            ax.set_xlabel(f'Traffic coverage [%] (decision by {x_val})')
            ax.set_ylabel(f'Flow table {what}{to_label}')
            ax.tick_params('y', labelleft=True)
            if to == 'absolute':
                ax.set_yscale('log')
            ax.legend()
        fig.gca().invert_xaxis()
        fig.gca().get_yaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
        out = f'{what}_{to}'
        save_figure(fig, out)
        plt.close(fig)
def plot_calc_sim(ax, calculated, simulated, method, x_val, w):
    """Plot one quantity `w` for `method`: simulated points vs. calculated curve.

    Args:
        ax: matplotlib axes to draw on.
        calculated: nested dict method -> x_val -> DataFrame of calculated results.
        simulated: nested dict method -> x_val -> DataFrame of simulation results.
        method: one of METHODS.
        x_val: 'length' or 'size'.
        w: column family to plot ('octets', 'flows' or 'fraction').
    """
    # Marker styles for simulated data, line styles for calculated curves.
    sim_style = {
        'octets': 'bo',
        'flows': 'ro',
        'fraction': 'ko'
    }
    calc_style = {
        'octets': 'b-',
        'flows': 'r-',
        'fraction': 'k-'
    }
    # Human-readable legend names for the column families.
    if w == 'fraction':
        name = 'Occupancy'
    elif w == 'octets':
        name = 'Traffic coverage'
    elif w == 'flows':
        name = 'Operations'
    else:
        name = w[:-1] + ' coverage'
    axis = 'left' if w == 'octets' else 'right'
    d = simulated[method][x_val][w + '_mean']
    # The confidence-interval column is optional in the simulation data.
    try:
        e = simulated[method][x_val][w + '_conf']
    except KeyError:
        e = None
    ax.errorbar(d.index, d, e, None, sim_style[w], lw=1, capthick=1, ms=2,
                label=f'{name} (sim.) ({axis})')
    n = calculated[method][x_val][w + '_mean']
    # Clip the calculated curve to the simulated range; for 'sampling' the
    # clip uses min() (its x-axis is inverted elsewhere — see plot_all).
    d = n.loc[:d.index.max() if method != 'sampling' else d.index.min()]
    ax.plot(d.index, d, calc_style[w], lw=2,
            label=f'{name} (calc.) ({axis})')
def plot_all(calculated, simulated, one):
    """Plot calculated vs. simulated results for every method.

    Args:
        calculated: method -> x_val -> DataFrame of calculated results.
        simulated: method -> x_val -> DataFrame of simulated results.
        one: if True, put both x_val panels into a single figure per method;
            otherwise save one figure per (method, x_val) pair.
    """
    for method in calculated:
        if one:
            fig, axes = plt.subplots(1, 2, figsize=[FIGSIZE[0] * 2.132, FIGSIZE[1]], sharey='row')
            # Twin (right-hand) axes, sharing one y-scale across both panels.
            txes = [ax.twinx() for ax in axes]
            txes[0].get_shared_y_axes().join(*txes)
        else:
            fig, ax = plt.subplots(figsize=FIGSIZE)
            tx = ax.twinx()
        for n, x_val in enumerate(simulated[method]):
            if one:
                ax = axes[n]
                tx = txes[n]
            # Left axis: traffic (octets); right axis: flows and occupancy.
            plot_calc_sim(ax, calculated, simulated, method, x_val, 'octets')
            plot_calc_sim(tx, calculated, simulated, method, x_val, 'flows')
            plot_calc_sim(tx, calculated, simulated, method, x_val, 'fraction')
            ax.set_xscale('log')
            tx.set_yscale('log')
            ax.legend(loc=3)
            tx.legend()
            if method == 'sampling':
                # Sampling plots use an inverted x-axis (probability).
                ax.invert_xaxis()
                ax.set_xlabel(f'Sampling probability (sampling by {x_val})')
            else:
                ax.set_xlabel(f'Flow {x_val} threshold [{UNITS[x_val]}]')
            if not one:
                out = f'results_{method}_{x_val}'
                save_figure(fig, out)
                plt.close(fig)
        if one:
            out = f'results_{method}'
            save_figure(fig, out)
            plt.close(fig)
def plot_probability():
    """Plot the probability that a flow of a given length is sampled at
    least once, for sampling probabilities p = 0.1 and p = 0.01."""
    fig, ax = plt.subplots(1, 1, figsize=FIGSIZE)
    idx = np.geomspace(1, 1000, 512)
    # P(at least one packet sampled) = 1 - (1 - p)^length.
    # Bug fix: labels were malformed and the second was wrong
    # ('p 0.1$' and 'p = 0.1$' on the p = 0.01 curve); the ax.text
    # annotations below were already correct.
    ax.plot(idx, 1 - (1 - 0.1) ** idx, 'k-', lw=2,
            label='$p = 0.1$')
    ax.plot(idx, 1 - (1 - 0.01) ** idx, 'k-', lw=2,
            label='$p = 0.01$')
    ax.text(12, 0.6, '$p = 0.1$')
    ax.text(150, 0.6, '$p = 0.01$')
    ax.set_xlabel('Flow length [packets]')
    ax.set_ylabel('Total probability of being added to flow table')
    ax.set_xscale('log')
    save_figure(fig, 'probability')
    plt.close(fig)
def plot(dirs, one=False):
    """Load simulated results from `dirs`, compute calculated curves, plot all.

    Args:
        dirs: directories whose last path component is an x-value
            ('length' or 'size'), each containing one <method>.csv per
            simulated method.
        one: forwarded to plot_all() (single figure per method).
    """
    simulated = collections.defaultdict(dict)
    calculated = collections.defaultdict(dict)
    methods = set()
    for d in dirs:
        d = pathlib.Path(d)
        # The directory name encodes which flow attribute decisions use.
        x_val = d.parts[-1]
        assert x_val in X_VALUES
        for f in d.glob('*.csv'):
            method = f.stem
            assert method in METHODS
            methods.add(method)
            simulated[method][x_val] = pd.read_csv(str(f), index_col=0).dropna()
        # Calculated counterparts come from the mixture models on disk.
        for method, df in calculate('../mixtures/all/' + x_val, 1024, x_val=x_val, methods=methods).items():
            calculated[method][x_val] = df.dropna()
    plot_all(calculated, simulated, one)
    plot_usage(calculated, 'occupancy')
    plot_usage(calculated, 'operations')
    plot_traffic(calculated)
def main():
    """CLI entry point: parse arguments and render all plots."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('--one', action='store_true', help='plot in one file')
    parser.add_argument('files', nargs='+', help='csv_hist files to plot')
    args = parser.parse_args()
    with matplotlib_config(latex=False):
        plot_probability()
        plot(args.files, args.one)
if __name__ == '__main__':
main()
| 8,623 | 3,015 |
import environ
# Build paths inside the project like this: root(...)
from django.core.urlresolvers import reverse_lazy
env = environ.Env()
root = environ.Path(__file__) - 3
apps_root = root.path('iwg_blog')
BASE_DIR = root()
environ.Env.read_env()
# Base configurations
# --------------------------------------------------------------------------
ROOT_URLCONF = 'config.urls'
WSGI_APPLICATION = 'config.wsgi.application'
# Application definition
# --------------------------------------------------------------------------
CUSTOMIZE_APPS = [
'grappelli',
]
DJANGO_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'django.contrib.sitemaps',
'django.contrib.flatpages',
]
THIRD_PARTY_APPS = [
'mailing',
'django_markdown',
'django_select2',
'compressor',
'meta',
'watson',
'sorl.thumbnail',
'crispy_forms',
]
LOCAL_APPS = [
'iwg_blog.thumbnail_lazy',
'iwg_blog.taskapp',
'iwg_blog.blog',
'iwg_blog.grantee',
'iwg_blog.attachments',
'iwg_blog.utils',
'iwg_blog.flatpages',
'iwg_blog.sites',
]
INSTALLED_APPS = CUSTOMIZE_APPS + DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# Middleware configurations
# --------------------------------------------------------------------------
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.contrib.sites.middleware.CurrentSiteMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'iwg_blog.flatpages.middleware.FlatpageFallbackMiddleware',
]
# Template configurations
# --------------------------------------------------------------------------
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
root('iwg_blog', 'templates'),
],
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'iwg_blog.context_processors.google_analytics',
'iwg_blog.context_processors.watermarks',
'iwg_blog.context_processors.social_links',
],
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
},
},
]
# Compressor compilers configurations
# --------------------------------------------------------------------------
# django-compressor: compile SCSS to CSS through libsass at compression time.
COMPRESS_PRECOMPILERS = (
    ('text/x-scss', 'django_libsass.SassCompiler'),
)
# Fixture configurations
# --------------------------------------------------------------------------
FIXTURE_DIRS = [
    root('iwg_blog', 'fixtures'),
]
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
# --------------------------------------------------------------------------
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
# --------------------------------------------------------------------------
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Zurich'
USE_I18N = True
USE_L10N = True
USE_TZ = True  # store datetimes in UTC, render in TIME_ZONE
SITE_ID = 1  # django.contrib.sites: this deployment serves site pk=1
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
# --------------------------------------------------------------------------
STATIC_URL = '/static/'
STATIC_ROOT = root('static')
# NOTE(review): AppDirectoriesFinder is listed before FileSystemFinder, so an
# app's static file shadows a same-named file in STATICFILES_DIRS -- Django's
# default is the reverse order; confirm this is intentional.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'compressor.finders.CompressorFinder',
)
STATICFILES_DIRS = [
    root('iwg_blog', 'assets'),
]
MEDIA_URL = '/media/'
MEDIA_ROOT = root('media')
# Django mailing configuration
# --------------------------------------------------------------------------
MAILING_USE_CELERY = True  # deliver mail asynchronously via Celery workers
# Grappelli configuration
# --------------------------------------------------------------------------
GRAPPELLI_ADMIN_TITLE = 'IWG Portal'
# Django meta configuration
# --------------------------------------------------------------------------
META_USE_OG_PROPERTIES = True
META_USE_TWITTER_PROPERTIES = True
META_USE_GOOGLEPLUS_PROPERTIES = True
META_USE_SITES = True
META_SITE_NAME = 'IWG Portal'
# Fallback metadata used for pages that do not supply their own.
META_DEFAULTS = {
    'title': 'IWG Portal',
    'description': 'WHO\'s primary role is to direct international health within the United Nations\' system.',
    'image': 'blog/images/who-logo.jpg',
}
# Markdown configuration
# --------------------------------------------------------------------------
MARKDOWN_SET_PATH = 'vendor/django_markdown/sets'
MARKDOWN_SET_NAME = 'custom_markdown'
# Stock markdown extensions first, then the project's custom ones.
MARKDOWN_EXTENSIONS = [
    'markdown.extensions.nl2br',
    'markdown.extensions.smarty',
    'markdown.extensions.tables',
    'markdown.extensions.attr_list',
    'iwg_blog.markdown_extensions.big_link',
    'iwg_blog.markdown_extensions.images_gallery',
    'iwg_blog.markdown_extensions.embedding',
    'iwg_blog.markdown_extensions.urlize',
    'iwg_blog.markdown_extensions.images_caption',
    'iwg_blog.markdown_extensions.incut',
    'iwg_blog.markdown_extensions.thumbnailer',
    'iwg_blog.markdown_extensions.del_ins',
    'iwg_blog.markdown_extensions.cite',
]
MARKDOWN_EXTENSION_CONFIGS = {
    'markdown.extensions.smarty': {
        'smart_angled_quotes': True
    }
}
# Thumbnails configuration
# --------------------------------------------------------------------------
# sorl-thumbnail backed by Redis; thumbnails are generated lazily.
THUMBNAIL_KVSTORE = 'sorl.thumbnail.kvstores.redis_kvstore.KVStore'
THUMBNAIL_BACKEND = 'iwg_blog.thumbnail_lazy.backends.LazyThumbnailBackend'
THUMBNAIL_ENGINE = 'iwg_blog.thumbnail_lazy.engines.ThumbnailEngine'
# Auth configuration
# --------------------------------------------------------------------------
LOGIN_URL = reverse_lazy('admin:login')  # reuse the admin login form
CRISPY_TEMPLATE_PACK = 'uni_form'
| 6,828 | 2,187 |
from numba import jit
from math import log, sqrt, exp, erf
def blackScholesPut(s, k, rr, tt, sd):
    """Black-Scholes price of a European put option.

    Parameters:
        s:  spot price of the underlying
        k:  strike price
        rr: continuously-compounded risk-free rate
        tt: time to expiry in years
        sd: annualised volatility
    Returns the put premium; at expiry (tt == 0) the intrinsic payoff.
    """
    if tt == 0:
        # At expiry the put is worth its intrinsic value.  This collapses the
        # original three-way s/k comparison (which also divided by k for no
        # reason) into a single expression with identical results.
        return max(k - s, 0)
    d1 = (log(s / k) + (rr + 0.5 * sd ** 2) * tt) / (sd * sqrt(tt))
    d2 = d1 - sd * sqrt(tt)

    def _phi(x):
        # Standard normal CDF expressed via math.erf.
        return (1.0 + erf(x / sqrt(2.0))) / 2.0

    call = s * _phi(d1) - k * exp(-rr * tt) * _phi(d2)
    # Put-call parity: p = c - s + k * e^{-rr * tt}
    return k * exp(-rr * tt) - s + call
def blackScholesCall(s, k, rr, tt, sd):
    """Black-Scholes price of a European call option.

    Parameters:
        s:  spot price of the underlying
        k:  strike price
        rr: continuously-compounded risk-free rate
        tt: time to expiry in years
        sd: annualised volatility
    Returns the call premium; at expiry (tt == 0) the intrinsic payoff.
    """
    if tt == 0:
        # Intrinsic value at expiry; replaces the redundant three-way
        # s/k comparison of the original with an equivalent expression.
        return max(s - k, 0)
    d1 = (log(s / k) + (rr + 0.5 * sd ** 2) * tt) / (sd * sqrt(tt))
    d2 = d1 - sd * sqrt(tt)

    def _phi(x):
        # Standard normal CDF expressed via math.erf.
        return (1.0 + erf(x / sqrt(2.0))) / 2.0

    return s * _phi(d1) - k * exp(-rr * tt) * _phi(d2)
| 958 | 475 |
# Configuration for importing SAXS analysis results into MongoDB.
MONGODB_CONFIG = {
    # NOTE(review): machine-specific absolute path -- consider moving it to an
    # environment variable or a local override file.
    'ROOT': '/Users/scott/Documents/Work/bnl/MultiView/pyServer/data/saxs/',
    # mongo db set-up
    'DB': {
        #'HOST': 'visws.csi.bnl.gov',
        'HOST': 'localhost',
        'PORT': 27017,
        'NAME': 'multiview_saxs_v2',
        'COLLECTION': 'saxs_v2'
    },
    # parsing xml file
    'XML': {
        # root directory relative to ROOT
        'DIR': 'analysis_proper/results/',
        # sample name split
        'SAMPLE_SPLIT': '_th0.',
        # for same protocol, use COMPARE field
        'TIMESTAMP': 'save_timestamp',
        # root id field
        'ROOTID': 'name',
        # protocol id field
        'PID': 'name',
        # result id field
        'RID': 'name', # id
        'RVAL': 'value', # value
        # fields that will be ignored in a protocol
        'P_EXCLUDE': [
            'infile',
            'outfile',
            'output_dir',
            'runtime',
        ],
        # fields that will be excluded in a result
        'R_EXCLUDE': [
            'filebase',
            'file_access_time',
            'sample_name',
            'file_ctime',
            'file_size',
            'infile',
            'filepath',
            'filename',
            'fileext',
            'file_modification_time'
        ],
        # fields whose value will be considered as string
        'R_STRING': [
        ],
        # fields interpreted as timestamps
        'TIME_FIELD': [
            'sequence_ID',
            'start_timestamp',
            'end_timestamp',
            'save_timestamp'
        ]
    },
    # timing switches and the timestamp parse format
    'TIME': {
        'XML': False,
        'DB': False,
        'FORMAT': '%Y-%m-%d %H:%M:%S %f',
    },
    # tiff (raw data) related
    # CROP defines start row and col index (i.e. the first pixel at upper-left corner)
    'TIFF': {
        'SAVE': True,
        'EXT': ['', '.tiff'],
        'MODIFY': False,
        'DIR': 'tiff/',
        'CROP': {'ROW': 221, 'COL': 181},
        'RESIZE': 0.5
    },
    # pre-rendered thumbnail images
    'THUMBNAIL': {
        'SAVE': True,
        'DIR': 'analysis_proper/thumbnails/',
        'EXT': ['', '.jpg', '.png']
    }
}
| 2,088 | 711 |
# hw06_03
import random
def makesentence():
    """Print one randomly assembled 'Subject verb adverb.' sentence."""
    subjects = ['Dog', 'Cat', 'Monkey', 'Pig', 'Fox']
    verbs = ['walks', 'runs', 'jumps']
    advs = ['slowly', 'quickly']
    # Pick one word from each pool (same draw order as before) and join them.
    words = [random.choice(subjects), random.choice(verbs), random.choice(advs)]
    print(' '.join(words) + '.')
# Print five random sentences.
for i in range(5):
    makesentence()
# Sample output (varies per run):
'''
Cat walks quickly.
Fox jumps slowly.
Monkey jumps slowly.
Pig jumps slowly.
Monkey walks quickly.
'''
| 415 | 165 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2016 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import flask
from dci.api.v1 import api
from dci import decorators
from dci.common import exceptions as dci_exc
from dci.db import models2
from dci.db import declarative
from dci.common.schemas import check_and_get_args
@api.route("/audits", methods=["GET"])
@decorators.login_required
def get_logs(user):
    """Return the audit log entries (super admins only).

    Response shape: {"audits": [...], "_meta": {"count": N}}, where N is the
    total number of log rows before pagination arguments are applied.
    """
    # Authorize first: previously the args were validated and the query built
    # before the permission check, doing needless work and leaking argument
    # validation errors to unauthorized callers.
    if user.is_not_super_admin():
        raise dci_exc.Unauthorized()
    args = check_and_get_args(flask.request.args.to_dict())
    query = flask.g.session.query(models2.Log)
    # Total count is taken before limit/offset/sort so clients can paginate.
    nb_logs = query.count()
    query = declarative.handle_args(query, models2.Log, args)
    audits = [
        {
            "id": audit.id,
            "created_at": audit.created_at,
            "user_id": audit.user_id,
            "action": audit.action,
        }
        for audit in query.all()
    ]
    return flask.jsonify({"audits": audits, "_meta": {"count": nb_logs}})
| 1,482 | 495 |
# Common python package imports.
import sys
from io import BytesIO

from flask import Flask, jsonify, request, render_template

from fastai.vision import *
# Initialize the app and set a secret_key.
app = Flask(__name__)
app.secret_key = 'something_secret'
# Load the pickled model.
defaults.device = torch.device('cpu')  # force CPU inference for the fastai learner
path = '.'
learn = load_learner(path, file='dice.pkl')  # expects dice.pkl in the working directory
@app.route('/')
def docs():
    """Serve the API documentation page."""
    page = render_template('docs.html')
    return page
@app.route('/upload')
def upload():
    """Serve the image-upload form."""
    form_page = render_template('image.html')
    return form_page
@app.route('/uploader', methods=['GET', 'POST'])
def uploader():
    """Classify an uploaded image with the fastai learner.

    POST: reads the 'file' form field and returns the predicted class name.
    GET:  shows the upload form (previously this path returned None, which
          made Flask raise and answer with a 500).
    """
    if request.method == 'POST':
        f = request.files['file']
        img_bytes = f.read()
        # BytesIO/sys were previously unimported -- relied on fastai's
        # star-import; now imported explicitly at the top of the file.
        img = open_image(BytesIO(img_bytes))
        pred_class, pred_idx, outputs = learn.predict(img)
        print('Returning: ' + str(pred_class), file=sys.stderr)
        print('Index: ' + str(pred_idx))
        print('Outputs: ' + str(outputs))
        return str(pred_class)
    # Bug fix: serve the upload form on GET instead of returning None.
    return render_template('image.html')
if __name__ == '__main__':
    # NOTE(review): debug=True combined with binding 0.0.0.0 exposes the
    # Werkzeug debugger to the network -- disable debug in production.
    app.run(host='0.0.0.0', port=5000, debug=True)
| 1,017 | 346 |
# -*- coding: utf-8 -*-
from .lib import *
class Pix(JunoEntity):
    """Juno Pix (instant-payment) entity: field declarations for the
    static-QR-code payloads exchanged with the Juno API."""
    def __init__(self, **kw):
        # Renamed the receiver from the misleading `cls` to the conventional
        # `self`: this is an instance method, not a classmethod.
        self.__metadata__ = {}
        # FIELDS
        self.id = String(max=80)
        self.key = String(max=80)
        self.type = String(max=80)
        self.includeImage = Boolean()
        self.payloadInBase64 = String()
        self.imageInBase64 = String()
        self.qrcodeInBase64 = String()
        self.amount = Float()
        self.reference = String(max=80)
        self.additionalData = String(max=100)
        # ISO-8601 formatted timestamps, per the field declarations below.
        self.creationDateTime = DateTime(format="iso")
        self.ownershipDateTime = DateTime(format="iso")
        super().__init__(**kw)
| 651 | 234 |
# Generated by Django 3.2.6 on 2021-09-05 19:03
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the blog app: Author, Tag and Post."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Author',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=20)),
                ('last_name', models.CharField(max_length=50)),
                ('email', models.EmailField(max_length=254)),
            ],
        ),
        migrations.CreateModel(
            name='Tag',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('caption', models.CharField(max_length=20)),
            ],
        ),
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100)),
                ('excerpt', models.TextField(max_length=200)),
                # blank=True: presumably the slug is auto-generated in the model's save() -- confirm
                ('slug', models.SlugField(blank=True, max_length=100, unique=True)),
                ('image_name', models.CharField(max_length=100)),
                ('date', models.DateTimeField(auto_now_add=True)),
                # posts must contain at least 200 characters
                ('content', models.TextField(validators=[django.core.validators.MinLengthValidator(200)])),
                # RESTRICT: an author who still has posts cannot be deleted
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.RESTRICT, related_name='posts', to='blog.author')),
                # NOTE(review): related_name='tags' names the reverse accessor
                # on Tag 'tags' as well; 'posts' would read more naturally.
                ('tags', models.ManyToManyField(related_name='tags', to='blog.Tag')),
            ],
        ),
    ]
| 1,942 | 560 |
""" This program generates some of the data used by tests other data samples could have been
obtained simply by running the program.
The data lives in the worldengine-data repo: https://github.com/Mindwerks/worldengine-data
Note that we want to have common data in tests, instead of generating them on the fly
because the plate simulation steps do not provide the same results on all the platforms
"""
import os
from worldengine.plates import _plates_simulation
def main(tests_data_dir):
    """Run the deterministic plate simulation and pickle the resulting
    world into *tests_data_dir* as the shared test fixture."""
    world = _plates_simulation("Foo", 300, 200, 279)
    out_path = "%s/plates_279.world" % tests_data_dir
    world.to_pickle_file(out_path)
if __name__ == '__main__':
    # Resolve ../data relative to this script's own location, then
    # regenerate the shared fixture there.
    blessed_images_dir = os.path.dirname(os.path.realpath(__file__))
    tests_data_dir = os.path.abspath(os.path.join(blessed_images_dir, '../data'))
    main(tests_data_dir)
| 808 | 260 |
"""
Chainable fields for Django-Select2_.
.. _Django-Select2: https://github.com/applegrew/django-select2
Widgets
-------
**Available widgets:**
- :py:class:`.ChainedAutoSelect2Widget`,
- :py:class:`.PrepopulatedSelect2Widget`
Fields
------
**Available fields and mixins:**
- :py:class:`.ChainedAutoModelSelect2FieldMixin`,
- :py:class:`.ChainedAutoModelSelect2Field`,
- :py:class:`.RequestSpecificAutoModelSelect2Field`,
- :py:class:`.ChainedRequestSpecificAutoModelSelect2Field`
"""
import logging
# Module-level logger, named after the package.
logger = logging.getLogger(__name__)
__version__ = u"0.1"
| 569 | 202 |
from __future__ import annotations
from functools import partialmethod, wraps
from array import array
from typing import NewType, Union, Callable, Iterable, Generator, Type
from ruption import some, none
from take import take
__all__ = ('String',)
__version__ = '0.5.3'
def no_mut(f):
    """Decorator asserting that *f* does not mutate its receiver.

    In debug mode a shallow snapshot of ``self`` is taken before the call
    and compared afterwards; a mismatch raises ``AssertionError``.  Under
    ``python -O`` (``__debug__`` false) *f* is returned unwrapped, so
    release builds pay no copying cost.
    """
    if not __debug__:
        return f

    @wraps(f)
    def checked(self, *args, **kwargs):
        snapshot = self[:]  # copy taken before the call
        value = f(self, *args, **kwargs)
        assert self[:] == snapshot
        return value
    return checked
class String(array):
    """Mutable, change-friendly, feature-rich String.

    Implemented as a subclass of ``array('u')`` (an array of unicode
    characters), so text is stored unboxed and can be mutated in place.
    The API loosely mirrors Rust's ``String`` (push/pop/drain/retain/
    split_off/...); any unknown attribute falls back to the equivalent
    ``str`` method via ``__getattr__``.  ``some``/``none`` (package
    ``ruption``) stand in for Option-style returns.  Methods decorated
    with ``@no_mut`` assert in debug mode that the receiver is unchanged.

    NOTE(review): defining ``__eq__`` without ``__hash__`` makes instances
    unhashable -- confirm that is intended.  ``Option``/``Any`` appear only
    in annotations, which never evaluate thanks to
    ``from __future__ import annotations``.
    """
    @staticmethod
    def __new__(cls, o=None, encoding=None) -> Self:
        # Dispatch on the input: falsy -> empty String; then str,
        # array('u'), bytes (+encoding), or any iterable of characters.
        if not o:
            return super().__new__(cls, 'u')
        o_cls_inh = set(o.__class__.__mro__)
        if str in o_cls_inh:
            return cls.from_str(o)
        elif array in o_cls_inh:
            return cls.from_unicode_array(o)
        elif bytes in o_cls_inh:
            return cls.from_encoding(o, encoding)
        try:
            iterable = iter(o)
        except TypeError:
            raise TypeError(f'{cls.__qualname__} cannot be created from {o.__class__}')
        else:
            return cls.from_iterable(iterable)
    # Names of non-dunder str methods eligible for proxying (plus the 3.9+
    # removeprefix/removesuffix, which this class also implements natively).
    _str_attrs = set(_ for _ in dir(str) if not _.startswith('__')).union(set(('removeprefix', 'removesuffix')))
    def __getattr__(self, name):
        # Fallback: delegate unknown attributes to str on a converted copy.
        if name in self._str_attrs:
            return lambda *args, **kwargs: getattr(str, name)(str(self), *args, **kwargs)
        raise AttributeError(name)
    @classmethod
    def new(cls) -> Self:
        # Alternate constructor: a new empty String.
        return super().__new__(cls, 'u')
    # --- comparisons: accept either another String or a plain str ---
    @no_mut
    def __eq__(self, _) -> bool:
        if isinstance(_, self.__class__):
            return super().__eq__(_)
        elif isinstance(_, str):
            return self.as_str() == _
        return False
    @no_mut
    def __ne__(self, _) -> bool:
        if isinstance(_, self.__class__):
            return super().__ne__(_)
        elif isinstance(_, str):
            return self.as_str() != _
        return True
    @no_mut
    def __ge__(self, _):
        if isinstance(_, self.__class__):
            return super().__ge__(_)
        elif isinstance(_, str):
            return str(self) >= _
        else:
            raise TypeError("'>=' not supported between instances of 'String' and", repr(_.__class__.__name__))
    @no_mut
    def __le__(self, _):
        if isinstance(_, self.__class__):
            return super().__le__(_)
        elif isinstance(_, str):
            return str(self) <= _
        else:
            raise TypeError("'<=' not supported between instances of 'String' and", repr(_.__class__.__name__))
    @no_mut
    def __gt__(self, _):
        if isinstance(_, self.__class__):
            return super().__gt__(_)
        elif isinstance(_, str):
            return str(self) > _
        else:
            raise TypeError("'>' not supported between instances of 'String' and", repr(_.__class__.__name__))
    @no_mut
    def __lt__(self, _):
        if isinstance(_, self.__class__):
            return super().__lt__(_)
        elif isinstance(_, str):
            return str(self) < _
        else:
            raise TypeError("'<' not supported between instances of 'String' and", repr(_.__class__.__name__))
    # --- alternate constructors ---
    @classmethod
    def from_str(cls, string: str) -> Self:
        new = super().__new__(cls, 'u')
        new.push_str(string)
        return new
    @classmethod
    def from_iterable(cls, iterable: Iterable) -> Self:
        new = super().__new__(cls, 'u')
        new.extend(iterable)
        return new
    @classmethod
    def from_unicode_array(cls, uar: array[u]) -> Self:
        new = super().__new__(cls, 'u')
        new[:] = uar
        return new
    @classmethod
    def from_encoding(cls, bytes: bytes, encoding: str) -> Self:
        return cls.from_str(bytes.decode(encoding))
    from_utf8 = partialmethod(from_encoding, encoding='utf-8')
    def push(self, _: u):
        # Append a single character.
        self.append(_)
    def push_str(self, _: str):
        # Append every character of a str.
        self.fromunicode(_)
    @no_mut
    def __str__(self) -> str:
        return self.tounicode()
    to_str = as_str = __str__
    @no_mut
    def __repr__(self) -> str:
        return f'String("{self}")'
    len = lambda self: self.__len__()
    length = property(len)
    @no_mut
    def as_bytes(self, encoding) -> [int]:
        # Encoded byte values as a list of ints.
        return list(bytearray(str(self), encoding))
    def truncate(self, new_len: int):
        # Shorten in place to at most new_len characters.
        self[:] = self[:new_len]
    def pop(self) -> Option[u]:
        # Remove and return the last character, or `none` when empty.
        try:
            return some(super().pop())
        except IndexError:
            return none
    def remove(self, idx: int) -> Option[u]:
        # Remove and return the character at idx, or `none` if out of range.
        try:
            _ = self[idx]
            del self[idx]
            return some(_)
        except IndexError:
            return none
    def retain(self, f: Callable[[u], bool]):
        # Keep only the characters for which f is true (in place).
        self._set_store_from_iterable((_ for _ in self if f(_)))
    filter = retain
    def map(self, f: Callable[[u], u]):
        # Replace each character with f(character) (in place).
        self._set_store_from_iterable(map(f, self[:]))
    def _check_bounds(self, idx: int):
        # Valid insertion points are 0..len inclusive.
        if not (0 <= idx <= len(self)):
            raise IndexError
    def _check_range_bounds(self, rng: range):
        for _ in rng:
            self._check_bounds(_)
    def insert(self, idx: int, u: u):
        self._check_bounds(idx)
        super().insert(idx, u)
    def insert_str(self, idx: int, string: str):
        # Insert a str character by character starting at idx.
        for i, s in enumerate(string):
            self.insert(idx + i, s)
    @no_mut
    def is_empty(self) -> bool:
        return not bool(self)
    def split_off(self, at: int) -> Self:
        # Truncate self at `at` and return the removed tail as a new String.
        _ = self.take_from(at)
        self.truncate(at)
        return _
    def take_from(self, idx: int) -> Self:
        # Copy of the tail starting at idx (self unchanged).
        self._check_bounds(idx)
        return self.from_unicode_array(self[idx:])
    def clear(self):
        self[:] = self[:0]
    def drain(self, rng: range) -> Self:
        # Remove the characters at the indices of rng and return them.
        # The `r - i` offset compensates for earlier removals shifting
        # later indices left (assumes ascending rng).
        self._check_range_bounds(rng)
        _ = self.new()
        for i, r in enumerate(rng):
            _.push(self.remove(r-i).unwrap())
        return _
    def replace_range(self, rng: range, replace_with: str):
        # Replace the span covered by rng (step must be 1) with replace_with.
        self._check_range_bounds(rng)
        if rng.step != 1:
            raise TypeError(f"Step in {rng} must be 1. Period.")
        self.drain(rng)
        self.insert_str(rng[0], replace_with)
    def _set_store_from_iterable(self, iterable: Iterable):
        self[:] = self.from_iterable(iterable)
    @no_mut
    def chars(self) -> Iterable[u]:
        return iter(self)
    @no_mut
    def char_indices(self) -> Iterable[(int, u)]:
        return enumerate(self)
    @no_mut
    def copy(self) -> Self:
        new = self.new()
        new[:] = self[:]
        return new
    @no_mut
    def __add__(self, _) -> Self:
        # Concatenation with another String or str; returns a new String.
        if isinstance(_, self.__class__):
            return take(self.copy()).extend(_).unwrap()
        elif isinstance(_, str):
            return take(self.copy()).push_str(_).unwrap()
        else:
            raise NotImplementedError(_)
    @no_mut
    def __radd__(self, _) -> Self:
        # str + String: prepend the str to a copy.
        if isinstance(_, str):
            return take(self.copy()).insert_str(0, _).unwrap()
        else:
            raise NotImplementedError(_)
    def strip_prefix(self, prefix: str, recurr: bool = False):
        # Remove prefix in place if present; repeat when recurr is true.
        if len(prefix) > len(self):
            return
        for this, opposite in zip(self, prefix):
            if this != opposite:
                break
        else:
            self[:] = self[len(prefix):]
            if recurr:
                self.strip_prefix(prefix, True)
    removeprefix = strip_prefix
    def strip_suffix(self, suffix: str, recurr: bool = False):
        # Remove suffix in place if present; repeat when recurr is true.
        if len(suffix) > len(self):
            return
        for this, opposite in zip(self[-len(suffix):], suffix):
            if this != opposite:
                break
        else:
            self[:] = self[:len(self) - len(suffix)]
            if recurr:
                self.strip_suffix(suffix, True)
    removesuffix = strip_suffix
    @no_mut
    def __mul__(self, other: int) -> Self:
        if isinstance(other, int):
            return self.from_unicode_array(self[:]*other)
        else:
            raise NotImplementedError
    repeat = __rmul__ = __mul__
    @classmethod
    def has_custom_impl(cls, methodname: str) -> bool:
        # True when this class overrides the named str method itself
        # (rather than proxying through __getattr__).
        if methodname in cls._str_attrs:
            return methodname in dir(cls)
        else:
            raise AttributeError(f'{str} has no method named "{methodname}" ')
    @no_mut
    def split_at(self, mid: int) -> (Self, Self):
        first = self.from_unicode_array(self[:mid])
        last = self.from_unicode_array(self[mid:])
        return first, last
    @no_mut
    def lines(self) -> [str]:
        return self.splitlines()
    @no_mut
    def __contains__(self, _: Union[array[u], str, Self]) -> bool:
        # NOTE(review): the annotation mentions array[u], but a bare array
        # argument falls through to the TypeError below -- confirm intent.
        if isinstance(_, str):
            return _ in str(self)
        elif isinstance(_, self.__class__):
            return str(_) in str(self)
        raise TypeError(f"'in <String>' requires str/String/array[u] as left operand, not {type(_).__qualname__}")
    contains = __contains__
    @no_mut
    def split_inclusive(self, sep: u) -> Generator[str]:
        # Like split, but each chunk keeps its trailing separator.
        assert len(sep) == 1
        def incapsulated_generator():
            prev = 0
            for i, _ in enumerate(self, 1):
                if _ == sep:
                    yield self[prev:i].tounicode()
                    prev = i
            if prev != len(self):
                yield self[prev:].tounicode()
        return incapsulated_generator()
    def collect(self, _: Type) -> Any:
        # Feed self into an arbitrary constructor, e.g. collect(list).
        return _(self)
    @no_mut
    def char_index(self, u: u) -> Option[int]:
        # Index of the first occurrence, or `none`.
        try:
            return some(self.index(u))
        except ValueError:
            return none
    @no_mut
    def rchar_index(self, u: u) -> Option[int]:
        # Index of the last occurrence, or `none`.
        try:
            return some(len(self) - 1 - self[::-1].index(u))
        except ValueError:
            return none
    @no_mut
    def split_once(self, u: u) -> Option[(str, str)]:
        # Split around the first occurrence of u, dropping the separator.
        opt_idx = self.char_index(u)
        if opt_idx is none: return none
        first, last = self.split_at(opt_idx.unwrap())
        last.remove(0)
        return some((str(first), str(last)))
    @no_mut
    def rsplit_once(self, u: u) -> Option[(str, str)]:
        # Split around the last occurrence of u, dropping the separator.
        opt_idx = self.rchar_index(u)
        if opt_idx is none: return none
        first, last = self.split_at(opt_idx.unwrap())
        last.remove(0)
        return some((str(first), str(last)))
    def reverse(self):
        self[:] = self[::-1]
    def trim(self):
        # Strip ASCII spaces from both ends, in place.
        self.trimr()
        self.triml()
    def trimr(self):
        self.removesuffix('\x20', recurr=True)
    def triml(self):
        self.removeprefix('\x20', recurr=True)
    def triml_num(self, num: int):
        # Drop num characters from the left.
        assert num >= 0
        self[:] = self[num:]
    def trimr_num(self, num: int):
        # Drop num characters from the right (clamped at empty).
        assert num >= 0
        end = len(self)-num
        self[:] = self[:end if end > 0 else 0]
    def trim_num(self, num: int):
        # Drop num characters from each end.
        assert num >= 0
        self.trimr_num(num)
        self.triml_num(num)
# Defined after the class body: annotations above resolve lazily thanks to
# `from __future__ import annotations` (PEP 563), so forward use is fine.
Self = String
u = NewType('u', str) # unicode character (a single code point)
import re
import os
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from urllib import urlencode
import hashlib
import csv
from product_spiders.items import Product, ProductLoader
from scrapy import log
HERE = os.path.abspath(os.path.dirname(__file__))
class ConradSpider(BaseSpider):
    """Crawl conrad.fr category pages and extract promo-priced products.

    NOTE(review): written against a legacy, Python-2-era Scrapy API
    (BaseSpider, HtmlXPathSelector, urljoin_rfc) -- keep the runtime
    environment pinned accordingly.
    """
    name = 'conrad.fr'
    allowed_domains = ['www.conrad.fr', 'conrad.fr']
    start_urls = ('http://www.conrad.fr/outillage_mesure_c_53207',
                  'http://www.conrad.fr/equipement_maison_c_52080')
    def parse(self, response):
        """Follow sub-categories and pagination, then scrape the page's products."""
        if not isinstance(response, HtmlResponse):
            return
        hxs = HtmlXPathSelector(response)
        # categories
        categories = hxs.select(u'//ul[@class="sousCat" or @class="categorie"]//a/@href').extract()
        for url in categories:
            url = urljoin_rfc(get_base_url(response), url)
            yield Request(url)
        # pagination
        next_page = hxs.select(u'//ul[@class="pages"]//a[@title="suivant"]/@href').extract()
        if next_page:
            next_page = urljoin_rfc(get_base_url(response), next_page[0])
            yield Request(next_page)
        # products
        for product in self.parse_product(response):
            yield product
    def parse_product(self, response):
        """Yield one loaded Product item per listing row."""
        if not isinstance(response, HtmlResponse):
            return
        hxs = HtmlXPathSelector(response)
        # [1:] skips the table header row.
        products = hxs.select(u'//table[@class="list"]//tr')[1:]
        for product in products:
            product_loader = ProductLoader(item=Product(), selector=product)
            url = product.select(u'.//h3/a/@href').extract()
            url = urljoin_rfc(get_base_url(response), url[0])
            product_loader.add_value('url', url)
            product_loader.add_xpath('name', u'.//h3/a/text()')
            # keep only the numeric part of the promo price, e.g. "12.34"
            product_loader.add_xpath('price', u'.//p[@class="prixPromo"]/text()',
                                     re=u'([\d\.]+)')
            yield product_loader.load_item()
#python3 code
def count(i, s):
    """Return the number of '<' characters in s at or after index i.

    Replaces the manual counting loop with str.count and its start
    argument -- same result, C-speed, no slicing copy.
    """
    return s.count("<", i)
def higher(s):
    """Return twice the number of ('>', '<') pairs where '>' precedes '<'.

    Original implementation re-counted the '<' suffix for every '>'
    (O(n^2)); this single left-to-right pass tracks how many '>' have been
    seen and credits each '<' with 2 per preceding '>', which sums the
    same pairs in O(n).  Also removes the dependency on count().
    """
    total = 0
    gt_seen = 0  # '>' characters encountered so far
    for ch in s:
        if ch == ">":
            gt_seen += 1
        elif ch == "<":
            total += gt_seen * 2
    return total
def solution(s):
    """Entry point: delegate the computation to higher()."""
    return higher(s)
| 357 | 140 |
from fastapi import APIRouter
from .text_classification import api as text_classification
from .token_classification import api as token_classification
# Aggregate the per-task routers (text / token classification) into one
# APIRouter so the application mounts them all with a single include.
router = APIRouter()
for task_api in [text_classification, token_classification]:
    router.include_router(task_api.router)
| 281 | 77 |