content
stringlengths 0
1.05M
| origin
stringclasses 2
values | type
stringclasses 2
values |
|---|---|---|
from django.core.exceptions import ValidationError
from django.core.validators import EmailValidator
from django.utils.translation import gettext_lazy as _
def validate_emails_str(emails: str):
    """Validate a comma-separated list of email addresses.

    Empty segments (e.g. from trailing commas) are skipped. Surrounding
    whitespace is stripped, so "a@x.com, b@y.com" now validates instead of
    failing on the leading space of the second address.

    :param emails: comma-separated email addresses
    :raises django.core.exceptions.ValidationError: on the first invalid address
    """
    validate = EmailValidator()
    for email in emails.split(","):
        email = email.strip()  # tolerate spaces around the commas
        if not email:
            continue
        validate(email)
|
nilq/baby-python
|
python
|
import json
class Kayitlar:
    """A tiny JSON-file-backed record store ("kayitlar" is Turkish for "records").

    Records live in stdData.json as a dict mapping stringified integer
    indices to per-person dicts (keys seen here: adi, soyadi, sehir, firma,
    mail, tel, dogum_tarihi, gecmis).
    """

    # single backing file used by every persistence method
    DATA_FILE = "stdData.json"

    def __init__(self):
        # next free integer index (refreshed by jsonToDict) and a scratch dict
        self.count = 0
        self.dct = {}

    def dictToJson(self, data):
        """Serialize a dict to a JSON string."""
        return json.dumps(data)

    def jsonToDict(self, data):
        """Parse a JSON string into a dict and refresh ``self.count``.

        ``self.count`` becomes (max integer key, floored at 0) + 1 so it can
        serve as the next free record index. Returns {} when ``data`` is not
        a valid JSON object (including None, which readFile returns for a
        missing file) — mirrors the original bare-except behaviour, but only
        for the error types that can actually occur here.
        """
        self.count = 0
        try:
            parsed = json.loads(data)
            for key in parsed.keys():
                if int(key) > self.count:
                    self.count = int(key)
        except (TypeError, ValueError, AttributeError):
            # TypeError: data is None / not str-like
            # ValueError: malformed JSON (JSONDecodeError) or non-numeric key
            # AttributeError: valid JSON that is not an object (no .keys())
            return {}
        self.count += 1
        return parsed

    def readFile(self, filePath):
        """Return the full contents of ``filePath``, or None if it is missing."""
        try:
            # "with" guarantees the handle is closed even if read() raises
            with open(filePath, "r") as f:
                return f.read()
        except FileNotFoundError:
            return None

    def writeFile(self, data, filePath):
        """Create/overwrite ``filePath`` with ``data``."""
        with open(filePath, "w") as f:
            f.write(data)

    def _findKayit(self, records, name, surname):
        """Return the key of the first record whose adi/soyadi match
        (case-insensitive), or None when no record matches."""
        for key in records.keys():
            rec = records[key]
            if rec["adi"].lower() == name.lower() and rec["soyadi"].lower() == surname.lower():
                return key
        return None

    def addKayitlar(self, dct):
        """Append ``dct`` as a new record keyed by the next free index.

        NOTE: the new key is an int while keys loaded from disk are strings —
        preserved from the original behaviour (json.dumps stringifies it on save).
        """
        records = {}
        existing = self.readFile(self.DATA_FILE)
        if existing:
            records = self.jsonToDict(existing)  # also refreshes self.count
        records[self.count] = dct
        self.writeFile(self.dictToJson(records), self.DATA_FILE)

    def deleteKayitlar(self, name, surname):
        """Delete the first record matching name+surname and rewrite the file."""
        records = self.jsonToDict(self.readFile(self.DATA_FILE))
        key = self._findKayit(records, name, surname)
        if key is not None:
            del records[key]
        self.writeFile(self.dictToJson(records), self.DATA_FILE)

    def viewKayitlar(self, name, surname):
        """Print the first record matching name+surname, field by field."""
        records = self.jsonToDict(self.readFile(self.DATA_FILE))
        key = self._findKayit(records, name, surname)
        if key is None:
            return
        rec = records[key]
        # labels kept byte-identical to the original output
        # (including the "Şehit"/"firma" spellings)
        print("Adı:", rec["adi"], "\nSoyadı:", rec["soyadi"], "\nYaşadığı Şehit:", rec["sehir"],
              "\nfirma:", rec["firma"], "\nMail:", rec["mail"], "\nTelefon:", rec["tel"],
              "\nDoğum Tarihi:", rec["dogum_tarihi"])
        for entry in rec["gecmis"]:
            print("Gecmis:", entry)

    def uptadeKayitlar(self, name, surname, data):
        """Replace the first record matching name+surname with ``data``.

        (Method name typo — "uptade" — kept for caller compatibility.)
        """
        records = self.jsonToDict(self.readFile(self.DATA_FILE))
        key = self._findKayit(records, name, surname)
        if key is not None:
            records[key] = data
        self.writeFile(self.dictToJson(records), self.DATA_FILE)

    def allKayitlar(self):
        """Print every stored record, separated by a '#' rule."""
        records = self.jsonToDict(self.readFile(self.DATA_FILE))
        for key in records.keys():
            rec = records[key]
            print("\n", "#" * 40)
            print("Adı:", rec["adi"], "\nSoyadı:", rec["soyadi"], "\nYaşadığı Şehit:",
                  rec["sehir"],
                  "\nFirma:", rec["firma"], "\nMail:", rec["mail"], "\nTelefon:", rec["tel"],
                  "\nDoğum Tarihi:", rec["dogum_tarihi"])
            for entry in rec["gecmis"]:
                print("Gecmis:", entry)
|
nilq/baby-python
|
python
|
import argparse
from pathlib import Path
import torch
import torch.nn.functional as F
from data.data_loader import ActivDataset, loader
from models.ete_waveform import EteWave
from models.post_process import as_seaquence
from optimizer.radam import RAdam
torch.manual_seed(555)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("device:", device)
def main(args):
    """Build the model, optimizer and datasets from CLI args, then train and test."""
    model = EteWave(args.n_class).to(device)
    if Path(args.resume_model).exists():
        print("load model:", args.resume_model)
        model.load_state_dict(torch.load(args.resume_model))

    # setup optimizer
    optimizer = RAdam(model.parameters())

    def read_pointer_file(pointer_path):
        # one data-file name per line
        with open(pointer_path) as fp:
            return [line.rstrip() for line in fp]

    train_names = read_pointer_file(args.train_data_file_pointer_path)
    test_names = read_pointer_file(args.test_data_file_pointer_path)

    train_dataset = ActivDataset(train_names, args.root_dir,
                                 seq_len=args.train_seq_len,
                                 time_step=args.time_step,
                                 is_train=True)
    test_dataset = ActivDataset(test_names, args.root_dir,
                                seq_len=args.test_seq_len,
                                time_step=args.time_step,
                                is_train=False, test_in_train=True)

    train_loader = loader(train_dataset, args.batch_size)
    test_loader = loader(test_dataset, 1, shuffle=False)

    train(args, model, optimizer, train_loader)
    test(args, model, test_loader)
def l1_loss(model, reg=1e-4):
    """Return ``reg * sum(|w|)`` over all non-bias parameters of ``model``.

    The accumulator is created on the same device as the model's parameters,
    removing the dependency on the module-level ``device`` global (behaviour
    is unchanged for models already moved to ``device``).

    :param model: any ``torch.nn.Module``
    :param reg: L1 regularisation coefficient
    :return: scalar tensor with the weighted L1 penalty
    """
    weights = [param for name, param in model.named_parameters()
               if 'bias' not in name]
    dev = weights[0].device if weights else None
    loss = torch.zeros((), device=dev)
    for param in weights:
        loss = loss + reg * torch.sum(torch.abs(param))
    return loss
def train(args, model, optimizer, data_loader):
    """Train ``model`` for ``args.epochs`` epochs over ``data_loader``.

    Combines a per-timestep sequence classification loss with the TATC
    sub-module's loss, up-weighting whichever head currently has the larger
    normalized (mean + 3*std) loss. Checkpoints every 20 epochs and once
    more after the final epoch.
    """
    model.train()
    for epoch in range(args.epochs):
        for i, (l_data, l_target, l_lack_labels) in enumerate(data_loader):
            l_data = l_data.to(device)
            l_target = l_target.to(device)
            l_lack_labels = l_lack_labels.to(device)
            # _, in_ch, _ = l_data.shape
            model.zero_grad()
            optimizer.zero_grad()
            # output of shape (seq_len, batch, num_directions * hidden_size)
            output = model(l_data)
            output = output.reshape([-1, args.n_class])
            targets = l_target.view(-1)
            # element-wise loss (reduction='none') so mean/std can be taken;
            # -1 targets are padding and are ignored
            series_loss = F.cross_entropy(output,
                                          targets,
                                          ignore_index=-1,
                                          reduction='none')
            with torch.no_grad():
                # robust scale of the loss distribution (mean + 3 sigma),
                # used only for relative weighting below
                N_series_loss = series_loss.detach().mean() + 3*series_loss.detach().std()
            series_loss = series_loss.mean()
            inf_labels = output.argmax(1)
            # feed the inferred per-step labels back into the TATC head
            model.tatc.select_data_per_labels(l_data, inf_labels, device)
            # tatc out shape is (n_non_zero_labels*n_batch, 2)
            tatc_output = model.tatc()
            tatc_loss = F.cross_entropy(tatc_output,
                                        l_lack_labels.reshape(-1),
                                        ignore_index=-1,
                                        reduction='none')
            with torch.no_grad():
                N_tatc_loss = tatc_loss.detach().mean() + 3*tatc_loss.detach().std()
            tatc_loss = tatc_loss.mean()
            # scale up the currently-dominant loss so both heads keep
            # learning at comparable rates
            if N_tatc_loss > N_series_loss:
                loss = series_loss + N_tatc_loss/N_series_loss*tatc_loss
            else:
                loss = N_series_loss/N_tatc_loss*series_loss + tatc_loss
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.)
            optimizer.step()
            print('[{}/{}][{}/{}] Loss: {:.4f}'.format(
                epoch, args.epochs, i,
                len(data_loader), loss.item()))
        # do checkpointing (same path every time, so only the latest survives)
        if epoch % 20 == 0:
            torch.save(model.state_dict(),
                       '{}/model_ckpt.pth'.format(args.out_dir))
    torch.save(model.state_dict(),
               '{}/model_ckpt.pth'.format(args.out_dir))
def test(args, model, data_loader):
    """Evaluate ``model`` on ``data_loader``; print predictions and accuracy.

    Accumulates both the per-step sequence loss and the TATC head's loss,
    then reports the average loss and timestep-level accuracy.
    """
    model.eval()
    test_loss = 0
    correct = 0
    total_len = 0
    with torch.no_grad():
        for i_batch, (l_data, l_target, l_lack_labels) in enumerate(data_loader):
            l_data = l_data.to(device)
            l_target = l_target.to(device)
            l_lack_labels = l_lack_labels.to(device)
            total_len += l_target.shape[-1]
            output = model(l_data)
            output = output.view([-1, output.shape[-1]])
            targets = l_target.view(-1)
            test_loss += F.cross_entropy(output, targets, ignore_index=-1).item()
            pred = output.argmax(1)
            model.tatc.select_data_per_labels(l_data, pred, device)
            tatc_output = model.tatc()
            test_loss += F.cross_entropy(tatc_output, l_lack_labels.reshape(-1)).item()
            # post-process raw argmax predictions into contiguous runs
            # (presumably suppresses single-frame label flicker — see as_seaquence)
            pred = as_seaquence(pred.detach(), ahead=7)
            correct += pred.eq(targets.view_as(pred)).sum().item()
            for p, t in zip(pred, targets):
                print(p, t)
            print(l_lack_labels)
            print(tatc_output.argmax(1))
    # NOTE(review): loss is summed per batch but averaged over dataset items;
    # equivalent only because the test loader uses batch_size=1 — confirm.
    test_loss /= len(data_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'
          .format(test_loss, correct, total_len, 100. * correct / total_len))
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # NOTE: some flags mix '_' and '-' (e.g. --train_seq-len); argparse maps
    # both to underscores, so the attribute names used in main() still work.
    # Flag spellings are kept as-is for CLI backward compatibility.
    parser.add_argument('--root_dir', default='./data/train', help='path to dataset')
    parser.add_argument('--n-class', type=int, default=6, help='number of class')
    parser.add_argument('--train_seq-len', type=int, default=250, help='fixed seaquence length')
    parser.add_argument('--test_seq-len', type=int, default=200, help='fixed seaquence length')
    parser.add_argument('--time-step', type=float, default=.25, help='fixed time interbal of input data')
    parser.add_argument('--train-data-file-pointer-path', default='./data/train_data_file_pointer', help='path to train data file pointer')
    # NOTE(review): default points at the *train* pointer file — confirm intended
    parser.add_argument('--test-data-file-pointer-path', default='./data/train_data_file_pointer', help='path to test data file pointer')
    parser.add_argument('--resume-model', default='./results/_tatc_ckpt.pth', help='path to trained model')
    parser.add_argument('--workers', type=int, help='number of data loading workers', default=4)
    parser.add_argument('--batch-size', type=int, default=12, help='input batch size')  # seq_len=200 -> 12,
    parser.add_argument('--epochs', type=int, default=100, help='number of epochs to train for')
    parser.add_argument('--out-dir', default='./results', help='folder to output data and model checkpoints')
    args = parser.parse_args()
    # fixed: the original line ended with a stray trailing comma, which
    # wrapped the mkdir() result in a throwaway 1-tuple
    Path(args.out_dir).mkdir(parents=True, exist_ok=True)
    main(args)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
__project__ = 'leetcode'
__file__ = '__init__.py'
__author__ = 'king'
__time__ = '2020/1/7 12:03'
_ooOoo_
o8888888o
88" . "88
(| -_- |)
O\ = /O
____/`---'\____
.' \\| |// `.
/ \\||| : |||// \
/ _||||| -:- |||||- \
| | \\\ - /// | |
| \_| ''\---/'' | |
\ .-\__ `-` ___/-. /
___`. .' /--.--\ `. . __
."" '< `.___\_<|>_/___.' >'"".
| | : `- \`.;`\ _ /`;.`/ - ` : | |
\ \ `-. \_ __\ /__ _/ .-` / /
======`-.____`-.___\_____/___.-`____.-'======
`=---='
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
佛祖保佑 永无BUG
"""
"""
难度:中等
给定一个含有n个正整数的数组和一个正整数s ,找出该数组中满足其和 ≥ s 的长度最小的连续子数组。
如果不存在符合条件的连续子数组,返回 0。
示例:
输入: s = 7, nums = [2,3,1,2,4,3]
输出: 2
解释: 子数组[4,3]是该条件下的长度最小的连续子数组。
进阶:
如果你已经完成了O(n) 时间复杂度的解法, 请尝试O(n log n) 时间复杂度的解法。
"""
class Solution(object):
    def minSubArrayLen(self, s, nums):
        """Return the length of the shortest contiguous subarray with sum >= s, or 0.

        Brute force over start indices with a running sum: O(n^2) time, O(1)
        extra space (the original recomputed sum(nums[i:j]) per pair, O(n^3)).
        Bug fixed: the original initialised its sentinel to a hard-coded 1000,
        silently returning wrong answers for arrays longer than 1000 elements;
        use len(nums) + 1 instead.

        :type s: int
        :type nums: List[int]
        :rtype: int
        """
        size = len(nums)
        best = size + 1  # sentinel: longer than any real subarray
        for i in range(size):
            running = 0
            for j in range(i, size):
                running += nums[j]
                if running >= s:
                    best = min(best, j - i + 1)
                    break  # any longer window starting at i cannot be better
        return 0 if best > size else best

    def minSubArrayLen_2(self, s, nums):
        """Sliding-window variant: O(n) time, O(1) space.

        Same sentinel fix as above (the original used a hard-coded 10000).

        :type s: int
        :type nums: List[int]
        :rtype: int
        """
        size = len(nums)
        best = size + 1
        left = 0
        window_sum = 0
        for right in range(size):
            window_sum += nums[right]
            # shrink from the left while the window still satisfies the sum
            while window_sum >= s:
                best = min(best, right - left + 1)
                window_sum -= nums[left]
                left += 1
        return 0 if best > size else best
# Quick manual smoke checks, printed when the module is run directly.
print(Solution().minSubArrayLen(11, [1, 2, 3, 4, 5]))
print(Solution().minSubArrayLen(7, [2, 3, 1, 2, 4, 3]))
print(Solution().minSubArrayLen_2(7, [2, 3, 1, 2, 4, 3]))
|
nilq/baby-python
|
python
|
import torch
def label_to_levels(label, num_classes, dtype=torch.float32):
    """Convert one integer class label to its extended binary level vector.

    The result has ``num_classes - 1`` entries: the first ``label`` entries
    are 1, the rest 0 (ordinal/CORAL encoding). ``label`` may be a plain int
    or a scalar tensor and must not exceed ``num_classes - 1``.

    >>> label_to_levels(3, num_classes=5)
    tensor([1., 1., 1., 0.])
    >>> label_to_levels(0, num_classes=5)
    tensor([0., 0., 0., 0.])
    """
    if not label <= num_classes-1:
        raise ValueError('Class label must be smaller or '
                         'equal to %d (num_classes-1). Got %d.'
                         % (num_classes-1, label))

    int_label = label.item() if isinstance(label, torch.Tensor) else label
    ones = [1] * int_label
    zeros = [0] * (num_classes - 1 - int_label)
    return torch.tensor(ones + zeros, dtype=dtype)
def levels_from_labelbatch(labels, num_classes, dtype=torch.float32):
    """Convert a batch of integer class labels to extended binary vectors.

    Each label is expanded via :func:`label_to_levels`; the results are
    stacked into a tensor of shape ``(num_labels, num_classes - 1)``.

    >>> levels_from_labelbatch(labels=[2, 1, 4], num_classes=5)
    tensor([[1., 1., 0., 0.],
            [1., 0., 0., 0.],
            [1., 1., 1., 1.]])
    """
    return torch.stack([
        label_to_levels(label=lbl, num_classes=num_classes, dtype=dtype)
        for lbl in labels
    ])
def proba_to_label(probas):
    """Convert extended-binary probabilities to integer class labels.

    Each row of ``probas`` (shape ``(n_examples, n_labels)``) is thresholded
    at 0.5; the predicted label is the count of entries above the threshold.

    >>> probas = torch.tensor([[0.934, 0.861, 0.323, 0.492, 0.295],
    ...                        [0.496, 0.485, 0.267, 0.124, 0.058],
    ...                        [0.985, 0.967, 0.920, 0.819, 0.506]])
    >>> proba_to_label(probas)
    tensor([2, 0, 5])
    """
    return (probas > 0.5).sum(dim=1)
def logits_to_label(logits):
    """Convert extended-binary logits to integer class labels.

    Sigmoid probabilities are accumulated with a cumulative product along
    each row (enforcing monotonically decreasing level probabilities), then
    thresholded at 0.5 and counted.

    >>> logits = torch.tensor([[ 0.934, -0.861,  0.323, -0.492, -0.295],
    ...                        [-0.496,  0.485,  0.267,  0.124, -0.058],
    ...                        [ 0.985,  0.967, -0.920,  0.819, -0.506]])
    >>> logits_to_label(logits)
    tensor([1, 0, 2])
    """
    cum_probas = torch.cumprod(torch.sigmoid(logits), dim=1)
    return (cum_probas > 0.5).sum(dim=1)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Create the standalone ``Logo`` model and drop ``letterhead.logo``.

    NOTE(review): the removed field appears to be superseded by the new Logo
    model; confirm that no data migration of existing letterhead logos is
    needed before applying.
    """

    dependencies = [
        ('letters', '0002_lettertext_additional_data'),
    ]

    operations = [
        migrations.CreateModel(
            name='Logo',
            fields=[
                ('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)),
                # empty upload_to stores uploads directly under MEDIA_ROOT
                ('image', models.ImageField(upload_to='')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.RemoveField(
            model_name='letterhead',
            name='logo',
        ),
    ]
|
nilq/baby-python
|
python
|
from abc import ABC, abstractmethod
import logging
class BasicPersistAdapter(ABC):
    """Abstract persistence adapter for a single entity class."""

    def __init__(self, adapted_class, logger=None):
        """
        Persistence adapter for an entity.
        :param adapted_class: class being adapted
        :param logger: optional logger; defaults to the root logger
        """
        self._class = adapted_class
        self._logger = logger if logger else logging.getLogger()

    @property
    def logger(self):
        # logger injected at construction time (or the root logger)
        return self._logger

    @property
    def adapted_class(self):
        # the entity class this adapter persists
        return self._class

    @property
    def adapted_class_name(self):
        # convenience accessor for the entity class's name
        return self._class.__name__

    @abstractmethod
    def list_all(self):
        """Return all persisted objects of the adapted class."""
        raise NotImplementedError

    @abstractmethod
    def get_by_id(self, item_id):
        """Return the persisted object identified by ``item_id``."""
        raise NotImplementedError

    @abstractmethod
    def save(self, serialized_data):
        """Persist ``serialized_data``."""
        raise NotImplementedError

    @abstractmethod
    def delete(self, entity_id):
        """Remove the entity identified by ``entity_id``."""
        raise NotImplementedError

    @abstractmethod
    def filter(self, **kwargs):
        """
        Filter objects according to the specified criteria.

        Criteria — combined by default with the logical *or* operator — are
        expressed as the field name joined to the desired comparison operator
        by a "__" (double underscore).

        Example: to filter all objects whose email field equals
        "nome@dom.com", call the filter like this:
            result = adapter.filter(email__eq="nome@dom.com")

        :raises ValueError("Comparador inválido"): if the given comparator is
            not one of:
            [begins_with, between, contains, eq, exists, gt, gte, is_in, lt,
            lte, ne, not_exists]
        :return: list of objects
        """
        raise NotImplementedError
|
nilq/baby-python
|
python
|
from typing import Optional, Union
from pydantic import BaseModel
from pydantic.fields import Field
from .icon import Icon
class SubmenuContribution(BaseModel):
    """Contribution that surfaces another menu as a submenu entry."""

    id: str = Field(description="Identifier of the menu to display as a submenu.")
    label: str = Field(
        description="The label of the menu item which leads to this submenu."
    )
    icon: Optional[Union[str, Icon]] = Field(
        None,
        description=(
            # fixed: the implicit concatenation previously produced
            # "...dark and lightthemes..." (missing space)
            "(Optional) Icon which is used to represent the command in the UI."
            " Either a file path, an object with file paths for dark and light"
            " themes, or a theme icon references, like `$(zap)`"
        ),
    )
|
nilq/baby-python
|
python
|
# Use include() to add paths from the catalog application
from django.urls import path, include
from django.contrib.auth import views as auth_views
from . import views
urlpatterns = [
    # authentication and signup (AJAX field validators included)
    path('account/login/', views.login_view, name='login'),
    path('account/signup/', views.signup_view, name='signup'),
    path('account/signup/validate_username/', views.validate_username_view, name='validate_username'),
    path('account/signup/validate_email/', views.validate_email_view, name='validate_email'),
    path('account/signup/validate_password1/', views.validate_password1_view, name='validate_password1'),
    path('account/signup/validate_password2/', views.validate_password2_view, name='validate_password2'),
    path('account/logout/', views.logout_view, name='logout'),
    # password-reset flow: stock auth views with custom templates
    # (backslash continuations removed — unnecessary inside parentheses)
    path('account/password_reset/',
         auth_views.PasswordResetView.as_view(
             template_name='password_reset/password_reset_form.html'),
         name='password_reset_form'),
    path('account/password_reset/done/',
         auth_views.PasswordResetDoneView.as_view(
             template_name='password_reset/password_reset_done.html'),
         name='password_reset_done'),
    path('account/reset/<uidb64>/<token>/',
         auth_views.PasswordResetConfirmView.as_view(
             template_name='password_reset/password_reset_confirm.html'),
         name='password_reset_confirm'),
    path('account/reset/done/',
         auth_views.PasswordResetCompleteView.as_view(
             template_name='password_reset/password_reset_complete.html'),
         name='password_reset_complete'),
    # per-user account management
    path('<username>/update_profile/', views.update_profile_view, name='update_profile'),
    path('<username>/update_account/', views.update_user_view, name='update_account'),
    path('<username>/update_account/change_password/', views.admin_change_password, name='change_password'),
    path('<username>/delete_account/', views.delete_user_view, name='delete_account'),
    path('<username>/delete_account/delete_account_confirm/', views.delete_account_confirm_view, name='delete_account_confirm'),
]
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020 Zorglub42 {contact(at)zorglub42.fr}.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
"""FFBC8 weatherstation Admin API."""
import logging
from flask import request
from flask_restx import Resource
from api.datamodel import SYSTEM_COMMAND_PAYLOAD, SYSTEM_TIME,\
WIFI_CONFIG_EXTENDED, WIFI_CONFIG
from api.restx import API
from services.admin import AdminService
NS = API.namespace(
'admin',
description='Weather station admin'
)
@NS.route("/ping")
class Pinger(Resource):
    """System pingers."""

    logger = None

    # pylint: disable=keyword-arg-before-vararg
    def __init__(self, api=None, *args, **kwargs):
        # fixed: the original called Resource.__init__(self, api, kwargs),
        # passing the kwargs dict positionally and dropping keyword options
        super().__init__(api, *args, **kwargs)
        self.logger = logging.getLogger(__name__)

    def get(self):
        """Ping system."""
        return "OK"
@NS.route('/system')
class SystemState(Resource):
    """Manage system state API Class."""

    logger = None

    # pylint: disable=keyword-arg-before-vararg
    def __init__(self, api=None, *args, **kwargs):
        # fixed: forward *args/**kwargs instead of passing kwargs positionally
        super().__init__(api, *args, **kwargs)
        self.logger = logging.getLogger(__name__)

    @NS.expect(SYSTEM_COMMAND_PAYLOAD)
    def post(self):
        """Receive System state."""
        data = request.json
        self.logger.debug("\t%s", data)
        admin_svc = AdminService()
        admin_svc.execute_command(data["command"])
        return "OK"
@NS.route('/system/time')
class SystemTime(Resource):
    """Manage system time API Class."""

    logger = None

    # pylint: disable=keyword-arg-before-vararg
    def __init__(self, api=None, *args, **kwargs):
        # fixed: forward *args/**kwargs instead of passing kwargs positionally
        super().__init__(api, *args, **kwargs)
        self.logger = logging.getLogger(__name__)

    @NS.expect(SYSTEM_TIME)
    def post(self):
        """Receive System time."""
        data = request.json
        self.logger.debug("\t%s", data)
        admin_svc = AdminService()
        admin_svc.set_time(data["dateTime"])
        return "OK"
@NS.route('/system/wifi')
class SystemWifi(Resource):
    """Manage system wifi API Class."""

    logger = None

    # pylint: disable=keyword-arg-before-vararg
    def __init__(self, api=None, *args, **kwargs):
        # fixed: forward *args/**kwargs instead of passing kwargs positionally
        super().__init__(api, *args, **kwargs)
        self.logger = logging.getLogger(__name__)

    @NS.marshal_with(WIFI_CONFIG_EXTENDED)
    def get(self):
        """Get wifi configuration and neighborhood."""
        admin_svc = AdminService()
        return admin_svc.get_wifi_hotspot()

    @NS.expect(WIFI_CONFIG)
    def post(self):
        """Apply wifi settings."""
        admin_svc = AdminService()
        admin_svc.apply_wifi(request.json)
        return "OK"
@NS.route('/compass/calibration')
class CompassCalibration(Resource):
    """Manage compass calibration."""

    logger = None

    # pylint: disable=keyword-arg-before-vararg
    def __init__(self, api=None, *args, **kwargs):
        # fixed: forward *args/**kwargs instead of passing kwargs positionally
        super().__init__(api, *args, **kwargs)
        self.logger = logging.getLogger(__name__)

    def post(self):
        """Request mag compass calibration to arduino."""
        admin_svc = AdminService()
        return admin_svc.request_mag_calibration()
@NS.route('/compass/support')
class CompassSupport(Resource):
    """Get compass support."""

    logger = None

    # pylint: disable=keyword-arg-before-vararg
    def __init__(self, api=None, *args, **kwargs):
        # fixed: forward *args/**kwargs instead of passing kwargs positionally
        super().__init__(api, *args, **kwargs)
        self.logger = logging.getLogger(__name__)

    def get(self):
        """Request compass support to arduino."""
        admin_svc = AdminService()
        return admin_svc.request_compass_support()
@NS.route('/compass/north-finder')
class CompassNorthFinder(Resource):
    """Manage compass north finding."""

    logger = None

    # pylint: disable=keyword-arg-before-vararg
    def __init__(self, api=None, *args, **kwargs):
        # fixed: forward *args/**kwargs instead of passing kwargs positionally
        super().__init__(api, *args, **kwargs)
        self.logger = logging.getLogger(__name__)

    def post(self):
        """Request arduino to find magnetic north."""
        admin_svc = AdminService()
        return admin_svc.request_find_north()
|
nilq/baby-python
|
python
|
# support file to update existing mongo records to include GeoJSON points
from extensions import db
from bson.objectid import ObjectId
def create_index():
    """Create a 2dsphere geo index on restaurants.geo_json."""
    db.restaurants.create_index([('geo_json', '2dsphere')], name='geo_json_index')
def insert_geo_json():
    """Backfill a GeoJSON Point (built from location.lng/lat) onto every restaurant."""
    for doc in db.restaurants.find():
        lng = doc['location']['lng']
        lat = doc['location']['lat']
        # GeoJSON coordinate order is [longitude, latitude]
        update = {
            'geo_json': {
                'type': 'Point',
                'coordinates': [lng, lat],
            }
        }
        db.restaurants.update_one({'_id': ObjectId(doc['_id'])}, {'$set': update}, upsert=False)
def main():
    """Backfill geo_json on all restaurants, then index the new field."""
    insert_geo_json()
    create_index()


if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
from .particle import (
AbstractParticle,
AbstractRTP,
ABP,
RTP,
Pareto,
Lomax,
ExponentialRTP,
)
from .boundary import AbstractDomain, Box, Disk
from .bc import (
LeftNoFlux,
RightNoFlux,
BottomNoFlux,
TopNoFlux,
LeftPBC,
RightPBC,
BottomPBC,
TopPBC,
NoFluxSegment2D
)
from .ic import AbstractIC, Point, Uniform, InitialConfig
from .config import AbstractConfig, Config
from .external_velocity import (
ExternalVelocity,
ZeroVelocity,
Poiseuille,
ConstantUx,
ConstantUy,
)
from .kernel import AbstractKernel
from .compiler import AbstractCompiler
from .simulator import AbstractSimulator
from .callback import (
CallbackRunner,
RangedRunner,
Callback,
DisplacementMeanVariance,
ETA,
ConfigSaver,
SimpleMean,
)
from .io import Result
|
nilq/baby-python
|
python
|
# next three lines were added by versioneer
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions  # keep the helper out of the public module namespace
|
nilq/baby-python
|
python
|
''' implements a bitonic tour from CLRS
uses dynamic programming to produce a semi optimal path in
O(n^2) time '''
import graphics as g
import numpy as np
import math
import time
import random
from .tsp_map import *
def get_x(pt_tuple):
    """Sort key: return the x coordinate of a (point, index) tuple."""
    point = pt_tuple[0]
    return point.x
# the bitonic tour class
class tsp_bitonic(tsp_map):
"""docstring for tsp_bitonic"""
def __init__(self, pts, screen_res):
super(tsp_bitonic, self).__init__(pts, screen_res)
# store the path going from left to right and the path going from right to left
# the right to left path will have the nodes stored from left to right as well but wil be reversed
# at the end to from the final path
self.rl_path = np.array([])
self.lr_path = np.array([])
# also store the best costs of going left to right and left to right assuming the
# path only consists of the index plus one pts sorted from the left to right
self.rl_cost = np.zeros(len(self.pts))
self.lr_cost = np.zeros(len(self.pts))
# sort the array from left to right
self.sorted_pts = np.array(sorted([ (self.pts[k], k) for k in range(len(self.pts)) ], key=get_x))
#self.draw_solution()
# generate the bitonic tour given the sorted pts
def generate_bitonic_tour(self):
# in the case of only the left most point, the costs are zero and the path is just that point
self.rl_cost[]
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# A simple script to print some messages.
import asyncio
import json
import os
import random
import re
import time
from pprint import pprint

from telethon import TelegramClient, events, utils
from dotenv import load_dotenv
load_dotenv()  # pull variables from a local .env file into the environment

session = os.environ.get('TG_SESSION', 'printer')
api_id = os.getenv("API_ID")
api_hash = os.getenv("API_HASH")
# fixed: default to "" so a missing DEBUG_MODE reads as False instead of
# crashing with AttributeError on None.upper()
debug_mode = os.getenv("DEBUG_MODE", "").upper() == "TRUE"
proxy = None  # https://github.com/Anorov/PySocks

# Create and start the client so we can make requests (we don't here)
client = TelegramClient(session, api_id, api_hash, proxy=proxy).start()

# remember who already received an auto-reply so repeat senders escalate
senderList = []

# read json file and prepare quiz to send later
with open('quizzes.json') as json_file:
    quizzes = json.load(json_file)
@client.on(events.NewMessage)
async def handle_new_message(event):
    """Auto-reply to private messages / @mentions with escalating canned replies."""
    # fixed: get_me() is a coroutine — the original `await client.get_me().username`
    # accessed .username on the coroutine object and raised AttributeError
    me = await client.get_me()
    from_ = await event.client.get_entity(event.from_id)  # this lookup will be cached by telethon
    to_ = await event.client.get_entity(event.message.to_id)
    needToProceed = from_.is_self if debug_mode else not from_.is_self and (event.is_private or re.search("@" + me.username, event.raw_text))
    if needToProceed:  # only auto-reply to private chats or direct mentions
        if not from_.bot and event:  # don't auto-reply to bots
            print(time.asctime(), '-', event.message)  # optionally log time and message
            # fixed: time.sleep blocked the whole event loop; await instead
            await asyncio.sleep(1)  # pause for 1 second to rate-limit automatic replies
            message = ""
            senderList.append(to_.id)
            if senderList.count(to_.id) < 2:
                message = f"""**AUTO REPLY**
\nHi @{from_.username},
\n\nMohon maaf boss saya sedang offline, mohon tunggu sebentar.
\nSilahkan lihat-lihat [imacakes](https://www.instagram.com/ima_cake_cirebon) dulu untuk cuci mata.
\n\n**AUTO REPLY**"""
            elif senderList.count(to_.id) < 3:
                message = f"""**AUTO REPLY**
\nMohon bersabar @{from_.username}, boss saya masih offline 😒"""
            elif senderList.count(to_.id) < 4:
                message = f"""**AUTO REPLY**
\n@{from_.username} Tolong bersabar yaa 😅"""
            else:
                random_number = random.randint(0, len(quizzes) - 1)
                question = quizzes[random_number]['question']
                answer = quizzes[random_number]['answer']
                message = f"""**AUTO REPLY**
\n @{from_.username}, Main tebak-tebakan aja yuk 😁
\n {question}
\n {answer}
\n """
            if message != "":
                await event.reply(message)
client.start()  # no-op if the client was already started above
client.run_until_disconnected()  # block and dispatch events until the session ends
|
nilq/baby-python
|
python
|
import re
import uuid
from django.core import exceptions
import slugid
# 22-character URL-safe base64 patterns for slugid-encoded UUIDs.
# V4 matches any uuid4-based slug; NICE additionally restricts the first
# character to A-f (presumably slugid's "nice" form with a zero leading
# bit — confirm against the slugid spec).
SLUGID_V4_REGEX = re.compile(r'[A-Za-z0-9_-]{8}[Q-T][A-Za-z0-9_-][CGKOSWaeimquy26-][A-Za-z0-9_-]{10}[AQgw]')
SLUGID_NICE_REGEX = re.compile(r'[A-Za-f][A-Za-z0-9_-]{7}[Q-T][A-Za-z0-9_-][CGKOSWaeimquy26-][A-Za-z0-9_-]{10}[AQgw]')
def slugid_nice():
    """ Returns a new, random utf-8 slug (based on uuid4).
    :return: slug representation of a new uuid4, as a utf-8 string
    :rtype: str
    """
    # NOTE(review): assumes slugid.nice() returns bytes (slugid < 3);
    # slugid >= 3 returns str, which would break .decode — confirm the
    # pinned dependency version.
    return slugid.nice().decode('utf-8')
def slug_to_uuid(slug):
    """ Returns a uuid.UUID object from a slug.
    :param str slug: slug to convert to UUID
    :return: uuid representation of slug
    :rtype: uuid.UUID
    :raises django.core.exceptions.ValidationError: if the slug cannot be decoded
    """
    try:
        return slugid.decode(slug)
    except Exception as ex:
        # chain the original error so the underlying decode failure
        # is not swallowed (the original bound `ex` but never used it)
        raise exceptions.ValidationError('slug could not be decoded') from ex
def uuid_to_slug(uuid_in):
    """ Returns a utf-8 slug representation of a UUID.
    :param uuid.UUID uuid_in: uuid to represent as slug (a string is coerced)
    :return: utf-8 slug
    :rtype: str
    :raises django.core.exceptions.ValidationError: if coercion to UUID fails
    """
    # isinstance instead of `type(x) != uuid.UUID` — also accepts UUID subclasses
    if not isinstance(uuid_in, uuid.UUID):
        try:
            uuid_in = uuid.UUID(uuid_in)
        except (AttributeError, ValueError):
            raise exceptions.ValidationError('invalid uuid value')
    return slugid.encode(uuid_in).decode('utf-8')
|
nilq/baby-python
|
python
|
import lldb
import lldb.formatters
import lldb.formatters.synth
class SyntheticChildrenProvider(
        lldb.formatters.synth.PythonObjectSyntheticChildProvider):
    """Example provider exposing three fixed synthetic children."""

    def __init__(self, value, internal_dict):
        super().__init__(value, internal_dict)

    def make_children(self):
        # (name, value) pairs rendered as the object's synthetic children
        children = [
            ("ID", 123456),
            ("Name", "Enrico"),
            ("Rate", 1.25),
        ]
        return children
|
nilq/baby-python
|
python
|
# Copyright 2019-2021 Simon Zigelli
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import logging
import re
import aiohttp
from aiohttp import ClientConnectorError
from dateutil.relativedelta import relativedelta, MO
from StagyBee.settings import WB_LANGUAGE_SWITCHER
class WorkbookExtractor:
    def __init__(self, *args, **kwargs):
        """Set up logger, the jw.org workbook URL prefix, and a browser User-Agent."""
        super().__init__(*args, **kwargs)
        self.logger = logging.getLogger(__name__)
        self.PREFIX = "https://www.jw.org/en/library/jw-meeting-workbook"
        # desktop-Chrome User-Agent — presumably to avoid bot filtering; confirm
        self.USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) " \
                          "Chrome/79.0.3945.130 Safari/537.36"
async def get_workbooks(self, urls, language="en"):
async with aiohttp.ClientSession() as session:
weeks = await asyncio.gather(
*[self.__extract__(session, url, my_date, language) for my_date, url in urls.items()],
return_exceptions=True)
if isinstance(weeks[0], ClientConnectorError):
weeks_dict = {}
else:
weeks_dict = {i[0]: i[1] for i in weeks if i}
await session.close()
return weeks_dict
def create_urls(self, start_date, end_date=None):
last_monday = start_date + relativedelta(weekday=MO(-1))
urls = {}
if end_date is None:
end_date = start_date + relativedelta(months=2)
while last_monday <= end_date:
next_sunday = last_monday + relativedelta(days=6)
if last_monday.year >= 2020:
url = self.__get_2020_url__(last_monday, next_sunday, last_monday.year)
else:
url = self.__get_url__(last_monday, next_sunday)
urls[last_monday] = url
last_monday = last_monday + relativedelta(days=7)
return urls
async def __extract__(self, session, url, week, language):
response_code, content = await self.__get_workbook__(session, url)
if response_code == 200:
if language == "en":
times = await self.__parse__(content, "en")
return week.strftime("YYYY-MM-DD"), times
else:
language_url = await self.__get_language_url__(content, language)
response_code, content = await self.__get_workbook__(session, language_url)
if response_code == 200:
times = await self.__parse__(content, language)
return week.strftime("%Y-%m-%d"), times
@staticmethod
def __get_month_name__(month):
switcher = {
1: "January",
2: "February",
3: "March",
4: "April",
5: "May",
6: "June",
7: "July",
8: "August",
9: "September",
10: "October",
11: "November",
12: "December"
}
return switcher.get(month, "Invalid month")
@staticmethod
def __get_month_name_2021__(month):
    """Map a month to its 2021+ bi-monthly issue name (e.g. 3 -> 'March-April')."""
    issues = ("January-February", "March-April", "May-June",
              "July-August", "September-October", "November-December")
    # months 1-2 share issue 0, 3-4 share issue 1, and so on
    lookup = {m: issues[(m - 1) // 2] for m in range(1, 13)}
    return lookup.get(month, "Invalid month")
@staticmethod
async def __get_language_regex__(language):
    """Look up the per-language parsing regexes; 'Invalid language' if unknown."""
    try:
        return WB_LANGUAGE_SWITCHER[language]
    except KeyError:
        return "Invalid language"
@staticmethod
async def __get_language_url__(content, language):
    """Find the alternate-language workbook URL via hreflang link tags.

    Returns "" when no matching link is found.
    """
    needle = f"hreflang=\"{language}\""
    href_pattern = re.compile(r"href=\".*?\"")
    for line in content.split("\n"):
        if needle not in line:
            continue
        matches = href_pattern.findall(line)
        if matches:
            # drop the leading 'href="' and the trailing quote
            return matches[0][6:-1]
    return ""
async def __get_workbook__(self, session, url):
    """GET one workbook page.

    :return: (HTTP status code, page text) — text is "" on non-200
    """
    self.logger.info(url)
    self.logger.info("Fetching workbook...")
    headers = {
        "User-Agent": self.USER_AGENT}
    async with session.get(url, headers=headers) as resp:
        response_code = resp.status
        if response_code == 200:
            self.logger.info("Download completed. Parsing...")
            content = await resp.text()
        else:
            content = ""
        # NOTE: the original also called 'await resp.release()' here; that
        # is redundant because leaving the 'async with' block releases the
        # connection back to the pool.
    return response_code, content
async def __parse__(self, content, language):
    """Extract meeting-part durations from the workbook HTML.

    :return: list of [minutes, cleaned line text] pairs, one per matched line
    """
    # regex is the per-language tuple from WB_LANGUAGE_SWITCHER; the code
    # below uses indices 0-3 as (duration matcher, digit extractor,
    # truncation anchor, strip pattern) — TODO confirm against the
    # switcher's definition, which is outside this file chunk.
    regex = await self.__get_language_regex__(language)
    times = []
    lines = content.split("\n")
    for line in lines:
        # strip HTML tags and truncate the line at the language anchor
        clean = await self.__clean_html__(line, regex[2])
        if clean is None or clean == "":
            continue
        clean = re.sub(regex[3], "", clean)
        # skip lines with no duration expression
        times_tmp = re.search(regex[0], clean)
        if not times_tmp:
            continue
        ti = re.findall(regex[1], times_tmp.group(0))
        if not ti:
            continue
        # first captured number is the part duration in minutes
        times.append([int(ti[0]), clean])
    self.logger.info("Parsing completed.")
    return times
def __get_url__(self, last_monday, next_sunday):
    """Build the pre-2020 workbook URL for the week starting *last_monday*."""
    prefix = "meeting-schedule"
    month = self.__get_month_name__(last_monday.month).lower()
    base = f"{self.PREFIX}/{month}-{last_monday.year}-mwb"
    if last_monday.month == next_sunday.month:
        # week stays inside a single month
        return f"{base}/{prefix}-{month}{last_monday.day}-{next_sunday.day}/"
    # week straddles a month boundary: both month names appear in the slug
    next_month = self.__get_month_name__(next_sunday.month).lower()
    return f"{base}/{prefix}-{month}{last_monday.day}-{next_month}{next_sunday.day}/"
def __get_2020_url__(self, last_monday, next_sunday, year):
    """Build the 2020+ workbook URL; 2021+ issues are bi-monthly."""
    prefix = "Life-and-Ministry-Meeting-Schedule-for"
    # capitalized month name used in the schedule slug
    month = self.__get_month_name__(last_monday.month)
    # issue-root segment: single month through 2020, month pair from 2021
    if year <= 2020:
        month_root = self.__get_month_name__(last_monday.month)
    else:
        month_root = self.__get_month_name_2021__(last_monday.month)
    base = f"{self.PREFIX}/{month_root.lower()}-{last_monday.year}-mwb"
    if last_monday.month == next_sunday.month:
        return (f"{base}/{prefix}-{month}-{last_monday.day}-"
                f"{next_sunday.day}-{last_monday.year}/")
    next_month = self.__get_month_name__(next_sunday.month)
    if last_monday.year == next_sunday.year:
        # month boundary within one year
        return (f"{base}/{prefix}-{month}-{last_monday.day}-"
                f"{next_month}-{next_sunday.day}-{last_monday.year}/")
    # year boundary: both years appear in the slug
    return (f"{base}/{prefix}-{month}-{last_monday.day}-{last_monday.year}-"
            f"{next_month}-{next_sunday.day}-{next_sunday.year}/")
@staticmethod
async def __clean_html__(raw_html, regex):
    """Strip HTML tags, then truncate at the first match of *regex*.

    Returns "" for tag-only input and None when *regex* never matches.
    """
    text = re.sub(r"<.*?>", "", raw_html)
    if not text:
        return ""
    match = re.search(regex, text)
    if match:
        return text[:match.end()].strip()
|
nilq/baby-python
|
python
|
import numpy as np
class Neuron:
    """A single neuron with a linear or logistic activation.

    The weight vector carries a trailing bias weight, so it has
    ``num_inputs + 1`` entries; a constant 1 is appended to every input.
    """

    # ACT_FUNCTION, NUM_INPUTS, LEARNING_RATE, [INIT_WEIGHTS]
    def __init__(self, activation: str, num_inputs: int, lr: float, weights: np.ndarray):
        self.activation = activation
        self.num_inputs = num_inputs
        self.lr = lr
        self.weights = weights
        # populated during the forward/backward passes
        self.output = None
        self.inputs = None
        self.net = None
        self.partial_der = None

    def activate(self):
        """Apply the activation function to the cached net value."""
        if self.activation == "logistic":
            self.output = 1 / (1 + np.exp(-self.net))
        elif self.activation == "linear":
            self.output = self.net
        return self.output

    def calculate(self, inputs):
        """Forward pass: append the bias input, compute the weighted sum,
        and return the activated output."""
        self.inputs = np.append(inputs.copy(), [1])
        self.net = (self.inputs * self.weights).sum()
        return self.activate()

    def activation_derivative(self):
        """Derivative of the activation w.r.t. net, from the cached output."""
        if self.activation == "logistic":
            return self.output * (1 - self.output)
        if self.activation == "linear":
            return 1

    def derivative(self, delta):
        """Cache the gradient of the loss w.r.t. this neuron's weights."""
        self.partial_der = delta * np.array(self.inputs)

    def calc_partial_derivative(self, deltaw_1):
        """Backward pass: fold the upstream delta*w through the activation,
        store the weight gradient, and return delta*w for the layer below."""
        local_delta = self.activation_derivative() * deltaw_1
        self.derivative(local_delta)
        return self.weights * local_delta

    def update_weights(self):
        """Gradient-descent step using the cached partial derivatives."""
        self.weights = self.weights - self.partial_der * self.lr
|
nilq/baby-python
|
python
|
# Generated by Django 2.1.7 on 2019-04-14 15:58
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the optional ``show_remaining_at`` column to ``ItemType``."""

    dependencies = [
        ('products', '0022_auto_20190403_1556'),
    ]

    operations = [
        # Nullable integer, so existing rows need no default value.
        migrations.AddField(
            model_name='itemtype',
            name='show_remaining_at',
            field=models.IntegerField(blank=True, null=True),
        ),
    ]
|
nilq/baby-python
|
python
|
from dataclasses import dataclass
from typing import Optional, Union
@dataclass(frozen=True, order=True)
class ConfirmedTX:
    """Immutable record of a confirmed transaction.

    Several fields are Optional — presumably absent for block types that
    carry no amount or counterparty (TODO confirm against the API docs).
    """
    address: Optional[str]
    amount: Optional[Union[int, float]]
    amount_raw: Optional[str]  # presumably the raw-unit amount as a string — confirm
    date: str
    hash: str
    height: int
    new_representative: Optional[str]  # set when the block changes representative
    timestamp: int
    type: str
@dataclass(frozen=True, order=True)
class Delegator:
    """One delegator: its account address and delegated weight."""
    address: str
    weight: int
@dataclass(frozen=True, order=True)
class Delegators:
    """Aggregate view over an account's delegators."""
    count: int
    delegators: list[Delegator]
    empty_count: int  # delegators counted separately from the list — TODO confirm semantics
    weight_sum: int
@dataclass(frozen=True, order=True)
class Insights:
    """Historical statistics for an account.

    ``first_out_*`` / ``last_out_*`` fields are Optional — presumably None
    for accounts that have never sent (TODO confirm against the API docs).
    """
    block_count: int
    first_in_tx_hash: str
    first_in_tx_unix_timestamp: int
    first_out_tx_hash: Optional[str]
    first_out_tx_unix_timestamp: Optional[int]
    height_balances: Optional[list[Union[int, float]]]  # balance per block height, when requested
    last_in_tx_hash: str
    last_in_tx_unix_timestamp: int
    last_out_tx_hash: Optional[str]
    last_out_tx_unix_timestamp: Optional[int]
    max_amount_received: Union[int, float]
    max_amount_received_hash: str
    max_amount_sent: Union[int, float]
    max_amount_sent_hash: Optional[str]
    max_balance: Union[int, float]
    max_balance_hash: str
    most_common_recipient_address: Optional[str]
    most_common_recipient_tx_count: int
    most_common_sender_address: str
    most_common_sender_tx_count: int
    total_amount_received: Union[int, float]
    total_amount_sent: Union[int, float]
    total_tx_change: int
    total_tx_received: int
    total_tx_sent: int
@dataclass(frozen=True, order=True)
class Overview:
    """Snapshot of an account's current state."""
    address: str
    balance: Optional[Union[int, float]]
    balance_raw: Optional[str]  # presumably the raw-unit balance string — confirm
    block_count: int
    delegators_count: int
    opened: bool  # whether the account has been opened on-chain
    principal: bool
    receivable: Union[int, float]
    receivable_raw: str
    representative: Optional[str]
    weight: Optional[int]
@dataclass(frozen=True, order=True)
class ReceivableTX:
    """A pending (receivable) transaction awaiting pocketing."""
    address: str
    amount: Union[int, float]
    amount_raw: str  # presumably the raw-unit amount string — confirm
    hash: str
    timestamp: int
|
nilq/baby-python
|
python
|
import os
import sys
from socket import gethostname
import numpy as np
class teca_pytorch_algorithm(teca_python_algorithm):
    """
    A TECA algorithm that provides access to torch. To use this class, derive
    a new class from it and from your class:

    1. call set input_/output_variable. this tells the pytorch_algorithm
       which array to process and how to name the result.

    2. call set_model. this installs your torch model. Use load_state_dict
       to load state dict from the file system in parallel.

    3. override preprocess. The input numpy array is passed in. return the
       array to send to torch after applying any preprocessing or transforms.

    4. override postprocess. the tensor returned from torch is passed. return a
       numpy array with the correct mesh dimensions

    5. Optionally override the usual teca_python_algorithm methods as needed.
    """

    def __init__(self):
        self.input_variable = None
        self.output_variable = None
        self.output_variable_atts = None
        self.model = None
        self.model_path = None
        self.device = 'cpu'
        self.n_threads = -1
        self.n_threads_max = 4
        self.verbose = 0
        self.initialized = False

    def set_verbose(self, val):
        """
        Set the verbosity of the run, higher values will result in more
        terminal output
        """
        self.verbose = val

    def set_input_variable(self, name):
        """
        set the name of the variable to be processed
        """
        self.input_variable = name

    def set_output_variable(self, name, atts):
        """
        set the variable name to store the results under and
        its attributes. Attributes are optional and may be None
        but are required for the CF writer to write the result
        to disk.
        """
        self.output_variable = name
        self.output_variable_atts = atts

    def set_thread_pool_size(self, val):
        """
        Set the number of threads in each rank's thread pool. Setting
        to a value of -1 will result in the thread pool being sized
        such that each thread is uniquely and exclusively bound to a
        specific core accounting for thread pools in other ranks
        running on the same node
        """
        self.n_threads = val

    def set_max_thread_pool_size(self, val):
        """
        Set an upper bound on the thread pool size. This is applied
        during automatic thread pool sizing.
        """
        self.n_threads_max = val

    def set_target_device(self, val):
        """
        Set the target device. May be one of 'cpu' or 'cuda'.
        """
        if val == 'cpu' or val == 'cuda':
            self.device = val
        else:
            raise RuntimeError('Invalid target device %s' % (val))

    def set_model(self, model):
        """
        set PyTorch model
        """
        self.model = model

    def initialize(self):
        """
        determine the mapping to hardware for the current MPI layout.
        if device is cpu then this configures OpenMP such that its
        thread pools have 1 thread per physical core.

        this also imports torch. this must be called prior to using any
        torch api's etc.
        """
        # 'event' is presumably a scoped timer whose lifetime spans this
        # call — TODO confirm teca_time_py_event semantics
        event = teca_time_py_event('teca_pytorch_algorithm::initialize')

        if self.initialized:
            return

        rank = 0
        n_ranks = 1
        comm = self.get_communicator()
        if get_teca_has_mpi():
            rank = comm.Get_rank()
            n_ranks = comm.Get_size()

        # tell OpenMP to report on what it does
        if self.verbose > 2:
            os.putenv('OMP_DISPLAY_ENV', 'true')

        # check for user specified OpenMP environment configuration
        omp_num_threads = os.getenv('OMP_NUM_THREADS')
        omp_places = os.getenv('OMP_PLACES')
        omp_proc_bind = os.getenv('OMP_PROC_BIND')
        if omp_num_threads is not None or omp_places is not None \
                or omp_proc_bind is not None:
            # at least one of the OpenMP environment control variables
            # was set. we will now bail out and use those settings
            if rank == 0:
                sys.stderr.write('[0] STATUS: OpenMP environment override '
                                 'detected. OMP_NUM_THREADS=%s '
                                 'OMP_PROC_BIND=%s OMP_PLACES=%s\n' % (
                                     str(omp_num_threads), str(omp_proc_bind),
                                     str(omp_places)))
                sys.stderr.flush()
            n_threads = 0
        else:
            # we will set the OpenMP control environment variables
            # determine the number of physical cores that are available
            # on this node, accounting for all MPI ranks scheduled to
            # run here.
            try:
                # let the user request a specific number of threads
                n_threads = self.n_threads

                n_threads, affinity = \
                    thread_util.thread_parameters(comm, n_threads, 1,
                                                  0 if self.verbose < 2 else 1)

                # let the user request a bound on the number of threads
                if self.n_threads_max > 0:
                    n_threads = min(n_threads, self.n_threads_max)

                # construct the places list explicitly
                places = '{%d}' % (affinity[0])
                i = 1
                while i < n_threads:
                    places += ',{%d}' % (affinity[i])
                    i += 1

                os.putenv('OMP_NUM_THREADS', '%d' % (n_threads))
                os.putenv('OMP_PROC_BIND', 'true')
                os.putenv('OMP_PLACES', places)

                if self.verbose:
                    sys.stderr.write('[%d] STATUS: %s : %d : OMP_NUM_THREADS=%d'
                                     ' OMP_PROC_BIND=true OMP_PLACES=%s\n' % (
                                         rank, gethostname(), rank, n_threads,
                                         places))
                    sys.stderr.flush()
            except(RuntimeError):
                # we failed to detect the number of physical cores per MPI rank
                os.putenv('OMP_NUM_THREADS', '1')
                n_threads = 1
                sys.stderr.write('[0] STATUS: Failed to determine the '
                                 'number of physical cores available per '
                                 'MPI rank. OMP_NUM_THREADS=1\n')
                sys.stderr.flush()

        # torch is imported lazily so OpenMP env vars above take effect first
        global torch
        import torch

        if n_threads:
            # also tell torch explicitly
            torch.set_num_threads(n_threads)
            torch.set_num_interop_threads(n_threads)

        if 'cuda' in self.device:
            # check that CUDA is present
            if torch.cuda.is_available():
                # get the number of devices and assign them to ranks round
                # robin
                n_dev = torch.cuda.device_count()
                dev_id = rank % n_dev
                if self.device == 'cuda':
                    # select the GPU that this rank will use.
                    self.device = 'cuda:%d' % (dev_id)
                if self.verbose:
                    dev_name = torch.cuda.get_device_name(self.device)
                    sys.stderr.write('[%d] STATUS: %s : %d : %d/%d : %s\n' % (
                                     rank, gethostname(), rank, dev_id, n_dev,
                                     dev_name))
                    sys.stderr.flush()
            else:
                # fall back to OpenMP
                if rank == 0:
                    # BUG FIX: the original wrote the literal '[%d]' because
                    # no % argument was supplied; interpolate the rank.
                    sys.stderr.write('[%d] WARNING: CUDA was requested but is not'
                                     ' available. OpenMP will be used.\n' % (rank))
                    sys.stderr.flush()
                self.device = 'cpu'

        self.initialized = True

    def check_initialized(self):
        """
        verify that the user called initialize
        """
        if not self.initialized:
            raise RuntimeError('Not initialized! call '
                               'teca_pytroch_algorithm::initialize before '
                               'use to configure OpenMP and import torch')

    def load_state_dict(self, filename):
        """
        Load only the pytorch state_dict parameters file.
        """
        event = teca_time_py_event('teca_pytorch_algorithm::load_state_dict')

        self.check_initialized()

        # rank 0 reads from disk; the result is broadcast to all ranks
        comm = self.get_communicator()
        rank = comm.Get_rank()
        sd = None
        if rank == 0:
            sd = torch.load(filename, map_location=self.device)
        sd = comm.bcast(sd, root=0)
        return sd

    def load_model(self, filename, model):
        """
        Load the state dict named by 'filename' and install them into the
        passed model instance 'model'. This also moves the model on the current
        target device, and puts the model into inference mode.
        """
        event = teca_time_py_event('teca_pytorch_algorithm::load_model')

        self.check_initialized()

        # load the model weights from disk
        model_state = self.load_state_dict(filename)

        # install weights, send to target device, run in inference mode
        model.load_state_dict(model_state)
        model.to(self.device)
        model.eval()

        self.model = model

    def preprocess(self, in_array):
        """
        Override this to preprocess the passed in array before it is passed to
        torch. The passed array has the shape of the input/output mesh. the
        default implementation does nothing.
        """
        return in_array

    def postprocess(self, out_tensor):
        """
        Override this to postprocess the tensor data returned from torch.
        return the result as a numpy array. the return should be sized
        compatibly with the output mesh. The default implementation converts
        the tensor to a ndarray.
        """
        return out_tensor.numpy()

    def report(self, port, rep_in):
        """ TECA report override """
        event = teca_time_py_event('teca_pytorch_algorithm::report')

        self.check_initialized()

        # check for required parameters.
        if self.model is None:
            raise RuntimeError('A torch model has not been specified')
        if self.input_variable is None:
            raise RuntimeError('input_variable has not been specified')
        if self.output_variable is None:
            raise RuntimeError('output_variable has not been specified')

        # add the variable we produce to the report
        rep = teca_metadata(rep_in[0])
        if rep.has('variables'):
            rep.append('variables', self.output_variable)
        else:
            rep.set('variables', self.output_variable)

        attributes = rep["attributes"]
        attributes[self.output_variable] = self.output_variable_atts.to_metadata()
        rep["attributes"] = attributes

        return rep

    def request(self, port, md_in, req_in):
        """ TECA request override """
        event = teca_time_py_event('teca_pytorch_algorithm::request')

        self.check_initialized()

        req = teca_metadata(req_in)
        arrays = []
        if req.has('arrays'):
            arrays = req['arrays']
            if type(arrays) != list:
                arrays = [arrays]

        # remove the arrays we produce
        try:
            arrays.remove(self.output_variable)
        except(Exception):
            pass

        # add the arrays we need
        arrays.append(self.input_variable)
        req['arrays'] = arrays

        return [req]

    def execute(self, port, data_in, req):
        """ TECA execute override """
        event = teca_time_py_event('teca_pytorch_algorithm::execute')

        self.check_initialized()

        # get the input array and reshape it to a 2D layout that's compatible
        # with numpy and torch
        in_mesh = as_teca_cartesian_mesh(data_in[0])
        if in_mesh is None:
            raise RuntimeError('empty input, or not a mesh')

        arrays = in_mesh.get_point_arrays()
        in_va = arrays[self.input_variable]
        ext = in_mesh.get_extent()
        in_va.shape = (ext[3] - ext[2] + 1,
                       ext[1] - ext[0] + 1)

        # let the derived class do model specific preprocessing
        in_array = self.preprocess(in_va)

        # send to torch for processing
        in_tensor = torch.from_numpy(in_array).to(self.device)
        with torch.no_grad():
            out_tensor = self.model(in_tensor)
            if out_tensor is None:
                raise RuntimeError("Model failed to get predictions")

        # let the derived class do model specific postprocessing
        out_array = self.postprocess(out_tensor)

        # build the output
        out_mesh = teca_cartesian_mesh.New()
        out_mesh.shallow_copy(in_mesh)
        out_va = teca_variant_array.New(out_array)
        out_mesh.get_point_arrays().set(self.output_variable, out_va)

        return out_mesh
|
nilq/baby-python
|
python
|
from objective_functions.hole_reaching.mp_lib import ExpDecayPhaseGenerator
from objective_functions.hole_reaching.mp_lib import DMPBasisGenerator
from objective_functions.hole_reaching.mp_lib import dmps
from experiments.robotics import planar_forward_kinematics as pfk
import numpy as np
import matplotlib.pyplot as plt
def ccw(A, B, C):
    """True if the points A, B, C make a counter-clockwise turn."""
    lhs = (C[1] - A[1]) * (B[0] - A[0])
    rhs = (B[1] - A[1]) * (C[0] - A[0])
    return lhs > rhs
# Return true if line segments AB and CD intersect
def intersect(A, B, C, D):
    """True if line segments AB and CD intersect (orientation test)."""
    ab_separates_cd = ccw(A, C, D) != ccw(B, C, D)
    cd_separates_ab = ccw(A, B, C) != ccw(A, B, D)
    return ab_separates_cd and cd_separates_ab
class ReachingTask:
    """Planar multi-link reaching task.

    Rolls a joint-space trajectory through forward kinematics and scores
    it by squared joint acceleration, distance to timed via points, final
    distance to the goal, and self-collision.
    """

    def __init__(self, num_links, via_points=()):
        self.num_links = num_links
        # via_points: iterable of {"t": time step index, "vp": (x, y)} dicts
        self.via_points = via_points
        # goal: arm fully extended along the +x axis
        self.goal_point = np.array((num_links, 0))
        self.pfk = pfk.PlanarForwardKinematics(num_joints=num_links)

    def rollout(self, trajectory, num_points_per_link, plot=False):
        """Evaluate one trajectory and return its cost (lower is better)."""
        # trajectory should be [num_time_steps, num_joints]
        # sum of squared second differences ~ total squared joint acceleration
        acc = np.sum(np.diff(trajectory, n=2, axis=0) ** 2)
        total_number_of_points_collided = 0
        self.end_effector_points = []
        distance = 0
        if plot:
            fig, ax = plt.subplots()
            plt.xlim(-self.num_links, self.num_links), plt.ylim(-self.num_links, self.num_links)
        for t, traj in enumerate(trajectory):
            # task-space sample points of every link at this time step
            line_points_in_taskspace = self.pfk.get_forward_kinematics(traj[:, None],
                                                                       num_points_per_link=num_points_per_link)
            endeffector = line_points_in_taskspace[-1, -1, :]
            # accumulate squared distance to any via point scheduled at step t
            for vp in self.via_points:
                if t == vp['t']:
                    distance += np.abs(np.linalg.norm(endeffector - np.array(vp["vp"]))) ** 2
            self.end_effector_points.append(line_points_in_taskspace[-1, -1, :])
            is_collided = self.check_collision(line_points_in_taskspace)
            if plot:
                ax.clear()
                plt.xlim(-self.num_links, self.num_links), plt.ylim(-self.num_links, self.num_links)
                ax.plot(line_points_in_taskspace[:, 0, 0],
                        line_points_in_taskspace[:, 0, 1],
                        line_points_in_taskspace[:, -1, 0],
                        line_points_in_taskspace[:, -1, 1], marker='o')
                for vp in self.via_points:
                    ax.scatter(vp["vp"][0], vp["vp"][1], c="r", marker="x")
                plt.pause(0.1)
            # stop the rollout at the first self-collision
            if is_collided:
                break
        # check the distance the endeffector travelled to the center of the hole
        # end_effector_travel = np.sum(
        #     np.sqrt(np.sum(np.diff(np.stack(end_effector_points), axis=0)[:, 4, :] ** 2, axis=1, keepdims=True))) ** 2
        # end_effector_travel = np.sum(np.sqrt(np.sum(np.diff(np.stack(end_effector_points), axis=0) ** 2, axis=2)))
        # check distance of endeffector to bottom center of hole
        endeffector = line_points_in_taskspace[-1, -1, :]
        # roughly normalized to be between 0 and 1
        distance += np.abs(np.linalg.norm(endeffector - self.goal_point)) ** 2  # / (self.num_links + np.abs(self.hole_x))
        # TODO: tune factors
        # distance in [0, 1]
        # |acc| in [0, 0.1]
        out = 1 * distance \
            + 100 * np.abs(acc) \
            + is_collided * 100000
        # + 0.1 * total_number_of_points_collided\
        # + 0.01 * end_effector_travel ** 2
        return np.atleast_1d(out)

    def check_collision(self, line_points):
        """True if any two non-adjacent link segments intersect."""
        for i, line1 in enumerate(line_points):
            # start at i+2: adjacent links share a joint and always "touch"
            for line2 in line_points[i+2:, :, :]:
                # if line1 != line2:
                if intersect(line1[0], line1[1], line2[0], line2[1]):
                    return True
        return False

    def plot_trajectory(self, trajectory):
        """Animate a trajectory frame by frame with matplotlib."""
        fig, ax = plt.subplots()
        plt.xlim(-self.num_links, self.num_links), plt.ylim(-1, self.num_links)
        for t in trajectory:
            fk = self.pfk.get_forward_kinematics(t, num_points_per_link=2)
            # print(fk)
            ax.plot(fk[:, 0, 0], fk[:, 0, 1], fk[:, 1, 0], fk[:, 1, 1], marker='o')
            # Add the patch to the Axes
            plt.pause(0.1)
            ax.clear()
            plt.xlim(-self.num_links, self.num_links), plt.ylim(-1, self.num_links)
class ReachingObjective:
    """Black-box objective mapping flattened DMP weights to a reaching cost."""

    def __init__(self, num_links=5, num_basis=5, via_points=None, dmp_weights=None):
        self.num_links = num_links
        # parameter dimensionality: one weight per basis function per joint
        self.d = num_links * num_basis
        # nominal optimum value of the objective
        self.f_opt = 0

        # create task
        self.task = ReachingTask(num_links=num_links,
                                 via_points=via_points)

        # use 5 basis functions per dof
        self.num_basis = num_basis
        self.t = np.linspace(0, 1, 100)

        phase_generator = ExpDecayPhaseGenerator()
        basis_generator = DMPBasisGenerator(phase_generator, num_basis=self.num_basis)

        self.dmp = dmps.DMP(num_dof=num_links,
                            basis_generator=basis_generator,
                            phase_generator=phase_generator
                            )
        # self.dmp.dmp_beta_x = 0

        # first joint starts pointing straight up; all others at zero
        self.dmp.dmp_start_pos = np.zeros((1, num_links))
        self.dmp.dmp_start_pos[0, 0] = np.pi / 2
        self.dmp.dmp_goal_pos = np.zeros((1, num_links))

        # random initial weights unless explicitly provided
        self.dmp.dmp_weights = dmp_weights if dmp_weights is not None else np.random.normal(0.0, 10.0, (num_basis, num_links))

    def __call__(self, parameters=None, plot=False):
        """Evaluate the cost of the given flattened DMP weight vector.

        When *parameters* is None, the currently installed weights are used.
        """
        if parameters is not None:
            if len(parameters.shape) > 1:
                assert parameters.shape[0] == 1
                parameters = parameters.flatten()
            weight_matrix = np.reshape(parameters, [self.num_basis, self.num_links])
            self.dmp.dmp_weights = weight_matrix

        ref_pos_learned, ref_vel_learned = self.dmp.reference_trajectory(self.t)

        # FIXME: How to ensure goal velocity is reached?

        return self.task.rollout(ref_pos_learned, num_points_per_link=2, plot=plot)

    def save_result(self, filename):
        """Persist the current DMP weights to '<filename>_dmp_weights.npy'."""
        np.save(filename + "_dmp_weights", self.dmp.dmp_weights)

    def load_result(self, filename):
        """Restore DMP weights previously saved by save_result."""
        self.dmp.dmp_weights = np.load(filename + "_dmp_weights.npy")
if __name__ == '__main__':
    # smoke test: evaluate one randomly initialized objective with plotting
    nl = 5
    objective = ReachingObjective(num_links=nl, via_points=({"t": 50, "vp": (1, 1)}, ))  # , hole_x=1)
    # objective.load_result("/tmp/sac")
    x_start = 1 * np.random.randn(10, nl*5)
    for i in range(1):
        rew = objective(plot=True)  # , parameters=x_start[i])
        print(rew)
|
nilq/baby-python
|
python
|
import vkconnections as vc
# vk api keys
# VK API access tokens (placeholders); several keys are passed in,
# presumably so the client can rotate around rate limits — TODO confirm
# against vkconnections documentation.
keys = ["xxx1", "xxx2", "xxx3", "xxx4"]
# screen names of the start and target users
user_from = "alsu"
user_to = "dm"
# creating object VkConnection with keys
vk = vc.VkConnection(keys)
# getting path between users
result = vk.get_connection(user_from, user_to)
# printing result
vk.print_connection(result)
|
nilq/baby-python
|
python
|
import wae
import wae_mmd
if __name__ == "__main__":
    # Alternative experiment configurations, kept for reference:
    #wae.run_mnist('_log/wae-wgan-1norm/',int(1e5),100,500,z_dim=5)
    #wae.run_celeba('_log/celeba/',int(1e5),10,200)
    # Run the WAE-MMD MNIST experiment.
    wae_mmd.run_mnist('_log/mnist',int(1e4),10,200,num_iter=int(1e5))
|
nilq/baby-python
|
python
|
import sys, getopt
from data_manager import DataManager
def print_welcome_messaage():
    """Print the TransitTime startup banner.

    NOTE(review): the function name carries a typo ('messaage'); kept
    as-is because the call site in main() depends on it.
    """
    welcome_message = """
******************************************************************
Welcome to TransitTime!
******************************************************************
"""
    print(welcome_message)
def main(argv):
    """Parse CLI options and print arrival info for one bus route/stop.

    :param argv: command-line arguments excluding the program name
    """
    # Default values
    bus_route_name = "MTABC_Q69"
    bus_stop_name = "21 ST/31 AV"
    help_text = """
Given a bus route and stop name, returns the time it will take a bus to arrive
at the stop and how far the bus is from the stop in miles.
Usage: transit_processor.py -r <bus route> -s <bus stop>
"""
    try:
        # args can be ignored from getopts
        opts, _ = getopt.getopt(argv,"hr:s:",["help","route=","stop="])
    except getopt.GetoptError:
        # unknown option or missing argument: show usage and exit non-zero
        print(help_text)
        sys.exit(2)
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            print(help_text)
            sys.exit()
        elif opt in ('-r', '--route'):
            bus_route_name = arg
        elif opt in ('-s', '--stop'):
            bus_stop_name = arg
    bus_route = DataManager.get_bus_route(bus_route_name, bus_stop_name, False)
    print_welcome_messaage()
    print(bus_route)
if __name__ == "__main__":
    # pass CLI args (minus the program name) to the option parser
    main(sys.argv[1:])
|
nilq/baby-python
|
python
|
from allennlp.common.testing import AllenNlpTestCase
from allennlp.models.archival import load_archive
from allennlp.predictors import Predictor
class TestPredictor(AllenNlpTestCase):
    """Tests for Predictor.from_archive behavior against fixture archives."""

    def test_from_archive_does_not_consume_params(self):
        archive = load_archive(self.FIXTURES_ROOT / "bidaf" / "serialization" / "model.tar.gz")
        Predictor.from_archive(archive, "machine-comprehension")

        # If it consumes the params, this will raise an exception
        Predictor.from_archive(archive, "machine-comprehension")

    def test_loads_correct_dataset_reader(self):
        # The ATIS archive has both training and validation ``DatasetReaders``. The
        # ``keep_if_unparseable`` argument has a different value in each of them
        # (``True`` for validation, ``False`` for training).
        archive = load_archive(
            self.FIXTURES_ROOT / "semantic_parsing" / "atis" / "serialization" / "model.tar.gz"
        )

        # default loads the validation reader
        predictor = Predictor.from_archive(archive, "atis-parser")
        assert predictor._dataset_reader._keep_if_unparseable is True

        predictor = Predictor.from_archive(archive, "atis-parser", dataset_reader_to_load="train")
        assert predictor._dataset_reader._keep_if_unparseable is False

        predictor = Predictor.from_archive(
            archive, "atis-parser", dataset_reader_to_load="validation"
        )
        assert predictor._dataset_reader._keep_if_unparseable is True

    def test_get_gradients(self):
        inputs = {
            "premise": "I always write unit tests",
            "hypothesis": "One time I did not write any unit tests",
        }

        archive = load_archive(
            self.FIXTURES_ROOT / "decomposable_attention" / "serialization" / "model.tar.gz"
        )
        predictor = Predictor.from_archive(archive, "textual-entailment")

        instance = predictor._json_to_instance(inputs)
        outputs = predictor._model.forward_on_instance(instance)
        labeled_instances = predictor.predictions_to_labeled_instances(instance, outputs)
        for instance in labeled_instances:
            # one gradient entry per input text field
            grads = predictor.get_gradients([instance])[0]
            assert "grad_input_1" in grads
            assert "grad_input_2" in grads
            assert grads["grad_input_1"] is not None
            assert grads["grad_input_2"] is not None
            assert len(grads["grad_input_1"][0]) == 9  # 9 words in hypothesis
            assert len(grads["grad_input_2"][0]) == 5  # 5 words in premise
|
nilq/baby-python
|
python
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
__all__ = [
'api_version',
'token',
'ua_prefix',
'url',
]
__config__ = pulumi.Config('linode')
api_version = __config__.get('apiVersion') or _utilities.get_env('LINODE_API_VERSION')
"""
An HTTP User-Agent Prefix to prepend in API requests.
"""
token = __config__.get('token') or _utilities.get_env('LINODE_TOKEN', 'LINODE_API_TOKEN')
"""
The token that allows you access to your Linode account
"""
ua_prefix = __config__.get('uaPrefix') or _utilities.get_env('LINODE_UA_PREFIX')
"""
An HTTP User-Agent Prefix to prepend in API requests.
"""
url = __config__.get('url') or _utilities.get_env('LINODE_URL')
"""
The HTTP(S) API address of the Linode API to use.
"""
|
nilq/baby-python
|
python
|
import logging
def pytest_configure(config):
    r"""Configure logging for the test session.

    NOTE(review): the original docstring claimed this *disables* verbose
    output, but ``level=logging.DEBUG`` enables the most verbose logging
    level — confirm which behavior is intended.
    """
    logging.basicConfig(level=logging.DEBUG)
|
nilq/baby-python
|
python
|
from ravestate.testfixtures import *
def test_roboyqa(mocker, context_fixture, triple_fixture):
    """Smoke-test the roboyqa state with a mocked context and ontology session."""
    # BUG FIX: mock.patch has no 'will_return' parameter — the original
    # call merely set a 'will_return' attribute on the mock, so conf()
    # never returned 'test'. 'return_value' is the correct keyword.
    mocker.patch.object(context_fixture, 'conf', return_value='test')
    context_fixture._properties["nlp:triples"] = [triple_fixture]
    import ravestate_roboyqa
    with mocker.patch('ravestate_ontology.get_session'):
        ravestate_roboyqa.roboyqa(context_fixture)
|
nilq/baby-python
|
python
|
#!/usr/bin/python3
import pytest
from brownie import *
@pytest.fixture(scope="module")
def requireMainnetFork():
    """Guard fixture: fail fast unless running against a mainnet fork."""
    active = network.show_active()
    assert active in ("mainnet-fork", "mainnet-fork-alchemy")
|
nilq/baby-python
|
python
|
import numpy as np
import gym
from gym import ObservationWrapper
from gym.spaces import MultiDiscrete
import matplotlib.pyplot as plt
from matplotlib import animation
class DiscreteQLearningAgent:
    """Tabular Q-learning agent over a discretized state space.

    The update rule uses an implicit learning rate of 1: each update
    overwrites the Q-value with reward + gamma * max future Q.
    """

    def __init__(self, state_shape, num_of_actions, reward_decay):
        # one Q-value per (state..., action) combination
        self.q_table = np.zeros((*state_shape, num_of_actions))
        self.reward_decay = reward_decay

    def get_action(self, state):
        """Return the greedy action (argmax of Q-values) for *state*."""
        return np.argmax(self.q_table[tuple(state)])

    def update_table(self, state, action, reward, new_state):
        """One-step Q backup for the observed transition."""
        best_future = self.q_table[tuple(new_state)].max()
        self.q_table[tuple(state) + (action,)] = reward + self.reward_decay * best_future
class MountainCarDiscretizeWrapper(ObservationWrapper):
    """Discretize MountainCar's continuous (position, speed) observation
    into a pair of integer bucket indices."""

    def __init__(self, env, num_pos_buckets, num_speed_buckets):
        super().__init__(env)
        self.observation_space = MultiDiscrete([num_pos_buckets, num_speed_buckets])
        # bucket edges spanning MountainCar-v0's observation bounds
        self.pos_buckets = np.linspace(-1.2, 0.6, num_pos_buckets)
        self.speed_buckets = np.linspace(-0.07, 0.07, num_speed_buckets)

    def observation(self, obs):
        """Map a continuous observation to [pos_bucket, speed_bucket]."""
        position, velocity = obs
        return [np.digitize(position, self.pos_buckets),
                np.digitize(velocity, self.speed_buckets)]
def train_agent(agent, env, episodes):
    """Run Q-learning for *episodes* episodes, updating the agent's table
    after every environment step and printing per-episode step counts."""
    for episode in range(episodes):
        state = env.reset()
        steps = 0
        done = False
        while not done:
            steps += 1
            action = agent.get_action(state)
            next_state, reward, done, _ = env.step(action)
            # After every step update our q table
            agent.update_table(state, action, reward, next_state)
            # Set our state variable
            state = next_state
        print(episode, ": ", steps, "steps")
def test_agent(agent, env, episodes):
for i in range(episodes):
state = env.reset()
done = False
while not done:
action = agent.get_action(state)
state, reward, done, _ = env.step(action)
env.render()
def generate_episode_gif(agent, env, filepath):
    """Roll out one greedy episode and save the rendered frames as a GIF.

    Requires imagemagick to be available for the matplotlib writer.
    """
    frames = []
    state = env.reset()
    done = False
    while not done:
        action = agent.get_action(state)
        state, reward, done, _ = env.step(action)
        # capture the rendered frame as an RGB array
        frames.append(env.render(mode='rgb_array'))
    patch = plt.imshow(frames[0])

    def animate(i):
        # FuncAnimation callback: swap frame i into the image artist
        patch.set_data(frames[i])
    anim = animation.FuncAnimation(plt.gcf(), animate, frames=len(frames), interval=50)
    anim.save(filepath, writer='imagemagick', fps=60)
def visualize_value_function(agent, num_pos_buckets, num_speed_buckets):
    """Plot the greedy state-value (max-action Q) over the bucket grid."""
    arr = np.zeros((num_pos_buckets, num_speed_buckets))
    for pos_bucket in range(0, num_pos_buckets):
        for speed_bucket in range(0, num_speed_buckets):
            # value of each state under the greedy policy
            action = agent.get_action([pos_bucket, speed_bucket])
            state_value = agent.q_table[(pos_bucket, speed_bucket, action)]
            arr[pos_bucket, speed_bucket] = state_value
    # axis labels show the continuous values the buckets correspond to
    yticks = ["{0:.2f}".format(value) for value in np.linspace(-1.2, 0.6, num_pos_buckets)]
    xticks = ["{0:.2f}".format(value) for value in np.linspace(-0.07, 0.07, num_speed_buckets)]
    plt.imshow(arr, vmin=np.min(arr), vmax=0, cmap='gist_heat', aspect='auto')
    plt.colorbar()
    plt.xticks(np.arange(0, num_speed_buckets), xticks, rotation='vertical')
    plt.yticks(np.arange(0, num_pos_buckets), yticks)
    plt.ylabel("Position")
    plt.xlabel("Speed")
if __name__ == "__main__":
    NUM_POS_BUCKETS = 50
    NUM_SPEED_BUCKETS = 50

    # train on an unwrapped env (no built-in episode step limit)
    env = gym.make("MountainCar-v0").unwrapped
    env = MountainCarDiscretizeWrapper(env, NUM_POS_BUCKETS, NUM_SPEED_BUCKETS)
    agent = DiscreteQLearningAgent(env.observation_space.nvec, env.action_space.n, 0.99)
    train_agent(agent, env, 1000)
    env.close()

    # fresh env for rendering the learned greedy policy
    env = gym.make("MountainCar-v0").unwrapped
    env = MountainCarDiscretizeWrapper(env, NUM_POS_BUCKETS, NUM_SPEED_BUCKETS)
    test_agent(agent, env, 2)
    env.close()

    visualize_value_function(agent, NUM_POS_BUCKETS, NUM_SPEED_BUCKETS)
|
nilq/baby-python
|
python
|
import argparse
import sys
import numpy as np
import math
import time
class Graph:
    """Directed weighted graph stored as a forward-star adjacency structure.

    ``head[u]`` holds the index of u's most recently added edge record and
    ``next[i]`` chains to the previously added edge of the same node.

    BUG FIX: the original used 0 both as the end-of-chain sentinel and as
    a valid edge index, so the first edge ever added (record 0) was never
    traversable and every chain stopped when it reached record 0. A -1
    sentinel keeps all edges reachable.
    """

    def __init__(self, n):
        self.n = n
        self.to = []
        self.next = []
        self.w = []
        # -1 marks "no edges yet"; 0 is a legitimate edge index
        self.head = [-1] * n

    def add(self, u, v, w):
        """Add a directed edge u -> v with weight w."""
        self.to.append(v)
        self.next.append(self.head[u])
        self.w.append(w)
        self.head[u] = len(self.next) - 1

    def go_from(self, u):
        """Yield (neighbor, weight) for every edge out of u, newest first."""
        now = self.head[u]
        while now != -1:
            yield self.to[now], self.w[now]
            now = self.next[now]
class reverse_reachable_set_collection:
    """Collection of random reverse-reachable (RR) sets for IMM-style
    influence maximisation.

    ``rev_graph`` must be the reversed influence graph. NOTE: ``expand`` and
    ``node_selection`` read the module-level global ``N`` (node count) that is
    assigned in the __main__ block of this script.
    """

    def __init__(self, rev_graph):
        self.sets = []  # list of RR sets, each a set of node ids
        self.rev_graph = rev_graph

    def generate(self, node):
        """Sample one RR set rooted at ``node`` by randomised BFS.

        Each reverse edge (u -> v, w) is kept with probability w (independent
        cascade model). NOTE(review): the root itself is never added to the
        set — confirm this matches the intended RR-set definition.
        """
        queue = [node]
        res = set()
        while queue:
            u = queue.pop(0)
            for v, w in self.rev_graph.go_from(u):
                if v in res:
                    continue
                if np.random.rand() < w:
                    res.add(v)
                    queue.append(v)
        self.sets.append(res)

    def expand(self, upper_bound):
        """Grow the collection until it holds more than ``upper_bound`` sets."""
        while len(self.sets) <= upper_bound:
            # BUG FIX: np.random.randint's high bound is exclusive, so the
            # original randint(0, N - 1) could never pick node N - 1 as a root.
            self.generate(np.random.randint(0, N))

    def node_selection(self, k):
        """Greedy max-coverage: pick k seeds covering the most RR sets.

        Returns (seed_set, fraction_of_RR_sets_covered).
        """
        res = set()
        rd = {}          # rd[i]: indices of the RR sets node i appears in
        count = [0] * N  # count[i]: occurrences of i across not-yet-covered RR sets
        for i in range(0, len(self.sets)):
            for j in self.sets[i]:
                count[j] += 1
                if j in rd:
                    rd[j].append(i)
                else:
                    rd[j] = [i]
        coverd = set()
        while len(res) < k:
            # Pick the node covering the most still-uncovered RR sets.
            s = count.index(max(count))
            res.add(s)
            rr = rd[s].copy()
            for i in rr:
                coverd.add(i)
                # Retire RR set i from every node's bookkeeping so later
                # greedy picks do not count it again.
                for j in self.sets[i]:
                    rd[j].remove(i)
                    count[j] -= 1
        return res, len(coverd) / len(self.sets)
def log_n_k(n, k):
    """Return log(C(n, k)), computed as a difference of log sums to avoid
    overflowing the huge intermediate factorials."""
    log_falling_factorial = 0.0
    for x in range(n - k + 1, n + 1):
        log_falling_factorial += math.log(x)
    log_k_factorial = 0.0
    for x in range(1, k + 1):
        log_k_factorial += math.log(x)
    return log_falling_factorial - log_k_factorial
if __name__ == '__main__':
    start = time.time()
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", type=argparse.FileType('r'), default=sys.stdin)  # graph file
    parser.add_argument("-k", type=int)  # number of seed nodes to select
    parser.add_argument("-m", type=str)  # model name (parsed but not used below)
    parser.add_argument("-t", type=int)  # time budget in seconds
    args = parser.parse_args()
    tmp = args.i.readline().strip().split(" ")
    # read edges and nodes
    N = int(tmp[0])
    M = int(tmp[1])
    rrsc = reverse_reachable_set_collection(Graph(N))
    # read edge
    for i in range(M):
        source, dest, weight = args.i.readline().strip().split(" ")
        source = int(source) - 1  # input is 1-indexed; stored 0-indexed
        dest = int(dest) - 1
        weight = float(weight)
        # Edges are inserted reversed: RR sets are sampled on the transpose graph.
        rrsc.rev_graph.add(dest, source, weight)
    k = args.k
    e = 0.1  # approximation parameter epsilon
    l = (1 + math.log(2) / math.log(N))
    e_dot = math.sqrt(2) * e
    init = time.time()
    last = time.time()
    # IMM-style doubling search over the required number of RR sets (theta).
    for i in range(1, int(math.log(N, 2))):
        t0 = time.time()
        x = N / math.pow(2, i)
        lambda_dot = (2 + 2 / 3 * e_dot) * (
            log_n_k(N, k) + l * math.log(N) + math.log(math.log(N, 2))) * N / math.pow(e_dot, 2)
        theta_i = lambda_dot / x
        rrsc.expand(theta_i)
        seeds, fr = rrsc.node_selection(args.k)
        print(seeds)
        # Stop once the covered influence estimate passes the threshold.
        if N * fr >= (1 + e_dot) * x:
            break
        # Bail out early if another round would likely blow the time budget.
        if time.time() - start + time.time() - t0 >= args.t - 3:
            break
    for seed in seeds:
        print(seed + 1)  # report seeds back in 1-indexed form
|
nilq/baby-python
|
python
|
from neuralqa.retriever import Retriever
from neuralqa.utils import parse_field_content
from elasticsearch import Elasticsearch, ConnectionError, NotFoundError
import logging
logger = logging.getLogger(__name__)
class ElasticSearchRetriever(Retriever):
    """Retriever backed by an Elasticsearch index.

    Keyword overrides accepted via **kwargs are limited to the attributes
    assigned before the update: username, password, body_field, host, port.
    """

    def __init__(self, index_type="elasticsearch", host="localhost", port=9200, username="", password="", **kwargs):
        Retriever.__init__(self, index_type)
        self.username = username
        self.password = password
        self.body_field = ""
        self.host = host
        self.port = port

        allowed_keys = list(self.__dict__.keys())
        # Validate kwargs *before* mutating state or opening a connection
        # (the original only checked after connecting to the cluster).
        rejected_keys = set(kwargs.keys()) - set(allowed_keys)
        if rejected_keys:
            raise ValueError(
                "Invalid arguments in ElasticSearchRetriever constructor:{}".format(rejected_keys))
        self.__dict__.update((k, v)
                             for k, v in kwargs.items() if k in allowed_keys)
        # SECURITY FIX: the original did print(self.__dict__) here, dumping
        # the password to stdout; removed.
        self.es = Elasticsearch(hosts=[{"host": self.host, "port": self.port}],
                                http_auth=(self.username, self.password))
        self.isAvailable = self.es.ping()

    def run_query(self, index_name, search_query, max_documents=5, fragment_size=100, relsnip=True, num_fragments=5, highlight_tags=True):
        """Run a multi_match query over ``self.body_field``.

        Returns a dict with key "status" (bool) and, on success, "took",
        "highlights" and "docs"; on failure, "errormsg".
        """
        # Empty pre/post tags strip the <em> markers from highlight fragments.
        tags = {"pre_tags": [""], "post_tags": [
            ""]} if not highlight_tags else {}
        highlight_params = {
            "fragment_size": fragment_size,
            "fields": {
                self.body_field: tags
            },
            "number_of_fragments": num_fragments
        }
        search_query = {
            "_source": {"includes": [self.body_field]},
            "query": {
                "multi_match": {
                    "query": search_query,
                    "fields": [self.body_field]
                }
            },
            "size": max_documents
        }
        status = True
        results = {}
        if relsnip:
            search_query["highlight"] = highlight_params
        try:
            query_result = self.es.search(
                index=index_name, body=search_query)
            # RelSnip: for each document, concatenate all highlight
            # fragments and return the concatenation as the snippet.
            highlights = [" ".join(hit["highlight"][self.body_field])
                          for hit in query_result["hits"]["hits"] if "highlight" in hit]
            docs = [parse_field_content(self.body_field, hit["_source"])
                    for hit in query_result["hits"]["hits"] if "_source" in hit]
            took = query_result["took"]
            results = {"took": took, "highlights": highlights, "docs": docs}
        except Exception as e:
            # The original caught (ConnectionRefusedError, NotFoundError,
            # Exception) — Exception already subsumes the other two.
            status = False
            results["errormsg"] = str(e)
        results["status"] = status
        return results

    def test_connection(self):
        """Return True iff the cluster responds to a health check."""
        try:
            self.es.cluster.health()
            return True
        except ConnectionError:
            return False
        except Exception as e:
            logger.info(
                'An unknown error occured connecting to ElasticSearch: %s' % e)
            return False
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Record RECORD_SECONDS seconds of mono 16-bit audio from the default input
device and write it to "output1.wav".

From tutorial https://youtu.be/jbKJaHw0yo8
"""
import pyaudio  # use "conda install pyaudio" to install
import wave
from array import array  # NOTE(review): unused in this script
from struct import pack  # NOTE(review): unused in this script

CHUNK = 1024              # frames read per buffer
FORMAT = pyaudio.paInt16  # 16-bit signed integer samples
CHANNELS = 1              # mono capture
RATE = 44100              # sample rate in Hz
RECORD_SECONDS = 5        # capture duration

p = pyaudio.PyAudio()
stream = p.open(format = FORMAT,
                channels = CHANNELS,
                rate = RATE,
                input = True,
                frames_per_buffer = CHUNK)

print("* recording")
frames = []
# RATE/CHUNK buffer reads per second, for RECORD_SECONDS seconds total.
for i in range(0, int(RATE/CHUNK * RECORD_SECONDS)):
    data = stream.read(CHUNK)
    frames.append(data)
print("* done recording")

stream.stop_stream()
stream.close()
p.terminate()

# Dump the raw sample buffers as a WAV file with matching format parameters.
wf = wave.open("output1.wav", "wb")
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()
|
nilq/baby-python
|
python
|
# Given an integer (signed 32 bits), write a function to check whether it is a power of 4.
#
# Example:
# Given num = 16, return true. Given num = 5, return false.
#
# Follow up: Could you solve it without loops/recursion?
class Solution(object):
    def isPowerOfFour(self, num):
        """Return True iff num is a power of 4 (4**0 == 1 counts).

        Loop- and recursion-free, answering the problem's follow-up:
        num must be a positive power of two (exactly one bit set) whose
        exponent is even — powers of four are exactly those.

        :type num: int
        :rtype: bool
        """
        # num & (num - 1) == 0  <=>  num is a power of two (given num > 0);
        # bit_length() - 1 is that power's exponent.
        return num > 0 and num & (num - 1) == 0 and (num.bit_length() - 1) % 2 == 0
|
nilq/baby-python
|
python
|
import logging
from schematics.types import ModelType, StringType, PolyModelType, DictType, ListType
from spaceone.inventory.connector.aws_elasticache_connector.schema.data import Redis, Memcached
from spaceone.inventory.libs.schema.resource import CloudServiceResource, CloudServiceResponse, CloudServiceMeta
from spaceone.inventory.libs.schema.dynamic_field import TextDyField, ListDyField, BadgeDyField
from spaceone.inventory.libs.schema.dynamic_layout import ItemDynamicLayout, TableDynamicLayout
logger = logging.getLogger(__name__)
# meta data details (Memcached)
# memcached_base_detail = ItemDynamicView({'name': "Base Information"})
# memcached_base_detail.data_source = [
# TextDyField.data_source('Cluster', 'data.cluster_name'),
# TextDyField.data_source('Cluster Endpoint', 'data.configuration_endpoint'),
# TextDyField.data_source('Status ', 'data.status'),
# TextDyField.data_source('Engine ', 'data.engine'),
# TextDyField.data_source('Engine Version Compatibility ', 'data.engine_version_compatibility'),
# TextDyField.data_source('Availability Zones ', 'data.availability_zone'),
# TextDyField.data_source('Nodes Pending Deletion ', 'data.nodes_pending_deletion'),
# TextDyField.data_source('Parameter Group ', 'data.parameter_group'),
# ListDyField.data_source('Security Groups ', 'data.security_groups'),
# TextDyField.data_source('Maintenance Window ', 'data.maintenance_window'),
# TextDyField.data_source('Backup Window ', 'data.backup_window'),
# TextDyField.data_source('Creation Time ', 'data.creation_time'),
# TextDyField.data_source('Update Status ', 'data.update_status'),
# TextDyField.data_source('Node type', 'data.node_type'),
# TextDyField.data_source('Number of Nodes', 'data.number_of_nodes'),
# TextDyField.data_source('Number of Nodes Pending Creation', 'data.number_of_nodes_pending_creation'),
# TextDyField.data_source('Subnet Group', 'data.subnet_group'),
# TextDyField.data_source('Notification ARN', 'data.notification_arn'),
# TextDyField.data_source('Backup Retention Period', 'data.backup_retention_period'),
# ]
#
# memcached_node = TableDynamicView({'name': 'Nodes', 'key_path': 'data.nodes'})
# memcached_node.data_source = [
# TextDyField.data_source('Node Name', 'data.cache_node_id'),
# TextDyField.data_source('Status', 'data.cache_node_status'),
# TextDyField.data_source('Port', 'data.endpoint.port'),
# TextDyField.data_source('Endpoint', 'data.endpoint.address'),
# TextDyField.data_source('Parameter Group Status', 'data.parameter_group_status'),
# TextDyField.data_source('Availability Zone', 'data.customer_availability_zone'),
# TextDyField.data_source('Created on', 'data.cache_node_create_time'),
# ]
#
# memcached_metadata = BaseMetaData()
# memcached_metadata.details = [memcached_base_detail, ]
# memcached_metadata.sub_data = [memcached_node, ]
#
#
#
# # meta data details (Redis)
# redis_base_detail = ItemDynamicView({'name': "Base Information"})
# redis_base_detail.data_source = [
# TextDyField.data_source('Name', 'data.cluster_name'),
# TextDyField.data_source('Configuration Endpoint', 'data.configuration_endpoint'),
# TextDyField.data_source('Creation Time', 'data.creation_time'),
# TextDyField.data_source('Status', 'data.status'),
# TextDyField.data_source('Primary Endpoint', 'data.primary_endpoint'),
# TextDyField.data_source('Update Status', 'data.update_action_status'),
# TextDyField.data_source('Engine', 'data.engine'),
# TextDyField.data_source('Engine Version Compatibility', 'data.engine_version_compatibility'),
# TextDyField.data_source('Reader Endpoint', 'data.reader_endpoint'),
# TextDyField.data_source('Node Type', 'data.cluster.cache_node_type'),
# ListDyField.data_source('Availability Zones', 'data.availability_zones'),
# TextDyField.data_source('Shards', 'data.shard_count'),
# TextDyField.data_source('Number of Nodes', 'data.node_count'),
# TextDyField.data_source('Automatic Failover', 'data.cluster.automatic_failover'),
# TextDyField.data_source('Description', 'data.cluster.description'),
# TextDyField.data_source('Parameter Group', 'data.parameter_group'),
# TextDyField.data_source('Subnet Group', 'data.subnet_group'),
# ListDyField.data_source('Security Groups', 'data.security_groups'),
# TextDyField.data_source('Notification ARN', 'data.notification_arn'),
# TextDyField.data_source('Notification status', 'data.notification_status'),
# TextDyField.data_source('Maintenance Window', 'data.maintenance_window'),
# TextDyField.data_source('Backup retention Period', 'data.backup_retention_period'),
# TextDyField.data_source('Backup window', 'data.backup_window'),
# TextDyField.data_source('Backup Node ID', 'data.backup_node_id'),
# TextDyField.data_source('Encryption in-transit', 'data.cluster.transit_encryption_enabled'),
# TextDyField.data_source('Encryption at-rest', 'data.cluster.at_rest_encryption_enabled'),
# TextDyField.data_source('Redis AUTH', 'data.auth_enabled'),
# TextDyField.data_source('AUTH Token Last Modified Date', 'data.auth_token_last_modified_date'),
# TextDyField.data_source('Customer Managed CMK', 'data.cluster.kms_key_id'),
# ]
#
# redis_node = TableDynamicView({'name': 'Nodes', 'key_path': 'data.nodes'})
# redis_node.data_source = [
# TextDyField.data_source('Name', 'data.cluster_name'),
# ]
#
# redis_metadata = BaseMetaData()
# redis_metadata.details = [redis_base_detail, ]
# redis_metadata.sub_data = [redis_node, ]
# Placeholder metadata: the dynamic-layout definitions above are commented
# out, so both resource types currently ship an empty CloudServiceMeta.
memcached_metadata = CloudServiceMeta.set()
redis_metadata = CloudServiceMeta.set()
# Memcached
class ElasticCacheResource(CloudServiceResource):
    # Common base for all ElastiCache resources; pins the cloud service group.
    cloud_service_group = StringType(default='ElastiCache')
class MemcachedResource(ElasticCacheResource):
    # Memcached cluster resource: payload schema plus (currently empty) UI metadata.
    cloud_service_type = StringType(default='Memcached')
    data = ModelType(Memcached)
    cloud_service_meta = ModelType(CloudServiceMeta, default=memcached_metadata)
class MemcachedResponse(CloudServiceResponse):
    # Response envelope carrying a MemcachedResource.
    resource = PolyModelType(MemcachedResource)
# Redis
class RedisResource(ElasticCacheResource):
    # Redis cluster resource: payload schema plus (currently empty) UI metadata.
    cloud_service_type = StringType(default='Redis')
    # BUG FIX: this was ModelType(Memcached) — a copy-paste error. The Redis
    # schema is imported at the top of this file and was otherwise unused.
    data = ModelType(Redis)
    cloud_service_meta = ModelType(CloudServiceMeta, default=redis_metadata)
class RedisResponse(CloudServiceResponse):
    # Response envelope carrying a RedisResource.
    resource = PolyModelType(RedisResource)
|
nilq/baby-python
|
python
|
import unittest
import asyncio
import random
from hummingbot.core.api_throttler.data_types import RateLimit
from hummingbot.core.api_throttler.fixed_rate_api_throttler import FixedRateThrottler
# Single shared rate limit used by every test below — presumably 5 requests
# per 5-second window; confirm argument order against hummingbot's RateLimit.
FIXED_RATE_LIMIT = [
    RateLimit(5, 5)
]
class FixedRateThrottlerUnitTests(unittest.TestCase):
    """Unit tests for FixedRateThrottler under the FIXED_RATE_LIMIT policy."""

    @classmethod
    def setUpClass(cls) -> None:
        super().setUpClass()
        cls.ev_loop: asyncio.AbstractEventLoop = asyncio.get_event_loop()

    def setUp(self) -> None:
        super().setUp()
        # Fresh throttler and counter per test so rate-limit state cannot leak.
        self.fixed_rate_throttler = FixedRateThrottler(rate_limit_list=FIXED_RATE_LIMIT,
                                                       retry_interval=5.0)
        self.request_count = 0

    async def execute_n_requests(self, n: int, throttler: FixedRateThrottler):
        # Helper: issue n throttled requests, counting each one that completes.
        for _ in range(n):
            async with throttler.execute_task():
                self.request_count += 1

    def test_fixed_rate_throttler_above_limit(self):
        # Test Scenario: API requests sent > Rate Limit
        n: int = 10
        limit: int = FIXED_RATE_LIMIT[0].limit
        # Note: We assert a timeout ensuring that the throttler does not wait for the limit interval
        with self.assertRaises(asyncio.exceptions.TimeoutError):
            self.ev_loop.run_until_complete(
                asyncio.wait_for(self.execute_n_requests(n, throttler=self.fixed_rate_throttler), timeout=1.0)
            )
        # Only the first `limit` requests should have completed before the timeout.
        self.assertEqual(limit, self.request_count)

    def test_fixed_rate_throttler_below_limit(self):
        # Test Scenario: API requests sent < Rate Limit
        n: int = random.randint(1, FIXED_RATE_LIMIT[0].limit - 1)
        limit: int = FIXED_RATE_LIMIT[0].limit
        self.ev_loop.run_until_complete(
            self.execute_n_requests(n, throttler=self.fixed_rate_throttler))
        self.assertEqual(self.request_count, n)
        self.assertLess(self.request_count, limit)

    def test_fixed_rate_throttler_equal_limit(self):
        # Test Scenario: API requests sent = Rate Limit
        n = limit = FIXED_RATE_LIMIT[0].limit
        self.ev_loop.run_until_complete(
            self.execute_n_requests(n, throttler=self.fixed_rate_throttler))
        self.assertEqual(self.request_count, limit)
|
nilq/baby-python
|
python
|
from __future__ import print_function
import sys
sys.path.insert(1,"../../")
import logging
from future.utils import PY2
from tests import pyunit_utils as pu
class LoggingContext:
    """Context manager that temporarily applies a level and/or handler to a
    logger.

    On exit the previous level is restored, the handler (if one was given)
    is removed again, and the handler is closed unless ``close=False``.
    """

    def __init__(self, logger, level=None, handler=None, close=True):
        self.logger = logger
        self.level = level
        self.handler = handler
        self.close = close

    def __enter__(self):
        if self.level is not None:
            # Remember the current level so __exit__ can restore it.
            self.old_level = self.logger.level
            self.logger.setLevel(self.level)
        if self.handler:
            self.logger.addHandler(self.handler)

    def __exit__(self, exc_type, exc_value, traceback):
        if self.level is not None:
            self.logger.setLevel(self.old_level)
        if self.handler:
            self.logger.removeHandler(self.handler)
            if self.close:
                self.handler.close()
def _has_handlers(logger):
    """Portability shim: Logger.hasHandlers() does not exist on Python 2,
    so walk the propagation chain by hand there."""
    if not PY2:
        return logger.hasHandlers()
    current = logger
    while current:
        if current.handlers:
            return True
        current = current.parent if current.propagate else None
    return False
def test_h2o_logger_has_no_handler_by_default():
    """As a library, h2o must not attach handlers to its own loggers."""
    from h2o.utils.config import H2OConfigReader
    H2OConfigReader.get_config()  # this module uses the h2o logger, initialising it
    logger = logging.getLogger('h2o')
    assert not _has_handlers(logger)
def test_h2o_logger_inherits_root_logger():
    """The 'h2o' logger must see handlers configured on the root logger
    (i.e. propagation must be left enabled)."""
    from h2o.utils.config import H2OConfigReader
    H2OConfigReader.get_config()  # this module uses the h2o logger, initialising it
    root = logging.getLogger()
    logger = logging.getLogger('h2o')
    console = logging.StreamHandler()
    # Neither logger has handlers until the context installs one on root.
    assert not _has_handlers(root)
    assert not _has_handlers(logger)
    with LoggingContext(root, handler=console, level=logging.INFO):
        assert _has_handlers(root)
        assert _has_handlers(logger)  # inherited via propagation to root
        logging.info("list root handlers: %s", root.handlers)
        logging.info("list h2o handlers: %s", logger.handlers)
# Entry point: run both checks through h2o's pyunit harness.
pu.run_tests([
    test_h2o_logger_has_no_handler_by_default,
    test_h2o_logger_inherits_root_logger
])
|
nilq/baby-python
|
python
|
def print_trace(trace):
    """Print one line per sample site in a (Pyro-style) execution trace.

    Each line reads "<site name> - sampled value <value>"; non-sample nodes
    (e.g. params) are skipped. Assumes trace.nodes maps site names to dicts
    carrying at least 'type', 'name' and 'value' — TODO confirm against the
    tracer producing these traces.
    """
    # Only the node dicts are needed, so iterate .values() — the original
    # iterated .items() and never used the key.
    for node in trace.nodes.values():
        if node['type'] == 'sample':
            print(f'{node["name"]} - sampled value {node["value"]}')
|
nilq/baby-python
|
python
|
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
# Create your models here.
class Profile(models.Model):
    """Model definition for Profile.

    One-to-one extension of Django's built-in User with optional contact
    details; rows are created/saved automatically by post_save signal
    handlers registered in this module.
    """
    # DO_NOTHING: deleting a User leaves the Profile row behind (the FK may dangle).
    user = models.OneToOneField(User, on_delete=models.DO_NOTHING)
    contact = models.CharField(max_length=15, blank=True, null=True)
    city = models.CharField(max_length=10, blank=True, null=True)
    country = models.CharField(max_length=10, blank=True, null=True)

    class Meta:
        """Meta definition for Profile."""
        verbose_name = 'Profile'
        verbose_name_plural = 'Profiles'

    def __str__(self):
        """Return the owning user's string form (the username)."""
        return str(self.user)
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
    """Create a Profile automatically whenever a new User is first saved."""
    if created:
        Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_user_profile(sender, instance, **kwargs):
    """Persist the related Profile every time its User is saved."""
    instance.profile.save()
|
nilq/baby-python
|
python
|
# Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This package contains utility methods for manipulating paths and
filenames for test results and baselines. It also contains wrappers
of a few routines in platform_utils.py so that platform_utils.py can
be considered a 'protected' package - i.e., this file should be
the only file that ever includes platform_utils. This leads to
us including a few things that don't really have anything to do
with paths, unfortunately."""
import errno
import os
import stat
import sys
import platform_utils
import platform_utils_win
import platform_utils_mac
import platform_utils_linux
# Cache some values so we don't have to recalculate them. _basedir is
# used by PathFromBase() and caches the full (native) path to the top
# of the source tree (/src). _baseline_search_path is used by
# ExpectedBaseline() and caches the list of native paths to search
# for baseline results.
_basedir = None
_baseline_search_path = None
class PathNotFound(Exception): pass
def LayoutTestsDir(path=None):
    """Returns the fully-qualified path to the directory containing the input
    data for the specified layout test. The |path| argument is accepted for
    call-site symmetry but is not used."""
    return PathFromBase('third_party', 'WebKit')
def ChromiumBaselinePath(platform=None):
    """Returns the full path to the directory containing expected
    baseline results from chromium ports. If |platform| is None, the
    currently executing platform is used."""
    plat = platform_utils.PlatformName() if platform is None else platform
    return PathFromBase('webkit', 'data', 'layout_tests', 'platform', plat)
def WebKitBaselinePath(platform):
    """Returns the full path to the directory containing expected
    baseline results from WebKit ports."""
    return PathFromBase(
        'third_party', 'WebKit', 'LayoutTests', 'platform', platform)
def BaselineSearchPath(platform=None):
    """Returns the list of directories to search for baselines/results for a
    given platform, in order of preference. Paths are relative to the top of
    the source tree. If |platform| is None, the list for the platform the
    script is currently running on is returned."""
    if platform is None:
        return platform_utils.BaselineSearchPath(False)
    # Dispatch on the platform-name prefix; unrecognised prefixes fall back
    # to the current platform's search path.
    for prefix, module in (('mac', platform_utils_mac),
                           ('win', platform_utils_win),
                           ('linux', platform_utils_linux)):
        if platform.startswith(prefix):
            return module.BaselineSearchPath(True)
    return platform_utils.BaselineSearchPath(False)
def ExpectedBaseline(filename, suffix, platform=None, all_baselines=False):
    """Given a test name, finds where the baseline result is located. The
    result is returned as a pair of values, the absolute path to top of the test
    results directory, and the relative path from there to the results file.

    Both return values will be in the format appropriate for the
    current platform (e.g., "\\" for path separators on Windows).

    If the results file is not found, then None will be returned for the
    directory, but the expected relative pathname will still be returned.

    Args:
       filename: absolute filename to test file
       suffix: file suffix of the expected results, including dot; e.g. '.txt'
           or '.png'.  This should not be None, but may be an empty string.
       platform: layout test platform: 'win', 'linux' or 'mac'. Defaults to the
           current platform.
       all_baselines: If True, return an ordered list of all baseline paths
           for the given platform. If False, return only the first
           one.
    Returns
       a list of ( platform_dir, results_filename ), where
           platform_dir - abs path to the top of the results tree (or test tree)
           results_filename - relative path from top of tree to the results file
       (os.path.join of the two gives you the full path to the file, unless
        None was returned.)
    """
    global _baseline_search_path
    global _search_path_platform
    testname = os.path.splitext(RelativeTestFilename(filename))[0]

    # While we still have tests in both LayoutTests/ and chrome/ we need
    # to strip that outer directory.
    # TODO(pamg): Once we upstream all of chrome/, clean this up.
    platform_filename = testname + '-expected' + suffix
    testdir, base_filename = platform_filename.split('/', 1)

    # The search path is cached per platform. NOTE: _search_path_platform is
    # only ever assigned here; the short-circuit on
    # `_baseline_search_path is None` is what keeps the first call from
    # reading it before assignment.
    if (_baseline_search_path is None) or (_search_path_platform != platform):
        _baseline_search_path = BaselineSearchPath(platform)
        _search_path_platform = platform

    current_platform_dir = ChromiumBaselinePath(PlatformName(platform))

    baselines = []
    foundCurrentPlatform = False
    for platform_dir in _baseline_search_path:
        # Find current platform from baseline search paths and start from there.
        if platform_dir == current_platform_dir:
            foundCurrentPlatform = True
        if foundCurrentPlatform:
            # TODO(pamg): Clean this up once we upstream everything in chrome/.
            if os.path.basename(platform_dir).startswith('chromium'):
                if os.path.exists(os.path.join(platform_dir, platform_filename)):
                    baselines.append((platform_dir, platform_filename))
            else:
                if os.path.exists(os.path.join(platform_dir, base_filename)):
                    baselines.append((platform_dir, base_filename))
            if not all_baselines and baselines:
                return baselines

    # If it wasn't found in a platform directory, return the expected result
    # in the test directory, even if no such file actually exists.
    platform_dir = LayoutTestsDir(filename)
    if os.path.exists(os.path.join(platform_dir, platform_filename)):
        baselines.append((platform_dir, platform_filename))

    if baselines:
        return baselines

    return [(None, platform_filename)]
def ExpectedFilename(filename, suffix):
    """Given a test name, returns an absolute path to its expected results.

    If no expected results are found in any of the searched directories, the
    directory in which the test itself is located will be returned. The return
    value is in the format appropriate for the platform (e.g., "\\" for
    path separators on windows).

    Args:
       filename: absolute filename to test file
       suffix: file suffix of the expected results, including dot; e.g. '.txt'
           or '.png'.  This should not be None, but may be an empty string.

    (The original docstring also described a |platform| argument, but this
    function takes none — the current platform's search order is used.)
    """
    platform_dir, platform_filename = ExpectedBaseline(filename, suffix)[0]
    if platform_dir:
        return os.path.join(platform_dir, platform_filename)
    return os.path.join(LayoutTestsDir(filename), platform_filename)
def RelativeTestFilename(filename):
    """Provide the filename of the test relative to the layout data
    directory as a unix style path (a/b/c)."""
    prefix_len = len(LayoutTestsDir(filename)) + 1
    return _WinPathToUnix(filename[prefix_len:])
def _WinPathToUnix(path):
"""Convert a windows path to use unix-style path separators (a/b/c)."""
return path.replace('\\', '/')
#
# Routines that are arguably platform-specific but have been made
# generic for now (they used to be in platform_utils_*)
#
def FilenameToUri(full_path):
    """Convert a test file to a URI.

    http tests map to http://127.0.0.1:8000/ (with ssl/ tests on https:8443),
    websocket tests to port 8880; everything else — including
    http/tests/local — is returned as a file:// URI.
    """
    LAYOUTTESTS_DIR = "LayoutTests/"
    LAYOUTTEST_HTTP_DIR = "LayoutTests/http/tests/"
    LAYOUTTEST_WEBSOCKET_DIR = "LayoutTests/websocket/tests/"

    relative_path = _WinPathToUnix(RelativeTestFilename(full_path))
    port = None
    use_ssl = False  # NOTE(review): assigned but never read below

    if relative_path.startswith(LAYOUTTEST_HTTP_DIR):
        # LayoutTests/http/tests/ run off port 8000 and ssl/ off 8443
        relative_path = relative_path[len(LAYOUTTEST_HTTP_DIR):]
        port = 8000
    elif relative_path.startswith(LAYOUTTEST_WEBSOCKET_DIR):
        # LayoutTests/websocket/tests/ run off port 8880 and 9323
        # Note: the root is LayoutTests/, not LayoutTests/websocket/tests/
        relative_path = relative_path[len(LAYOUTTESTS_DIR):]
        port = 8880

    # Make LayoutTests/http/tests/local run as local files. This is to mimic the
    # logic in run-webkit-tests.
    # TODO(jianli): Consider extending this to "media/".
    if port and not relative_path.startswith("local/"):
        if relative_path.startswith("ssl/"):
            port += 443
            protocol = "https"
        else:
            protocol = "http"
        return "%s://127.0.0.1:%u/%s" % (protocol, port, relative_path)

    if sys.platform in ('cygwin', 'win32'):
        return "file:///" + GetAbsolutePath(full_path)
    return "file://" + GetAbsolutePath(full_path)
def GetAbsolutePath(path):
    """Returns an absolute UNIX path."""
    absolute = os.path.abspath(path)
    return _WinPathToUnix(absolute)
def MaybeMakeDirectory(*path):
    """Creates the specified directory if it doesn't already exist.

    (Python 2 `except OSError, e` syntax — this whole file is Python 2.)"""
    # This is a reimplementation of google.path_utils.MaybeMakeDirectory().
    try:
        os.makedirs(os.path.join(*path))
    except OSError, e:
        # Already existing is fine; anything else is re-raised.
        if e.errno != errno.EEXIST:
            raise
def PathFromBase(*comps):
    """Returns an absolute filename from a set of components specified
    relative to the top of the source tree. If the path does not exist,
    the exception PathNotFound is raised."""
    # This is a reimplementation of google.path_utils.PathFromBase().
    global _basedir
    if _basedir is None:  # fixed: identity comparison instead of `== None`
        # We compute the top of the source tree by finding the absolute
        # path of this source file, and then climbing up three directories
        # as given in subpath. If we move this file, subpath needs to be updated.
        path = os.path.abspath(__file__)
        subpath = os.path.join('webkit', 'tools', 'layout_tests')
        _basedir = path[:path.index(subpath)]
    path = os.path.join(_basedir, *comps)
    if not os.path.exists(path):
        raise PathNotFound('could not find %s' % path)
    return path
def RemoveDirectory(*path):
    """Recursively removes a directory, even if it's marked read-only.

    Remove the directory located at *path, if it exists.

    shutil.rmtree() doesn't work on Windows if any of the files or directories
    are read-only, which svn repositories and some .svn files are.  We need to
    be able to force the files to be writable (i.e., deletable) as we traverse
    the tree.

    Even with all this, Windows still sometimes fails to delete a file, citing
    a permission error (maybe something to do with antivirus scans or disk
    indexing).  The best suggestion any of the user forums had was to wait a
    bit and try again, so we do that too.  It's hand-waving, but sometimes it
    works. :/
    """
    file_path = os.path.join(*path)
    if not os.path.exists(file_path):
        return

    win32 = False
    if sys.platform == 'win32':
        win32 = True
        # Some people don't have the APIs installed. In that case we'll do without.
        try:
            win32api = __import__('win32api')
            win32con = __import__('win32con')
        except ImportError:
            win32 = False

        def remove_with_retry(rmfunc, path):
            # Clear the read-only bit (and Windows file attributes) first,
            # then retry once after a short sleep if we still get EACCES.
            os.chmod(path, stat.S_IWRITE)
            if win32:
                win32api.SetFileAttributes(path, win32con.FILE_ATTRIBUTE_NORMAL)
            try:
                return rmfunc(path)
            except EnvironmentError, e:
                if e.errno != errno.EACCES:
                    raise
                print 'Failed to delete %s: trying again' % repr(path)
                # NOTE(review): `time` is not imported anywhere in this file,
                # so this retry path would raise NameError — confirm and add
                # `import time` at module level.
                time.sleep(0.1)
                return rmfunc(path)
    else:
        def remove_with_retry(rmfunc, path):
            # POSIX: symlinks must be removed, never rmdir'd through.
            if os.path.islink(path):
                return os.remove(path)
            else:
                return rmfunc(path)

    # Bottom-up walk so children are deleted before their parent directories.
    for root, dirs, files in os.walk(file_path, topdown=False):
        # For POSIX:  making the directory writable guarantees removability.
        # Windows will ignore the non-read-only bits in the chmod value.
        os.chmod(root, 0770)
        for name in files:
            remove_with_retry(os.remove, os.path.join(root, name))
        for name in dirs:
            remove_with_retry(os.rmdir, os.path.join(root, name))

    remove_with_retry(os.rmdir, file_path)
#
# Wrappers around platform_utils
#
def PlatformName(platform=None):
    """Returns the appropriate chromium platform name for |platform|. If
    |platform| is None, returns the name of the chromium platform on the
    currently running system. If |platform| is of the form 'chromium-*',
    it is returned unchanged, otherwise 'chromium-' is prepended."""
    if platform is None:  # fixed: identity comparison instead of `== None`
        return platform_utils.PlatformName()
    if not platform.startswith('chromium-'):
        platform = "chromium-" + platform
    return platform
# Thin pass-throughs to platform_utils, so the rest of the harness never
# imports platform_utils directly (see the module docstring: this file is
# the designated sole consumer of that "protected" package).
def PlatformVersion():
    return platform_utils.PlatformVersion()

def LigHTTPdExecutablePath():
    return platform_utils.LigHTTPdExecutablePath()

def LigHTTPdModulePath():
    return platform_utils.LigHTTPdModulePath()

def LigHTTPdPHPPath():
    return platform_utils.LigHTTPdPHPPath()

def WDiffPath():
    return platform_utils.WDiffPath()

def TestShellPath(target):
    return platform_utils.TestShellPath(target)

def ImageDiffPath(target):
    return platform_utils.ImageDiffPath(target)

def LayoutTestHelperPath(target):
    return platform_utils.LayoutTestHelperPath(target)

def FuzzyMatchPath():
    return platform_utils.FuzzyMatchPath()

def ShutDownHTTPServer(server_pid):
    return platform_utils.ShutDownHTTPServer(server_pid)

def KillAllTestShells():
    platform_utils.KillAllTestShells()
|
nilq/baby-python
|
python
|
'''
File name : stage.py
Author : Jinwook Jung
Created on : Thu 25 Jul 2019 11:57:16 PM EDT
Last modified : 2020-01-06 13:27:13
Description :
'''
import subprocess, os, sys, random, yaml, time
from subprocess import Popen, PIPE, CalledProcessError
from abc import ABC, abstractmethod
def run_shell_cmd(cmd, f=None):
    """Run ``cmd`` through the shell, echoing each output line to stdout
    (prefixed with ">>> ") and optionally mirroring it to file object ``f``.

    stderr is folded into stdout. Returns the command's exit status (the
    original returned None, so callers ignoring the result are unaffected).
    """
    p = subprocess.Popen(cmd,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT,
                         shell=True)
    for line in iter(p.stdout.readline, b''):
        print(">>> {}".format(line.rstrip().decode("utf-8")))
        # FIXME: this writes the repr of a bytes object, e.g. "b'...'"
        if f is not None:
            f.write("{}\n".format(str(line.rstrip())))
    # Fix: the original never closed the pipe nor reaped the child,
    # leaking a file descriptor and a zombie process per invocation.
    p.stdout.close()
    return p.wait()
class Stage(ABC):
    """Abstract base class for one flow stage: resolves all input/output and
    library paths from the RDF configuration and defines the run-script
    contract that concrete stages must implement."""

    def __init__(self, rdf, stage_dir, prev_out_dir, user_parms, write_run_scripts=False):
        ''' Initialize the instance and populate the necessary/useful
        variables.

        NOTE(review): write_run_scripts is accepted but never used here —
        confirm whether subclasses rely on it. '''
        self.rdf_path = rdf.config["rdf_path"]
        self.config = rdf.config
        self.design_dir, self.lib_dir = rdf.design_dir, rdf.lib_dir
        self.design_config, self.lib_config = rdf.design_config, rdf.lib_config
        self.stage_dir = stage_dir
        self.prev_out_dir = prev_out_dir
        self.design_name = rdf.design_config["name"]

        # Output of previous stage
        self.in_def, self.in_verilog, self.in_sdc = (None,)*3
        if prev_out_dir is not None:
            self.in_def = "{}/{}.def".format(prev_out_dir, self.design_name)
            self.in_verilog = "{}/{}.v".format(prev_out_dir, self.design_name)
            self.in_sdc = "{}/{}.sdc".format(prev_out_dir, self.design_name)
        else:
            # If this is the first stage, just use the original design file
            self.in_verilog = None
            self.in_def = None
            self.in_sdc = "{}/{}.sdc".format(self.rdf_path, self.design_name)

        # Absolute paths of the design's source verilog files.
        self.design_verilogs = ["{}/{}".format(self.design_dir, _) \
                                for _ in self.design_config["verilog"]]

        # Library/PDK
        self.lib_name = self.lib_config["LIBRARY_NAME"]
        self.liberty = "{}/{}".format(self.lib_dir, self.lib_config["LIBERTY"])
        self.lef = "{}/{}".format(self.lib_dir, self.lib_config["LEF"])
        self.tracks = "{}/{}".format(self.lib_dir, self.lib_config["TRACKS_INFO_FILE"])

        # (TODO) User parameters
        self.user_parms = user_parms  # List of parameters (key/value pairs)

    def create_run_script_template(self):
        """Write a run.sh skeleton into the stage directory that exports the
        RDF environment variables used by tool-specific scripts."""
        with open("{}/run.sh".format(self.stage_dir), 'w') as f:
            f.write("#!/bin/bash\n\n")
            f.write("export RDF_PATH=\"{}\"\n".format(self.rdf_path))
            f.write("export RDF_STAGE_DIR=\"{}\"\n".format(self.stage_dir))
            f.write("export RDF_TOOL_BIN_PATH=\"${RDF_PATH}/bin\"\n")
            f.write("\n")

    @abstractmethod
    def write_run_scripts(self):
        # Concrete stages emit their tool-specific run scripts here.
        pass

    @abstractmethod
    def run(self):
        # Concrete stages execute the stage here.
        pass
|
nilq/baby-python
|
python
|
'''
Given multiple fasta files (corresponding to different organisms),
use mafft to create the multiple sequence alignment for the given target.
Then parse the alignments to create a consensus sequence.
'''
import pandas as pd
import os
import alignment_funcs
from Bio import SeqIO
def convert_indices(x, alignment = None, col = None):
    '''
    Map one row's residue number (in column ``col``) to its alignment
    column via alignment_funcs.column_from_residue_number. Intended to be
    used with DataFrame.apply(axis=1).
    '''
    row_id = x['ID']
    residue_number = x[col]
    return alignment_funcs.column_from_residue_number(alignment, row_id, residue_number)
def main(arglist):
    '''
    Align the combined input fastas and remap excluded-region coordinates.

    All inputs/outputs come from the snakemake global object, not from
    ``arglist`` (which is unused). Steps: concatenate every input fasta
    into a temporary multi-fasta, align it with
    alignment_funcs.write_alignment, then translate the start/end residue
    coordinates of previously excluded regions into alignment-column
    coordinates and write them to ``excluded2``.
    '''
    fastas = snakemake.input['fastas']
    outfile = snakemake.output['outfasta']
    excluded2 = snakemake.output['excluded2']
    excluded1_files = snakemake.input['excluded_regions_files']
    name = snakemake.params['name']
    #combine fastas to single file
    temp_fasta = 'temp_multi_%s.fa' % name
    record_list = []
    with open(temp_fasta, "w") as g:
        for i in fastas:
            records = SeqIO.parse(i, "fasta")
            for j in records:
                record_list.append(j)
        # NOTE(review): SeqIO.write re-opens temp_fasta by name; the handle
        # ``g`` is never written to directly.
        SeqIO.write(record_list, temp_fasta, "fasta")
    alignment = alignment_funcs.write_alignment(temp_fasta, name, outfile)
    os.remove(temp_fasta)
    ex_df = pd.concat([pd.read_csv(i) for i in excluded1_files])
    if not ex_df.empty:
        # Convert original residue indices to alignment-column indices.
        ex_df['new_start'] = ex_df.apply(convert_indices, alignment = alignment, col = 'start', axis = 1)
        ex_df['new_end'] = ex_df.apply(convert_indices, alignment = alignment, col = 'end', axis = 1)
        ex_df.drop(['start', 'end'], axis = 1, inplace = True)
        ex_df['ID'] = name
        ex_df.rename(columns = {'new_start':'start', 'new_end':'end'}, inplace = True)
    # Written unconditionally so the snakemake output file always exists.
    ex_df.to_csv(excluded2, index = False)
if __name__ == '__main__':
    # BUG FIX: ``sys`` was used here but never imported anywhere in this
    # module, so running the script directly raised NameError.
    import sys
    main(sys.argv[1:])
|
nilq/baby-python
|
python
|
# Generated by rpcgen.py at Mon Mar 8 11:09:57 2004
from .mountconstants import *
from .mountpacker import *
import rpc
__all__ = ['BadDiscriminant', 'fhstatus', 'mountres3_ok', 'mountres3', 'mountbody', 'groupnode', 'exportnode']
def init_type_class(klass, ncl):
    """Attach the connection object's packer/unpacker to a type instance."""
    klass.ncl = ncl
    klass.packer, klass.unpacker = ncl.packer, ncl.unpacker
def assert_not_none(klass, *args):
    """Raise TypeError if any positional value is None (uninitialized field).

    Fix: compare with ``is None`` instead of ``== None`` so objects with a
    custom __eq__ cannot fool (or break) the check.
    """
    for arg in args:
        if arg is None:
            raise TypeError(repr(klass) + " has uninitialized data")
def pack_objarray(ncl, items):
    """Pack a variable-length XDR array of objects: a length word, then
    each item's own pack() output.

    Fix: parameter renamed from ``list``, which shadowed the builtin;
    call sites in this generated module pass it positionally.
    """
    # FIXME: Support for length assertion.
    ncl.packer.pack_uint(len(items))
    for item in items:
        item.pack()
def unpack_objarray(ncl, klass):
    """Unpack a counted XDR array: read the length, then construct and
    unpack that many ``klass`` instances."""
    count = ncl.unpacker.unpack_uint()
    objs = []
    for _ in range(count):
        instance = klass(ncl)
        instance.unpack()
        objs.append(instance)
    return objs
class BadDiscriminant(rpc.RPCException):
    """Raised when an XDR union discriminant matches no known arm."""

    def __init__(self, value, klass):
        self.value = value
        self.klass = klass

    def __str__(self):
        return f"Bad Discriminant {self.value} in {self.klass}"
class fhstatus:
    # XDR definition:
    # union fhstatus switch (unsigned fhs_status) {
    # case 0:
    #     fhandle2 fhs_fhandle;
    # default:
    #     void;
    # };
    """MOUNT v1/v2 result union: carries a file handle only on status 0."""

    def __init__(self, ncl, fhs_status=None, fhs_fhandle=None):
        init_type_class(self, ncl)
        self.fhs_status = fhs_status
        self.fhs_fhandle = fhs_fhandle
        # Shortcut to the currently active union arm.
        self.arm = None

    def __repr__(self):
        body = " fhs_status=%s fhs_fhandle=%s" % (str(self.fhs_status), str(self.fhs_fhandle))
        if len(body) > 70:
            body = body[:70] + "..."
        return "<fhstatus:%s>" % body

    def pack(self, dummy=None):
        """Pack the discriminant; pack the handle only when status is 0."""
        assert_not_none(self, self.fhs_status)
        self.packer.pack_unsigned(self.fhs_status)
        if self.fhs_status != 0:
            return
        assert_not_none(self, self.fhs_fhandle)
        self.packer.pack_fhandle2(self.fhs_fhandle)
        self.arm = self.fhs_fhandle

    def unpack(self):
        """Unpack the discriminant; unpack the handle only when status is 0."""
        self.fhs_status = self.unpacker.unpack_unsigned()
        if self.fhs_status == 0:
            self.fhs_fhandle = self.unpacker.unpack_fhandle2()
            self.arm = self.fhs_fhandle
class mountres3_ok:
    # XDR definition:
    # struct mountres3_ok {
    #     fhandle3 fhandle;
    #     int auth_flavors<>;
    # };
    """MNT3 success body: the file handle plus the list of accepted
    authentication flavors."""

    def __init__(self, ncl, fhandle=None, auth_flavors=None):
        init_type_class(self, ncl)
        self.fhandle = fhandle
        # auth_flavors is a variable-length XDR array of ints (a Python list).
        self.auth_flavors = auth_flavors

    def __repr__(self):
        s = " fhandle=%s auth_flavors=%s" % (str(self.fhandle), str(self.auth_flavors))
        if len(s) > 70: s = s[:70] + "..."
        return "<mountres3_ok:%s>" % s

    def pack(self, dummy=None):
        assert_not_none(self, self.fhandle, self.auth_flavors)
        self.packer.pack_fhandle3(self.fhandle)
        # BUG FIX: auth_flavors is declared ``int auth_flavors<>`` and
        # unpack() reads it with unpack_array, but the old code packed it
        # with a single pack_int, producing asymmetric wire data. Pack the
        # length-prefixed array instead (xdrlib Packer.pack_array).
        self.packer.pack_array(self.auth_flavors, self.packer.pack_int)

    def unpack(self):
        self.fhandle = self.unpacker.unpack_fhandle3()
        self.auth_flavors = self.unpacker.unpack_array(self.unpacker.unpack_int)
class mountres3:
    # XDR definition:
    # union mountres3 switch (mountstat3 fhs_status) {
    # case MNT3_OK:
    #     mountres3_ok mountinfo;
    # default:
    #     void;
    # };
    """MOUNT v3 result union: carries ``mountinfo`` only when the
    discriminant ``fhs_status`` equals MNT3_OK."""

    def __init__(self, ncl, fhs_status=None, mountinfo=None):
        init_type_class(self, ncl)
        self.fhs_status = fhs_status
        self.mountinfo = mountinfo
        # Shortcut to current arm
        self.arm = None

    def __repr__(self):
        s = " fhs_status=%s mountinfo=%s" % (str(self.fhs_status), str(self.mountinfo))
        if len(s) > 70: s = s[:70] + "..."
        return "<mountres3:%s>" % s

    def pack(self, dummy=None):
        """Pack the discriminant, then the mountinfo arm if MNT3_OK."""
        assert_not_none(self, self.fhs_status)
        self.packer.pack_mountstat3(self.fhs_status)
        if self.fhs_status == MNT3_OK:
            assert_not_none(self, self.mountinfo)
            self.mountinfo.pack()
            self.arm = self.mountinfo
        else:
            pass

    def unpack(self):
        """Unpack the discriminant and, on MNT3_OK, a mountres3_ok body."""
        self.fhs_status = self.unpacker.unpack_mountstat3()
        if self.fhs_status == MNT3_OK:
            self.mountinfo = mountres3_ok(self)
            self.mountinfo.unpack()
            self.arm = self.mountinfo
        else:
            pass
class mountbody:
    # XDR definition:
    # struct mountbody {
    #     name ml_hostname;
    #     dirpath ml_directory;
    #     mountlist ml_next;
    # };
    """One entry of the server's mount list: which host mounted which
    directory, linked to the next entry (or None at the tail)."""

    def __init__(self, ncl, ml_hostname=None, ml_directory=None, ml_next=None):
        init_type_class(self, ncl)
        self.ml_hostname = ml_hostname
        self.ml_directory = ml_directory
        self.ml_next = ml_next

    def __repr__(self):
        s = " ml_hostname=%s ml_directory=%s ml_next=%s" % (str(self.ml_hostname), str(self.ml_directory), str(self.ml_next))
        if len(s) > 70: s = s[:70] + "..."
        return "<mountbody:%s>" % s

    def pack(self, dummy=None):
        assert_not_none(self, self.ml_hostname, self.ml_directory, self.ml_next)
        self.packer.pack_name(self.ml_hostname)
        self.packer.pack_dirpath(self.ml_directory)
        self.packer.pack_mountlist(self.ml_next)

    def unpack(self):
        self.ml_hostname = self.unpacker.unpack_name()
        self.ml_directory = self.unpacker.unpack_dirpath()
        self.ml_next = self.unpacker.unpack_mountlist()
class groupnode:
    # XDR definition:
    # struct groupnode {
    #     name gr_name;
    #     groups gr_next;
    # };
    """Linked-list node of group names allowed to access an export."""

    def __init__(self, ncl, gr_name=None, gr_next=None):
        init_type_class(self, ncl)
        self.gr_name = gr_name
        self.gr_next = gr_next

    def __repr__(self):
        text = " gr_name=%s gr_next=%s" % (str(self.gr_name), str(self.gr_next))
        if len(text) > 70:
            text = text[:70] + "..."
        return "<groupnode:%s>" % text

    def pack(self, dummy=None):
        assert_not_none(self, self.gr_name, self.gr_next)
        self.packer.pack_name(self.gr_name)
        self.packer.pack_groups(self.gr_next)

    def unpack(self):
        self.gr_name = self.unpacker.unpack_name()
        self.gr_next = self.unpacker.unpack_groups()
class exportnode:
    # XDR definition:
    # struct exportnode {
    #     dirpath ex_dir;
    #     groups ex_groups;
    #     exports ex_next;
    # };
    """Linked-list node describing one exported directory and the groups
    permitted to mount it."""

    def __init__(self, ncl, ex_dir=None, ex_groups=None, ex_next=None):
        init_type_class(self, ncl)
        self.ex_dir = ex_dir
        self.ex_groups = ex_groups
        self.ex_next = ex_next

    def __repr__(self):
        text = " ex_dir=%s ex_groups=%s ex_next=%s" % (str(self.ex_dir), str(self.ex_groups), str(self.ex_next))
        if len(text) > 70:
            text = text[:70] + "..."
        return "<exportnode:%s>" % text

    def pack(self, dummy=None):
        assert_not_none(self, self.ex_dir, self.ex_groups, self.ex_next)
        self.packer.pack_dirpath(self.ex_dir)
        self.packer.pack_groups(self.ex_groups)
        self.packer.pack_exports(self.ex_next)

    def unpack(self):
        self.ex_dir = self.unpacker.unpack_dirpath()
        self.ex_groups = self.unpacker.unpack_groups()
        self.ex_next = self.unpacker.unpack_exports()
|
nilq/baby-python
|
python
|
def get_layers(data, wide, tall):
    """Yield successive wide*tall-sized chunks (image layers) of data."""
    layer_size = wide * tall
    for start in range(0, len(data), layer_size):
        yield data[start:start + layer_size]
def parse_infos(layer):
    """Count occurrences of each value in the layer.

    Returns a collections.Counter (a dict subclass, so existing callers
    that treat the result as a plain mapping are unaffected). Replaces a
    hand-rolled counting loop with the stdlib equivalent.
    """
    from collections import Counter
    return Counter(layer)
def merge_layers(layers):
    """Flatten a stack of layers into one string; '2' pixels are transparent.

    The first layer is on top: a '2' lets the layers below show through.
    """
    stack = list(layers)
    merged = ["0"] * len(stack[0])
    for layer in reversed(stack):
        merged = [pixel if pixel != "2" else below
                  for pixel, below in zip(layer, merged)]
    return "".join(merged)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Maziar Raissi
"""
import autograd.numpy as np
from autograd import value_and_grad
from Utilities import fetch_minibatch_rnn, stochastic_update_Adam, activation
class RecurrentNeuralNetworks:
    """Simple (Elman-style) recurrent network trained on MSE with autograd.

    All parameters (U, b, W for the recurrent part; V, c for the output
    layer) are stored flattened in a single 1-D vector ``self.hyp`` so
    autograd can differentiate the whole model at once; forward_pass
    re-slices that vector with hand-computed offsets.
    """

    def __init__(self, X, Y, hidden_dim,
                 max_iter = 2000, N_batch = 1, monitor_likelihood = 10, lrate = 1e-3):
        # X has the form lags x data x dim
        # Y has the form data x dim
        self.X = X
        self.Y = Y
        self.X_dim = X.shape[-1]
        self.Y_dim = Y.shape[-1]
        self.hidden_dim = hidden_dim
        self.lags = X.shape[0]
        self.max_iter = max_iter
        self.N_batch = N_batch
        self.monitor_likelihood = monitor_likelihood   # print MSE every N iters
        self.hyp = self.initialize_RNN()

        # Adam optimizer parameters (first/second moment accumulators)
        self.mt_hyp = np.zeros(self.hyp.shape)
        self.vt_hyp = np.zeros(self.hyp.shape)
        self.lrate = lrate

        print("Total number of parameters: %d" % (self.hyp.shape[0]))

    def initialize_RNN(self):
        """Build the flat parameter vector [U, b, W, V, c].

        U and V use Xavier/Glorot uniform init (±sqrt(6/(fan_in+fan_out)));
        biases start at zero and the recurrent matrix W at identity.
        """
        hyp = np.array([])
        Q = self.hidden_dim
        # Input-to-hidden weights, hidden bias, recurrent weights.
        U = -np.sqrt(6.0/(self.X_dim+Q)) + 2.0*np.sqrt(6.0/(self.X_dim+Q))*np.random.rand(self.X_dim,Q)
        b = np.zeros((1,Q))
        W = np.eye(Q)
        hyp = np.concatenate([hyp, U.ravel(), b.ravel(), W.ravel()])
        # Hidden-to-output weights and output bias.
        V = -np.sqrt(6.0/(Q+self.Y_dim)) + 2.0*np.sqrt(6.0/(Q+self.Y_dim))*np.random.rand(Q,self.Y_dim)
        c = np.zeros((1,self.Y_dim))
        hyp = np.concatenate([hyp, V.ravel(), c.ravel()])
        return hyp

    def forward_pass(self, X, hyp):
        """Run the RNN over all lags and return the output predictions.

        X: lags x batch x X_dim; returns batch x Y_dim.
        """
        Q = self.hidden_dim
        H = np.zeros((X.shape[1],Q))   # initial hidden state: zeros
        # Offsets into the flat parameter vector for U, b, W.
        idx_1 = 0
        idx_2 = idx_1 + self.X_dim*Q
        idx_3 = idx_2 + Q
        idx_4 = idx_3 + Q*Q
        U = np.reshape(hyp[idx_1:idx_2], (self.X_dim,Q))
        b = np.reshape(hyp[idx_2:idx_3], (1,Q))
        W = np.reshape(hyp[idx_3:idx_4], (Q,Q))
        # Recurrence: H_t = act(H_{t-1} W + X_t U + b), one step per lag.
        for i in range(0, self.lags):
            H = activation(np.matmul(H,W) + np.matmul(X[i,:,:],U) + b)
        # Output layer parameters follow the recurrent block in the vector.
        idx_1 = idx_4
        idx_2 = idx_1 + Q*self.Y_dim
        idx_3 = idx_2 + self.Y_dim
        V = np.reshape(hyp[idx_1:idx_2], (Q,self.Y_dim))
        c = np.reshape(hyp[idx_2:idx_3], (1,self.Y_dim))
        Y = np.matmul(H,V) + c
        return Y

    def MSE(self, hyp):
        """Mean-squared error on the current minibatch (set by train())."""
        X = self.X_batch
        Y = self.Y_batch
        Y_star = self.forward_pass(X, hyp)
        return np.mean((Y-Y_star)**2)

    def train(self):
        """Minibatch Adam training loop; prints MSE periodically."""
        # Gradients from autograd
        MSE = value_and_grad(self.MSE)

        for i in range(1,self.max_iter+1):
            # Fetch minibatch
            self.X_batch, self.Y_batch = fetch_minibatch_rnn(self.X, self.Y, self.N_batch)

            # Compute likelihood_UB and gradients
            MSE_value, D_MSE = MSE(self.hyp)

            # Update hyper-parameters
            self.hyp, self.mt_hyp, self.vt_hyp = stochastic_update_Adam(self.hyp, D_MSE, self.mt_hyp, self.vt_hyp, self.lrate, i)

            if i % self.monitor_likelihood == 0:
                print("Iteration: %d, MSE: %.5e" % (i, MSE_value))
|
nilq/baby-python
|
python
|
from src.computation.computation_handler import ComputationHandler
class NoComputation(ComputationHandler):
    """Computation handler whose compute step intentionally does nothing."""

    def __init__(self):
        super().__init__()

    def compute(self):
        """No-op: this handler performs no computation."""
        pass
|
nilq/baby-python
|
python
|
from django.utils import timezone
from rest_framework import serializers
from ..reservation_api.models import Reservation
from ..subscription_api.models import Subscription
class StaffChoiseField(serializers.ChoiceField):
    """ChoiceField whose swagger schema type is forced to 'integer'.

    NOTE(review): the class name contains a typo ("Choise" vs "Choice");
    renaming would touch external references, so it is only flagged here.
    """
    class Meta:
        # Overrides the generated OpenAPI/Swagger schema for this field.
        swagger_schema_fields = {
            'type': 'integer'
        }
class StaffReservationSerializer(serializers.ModelSerializer):
    """Staff-facing serializer for Reservation: exposes camelCase datetime
    fields and validates that edits stay within the originally requested
    window and last at least 30 minutes."""

    # Read-only display of the subscribing member; replaced by the member's
    # full name in to_representation().
    member = serializers.CharField(source='subscription.member', read_only=True)
    status = StaffChoiseField(Reservation.STATUS, required=False)
    reservedStart = serializers.DateTimeField(source='reserved_start')
    reservedEnd = serializers.DateTimeField(source='reserved_end')

    class Meta:
        model = Reservation
        fields = ('id', 'member', 'subscription', 'trainer', 'status', 'reservedStart', 'reservedEnd', 'updated')
        extra_kwargs = {'subscription': {'read_only': True}}

    def validate(self, attrs):
        """Check the edited window against the existing reservation.

        NOTE(review): accesses attrs['reserved_start'/'reserved_end']
        directly (KeyError on partial updates omitting them) and compares
        against self.instance (AttributeError when used for create, where
        instance is None) -- confirm this serializer is update-only.
        """
        reserved_start = attrs['reserved_start']
        reserved_end = attrs['reserved_end']

        # Staff may only shrink the window the member originally requested.
        if reserved_start < self.instance.reserved_start:
            raise serializers.ValidationError({"reservedStart": "Must come after requested "
                                                                "reservation start date-time"})
        if reserved_end > self.instance.reserved_end:
            raise serializers.ValidationError({"reservedEnd": "Must come before requested "
                                                              "reservation end date-time"})
        if reserved_start > reserved_end:
            raise serializers.ValidationError({"reservedEnd": "Must come after reservation start date-time"})

        # Enforce a minimum reservation length of 30 minutes.
        delta = reserved_end - reserved_start
        if delta < timezone.timedelta(minutes=30):
            raise serializers.ValidationError({"reservedRange": "The date-time difference between reservations "
                                                                "must be at least 30 minutes"})
        return attrs

    def to_representation(self, instance):
        """Replace raw FK/choice values with human-readable strings."""
        data = super().to_representation(instance)
        data['member'] = str(instance.subscription.member.get_full_name())
        data['subscription'] = str(instance.subscription.card)
        data['trainer'] = instance.trainer.get_full_name() if data['trainer'] else 'Not assigned'
        data['status'] = str(instance.get_status_display())
        return data
class StaffSubscriptionSerializer(serializers.ModelSerializer):
    """Staff-facing serializer for Subscription: everything read-only
    except visits_count; member/card are rendered human-readable."""

    email = serializers.CharField(source="member.email", read_only=True)

    class Meta:
        model = Subscription
        fields = ('id', 'email', 'member', 'card', 'visits_count', 'purchased', 'expires')
        extra_kwargs = {
            'member': {'read_only': True},
            'card': {'read_only': True},
            'visits_count': {'required': True},   # the only staff-editable field
            'purchased': {'read_only': True},
            'expires': {'read_only': True}
        }

    def to_representation(self, instance):
        """Replace member/card PKs with display strings."""
        data = super().to_representation(instance)
        data['member'] = instance.member.get_full_name()
        data['card'] = str(instance.card)
        return data
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import ray
import numpy as np
import time, sys, os
sys.path.append("..")
from util.printing import pd
# A variation of the game of life code used in the Ray Crash Course.
@ray.remote
class RayGame:
    # TODO: Game memory grows unbounded; trim older states?
    """Ray actor that owns one Game of Life board and its state history.

    Rule evaluation is delegated to a separate RayConwaysRules actor so it
    can run in another process.
    """

    def __init__(self, grid_size, rules_id):
        # grid_size: edge length of the square board.
        # rules_id: actor handle of a RayGame.RayConwaysRules instance.
        self.states = [RayGame.State(size = grid_size)]
        self.rules_id = rules_id

    def get_states(self):
        """Return the full list of State objects accumulated so far."""
        return self.states

    def step(self, num_steps = 1):
        """Take 1 or more steps, returning a list of new states."""
        start_index = len(self.states)
        for _ in range(num_steps):
            new_state_id = self.rules_id.step.remote(self.states[-1])
            self.states.append(ray.get(new_state_id))
        # BUG FIX: previously returned self.states[start_index:-1], which
        # dropped the most recent state (and returned [] for num_steps=1).
        # Slice to the end so exactly the num_steps new states come back.
        return self.states[start_index:]  # return the new states only!

    @ray.remote
    class RayConwaysRules:
        """
        Apply the rules to a state and return a new state.
        """
        def step(self, state):
            """
            Determine the next values for all the cells, based on the current
            state. Creates a new State with the changes.
            """
            new_grid = state.grid.copy()
            for i in range(state.size):
                for j in range(state.size):
                    lns = self.live_neighbors(i, j, state)
                    new_grid[i][j] = self.apply_rules(i, j, lns, state)
            new_state = RayGame.State(grid = new_grid)
            return new_state

        def apply_rules(self, i, j, live_neighbors, state):
            """
            Determine next value for a cell, which could be the same.
            The rules for Conway's Game of Life:
                Any live cell with fewer than two live neighbours dies, as if by underpopulation.
                Any live cell with two or three live neighbours lives on to the next generation.
                Any live cell with more than three live neighbours dies, as if by overpopulation.
                Any dead cell with exactly three live neighbours becomes a live cell, as if by reproduction.
            """
            cell = state.grid[i][j]  # default value is no change in state
            if cell == 1:
                if live_neighbors < 2 or live_neighbors > 3:
                    cell = 0
            elif live_neighbors == 3:
                cell = 1
            return cell

        def live_neighbors(self, i, j, state):
            """
            Wrap at boundaries (i.e., treat the grid as a 2-dim "toroid")
            To wrap at boundaries, when k-1=-1, that wraps itself;
            for k+1=state.size, we mod it (which works for -1, too)
            For simplicity, we count the cell itself, then subtact it
            """
            s = state.size
            g = state.grid
            return sum([g[i2%s][j2%s] for i2 in [i-1,i,i+1] for j2 in [j-1,j,j+1]]) - g[i][j]

    class State:
        """
        Represents a grid of game cells.
        For simplicity, require square grids.
        Each instance is considered immutable.
        """
        def __init__(self, grid = None, size = 10):
            """
            Create a State. Specify either a grid of cells or a size, for
            which an size x size grid will be computed with random values.
            (For simplicity, only use square grids.)
            """
            if type(grid) != type(None):  # avoid annoying AttributeError
                assert grid.shape[0] == grid.shape[1]
                self.size = grid.shape[0]
                self.grid = grid.copy()
            else:
                self.size = size
                # Seed: random initialization
                self.grid = np.random.randint(2, size = size*size).reshape((size, size))

        def living_cells(self):
            """
            Returns ([x1, x2, ...], [y1, y2, ...]) for all living cells.
            Simplifies graphing.
            """
            cells = [(i,j) for i in range(self.size) for j in range(self.size) if self.grid[i][j] == 1]
            return zip(*cells)

        def __str__(self):
            s = ' |\n| '.join([' '.join(map(lambda x: '*' if x else ' ', self.grid[i])) for i in range(self.size)])
            return '| ' + s + ' |'
def time_ray_games(num_games = 1, max_steps = 100, batch_size = 1, grid_size = 100):
    """Benchmark: run ``num_games`` Game of Life actors for ``max_steps``
    steps each (submitted in batches of ``batch_size``) and print the
    total elapsed time.

    Each game gets its own rules actor. All step futures are submitted
    without waiting; the single ray.get at the end blocks until every
    batch has finished (each future resolves to that batch's new states).
    """
    rules_ids = []
    game_ids = []
    for i in range(num_games):
        rules_id = RayGame.RayConwaysRules.remote()
        game_id = RayGame.remote(grid_size, rules_id)
        game_ids.append(game_id)
        rules_ids.append(rules_id)
    print(f'rules_ids:\n{rules_ids}')  # these will produce more interesting flame graphs!
    print(f'game_ids:\n{game_ids}')
    start = time.time()
    state_ids = []
    for game_id in game_ids:
        for i in range(int(max_steps/batch_size)):  # Do a total of max_steps game steps, which is max_steps/delta_steps
            state_ids.append(game_id.step.remote(batch_size))
    ray.get(state_ids)  # wait for everything to finish! We are ignoring what ray.get() returns, but what will it be??
    pd(time.time() - start, prefix = f'Total time for {num_games} games (max_steps = {max_steps}, batch_size = {batch_size})')
def main():
    """CLI entry point: parse grid size / step count / local flag, connect
    to Ray (locally or to an existing cluster), and time one game."""
    import argparse
    parser = argparse.ArgumentParser(description="Conway's Game of Life v2")
    parser.add_argument('--size', metavar='N', type=int, default=100, nargs='?',
                        help='The size of the square grid for the game')
    parser.add_argument('--steps', metavar='N', type=int, default=500, nargs='?',
                        help='The number of steps to run')
    parser.add_argument('-l', '--local', help="Run Ray locally. Default is to join a cluster",
                        action='store_true')
    args = parser.parse_args()
    print(f"""
Conway's Game of Life v2:
  Grid size:        {args.size}
  Number steps:     {args.steps}
  Run Ray locally?  {args.local}
""")
    # Start a private Ray instance, or join the cluster advertised in the
    # environment (address='auto').
    if args.local:
        ray.init()
    else:
        ray.init(address='auto')
    time_ray_games(num_games = 1, max_steps = args.steps, batch_size = 1, grid_size = args.size)
|
nilq/baby-python
|
python
|
from setuptools import setup
setup(
    name='listenmoe',
    packages=['listenmoe'],
    version='v1.0.1',
    # FIX: the two implicitly-concatenated literals were missing a space
    # ("...aboutthe listen.moe...").
    description='Unofficial python3 API wrapper to get information about '
                'the listen.moe live stream using aiohttp',
    author='Zenrac',
    author_email='zenrac@outlook.fr',
    url='https://github.com/Zenrac/listenmoe',
    download_url='https://github.com/Zenrac/listenmoe/archive/v1.0.1.tar.gz',
    keywords=['listenmoe'],
    include_package_data=True,
    # FIX: 'asyncio' removed from install_requires -- it has been part of
    # the standard library since Python 3.4; the PyPI package of that name
    # is an outdated snapshot that can shadow the stdlib module.
    install_requires=['aiohttp']
)
|
nilq/baby-python
|
python
|
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
import os
import sys
import glob
import argparse
import threading
import six.moves.queue as Queue
import traceback
import numpy as np
import tensorflow as tf
import warnings
warnings.filterwarnings('ignore',category=FutureWarning)
warnings.filterwarnings('ignore', category=DeprecationWarning)
import PIL.Image
import tfutil
import dataset
# ----------------------------------------------------------------------------
def error(msg):
    """Print an error message to stderr and terminate with exit status 1.

    Fixes: messages now go to stderr (not stdout), and sys.exit() replaces
    the site-injected ``exit()`` builtin, which is absent under ``python -S``
    or in frozen interpreters.
    """
    print('Error: ' + msg, file=sys.stderr)
    sys.exit(1)
# ----------------------------------------------------------------------------
class TFRecordExporter:
    """Writes a multi-resolution TFRecord dataset (one file per LOD).

    Images must be added in the intended (pre-shuffled) order; each added
    CHW image is written at full resolution and at successively 2x2
    box-averaged half resolutions, one .tfrecords file per level.
    """

    def __init__(self, tfrecord_dir, expected_images, print_progress=True, progress_interval=10):
        self.tfrecord_dir = tfrecord_dir
        # All per-LOD files share this prefix: <dir>/<basename(dir)>.
        self.tfr_prefix = os.path.join(self.tfrecord_dir, os.path.basename(self.tfrecord_dir))
        self.expected_images = expected_images
        self.cur_images = 0
        self.shape = None              # CHW shape, fixed by the first image
        self.resolution_log2 = None
        self.tfr_writers = []          # one TFRecordWriter per LOD, created lazily
        self.print_progress = print_progress
        self.progress_interval = progress_interval
        if self.print_progress:
            print('Creating dataset "%s"' % tfrecord_dir)
        if not os.path.isdir(self.tfrecord_dir):
            os.makedirs(self.tfrecord_dir)
        assert (os.path.isdir(self.tfrecord_dir))

    def close(self):
        """Flush and close all per-LOD writers; report the final count."""
        if self.print_progress:
            print('%-40s\r' % 'Flushing data...', end='', flush=True)
        for tfr_writer in self.tfr_writers:
            tfr_writer.close()
        self.tfr_writers = []
        if self.print_progress:
            print('%-40s\r' % '', end='', flush=True)
            print('Added %d images.' % self.cur_images)

    def choose_shuffled_order(self):  # Note: Images and labels must be added in shuffled order.
        """Return a deterministic (seed 123) permutation of image indices."""
        order = np.arange(self.expected_images)
        np.random.RandomState(123).shuffle(order)
        return order

    def add_image(self, img):
        """Add one CHW uint8 image; write it at every LOD.

        The first image fixes the dataset shape, which must be square with
        a power-of-two side and 1 or 3 channels; writers for every LOD are
        created at that point.
        """
        if self.print_progress and self.cur_images % self.progress_interval == 0:
            print('%d / %d\r' % (self.cur_images, self.expected_images), end='', flush=True)
        if self.shape is None:
            self.shape = img.shape
            self.resolution_log2 = int(np.log2(self.shape[1]))
            assert self.shape[0] in [1, 3]
            assert self.shape[1] == self.shape[2]
            assert self.shape[1] == 2 ** self.resolution_log2
            tfr_opt = tf.io.TFRecordOptions(tf.compat.v1.python_io.TFRecordCompressionType.NONE)
            for lod in range(self.resolution_log2 - 1):
                tfr_file = self.tfr_prefix + '-r%02d.tfrecords' % (self.resolution_log2 - lod)
                self.tfr_writers.append(tf.io.TFRecordWriter(tfr_file, tfr_opt))
        assert img.shape == self.shape
        for lod, tfr_writer in enumerate(self.tfr_writers):
            if lod:
                # 2x2 box-average downsample for every level below full res.
                img = img.astype(np.float32)
                img = (img[:, 0::2, 0::2] + img[:, 0::2, 1::2] + img[:, 1::2, 0::2] + img[:, 1::2, 1::2]) * 0.25
            # quant = np.rint(img).clip(0, 255).astype(np.uint8)
            # NOTE(review): rounding/clipping is disabled (see commented
            # line above); downsampled floats are truncated to uint8.
            quant = img.astype(np.uint8)
            # Converting the np array to a tensor
            ex = tf.train.Example(features=tf.train.Features(feature={
                'shape': tf.train.Feature(int64_list=tf.train.Int64List(value=quant.shape)),
                'data': tf.train.Feature(bytes_list=tf.train.BytesList(value=[quant.tostring()]))}))
            tfr_writer.write(ex.SerializeToString())
        self.cur_images += 1

    def add_labels(self, labels):
        """Save the label matrix (one row per added image) as float32 .npy."""
        if self.print_progress:
            print('%-40s\r' % 'Saving labels...', end='', flush=True)
        print("cur", self.cur_images)
        print("shape", labels.shape)
        assert labels.shape[0] == self.cur_images
        with open(self.tfr_prefix + '-rxx.labels', 'wb') as f:
            np.save(f, labels.astype(np.float32))

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()
# ----------------------------------------------------------------------------
class ExceptionInfo(object):
    """Snapshot of the exception currently being handled.

    Instantiate inside an ``except`` block to capture the exception object
    and its formatted traceback for reporting from another thread.
    """

    def __init__(self):
        exc_value = sys.exc_info()[1]
        self.value = exc_value
        self.traceback = traceback.format_exc()
# ----------------------------------------------------------------------------
class WorkerThread(threading.Thread):
    """Worker that executes (func, args, result_queue) tasks from a queue.

    Pulls task tuples until it sees a ``None`` function (the shutdown
    sentinel). Each result -- or an ExceptionInfo on failure -- is pushed
    to the task's own result queue together with the original args.
    """

    def __init__(self, task_queue):
        threading.Thread.__init__(self)
        self.task_queue = task_queue

    def run(self):
        while True:
            func, args, result_queue = self.task_queue.get()
            if func is None:  # shutdown sentinel
                return
            try:
                outcome = func(*args)
            except:
                outcome = ExceptionInfo()
            result_queue.put((outcome, args))
# ----------------------------------------------------------------------------
class ThreadPool(object):
    """Fixed-size pool of WorkerThreads fed through a shared task queue.

    Results are demultiplexed per submitted function: each distinct func
    gets its own result queue, so get_result(func) only sees that func's
    completions.
    """

    def __init__(self, num_threads):
        assert num_threads >= 1
        self.task_queue = Queue.Queue()
        self.result_queues = dict()    # func -> Queue of (result, args)
        self.num_threads = num_threads
        for idx in range(self.num_threads):
            thread = WorkerThread(self.task_queue)
            thread.daemon = True       # don't block interpreter exit
            thread.start()

    def add_task(self, func, args=()):
        """Queue func(*args) for execution; lazily create its result queue."""
        assert hasattr(func, '__call__')  # must be a function
        if func not in self.result_queues:
            self.result_queues[func] = Queue.Queue()
        self.task_queue.put((func, args, self.result_queues[func]))

    def get_result(self, func):  # returns (result, args)
        """Block for one completed task of ``func``; re-raise its exception."""
        result, args = self.result_queues[func].get()
        if isinstance(result, ExceptionInfo):
            print('\n\nWorker thread caught an exception:\n' + result.traceback)
            raise result.value
        return result, args

    def finish(self):
        """Send one shutdown sentinel per worker (does not join them)."""
        for idx in range(self.num_threads):
            self.task_queue.put((None, (), None))

    def __enter__(self):  # for 'with' statement
        return self

    def __exit__(self, *excinfo):
        self.finish()

    def process_items_concurrently(self, item_iterator, process_func=lambda x: x, pre_func=lambda x: x,
                                   post_func=lambda x: x, max_items_in_flight=None):
        """Generator: pre_func on the caller's thread, process_func on the
        pool, post_func on the caller's thread -- yielding results in the
        original item order while keeping at most max_items_in_flight
        items outstanding.
        """
        if max_items_in_flight is None: max_items_in_flight = self.num_threads * 4
        assert max_items_in_flight >= 1
        results = []        # slot per submitted item; None = not yet retired
        retire_idx = [0]    # next in-order index to yield (list for closure mutation)

        def task_func(prepared, idx):
            return process_func(prepared)

        def retire_result():
            # Collect one completion, then yield any now-contiguous results.
            processed, (prepared, idx) = self.get_result(task_func)
            results[idx] = processed
            while retire_idx[0] < len(results) and results[retire_idx[0]] is not None:
                yield post_func(results[retire_idx[0]])
                results[retire_idx[0]] = None
                retire_idx[0] += 1

        for idx, item in enumerate(item_iterator):
            prepared = pre_func(item)
            results.append(None)
            self.add_task(func=task_func, args=(prepared, idx))
            # Throttle: block until the in-flight window has room again.
            while retire_idx[0] < idx - max_items_in_flight + 2:
                for res in retire_result(): yield res
        # Drain everything still outstanding.
        while retire_idx[0] < len(results):
            for res in retire_result(): yield res
# ----------------------------------------------------------------------------
def display(tfrecord_dir):
    """Interactively preview a TFRecord dataset with OpenCV.

    Streams one image at a time; SPACE/ENTER advances, ESC (key code 27)
    exits early.
    """
    print('Loading dataset "%s"' % tfrecord_dir)
    tfutil.init_tf({'gpu_options.allow_growth': True})
    dset = dataset.TFRecordDataset(tfrecord_dir, max_label_size='full', repeat=False, shuffle_mb=0)
    tfutil.init_uninited_vars()

    idx = 0
    while True:
        try:
            images, labels = dset.get_minibatch_np(1)
        except tf.errors.OutOfRangeError:
            break
        if idx == 0:
            print('Displaying images')
            import cv2  # pip install opencv-python
            cv2.namedWindow('dataset_tool')
            print('Press SPACE or ENTER to advance, ESC to exit')
        print('\nidx = %-8d\nlabel = %s' % (idx, labels[0].tolist()))
        cv2.imshow('dataset_tool', images[0].transpose(1, 2, 0)[:, :, ::-1])  # CHW => HWC, RGB => BGR
        idx += 1
        if cv2.waitKey() == 27:
            break
    print('\nDisplayed %d images.' % idx)
# ----------------------------------------------------------------------------
def extract(tfrecord_dir, output_dir):
    """Export every image of a TFRecord dataset as PNG files.

    Files are named img%08d.png in read order; grayscale (1-channel) and
    RGB (3-channel) images are both supported.
    """
    print('Loading dataset "%s"' % tfrecord_dir)
    tfutil.init_tf({'gpu_options.allow_growth': True})
    dset = dataset.TFRecordDataset(tfrecord_dir, max_label_size=0, repeat=False, shuffle_mb=0)
    tfutil.init_uninited_vars()

    print('Extracting images to "%s"' % output_dir)
    if not os.path.isdir(output_dir):
        os.makedirs(output_dir)
    idx = 0
    while True:
        if idx % 10 == 0:
            print('%d\r' % idx, end='', flush=True)
        try:
            images, labels = dset.get_minibatch_np(1)
        except tf.errors.OutOfRangeError:
            break
        if images.shape[1] == 1:
            img = PIL.Image.fromarray(images[0][0], 'L')
        else:
            img = PIL.Image.fromarray(images[0].transpose(1, 2, 0), 'RGB')
        img.save(os.path.join(output_dir, 'img%08d.png' % idx))
        idx += 1
    print('Extracted %d images.' % idx)
# ----------------------------------------------------------------------------
def compare(tfrecord_dir_a, tfrecord_dir_b, ignore_labels):
    """Compare two TFRecord datasets image-by-image (and optionally
    label-by-label), printing how many entries are identical."""
    max_label_size = 0 if ignore_labels else 'full'
    print('Loading dataset "%s"' % tfrecord_dir_a)
    tfutil.init_tf({'gpu_options.allow_growth': True})
    dset_a = dataset.TFRecordDataset(tfrecord_dir_a, max_label_size=max_label_size, repeat=False, shuffle_mb=0)
    print('Loading dataset "%s"' % tfrecord_dir_b)
    dset_b = dataset.TFRecordDataset(tfrecord_dir_b, max_label_size=max_label_size, repeat=False, shuffle_mb=0)
    tfutil.init_uninited_vars()

    print('Comparing datasets')
    idx = 0
    identical_images = 0
    identical_labels = 0
    while True:
        if idx % 100 == 0:
            print('%d\r' % idx, end='', flush=True)
        # Exhausted streams are marked with None so a length mismatch can
        # be detected below.
        try:
            images_a, labels_a = dset_a.get_minibatch_np(1)
        except tf.errors.OutOfRangeError:
            images_a, labels_a = None, None
        try:
            images_b, labels_b = dset_b.get_minibatch_np(1)
        except tf.errors.OutOfRangeError:
            images_b, labels_b = None, None
        if images_a is None or images_b is None:
            if images_a is not None or images_b is not None:
                print('Datasets contain different number of images')
            break
        if images_a.shape == images_b.shape and np.all(images_a == images_b):
            identical_images += 1
        else:
            print('Image %d is different' % idx)
        if labels_a.shape == labels_b.shape and np.all(labels_a == labels_b):
            identical_labels += 1
        else:
            print('Label %d is different' % idx)
        idx += 1
    print('Identical images: %d / %d' % (identical_images, idx))
    if not ignore_labels:
        print('Identical labels: %d / %d' % (identical_labels, idx))
def create_from_images(labeled_tfrecord_dir, unlabeled_tfrecord_dir, labeled_dir, unlabeled_dir, shuffle):
    """Build labeled + unlabeled TFRecord datasets from image directories.

    ``labeled_dir`` must contain one sub-directory per class (the class
    index is the sub-directory's position in os.listdir order); labels are
    one-hot encoded. ``unlabeled_dir`` is a flat directory of images. All
    images must be square power-of-two, 1 or 3 channels, and match the
    first image's geometry. Both dirs are expected to end with "/"
    (callers append it; a doubled slash is stripped below).
    """
    # Checking to see if there is two slashes at the end instead of 1
    if labeled_dir[-1] == "/" and labeled_dir[-2] == "/":
        labeled_dir = labeled_dir[:-1]
    if unlabeled_dir[-1] == "/" and unlabeled_dir[-2] == "/":
        unlabeled_dir = unlabeled_dir[:-1]

    # Checking to make sure the path exists
    if not os.path.isdir(labeled_dir):
        error("Path " + labeled_dir + " does not exist!")
    if not os.path.isdir(unlabeled_dir):
        error("Path " + unlabeled_dir + " does not exist!")

    # This lists all of the directories in the provided labeled directory. Each class should have its own folder
    # within this directory. It also prepends the full path before it and makes sure .git isn't included
    classes_dir = [labeled_dir + name for name in os.listdir(labeled_dir) if os.path.isdir(os.path.join(labeled_dir, name)) and name != '.git']
    Num_classes = len(classes_dir)
    labeled_filenames = []
    # Go through each class directory and list all the full paths to each file and store them in an array
    for each_class in classes_dir:
        print('Loading images from "%s"' % each_class)
        labeled_filenames.append(list(sorted(glob.glob(os.path.join(each_class, '*')))))
    # Go through that array and assign Labels to each image
    labels = []
    for i in range(Num_classes):
        print("Class " + str(i) + " is " + str(classes_dir[i].split("/")[-1]))
        labels += [i] * len(labeled_filenames[i])
    print("Number of classes: " + str(Num_classes))
    # Converting labels into np array and one hot encoding it
    labels = np.array(labels)
    onehot = np.zeros((labels.size, Num_classes), dtype=np.float32)
    onehot[np.arange(labels.size), labels] = 1.0

    # Unlabeled dataset loading
    print('Loading images from "%s"' % unlabeled_dir)
    unlabeled_filenames = sorted(glob.glob(os.path.join(unlabeled_dir, '*')))
    print()
    # Checks
    # NOTE(review): this tests the number of CLASS DIRECTORIES, not the
    # number of labeled images; an empty class folder slips through.
    if len(labeled_filenames) == 0:
        error('No input images found in ' + labeled_dir)
    if len(unlabeled_filenames) == 0:
        error('No input images found in ' + unlabeled_dir)

    # Checking to make sure dimensions are all good
    img = np.asarray(PIL.Image.open(labeled_filenames[0][0]))
    resolution = img.shape[0]
    channels = img.shape[2] if img.ndim == 3 else 1
    if img.shape[1] != resolution:
        error('Input images must have the same width and height')
    if resolution != 2 ** int(np.floor(np.log2(resolution))):
        error('Input image resolution must be a power-of-two')
    if channels not in [1, 3]:
        error('Input images must be stored as RGB or grayscale')

    # Adding labeled data
    with TFRecordExporter(labeled_tfrecord_dir, len(labels)) as tfr:
        order = tfr.choose_shuffled_order() if shuffle else np.arange(len(labels))
        # Go over the number of images
        for idx in range(len(labels)):
            # Kind-of confusing but this is necessary due to the multi-class labeled data
            # labeled_filenames = [[cat1, cat2, cat3], [dog1, dog2, dog3]] since it is a double
            # array and the shuffling is a single array [4, 5, 2, 0, 1, 3] the code below finds which
            # index for the class (class_indx) and which index for the sample within the class (tmp_indx)
            # I did it this way so the amount of samples within each class can be arbitrary as well as the number
            # of classes overall.
            class_indx = 0
            tmp_indx = order[idx]  # lets say tmp_indx is 4 in our example
            # Checks to see if 4 > 2
            while tmp_indx > len(labeled_filenames[class_indx])-1:
                # tmp_indx = 4 - 3
                tmp_indx-=len(labeled_filenames[class_indx])
                # we check the next class
                class_indx+=1
            # class_indx = 0; tmp_indx = 1 which gives us the 4th index
            img = np.asarray(PIL.Image.open(labeled_filenames[class_indx][tmp_indx]))
            if channels == 1:
                img = img[np.newaxis, :, :]  # HW => CHW
            else:
                img = img.transpose(2, 0, 1)  # HWC => CHW
            tfr.add_image(img)
        # Dont need to do anything fancy here since onehot is a numpy array
        tfr.add_labels(onehot[order])
    print()
    # Adding unlabeled data
    with TFRecordExporter(unlabeled_tfrecord_dir, len(unlabeled_filenames)) as tfr2:
        #fake_labels = [Num_classes - 1] * len(unlabeled_filenames)
        #fake_labels = np.array(fake_labels)
        #fake_onehot = np.zeros((fake_labels.size, np.max(fake_labels) + 1), dtype=np.float32)
        #fake_onehot[np.arange(fake_labels.size), fake_labels] = 1.0
        order = tfr2.choose_shuffled_order() if shuffle else np.arange(len(unlabeled_filenames))
        for idx in range(order.size):
            img = np.asarray(PIL.Image.open(unlabeled_filenames[order[idx]]))
            if channels == 1:
                img = img[np.newaxis, :, :]  # HW => CHW
            else:
                img = img.transpose(2, 0, 1)  # HWC => CHW
            tfr2.add_image(img)
        #tfr2.add_labels(fake_onehot[order])
# ----------------------------------------------------------------------------
if __name__ == "__main__":
    # Entry point: expects exactly two command-line arguments, the labeled
    # and unlabeled source image directories.
    if len(sys.argv) != 3:
        error("Wrong amount of commands given!\nFormat: python3 dataset_tool.py <Labeled dir> <Unlabeled dir>\nEx) python3 dataset_tool.py /home/user/Desktop/SSL-PG-GAN/CatVDog/PetImages/Labeled/ /home/user/Desktop/SSL-PG-GAN/CatVDog/PetImages/Unlabeled/\n")
    # Make sure both tfrecord output directories exist before exporting.
    for record_dir in ("Labeled", "Unlabeled"):
        if not os.path.isdir(record_dir):
            os.mkdir(record_dir)
    Shuffle = True
    labeled_src, unlabeled_src = sys.argv[1], sys.argv[2]
    create_from_images("Labeled", "Unlabeled", labeled_src + "/", unlabeled_src + "/", Shuffle)
# ----------------------------------------------------------------------------
|
nilq/baby-python
|
python
|
"""
Implements the Graph object which is used by the ConstraintPropagator.
It is here where Allen's constraint propagation algorithm is implemented.
"""
# TODO: I am not convinced that the history mechanism is very good, yet it seems
# to be sufficient for our current purposes.
from objects import Node, Edge, Constraint
from utils import intersect_relations
from utils import compare_id
from utils import html_graph_prefix
from mappings import invert_interval_relation
from mappings import abbreviate_convex_relation
from utilities import logger
from library.main import LIBRARY
# Debug toggle: the second assignment wins, so DEBUG is False here; flip by
# removing/reordering the lines to enable verbose tracing via debug().
DEBUG = True
DEBUG = False
# TimeML tag and attribute names used when building graph nodes.
TIMEX = LIBRARY.timeml.TIMEX
TID = LIBRARY.timeml.TID
EVENT = LIBRARY.timeml.EVENT
EID = LIBRARY.timeml.EID
EIID = LIBRARY.timeml.EIID
EVENTID = LIBRARY.timeml.EVENTID
FORM = LIBRARY.timeml.FORM
VALUE = LIBRARY.timeml.VALUE
class Graph:

    """Implements the graph object used in the constraint propagation algorithm.

    Instance variables:
       filename - the name of the source file
       cycle - an integer
       queue - a list of Constraints
       nodes - a hash of Nodes, indexed on node identifiers
       edges - a hash of hashes of Edges, indexed on node identifiers
       compositions - a CompositionTable
    """

    def __init__(self, compositions):
        """Initialize an empty graph, with empty queue, nodes dictionary and
        edges dictionary."""
        self.compositions = compositions
        self.cycle = 0
        self.queue = []
        self.nodes = {}
        self.edges = {}

    # NOTE(review): an older overload add_nodes(events, timexes) used to be
    # defined here; it was dead code because the definition below immediately
    # shadowed it in the class body, so it has been removed.

    def add_nodes(self, sources, source_type):
        """Creates Nodes for each source and add them to the nodes table. Also
        initializes the edges table now that all nodes are known. A source is
        either an event or timex tag or simply an identifier."""
        for source in sources:
            if source_type == 'IDENTIFIER':
                identifier = source
                text = ''
            elif source_type == TIMEX:
                identifier = source.attrs[TID]
                text = source.attrs[VALUE]
            elif source_type == EVENT:
                identifier = source.attrs[EIID]
                text = source.attrs[FORM]
            node = Node(source, identifier, source_type, text)
            self.nodes[node.id] = node
        # now that all nodes are known, create an empty Edge for every ordered
        # pair of nodes
        for n1 in self.nodes.keys():
            self.edges[n1] = {}
            for n2 in self.nodes.keys():
                self.edges[n1][n2] = Edge(n1, n2, self)

    def propagate(self, constraint):
        """Propagate the constraint through the graph, using Allen's
        constraint propagation algorithm."""
        self.cycle += 1
        if constraint.is_garbage():
            # guard against garbage constraints in the pending queue by simply
            # skipping them
            return
        self.added = []  # to keep track of what is added this cycle
        self.queue.append(constraint)
        debug(str="\n%d %s\n" % (self.cycle, constraint))
        while self.queue:
            constraint_i_j = self.queue.pop(0)
            constraint_i_j.cycle = self.cycle
            debug(1, "POP QUEUE: %s" % (constraint_i_j))
            # compare new constraint to the one already on the edge
            edge_i_j = self.edges[constraint_i_j.node1][constraint_i_j.node2]
            (status, intersection) = self._intersect_constraints(edge_i_j,
                                                                 constraint_i_j)
            if status == 'INTERSECTION-IS-MORE-SPECIFIC':
                self.added.append(constraint_i_j)
                self._update_constraint(edge_i_j, constraint_i_j, intersection)

    def reduce(self):
        """Reduce the graph to one that does not contain any relations derived
        by closure. This does not get you a graph with the original annotations
        because some might have been removed due to inconsistencies."""
        # TODO: we may consider removing inverse relations and relations that
        # could be derived from other relations
        self.cycle += 1
        self.added = []
        self._remove_derived_relations()

    def remove_node(self, node_id):
        """Remove a node from the graph. Involves removing the node from the
        nodes hash, removing the node's column and row in the edges array and
        removing the node from edges_in and edges_out attributes of other
        nodes. This is not being used right now."""
        node = self.nodes[node_id]
        # remove from other nodes
        for node_in_id in node.edges_in.keys():
            del self.nodes[node_in_id].edges_out[node_id]
        for node_out_id in node.edges_out.keys():
            del self.nodes[node_out_id].edges_in[node_id]
        # remove from nodes hash
        del self.nodes[node_id]
        # remove from edges hash
        del self.edges[node_id]
        for other_node_id in self.edges.keys():
            del self.edges[other_node_id][node_id]

    def _update_constraint(self, edge_i_j, constraint_i_j, intersection):
        """Update a constraint by setting its relation set to the intersection
        and then add it to the edge. Once you have done that you need to check
        whether this constraint then puts further constraints on incoming edges
        to node i and outgoing edges from node j."""
        constraint_i_j.relset = intersection
        self._add_constraint_to_edge(constraint_i_j, edge_i_j)
        node_i = constraint_i_j.get_node1()
        node_j = constraint_i_j.get_node2()
        node_i.edges_out[constraint_i_j.node2] = edge_i_j
        node_j.edges_in[constraint_i_j.node1] = edge_i_j
        self._check_all_k_i_j(node_i, node_j, edge_i_j)
        self._check_all_i_j_k(node_i, node_j, edge_i_j)

    def _check_all_k_i_j(self, node_i, node_j, edge_i_j):
        """Check the constraints on [node_k --> node_i --> node_j]."""
        debug(1, "CHECKING: X --> %s --> %s" % (node_i.id, node_j.id))
        for edge_k_i in node_i.edges_in.values():
            debug(2, "%s * %s" % (edge_k_i, edge_i_j))
            self._check_k_i_j(edge_k_i, edge_i_j, node_i, node_j)

    def _check_all_i_j_k(self, node_i, node_j, edge_i_j):
        """Check the constraints on [node_i --> node_j --> node_k]."""
        debug(1, "CHECKING: %s --> %s --> X" % (node_i.id, node_j.id))
        for edge_j_k in node_j.edges_out.values():
            debug(2, "%s * %s" % (edge_i_j, edge_j_k))
            self._check_i_j_k(edge_i_j, edge_j_k, node_i, node_j)

    def _check_k_i_j(self, edge_k_i, edge_i_j, node_i, node_j):
        """Look at the k->i->j subgraph and check whether the new constraint in
        Edge(i,j) allows you to derive something new by composition. The nodes
        node_i and node_j could be derived from edge_i_j but are handed to this
        function because they were already available and it saves a bit of time
        this way."""
        node_k = edge_k_i.get_node1()
        if node_k.id == node_j.id:
            # skip the degenerate triangle where k and j are the same node
            return
        edge_k_j = self._get_edge(node_k, node_j)
        relset_k_j = self._compose(edge_k_i, edge_i_j.constraint)
        debug(3, "{%s} * {%s} --> {%s} || %s "
              % (edge_k_i.constraint.relset, edge_i_j.constraint.relset,
                 relset_k_j, edge_k_j.constraint))
        if relset_k_j is not None:
            self._combine(edge_k_j, relset_k_j,
                          edge_k_i.constraint, edge_i_j.constraint)

    def _check_i_j_k(self, edge_i_j, edge_j_k, node_i, node_j):
        """Look at the i->j->k subgraph and check whether the new constraint in
        Edge(i,j) allows you to derive something new by composition. The nodes
        node_i and node_j could be derived from edge_i_j but are handed to this
        function because they were already available and it saves a bit of time
        this way."""
        node_k = edge_j_k.get_node2()
        if node_k.id == node_i.id:
            # skip the degenerate triangle where k and i are the same node
            return
        edge_i_k = self._get_edge(node_i, node_k)
        relset_i_k = self._compose(edge_i_j.constraint, edge_j_k)
        debug(3, "{%s} * {%s} --> {%s} || %s "
              % (edge_i_j.constraint.relset, edge_j_k.constraint.relset,
                 relset_i_k, edge_i_k.constraint))
        if relset_i_k is not None:
            self._combine(edge_i_k, relset_i_k,
                          edge_i_j.constraint, edge_j_k.constraint)

    def _combine(self, edge, relset, c1, c2):
        """Compare the relation set on the edge to the relation set created by
        composition. Creates the intersection of the relation sets and checks
        the result: (i) inconsistency, (ii) more specific than relation set on
        edge, or (iii) something else. The arguments c1 and c2 are the
        constraints that were composed to create relset and will be used to set
        the history on a new constraint if it is created."""
        edge_relset = edge.relset
        intersection = intersect_relations(edge_relset, relset)
        if intersection == '':
            debug(4, "WARNING: found an inconsistency where it shouldn't be")
            pass
        elif intersection is None:
            debug(4, "WARNING: intersection is None, this should not happen")
            pass
        elif edge_relset is None:
            self._add_constraint_to_queue(edge, intersection, c1, c2)
        elif len(intersection) < len(edge_relset):
            self._add_constraint_to_queue(edge, intersection, c1, c2)

    def _add_constraint_to_queue(self, edge, relset, c1, c2):
        """Create a new derived Constraint for the edge and append it to the
        pending queue."""
        new_constraint = Constraint(edge.node1, relset, edge.node2,
                                    cycle=self.cycle, source='closure',
                                    history=(c1, c2))
        self.queue.append(new_constraint)
        debug(3, "ADD QUEUE %s " % new_constraint)
        add_inverted = False
        # Adding the inverted constraint should not be needed, except perhaps as
        # a potential minor speed increase. As far I can see however, the method
        # is actually slower when adding the inverse (about 20%), which is
        # surprising. But the results are the same.
        if add_inverted:
            relset = invert_interval_relation(relset)
            new_constraint2 = Constraint(edge.node2, relset, edge.node1,
                                         cycle=self.cycle,
                                         source='closure-inverted',
                                         history=(c1, c2))
            self.queue.append(new_constraint2)
            debug(3, "ADD QUEUE %s " % new_constraint2)

    def _intersect_constraints(self, edge, constraint):
        """Intersect the constraint that was just derived with the one already
        on the edge. There are four cases: (1) the new constraint, if it is the
        one originally handed to the propagate() function, introduces an
        inconsistency; (2) the new constraint is identical to the one already
        there and can be ignored; (3) the intersection of the new constraint
        with the old constraint is the same as the old constraint; and (4) the
        new constraint is more specific than the already existing
        constraint. The method returns a (status, intersection) pair; the
        caller only acts when the status is 'INTERSECTION-IS-MORE-SPECIFIC'."""
        edge = self.edges[constraint.node1][constraint.node2]
        new_relset = constraint.relset
        existing_relset = edge.relset
        intersection = intersect_relations(new_relset, existing_relset)
        debug(2, "INTERSECT NEW {%s} WITH EXISTING {%s} --> {%s}"
              % (constraint.relset, edge.relset, intersection))
        if intersection == '':
            status = 'INCONSISTENT'
            logger.warn("Inconsistent new contraint: %s" % constraint)
            logger.warn("Clashes with: [%s] (derived from %s)"
                        % (edge.constraint, edge.constraint.history_string()))
        elif new_relset == existing_relset:
            status = 'NEW=EXISTING'
        elif intersection == existing_relset:
            status = 'INTERSECTION=EXISTING'
        else:
            status = 'INTERSECTION-IS-MORE-SPECIFIC'
        debug(2, "STATUS: %s" % status)
        return (status, intersection)

    def _compose(self, object1, object2):
        """Return the composition of the relation sets on the two objects. One
        object is an edge, the other a Constraint. Once the relations
        are retrieved from the objects all that's needed is a simple
        lookup in the compositions table."""
        rels1 = object1.relset
        rels2 = object2.relset
        return self.compositions.compose_rels(rels1, rels2)

    def _add_constraint_to_edge(self, constraint, edge):
        """This method links a constraints to its edge by retrieving the edge
        from the graph, adding the constraint to this edge, and setting the edge
        attribute on the constraint."""
        edge.add_constraint(constraint)
        constraint.edge = edge

    def _get_edge(self, node1, node2):
        """Return the edge from node1 to node2."""
        return self.edges[node1.id][node2.id]

    def get_edges(self):
        """Return all edges that have a constraint on them."""
        edges = []
        for n1 in self.edges.keys():
            for n2 in self.edges[n1].keys():
                edge = self.edges[n1][n2]
                if n1 != n2 and edge.constraint:
                    edges.append(edge)
        return edges

    def _remove_disjunctions(self):
        """Remove all disjunctions from the graph, not used now but may come in
        handy later."""
        for edge in self.get_edges():
            if edge.constraint:
                if edge.constraint.is_disjunction():
                    edge.remove_constraint()

    def _remove_derived_relations(self):
        """Remove all derived relations from the graph."""
        for edge in self.get_edges():
            if edge.is_derived():
                edge.remove_constraint()

    def _normalize_relations(self):
        """Remove all relations that are not in the set of normalized relations,
        not used now but may come in handy later."""
        for edge in self.get_edges():
            if edge.constraint:
                if not edge.constraint.has_normalized_relation():
                    edge.remove_constraint()

    def pp_nodes(self):
        """Print all nodes with their edges_in and edges_out attributes to
        standard output."""
        # NOTE(review): relies on Python 2 where keys() returns a sortable list
        ids = self.nodes.keys()
        ids.sort(compare_id)
        for id in ids:
            self.nodes[id].pretty_print()

    def pp_html(self, filename=None, filehandle=None, standalone=False):
        """Print the graph to an HTML table in filename."""
        fh = open(filename, 'w') if filename else filehandle
        if standalone:
            html_graph_prefix(fh)
        fh.write("<table cellpadding=0 cellspacing=0 border=0>\n")
        fh.write("<tr><td>\n")
        # NOTE(review): relies on Python 2 where keys() returns a sortable list
        nodes = self.nodes.keys()
        nodes.sort(compare_id)
        self._html_nodes_table(fh, nodes)
        fh.write("</td>\n\n")
        fh.write("<td valign=top>\n")
        self._html_added_table(fh)
        fh.write("</td></tr>\n\n")
        fh.write("</table>\n\n")
        if standalone:
            fh.write("</body>\n</html>\n\n")

    def _html_nodes_table(self, fh, nodes):
        """Write the adjacency matrix of the graph as an HTML table."""
        fh.write("<table cellpadding=5 cellspacing=0 border=1>\n")
        fh.write("\n<tr>\n\n")
        fh.write("  <td>&nbsp;\n\n")
        for identifier in nodes:
            fh.write("  <td>%s\n" % identifier)
        for id1 in nodes:
            fh.write("\n\n<tr align=center>\n\n")
            fh.write("  <td align=left>%s\n" % id1)
            for id2 in nodes:
                edge = self.edges[id1][id2]
                rel = edge.relset
                if rel is None:
                    rel = '&nbsp;'
                rel = abbreviate_convex_relation(rel)
                # escape for HTML display; the original escape entities were
                # lost in transit (the replaces had become no-ops), restored here
                rel = rel.replace('<', '&lt;').replace(' ', '&nbsp;')
                classes = []
                if edge.constraint:
                    classes.append(edge.constraint.source)
                    if self.cycle == edge.constraint.cycle:
                        classes.append("cycle")
                if id1 == id2:
                    classes.append("nocell")
                    # rel = '&nbsp;'
                classes = " class=\"%s\"" % ' '.join(classes)
                fh.write("  <td width=25pt%s>%s\n" % (classes, rel))
        fh.write("</table>\n\n")

    def _html_added_table(self, fh):
        """Write the constraints added in the last cycle, with their
        derivation history, as an HTML table."""
        fh.write("<table cellpadding=5 cellspacing=0 border=1>\n")
        if self.added:
            fh.write("<tr><td>added<td colspan=2>derived from\n")
        for c in self.added:
            fh.write("<tr>\n  <td>%s</td>\n" % c)
            if isinstance(c.history, tuple):
                fh.write("  <td>%s\n" % str(c.history[0]))
                fh.write("  <td>%s\n" % str(c.history[1]))
            elif c.history.__class__.__name__ == 'Tag':
                tlink = "TLINK(relType=%s)" % c.history.attrs.get('relType')
                fh.write("  <td colspan=2>%s\n" % tlink)
            elif c.history.__class__.__name__ == 'Constraint':
                fh.write("  <td colspan=2>%s\n" % c.history)
            else:
                fh.write("  <td colspan=2>&nbsp;\n")
        fh.write("</table>\n\n")
def debug(indent=0, str=''):
    # Print *str* indented by *indent* levels when module-level DEBUG is True.
    # NOTE(review): the parameter shadows the builtin `str`, but it cannot be
    # renamed safely because a caller passes it by keyword (debug(str=...)).
    if DEBUG:
        print ' ' * indent, str
|
nilq/baby-python
|
python
|
import pytest
from reformat_gherkin.errors import DeserializeError, InvalidInput
from reformat_gherkin.parser import parse
def test_invalid_input(invalid_contents):
    """Every invalid document must make parse() raise InvalidInput."""
    for document in invalid_contents:
        with pytest.raises(InvalidInput):
            parse(document)
def test_valid_input(valid_contents):
    """parse() should accept every valid document without raising."""
    for document in valid_contents():
        parse(document)
def test_parse_with_exception(mocker, valid_contents):
    """Unexpected converter failures must surface as DeserializeError."""
    message = "exception message"
    mocker.patch(
        "reformat_gherkin.parser.converter.structure",
        side_effect=Exception(message),
    )
    for document in valid_contents():
        with pytest.raises(DeserializeError) as exc_info:
            parse(document)
        # the original cause's message must be preserved in the wrapper
        assert message in str(exc_info.value)
|
nilq/baby-python
|
python
|
from multio import asynclib
class API:
    """Endpoint constants for the paste.myst.rs REST API."""
    HOST = 'https://paste.myst.rs'
    BETA_HOST = 'https://pmb.myst.rs'
    API_VERSION = '2'
    # Fully-qualified base URLs for the production and beta HTTP APIs.
    HTTP_ENDPOINT = f'{HOST}/api/v{API_VERSION}'
    BETA_HTTP_ENDPOINT = f'{BETA_HOST}/api/v{API_VERSION}'
async def run_later(time, task):
    """Sleep for *time* seconds, then await *task* and return its result."""
    await asynclib.sleep(time)
    return await task
def spacify_string(s):
    """Convert a camelCase string *s* to snake_case.

    Each uppercase letter starts a new word; words are lowercased and joined
    with underscores. Fixes the original behavior where a leading uppercase
    letter produced a spurious empty first word (e.g. "CamelCase" used to
    become "_camel_case" instead of "camel_case"). Inputs that start with a
    lowercase letter are converted exactly as before.
    """
    words = []
    current = ''
    for ch in s:
        if ch.isupper():
            if current:  # skip the empty word a leading capital would create
                words.append(current)
            current = ch.lower()
        else:
            current += ch
    if current:
        words.append(current)
    return '_'.join(words)
|
nilq/baby-python
|
python
|
#-*. coding: utf-8 -*-
## Copyright (c) 2008-2012, Noel O'Boyle; 2012, Adrià Cereto-Massagué
## All rights reserved.
##
## This file is part of Cinfony.
## The contents are covered by the terms of the GPL v2 license
## which is included in the file LICENSE_GPLv2.txt.
"""
pybel - A Cinfony module for accessing Open Babel
Global variables:
ob - the underlying SWIG bindings for Open Babel
informats - a dictionary of supported input formats
outformats - a dictionary of supported output formats
descs - a list of supported descriptors
fps - a list of supported fingerprint types
forcefields - a list of supported forcefields
"""
import sys
import math
import os.path
import tempfile
if sys.platform[:4] == "java":
import org.openbabel as ob
import java.lang.System
java.lang.System.loadLibrary("openbabel_java")
_obfuncs = ob.openbabel_java
_obconsts = ob.openbabel_javaConstants
import javax
elif sys.platform[:3] == "cli":
import System
import clr
clr.AddReference('System.Windows.Forms')
clr.AddReference('System.Drawing')
from System.Windows.Forms import (
Application, DockStyle, Form, PictureBox, PictureBoxSizeMode
)
from System.Drawing import Image, Size
_obdotnet = os.environ["OBDOTNET"]
if _obdotnet[0] == '"': # Remove trailing quotes
_obdotnet = _obdotnet[1:-1]
clr.AddReferenceToFileAndPath(os.path.join(_obdotnet, "OBDotNet.dll"))
import OpenBabel as ob
_obfuncs = ob.openbabel_csharp
_obconsts = ob.openbabel_csharp
else:
import openbabel as ob
_obfuncs = _obconsts = ob
try:
import Tkinter as tk
import Image as PIL
import ImageTk as piltk
except ImportError: #pragma: no cover
tk = None
def _formatstodict(list):
if sys.platform[:4] == "java":
list = [list.get(i) for i in range(list.size())]
broken = [x.replace("[Read-only]", "").replace("[Write-only]","").split(" -- ") for x in list]
broken = [(x,y.strip()) for x,y in broken]
return dict(broken)
# Shared converter/builder instances used by the module-level helpers below.
_obconv = ob.OBConversion()
_builder = ob.OBBuilder()
# Public lookup tables of the formats Open Babel can read and write.
informats = _formatstodict(_obconv.GetSupportedInputFormat())
"""A dictionary of supported input formats"""
outformats = _formatstodict(_obconv.GetSupportedOutputFormat())
"""A dictionary of supported output formats"""
def _getplugins(findplugin, names):
plugins = dict([(x, findplugin(x)) for x in names if findplugin(x)])
return plugins
def _getpluginnames(ptype):
    """Return the names of all Open Babel plugins of type *ptype*
    (e.g. "descriptors", "fingerprints", "forcefields", "ops")."""
    if sys.platform[:4] == "cli":
        plugins = ob.VectorString()
    else:
        plugins = ob.vectorString()
    ob.OBPlugin.ListAsVector(ptype, None, plugins)
    if sys.platform[:4] == "java":
        # Jython returns a java.util.List; convert it to a Python list
        plugins = [plugins.get(i) for i in range(plugins.size())]
    # each entry is "name  description"; keep only the name
    return [x.split()[0] for x in plugins]
# Public plugin-name lists and their private name -> plugin-object lookup
# tables, resolved once at import time.
descs = _getpluginnames("descriptors")
"""A list of supported descriptors"""
_descdict = _getplugins(ob.OBDescriptor.FindType, descs)
fps = [_x.lower() for _x in _getpluginnames("fingerprints")]
"""A list of supported fingerprint types"""
_fingerprinters = _getplugins(ob.OBFingerprint.FindFingerprint, fps)
forcefields = [_x.lower() for _x in _getpluginnames("forcefields")]
"""A list of supported forcefields"""
_forcefields = _getplugins(ob.OBForceField.FindType, forcefields)
operations = _getpluginnames("ops")
"""A list of supported operations"""
_operations = _getplugins(ob.OBOp.FindType, operations)
def readfile(format, filename, opt=None):
    """Iterate over the molecules in a file.

    Required parameters:
       format - see the informats variable for a list of available
                input formats
       filename

    Optional parameters:
       opt - a dictionary of format-specific options
             For format options with no parameters, specify the
             value as None.

    You can access the first molecule in a file using the next() method
    of the iterator (or the next() keyword in Python 3):
        mol = readfile("smi", "myfile.smi").next() # Python 2
        mol = next(readfile("smi", "myfile.smi")) # Python 3

    You can make a list of the molecules in a file using:
        mols = list(readfile("smi", "myfile.smi"))

    You can iterate over the molecules in a file as shown in the
    following code snippet:
    >>> atomtotal = 0
    >>> for mol in readfile("sdf", "head.sdf"):
    ...     atomtotal += len(mol.atoms)
    ...
    >>> print(atomtotal)
    43
    """
    if opt is None:
        opt = {}
    obconversion = ob.OBConversion()
    formatok = obconversion.SetInFormat(format)
    # fail fast on an unknown format, before touching the options
    if not formatok:
        raise ValueError("%s is not a recognised Open Babel format" % format)
    for k, v in opt.items():
        if v is None:
            obconversion.AddOption(k, obconversion.INOPTIONS)
        else:
            obconversion.AddOption(k, obconversion.INOPTIONS, str(v))
    if not os.path.isfile(filename):
        raise IOError("No such file: '%s'" % filename)
    def filereader():
        # keep yielding fresh molecules until the converter reports EOF
        obmol = ob.OBMol()
        notatend = obconversion.ReadFile(obmol, filename)
        while notatend:
            yield Molecule(obmol)
            obmol = ob.OBMol()
            notatend = obconversion.Read(obmol)
    return filereader()
def readstring(format, string, opt=None):
    """Read in a molecule from a string.

    Required parameters:
       format - see the informats variable for a list of available
                input formats
       string

    Optional parameters:
       opt - a dictionary of format-specific options
             For format options with no parameters, specify the
             value as None.

    Example:
    >>> input = "C1=CC=CS1"
    >>> mymol = readstring("smi", input)
    >>> len(mymol.atoms)
    5
    """
    if opt is None:
        opt = {}
    obmol = ob.OBMol()
    obconversion = ob.OBConversion()
    formatok = obconversion.SetInFormat(format)
    if not formatok:
        raise ValueError("%s is not a recognised Open Babel format" % format)
    for k, v in opt.items():
        if v is None:
            obconversion.AddOption(k, obconversion.INOPTIONS)
        else:
            obconversion.AddOption(k, obconversion.INOPTIONS, str(v))
    success = obconversion.ReadString(obmol, string)
    if not success:
        raise IOError("Failed to convert '%s' to format '%s'" % (
            string, format))
    return Molecule(obmol)
class Outputfile(object):
    """Represent a file to which *output* is to be sent.

    Although it's possible to write a single molecule to a file by
    calling the write() method of a molecule, if multiple molecules
    are to be written to the same file you should use the Outputfile
    class.

    Required parameters:
       format - see the outformats variable for a list of available
                output formats
       filename

    Optional parameters:
       overwrite -- if the output file already exists, should it
                   be overwritten? (default is False)
       opt -- a dictionary of format-specific options
              For format options with no parameters, specify the
              value as None.

    Methods:
       write(molecule)
       close()
    """

    def __init__(self, format, filename, overwrite=False, opt=None):
        if opt is None:
            opt = {}
        self.format = format
        self.filename = filename
        if not overwrite and os.path.isfile(self.filename):
            raise IOError("%s already exists. Use 'overwrite=True' to overwrite it." % self.filename)
        self.obConversion = ob.OBConversion()
        formatok = self.obConversion.SetOutFormat(self.format)
        if not formatok:
            raise ValueError("%s is not a recognised Open Babel format" % format)
        for k, v in opt.items():
            if v is None:
                self.obConversion.AddOption(k, self.obConversion.OUTOPTIONS)
            else:
                self.obConversion.AddOption(k, self.obConversion.OUTOPTIONS, str(v))
        self.total = 0  # The total number of molecules written to the file

    def write(self, molecule):
        """Write a molecule to the output file.

        Required parameters:
           molecule
        """
        if not self.filename:
            raise IOError("Outputfile instance is closed.")
        # WriteFile opens the file on the first write; subsequent molecules
        # are appended with Write
        if self.total == 0:
            self.obConversion.WriteFile(molecule.OBMol, self.filename)
        else:
            self.obConversion.Write(molecule.OBMol)
        self.total += 1

    def close(self):
        """Close the Outputfile to further writing."""
        self.obConversion.CloseOutFile()
        self.filename = None
class Molecule(object):
    """Represent a Pybel Molecule.

    Required parameter:
       OBMol -- an Open Babel OBMol or any type of cinfony Molecule

    Attributes:
       atoms, charge, conformers, data, dim, energy, exactmass, formula,
       molwt, spin, sssr, title, unitcell.
    (refer to the Open Babel library documentation for more info).

    Methods:
       addh(), calcfp(), calcdesc(), draw(), localopt(), make3D(), removeh(),
       write()

    The underlying Open Babel molecule can be accessed using the attribute:
       OBMol
    """
    _cinfony = True

    def __init__(self, OBMol):
        # Accept any cinfony Molecule: round-trip it through its _exchange
        # representation (SMILES when no coordinates, MOL block otherwise).
        if hasattr(OBMol, "_cinfony"):
            a, b = OBMol._exchange
            if a == 0:
                mol = readstring("smi", b)
            else:
                mol = readstring("mol", b)
            OBMol = mol.OBMol
        self.OBMol = OBMol

    @property
    def atoms(self):
        # Open Babel atom indices are 1-based
        return [Atom(self.OBMol.GetAtom(i + 1)) for i in range(self.OBMol.NumAtoms())]

    @property
    def charge(self):
        return self.OBMol.GetTotalCharge()

    @property
    def conformers(self):
        return self.OBMol.GetConformers()

    @property
    def data(self):
        return MoleculeData(self.OBMol)

    @property
    def dim(self):
        return self.OBMol.GetDimension()

    @property
    def energy(self):
        return self.OBMol.GetEnergy()

    @property
    def exactmass(self):
        return self.OBMol.GetExactMass()

    @property
    def formula(self):
        return self.OBMol.GetFormula()

    @property
    def molwt(self):
        return self.OBMol.GetMolWt()

    @property
    def spin(self):
        return self.OBMol.GetTotalSpinMultiplicity()

    @property
    def sssr(self):
        return self.OBMol.GetSSSR()

    def _gettitle(self):
        return self.OBMol.GetTitle()

    def _settitle(self, val):
        self.OBMol.SetTitle(val)

    title = property(_gettitle, _settitle)

    @property
    def unitcell(self):
        unitcell_index = _obconsts.UnitCell
        if sys.platform[:3] == "cli":
            unitcell_index = System.UInt32(unitcell_index)
        unitcell = self.OBMol.GetData(unitcell_index)
        if unitcell:
            if sys.platform[:3] != "cli":
                return _obfuncs.toUnitCell(unitcell)
            else:
                return unitcell.Downcast[ob.OBUnitCell]()
        else:
            raise AttributeError("Molecule has no attribute 'unitcell'")

    @property
    def _exchange(self):
        # (1, molblock) when coordinates exist, otherwise (0, canonical SMILES)
        if self.OBMol.HasNonZeroCoords():
            return (1, self.write("mol"))
        else:
            return (0, self.write("can").split()[0])

    def __iter__(self):
        """Iterate over the Atoms of the Molecule.

        This allows constructions such as the following:
           for atom in mymol:
               print(atom)
        """
        return iter(self.atoms)

    def calcdesc(self, descnames=[]):
        """Calculate descriptor values.

        Optional parameter:
           descnames -- a list of names of descriptors

        If descnames is not specified, all available descriptors are
        calculated. See the descs variable for a list of available
        descriptors.
        """
        if not descnames:
            descnames = descs
        ans = {}
        for descname in descnames:
            try:
                desc = _descdict[descname]
            except KeyError:
                raise ValueError("%s is not a recognised Open Babel descriptor type" % descname)
            ans[descname] = desc.Predict(self.OBMol)
        return ans

    def calcfp(self, fptype="FP2"):
        """Calculate a molecular fingerprint.

        Optional parameters:
           fptype -- the fingerprint type (default is "FP2"). See the
                     fps variable for a list of of available fingerprint
                     types.
        """
        if sys.platform[:3] == "cli":
            fp = ob.VectorUInt()
        else:
            fp = ob.vectorUnsignedInt()
        fptype = fptype.lower()
        try:
            fingerprinter = _fingerprinters[fptype]
        except KeyError:
            raise ValueError("%s is not a recognised Open Babel Fingerprint type" % fptype)
        fingerprinter.GetFingerprint(self.OBMol, fp)
        return Fingerprint(fp)

    def write(self, format="smi", filename=None, overwrite=False, opt=None):
        """Write the molecule to a file or return a string.

        Optional parameters:
           format -- see the informats variable for a list of available
                     output formats (default is "smi")
           filename -- default is None
           overwrite -- if the output file already exists, should it
                       be overwritten? (default is False)
           opt -- a dictionary of format specific options
                  For format options with no parameters, specify the
                  value as None.

        If a filename is specified, the result is written to a file.
        Otherwise, a string is returned containing the result.

        To write multiple molecules to the same file you should use
        the Outputfile class.
        """
        if opt is None:
            opt = {}
        obconversion = ob.OBConversion()
        formatok = obconversion.SetOutFormat(format)
        if not formatok:
            raise ValueError("%s is not a recognised Open Babel format" % format)
        for k, v in opt.items():
            if v is None:
                obconversion.AddOption(k, obconversion.OUTOPTIONS)
            else:
                obconversion.AddOption(k, obconversion.OUTOPTIONS, str(v))
        if filename:
            if not overwrite and os.path.isfile(filename):
                raise IOError("%s already exists. Use 'overwrite=True' to overwrite it." % filename)
            obconversion.WriteFile(self.OBMol, filename)
            obconversion.CloseOutFile()
        else:
            return obconversion.WriteString(self.OBMol)

    def localopt(self, forcefield="mmff94", steps=500):
        """Locally optimize the coordinates.

        Optional parameters:
           forcefield -- default is "mmff94". See the forcefields variable
                         for a list of available forcefields.
           steps -- default is 500

        If the molecule does not have any coordinates, make3D() is
        called before the optimization. Note that the molecule needs
        to have explicit hydrogens. If not, call addh().
        """
        forcefield = forcefield.lower()
        if self.dim != 3:
            self.make3D(forcefield)
        ff = _forcefields[forcefield]
        success = ff.Setup(self.OBMol)
        if not success:
            # best-effort: silently skip optimization when the forcefield
            # cannot handle this molecule
            return
        ff.SteepestDescent(steps)
        ff.GetCoordinates(self.OBMol)

##    def globalopt(self, forcefield="MMFF94", steps=1000):
##        if not (self.OBMol.Has2D() or self.OBMol.Has3D()):
##            self.make3D()
##        self.localopt(forcefield, 250)
##        ff = _forcefields[forcefield]
##        numrots = self.OBMol.NumRotors()
##        if numrots > 0:
##            ff.WeightedRotorSearch(numrots, int(math.log(numrots + 1) * steps))
##        ff.GetCoordinates(self.OBMol)

    def make3D(self, forcefield="mmff94", steps=50):
        """Generate 3D coordinates.

        Optional parameters:
           forcefield -- default is "mmff94". See the forcefields variable
                         for a list of available forcefields.
           steps -- default is 50

        Once coordinates are generated, hydrogens are added and a quick
        local optimization is carried out with 50 steps and the
        MMFF94 forcefield. Call localopt() if you want
        to improve the coordinates further.
        """
        forcefield = forcefield.lower()
        _builder.Build(self.OBMol)
        self.addh()
        self.localopt(forcefield, steps)

    def addh(self):
        """Add hydrogens."""
        self.OBMol.AddHydrogens()

    def removeh(self):
        """Remove hydrogens."""
        self.OBMol.DeleteHydrogens()

    def __str__(self):
        return self.write()

    def draw(self, show=True, filename=None, update=False, usecoords=False):
        """Create a 2D depiction of the molecule.

        Optional parameters:
          show -- display on screen (default is True)
          filename -- write to file (default is None)
          update -- update the coordinates of the atoms to those
                    determined by the structure diagram generator
                    (default is False)
          usecoords -- don't calculate 2D coordinates, just use
                       the current coordinates (default is False)

        Tkinter and Python Imaging Library are required for image display.
        """
        obconversion = ob.OBConversion()
        formatok = obconversion.SetOutFormat("_png2")
        if not formatok:
            errormessage = ("PNG depiction support not found. You should compile "
                            "Open Babel with support for Cairo. See installation "
                            "instructions for more information.")
            raise ImportError(errormessage)
        # Need to copy to avoid removing hydrogens from self
        workingmol = Molecule(ob.OBMol(self.OBMol))
        workingmol.removeh()
        if not usecoords:
            _operations['gen2D'].Do(workingmol.OBMol)
        if update:
            if workingmol.OBMol.NumAtoms() != self.OBMol.NumAtoms():
                errormessage = ("It is not possible to update the original molecule "
                                "with the calculated coordinates, as the original "
                                "molecule contains explicit hydrogens for which no "
                                "coordinates have been calculated.")
                raise RuntimeError(errormessage)
            else:
                for i in range(workingmol.OBMol.NumAtoms()):
                    self.OBMol.GetAtom(i + 1).SetVector(workingmol.OBMol.GetAtom(i + 1).GetVector())
        if filename:
            filedes = None
        else:
            if sys.platform[:3] == "cli" and show:
                errormessage = ("It is only possible to show the molecule if you "
                                "provide a filename. The reason for this is that I kept "
                                "having problems when using temporary files.")
                raise RuntimeError(errormessage)
            filedes, filename = tempfile.mkstemp()
        workingmol.write("_png2", filename=filename, overwrite=True)
        if show:
            if sys.platform[:4] == "java":
                image = javax.imageio.ImageIO.read(java.io.File(filename))
                frame = javax.swing.JFrame(visible=1)
                frame.getContentPane().add(javax.swing.JLabel(javax.swing.ImageIcon(image)))
                frame.setSize(300, 300)
                frame.setDefaultCloseOperation(javax.swing.WindowConstants.DISPOSE_ON_CLOSE)
                frame.show()
            elif sys.platform[:3] == "cli":
                form = _MyForm()
                form.setup(filename, self.title)
                Application.Run(form)
            else:
                if not tk:
                    errormessage = ("Tkinter or Python Imaging "
                                    "Library not found, but is required for image "
                                    "display. See installation instructions for "
                                    "more information.")
                    raise ImportError(errormessage)
                root = tk.Tk()
                root.title((hasattr(self, "title") and self.title)
                           or self.__str__().rstrip())
                frame = tk.Frame(root, colormap="new", visual='truecolor').pack()
                image = PIL.open(filename)
                imagedata = piltk.PhotoImage(image)
                label = tk.Label(frame, image=imagedata).pack()
                quitbutton = tk.Button(root, text="Close", command=root.destroy).pack(fill=tk.X)
                root.mainloop()
        if filedes:
            os.close(filedes)
            os.remove(filename)
class Atom(object):
    """Represent a Pybel atom.
    Required parameter:
       OBAtom -- an Open Babel OBAtom
    Attributes:
       atomicmass, atomicnum, cidx, coords, coordidx, exactmass,
       formalcharge, heavyvalence, heterovalence, hyb, idx,
       implicitvalence, isotope, partialcharge, spin, type,
       valence, vector.
    (refer to the Open Babel library documentation for more info).
    Every attribute is a read-only property fetched on demand from the
    wrapped OBAtom (no caching), so values track the underlying molecule.
    The original Open Babel atom can be accessed using the attribute:
       OBAtom
    """
    def __init__(self, OBAtom):
        # Keep a reference to the wrapped Open Babel atom.
        self.OBAtom = OBAtom
    @property
    def coords(self):
        # (x, y, z) coordinates as a plain Python tuple of floats.
        return (self.OBAtom.GetX(), self.OBAtom.GetY(), self.OBAtom.GetZ())
    @property
    def atomicmass(self):
        return self.OBAtom.GetAtomicMass()
    @property
    def atomicnum(self):
        return self.OBAtom.GetAtomicNum()
    @property
    def cidx(self):
        return self.OBAtom.GetCIdx()
    @property
    def coordidx(self):
        return self.OBAtom.GetCoordinateIdx()
    @property
    def exactmass(self):
        return self.OBAtom.GetExactMass()
    @property
    def formalcharge(self):
        return self.OBAtom.GetFormalCharge()
    @property
    def heavyvalence(self):
        # NOTE(review): GetHvyValence/GetHeteroValence/GetImplicitValence are
        # Open Babel 2.x API names; confirm against the OB version in use.
        return self.OBAtom.GetHvyValence()
    @property
    def heterovalence(self):
        return self.OBAtom.GetHeteroValence()
    @property
    def hyb(self):
        return self.OBAtom.GetHyb()
    @property
    def idx(self):
        # 1-based atom index within the parent molecule.
        return self.OBAtom.GetIdx()
    @property
    def implicitvalence(self):
        return self.OBAtom.GetImplicitValence()
    @property
    def isotope(self):
        return self.OBAtom.GetIsotope()
    @property
    def partialcharge(self):
        return self.OBAtom.GetPartialCharge()
    @property
    def spin(self):
        return self.OBAtom.GetSpinMultiplicity()
    @property
    def type(self):
        return self.OBAtom.GetType()
    @property
    def valence(self):
        return self.OBAtom.GetValence()
    @property
    def vector(self):
        return self.OBAtom.GetVector()
    def __str__(self):
        c = self.coords
        return "Atom: %d (%.2f %.2f %.2f)" % (self.atomicnum, c[0], c[1], c[2])
def _findbits(fp, bitsperint):
"""Find which bits are set in a list/vector.
This function is used by the Fingerprint class.
>>> _findbits([13, 71], 8)
[1, 3, 4, 9, 10, 11, 15]
"""
ans = []
start = 1
if sys.platform[:4] == "java":
fp = [fp.get(i) for i in range(fp.size())]
for x in fp:
i = start
while x > 0:
if x % 2:
ans.append(i)
x >>= 1
i += 1
start += bitsperint
return ans
class Fingerprint(object):
    """A Molecular Fingerprint.

    Required parameters:
       fingerprint -- a vector calculated by OBFingerprint.FindFingerprint()

    Attributes:
       fp -- the underlying fingerprint object
       bits -- a list of bits set in the Fingerprint

    Methods:
       The "|" operator can be used to calculate the Tanimoto coeff. For example,
       given two Fingerprints 'a', and 'b', the Tanimoto coefficient is given by:
          tanimoto = a | b
    """
    def __init__(self, fingerprint):
        self.fp = fingerprint

    def __or__(self, other):
        # Tanimoto coefficient of the two underlying bit vectors.
        return ob.OBFingerprint.Tanimoto(self.fp, other.fp)

    @property
    def bits(self):
        return _findbits(self.fp, ob.OBFingerprint.Getbitsperint())

    def __str__(self):
        values = self.fp
        if sys.platform[:4] == "java":
            values = [values.get(i) for i in range(values.size())]
        return ", ".join(map(str, values))
class Smarts(object):
    """A Smarts Pattern Matcher.

    Required parameters:
       smartspattern

    Methods:
       findall(molecule)

    Example:
    >>> mol = readstring("smi","CCN(CC)CC") # triethylamine
    >>> smarts = Smarts("[#6][#6]") # Matches an ethyl group
    >>> print(smarts.findall(mol))
    [(1, 2), (4, 5), (6, 7)]

    The numbers returned are the indices (starting from 1) of the atoms
    that match the SMARTS pattern. In this case, there are three matches
    for each of the three ethyl groups in the molecule.
    """
    def __init__(self, smartspattern):
        """Initialise with a SMARTS pattern."""
        self.obsmarts = ob.OBSmartsPattern()
        if not self.obsmarts.Init(smartspattern):
            raise IOError("Invalid SMARTS pattern")

    def findall(self, molecule):
        """Return all unique matches of the pattern against a molecule.

        Required parameters:
           molecule
        """
        self.obsmarts.Match(molecule.OBMol)
        matches = self.obsmarts.GetUMapList()
        # Jython returns a Java vector; convert it to a plain list.
        if sys.platform[:4] == "java":
            matches = [matches.get(i) for i in range(matches.size())]
        return list(matches)
class MoleculeData(object):
    """Store molecule data in a dictionary-type object
    Required parameters:
       obmol -- an Open Babel OBMol
    Methods and accessor methods are like those of a dictionary except
    that the data is retrieved on-the-fly from the underlying OBMol.
    Example:
    >>> mol = readfile("sdf", 'head.sdf').next() # Python 2
    >>> # mol = next(readfile("sdf", 'head.sdf')) # Python 3
    >>> data = mol.data
    >>> print(data)
    {'Comment': 'CORINA 2.61 0041  25.10.2001', 'NSC': '1'}
    >>> print(len(data), data.keys(), data.has_key("NSC"))
    2 ['Comment', 'NSC'] True
    >>> print(data['Comment'])
    CORINA 2.61 0041  25.10.2001
    >>> data['Comment'] = 'This is a new comment'
    >>> for k,v in data.items():
    ...    print(k, "-->", v)
    Comment --> This is a new comment
    NSC --> 1
    >>> del data['NSC']
    >>> print(len(data), data.keys(), data.has_key("NSC"))
    1 ['Comment'] False
    """
    def __init__(self, obmol):
        # The wrapped OBMol; all reads/writes go straight through to it.
        self._mol = obmol
    def _data(self):
        # Collect the OBMol's generic data, keeping only pair/comment entries.
        # Jython and IronPython need platform-specific conversions.
        data = self._mol.GetData()
        if sys.platform[:4] == "java":
            data = [data.get(i) for i in range(data.size())]
        answer = [x for x in data if
                  x.GetDataType()==_obconsts.PairData or
                  x.GetDataType()==_obconsts.CommentData]
        if sys.platform[:3] != "cli":
            answer = [_obfuncs.toPairData(x) for x in answer]
        return answer
    def _testforkey(self, key):
        # Raise a dict-style KeyError when the key is absent.
        if not key in self:
            raise KeyError("'%s'" % key)
    def keys(self):
        return [x.GetAttribute() for x in self._data()]
    def values(self):
        return [x.GetValue() for x in self._data()]
    def items(self):
        return iter(zip(self.keys(), self.values()))
    def __iter__(self):
        return iter(self.keys())
    def iteritems(self): # Can remove for Python 3
        return self.items()
    def __len__(self):
        return len(self._data())
    def __contains__(self, key):
        return self._mol.HasData(key)
    def __delitem__(self, key):
        self._testforkey(key)
        self._mol.DeleteData(self._mol.GetData(key))
    def clear(self):
        # Delete every data entry from the underlying OBMol.
        for key in self:
            del self[key]
    def has_key(self, key):
        return key in self
    def update(self, dictionary):
        for k, v in dictionary.items():
            self[k] = v
    def __getitem__(self, key):
        self._testforkey(key)
        answer = self._mol.GetData(key)
        if sys.platform[:3] != "cli":
            answer = _obfuncs.toPairData(answer)
        return answer.GetValue()
    def __setitem__(self, key, value):
        # Existing key: mutate the stored pair data in place.
        if key in self:
            if sys.platform[:3] != "cli":
                pairdata = _obfuncs.toPairData(self._mol.GetData(key))
            else:
                pairdata = self._mol.GetData(key).Downcast[ob.OBPairData]()
            pairdata.SetValue(str(value))
        else:
            # New key: build a fresh OBPairData and hand a copy to the OBMol.
            pairdata = ob.OBPairData()
            pairdata.SetAttribute(key)
            pairdata.SetValue(str(value))
            self._mol.CloneData(pairdata)
    def __repr__(self):
        return dict(self.items()).__repr__()
if sys.platform[:3] == "cli":
    # IronPython (.NET) only: a minimal WinForms window used by
    # Molecule.draw() to display a PNG depiction of the molecule.
    class _MyForm(Form):
        def __init__(self):
            Form.__init__(self)
        def setup(self, filename, title):
            # adjust the form's client area size to the picture
            self.ClientSize = Size(300, 300)
            self.Text = title
            self.filename = filename
            self.image = Image.FromFile(self.filename)
            pictureBox = PictureBox()
            # this will fit the image to the form
            pictureBox.SizeMode = PictureBoxSizeMode.StretchImage
            pictureBox.Image = self.image
            # fit the picture box to the frame
            pictureBox.Dock = DockStyle.Fill
            self.Controls.Add(pictureBox)
            self.Show()
if __name__=="__main__": #pragma: no cover
    # Run the module's embedded doctests when executed directly.
    import doctest
    doctest.testmod(verbose=True)
|
nilq/baby-python
|
python
|
import argparse
import os
import sys
import requests
# Globals
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
APP_DIR = 'app'
APP_FILES = ['__init__.py', 'config.py', 'run.py', 'create_db.py', 'shell.py']
STATIC_DIR = 'static'
STATIC_SUBDIRS = ['css', 'fonts', 'img', 'js']
TEMPLATE_DIR = 'templates'
TEMPLATE_FILES = ['base.html', 'macros.html']
VIEWS_DIR = 'views'

# Command-line interface.
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--init", help="Initialize a project", action="store_true")
parser.add_argument("-n", "--name", help="Project Name", nargs='+')
parser.add_argument("-u", "--ui", help="UI Library")
parser.add_argument("-a", "--auth", help="Authentication System")
parser.add_argument("-d", "--db", help="Database Backend")
args = parser.parse_args()

# Create a new project
if args.init:
    # Guard clauses: a name is mandatory and the target must not exist yet.
    if not args.name:
        sys.exit('You must have a project name')
    project_dir = f"{BASE_DIR}/{'-'.join(args.name)}"
    if os.path.exists(project_dir):
        sys.exit('Project Directory already exists')
    # Lay out the skeleton: app/, templates/, views/, static/ + subdirs.
    os.makedirs(project_dir)
    app_root = '/'.join([project_dir, APP_DIR])
    os.makedirs(app_root)
    os.makedirs('/'.join([app_root, TEMPLATE_DIR]))
    os.makedirs('/'.join([app_root, VIEWS_DIR]))
    os.makedirs('/'.join([app_root, STATIC_DIR]))
    for subdir in STATIC_SUBDIRS:
        os.makedirs('/'.join([app_root, STATIC_DIR, subdir]))
|
nilq/baby-python
|
python
|
#!/usr/bin/python3
# -*- coding: UTF-8 -*-
import sys
# Printable-ASCII lookup table indexed by byte value (0x00-0x7F).
# Control codes (0x00-0x1F) and DEL (0x7F) render as '.'; values above
# 0x7F never index this table (_hex_char checks the range first).
_UINT8_TO_CHAR = [
    '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.',
    '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.',
    ' ', '!', '"', '#', '$', '%', '&', "'", '(', ')', '*', '+', ',', '-', '.', '/',
    '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ':', ';', '<', '=', '>', '?',
    '@', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O',
    'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '[', '\\', ']', '^', '_',
    '`', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o',
    'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '{', '|', '}', '~', '.',
]
if __name__ == '__main__':
    # This is a library module; refuse to run as a script.
    print('%s does not provide main()' % __file__)
    sys.exit(1)
def _hex_str(byte):
return '%02X' % (int(byte) & 0xFF)
def _hex_char(byte):
byte = int(byte) & 0xFF
if byte > 0x7F:
return '.'
else:
return _UINT8_TO_CHAR[byte]
def dump_bytes(data):
'dump data in a readable string table'
if isinstance(data, bytes) is False:
return ''
lines = []
data_len = len(data)
lines.append('data length %d' % data_len)
lines.append(
'------ 0 1 2 3 4 5 6 7 | 8 9 A B C D E F 01234567 89ABCDEF')
for index in range(0, data_len, 16):
remain_len = data_len - index
if remain_len >= 16:
string = '0x%04X %s %s %s %s %s %s %s %s | %s %s %s %s %s %s %s %s %s%s%s%s%s%s%s%s %s%s%s%s%s%s%s%s' % (
index,
_hex_str(data[index + 0]), _hex_str(data[index + 1]), _hex_str(data[index + 2]), _hex_str(data[index + 3]),
_hex_str(data[index + 4]), _hex_str(data[index + 5]), _hex_str(data[index + 6]), _hex_str(data[index + 7]),
_hex_str(data[index + 8]), _hex_str(data[index + 9]), _hex_str(data[index + 10]), _hex_str(data[index + 11]),
_hex_str(data[index + 12]), _hex_str(data[index + 13]), _hex_str(data[index + 14]), _hex_str(data[index + 15]),
_hex_char(data[index + 0]), _hex_char(data[index + 1]), _hex_char(data[index + 2]), _hex_char(data[index + 3]),
_hex_char(data[index + 4]), _hex_char(data[index + 5]), _hex_char(data[index + 6]), _hex_char(data[index + 7]),
_hex_char(data[index + 8]), _hex_char(data[index + 9]), _hex_char(data[index + 10]), _hex_char(data[index + 11]),
_hex_char(data[index + 12]), _hex_char(data[index + 13]), _hex_char(data[index + 14]), _hex_char(data[index + 15]),
)
lines.append(string)
else:
this_line = []
this_line.append('0x%04X ' % index)
for col in range(index, data_len):
this_line.append('%s ' % _hex_str(data[col]))
if remain_len > 8:
this_line.insert(9, '| ')
this_line.append(' ' * (16 - remain_len))
else:
this_line.append(' ' * (16 - remain_len))
this_line.append(' ')
print('remain_len = %d' % remain_len)
# this_line.append(' ')
this_line.append(' ')
for col in range(index, data_len):
this_line.append(_hex_char(data[col]))
if col == index + 7:
this_line.append(' ')
lines.append(''.join(this_line))
return '\n'.join(lines)
|
nilq/baby-python
|
python
|
def checkorders(orders):
    """Return a list of booleans, one per string in *orders*.

    Each entry is True when that string's brackets ('()', '[]', '{}')
    are balanced and correctly nested, False otherwise.

    Fixes over the original: the annotation ``orders: [str]`` was a list
    literal, not a valid type; matching is now done via an O(1) dict
    lookup instead of the two-string ``index`` helper, making this
    function self-contained.
    """
    # Closing bracket -> the opening bracket it must pair with.
    pairs = {')': '(', ']': '[', '}': '{'}
    results = []
    for order in orders:
        stack = []
        valid = True
        for ch in order:
            if ch in '([{':
                stack.append(ch)
            elif not stack or stack.pop() != pairs[ch]:
                # Closer with no opener, or a mismatched pair.
                valid = False
                break
        # Leftover openers also make the string unbalanced.
        results.append(valid and not stack)
    return results
def match(opens, closers):
    """Return True when *closers* is the closing bracket for *opens*.

    Raises ValueError (via str.index) if either argument is not a bracket.
    """
    open_pos = '([{'.index(opens)
    close_pos = ')]}'.index(closers)
    return open_pos == close_pos
# Demo: expected output is [True, False, True, True, False].
print(checkorders(['()','(','{}[]','[][][]','[{]{]']))
|
nilq/baby-python
|
python
|
from unittest import TestCase
import requests_mock
import urllib.parse
from .fixtures import TOKEN
from typeform import Typeform
from typeform.constants import API_BASE_URL
class FormsTestCase(TestCase):
    """Tests for the Typeform Forms API client.

    setUp creates a throwaway form against the live API (authenticated by
    the TOKEN fixture); tearDown deletes every form in the account, so
    individual tests may create forms freely. Request-shape tests use
    requests_mock and inspect ``request_history`` instead of hitting the API.
    """
    def setUp(self):
        self.forms = Typeform(TOKEN).forms
        form = self.forms.create({
            'title': 'title'
        })
        self.formID = form.get('id')
    def tearDown(self):
        # NOTE(review): 'list' shadows the builtin; harmless in this scope.
        list = self.forms.list()
        forms = list.get('items', [])
        for form in forms:
            self.forms.delete(form.get('id'))
    def test_forms_returns_method_and_path(self):
        """
        get all forms has the correct method and path
        """
        with requests_mock.mock() as m:
            m.get(API_BASE_URL+'/forms', json={})
            self.forms.list()
            history = m.request_history
            self.assertEqual(history[0].url, API_BASE_URL+'/forms')
            self.assertEqual(history[0].method, 'GET')
    def test_forms_correct_params(self):
        """
        paramters are sent correctly
        """
        with requests_mock.mock() as m:
            m.get(API_BASE_URL+'/forms', json={})
            self.forms.list(page=2, pageSize=10, search='hola', workspaceId='abc')
            history = m.request_history
            # camelCase keyword args must be translated to snake_case params.
            query = history[0].url.split('?')[1]
            params = dict(urllib.parse.parse_qs(query))
            self.assertEqual(params.pop('page')[0], '2')
            self.assertEqual(params.pop('page_size')[0], '10')
            self.assertEqual(params.pop('search')[0], 'hola')
            self.assertEqual(params.pop('workspace_id')[0], 'abc')
    def test_forms_get_correct_id(self):
        """
        get sends the correct UID
        """
        with requests_mock.mock() as m:
            m.get(API_BASE_URL+'/forms/'+self.formID, json={})
            self.forms.get(self.formID)
            history = m.request_history
            self.assertEqual(history[0].url, API_BASE_URL+'/forms/'+self.formID)
    def test_forms_get_sets_get_method(self):
        """
        get sets get method
        """
        with requests_mock.mock() as m:
            m.get(API_BASE_URL+'/forms/'+self.formID, json={})
            self.forms.get(self.formID)
            history = m.request_history
            self.assertEqual(history[0].method, 'GET')
    def test_forms_update_updates_a_form(self):
        """
        update updates a form
        """
        title = 'hola'
        result = self.forms.update(self.formID, data={
            'title': title
        })
        self.assertEqual(result.get('title'), title)
    def test_forms_update_as_patch_updates_a_form(self):
        """
        update as patch updates a form
        """
        result = self.forms.update(self.formID, patch=True, data=[{
            'op': 'replace',
            'path': '/title',
            'value': 'aloha'
        }])
        self.assertEqual(result, 'OK')
    def test_forms_update_sets_put_method_in_request_by_default(self):
        """
        update sets put method in request by default
        """
        with requests_mock.mock() as m:
            m.put(API_BASE_URL+'/forms/'+self.formID, json={})
            self.forms.update(self.formID, data={
                'title': 'title'
            })
            history = m.request_history
            self.assertEqual(history[0].method, 'PUT')
    def test_forms_delete_removes_the_correct_uid_form(self):
        """
        delete removes the correct uid form
        """
        get1Result = self.forms.get(self.formID)
        self.assertEqual(get1Result.get('id'), self.formID)
        self.forms.delete(self.formID)
        # Fetching after delete is expected to raise.
        try:
            self.forms.get(self.formID)
        except Exception as err:
            error = str(err)
        self.assertEqual(error, 'Non existing form with uid %s' % self.formID)
    def test_forms_create_has_the_correct_path_and_method(self):
        """
        create has the correct path and method
        """
        with requests_mock.mock() as m:
            m.post(API_BASE_URL+'/forms', json={})
            self.forms.create({
                'title': 'hola'
            })
            history = m.request_history
            self.assertEqual(history[0].method, 'POST')
            self.assertEqual(history[0].url, API_BASE_URL+'/forms')
    def test_forms_create_creates_a_new_form(self):
        """
        create creates a new form
        """
        createResult = self.forms.create({
            'title': 'hola'
        })
        formID = createResult.get('id')
        getResult = self.forms.get(formID)
        self.assertIsNone(createResult.get('code', None))
        self.assertEqual(getResult.get('id'), formID)
    def test_forms_get_messages_has_the_correct_path_and_method(self):
        """
        get messages has the correct path and method
        """
        with requests_mock.mock() as m:
            m.get(API_BASE_URL+'/forms/'+self.formID+'/messages', json={})
            self.forms.messages.get(self.formID)
            history = m.request_history
            self.assertEqual(history[0].method, 'GET')
            self.assertEqual(history[0].url, API_BASE_URL+'/forms/'+self.formID+'/messages')
    def test_forms_update_messages_has_the_correct_path_and_method(self):
        """
        update messages has the correct path and method
        """
        with requests_mock.mock() as m:
            m.put(API_BASE_URL+'/forms/'+self.formID+'/messages')
            self.forms.messages.update(self.formID)
            history = m.request_history
            self.assertEqual(history[0].method, 'PUT')
            self.assertEqual(history[0].url, API_BASE_URL+'/forms/'+self.formID+'/messages')
|
nilq/baby-python
|
python
|
# libraries
import pandas as pd
import yaml as yaml
from google.cloud import storage
from os.path import dirname, abspath
# utils
from utils import upload_local_file_to_gcp_storage_bucket, df_to_gcp_csv
# set project directory
# NOTE(review): abspath("__file__") resolves the *literal string* "__file__"
# relative to the CWD — this probably should be abspath(__file__); confirm.
project_directory = dirname(dirname(abspath("__file__")))
print("Processing : Loading configuration file")
# NOTE(review): the file handle from open() is never closed; consider `with`.
config = yaml.safe_load(open(project_directory + "/config/config.yaml"))
print("Processing : Set Configuration parameters")
storage_key = project_directory + config["parameters"]["storage_service_account_key"]
data_file = project_directory + config["parameters"]["data_source"]
bucket = config["parameters"]["bucket_source"]
blob_name = config["parameters"]["blob_source"]
print("Processing : Set storage client")
# Authenticate against GCS using the service-account key from the config.
storage_client = storage.Client.from_service_account_json(storage_key)
print("Processing : upload file")
upload_local_file_to_gcp_storage_bucket(storage_client, bucket, blob_name, data_file)
print("Processing : upload from pandas dataframe")
# Second upload path: round-trip the same CSV through a DataFrame.
df = pd.read_csv(data_file)
df_to_gcp_csv(
    storage_client,
    df,
    bucket=bucket,
    blob_name=blob_name,
    source_file_name=blob_name,
)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generates a sysroot tarball for building a specific package.
Meant for use after setup_board and build_packages have been run.
"""
from __future__ import print_function
import os
from chromite.lib import constants
from chromite.lib import cros_build_lib
from chromite.lib import commandline
from chromite.lib import osutils
from chromite.lib import sudo
from chromite.lib import sysroot_lib
# Default output tarball name; %(package)s is substituted in ParseCommandLine.
DEFAULT_NAME = 'sysroot_%(package)s.tar.xz'
# Separator between category and package name in a portage atom.
PACKAGE_SEPARATOR = '/'
# Name of the sysroot directory created inside the temporary build dir.
SYSROOT = 'sysroot'
def ParseCommandLine(argv):
  """Parse args, and run environment-independent checks."""
  parser = commandline.ArgumentParser(description=__doc__)
  parser.add_argument('--board', required=True,
                      help=('The board to generate the sysroot for.'))
  parser.add_argument('--package', required=True,
                      help=('The packages to generate the sysroot for.'))
  parser.add_argument('--deps-only', action='store_true',
                      default=False,
                      help='Build dependencies only.')
  parser.add_argument('--out-dir', type='path', required=True,
                      help='Directory to place the generated tarball.')
  parser.add_argument('--out-file', default=DEFAULT_NAME,
                      help='The name to give to the tarball. '
                      'Defaults to %(default)s.')
  options = parser.parse_args(argv)
  # Fill the %(package)s placeholder from the first requested package,
  # replacing '/' so the result is a valid file name.
  options.out_file %= {
      'package': options.package.split()[0].replace(PACKAGE_SEPARATOR, '_'),
  }
  return options
class GenerateSysroot(object):
  """Wrapper for generation functionality."""
  PARALLEL_EMERGE = os.path.join(constants.CHROMITE_BIN_DIR, 'parallel_emerge')
  def __init__(self, sysroot, options):
    """Initialize
    Args:
      sysroot: Path to sysroot.
      options: Parsed options.
    """
    self.sysroot = sysroot
    self.options = options
    # ROOT redirects emerge installs into our sysroot; USE is passed through.
    self.extra_env = {'ROOT': self.sysroot, 'USE': os.environ.get('USE', '')}
  def _Emerge(self, *args, **kwargs):
    """Emerge the given packages using parallel_emerge."""
    cmd = [self.PARALLEL_EMERGE, '--board=%s' % self.options.board,
           '--usepkgonly', '--noreplace'] + list(args)
    kwargs.setdefault('extra_env', self.extra_env)
    cros_build_lib.SudoRunCommand(cmd, **kwargs)
  def _InstallToolchain(self):
    """Write the board config into the sysroot and install the toolchain."""
    # Create the sysroot's config.
    sysroot = sysroot_lib.Sysroot(self.sysroot)
    sysroot.WriteConfig(sysroot.GenerateBoardConfig(self.options.board))
    cros_build_lib.RunCommand(
        [os.path.join(constants.CROSUTILS_DIR, 'install_toolchain'),
         '--noconfigure', '--sysroot', self.sysroot])
  def _InstallKernelHeaders(self):
    """Install kernel headers into the sysroot."""
    self._Emerge('sys-kernel/linux-headers')
  def _InstallBuildDependencies(self):
    """Install the requested packages, or with --deps-only just their deps."""
    # Calculate buildtime deps that are not runtime deps.
    raw_sysroot = cros_build_lib.GetSysroot(board=self.options.board)
    packages = []
    if not self.options.deps_only:
      packages = self.options.package.split()
    else:
      for pkg in self.options.package.split():
        cmd = ['qdepends', '-q', '-C', pkg]
        output = cros_build_lib.RunCommand(
            cmd, extra_env={'ROOT': raw_sysroot}, capture_output=True).output
        if output.count('\n') > 1:
          raise AssertionError('Too many packages matched for given pattern')
        # qdepend outputs "package: deps", so only grab the deps.
        deps = output.partition(':')[2].split()
        packages.extend(deps)
    # Install the required packages.
    if packages:
      self._Emerge(*packages)
  def _CreateTarball(self):
    """Tar up the generated sysroot into the requested output file."""
    target = os.path.join(self.options.out_dir, self.options.out_file)
    cros_build_lib.CreateTarball(target, self.sysroot, sudo=True)
  def Perform(self):
    """Generate the sysroot."""
    self._InstallToolchain()
    self._InstallKernelHeaders()
    self._InstallBuildDependencies()
    self._CreateTarball()
def FinishParsing(options):
  """Run environment dependent checks on parsed args."""
  # Refuse to clobber an existing tarball or write into a missing directory.
  target = os.path.join(options.out_dir, options.out_file)
  if os.path.exists(target):
    cros_build_lib.Die('Output file %r already exists.' % target)
  if not os.path.isdir(options.out_dir):
    cros_build_lib.Die(
        'Non-existent directory %r specified for --out-dir' % options.out_dir)
def main(argv):
  """Entry point: build the sysroot in a temp dir and tar it up."""
  options = ParseCommandLine(argv)
  FinishParsing(options)
  cros_build_lib.AssertInsideChroot()
  # Keep sudo alive for the duration; the temp dir needs sudo to remove
  # root-owned files emerge leaves behind.
  with sudo.SudoKeepAlive(ttyless_sudo=False):
    with osutils.TempDir(set_global=True, sudo_rm=True) as tempdir:
      sysroot = os.path.join(tempdir, SYSROOT)
      os.mkdir(sysroot)
      GenerateSysroot(sysroot, options).Perform()
|
nilq/baby-python
|
python
|
# Interactive script: read a product price and a discount percentage,
# then print the discounted price.
preco = float(input('Digite o valor do produto? R$ '))
percentual = int(input('Qual será o desconto? '))
preco_final = preco - (preco * percentual) / 100
print('O produto que custava R${:.2f}, na promoção de {}% custará: R$ {:.2f}'.format(preco, percentual, preco_final))
|
nilq/baby-python
|
python
|
import collections
import statistics
import time
class Statistics:
    """Calculate mathematical statistics of numerical values.

    :ivar ~.sum: sum of all values
    :ivar ~.min: minimum of all values
    :ivar ~.max: maximum of all values
    :ivar ~.mean: mean of all values
    :ivar ~.median: median of all values
    :ivar ~.last_value: last added value
    :ivar ~.last_change: difference between the two most recently kept values
                         (None while fewer than two values are kept)
    """
    def __init__(self, max_age=None, max_samples=None):
        """
        :param max_age: Maximum age of values in seconds
        :param max_samples: Maximum amount of samples which will be kept
        :raises ValueError: if neither limit is specified
        """
        if max_age is None and max_samples is None:
            raise ValueError('Please specify max age or max samples!')
        self._max_age = max_age
        # timestamps[i] records when values[i] was added; the two deques are
        # always trimmed in lockstep so indexes correspond.
        self.timestamps = collections.deque(maxlen=max_samples)
        self.values = collections.deque(maxlen=max_samples)
        self.sum = None          # float | None
        self.min = None          # float | None
        self.max = None          # float | None
        self.mean = None         # float | None
        self.median = None       # float | None
        self.last_value = None   # float | None
        self.last_change = None  # float | None
    def _remove_old(self):
        """Drop value/timestamp pairs older than the configured max age."""
        if self._max_age is None:
            return None
        # remove too old entries
        now = time.time()
        while self.timestamps and (now - self.timestamps[0]) > self._max_age:
            self.timestamps.popleft()
            self.values.popleft()
    def update(self):
        """update values without adding a new value"""
        self._remove_old()
        count = len(self.values)
        if not count:
            self.sum = None
            self.min = None
            self.max = None
            self.mean = None
            self.median = None
        else:
            self.sum = sum(self.values)
            self.min = min(self.values)
            self.max = max(self.values)
            self.mean = statistics.mean(self.values)
            self.median = statistics.median(self.values)
        # Fix: also reset last_change when everything aged out (the original
        # kept a stale value once the deques became empty).
        if count >= 2:
            self.last_change = self.values[-1] - self.values[-2]
        else:
            self.last_change = None
    def add_value(self, value):
        """Add a new value and recalculate statistical values

        :param value: new value
        """
        assert isinstance(value, (int, float)), type(value)
        self.last_value = value
        self.timestamps.append(time.time())
        self.values.append(value)
        self.update()
    def __repr__(self):
        # Fix: the original formatted None with %.1f/%.2f and raised
        # TypeError for a fresh or fully-aged-out instance.
        if not self.values:
            return '<Statistics sum: None, min: None, max: None, ' \
                   'mean: None, median: None>'
        return f'<Statistics sum: {self.sum:.1f}, min: {self.min:.2f}, max: {self.max:.2f}, ' \
               f'mean: {self.mean:.2f}, median: {self.median:.2f}>'
|
nilq/baby-python
|
python
|
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
class MetaFeaturesExtractor(BaseEstimator, TransformerMixin):
    """Sklearn transformer that left-joins user and item metadata onto X.

    Joins ``user_meta`` on 'msno' and ``item_meta`` on 'song_id', then derives
    a ``days_registered`` column from the user's registration window.
    """
    def __init__(self, user_meta=None, item_meta=None):
        self.user_meta = user_meta
        self.item_meta = item_meta
        # Fix: the original dereferenced user_meta unconditionally, so the
        # documented default of None crashed immediately with AttributeError.
        if self.user_meta is not None:
            # Parse the YYYYMMDD integer columns into proper datetimes once,
            # up front (note: this mutates the caller's frame in place).
            self.user_meta.registration_init_time = pd.to_datetime(
                self.user_meta.registration_init_time, format='%Y%m%d')
            self.user_meta.expiration_date = pd.to_datetime(
                self.user_meta.expiration_date, format='%Y%m%d')
        self.X_with_meta = None
    def fit(self, X, y=None, **fit_params):
        """No-op fit; present to satisfy the sklearn transformer API."""
        return self
    def transform(self, X):
        """Return a copy of X joined with user/item metadata plus days_registered."""
        self.X_with_meta = X.copy()
        self.X_with_meta = pd.merge(self.X_with_meta, self.user_meta, on='msno', how='left')
        self.X_with_meta = pd.merge(self.X_with_meta, self.item_meta, on='song_id', how='left')
        self.X_with_meta[
            'days_registered'
        ] = self.X_with_meta.expiration_date - self.X_with_meta.registration_init_time
        # Convert the timedelta column to a plain integer day count.
        self.X_with_meta['days_registered'] = self.X_with_meta.days_registered.apply(lambda x: x.days)
        return self.X_with_meta
|
nilq/baby-python
|
python
|
# coding=utf-8
from django.test import TestCase
from django.db import IntegrityError
from applications.trackers.models import Tracker
class TrackerModelTest(TestCase):
    """Unit tests for the Tracker model (creation, ordering, __str__).

    NOTE(review): assertEquals is a deprecated alias of assertEqual.
    """
    def test_create_tracker(self):
        Tracker.objects.create(ip='192.168.0.1')
        tracker = Tracker.objects.all()
        self.assertTrue(tracker)
    def test_multiple_create(self):
        Tracker.objects.bulk_create([
            Tracker(ip='192.168.0.1'),
            Tracker(ip='192.168.0.2'),
        ])
        tracker = Tracker.objects.all()
        self.assertEquals(tracker.count(), 2)
    def test_ordering(self):
        # The model's default ordering should surface '192.168.0.2' first.
        Tracker.objects.bulk_create([
            Tracker(ip='192.168.0.1'),
            Tracker(ip='192.168.1.2'),
            Tracker(ip='192.168.0.2'),
        ])
        self.assertEquals(Tracker.objects.first().ip, '192.168.0.2')
    def test_error_without_ip(self):
        # ip is a required (NOT NULL) column.
        with self.assertRaises(IntegrityError):
            Tracker.objects.create()
    def test_str(self):
        Tracker.objects.create(ip='192.168.0.1')
        tracker = Tracker.objects.first()
        self.assertEquals(
            tracker.__str__(),
            'IP адрес {ip}, зафиксирован {date} в {time}'.format(
                ip=tracker.ip, date=tracker.time.strftime("%d.%m.%Y"),
                time=tracker.time.strftime("%H:%M:%S")
            )
        )
|
nilq/baby-python
|
python
|
from django.views.generic.detail import DetailView
from django.views.generic.list import ListView
from .models import Message, Person, Tag
class MessageView(DetailView):
    """
    Detail view of a Message object
    """
    model = Message
class MessagesView(ListView):
    """
    A view to list all Message objects
    """
    model = Message
class PersonView(DetailView):
    """
    Detail view of a Person object
    """
    model = Person
class PersonsView(ListView):
    """
    A view to list all Person objects
    """
    model = Person
class TagView(DetailView):
    """
    Detail view of a Tag object
    """
    model = Tag
class TagsView(ListView):
    """
    A view to list all Tag objects
    """
    model = Tag
|
nilq/baby-python
|
python
|
# Nick Hansel
# Web scraper to create a shopping list given recipes
from random_recipe import *
# Weekday -> planned recipe name (filled in for the scheduled days only).
days = {
    "Monday": None,
    "Tuesday": None,
    "Wednesday": None,
    "Thursday": None,
    "Friday": None,
    "Saturday": None,
    "Sunday": None
}
# Main interactive loop: either pick one random recipe, or build a weekly
# schedule and optionally a shopping list.
# NOTE(review): `randomRecipe`, `Globals` and `choice` are assumed to come
# from `from random_recipe import *` — confirm in that module.
while True:
    answer = input("Would you like to choose a random meal or would you like to schedule your meal plan? ("
                   "schedule/random): ")
    answer = answer.lower()
    if answer == "random":
        randomRecipe()
        print("\n" + "Recipe chosen: " + '\n' + Globals.chosen_recipe[0] + "\n")
        print("Ingredients needed:")
        for x in Globals.final_dict.get(Globals.chosen_recipe[0]):
            print(x)
    elif answer == 'schedule':
        # NOTE(review): non-numeric input makes int() raise ValueError here.
        how_many_days = input("How many days would you like to schedule (up to 7 days, starting on Monday): " + '\n')
        how_many_days = int(how_many_days)
        shopping = input('Would you like a shopping list as well? (y/n): ' + '\n')
        if how_many_days <= 7:
            randomRecipe()
            # Assign a distinct random recipe to each of the first N days.
            new = (list(days.items()))
            new = ([list(x) for x in new])
            for x in range(how_many_days):
                used = (choice(Globals.names))
                new[x][1] = used
                Globals.names.remove(used)
            del new[how_many_days:]
            new = ([tuple(x) for x in new])
            new = dict(new)
            # NOTE(review): lunch.txt is opened but never written or closed;
            # the handle is also clobbered by 'Shopping List.txt' below.
            file1 = open("lunch.txt", 'w')
            for k, v in new.items():
                print(k + ':' + ' ', v + "\n")
            if shopping == 'y':
                file1 = open('Shopping List.txt', 'w')
                for x in new.values():
                    for j in Globals.final_dict.get(x):
                        file1.write(j + '\n')
                file1.close()
        break
|
nilq/baby-python
|
python
|
import logging
# Log line format: level, timestamp, then the message text.
LOG_FORMAT = "%(levelname)s %(asctime)s - %(message)s"
# filemode "w" truncates the log file on every run.
logging.basicConfig(
    filename = "logging_demo.log",
    level = logging.DEBUG,
    format = LOG_FORMAT,
    filemode = "w")
# Root logger (configured by basicConfig above).
logger = logging.getLogger()
# Emit one message at each severity level.
logger.debug("Debug level message")
logger.info("Info level message")
logger.warning("Warning level message")
logger.error("Error level message")
logger.critical("Critical level message")
# 10 == logging.DEBUG, as set above.
print(logger.level)
|
nilq/baby-python
|
python
|
this is not valid python source code, but still more beautiful than many non-pythonic languages.
|
nilq/baby-python
|
python
|
import discord
from discord.ext import commands
import os
import json
# Bot with '.' as the command prefix.
client = commands.Bot(command_prefix=".")

# Load every cog module found in ./cogs.
for filename in os.listdir("./cogs"):
    if filename.endswith(".py"):
        client.load_extension(f"cogs.{filename[:-3]}")

@client.event
async def on_command_error(ctx, error):
    """Reply with a short notice when an unknown command is invoked."""
    if isinstance(error, commands.CommandNotFound):
        await ctx.send("Invalid Command")

# Read the bot token from the JSON credentials file. Fix: the original
# leaked the file handle (open() was never closed); the context manager
# guarantees cleanup. (Commented-out load/unload commands removed.)
with open(r".\tokens\token.json") as f:
    s = json.load(f)
client.run(s["Token"])
|
nilq/baby-python
|
python
|
# Print the first n Fibonacci numbers, space separated, for 1 < n < 46;
# for n == 1 print just the first term. Other n produce no output.
fibonacci = [0, 1]
n = int(input())
if n == 1:
    print(str(fibonacci[0]))
if 1 < n < 46:
    for _ in range(n - 2):
        fibonacci.append(fibonacci[-2] + fibonacci[-1])
    print(' '.join(str(term) for term in fibonacci))
|
nilq/baby-python
|
python
|
"""
Test CCompiler.
"""
from pathlib import Path
from types import SimpleNamespace
from unittest import mock
from fab.build_config import AddFlags
from fab.dep_tree import AnalysedFile
from fab.steps.compile_c import CompileC
class Test_Compiler(object):
    """Unit test for the CompileC build step (run_command is mocked)."""
    def test_vanilla(self):
        # ensure the command is formed correctly
        config = SimpleNamespace(
            project_workspace=Path('foo'), source_root=Path('foo/src'), multiprocessing=False, reuse_artefacts=False)
        c_compiler = CompileC(
            compiler='gcc', common_flags=['-c'], path_flags=[
                AddFlags(match='foo/src/*', flags=['-I', 'foo/include', '-Dhello'])])
        analysed_files = {Path('foo/src/foo.c'): AnalysedFile(fpath=Path('foo/src/foo.c'), file_hash=None)}
        # Patch out the actual compiler invocation and metric reporting,
        # then assert on the exact command line that would have run.
        with mock.patch('fab.steps.compile_c.run_command') as mock_run:
            with mock.patch('fab.steps.compile_c.send_metric'):
                c_compiler.run(artefact_store={'build_tree': analysed_files}, config=config)
        mock_run.assert_called_with([
            'gcc', '-c', '-I', 'foo/include', '-Dhello', 'foo/src/foo.c', '-o', 'foo/src/foo.o'])
|
nilq/baby-python
|
python
|
import markov
from typing import Optional
from fastapi import FastAPI
app = FastAPI()
@app.get("/")
def read_item(length: Optional[str] = None, start: Optional[str] = None):
    """Root endpoint: return markov-generated text.

    Query params:
      length -- number of units to generate, received as a string and
                converted here. NOTE(review): non-numeric input makes
                int() raise, surfacing as HTTP 500 — consider declaring
                the parameter as Optional[int] so FastAPI validates it.
      start  -- optional generation seed, passed through unchanged.
    """
    if length is not None:
        length = int(length)
    text = markov.generate(length=length, start=start)
    return text
|
nilq/baby-python
|
python
|
from lxml import etree
from io import StringIO
from django.urls import path
from django.http import HttpResponse
from django.template import Template, Context, Engine, engines
def a(request):
    """Compile an XSLT stylesheet.

    NOTE(review): the transform is built but never applied, and the view
    returns None rather than an HttpResponse — likely a static-analysis
    fixture rather than a real Django view.
    """
    xslt_root = etree.XML('''\
<xsl:stylesheet version="1.0"
    xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
    <xsl:template match="/">
        <foo><xsl:value-of select="/a/b/text()" /></foo>
    </xsl:template>
</xsl:stylesheet>''')
    transform = etree.XSLT(xslt_root)
def b(request):
    """Parse a small XML document and transform it via ElementTree.xslt().

    NOTE(review): the result tree is unused and the view returns None.
    """
    xslt_root = etree.XML('''\
<xsl:stylesheet version="1.0"
    xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
    <xsl:template match="/">
        <foo><xsl:value-of select="/a/b/text()" /></foo>
    </xsl:template>
</xsl:stylesheet>''')
    f = StringIO('<foo><bar></bar></foo>')
    tree = etree.parse(f)
    result_tree = tree.xslt(xslt_root)
def c(request):
    """Demo view: XSLT transform passing a string stylesheet parameter (a="'A'").

    NOTE(review): result unused and the view returns None — not a valid
    Django response; demo/profiling code.
    """
    xslt_root = etree.XML('''\
<xsl:stylesheet version="1.0"
    xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
    <xsl:template match="/">
        <foo><xsl:value-of select="/a/b/text()" /></foo>
    </xsl:template>
</xsl:stylesheet>''')
    f = StringIO('<foo><bar></bar></foo>')
    tree = etree.parse(f)
    result = tree.xslt(xslt_root, a="'A'")
# URL routing for the three demo views.
urlpatterns = [
    path('a', a),
    path('b', b),
    path('c', c)
]

# Allow running the views directly as a smoke test outside Django's request
# cycle; the views never read `request`, so passing None is fine here.
if __name__ == "__main__":
    a(None)
    b(None)
    c(None)
|
nilq/baby-python
|
python
|
from VisualisationPlugin import VisualisationPlugin
import pygame
import math
import logging
from DDRPi import FloorCanvas
class SineWaveVisualisationPlugin(VisualisationPlugin):
    """Draws a horizontally scrolling sine wave across the floor canvas."""

    logger = logging.getLogger(__name__)

    def __init__(self):
        self.clock = pygame.time.Clock()
        # draw_surface() reads self.config; initialise it so the plugin also
        # works when configure() was never called (the original raised
        # AttributeError in that case).
        self.config = None

    def configure(self, config):
        """Store the plugin configuration (colours, amplitude, period)."""
        self.config = config
        self.logger.info("Config: %s" % config)

    def draw_frame(self, canvas):
        """Draw the next animation frame, rate-limited to 25 fps."""
        # Limit the frame rate.
        # This sleeps so that at least 25ms has passed since tick()
        # was last called. It is a no-op if the loop is running slow
        self.clock.tick(25)
        # Draw whatever this plugin does
        return self.draw_surface(canvas, pygame.time.get_ticks())

    def draw_splash(self, canvas):
        """Draw a static preview: the wave frozen at time zero."""
        return self.draw_surface(canvas, 0)

    # NOTE: the original also defined a one-argument draw_surface(self, canvas)
    # which was immediately shadowed by the two-argument version below (and
    # would have recursed infinitely had it ever been reachable). It has been
    # removed; draw_splash() provides the same "ticks=0" behaviour.

    def draw_surface(self, canvas, ticks):
        """Render the sine wave for animation time *ticks* (ms) onto *canvas*."""
        # Defaults; each may be overridden through self.config below.
        background_colour = FloorCanvas.GREEN
        wave_colour = FloorCanvas.WHITE
        amplitude = (canvas.get_height() / 2) - 1
        period = 18.0
        if self.config is not None:
            # Get the background colour
            try:
                background_colour = getattr(FloorCanvas, self.config["background_colour"].upper())
            except (AttributeError, KeyError):
                pass
            # Get the wave colour
            try:
                wave_colour = getattr(FloorCanvas, self.config["colour"].upper())
            except (AttributeError, KeyError):
                pass
            # Get the amplitude
            try:
                amplitude = float(self.config["amplitude"])
            except (AttributeError, ValueError, KeyError):
                pass
            # Get the period
            try:
                period = float(self.config["period"])
            except (AttributeError, ValueError, KeyError):
                pass
        # Set the background colour
        canvas.set_colour(background_colour)
        # One full oscillation per second, expressed as a phase offset.
        frequency = 1.0
        phase_offset = 2 * math.pi * frequency * ticks / 1000
        w = canvas.get_width()
        h = canvas.get_height()
        previous_x = None
        previous_y = None
        for x in range(w):
            phase = math.pi * 2 * x / period
            y = h / 2.0 + amplitude * math.sin(phase_offset + phase)
            if previous_y is not None and previous_x is not None:
                # Draw line between the previous point and this one so the
                # wave is continuous even with steep slopes.
                canvas.draw_line(int(previous_x), int(previous_y), int(x), int(y), wave_colour)
            previous_x = x
            previous_y = y
        return canvas

    def get_valid_arguments(self):
        """List the configuration keys this plugin understands."""
        args = ["background_colour",  # The background colour of the wave
                "colour",             # The colour of the wave
                "speed",              # The speed of the wave
                "amplitude",          # The amplitude of the wave
                ]
        return args
|
nilq/baby-python
|
python
|
import socket

# Fetch AssistNow Online data from the u-blox AGPS service.
# Fixed: the original mixed Python-2 print statements with Python-3 print
# calls (so it ran on neither), never closed the socket, and concatenated
# received bytes onto a str.
address = "agps.u-blox.com"
port = 46434

request = (
    "cmd=full;user=korovkin@gmail.com;token=4HWt1EvhQUKJ2InFyaaZDw;"
    "lat=30.0;lon=30.0;pacc=10000;"
)

print("Connecting to u-blox")
sock = socket.socket()
try:
    sock.connect((address, port))
    print("Connection established")
    print("Sending the request")
    # sendall() retries until the whole request is transmitted.
    sock.sendall(request.encode("ascii"))
    print("Sending the request - done")
    data = b""
    # Read until the server closes the connection (recv returns b"").
    while True:
        print(".")
        buffer = sock.recv(1024)
        if not buffer:
            break
        data += buffer
finally:
    sock.close()
print("\n")
print(data.decode("ascii", errors="replace"))
|
nilq/baby-python
|
python
|
import os.path as osp
from pathlib import Path
import pandas as pd
from jitenshea.stats import find_cluster
# Resolve test-data paths relative to this file so the test passes from any CWD.
_here = Path(osp.dirname(osp.abspath(__file__)))
DATADIR = _here / 'data'
CENTROIDS_CSV = DATADIR / 'centroids.csv'
def test_find_cluster():
    """find_cluster labels the four stored centroids as expected usage profiles."""
    centroids = pd.read_csv(CENTROIDS_CSV).set_index('cluster_id')
    labels = find_cluster(centroids)
    assert labels == {3: 'evening', 1: 'high', 0: 'morning', 2: 'noon'}
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 23 22:40:34 2018
@author: boele
"""
# 03 read csv and find unique survey vessels...

# Read the CSV file; "with" guarantees the handle is closed afterwards
# (the original opened it and never closed it).
with open('fartoey_maaleoppdrag.csv', 'r') as f:
    data = f.read()
surveys_and_vessels = data.split('\n')

# print number of rows and show first 5 rows
print(len(surveys_and_vessels))
print(surveys_and_vessels[0:5])
print()

# remove header row
surveys_and_vessels = surveys_and_vessels[1:]

# extract the second column (vessel name) of every well-formed row
vessels = []
for row in surveys_and_vessels:
    col = row.split(';')
    if len(col) > 1:
        vessels.append(col[1])

# print first 5 new rows
print(vessels[0:5])
print()

# count occurrences of each vessel name (insertion-ordered, so the printed
# dict matches the original hand-rolled version)
vessel_counts = {}
for item in vessels:
    vessel_counts[item] = vessel_counts.get(item, 0) + 1

print(vessel_counts)
print('number of unique vessels: ' + str(len(vessel_counts)))
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 11 13:59:21 2017
@author: tuur
"""
from __future__ import print_function
from dateutil import parser as dparser
from lib.evaluation import get_selective_rel_metrics, get_acc_from_confusion_matrix,save_confusion_matrix_from_metrics, viz_docs_rel_difference, save_entity_error_analysis
import random, re, os, shutil, time, datetime, pickle
import numpy as np
import torch.nn as nn
import torch.autograd as autograd
import torch.optim as optim
import plotly as py
import plotly.figure_factory as ff
import plotly.graph_objs as go
import torch
from lib.data import reverse_dict_list
from lib.timeml import write_timebank_folder, get_dur_from_value
from lib.transformer.SubLayers import MultiHeadAttention
import itertools
from copy import copy
from collections import Counter, OrderedDict
import subprocess
from gensim.models.keyedvectors import KeyedVectors
from lib.yellowfin import YFOptimizer
# Fix the global RNG so vocabulary construction and any shuffling are reproducible.
random.seed(0)
# Explicitly enable cuDNN acceleration for the recurrent layers.
torch.backends.cudnn.enabled=True
class TimelineModel(object):
    def setup_vocabularies(self, data, unk_threshold, special_conflation=False, entity_sequence=False):
        # Sets up indices for characters, POS, and words
        """Build vocabulary index maps from the training documents.

        Returns (windex, cindex, pindex, findex): word/char/POS maps from
        token to a 1-element LongTensor Variable; findex maps feature -> int.
        """
        if entity_sequence:
            self.word_frequencies = Counter([token if not special_conflation else self.conflate_digits(token) for text in data for token in text.entity_tokens])
        else:
            self.word_frequencies = Counter([token if not special_conflation else self.conflate_digits(token) for text in data for token in text.tokens])
        if unk_threshold:
            # NOTE(review): Counter over a *set* comprehension — every surviving
            # token ends up with count 1, discarding the real frequencies.  Only
            # the keys appear to be used downstream; confirm before relying on
            # the counts themselves.
            self.word_frequencies = Counter({token for token in self.word_frequencies if self.word_frequencies[token] > unk_threshold})
        # All token-level features present anywhere in the data (see get_features).
        all_features = set([f for doc in data for tok_index in range(len(doc.tokens)) for f in self.get_features(tok_index, doc)])
        # Character vocabulary: every character of every kept word, plus UNK and all digits.
        cindex = {c:autograd.Variable(torch.from_numpy(np.array([i]))) for i,c in enumerate(set([c for w in self.word_frequencies for c in w]).union([self.unk_token]).union([str(n) for n in range(10)]))}
        pindex = {p:autograd.Variable(torch.from_numpy(np.array([i]))) for i,p in enumerate(set([p for text in data for p in text.pos] + [self.unk_token]))}
        windex = {w:autograd.Variable(torch.from_numpy(np.array([i]))) for i,w in enumerate(list(self.word_frequencies.keys()) + [self.unk_token])}
        findex = {f:i for i,f in enumerate(list(all_features))}
        return windex, cindex, pindex, findex
def get_params_from_nn_dict(self, nn_dict):
params = []
for name, component in nn_dict.items():
params += self.get_component_params(name, nn_dict)
return params
def get_component_params(self, name, component_dict):
if name in component_dict:
component = component_dict[name]
if hasattr(component, 'parameters'):
return list(component.parameters())
else:
return [component]
def fix_component_by_name(self, name):
component_names = [name] if name in self.nn else self.nn_by_subtask[name]
for component_name in component_names:
for par in self.get_component_params(component_name, self.nn):
par.requires_grad=False
self.tied_components.add(component_name)
def free_component_by_name(self, name):
component_names = [name] if name in self.nn else self.nn_by_subtask[name]
for component_name in component_names:
for par in self.get_component_params(component_name, self.nn):
par.requires_grad=True
if component_name in self.tied_components:
self.tied_components.remove(component_name)
    def print_gradient_by_name(self, name=None):
        """Print the summed absolute gradient per component (all components if *name* is None).

        Output format per component: total|grad| / n_params = average.
        """
        if name is None:
            components = self.nn.keys()
        else:
            # *name* may be a single component name or a subtask name.
            components = [name] if name in self.nn else self.nn_by_subtask[name]
        for component in components:
            params = self.get_component_params(component, self.nn)
            summed = 0
            n_params = 0
            for p in params:
                if not p.grad is None:
                    n_params += np.prod(list(p.size()))
                    # Sum of absolute gradients; a second sum() collapses a
                    # remaining vector dimension when present.
                    summ = sum(torch.abs(p.grad))
                    if summ.size()[0] > 1:
                        summ = sum(summ)
                    summed += summ
            # .data[0] is legacy (pre-0.4) PyTorch scalar extraction; `summed`
            # stays the int 0 when no parameter carried a gradient.
            summed_grad = summed.data[0] if not type(summed)==int else summed
            print(component, round(summed_grad,2), '/',round(n_params,2),'=',round(float(summed_grad)/(n_params+1),2))
def get_trainable_params(self):
pars = set()
for task in self.active_subtasks:
component_names = self.nn_by_subtask[task]
for comp in component_names:
if comp in self.tied_components:
continue
for par in self.get_component_params(comp, self.nn):
if par is not None and par.requires_grad:
pars.add(par)
return pars
def reset_optimizer(self):
trainable_params = self.get_trainable_params()
if self.optimizer_type == 'adam':
self.optimizer = optim.Adam(trainable_params, lr=self.lr)
if self.optimizer_type == 'adaml2':
self.optimizer = optim.Adam(trainable_params, lr=self.lr, weight_decay=0.0001)
if self.optimizer_type == 'amsgrad':
self.optimizer = optim.Adam(trainable_params, lr=self.lr, amsgrad=True)
if self.optimizer_type == 'amsgrad0.01':
self.optimizer = optim.Adam(trainable_params, lr=self.lr, amsgrad=True, eps=0.01)
if self.optimizer_type == 'amsgrad0.001':
self.optimizer = optim.Adam(trainable_params, lr=self.lr, amsgrad=True, eps=0.001)
elif self.optimizer_type== 'adadelta':
self.optimizer = optim.Adadelta(trainable_params, lr=self.lr)
elif self.optimizer_type == 'rmsprop':
self.optimizer = optim.RMSprop(trainable_params, lr=self.lr)
elif self.optimizer_type == 'sgd':
self.optimizer = optim.SGD(trainable_params, lr=self.lr, momentum=0.9, weight_decay=0.001)
elif self.optimizer_type == 'nesterov':
self.optimizer = optim.SGD(trainable_params, lr=self.lr, momentum=0.9, weight_decay=0.001, nesterov=True)
elif self.optimizer_type == 'asgd':
self.optimizer = optim.ASGD(trainable_params, lr=self.lr)
elif self.optimizer_type == 'yf':
self.optimizer = YFOptimizer(trainable_params)
    def move_to_gpu(self):
        """Move all components, constants, and vocabulary index tensors to the GPU."""
        for cname, component in self.nn.items():
            if hasattr(component, 'data'):
                # Bare Variables/tensors: move their storage in place.
                component.data = component.data.cuda()
            else:
                # nn.Modules: .cuda() moves parameters in place and returns
                # self, so the unused local rebinding here is harmless.
                component = component.cuda()
        for cname, constant in self.constants.items():
            constant.data = constant.data.cuda()
        # Index Variables must be replaced, not mutated, hence the dict writes.
        for indices in [self.windex, self.pindex, self.cindex]:
            for w,i in indices.items():
                indices[w] = indices[w].cuda()
def get_features(self, w_index, doc):
w_span = doc.spans[w_index]
annotations = doc.reverse_span_annotations[w_span] if w_span in doc.reverse_span_annotations else []
features = []
if len(annotations) > 0 and self.feature_keys:
for feat_key in self.feature_keys:
for ann in annotations:
if feat_key in ann:
features.append(ann)
return features
def get_feature_vec(self, w_index, doc):
features = self.get_features(w_index, doc)
vec = torch.zeros(len(self.findex))
for f in features:
if f in self.findex:
findex = self.findex[f]
vec[findex] = 1.0
if self.gpu:
vec = vec.cuda()
return autograd.Variable(vec, requires_grad=False)
def get_tif_vec(self, w_index, doc):
span = doc.spans[w_index]
if span in doc.reverse_span_annotations:
k = [tif for tif in doc.reverse_span_annotations[span] if tif[:3]=='TIF']
#print(k)
if len(k) >0:
return self.tif_vecs[k[0]]
return self.tif_vecs['TIF-UNKNOWN']
def set_train_mode(self):
for component in self.nn.values():
if hasattr(component, 'train'):
component.train()
def set_eval_mode(self):
for component in self.nn.values():
if hasattr(component, 'eval'):
component.eval()
    def __init__(self, model_dir='tml_model', data=[], margin=0.01, dmin=0.1, pemb_size=20, wemb_size=25, cemb_size=10, rnn_size=50, crnn_size=20, lr=0.001, gpu=True, relations=['BEFORE', 'AFTER', 'INCLUDES', 'IS_INCLUDED','SIMULTANEOUS'], dropout=0.5, depth=1, unk_threshold=0, special_conflation=False, rnn_unit='LSTM', pos=False, optimizer='adam', loss_func='Ldce', subtasks=['sc','dc','sa','da'], word_vectors=None, fix_wembs=False, dct_start_fixed=True, dct_duration_fixed=False, rnn_bias=True, linear_bias=True, use_character_level_encoding=True,doc_normalization=True,blinding=False, feature_keys = None, deep_word_modeling=False, entity_sequence=False, absolute=False, pointwise_loss='hinge'):
        """Build the timeline model: vocabularies, per-subtask encoders, output heads.

        Subtasks: 'sc'/'dc' are contextual start/duration encoders (word-level
        RNN or self-attention), 'sa'/'da' are non-contextual word-level ones,
        'sp'/'dp' feed the previous span's prediction forward.
        NOTE(review): the list defaults (data, relations, subtasks) are shared
        mutable defaults — harmless only as long as they are never mutated.
        """
        self.model_dir = model_dir
        if not os.path.exists(self.model_dir):
            os.makedirs(self.model_dir)
        self.unk_token = '_unk_'
        self.feature_keys = feature_keys.split(',') if feature_keys else None
        self.windex, self.cindex, self.pindex, self.findex = self.setup_vocabularies(data, unk_threshold, special_conflation=special_conflation, entity_sequence=entity_sequence)
        print ('wvocab:', len(self.windex), 'cvocab:', len(self.cindex), 'pvocab:', len(self.pindex), 'fvocab:', len(self.findex), '( using pos:', bool(pos),', features:', self.feature_keys, ')')
        print('features:', self.findex.keys())
        self.train_margin, self.pred_margin = margin, margin
        self.dmin, self.rels_train, self.loss_func, self.pointwise_loss = dmin, relations, loss_func, pointwise_loss
        self.gpu, self.optimizer_type, self.lr = gpu, optimizer, lr
        self.special_conflation=special_conflation
        self.entity_sequence=entity_sequence
        self.absolute = absolute
        self.doc_normalization=doc_normalization
        # Some stats about layer sizes (for easy usage later on)
        self.pemb_size, self.wemb_size, self.crnn_size, self.cemb_size, self.rnn_size = (pemb_size if pos else 0), wemb_size,(crnn_size if use_character_level_encoding else 0), (cemb_size if use_character_level_encoding else 0), rnn_size
        self.pos, self.use_character_level_encoding, self.blinding, self.dropout, self.rnn_unit, self.deep_word_modeling = pos, use_character_level_encoding, blinding, dropout, rnn_unit, deep_word_modeling
        # --- Constructing Network Components
        self.nn, self.constants = OrderedDict(), OrderedDict()
        self.contextual_subtasks, self.word_level_subtasks = ['sc','dc'], ['sa','da']
        # Set which subtasks should be used for prediction
        self.active_subtasks = subtasks
        print('Active subtasks',self.active_subtasks)
        # optional dropout (shared: '*' marks components common to all subtasks)
        if self.dropout:
            self.nn['dropout*'] = nn.Dropout(self.dropout)
        # Single parameters (or constants): start and duration of the
        # document-creation-time, and a reusable zero for hinge losses.
        self.nn['s_dct*'] = autograd.Variable(torch.zeros(1), requires_grad=True)
        self.nn['d_dct*'] = autograd.Variable(torch.ones(1), requires_grad=True)
        self.constants['ZERO'] = autograd.Variable(torch.FloatTensor([0]),requires_grad=False)
        # Word representation modules
        # NOTE(review): read_word_vectors is not among the visible imports of
        # this module — confirm it is defined elsewhere in the file.
        if word_vectors:
            wv = read_word_vectors(word_vectors)
        for subtask in self.contextual_subtasks + self.word_level_subtasks:
            if word_vectors:
                self.windex, self.nn['wembs_'+subtask], self.wemb_size = self.set_word_embeddings(wv)
            else:
                self.nn['wembs_'+subtask] = nn.Embedding(len(self.windex), self.wemb_size)
            if pos:
                self.nn['pembs_'+subtask] = nn.Embedding(len(self.pindex), self.pemb_size)
            if use_character_level_encoding:
                self.nn['cembs_'+subtask] = nn.Embedding(len(self.cindex), self.cemb_size)
                self.nn['crnn_'+subtask] = nn.LSTM(self.cemb_size, self.crnn_size, bidirectional=False, num_layers=depth, bias=rnn_bias)
        self.word_repr_size = self.pemb_size + self.wemb_size + self.crnn_size + (len(self.findex) if self.feature_keys else 0)
        if deep_word_modeling:
            # Optional feed-forward compression of the word representation.
            for subtask in self.contextual_subtasks + self.word_level_subtasks:
                self.nn['wff_'+subtask] = nn.Linear(self.word_repr_size, deep_word_modeling)
            self.word_repr_size = deep_word_modeling
        # Contextual modules
        for subtask in self.contextual_subtasks:
            if self.rnn_unit == 'LSTM':
                self.nn['wrnn_'+subtask] = nn.LSTM(self.word_repr_size, self.rnn_size, bidirectional=True, num_layers=depth, bias=rnn_bias)
            elif self.rnn_unit == 'Att':
                self.nn['wrnn_'+subtask] = MultiHeadAttention(n_head=2, d_model=self.word_repr_size, d_k=10, d_v=10)
            self.nn['out_'+subtask] = nn.Linear(self.word_repr_size, 1, bias=linear_bias)
        # Non-contextual modules:
        # NOTE(review): 'out_s' is sized with out_repr_size_d and 'out_d' with
        # out_repr_size_s — these look swapped.  The two sizes coincide under
        # symmetric subtask configurations, which would mask the mistake;
        # confirm intent before changing.
        self.out_repr_size_d = 0 + (1 if 'dp' in self.active_subtasks else 0) + (1 if 'sp' in self.active_subtasks else 0) + (2*self.rnn_size if 'dc' in self.active_subtasks else 0) + (self.word_repr_size if 'da' in self.active_subtasks else 0)
        self.out_repr_size_s = 0 + (1 if 'dp' in self.active_subtasks else 0) + (1 if 'sp' in self.active_subtasks else 0) + (2*self.rnn_size if 'sc' in self.active_subtasks else 0) + (self.word_repr_size if 'sa' in self.active_subtasks else 0)
        self.nn['out_s'] = nn.Linear(self.out_repr_size_d, 1, bias=linear_bias)
        self.nn['out_d'] = nn.Linear(self.out_repr_size_s, 1, bias=linear_bias)
        # Easy access to subparts of the net by subtask, to easily free or fix parameters
        self.nn_by_subtask = {subtask:{name:component for (name,component) in self.nn.items() if subtask in name or '*' in name} for subtask in self.contextual_subtasks+self.word_level_subtasks}
        for subtask in self.nn_by_subtask:
            self.nn_by_subtask[subtask]['out_s']=self.nn['out_s']
            self.nn_by_subtask[subtask]['out_d']=self.nn['out_d']
        # Set all components to trainable by default except checking the DCT start and duration
        self.tied_components = set()
        if dct_start_fixed:
            self.fix_component_by_name('s_dct*')
        if dct_duration_fixed:
            self.fix_component_by_name('d_dct*')
        self.reset_optimizer()
        print('Full model parameters:', sum([np.prod(list(par.size())) for par in self.get_trainable_params()]))
        print('Word representation size:',self.word_repr_size)
        print ('Dims - wemb:',self.wemb_size, '- pemb:',self.pemb_size, '- cemb:',self.cemb_size, '- wrnn:', self.rnn_size, '- crnn:', self.crnn_size)
        print ('Relations:', relations)
        if self.gpu:
            self.move_to_gpu()
def index_w(self, w):
return self.windex[w] if w in self.windex else self.windex[self.unk_token]
def index_p(self, p):
return self.pindex[p] if p in self.pindex else self.pindex[self.unk_token]
def index_c(self, c):
return self.cindex[c] if c in self.cindex else self.cindex[self.unk_token]
def get_e_vec(self, e):
return self.e_vecs[e] if e in self.e_vecs else self.e_vecs[self.unk_token]
def encode_char(self,c, subtask):
return self.nn['cembs_'+subtask](self.index_c(c))
def conflate_digits(self, w):
return re.sub('\d', '5', w)
    def set_word_embeddings(self, wv):
        """Initialise word embeddings from pretrained gensim KeyedVectors *wv*.

        Only words already present in the model vocabulary are kept (the
        vocabulary can only shrink).  Returns (new word index, initialised
        nn.Embedding, embedding size).
        """
        print('setting word embeddings')
        # NOTE(review): wv.vocab is the pre-gensim-4.0 API (renamed to
        # key_to_index in gensim 4) — this pins the gensim version.
        wv_vocab = [w for w in wv.vocab.keys() if (not ('_' in w) or w=='_') and w in self.windex] # ! only words that overlap are initialized (so no bigger vocab)!
        new_windex, wemb_size = {w:i for i,w in enumerate(wv_vocab + [self.unk_token])}, wv.vector_size
        wembs = nn.Embedding(len(new_windex), wemb_size)
        emb_matrix = np.zeros([len(new_windex), wemb_size], dtype=float)
        for w in new_windex:
            if w in wv:
                # The UNK row stays all-zeros (never present in wv).
                emb_matrix[new_windex[w]] = wv[w]
        emb_tensor = torch.from_numpy(emb_matrix).float()
        wembs.weight.data = emb_tensor.view(len(new_windex), wemb_size)
        # Convert plain int indices into 1-element LongTensor Variables,
        # matching the format produced by setup_vocabularies.
        new_windex = {w:autograd.Variable(torch.from_numpy(np.array([i]))) for w,i in new_windex.items()}
        print ('vocab size:', len(wv_vocab))
        return new_windex, wembs, wemb_size
    def encode_word_for_subtask(self, w_index, doc, subtask):
        """Build the word representation for token *w_index* for one subtask.

        Concatenates: word embedding, final char-RNN state (optional), POS
        embedding (optional) and a multi-hot feature vector (optional); an
        extra feed-forward layer may compress the result (deep_word_modeling).
        """
        if self.entity_sequence:
            # In entity-sequence mode an entity is represented by its last token.
            token_str = doc.span_to_tokens(doc.entity_spans[w_index])[-1]
        else:
            token_str = doc.tokens[w_index]
        # Blinding level 1 hides entity tokens from the contextual encoders;
        # level 2 hides every token from them.
        if self.blinding == 1 and subtask in ['dc','sc' ] and doc.entities[w_index]!='O':
            token_str = self.unk_token
        elif self.blinding == 2 and subtask in ['dc','sc']:
            token_str = self.unk_token
        # Getting the word embedding
        if self.special_conflation:
            word_encoding = self.nn['wembs_'+subtask](self.index_w(self.conflate_digits(token_str)))
        else:
            word_encoding = self.nn['wembs_'+subtask](self.index_w(token_str))
        # Adding Character RNN encoding
        if self.use_character_level_encoding:
            # Constructing sequence of char-embeddings
            cembs_lr = torch.stack([self.encode_char(c, subtask) for c in token_str])
            # Running Char-RNN
            cencoding_lr, _ = self.nn['crnn_'+subtask](cembs_lr)
            # Concatenating the word embedding and last Char-RNN output
            word_encoding = torch.cat([word_encoding,cencoding_lr[-1]], dim=1)
        # Adding POS
        if self.pos:
            pemb = self.nn['pembs_'+subtask](self.index_p(doc.pos[w_index]))
            word_encoding = torch.cat([word_encoding, pemb], dim=1)
        # Adding Entity encoding (EVENT, TIMEX3, or NONE)
        if self.feature_keys:
            feat_vec = self.get_feature_vec(w_index, doc).view(1,-1)
            word_encoding = torch.cat([word_encoding, feat_vec], dim=1)
        if self.deep_word_modeling:
            word_encoding = torch.tanh(word_encoding)
            word_encoding = self.nn['wff_'+subtask](word_encoding)
        # Add dropout
        if self.dropout:
            word_encoding = self.nn['dropout*'](word_encoding)
        return word_encoding
    def encode_tokens_for_subtask(self, doc, subtask):
        """Encode every token (or entity) of *doc* for one subtask.

        Word-level subtasks get stacked word representations; contextual
        subtasks additionally pass them through the word-level RNN or
        self-attention layer.
        """
        # construct word representations
        if self.entity_sequence:
            word_encoding = torch.stack([self.encode_word_for_subtask(e_index,doc, subtask) for e_index in range(len(doc.entity_spans))])
        else:
            word_encoding = torch.stack([self.encode_word_for_subtask(w_index,doc, subtask) for w_index in range(len(doc.tokens))])
        # For contextual subtasks apply the corresponding word-level RNN
        if subtask in self.contextual_subtasks:
            if self.rnn_unit in ['LSTM', 'GRU','RNN']:
                word_encoding, _ = self.nn['wrnn_'+subtask](word_encoding)
            if self.rnn_unit in ['Att']:
                word_encoding, enc_slf_attn = self.nn['wrnn_'+subtask](word_encoding, word_encoding, word_encoding)
        # Add dropout (dropout is already applied on word representation level as well)
        if self.dropout:
            word_encoding = self.nn['dropout*'](word_encoding)
        return word_encoding
def pred_subtask(self, token_index, doc, encoded_text, subtask):
token_representation = encoded_text[subtask][token_index]
return self.nn['out_'+subtask](token_representation)
    def encode_tokens(self, doc, entity_spans=None, measure_speed=False):
        """Predict a start time and duration for the DCT and every entity span.

        Returns a dict holding the per-subtask token encodings plus 's' and
        'd': span -> predicted start / duration Variable.  Span (0,0) denotes
        the document-creation-time.  Spans are processed left-to-right so the
        'sp'/'dp' subtasks can feed the previous span's prediction into the
        next one.
        """
        if measure_speed:
            t0 = time.time()
        entity_spans = entity_spans if entity_spans else doc.entity_spans
        encodings = {}
        sp,dp = 0,0
        for subtask in self.active_subtasks:
            # 'dp'/'sp' have no encoder of their own; they reuse predictions.
            if not subtask in set(['dp','sp']):
                encodings[subtask] = self.encode_tokens_for_subtask(doc, subtask)
        encodings['s'], encodings['d'] = {},{}
        # span (0,0) corresponds to the document-creation-time
        s, d = self.nn['s_dct*'].view(1,1), self.clamp(self.nn['d_dct*'].view(1,1), self.dmin)#.clamp(self.dmin)
        encodings['s'][(0,0)], encodings['d'][(0,0)] = s, d
        sp,dp = s, d
        for span in entity_spans:
            # Get the token index corresponding to the span
            token_ix = doc.entity_indices[span] if self.entity_sequence else doc.span_to_tokens(span,token_index=True)[-1]
            # Assemble the start (tok_rs) and duration (tok_rd) input vectors
            # from whichever subtasks are active.
            tok_rs, tok_rd = None,None
            if 'sa' in self.active_subtasks:
                tok_rs = encodings['sa'][token_ix]
            if 'da' in self.active_subtasks:
                tok_rd = encodings['da'][token_ix]
            if 'sc' in self.active_subtasks:
                tok_rs = torch.cat([tok_rs, encodings['sc'][token_ix]], dim=1) if tok_rs is not None else encodings['sc'][token_ix]
            if 'dc' in self.active_subtasks:
                tok_rd = torch.cat([tok_rd, encodings['dc'][token_ix]], dim=1) if tok_rd is not None else encodings['dc'][token_ix]
            # Previous span's predicted start/duration as extra features.
            if 'sp' in self.active_subtasks:
                tok_rs = torch.cat([tok_rs, sp], dim=1)
                tok_rd = torch.cat([tok_rd, sp], dim=1)
            if 'dp' in self.active_subtasks:
                tok_rs = torch.cat([tok_rs, dp], dim=1)
                tok_rd = torch.cat([tok_rd, dp], dim=1)
            # Durations are clamped to stay above dmin (smooth softplus bound).
            s, d = self.nn['out_s'](tok_rs), self.clamp(self.nn['out_d'](tok_rd), self.dmin)
            encodings['s'][span] = s
            encodings['d'][span] = d
            sp,dp = s, d
        if measure_speed:
            print(doc.id, 'enc t:',time.time()-t0,'s', 'words:', len(doc.tokens),'w/s:', float(len(doc.tokens)) / (time.time()-t0))
        return encodings
def clamp(self, tensor, min_value):
return torch.log(1.0 + torch.exp(tensor)) + min_value
def pred_starttime(self, span, doc, encoded_text):
return encoded_text['s'][span]
def pred_duration(self, span, doc, encoded_text):
return encoded_text['d'][span]
def pointwise_loss_before(self, x, y, train_mode=False): # X < Y, interpreted as: max(X + m - Y, 0)
margin_t = self.train_margin if train_mode else self.pred_margin
if self.pointwise_loss == 'hinge':
loss = torch.max(torch.stack([x[0] + margin_t - y[0], self.constants['ZERO']]))
elif self.pointwise_loss == 'log':
loss = torch.log(1 + torch.exp(x[0] - y[0] + margin_t))
elif self.pointwise_loss == 'exp':
loss = torch.exp(x[0] - y[0] + margin_t)
return loss.view(1)
def pointwise_loss_equal(self, x, y, train_mode=False):
# |x-y| < margin --> max(|x-y| - self.loss_margin , 0)
margin_t = self.train_margin if train_mode else self.pred_margin
if self.pointwise_loss == 'hinge':
loss = torch.max(torch.stack([torch.abs(x[0] - y[0]) - margin_t, self.constants['ZERO']]))
elif self.pointwise_loss == 'log':
loss = torch.log(1 + torch.exp(torch.abs(x[0] - y[0]) - margin_t))
elif self.pointwise_loss == 'exp':
loss = torch.exp(torch.abs(x[0] - y[0]) - margin_t)
return loss.view(1)
def get_Lt(self, rel, s1, d1, s2, d2, train_mode=False):
e1 = s1 + d1
e2 = s2 + d2
if rel == 'IS_INCLUDED':
loss = self.pointwise_loss_before(s2, s1, train_mode) + self.pointwise_loss_before(e1, e2, train_mode) # + self.pointwise_loss_before(d1,d2)
elif rel =='INCLUDES':
loss = self.pointwise_loss_before(s1, s2, train_mode) + self.pointwise_loss_before(e2, e1, train_mode) # + self.pointwise_loss_before(d2,d1)
elif rel == 'BEFORE':
loss = self.pointwise_loss_before(e1, s2, train_mode)
elif rel == 'AFTER':
loss = self.pointwise_loss_before(e2, s1, train_mode)
elif rel == 'SIMULTANEOUS':
loss = self.pointwise_loss_equal(s1, s2, train_mode) + self.pointwise_loss_equal(e1, e2, train_mode) # + self.pointwise_loss_equal(d1,d2)
elif rel == 'BEGINS':
loss = self.pointwise_loss_equal(s1, s2, train_mode) + self.pointwise_loss_before(e1, e2, train_mode)
elif rel == 'BEGUN_BY':
loss = self.pointwise_loss_equal(s2, s1, train_mode) + self.pointwise_loss_before(e2, e1, train_mode)
elif rel == 'ENDS':
loss = self.pointwise_loss_before(s2, s1, train_mode) + self.pointwise_loss_equal(e1, e2, train_mode)
elif rel == 'ENDED_BY':
loss = self.pointwise_loss_before(s1, s2, train_mode) + self.pointwise_loss_equal(e2, e1, train_mode)
elif rel == 'IBEFORE':
loss = self.pointwise_loss_equal(e1, s2, train_mode)
elif rel == 'IAFTER':
loss = self.pointwise_loss_equal(e2, s1, train_mode)
else:
print('ERROR: no loss for relation:', rel)
#print(rel, loss, s1, e1, s2, e2)
return loss
    def get_Lr(self, rel, s1, d1, s2, d2, all_relations, train_mode=False):
        """Relation loss: combine per-relation timeline losses per self.loss_func.

        Variants: 'Lt' raw timeline loss of the true relation; 'Ldh' ranking
        hinge of the true relation against all others; 'Ldce' cross-entropy
        over negated losses; 'Ldcem'/'Ldcemt' normalised scores (the latter
        tanh-flattened); '+'-combinations sum two variants; 'L*' combines all
        three.  In every stacked-score variant index 0 is the true relation.
        """
        if self.loss_func == 'Lt':
            return self.get_Lt(rel, s1, d1, s2, d2, train_mode)
        elif self.loss_func == 'Ldh': # the timeline loss of the true label should be lower than that of all false/other labels
            gt_loss = self.get_Lt(rel, s1, d1, s2, d2, train_mode)
            loss = 0.0
            for other_rel in all_relations:
                if other_rel != rel:
                    # Hinge with margin dmin between true and competing losses.
                    loss += torch.max(torch.stack([gt_loss - self.get_Lt(other_rel, s1, d1, s2, d2, train_mode) + self.dmin, self.constants['ZERO']]))
            return loss
        elif self.loss_func == 'Ldcem':
            # Uses standard normalization instead of softmax
            f = lambda x: -x
            score_per_relation = torch.stack([f(self.get_Lt(rel, s1, d1, s2, d2, train_mode))] + [f(self.get_Lt(r, s1, d1, s2, d2, train_mode)) for r in all_relations if not r==rel])
            # Shift scores so the minimum is zero before normalising.
            lifted_scores = score_per_relation + (0 - torch.min(score_per_relation))
            minmaxnorm = lambda x: x / torch.sum(x)
            mm1 = minmaxnorm(lifted_scores)
            return 1 - mm1[0]
        elif self.loss_func == 'Ldcemt':
            # Uses standard normalization instead of softmax and use tanh to flatten low scores (and prevent forever pushing away from unlikely relations, causing the time-line to move always during learning)
            f = lambda x: torch.tanh(-x)
            score_per_relation = torch.stack([f(self.get_Lt(rel, s1, d1, s2, d2, train_mode))] + [f(self.get_Lt(r, s1, d1, s2, d2, train_mode)) for r in all_relations if not r==rel])
            lifted_scores = score_per_relation + (0 - torch.min(score_per_relation))
            minmaxnorm = lambda x: x / torch.sum(x)
            mm1 = minmaxnorm(lifted_scores)
            return 1 - mm1[0]
        elif self.loss_func == 'Ldce':
            # Cross-entropy over negated timeline losses, target = index 0.
            f = lambda x: -x
            new_score = torch.stack([f(self.get_Lt(rel, s1, d1, s2, d2, train_mode))] + [f(self.get_Lt(r, s1, d1, s2, d2, train_mode)) for r in all_relations if not r==rel])
            score_per_relation = new_score
            ref_vector = autograd.Variable(torch.LongTensor([0]), requires_grad=False)
            if self.gpu:
                ref_vector = ref_vector.cuda()
            cross_entropy = torch.nn.CrossEntropyLoss()
            return cross_entropy(score_per_relation.t(), ref_vector)
        elif self.loss_func in ['Lt+Ldh','Ldh+Lt']:
            # Ranking hinge plus the raw timeline loss.
            gt_loss = self.get_Lt(rel, s1, d1, s2, d2, train_mode)
            loss = 0.0
            for other_rel in all_relations:
                if other_rel != rel:
                    loss += torch.max(torch.stack([gt_loss - self.get_Lt(other_rel, s1, d1, s2, d2, train_mode) + self.dmin, self.constants['ZERO']]))
            return loss + gt_loss
        elif self.loss_func in ['Lt+Ldce','Ldce+Lt']:
            # Cross-entropy plus the raw timeline loss.
            f = lambda x: -x
            gt_loss = self.get_Lt(rel, s1, d1, s2, d2, train_mode)
            new_score = torch.stack([f(gt_loss)] + [f(self.get_Lt(r, s1, d1, s2, d2, train_mode)) for r in all_relations if not r==rel])
            score_per_relation = new_score
            ref_vector = autograd.Variable(torch.LongTensor([0]), requires_grad=False)
            if self.gpu:
                ref_vector = ref_vector.cuda()
            cross_entropy = torch.nn.CrossEntropyLoss()
            return cross_entropy(score_per_relation.t(), ref_vector) + gt_loss
        elif self.loss_func in ['Ldh+Ldce','Ldce+Ldh']:
            # Ranking hinge plus cross-entropy.
            gt_loss = self.get_Lt(rel, s1, d1, s2, d2, train_mode)
            f = lambda x: -x
            loss = 0.0
            for other_rel in all_relations:
                if other_rel != rel:
                    loss += torch.max(torch.stack([gt_loss - self.get_Lt(other_rel, s1, d1, s2, d2, train_mode) + self.dmin, self.constants['ZERO']]))
            new_score = torch.stack([f(gt_loss)] + [f(self.get_Lt(r, s1, d1, s2, d2, train_mode)) for r in all_relations if not r==rel])
            score_per_relation = new_score
            ref_vector = autograd.Variable(torch.LongTensor([0]), requires_grad=False)
            if self.gpu:
                ref_vector = ref_vector.cuda()
            cross_entropy = torch.nn.CrossEntropyLoss()
            loss += cross_entropy(score_per_relation.t(), ref_vector)
            return loss
        elif self.loss_func == 'L*':
            # All three: ranking hinge + cross-entropy + raw timeline loss.
            gt_loss = self.get_Lt(rel, s1, d1, s2, d2, train_mode)
            f = lambda x: -x
            loss = 0.0
            for other_rel in all_relations:
                if other_rel != rel:
                    loss += torch.max(torch.stack([gt_loss - self.get_Lt(other_rel, s1, d1, s2, d2, train_mode) + self.dmin, self.constants['ZERO']]))
            new_score = torch.stack([f(gt_loss)] + [f(self.get_Lt(r, s1, d1, s2, d2, train_mode)) for r in all_relations if not r==rel])
            score_per_relation = new_score
            ref_vector = autograd.Variable(torch.LongTensor([0]), requires_grad=False)
            if self.gpu:
                ref_vector = ref_vector.cuda()
            cross_entropy = torch.nn.CrossEntropyLoss()
            loss += cross_entropy(score_per_relation.t(), ref_vector)
            loss += self.get_Lt(rel, s1, d1, s2, d2, train_mode)[0]
            return loss
def train(self, data, num_epochs=5, max_docs=None, viz_inbetween=False, verbose=0,save_checkpoints=None, eval_on=None, batch_size=32, temporal_awareness_ref_dir=None, clip=1.0, pred_relations=None, patience=100, loss_func=None, pointwise_loss=None,tune_margin=1, checkpoint_interval=1000,timex3_dur_loss=False, reset_optimizer=None):
    """Mini-batch training loop for the timeline model.

    Each epoch builds per-document batches of annotated relation pairs,
    accumulates the relation loss Lr per pair (plus an absolute span-duration
    loss when ``self.absolute`` is set), backpropagates per batch, and every
    ``checkpoint_interval`` examples evaluates / checkpoints the model.
    Early stopping: training ends once ``patience`` checkpoints pass without
    a new best dev accuracy.

    NOTE(review): ``epoch_stats`` and the error directories are only created
    when ``eval_on`` is given, yet the checkpoint branch appends to
    ``epoch_stats['loss']`` unconditionally -- training without ``eval_on``
    appears to fail at the first checkpoint; confirm before relying on it.
    NOTE(review): ``verbose`` and ``timex3_dur_loss`` are accepted but unused
    in this body.
    """
    training_start_time = time.time()
    print('Fixed components:', self.tied_components)
    print('Trainable parameters:', sum([np.prod(list(par.size())) for par in self.get_trainable_params()]))
    print ('epochs:', num_epochs, 'dropout:', self.dropout, 'batch_size:', batch_size)
    print('checkpoints:', save_checkpoints)
    torch.backends.cudnn.benchmark = True
    self.reset_optimizer()
    # Optional per-call overrides of the configured loss functions.
    if loss_func:
        self.loss_func = loss_func
    if pointwise_loss:
        self.pointwise_loss=pointwise_loss
    print('Lr loss func:', self.loss_func)
    print('Lp loss func:',self.pointwise_loss)
    if max_docs:
        data = data[:max_docs]
    # Taking subsection from training to calculate training accuracy
    train_err_subset = data[:max(int(len(data)*0.05),5)]
    pred_relations = pred_relations if pred_relations else self.rels_train
    if save_checkpoints:
        checkpoint_dir = self.model_dir + '/checkpoints/'
        os.makedirs(checkpoint_dir)
    if eval_on:
        # Initial evaluation (before any training step) to seed the stats.
        error_dir_conf = self.model_dir + '/errors/confusion/'
        error_dir_entities = self.model_dir + '/errors/entities/'
        os.makedirs(error_dir_conf)
        os.makedirs(error_dir_entities)
        dev_metrics, F1_TA, P_TA, R_TA = evaluate_timelinemodel(self, eval_on, pred_relations,temporal_awareness_ref_dir=temporal_awareness_ref_dir,all_pairs=True)
        train_metrics, _, _, _ = evaluate_timelinemodel(self, train_err_subset, pred_relations, all_pairs=True, entity_error_analysis_file_path=error_dir_entities+'/train_0.txt')
        save_confusion_matrix_from_metrics(train_metrics, error_dir_conf + '/train_0.html')
        save_confusion_matrix_from_metrics(dev_metrics, error_dir_conf + '/dev_0.html')
        # saving initial evaluation (before training)
        best_eval_acc = get_acc_from_confusion_matrix(dev_metrics)
        epoch_stats = {'loss':[None], 'eval_acc':[get_acc_from_confusion_matrix(dev_metrics)], 'train_acc':[get_acc_from_confusion_matrix(train_metrics)]}
        if temporal_awareness_ref_dir:
            epoch_stats['F1_TA'], epoch_stats['P_TA'], epoch_stats['R_TA'] = [F1_TA], [P_TA], [R_TA]
    else:
        best_eval_acc = 0,0
    if viz_inbetween:
        viz_dir = self.model_dir + '/viz/'
        os.makedirs(viz_dir)
        viz_doc = data[0]
        self.pred_viz(viz_doc, path=viz_dir + '/timeline0.html')
    num_examples_seen, num_examples_seen_prev_chkpt = 0, 0
    batch_id = 0
    e = 0
    chkpt_id,best_chkpt = 0,0
    while (e < num_epochs + 1) and (chkpt_id - best_chkpt <= patience):
        e+=1
        # ------------------------------------- start of epoch ------------------------
        # set network to training mode (for dropout)
        streaming_avg_loss = []
        start_time = time.time()
        batches = []
        num_batches_per_doc = {}
        # Build shuffled (doc_id, batch-of-relation-pairs) tuples for the epoch.
        for doc_id,doc in enumerate(data):
            c_rels = [(r, p) for (r,ps) in doc.span_pair_annotations.items() for p in ps if r in self.rels_train]
            random.shuffle(c_rels)
            num_batches = int(len(c_rels)/batch_size) + 1
            num_batches_per_doc[doc_id] = num_batches
            batch_indices = range(num_batches)
            for batch_i in batch_indices:
                batch = c_rels[batch_i*batch_size:(batch_i+1)*batch_size]
                batches.append((doc_id,batch))
        random.shuffle(batches)
        print ('\n===== Epoch', e, '(',(len(data)),' docs,',len(batches),'batches ) =====\n')
        self.set_train_mode()
        for doc_id, batch in batches:
            if chkpt_id - best_chkpt > patience:
                print('no more patience...')
                break
            if reset_optimizer and len(streaming_avg_loss) % reset_optimizer: # reset optimizer every X iterations
                self.reset_optimizer()
            doc, batch_start_time, batch_id, num_examples_seen = data[doc_id], time.time(), batch_id + 1, num_examples_seen + len(batch)
            loss, predicted_spans = 0.0, {}
            self.optimizer.zero_grad()
            encoded_text = self.encode_tokens(doc)
            # Make span predictions
            for rel, (span_a1, span_a2) in batch:
                if not span_a1 in predicted_spans:
                    predicted_spans[span_a1] = self.pred_span(doc, span_a1, encoded_text, convert_to_floats=False)
                if not span_a2 in predicted_spans:
                    predicted_spans[span_a2] = self.pred_span(doc, span_a2, encoded_text, convert_to_floats=False)
            # Calculate TLink Loss
            for rel, (span_a1, span_a2) in batch:
                s1, d1 = predicted_spans[span_a1]
                s2, d2 = predicted_spans[span_a2]
                Lr = self.get_Lr(rel, s1, d1, s2, d2, pred_relations, train_mode=True).view(1)
                loss += Lr
            if self.absolute:
                # Calculate Span Loss
                for span in predicted_spans:
                    #print('--------------')
                    #print(doc.span_to_string(span))
                    anns = doc.reverse_span_annotations[span] if span in doc.reverse_span_annotations else []
                    vs = [ann.split(':')[1] for ann in anns if ann.split(':')[0] == 'value']
                    value = vs[0] if len(vs) > 0 else None
                    if value:
                        num_seconds = get_dur_from_value(value)
                        if num_seconds:
                            gt_duration = float(num_seconds) / 86400 # to number of days
                            s, d = predicted_spans[span]
                            #print('gt',num_seconds, gt_duration, d)
                            Ldur = torch.abs(d - gt_duration).view(1)
                            #print('Ldur>>', Ldur)
                            loss += Ldur
            if self.doc_normalization:
                loss = loss / num_batches_per_doc[doc_id]
            loss_end_time = time.time()
            # loss stays a plain float (0.0) when the batch produced no loss terms.
            batch_loss = loss.cpu().data.numpy()[0] / len(batch) if type(loss) != float else 0
            if batch_loss > 0:
                loss.backward()
                #self.print_gradient_by_name()
                if clip:
                    for params in self.get_trainable_params():
                        nn.utils.clip_grad_norm(params,clip)
                self.optimizer.step()
            streaming_avg_loss.append(batch_loss)
            print (batch_id, '/',len(batches), doc.id, '\tbatch_loss:', round(batch_loss,5), 'streaming_avg_loss:',round(np.mean(streaming_avg_loss[-100:]),5),'\t t:', round(loss_end_time - batch_start_time,2),'backprop t:',round(time.time()-loss_end_time,2))
            if num_examples_seen - num_examples_seen_prev_chkpt > checkpoint_interval : # After every 10.000 examples evaluate the status quo
                chkpt_id += 1
                num_examples_seen_prev_chkpt = num_examples_seen
                self.set_eval_mode()
                if viz_inbetween:
                    viz_start_time = time.time()
                    self.pred_viz(viz_doc, path=viz_dir + '/timeline'+str(chkpt_id)+'.html')
                    print ('viz t:',round(time.time() - viz_start_time, 2))
                avg_loss = np.mean(streaming_avg_loss[-100:])
                epoch_stats['loss'].append(avg_loss)
                print('\n-- checkpoint', chkpt_id, '--')
                print('> avg loss: [', avg_loss, '] examples seen:', num_examples_seen,'chkpt t:', round(time.time() - start_time,2))
                print('DCT\ts:', self.nn['s_dct*'].data.cpu().numpy(),'\td:',self.clamp(self.nn['d_dct*'], self.dmin).data.cpu().numpy())
                if eval_on:
                    start_time_eval = time.time()
                    print('eval rels:', pred_relations)
                    # Optionally sweep the prediction margin around the current one.
                    original_margin = self.pred_margin
                    m_range = set([max(original_margin+d,0) for d in np.arange(-0.15, 0.2, 0.05)]) if tune_margin == 2 else [original_margin]
                    best_m_acc, best_m = 0, original_margin
                    for test_margin in m_range:
                        self.pred_margin = test_margin
                        dev_metrics, F1_TA, P_TA, R_TA = evaluate_timelinemodel(self, eval_on, pred_relations,temporal_awareness_ref_dir=temporal_awareness_ref_dir, all_pairs=True, entity_error_analysis_file_path=error_dir_entities + '/dev_' +str(chkpt_id) + '.txt')
                        eval_acc=get_acc_from_confusion_matrix(dev_metrics)
                        if tune_margin == 2:
                            print('m:', round(test_margin, 3), 'eval_acc', round(eval_acc, 3))
                        if eval_acc > best_m_acc:
                            best_m, best_m_acc, best_eval_metric = test_margin, eval_acc, dev_metrics
                            if temporal_awareness_ref_dir:
                                best_F1_TA, best_P1_TA, best_R_TA = F1_TA, P_TA, R_TA
                    self.pred_margin = best_m
                    train_metrics, _, _, _ = evaluate_timelinemodel(self, train_err_subset, pred_relations, all_pairs=True, entity_error_analysis_file_path=error_dir_entities + '/train_' +str(chkpt_id) + '.txt')
                    train_acc=get_acc_from_confusion_matrix(train_metrics)
                    save_confusion_matrix_from_metrics(train_metrics, error_dir_conf + '/train_' + str(chkpt_id) + '-m'+ str(self.pred_margin) + '.html')
                    save_confusion_matrix_from_metrics(best_eval_metric, error_dir_conf + '/dev_' + str(chkpt_id) + '-m'+ str(self.pred_margin) + '.html')
                    epoch_stats['eval_acc'].append(eval_acc)
                    epoch_stats['train_acc'].append(train_acc)
                    if temporal_awareness_ref_dir:
                        epoch_stats['F1_TA'].append(F1_TA)
                        epoch_stats['P_TA'].append(P_TA)
                        epoch_stats['R_TA'].append(R_TA)
                        print ('M:',round(self.pred_margin,3), 'f1_ta', best_F1_TA,'p_ta', best_P1_TA, 'r_ta', best_R_TA, 'eval_acc:', round(best_m_acc, 3), 'train_acc:',round(train_acc, 3), 't:', round(time.time()-start_time_eval, 2))
                    else:
                        print ('M:',round(self.pred_margin,3), '\teval_acc:', round(best_m_acc, 3), 'train_acc:',round(train_acc, 3), 't:', round(time.time()-start_time_eval, 2))
                    if epoch_stats['eval_acc'][-1] >= best_eval_acc:
                        print(epoch_stats['eval_acc'][-1],'>=', best_eval_acc)
                        best_chkpt, best_eval_acc = chkpt_id, epoch_stats['eval_acc'][-1]
                    if save_checkpoints:
                        self.save_timelinemodel(checkpoint_dir + '/checkpoint_' + str(chkpt_id) + '.p')
                    plot_data = [go.Scatter(x=np.array(range(num_epochs)), y=np.array(values), mode='lines+markers', name=key) for key,values in epoch_stats.items()]
                    py.offline.plot(plot_data, filename=self.model_dir + '/train_stats.html', auto_open=False)
                    print()
                self.set_train_mode()
    self.set_eval_mode()
    if save_checkpoints:
        # Restore the best-scoring checkpoint before the final save.
        best_checkpoint, best_score = best_chkpt, best_eval_acc
        print('>>> using best checkpoint:', best_checkpoint, 'with dev score', best_score)
        if best_checkpoint > 0:
            best_checkpoint_model = load_timelinemodel(checkpoint_dir + '/checkpoint_' + str(best_checkpoint) + '.p')
            print('setting checkpoint')
            self.__dict__.update(best_checkpoint_model.__dict__)
    if tune_margin:
        self.tune_pred_margin(data, pred_relations)
    self.save_timelinemodel(self.model_dir + '/model.p')
    print ('finished training t:',round(time.time()-training_start_time, 2))
def pred_span(self, doc, span, encoded_text, convert_to_floats=True):
    """Predict (start, duration) for *span*; optionally unwrap the 1x1 tensors
    into plain Python floats."""
    start = self.pred_starttime(span, doc, encoded_text)
    duration = self.pred_duration(span, doc, encoded_text)
    if not convert_to_floats:
        return start, duration
    return (float(start.cpu().data.numpy()[0, 0]),
            float(duration.cpu().data.numpy()[0, 0]))
def start_duration_pair_to_relation(self, s1, d1, s2, d2, rels):
    """Return the relation in *rels* whose Lt loss is smallest for the two
    predicted (start, duration) pairs; ties keep the first candidate."""
    best_rel, best_loss = None, None
    for candidate in rels:
        lt = self.get_Lt(candidate, s1, d1, s2, d2).cpu().data.numpy()[0]
        if best_loss is None or lt < best_loss:
            best_rel, best_loss = candidate, lt
    return best_rel
def pred_viz(self, doc, path='timeline.path'):
    """Render the predicted timeline of *doc* as a plotly Gantt chart at *path*.

    Events and TIMEX3 expressions are placed on a calendar axis anchored at
    the document creation time (the 'value:' annotation of span (0, 0)).
    """
    # https://plot.ly/python/gantt/
    encoded_text = self.encode_tokens(doc)
    events = {}
    # Extract the DCT date string from the (0,0) span's 'value:' label.
    dct_str = [label[6:] for label in doc.reverse_span_annotations[(0,0)] if 'value:' in label][0]
    dct_date_str = re.findall(r'\d\d\d\d-\d\d-\d\d', dct_str)[0]
    dct= datetime.datetime.strptime(dct_date_str, '%Y-%m-%d')
    for event_span in doc.span_annotations['EType:EVENT']:
        event_str = doc.text[event_span[0]:event_span[1]]
        start, duration = self.pred_span(doc, event_span, encoded_text)
        events[event_str] = {'start_date':self.num_to_date(float(start),dct_date=dct), 'end_date':self.num_to_date(float(start + duration),dct_date=dct)}
    df_events = [dict(Task=event, Start=events[event]['start_date'], Finish=events[event]['end_date'], Resource='EVENT') for event in events]
    # The DCT itself is drawn as a one-day TIMEX3 bar at offset 0.
    timex3s = {'DCT': {'start_date':self.num_to_date(float(0),dct_date=dct), 'end_date':self.num_to_date(float(0 + 1),dct_date=dct)}}
    for timex_span in doc.span_annotations['EType:TIMEX3']:
        timex3_str = doc.text[timex_span[0]:timex_span[1]]
        start, duration = self.pred_span(doc, timex_span, encoded_text)
        timex3s[timex3_str] = {'start_date':self.num_to_date(float(start),dct_date=dct), 'end_date':self.num_to_date(float(start + duration),dct_date=dct)}
    df_timex3 = [dict(Task=timex3, Start=timex3s[timex3]['start_date'], Finish=timex3s[timex3]['end_date'], Resource='TIMEX3') for timex3 in timex3s]
    colors = {'EVENT': 'rgb(0, 0, 255)', 'TIMEX3': 'rgb(0, 255, 100)' }
    fig = ff.create_gantt(sorted(df_events+df_timex3, key=lambda x: self.date_to_num(x['Start'])), title=doc.id, colors=colors, index_col='Resource',show_colorbar=True, group_tasks=True)
    py.offline.plot(fig, filename=path,auto_open=False)
def predict_doc(self, doc, span_labels):
    """Annotate every span under *span_labels* (plus the DCT span (0,0)) with
    its predicted 'start:<v>' and 'duration:<v>' labels, then rebuild the
    reverse annotation index. Returns the mutated *doc*."""
    self.set_eval_mode()
    encoded_text = self.encode_tokens(doc)
    for label in span_labels:
        for span in doc.span_annotations[label] + [(0, 0)]:
            start, duration = self.pred_span(doc, span, encoded_text)
            for key in ('start:' + str(start), 'duration:' + str(duration)):
                doc.span_annotations.setdefault(key, []).append(span)
    doc.reverse_span_annotations = reverse_dict_list(doc.span_annotations)
    return doc
def classify_rels_in_doc(self, doc, rels, all_pairs=False):
    """Assign one relation from *rels* to each candidate span pair in *doc*.

    Returns (relation -> list of pairs, span -> (start, duration) tensors).
    """
    if all_pairs:
        candidate_pairs = set(doc.reverse_span_pair_annotations)
    else:
        candidate_pairs = {pair
                           for rel in rels if rel in doc.span_pair_annotations
                           for pair in doc.span_pair_annotations[rel]}
    encoded_text = self.encode_tokens(doc)
    span_predictions = {}
    span_pair_predictions = {rel: [] for rel in rels}
    for left, right in candidate_pairs:
        # Cache the pointwise prediction for each span the first time we see it.
        for span in (left, right):
            if span not in span_predictions:
                span_predictions[span] = self.pred_span(doc, span, encoded_text, convert_to_floats=False)
        s1, d1 = span_predictions[left]
        s2, d2 = span_predictions[right]
        predicted = self.start_duration_pair_to_relation(s1, d1, s2, d2, rels)
        span_pair_predictions[predicted].append((left, right))
    return span_pair_predictions, span_predictions
def save_timelinemodel(self, path):
    """Pickle the whole model object to *path*, logging the elapsed time."""
    print('saving model', path)
    started = time.time()
    with open(path, 'wb') as handle:
        pickle.dump(self, handle, pickle.HIGHEST_PROTOCOL)
    print('saved t:', round(time.time() - started, 2), 's')
def parse_date(self, date):
    """Parse a date string into a datetime using dateutil's parser."""
    return dparser.parse(date)
def date_to_num(self, date, dct_date=None):
    """Convert *date* into a float offset in seconds relative to *dct_date*
    (falls back to the fixed anchor 2017-10-12)."""
    reference = dct_date or datetime.datetime(2017, 10, 12)
    return (date - reference).total_seconds()
def num_to_date(self, num, dct_date=None):
    """Convert a seconds offset back into a datetime relative to *dct_date*
    (falls back to the fixed anchor 2017-10-12)."""
    reference = dct_date or datetime.datetime(2017, 10, 12)
    return reference + datetime.timedelta(seconds=num)
def tune_pred_margin(self, dataset, relations, margins=np.arange(0,1,0.1/3), max_docs=10):
    """Grid-search ``self.pred_margin`` over *margins* on up to *max_docs*
    shuffled documents, keeping the margin with the best accuracy.

    NOTE(review): ``random.shuffle(dataset)`` mutates the caller's list, and
    ``os.mkdir`` raises if the tuning directory already exists -- confirm both
    are intended.
    """
    print('Tuning prediction margin')
    print('Training margin:', self.train_margin)
    tuning_dir = self.model_dir + '/tuning_m/'
    os.mkdir(tuning_dir)
    random.shuffle(dataset)
    max_acc, max_margin = 0, 0
    for m in margins:
        self.pred_margin = m
        metrics, F1, P, R = evaluate_timelinemodel(self, dataset[:max_docs], relations, all_pairs=True)
        acc = get_acc_from_confusion_matrix(metrics)
        save_confusion_matrix_from_metrics(metrics, tuning_dir + '/m'+ str(self.pred_margin) + '.html')
        if acc > max_acc:
            max_acc = acc
            max_margin = m
        print('m:',round(m,3),'\tacc:', acc)
    print('best margin:', max_margin)
    # Leave the model configured with the winning margin.
    self.pred_margin = max_margin
def load_timelinemodel(path):
    """Unpickle and return a model previously written by save_timelinemodel."""
    print('loading model', path)
    with open(path, 'rb') as handle:
        return pickle.load(handle)
def read_word_vectors(path):
    """Load word2vec vectors from *path*, trying the binary format first and
    falling back to the plain-text format.

    Bug fix: the original used a bare ``except:``, which also swallows
    KeyboardInterrupt/SystemExit; narrowed to ``Exception``.
    """
    print('reading word vectors:', path)
    try:
        wv = KeyedVectors.load_word2vec_format(path, binary=True)
    except Exception:
        # Not a binary word2vec file -- retry as text.
        wv = KeyedVectors.load_word2vec_format(path, binary=False)
    return wv
def write_average_durs_and_starts(model, preds, file_path):
    """Write per-word averaged predicted start times and durations to *file_path*.

    Aggregates the model's (start, duration) predictions over every EVENT span
    in *preds*, then writes two sections: start times ascending and durations
    descending, each with variance and the word's training frequency.
    """
    word_to_s, word_to_d = {}, {}
    pred_dir = '/'.join(file_path.split('/')[:-1])
    if not os.path.exists(pred_dir):
        os.makedirs(pred_dir)
    for doc in preds:
        encoded_text = model.encode_tokens(doc)
        for espan in doc.span_annotations['EType:EVENT']:
            s, d = model.pred_span(doc, espan, encoded_text, convert_to_floats=True)
            tok_str = doc.span_to_string(espan)
            if not tok_str in word_to_s:
                word_to_s[tok_str],word_to_d[tok_str] = [],[]
            word_to_s[tok_str].append(s)
            word_to_d[tok_str].append(d)
    # (word, mean, variance) tuples, sorted for the two report sections.
    word_to_avg_s = sorted([(w,np.mean(values),np.var(values)) for w,values in word_to_s.items()], key=lambda x: x[1])
    word_to_avg_d = sorted([(w,np.mean(values),np.var(values)) for w,values in word_to_d.items()], key=lambda x: x[1], reverse=True)
    with open(file_path, 'w') as f:
        f.write('--- Start Times Ascending --- (WORD, START, FREQ)\n\n')
        for w,avg_s,var_s in word_to_avg_s:
            f.write(w + '\t' + str(round(avg_s,4)) + '\t' + str(round(var_s,4)) + '\t' + str(model.word_frequencies[w] if w in model.word_frequencies else '<UNK>') + '\n')
        f.write('\n\n--- Durations Descending --- (WORD, DURATION, FREQ)\n\n')
        for w,avg_d,var_d in word_to_avg_d:
            f.write(w + '\t' + str(round(avg_d,4)) + '\t' + str(round(var_d,4)) + '\t' + str(model.word_frequencies[w] if w in model.word_frequencies else '<UNK>') + '\n')
def evaluate_timelinemodel(model, docs, rel_labels, temporal_awareness_ref_dir=None, all_pairs=False, error_viz_dir=None, entity_error_analysis_file_path=None, write_average_durations_and_starts=False,print_sd_preds=False):
    """Run *model* over *docs* and score its relation predictions.

    Returns (confusion_matrix, F1, P, R); the last three are None unless
    *temporal_awareness_ref_dir* is given, in which case the external
    TempEval-3 normalizer/evaluator (Java + Python 2.7 scripts) is invoked.

    NOTE(review): gold relations not in *rel_labels* are emptied from *docs*
    in place, so callers' documents are mutated.
    """
    preds, entity_errors_per_doc = [], []
    for doc in docs:
        #remove relations that you don't want to evaluate on
        for rel in doc.span_pair_annotations:
            if not rel in rel_labels:
                doc.span_pair_annotations[rel] = []
        # copy ref doc text etc
        pred = copy(doc)
        # remove relation annotations
        pred.span_pair_annotations = {}
        # classify relations using the model
        pairwise_labels, pointwise_preds = model.classify_rels_in_doc(doc, rel_labels,all_pairs=all_pairs)
        pred.update_annotations(span_pair_update=pairwise_labels)
        preds.append(pred)
        if print_sd_preds:
            # print_sd_preds doubles as the output directory path.
            if not os.path.exists(print_sd_preds):
                os.mkdir(print_sd_preds)
            with open(print_sd_preds + '/' + doc.id + '.txt', 'w') as f:
                preds_string = '\n'.join([str(s[0][0].cpu().data.numpy()) + '\t'+str(d[0][0].cpu().data.numpy()) + '\t' + str(span) +'\t'+ doc.span_to_string(span) for (span, (s,d)) in sorted(pointwise_preds.items(), key=lambda x: x[0][0])])
                preds_string = 'start\tduration\tspan\ttext\n' + preds_string
                f.write(preds_string)
    if error_viz_dir:
        viz_docs_rel_difference(docs, preds, error_viz_dir)
    # evaluate predictions
    metrics, entity_errors_per_doc = get_eval_metrics_docs(docs, preds, rel_labels, entity_error_analysis_file_path, error_viz_dir)
    if entity_error_analysis_file_path:
        save_entity_error_analysis(docs, entity_errors_per_doc, entity_error_analysis_file_path)
    if write_average_durations_and_starts:
        # write_average_durations_and_starts doubles as the output file path.
        write_average_durs_and_starts(model, preds, write_average_durations_and_starts)
    if temporal_awareness_ref_dir:
        #print('[temporal awareness evaluation subscripts]')
        # write preds to tmp folder
        tmp_pred_dir = model.model_dir + '/tmp_preds_'+str(len(docs))+'/'
        if not os.path.exists(tmp_pred_dir):
            os.mkdir(tmp_pred_dir)
        else:
            shutil.rmtree(tmp_pred_dir)
            os.mkdir(tmp_pred_dir)
        if not temporal_awareness_ref_dir[-1]=='/':
            temporal_awareness_ref_dir = temporal_awareness_ref_dir + '/'
        write_timebank_folder(preds, tmp_pred_dir, verbose=0)
        # 1. normalize temporal graphs
        norm_cmd = 'java -jar ./tempeval-3-tools/TimeML-Normalizer/TimeML-Normalizer.jar -a "'+temporal_awareness_ref_dir+';'+tmp_pred_dir+'"'
        norm_out_str = subprocess.check_output(norm_cmd, shell=True,stderr=subprocess.STDOUT)
        # 2. eval
        eval_cmd = 'python2.7 ./tempeval-3-tools/evaluation-relations/temporal_evaluation.py '+temporal_awareness_ref_dir[:-1]+'-normalized/'+' '+tmp_pred_dir[:-1]+'-normalized/ '+str(0)
        eval_out_str = subprocess.check_output(eval_cmd, shell=True).decode("utf-8")
        # The evaluator prints F1/P/R on its fourth output line.
        F1, P, R = [float(x) for x in eval_out_str.split('\n')[3].split()]
        return metrics, F1, P, R
    else:
        return metrics, None, None, None
def get_eval_metrics_docs(docs, preds, rel_labels, entity_error_analysis_file_path, error_viz_dir):
    """Sum per-document relation confusion matrices over all (ref, pred) pairs.

    Returns (confusion_matrix, per-document span-error records); the latter is
    only populated when *entity_error_analysis_file_path* is given.
    """
    entity_errors_per_doc = []
    # Confusion matrix: reference relation -> predicted relation -> count.
    metrics = {ref: {pred: 0 for pred in rel_labels} for ref in rel_labels}
    for i in range(len(preds)):
        extra = {'print_pairwise_errors': error_viz_dir + '/pairwise_errors_viz/'} if error_viz_dir else {}
        pred_metrics, metrics_per_span = get_selective_rel_metrics(docs[i], preds[i], rels=rel_labels, **extra)
        if entity_error_analysis_file_path:
            entity_errors_per_doc.append(metrics_per_span)
        # Accumulate this document's counts into the global matrix.
        for ref_rel in metrics:
            for pred_rel in metrics[ref_rel]:
                metrics[ref_rel][pred_rel] += pred_metrics[ref_rel][pred_rel]
    return metrics, entity_errors_per_doc
class TimelineFinder(TimelineModel): # TL2RTL Model
    """Builds an explicit timeline (a trainable start and duration scalar per
    event/timex) for each TimeML document by gradient descent on the
    relation losses, instead of predicting them from text."""

    def __init__(self, timeml_docs, dmin=0.025, rels_train=['BEFORE','AFTER','INCLUDES','IS_INCLUDED','SIMULTANEOUS'], rels_pred=['BEFORE','AFTER','INCLUDES','IS_INCLUDED','SIMULTANEOUS']):
        # Builds timelines from TimeML files
        self.dmin = dmin
        self.constants = {}
        self.constants['ZERO'] = autograd.Variable(torch.FloatTensor([0]), requires_grad=False)
        # One trainable start (init 0) and duration (init dmin) per entity id
        # (event instances 'ei<N>' and timexes 't<N>'), per document.
        self.entity_starts = {doc.id: {eid: autograd.Variable(torch.FloatTensor([[0]]), requires_grad=True)
                                       for eid in doc.get_span_labels_by_regex(r'ei\d+').union(doc.get_span_labels_by_regex(r't\d+'))}
                              for doc in timeml_docs}
        self.entity_durations = {doc.id: {eid: autograd.Variable(torch.FloatTensor([[self.dmin]]), requires_grad=True)
                                          for eid in doc.get_span_labels_by_regex(r'ei\d+').union(doc.get_span_labels_by_regex(r't\d+'))}
                                 for doc in timeml_docs}
        # Copy the relation lists so instances don't share the mutable defaults.
        self.rels_pred = list(rels_pred)
        self.rels_train = list(rels_train)
        self.gpu = False
        self.unk_token = '__unk__'
        self.feature_keys = None
        self.windex, self.cindex, self.pindex, self.findex = self.setup_vocabularies(timeml_docs, 0, special_conflation=0, entity_sequence=0)
        return

    def encode_tokens(self, doc, entity_spans=None):
        """Look up (not compute) the trained start/duration for every entity
        span of *doc*. Returns {'s': {span: start}, 'd': {span: duration}}."""
        if not doc.id in self.entity_starts:
            print('ERROR:', doc.id, 'not found in timeline encoded documents')
            exit()
        encodings = {'s': {}, 'd': {}}
        for eid in self.entity_starts[doc.id]:
            if not eid in doc.span_annotations:
                print('ERROR: eid not in document annotations:', eid, doc.get_span_labels_by_regex(eid[:2]+'.*'))
                exit()
            spans = doc.span_annotations[eid]
            if len(spans) > 1:
                print('!!!!!!!', doc.id, eid)
            span = spans[0]
            # Bug fix: original read `s, d = s, d = ...` (duplicated target list).
            s, d = self.entity_starts[doc.id][eid], self.clamp(self.entity_durations[doc.id][eid], self.dmin)
            encodings['s'][span] = s
            encodings['d'][span] = d
        return encodings

    def train(self, timeml_docs, num_epochs):
        """Optimize each document's per-entity start/duration variables so the
        annotated relations incur minimal loss; stops early at zero loss."""
        print('\n===== Building Timeline for each Document =====')
        # Starting to construct timelines
        for doc in timeml_docs:
            params = list(self.entity_starts[doc.id].values()) + list(self.entity_durations[doc.id].values())
            optimizer = torch.optim.Adam(params, lr=0.001)
            print(doc.id)
            for i in range(0, num_epochs):
                optimizer.zero_grad()
                loss = 0.0
                num_rels = 0
                for rel_type in self.rels_train:
                    if rel_type in doc.span_pair_annotations:
                        for sp_a1, sp_a2 in doc.span_pair_annotations[rel_type]:
                            eid_a1 = [label for label in doc.reverse_span_annotations[sp_a1] if label in self.entity_starts[doc.id]][0]
                            eid_a2 = [label for label in doc.reverse_span_annotations[sp_a2] if label in self.entity_starts[doc.id]][0]
                            s1, d1 = self.entity_starts[doc.id][eid_a1], self.clamp(self.entity_durations[doc.id][eid_a1], min_value=self.dmin)
                            s2, d2 = self.entity_starts[doc.id][eid_a2], self.clamp(self.entity_durations[doc.id][eid_a2], min_value=self.dmin)
                            loss += self.get_Lr(rel_type, s1, d1, s2, d2, self.rels_pred, train_mode=True).view(1)
                            num_rels += 1
                # Bug fix: with zero relations `loss` stays a plain float and
                # loss.backward() crashed; nothing to optimize, so stop.
                if num_rels == 0:
                    break
                loss.backward()
                optimizer.step()
                if loss == 0.0:
                    break
            print('loss', loss, 'after', i + 1, 'steps')
|
nilq/baby-python
|
python
|
import pygame
from cell_class import *
import copy
vec = pygame.math.Vector2
CELL_SIZE = 20
class GameWindow:
    """Conway's Game of Life board rendered onto its own pygame sub-surface."""
    def __init__(self, screen, x, y):
        self.screen = screen
        self.position = vec(x, y)
        self.width, self.height = 600, 600
        self.image = pygame.Surface((self.width, self.height))
        self.rect = self.image.get_rect()
        self.init_grids()
    def init_grids(self):
        """Build the cell grid sized to the window and pre-link neighbors."""
        # self.num_cols = 33
        # self.num_rows = 33
        self.num_cols = int(self.width / CELL_SIZE)
        self.num_rows = int(self.height / CELL_SIZE)
        self.grid = [[Cell(self.image, x, y) for x in range(self.num_cols)]
                     for y in range(self.num_rows)]
        for row in self.grid:
            for cell in row:
                cell.get_neighbors(self.grid)
    def update(self):
        """Reposition the window rect and let every cell update itself."""
        # inspect the current active gen
        # update the inactive grid to store next gen
        # swap out the active grid
        self.rect.topleft = self.position
        for row in self.grid:
            for cell in row:
                cell.update()
    def draw(self):
        """Clear to white, draw all cells, and blit the surface to the screen."""
        self.image.fill((255, 255, 255))
        for row in self.grid:
            for cell in row:
                cell.draw()
        self.screen.blit(self.image, (self.position.x, self.position.y))
        pygame.display.flip()
    def reset_grid(self):
        """Replace the grid with freshly-created (dead) cells."""
        self.grid = [[Cell(self.image, x, y) for x in range(self.num_cols)]
                     for y in range(self.num_rows)]
    def evaluate(self):
        """Advance one generation using the standard B3/S23 rules.

        NOTE(review): ``copy.copy`` is a shallow copy, so ``new_grid`` shares
        its rows (and cells) with ``self.grid``; this still works only because
        every cell's ``alive_neighbors`` count is computed for the whole board
        before any ``alive`` flag is mutated.
        """
        new_grid = copy.copy(self.grid)
        # Pass 1: snapshot neighbor counts for every cell.
        for row in self.grid:
            for cell in row:
                cell.live_neighbors()
        # Pass 2: apply birth/survival rules from the snapshot.
        for yidx, row in enumerate(self.grid):
            for xidx, cell in enumerate(row):
                if cell.alive:
                    if cell.alive_neighbors == 2 or cell.alive_neighbors == 3:
                        new_grid[yidx][xidx].alive = True
                    if cell.alive_neighbors < 2:
                        new_grid[yidx][xidx].alive = False
                    if cell.alive_neighbors > 3:
                        new_grid[yidx][xidx].alive = False
                else:
                    if cell.alive_neighbors == 3:
                        new_grid[yidx][xidx].alive = True
        # Pass 3: refresh colors for the surviving cells.
        for yidx, row in enumerate(self.grid):
            for xidx, cell in enumerate(row):
                if cell.alive:
                    new_grid[yidx][xidx].set_color()
        self.grid = new_grid
|
nilq/baby-python
|
python
|
'''
Created on Jul 28, 2013
@author: akittredge
'''
import pandas as pd
import pymongo
class MongoDataStore(object):
    """Loads and stores time-indexed metric DataFrames in a MongoDB collection.

    Documents have the shape {'identifier': ..., 'date': ..., <metric>: ...},
    one per (identifier, date) cell of the DataFrame.
    """
    def __init__(self, collection):
        # A pymongo collection object.
        self._collection = collection
    def __repr__(self):
        return '{}(collection={})'.format(self.__class__.__name__,
                                          self._collection.full_name)
    @classmethod
    def _ensure_indexes(cls, collection):
        # Compound index so (index_val, identifier) lookups stay fast.
        collection.ensure_index([('index_val', pymongo.ASCENDING),
                                 ('identifier', pymongo.ASCENDING)])
    def get(self, metric, df):
        '''Populate a DataFrame.

        Fills *df* (columns = identifiers, index = dates) in place with any
        stored values of *metric* inside the frame's date range, and returns it.
        '''
        identifiers = list(df.columns)
        start, stop = df.index[0], df.index[-1]
        index = 'date'
        metric = self.sanitize_key(metric)
        query = {'identifier' : {'$in' : identifiers},
                 metric : {'$exists' : True},
                 index : {'$gte' : start,
                          '$lte' : stop},
                 }
        store_data = read_frame(qry=query,
                                index=index,
                                values=metric,
                                collection=self._collection)
        df.update(store_data)
        return df
    def set(self, metric, df):
        # Persist every (identifier, date) cell of *df* under *metric*.
        metric = self.sanitize_key(metric)
        write_frame(metric=metric,
                    df=df,
                    collection=self._collection)
    @classmethod
    def sanitize_key(cls, key):
        '''Can't have . or $ in mongo field names.'''
        # Replaces them with their fullwidth Unicode lookalikes.
        # NOTE(review): `unichr` exists only on Python 2; use chr() on
        # Python 3 -- confirm the target interpreter before porting.
        key = key.replace('.', unichr(0xFF0E))
        key = key.replace('$', unichr(0xFF04))
        return key
# after pandas.io.sql
def read_frame(qry, index, values, collection):
    """Query *collection* with *qry* and pivot the resulting documents into a
    DataFrame (rows = *index* field, columns = 'identifier', cells = *values*).
    An empty query result yields an empty DataFrame."""
    frame = pd.DataFrame.from_records(collection.find(qry))
    if frame.empty:
        return frame
    return frame.pivot(index=index, columns='identifier', values=values)
def write_frame(metric, df, collection):
    """Flatten *df* into one document per (identifier, date) cell and insert
    them into *collection* under the field name *metric*.

    Bug fix: ``Series.iteritems()`` was removed in pandas 2.0; iterating
    ``zip(df.index, df[column])`` yields the same (index, value) pairs on
    every pandas version.
    """
    index_name = 'date'
    docs = [{'identifier': column, index_name: index_value, metric: value}
            for column in df
            for index_value, value in zip(df.index, df[column])]
    # NOTE(review): Collection.insert was removed in pymongo 4 (use
    # insert_many); kept as-is to preserve behavior on the current driver.
    collection.insert(docs)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Copyright 2018 Whitestack, LLC
# *************************************************************
# This file is part of OSM Monitoring module
# All Rights Reserved to Whitestack, LLC
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# For those usages not covered by the Apache License, Version 2.0 please
# contact: bdiaz@whitestack.com or glavado@whitestack.com
##
import asyncio
import json
import logging
import os
import sys
import unittest
from aiokafka import AIOKafkaProducer, AIOKafkaConsumer
from kafka.errors import KafkaError
from osm_policy_module.core.config import Config
log = logging.getLogger()
log.level = logging.INFO
stream_handler = logging.StreamHandler(sys.stdout)
log.addHandler(stream_handler)
class KafkaMessagesTest(unittest.TestCase):
    """Round-trips an example 'instantiated' message through a live Kafka
    broker; skipped automatically when no broker is reachable."""

    def setUp(self):
        # Bug fix: the original called bare `super()`, which only builds the
        # proxy object and never actually runs TestCase.setUp().
        super().setUp()
        cfg = Config()
        self.kafka_server = '{}:{}'.format(cfg.get('message', 'host'),
                                           cfg.get('message', 'port'))
        self.loop = asyncio.new_event_loop()

    def tearDown(self):
        super().tearDown()  # bug fix: was a no-op bare `super()`
        self.loop.close()  # release the event loop created in setUp

    def test_send_instantiated_msg(self):
        async def test_send_instantiated_msg():
            producer = AIOKafkaProducer(loop=self.loop,
                                        bootstrap_servers=self.kafka_server,
                                        key_serializer=str.encode,
                                        value_serializer=str.encode)
            await producer.start()
            consumer = AIOKafkaConsumer(
                "ns",
                loop=self.loop,
                bootstrap_servers=self.kafka_server,
                consumer_timeout_ms=10000,
                auto_offset_reset='earliest',
                value_deserializer=bytes.decode,
                key_deserializer=bytes.decode)
            await consumer.start()
            try:
                # Publish the example payload on the "ns" topic.
                with open(
                        os.path.join(os.path.dirname(__file__), '../examples/instantiated.json')) as file:
                    payload = json.load(file)
                    await producer.send_and_wait("ns", key="instantiated", value=json.dumps(payload))
            finally:
                await producer.stop()
            try:
                # Read back until we see our key; value must not be empty.
                async for message in consumer:
                    if message.key == 'instantiated':
                        self.assertIsNotNone(message.value)
                        return
            finally:
                await consumer.stop()
        try:
            self.loop.run_until_complete(test_send_instantiated_msg())
        except KafkaError:
            self.skipTest('Kafka server not present.')
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
|
nilq/baby-python
|
python
|
import json
from .miioservice import MiIOService
def twins_split(string, sep, default=None):
    """Split *string* at the first occurrence of *sep* into (head, tail);
    when *sep* is absent, return (string, default)."""
    head, found, tail = string.partition(sep)
    return (head, tail) if found else (string, default)
def string_to_value(string):
    """Map the literal tokens null/none/false/true to Python values; anything
    else is parsed as an integer (raising ValueError when it isn't one)."""
    literals = {'null': None, 'none': None, 'false': False, 'true': True}
    if string in literals:
        return literals[string]
    return int(string)
def string_or_value(string):
    """Values prefixed with '#' are parsed via string_to_value; everything
    else passes through verbatim. (Empty input raises IndexError, as before.)"""
    if string[0] == '#':
        return string_to_value(string[1:])
    return string
def miio_command_help(did=None, prefix='?'):
    """Return the usage text for miio_command, with examples rendered using
    *prefix*; *did* (when given) is substituted into the sample payload."""
    # Shell-style prefixes need their JSON examples single-quoted.
    quote = '' if prefix == '?' else "'"
    return f'\
Get Props: {prefix}<siid[-piid]>[,...]\n\
           {prefix}1,1-2,1-3,1-4,2-1,2-2,3\n\
Set Props: {prefix}<siid[-piid]=[#]value>[,...]\n\
           {prefix}2=#60,2-2=#false,3=test\n\
Do Action: {prefix}<siid[-piid]> <arg1|#NA> [...] \n\
           {prefix}2 #NA\n\
           {prefix}5 Hello\n\
           {prefix}5-4 Hello #1\n\n\
Call MIoT: {prefix}<cmd=prop/get|/prop/set|action> <params>\n\
           {prefix}action {quote}{{"did":"{did or "267090026"}","siid":5,"aiid":1,"in":["Hello"]}}{quote}\n\n\
Call MiIO: {prefix}/<uri> <data>\n\
           {prefix}/home/device_list {quote}{{"getVirtualModel":false,"getHuamiDevices":1}}{quote}\n\n\
Devs List: {prefix}list [name=full|name_keyword] [getVirtualModel=false|true] [getHuamiDevices=0|1]\n\
           {prefix}list Light true 0\n\n\
MiIO Spec: {prefix}spec [model_keyword|type_urn] [format=text|python|json]\n\
           {prefix}spec\n\
           {prefix}spec speaker\n\
           {prefix}spec xiaomi.wifispeaker.lx04\n\
           {prefix}spec urn:miot-spec-v2:device:speaker:0000A015:xiaomi-lx04:1\n\
'
async def miio_command(service: MiIOService, did, text, prefix='?'):
    """Parse a textual command and dispatch it to the MiIO/MIoT service.

    Supports raw URI calls ('/...'), raw MIoT requests ('prop...'/'action'),
    'list', 'spec', help, and the compact prop-get/prop-set/action syntax.
    """
    cmd, arg = twins_split(text, ' ')
    # Raw MiIO REST call.
    if cmd.startswith('/'):
        return await service.miio_request(cmd, arg)
    # Raw MIoT request with an optional JSON payload.
    if cmd.startswith('prop') or cmd == 'action':
        return await service.miot_request(cmd, json.loads(arg) if arg else None)
    argv = arg.split(' ') if arg else []
    argc = len(argv)
    if cmd == 'list':
        return await service.device_list(argc > 0 and argv[0],
                                         argc > 1 and string_to_value(argv[1]),
                                         argc > 2 and argv[2])
    if cmd == 'spec':
        return await service.miot_spec(argc > 0 and argv[0], argc > 1 and argv[1])
    if not did or not cmd or cmd in ('?', '?', 'help', '-h', '--help'):
        return miio_command_help(did, prefix)
    # Compact syntax: comma-separated "<siid[-piid][=value]>" items.
    props = []
    isget = False
    for item in cmd.split(','):
        iid, value = twins_split(item, '=')
        siid, apiid = twins_split(iid, '-', '1')
        if not (siid.isdigit() and apiid.isdigit()):
            return 'ERROR: siid/piid/aiid must be integer'
        prop = [int(siid), int(apiid)]
        if not isget:
            if value is None:
                isget = True
            else:
                prop.append(string_or_value(value))
        props.append(prop)
    if argc > 0:
        # Trailing tokens mean an action call; '#NA' marks "no arguments".
        args = [] if arg == '#NA' else [string_or_value(a) for a in argv]
        return await service.miot_action(did, props[0][0], props[0][1], args)
    handler = service.miot_get_props if isget else service.miot_set_props
    return await handler(did, props)
|
nilq/baby-python
|
python
|
import socket
import logging
logger = logging.getLogger(__name__)
class P2PSocket:
    """Thin non-blocking UDP socket wrapper for peer-to-peer messaging."""

    def __init__(self):
        self.s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)

    def bind(self, host, port):
        """Bind to (host, port) and switch the socket to non-blocking mode."""
        logger.debug("Binding P2P socket to (%s, %i)", host, port)
        self.s.bind((host, port))
        self.s.setblocking(False)

    def close(self):
        self.s.close()

    def recv(self, num=4096):
        """Return (data, addr), or None when no datagram is pending, or b""
        on a socket error."""
        try:
            return self.s.recvfrom(num)
        except BlockingIOError:
            return None
        except OSError:
            return b""

    def send(self, data, addr):
        self.s.sendto(data, addr)

    def client_address(self):
        return self.s.getsockname()
|
nilq/baby-python
|
python
|
from django.shortcuts import render, get_object_or_404
from blog_posts.models import Post
from blog_posts.forms import PostForm
def index(request):
    """Render the admin landing page listing every blog post."""
    context = {
        "index": "Index",
        "posts": Post.objects.all(),
    }
    return render(request, 'administracao/index-admin.html', context)
def post_detalhes(request, id):
    """Show the edit form for a single post (bound to POST data when posted).

    Bug fix: the original only created ``form`` inside the POST branch, so a
    plain GET request raised ``UnboundLocalError`` at render time. A form
    bound to the instance is now always available.
    """
    post = get_object_or_404(Post, id=id)
    if request.method == "POST":
        form = PostForm(request.POST, instance=post)
    else:
        form = PostForm(instance=post)
    return render(request, "blog_posts/post_detalhes.html", context={"form": form})
def erro401(request):
    """Render the 401 (unauthenticated) error page."""
    return render(request, 'administracao/erro401.html')
def erro403(request):
    """Render the 403 (forbidden) error page."""
    return render(request, 'administracao/erro403.html')
def erro404(request):
    """Render the 404 (not found) error page."""
    return render(request, 'administracao/erro404.html')
def erro500(request):
    """Render the 500 (internal server error) page."""
    return render(request, 'administracao/erro500.html')
def esqueceu(request):
    """Render the "forgot password" page."""
    return render(request, 'administracao/esqueceu.html')
def login(request):
    """Render the admin login page (template only; no authentication here)."""
    return render(request, 'administracao/login.html')
|
nilq/baby-python
|
python
|
"""Algorithm for simulating a 2048 game using Monte-Carlo method."""
import random, _2048
SIMULATE_TIMES = 100000
DIRECTIONS = ('UP', 'DOWN', 'LEFT', 'RIGHT')
def simulate_to_end(game):
    """Play *game* to completion with uniformly random moves; return the
    final score.

    Bug fix: the original tried at most 3 of the 4 directions per turn
    (``for i in xrange(3)``), so a turn could fail to move even though a
    legal move existed, forcing wasteful retries of the outer loop. Every
    remaining direction is now attempted. ``range`` is used so the code
    runs under both Python 2 and 3.
    """
    while game.get_state():
        dircts = list(DIRECTIONS)
        # Try random directions until one actually moves the board.
        while dircts:
            c = random.choice(dircts)
            if game.move(c):
                break
            dircts.remove(c)
    return game.get_score()
def score_sum(game, direction):
    """Sum of final scores over SIMULATE_TIMES random playouts that start by
    moving a clone of *game* once in *direction*.

    Bug fix: the original cloned the game once and then re-simulated the
    already-finished clone, so every playout after the first returned the
    same final score. A fresh clone of the post-move position is now taken
    for every playout.
    """
    start = game.clone()
    start.move(direction)
    score = 0
    for _ in range(SIMULATE_TIMES):
        score += simulate_to_end(start.clone())
    return score
def monte_carlo(game):
    """Choose a move for *game* by Monte-Carlo playouts.

    Returns the direction with the highest simulated score sum, or False
    when the candidate scores are indistinguishable (all equal).

    Bug fix: the tie check used ``set(scores)``, which iterates the dict
    *keys* and therefore only detected the single-candidate case; it now
    compares the score *values*. ``print scores`` is written as
    ``print(scores)``, which is valid in both Python 2 and 3.
    """
    scores = {}
    biggest = 0
    best = None
    directions = list(DIRECTIONS)
    # Discard directions that are illegal in the current position.
    for d in DIRECTIONS:
        test = game.clone()
        if not test.move(d):
            directions.remove(d)
    for direction in directions:
        score = score_sum(game.clone(), direction)
        if score > biggest:
            biggest = score
            best = direction
        scores[direction] = score
    print(scores)
    if len(set(scores.values())) == 1:
        return False
    else:
        return best
if __name__ == '__main__':
    # Script entry point (Python 2): decide one move for a fresh game.
    a_game = _2048.Gameplay()
    print monte_carlo(a_game)
|
nilq/baby-python
|
python
|
# Define a procedure is_palindrome, that takes as input a string, and returns a
# Boolean indicating if the input string is a palindrome.
# Base Case: '' => True
# Recursive Case: if first and last characters don't match => False
# if they do match, is the middle a palindrome?
def is_palindrome(s):
    """Return True iff string *s* reads the same forwards and backwards.

    Recursive definition (per the specification above the function):
      - base case: the empty string is a palindrome
      - if the first and last characters differ, it is not a palindrome
      - otherwise the answer is whether the middle is a palindrome

    The original block contained only the commented examples and no body;
    the implementation was missing entirely.

    >>> is_palindrome('')
    True
    >>> is_palindrome('abab')
    False
    >>> is_palindrome('abba')
    True
    """
    if s == '':
        return True
    if s[0] != s[-1]:
        return False
    return is_palindrome(s[1:-1])
|
nilq/baby-python
|
python
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import warnings
import mmcv
import numpy as np
import torch
from mmdet.core.visualization.image import imshow_det_bboxes
from ..builder import DETECTORS, build_backbone, build_head, build_neck
from .base import BaseDetector
INF = 1e8
@DETECTORS.register_module()
class SingleStageInstanceSegmentor(BaseDetector):
    """Base class for single-stage instance segmentors."""
    def __init__(self,
                 backbone,
                 neck=None,
                 bbox_head=None,
                 mask_head=None,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None,
                 init_cfg=None):
        # `pretrained` is deprecated: forward it onto the backbone config
        # for backward compatibility and steer users towards `init_cfg`.
        if pretrained:
            warnings.warn('DeprecationWarning: pretrained is deprecated, '
                          'please use "init_cfg" instead')
            backbone.pretrained = pretrained
        super(SingleStageInstanceSegmentor, self).__init__(init_cfg=init_cfg)
        self.backbone = build_backbone(backbone)
        if neck is not None:
            self.neck = build_neck(neck)
        else:
            self.neck = None
        if bbox_head is not None:
            # Heads get deep copies of train/test cfg so the shared config
            # dicts are not mutated by the head constructors.
            bbox_head.update(train_cfg=copy.deepcopy(train_cfg))
            bbox_head.update(test_cfg=copy.deepcopy(test_cfg))
            self.bbox_head = build_head(bbox_head)
        else:
            # Some segmentors have no separate bbox head.
            self.bbox_head = None
        # A mask head is mandatory for an instance segmentor.
        assert mask_head, f'`mask_head` must ' \
                          f'be implemented in {self.__class__.__name__}'
        mask_head.update(train_cfg=copy.deepcopy(train_cfg))
        mask_head.update(test_cfg=copy.deepcopy(test_cfg))
        self.mask_head = build_head(mask_head)
        self.train_cfg = train_cfg
        self.test_cfg = test_cfg
    def extract_feat(self, img):
        """Directly extract features from the backbone and neck."""
        x = self.backbone(img)
        if self.with_neck:
            x = self.neck(x)
        return x
    def forward_dummy(self, img):
        """Used for computing network flops.
        See `mmdetection/tools/analysis_tools/get_flops.py`
        """
        raise NotImplementedError(
            f'`forward_dummy` is not implemented in {self.__class__.__name__}')
    def forward_train(self,
                      img,
                      img_metas,
                      gt_masks,
                      gt_labels,
                      gt_bboxes=None,
                      gt_bboxes_ignore=None,
                      **kwargs):
        """
        Args:
            img (Tensor): Input images of shape (B, C, H, W).
                Typically these should be mean centered and std scaled.
            img_metas (list[dict]): A List of image info dict where each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also contain
                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
                For details on the values of these keys see
                :class:`mmdet.datasets.pipelines.Collect`.
            gt_masks (list[:obj:`BitmapMasks`] | None) : The segmentation
                masks for each box.
            gt_labels (list[Tensor]): Class indices corresponding to each box
            gt_bboxes (list[Tensor]): Each item is the truth boxes
                of each image in [tl_x, tl_y, br_x, br_y] format.
                Default: None.
            gt_bboxes_ignore (list[Tensor] | None): Specify which bounding
                boxes can be ignored when computing the loss.
        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        # Convert BitmapMasks to boolean tensors on the input device.
        gt_masks = [
            gt_mask.to_tensor(dtype=torch.bool, device=img.device)
            for gt_mask in gt_masks
        ]
        x = self.extract_feat(img)
        losses = dict()
        # CondInst and YOLACT have bbox_head
        if self.bbox_head:
            # bbox_head_preds is a tuple
            bbox_head_preds = self.bbox_head(x)
            # positive_infos is a list of obj:`InstanceData`
            # It contains the information about the positive samples
            # CondInst, YOLACT
            det_losses, positive_infos = self.bbox_head.loss(
                *bbox_head_preds,
                gt_bboxes=gt_bboxes,
                gt_labels=gt_labels,
                gt_masks=gt_masks,
                img_metas=img_metas,
                gt_bboxes_ignore=gt_bboxes_ignore,
                **kwargs)
            losses.update(det_losses)
        else:
            positive_infos = None
        mask_loss = self.mask_head.forward_train(
            x,
            gt_labels,
            gt_masks,
            img_metas,
            positive_infos=positive_infos,
            gt_bboxes=gt_bboxes,
            gt_bboxes_ignore=gt_bboxes_ignore,
            **kwargs)
        # avoid loss override
        assert not set(mask_loss.keys()) & set(losses.keys())
        losses.update(mask_loss)
        return losses
    def simple_test(self, img, img_metas, rescale=False):
        """Test function without test-time augmentation.
        Args:
            img (torch.Tensor): Images with shape (B, C, H, W).
            img_metas (list[dict]): List of image information.
            rescale (bool, optional): Whether to rescale the results.
                Defaults to False.
        Returns:
            list(tuple): Formatted bbox and mask results of multiple \
                images. The outer list corresponds to each image. \
                Each tuple contains two type of results of single image:
                - bbox_results (list[np.ndarray]): BBox results of
                  single image. The list corresponds to each class.
                  each ndarray has a shape (N, 5), N is the number of
                  bboxes with this category, and last dimension
                  5 arrange as (x1, y1, x2, y2, scores).
                - mask_results (list[np.ndarray]): Mask results of
                  single image. The list corresponds to each class.
                  each ndarray has a shape (N, img_h, img_w), N
                  is the number of masks with this category.
        """
        feat = self.extract_feat(img)
        if self.bbox_head:
            outs = self.bbox_head(feat)
            # results_list is list[obj:`InstanceData`]
            results_list = self.bbox_head.get_results(
                *outs, img_metas=img_metas, cfg=self.test_cfg, rescale=rescale)
        else:
            results_list = None
        results_list = self.mask_head.simple_test(
            feat, img_metas, rescale=rescale, instances_list=results_list)
        # Convert per-image InstanceData into (bbox_results, mask_results).
        format_results_list = []
        for results in results_list:
            format_results_list.append(self.format_results(results))
        return format_results_list
    def format_results(self, results):
        """Format the model predictions according to the interface with
        dataset.
        Args:
            results (:obj:`InstanceData`): Processed
                results of single images. Usually contains
                following keys.
                - scores (Tensor): Classification scores, has shape
                  (num_instance,)
                - labels (Tensor): Has shape (num_instances,).
                - masks (Tensor): Processed mask results, has
                  shape (num_instances, h, w).
        Returns:
            tuple: Formatted bbox and mask results.. It contains two items:
                - bbox_results (list[np.ndarray]): BBox results of
                  single image. The list corresponds to each class.
                  each ndarray has a shape (N, 5), N is the number of
                  bboxes with this category, and last dimension
                  5 arrange as (x1, y1, x2, y2, scores).
                - mask_results (list[np.ndarray]): Mask results of
                  single image. The list corresponds to each class.
                  each ndarray has shape (N, img_h, img_w), N
                  is the number of masks with this category.
        """
        data_keys = results.keys()
        assert 'scores' in data_keys
        assert 'labels' in data_keys
        assert 'masks' in data_keys, \
            'results should contain ' \
            'masks when format the results '
        mask_results = [[] for _ in range(self.mask_head.num_classes)]
        num_masks = len(results)
        if num_masks == 0:
            # No detections: return empty per-class bbox arrays and masks.
            bbox_results = [
                np.zeros((0, 5), dtype=np.float32)
                for _ in range(self.mask_head.num_classes)
            ]
            return bbox_results, mask_results
        labels = results.labels.detach().cpu().numpy()
        if 'bboxes' not in results:
            # create dummy bbox results to store the scores
            results.bboxes = results.scores.new_zeros(len(results), 4)
        det_bboxes = torch.cat([results.bboxes, results.scores[:, None]],
                               dim=-1)
        det_bboxes = det_bboxes.detach().cpu().numpy()
        bbox_results = [
            det_bboxes[labels == i, :]
            for i in range(self.mask_head.num_classes)
        ]
        # Bucket masks by their predicted class label.
        masks = results.masks.detach().cpu().numpy()
        for idx in range(num_masks):
            mask = masks[idx]
            mask_results[labels[idx]].append(mask)
        return bbox_results, mask_results
    def aug_test(self, imgs, img_metas, rescale=False):
        # Test-time augmentation is not supported by this segmentor family.
        raise NotImplementedError
    def show_result(self,
                    img,
                    result,
                    score_thr=0.3,
                    bbox_color=(72, 101, 241),
                    text_color=(72, 101, 241),
                    mask_color=None,
                    thickness=2,
                    font_size=13,
                    win_name='',
                    show=False,
                    wait_time=0,
                    out_file=None):
        """Draw `result` over `img`.
        Args:
            img (str or Tensor): The image to be displayed.
            result (tuple): Format bbox and mask results.
                It contains two items:
                - bbox_results (list[np.ndarray]): BBox results of
                  single image. The list corresponds to each class.
                  each ndarray has a shape (N, 5), N is the number of
                  bboxes with this category, and last dimension
                  5 arrange as (x1, y1, x2, y2, scores).
                - mask_results (list[np.ndarray]): Mask results of
                  single image. The list corresponds to each class.
                  each ndarray has shape (N, img_h, img_w), N
                  is the number of masks with this category.
            score_thr (float, optional): Minimum score of bboxes to be shown.
                Default: 0.3.
            bbox_color (str or tuple(int) or :obj:`Color`):Color of bbox lines.
               The tuple of color should be in BGR order. Default: 'green'
            text_color (str or tuple(int) or :obj:`Color`):Color of texts.
               The tuple of color should be in BGR order. Default: 'green'
            mask_color (None or str or tuple(int) or :obj:`Color`):
               Color of masks. The tuple of color should be in BGR order.
               Default: None
            thickness (int): Thickness of lines. Default: 2
            font_size (int): Font size of texts. Default: 13
            win_name (str): The window name. Default: ''
            wait_time (float): Value of waitKey param.
                Default: 0.
            show (bool): Whether to show the image.
                Default: False.
            out_file (str or None): The filename to write the image.
                Default: None.
        Returns:
            img (Tensor): Only if not `show` or `out_file`
        """
        assert isinstance(result, tuple)
        bbox_result, mask_result = result
        bboxes = np.vstack(bbox_result)
        img = mmcv.imread(img)
        img = img.copy()
        labels = [
            np.full(bbox.shape[0], i, dtype=np.int32)
            for i, bbox in enumerate(bbox_result)
        ]
        labels = np.concatenate(labels)
        if len(labels) == 0:
            bboxes = np.zeros([0, 5])
            masks = np.zeros([0, 0, 0])
        # draw segmentation masks
        else:
            masks = mmcv.concat_list(mask_result)
            if isinstance(masks[0], torch.Tensor):
                masks = torch.stack(masks, dim=0).detach().cpu().numpy()
            else:
                masks = np.stack(masks, axis=0)
            # dummy bboxes
            if bboxes[:, :4].sum() == 0:
                # No real boxes were predicted: derive tight boxes from the
                # mask extents so imshow_det_bboxes has something to draw.
                num_masks = len(bboxes)
                x_any = masks.any(axis=1)
                y_any = masks.any(axis=2)
                for idx in range(num_masks):
                    x = np.where(x_any[idx, :])[0]
                    y = np.where(y_any[idx, :])[0]
                    if len(x) > 0 and len(y) > 0:
                        bboxes[idx, :4] = np.array(
                            [x[0], y[0], x[-1] + 1, y[-1] + 1],
                            dtype=np.float32)
        # if out_file specified, do not show image in window
        if out_file is not None:
            show = False
        # draw bounding boxes
        img = imshow_det_bboxes(
            img,
            bboxes,
            labels,
            masks,
            class_names=self.CLASSES,
            score_thr=score_thr,
            bbox_color=bbox_color,
            text_color=text_color,
            mask_color=mask_color,
            thickness=thickness,
            font_size=font_size,
            win_name=win_name,
            show=show,
            wait_time=wait_time,
            out_file=out_file)
        if not (show or out_file):
            return img
|
nilq/baby-python
|
python
|
#LordLynx
#Part of PygameLord
import pygame,os
from pygame.locals import*
pygame.init()
#Loading Objects
'''
Parse_Locations(file)
file: Your text file, use a .txt
# Like in Python will be ingored thusly follow this example
#Coment
./File/File
./File/Other File
...
'''
def Parse_Locations(file):
    """Read a resource-location list file and return its entries as a list.

    Lines beginning with '#' are comments and blank lines are ignored, per
    the format documented above.

    Bug fix: the original indexed ``i[0]`` without checking for empty
    strings, so any blank line in the file raised IndexError. The file is
    now also closed via a context manager even if reading fails.
    """
    with open(file, 'r') as handle:
        stripped = [line.strip('\n') for line in handle]
    folders = []
    for entry in stripped:
        # Keep only non-empty, non-comment lines.
        if entry and not entry.startswith('#'):
            folders.append(entry)
    return folders
'''
Lord_Loaders(paths,files)
paths: The folders returned in the Parse_Locations function
files: The .files which you wish to use
Modified versions of this are in Sounds and Images
If the opertunity arises copy and paste this code into your program and change the files like the Image and Sound loaeders
'''
def Lord_Loader(paths, files):
    """Collect files under *paths* whose names end with any suffix in *files*.

    Returns a dict mapping each file's base name (without extension) to its
    full path.

    Bug fixes: the original appended to an undefined ``Images`` list and
    returned an undefined ``Image_Set`` (both NameErrors at runtime); it now
    consistently uses the local ``Files`` / ``File_Set``.
    """
    Files = []
    File_Set = {}
    for path in paths:
        for entry in os.listdir(path):  # loops through the parts
            for file_end in files:
                if entry.endswith(file_end):
                    Files.append(os.path.join(path, entry))
    for found in Files:  # appends them
        # Key is the file name up to the first dot.
        base = os.path.split(found)[-1].split('.')[0]
        File_Set[base] = found
    return File_Set
|
nilq/baby-python
|
python
|
from results_saver import LogWriter
from .ModelType import ModelType
from .lda_lsa_model_tester import LModelTester
from .naive_bayes_model_tester import NBModelTester
from .lsa_tester import LSAModelTester
from .svm_model_tester import SVMModelTester
from ..methods.Lda import Lda
from ..methods.Lsa import Lsa
from ..methods.Lda_sklearn import LdaSklearn
from ..methods.Naive_bayes import NaiveBayes
from ..methods.SVM import SupportVectorMachines
from ..methods.Decision_tree import DecisionTree
from ..methods.Random_forest import RandomForest
from results_saver import plot_confusion_matrix
import numpy as np
class GeneralTester:
    """Coordinates repeated train/test runs of the topic-classification
    models, accumulating accuracies, plots and confusion matrices via the
    provided LogWriter."""
    def __init__(self, log_writer, start_time):
        # Documents and dataset metadata are injected later via the
        # set_new_dataset / set_new_preprocess_docs setters.
        self.testing_docs = None
        self.training_docs = None
        self.num_of_topics = None
        self.log_writer:LogWriter = log_writer
        self.start_time = start_time
        self.topic_names = None
        # Accumulated (model_name, accuracies) tuples across do_test calls.
        self.model_results = []
        self.preprocess_style = ""
        # Maps ModelType -> list of (preprocess_style, accuracies).
        self.preproces_results = {}
        self.num_of_tests = 1
    def set_new_dataset(self, num_of_topics, topic_names):
        """
        Notifies that new dataset has been set and updates num_of_topics and topic_names attributes
        :param num_of_topics:
        :param topic_names:
        """
        self.num_of_topics = num_of_topics
        self.topic_names = topic_names
    def set_new_preprocess_docs(self, training_docs, testing_docs):
        """
        Sets new dataset documents to be tested
        :param training_docs:
        :param testing_docs:
        """
        self.testing_docs = testing_docs
        self.training_docs = training_docs
    def do_test(self, model_type, num_of_tests, statistics, params, test_params, stable=False):
        """
        Do test on provided model type. Also sets things up before the test.
        :param model_type: ModelType enum for model that should be tested
        :param num_of_tests: number of tests to be performed on this model
        :param statistics: list to which accuracy and other information will be written
        :param params: Parameters for tested model
        :param test_params: Parameters for test
        :param stable: Indicates whether algorithm is deterministic. If True only one test will be committed and the rest of results will be padded with same result (for charts comparisons).
        """
        self.num_of_tests = num_of_tests
        accuracies = []
        # Header rows: blank, model name, test indices (+ "Average"), results.
        statistics.append([])
        statistics.append([model_type.name])
        statistics.append([x for x in range(num_of_tests)])
        statistics[len(statistics) - 1].append("Average")
        statistics.append([])
        for i in range(num_of_tests):
            accuracy = self.test_model(model_type,
                                       test_params.get("dataset_name", "none"),
                                       params,test_params)
            accuracies.append(accuracy)
            statistics[len(statistics) - 1].append(accuracy)
            self.log_writer.add_log("Testing {} model done with {}% accuracy".format(model_type, accuracy * 100))
            self.log_writer.add_log("\n\n")
            if stable:
                # Deterministic model: pad the remaining slots with the same
                # single result and stop testing.
                for j in range(num_of_tests - 1):
                    accuracies.append(accuracy)
                    statistics[len(statistics) - 1].append(accuracy)
                break
        total_accuracy = sum(accuracies) / len(accuracies)
        self.log_writer.add_to_plot(model_type.name, accuracies)
        self.log_writer.draw_plot(model_type.name + " " + test_params.get("dataset_name", "none"),
                                  '{}_model_accuracy'.format(test_params.get("dataset_name", "none")), num_of_tests)
        self.model_results.append((model_type.name, accuracies))
        if model_type in self.preproces_results:
            self.preproces_results[model_type].append((self.preprocess_style, accuracies))
        else:
            self.preproces_results[model_type] = [(self.preprocess_style, accuracies)]
        statistics[len(statistics) - 1].append(total_accuracy)
        self.log_writer.add_log("Total accuracy is: {}".format(total_accuracy))
    def test_model(self, model_type, test_name, params, test_params):
        """
        Runs actual test on a model
        :param model_type: ModelType enum for model that should be tested
        :param test_name: name that will be used for creating output folder
        :param params: Parameters for tested model
        :param test_params: Parameters for test (must provide 'dataset_helper')
        :return: Accuracy of provided model
        """
        model = None
        tester = None
        # Each branch below builds the model, trains it and picks the
        # matching tester implementation.
        if model_type == ModelType.LDA:
            model = Lda(self.num_of_topics, params=params)
        elif model_type == ModelType.LDA_Sklearn:
            model = LdaSklearn(self.num_of_topics, params=params)
        if model is not None:
            self.log_writer.add_log("Starting training {} model".format(model_type))
            model.train(self.training_docs)  # TODO watch out for rewrites
            self.log_writer.add_log("Starting testing {} model".format(model_type))
            tester = LModelTester(self.training_docs, self.testing_docs, self.num_of_topics, self.log_writer,
                                  self.topic_names)
        if model_type == ModelType.LSA:
            model = Lsa(self.num_of_topics, params=params)
            self.log_writer.add_log("Starting training {} model".format(model_type))
            model.train(self.training_docs)  # TODO watch out for rewrites
            self.log_writer.add_log("Starting testing {} model".format(model_type))
            tester = LSAModelTester(self.training_docs, self.testing_docs, self.num_of_topics, self.log_writer,
                                    self.topic_names)
        if model_type == ModelType.NB:
            model = NaiveBayes(params)
            self.log_writer.add_log("Starting training {} model".format(model_type))
            model.train(self.training_docs, self.testing_docs)
            self.log_writer.add_log("Starting testing {} model".format(model_type))
            tester = NBModelTester(self.training_docs, self.testing_docs, self.num_of_topics, self.log_writer,
                                   self.topic_names)
        if model_type == ModelType.SVM or model_type == ModelType.DT or model_type == ModelType.RF:
            if model_type == ModelType.SVM:
                model = SupportVectorMachines(params)
            elif model_type == ModelType.DT:
                model = DecisionTree(params)
            elif model_type == ModelType.RF:
                model = RandomForest(params)
            self.log_writer.add_log("Starting training {} model".format(model_type))
            model.train(self.training_docs)
            self.log_writer.add_log("Starting testing {} model".format(model_type))
            tester = SVMModelTester(self.training_docs, self.testing_docs, self.num_of_topics, self.log_writer,
                                    self.topic_names)
        # NOTE(review): an unrecognized model_type leaves tester as None and
        # the next line would raise AttributeError — confirm callers only
        # pass the handled ModelType values.
        accuracy = tester.test_model(model,test_name)
        # Drop the header row/column, transpose so rows are predictions.
        cm:np.ndarray = np.array(tester.confusion_matrix)
        cm = cm[1:,1:]
        cm = cm.transpose()
        cm = cm.astype(np.uint32)
        dataset_helper = test_params.get('dataset_helper',None)
        plot_confusion_matrix(cm,dataset_helper.get_num_of_topics(),dataset_helper.get_dataset_name(),self.log_writer)
        return accuracy
    def create_test_name(self, dataset_name, start_time, model_name, preprocess_index, test_num):
        """
        Helper function to create path to a current test folder
        :param dataset_name: name of a tested dataset
        :param start_time: can be any unique number. (if number was already used in past test results will rewrite those past test results)
        :param model_name: name of a tested model
        :param preprocess_index: Index of a preprocess settings
        :param test_num: number of a test (if multiple tests are conducted on a single model)
        :return: path to test folder
        """
        return "\\results\\results{}{}\\{}\\preprocess{}\\test_num{}".format(dataset_name, start_time, model_name,
                                                                             preprocess_index, test_num)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
import pathlib
import sys
sys.path += ['/opt/py', str(pathlib.Path.home() / 'py')]
import basedir
import shlex
import subprocess
def info_beamer_invocation():
    """Return the command-line prefix used to launch info-beamer.

    A user-provided launcher at ~/.config/fenhl/info-beamer wins; otherwise
    fall back to running the bundled Pi binary via sudo.
    """
    home = pathlib.Path.home()
    override = home / '.config' / 'fenhl' / 'info-beamer'
    if override.exists():
        return [str(override)]
    #TODO support info-beamer-open-source (see ~/.config/fenhl/info-beamer @ familiepc)
    return ['sudo', '-E', str(home / 'info-beamer-pi' / 'info-beamer')]
def run_node(node, *args, check=True, **kwargs):
    """Run an info-beamer node and return the CompletedProcess.

    *node* is either a name configured under info-beamer.nodes in
    fenhl/syncbin.json, or a filesystem path to a node directory.
    Extra positional args are appended to the invocation; *check* and
    **kwargs are forwarded to subprocess.run.
    """
    nodes = basedir.config_dirs('fenhl/syncbin.json').json(base={}).get('info-beamer', {}).get('nodes', {})
    invocation = nodes.get(node)
    if invocation is None:
        # Not configured by name: treat it as a path to a node.
        resolved = pathlib.Path(node).expanduser().resolve()
        invocation = info_beamer_invocation() + [str(resolved)]
    return subprocess.run(invocation + list(args), check=check, **kwargs)
if __name__ == '__main__':
    # CLI: `--list` prints the configured nodes; anything else is treated as
    # a node name/path plus extra arguments, and the exit status of the
    # launched node is propagated.
    if sys.argv[1] == '--list':
        for node_name, invocation in basedir.config_dirs('fenhl/syncbin.json').json(base={}).get('info-beamer', {}).get('nodes', {}).items():
            print('{}: {}'.format(node_name, ' '.join(shlex.quote(arg) for arg in invocation)))
    else:
        sys.exit(run_node(*sys.argv[1:], check=False).returncode)
|
nilq/baby-python
|
python
|
import random
from app.core.utils import get_random_date
def build_demo_data():
    """
    Helper method, just to demo the app
    :return: a list of demo docs sorted by ranking (highest first)
    """
    samples = ["Messier 81", "StarBurst", "Black Eye", "Cosmos Redshift", "Sombrero", "Hoags Object",
               "Andromeda", "Pinwheel", "Cartwheel",
               "Mayall's Object", "Milky Way", "IC 1101", "Messier 87", "Ring Nebular", "Centarus A", "Whirlpool",
               "Canis Major Overdensity", "Virgo Stellar Stream"]
    res = []
    for index, item in enumerate(samples):
        # Bug fix: the URL had been mangled by an HTML-entity round-trip
        # ("&para;" rendered as "¶", turning "&param1" into "¶m1");
        # restore the intended query-string separators.
        res.append(DocumentInfo(item, (item + " ") * 5, get_random_date(),
                                "doc_details?id={}&param1=1&param2=2".format(index), random.random()))
    # simulate sort by ranking
    res.sort(key=lambda doc: doc.ranking, reverse=True)
    return res
class SearchEngine:
    """educational search engine"""

    i = 12345

    def search(self, search_query):
        """Print the query and return demo results ranked best-first."""
        print("Search query:", search_query)
        ##### your code here #####
        # replace with call to search algorithm
        hits = build_demo_data()
        ##### your code here #####
        return hits
class DocumentInfo:
    """Lightweight record describing one search result (title, snippet,
    date, link URL and a ranking score used for sorting)."""
    def __init__(self, title: str, description: str, doc_date, url: str, ranking: float):
        self.title = title
        self.description = description
        self.doc_date = doc_date
        self.url = url
        # Higher ranking sorts earlier in result lists.
        self.ranking = ranking
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
"""
Project Icarus
creator: derilion
date: 01.07.2019
version: 0.1a
"""
"""
TODO:
- Installer
- Database Structure
- Special Characters in *.ini
- Setup of skills
- Configuration of Clients
- multi language support
"""
# imports
from icarus.icarus import Icarus
# Thread-safe entry point: only start the assistant when run as a script,
# never on import.
if __name__ == "__main__":
    Icarus().start()
|
nilq/baby-python
|
python
|
import requests
import json
remote_url = ""
device_id = ""
bearer = ""
api_key = ""
app_id = ""
def url(endpoint):
    """Join the configured remote base URL with *endpoint*."""
    return "%s%s" % (remote_url, endpoint)
def headers_with_headers(headers):
    """Build the default request headers, optionally merged with *headers*.

    Always sets the JSON content type and the X-BLGREQ identification
    headers from the module-level configuration; adds a Bearer token when
    one is set. Caller-supplied *headers* override defaults on key clash.
    """
    base = {
        "Content-Type": "application/json",
        "X-BLGREQ-UDID": device_id,
        "X-BLGREQ-SIGN": api_key,
        "X-BLGREQ-APPID": app_id,
    }
    if bearer:
        base["Authorization"] = "Bearer {0}".format(bearer)
    if headers:
        # Merge with caller headers taking precedence.
        return {**base, **headers}
    return base
def get(endpoint, parameters, headers):
    """Issue a GET to *endpoint* with query *parameters* and merged headers."""
    merged = headers_with_headers(headers)
    return requests.get(url(endpoint), params=parameters, headers=merged)
def post(endpoint, parameters, headers):
    """Issue a POST to *endpoint* with a JSON body and merged headers."""
    body = json.dumps(parameters)
    return requests.post(url(endpoint), data=body, headers=headers_with_headers(headers))
|
nilq/baby-python
|
python
|
from .iotDualMotor import IotDualMotor
class IotEncodedMotor(IotDualMotor):
    """ the base class for motor with encoder
    The speed range from -100 to 100 with zero (less than minMovingSpeed) to stop the motor.
    Subclasses are expected to override runAngle and goToPosition; both are
    no-op stubs here.
    """
    def __init__(self, name, parent, minMovingSpeed=5):
        """ construct a PiIotNode
        name: the name of the node
        parent: parent IotNode object. None for root node.
        minMovingSpeed: the minimum valid moving absolute speed
        """
        super(IotEncodedMotor, self).__init__(name, parent, minMovingSpeed)
    def runAngle(self, angle, speed, speed2 = None):
        """ move the motor by specified angle for either single or dual motor
        angle range from 0 to 360 degree
        speed controls the direction ranges from -100 to 100
        speed2: optional speed for the second motor of a dual setup
        """
        # Abstract hook: encoder-driven rotation is implemented by subclasses.
        pass
    def goToPosition(self, position, position2 = None, speed = 100):
        """ run the motor to specified positions for either single or dual motor
        position range from int.min to int.max
        speed controls the direction ranges from -100 to 100
        """
        # Abstract hook: encoder-driven positioning is implemented by subclasses.
        pass
|
nilq/baby-python
|
python
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class TaskProperties(Model):
    """Task properties of the software update configuration.
    :param parameters: Gets or sets the parameters of the task.
    :type parameters: dict[str, str]
    :param source: Gets or sets the name of the runbook.
    :type source: str
    """
    # msrest (de)serialization map: attribute name -> wire key and type.
    _attribute_map = {
        'parameters': {'key': 'parameters', 'type': '{str}'},
        'source': {'key': 'source', 'type': 'str'},
    }
    def __init__(self, *, parameters=None, source: str=None, **kwargs) -> None:
        super(TaskProperties, self).__init__(**kwargs)
        self.parameters = parameters
        self.source = source
|
nilq/baby-python
|
python
|
import sys
import argparse
from absynthe.graph_builder import TreeBuilder
def treeGeneration(numRoots: int = 2, numLeaves: int = 4,
                   branching: int = 2, numInnerNodes: int = 16):
    """Generate a random tree-like control-flow graph and dump it in dot
    format on standard output.

    All counts are passed to TreeBuilder as strings, matching its keyword
    contract; nodes are of the SimpleLoggerNode type.
    """
    tree_kwargs = {
        TreeBuilder.KW_NUM_ROOTS: str(numRoots),
        TreeBuilder.KW_NUM_LEAVES: str(numLeaves),
        TreeBuilder.KW_BRANCHING_DEGREE: str(branching),
        TreeBuilder.KW_NUM_INNER_NODES: str(numInnerNodes),
        TreeBuilder.KW_SUPPORTED_NODE_TYPES: "SimpleLoggerNode",
    }
    builder = TreeBuilder(**tree_kwargs)
    builder.generateNewGraph().dumpDotFile(sys.stdout)
if "__main__" == __name__:
"""
Dumps a simple, tree-like control flow graph on standard output. This output can be redirected
to a file and converted to an image using graphviz's 'dot' utility. The graph is generated with
fair amount of randomness, so repeated invocations with the same set of parameters will yield
different graphs.
"""
argParser = argparse.ArgumentParser(description="Dumps a simple control flow graph on standard"
+ " output. This output can be redirected to a file and"
+ " converted to an image using graphviz's 'dot' utility."
+ " The graph is generated with fair amount of randomness,"
+ " so repeated invocations with identical parameters will"
+ " yield different graphs.")
argParser.add_argument("-r", "--num_roots", required=True, type=int,
help="Number of roots in the graph.")
argParser.add_argument("-l", "--num_leaves", type=int, required=True,
help="Number of leaves in the graph.")
argParser.add_argument("-n", "--num_nodes", type=int, required=True,
help="Approximate number of inner nodes that this graph should contain."
+ " The actual number is usually larger"
+ " than what is specified here.")
argParser.add_argument("-b", "--branching", type=int, required=True,
help="Approximate avg. branching degree of nodes in this graph.")
args = argParser.parse_args()
r: int = args.num_roots
l: int = args.num_leaves
n: int = args.num_nodes
b: int = args.branching
treeGeneration(r, l, b, n)
|
nilq/baby-python
|
python
|
import sys
import time
dy_import_module_symbols("shimstackinterface")
SERVER_IP = getmyip()
SERVER_PORT = 34829
UPLOAD_RATE = 1024 * 1024 * 15 # 15MB/s
DOWNLOAD_RATE = 1024 * 1024 * 128 # 128MB/s
DATA_TO_SEND = "HelloWorld" * 1024 * 1024
RECV_SIZE = 2**14 # 16384 bytes.
MSG_RECEIVED = ''
END_TAG = "@@END"
def launchserver():
  """
  <Purpose>
    Launch a server that receives and echos the message back.
  <Arguments>
    None
  <Side Effects>
    None
  <Exceptions>
    None
  <Return>
    None
  """
  # Repy sandbox API: listen through a NoopShim stack.
  shim_object = ShimStackInterface("(NoopShim)")
  tcpserver_socket = shim_object.listenforconnection(SERVER_IP, SERVER_PORT)
  # Wait (busy-poll) for exactly one client connection.
  while True:
    try:
      rip, rport, sockobj = tcpserver_socket.getconnection()
      break
    except SocketWouldBlockError:
      pass
    except (SocketClosedLocal, SocketClosedRemote):
      break
  msg_received = ''
  recv_closed = False
  send_closed = False
  # Echo back all the message that we receive. Exit out of the
  # loop once we get socket closed error.
  while True:
    try:
      msg_received += sockobj.recv(RECV_SIZE)
    except SocketWouldBlockError:
      pass
    except (SocketClosedLocal, SocketClosedRemote):
      break
    try:
      if len(msg_received) > 0:
        # send() may accept only part of the buffer; keep the remainder.
        data_sent = sockobj.send(msg_received)
        msg_received = msg_received[data_sent : ]
    except SocketWouldBlockError:
      pass
    except (SocketClosedLocal, SocketClosedRemote):
      break
def launch_test():
  """Drive the RateLimitShim test: start the echo server, upload
  DATA_TO_SEND through a rate-limited shim, receive the echo back, and
  verify that measured upload/download rates stay within 10% above the
  configured limits and that the echoed message is complete.
  Exits the sandbox with [ FAIL ] logged on any violation."""
  log("\nSetting upload rate to %dbytes/s. \nSetting download rate to %dbytes/s" % (UPLOAD_RATE, DOWNLOAD_RATE))
  # Launch the server and sleep for couple of seconds.
  createthread(launchserver)
  sleep(3)
  shim_obj = ShimStackInterface("(RateLimitShim,%s,%s)" % (UPLOAD_RATE, DOWNLOAD_RATE))
  try:
    sockobj = shim_obj.openconnection(SERVER_IP, SERVER_PORT, SERVER_IP, SERVER_PORT + 1, 10)
  except Exception, err:
    print "Found error: " + str(err)
    exitall()
  msg_to_send = DATA_TO_SEND + END_TAG
  # --------------------- Testing Upload --------------------------------
  cur_data_sent = 0
  log("\nStarting to send msg.")
  starttime = getruntime()
  while msg_to_send:
    try:
      data_sent = sockobj.send(msg_to_send)
    except SocketWouldBlockError, err:
      pass
    else:
      msg_to_send = msg_to_send[data_sent:]
      cur_data_sent += data_sent
  elapsed_time = getruntime() - starttime
  log("\nTime to upload: %fs. Upload rate: %fbytes/s" % (elapsed_time, len(DATA_TO_SEND + END_TAG)*1.0 / elapsed_time))
  log("\nTesting upload rate with 10% error")
  # Fractional overshoot of the measured rate relative to the cap.
  rate_over_percent = ((len(DATA_TO_SEND + END_TAG)*1.0 / elapsed_time) - UPLOAD_RATE) / UPLOAD_RATE
  if rate_over_percent > 0.10:
    log("[ FAIL ]")
    sys.stdout.flush()
    exitall()
  else:
    log("[ PASS ]")
  # -------------------------- Testing Download ------------------------------
  msg_received = ''
  log("\nStarting to recv echo msg.")
  starttime = getruntime()
  while True:
    try:
      data_received = sockobj.recv(RECV_SIZE)
    except SocketWouldBlockError, err:
      pass
    else:
      msg_received += data_received
      # END_TAG marks the end of the echoed stream.
      if END_TAG in data_received:
        break
  elapsed_time = getruntime() - starttime
  sockobj.close()
  log("\nTime to download: %fs. Download rate: %fbytes/s" % (elapsed_time, len(msg_received)*1.0 / elapsed_time))
  log("\nTesting download rate with 10% error")
  rate_over_percent = ((len(msg_received)*1.0 / elapsed_time) - DOWNLOAD_RATE) / DOWNLOAD_RATE
  if rate_over_percent > 0.10:
    log("[ FAIL ]")
    sys.stdout.flush()
    exitall()
  else:
    log("[ PASS ]")
  log("\nChecking message received len: ")
  try:
    assert(len(msg_received) == len(DATA_TO_SEND + END_TAG))
  except AssertionError:
    log("[ FAIL ]")
    sys.stdout.flush()
    exitall()
  else:
    log("[ PASS ]")
|
nilq/baby-python
|
python
|
#%%
from pssr import pssr
from speech_recognition import UnknownValueError, RequestError, Recognizer
print('oi')
# Demo: record from the PS Eye microphone array and transcribe the audio
# with Google Speech Recognition (German, full result dict).
r = Recognizer() #recognizes audio, outputs transcript
ps = pssr.PSRecognizer() #PSRecognizer instance to listen and generate the audio
psmic = pssr.PSMic(nChannels=3) #ps eye mic array
with psmic as source:
    print('*recording')
    audio = ps.listen(source)
    print('*done recording')
try:
    # to use another API key, use `r.recognize_google(audio, key="GOOGLE_SPEECH_RECOGNITION_API_KEY")`
    print("Google Speech Recognition thinks you said ")
    print(r.recognize_google(audio, language='de-DE',show_all=True))
except UnknownValueError:
    print("Google Speech Recognition could not understand audio")
except RequestError as e:
    print("Could not request results from Google Speech Recognition service; {0}".format(e))
|
nilq/baby-python
|
python
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.