text stringlengths 38 1.54M |
|---|
from django.db.models import (BooleanField, CharField, CheckConstraint,
                              DateField, Deferrable, Model, Q, TextChoices)
from django.utils.translation import gettext_lazy as _
class Consent(Model):
    """
    Stores Consent to treat.

    NOTE(review): no fields or Meta options are declared yet — the model
    currently persists only Django's implicit primary key.
    """
class Patient (Model):
    """
    Patient demographics.

    Ethnicity categories follow the United States Census Bureau race and
    ethnicity information:
    https://www.census.gov/topics/population/race/about.html
    with the improved ethnicity categories proposed in:
    https://www2.census.gov/cac/nac/meetings/2016-10/2016-nac-jones.pdf
    """
    class Ethnicity(TextChoices):
        # Three-letter stored codes with human-readable, translatable labels.
        ASIAN = "ASN", _("Asian - Far East, Southeast Asia, or the Indian subcontinent including, for example, Cambodia, China, India, Japan, Korea, Malaysia, Pakistan, the Philippine Islands, Thailand, and Vietnam.")
        BLACK="BLK",_("Black/origins in any of the Black racial groups of Africa.")
        HAWAIIAN= "HAI", _("Native Hawaiian - Hawaii, Guam, Samoa, or other Pacific Islands.")
        HISPANIC = "HPN" ,_("Hispanic")
        WHITE="WHT" ,_("White-origins in Europe, the Middle East, or North Africa.")
        OTHER="OTH", _("Other/Multiple/Unknown")
    # NOTE(review): the Ethnicity choices above are declared but no model
    # field uses them yet (e.g. CharField(max_length=3, choices=Ethnicity.choices)).
    first_name = CharField(max_length=100)
    middle_name = CharField(max_length=100, null=True)  # optional
    last_name = CharField(max_length=100)
    date_of_birth = DateField()
    def demographics(self):
        """Placeholder: intended to return the patient's demographic summary."""
        pass
    # NOTE(review): unfinished constraint sketch below — it would need to be a
    # `class Meta` (not META) with a completed Q() condition to take effect:
    # class META:
    #     CheckConstraint(check=Q(date_of_birth__gte=),name='dob_check', fields=['date_of_birth'], deferrable=Deferrable.DEFERRED,)
|
import math
import os
from py_scripts import vector_math
from py_scripts.file_handler import append_file
from py_scripts.vector_math import *
from noise import pnoise2
from FilePaths import in_models
# scaling UVs by 3d euclidean distance experiment
def write_obj_quad_tex_len(quad, v_i, fd_v, fd_t, fd_n, fd_f, u_vals, v_vals):
    """Emit one quad as two OBJ triangles, with explicit per-corner UVs.

    Vertices, normals, texture coordinates and faces go to four separate
    streams (fd_v / fd_n / fd_t / fd_f) so the caller can concatenate them
    into one valid OBJ file afterwards.

    :param quad: four corner points, each an [x, y, z] triple
    :param v_i: number of vertices already written (running 0-based offset)
    :param u_vals: per-corner U coordinates, same order as quad
    :param v_vals: per-corner V coordinates, same order as quad
    """
    for corner in quad:
        fd_v.write('v {:.6f} {:.6f} {:.6f}\n'.format(corner[0], corner[1], corner[2]))
    # One face normal per triangle of the quad.
    tri_normals = (
        cross_product(vec_from_to(quad[3], quad[1]), vec_from_to(quad[3], quad[0])),
        cross_product(vec_from_to(quad[0], quad[2]), vec_from_to(quad[0], quad[3])),
    )
    for normal in tri_normals:
        fd_n.write('vn {:.6f} {:.6f} {:.6f}\n'.format(normal[0], normal[1], normal[2]))
    for u, v in zip(u_vals, v_vals):
        fd_t.write('vt {:.6f} {:.6f}\n'.format(u, v))
    # Two normals per four vertices, so the normal index advances half as fast.
    f_i = v_i // 2 + 1
    # Triangle over corners 3, 1, 0 (OBJ indices are 1-based).
    fd_f.write(
        'f {:d}/{:d}/{:d} {:d}/{:d}/{:d} {:d}/{:d}/{:d}\n'.format(
            v_i + 4, v_i + 4, f_i, v_i + 2, v_i + 2, f_i, v_i + 1, v_i + 1, f_i))
    f_i += 1
    # Triangle over corners 0, 2, 3.
    fd_f.write(
        'f {:d}/{:d}/{:d} {:d}/{:d}/{:d} {:d}/{:d}/{:d}\n'.format(
            v_i + 1, v_i + 1, f_i, v_i + 3, v_i + 3, f_i, v_i + 4, v_i + 4, f_i))
# scaling UVs by 3d euclidean distance experiment
def generate_noise_file_tex_len(f, x_max, x_grid_count, z_max, z_grid_count, fd_main):
    """Tessellate the height field ``f`` over [0, x_max] x [0, z_max] into an
    OBJ mesh whose UVs are proportional to cumulative 3D euclidean arc length
    along each grid row/column (the "scaling UVs by 3d distance" experiment).

    BUGFIX: the two arc-length accumulation loops had ``x_grid_count`` and
    ``z_grid_count`` swapped; on non-square grids the normalisation loops
    below then read ``h_dict[z]`` / ``v_dict[x]`` at keys that were never
    filled (KeyError) or divided by lengths of the wrong row/column.

    :param f: height function f(x, z) -> y
    :param x_max, z_max: extent of the sampled rectangle
    :param x_grid_count, z_grid_count: number of cells along each axis
    :param fd_main: open writable stream receiving the final OBJ text
    """
    dx = x_max / x_grid_count
    dz = z_max / z_grid_count
    # h_dict[z]: total 3D length of grid row z traversed along x.
    h_dict = {}
    # v_dict[x]: total 3D length of grid column x traversed along z.
    v_dict = {}
    for z in range(0, z_grid_count + 1):
        h_dict[z] = 0
        for x in range(0, x_grid_count):
            p_from = [x*dx, f(x*dx, z*dz), z*dz]
            p_to = [x*dx+dx, f(x*dx+dx, z*dz), z*dz]
            h_dict[z] += vector_math.vec_length(vector_math.vec_from_to(p_from, p_to))
    for x in range(0, x_grid_count + 1):
        v_dict[x] = 0
        for z in range(0, z_grid_count):
            p_from = [x*dx, f(x*dx, z*dz), z*dz]
            p_to = [x*dx, f(x*dx, z*dz + dz), z*dz + dz]
            v_dict[x] += vector_math.vec_length(vector_math.vec_from_to(p_from, p_to))
    # Cumulative arc length normalised per row/column -> UV in [0, 1].
    u_map = {}
    v_map = {}
    for z in range(0, z_grid_count + 1):
        u_map[(0, z)] = 0.0
        for x in range(1, x_grid_count + 1):
            p_to = [x*dx, f(x*dx, z*dz), z*dz]
            p_from = [x*dx-dx, f(x*dx-dx, z*dz), z*dz]
            u_map[(x, z)] = u_map[(x-1, z)] + vector_math.vec_length(vector_math.vec_from_to(p_from, p_to)) / h_dict[z]
    for x in range(0, x_grid_count + 1):
        v_map[(x, 0)] = 0.0
        for z in range(1, z_grid_count + 1):
            p_to = [x*dx, f(x*dx, z*dz), z*dz]
            p_from = [x*dx, f(x*dx, z*dz - dz), z*dz - dz]
            v_map[(x, z)] = v_map[(x, z-1)] + vector_math.vec_length(vector_math.vec_from_to(p_from, p_to)) / v_dict[x]
    with open(in_models('face_temp.obj'), 'w') as face_fd:
        with open(in_models('normal_temp.obj'), 'w') as normal_fd:
            with open(in_models('texture_temp.obj'), 'w') as texture_fd:
                for x in range(0, x_grid_count):
                    for z in range(0, z_grid_count):
                        # Corner order matches the quad returned by grid_xyz.
                        u_li = [u_map[(x, z)], u_map[(x+1, z)],
                                u_map[(x, z+1)], u_map[(x+1, z+1)]]
                        v_li = [v_map[(x, z)], v_map[(x+1, z)],
                                v_map[(x, z+1)], v_map[(x+1, z+1)]]
                        quad = grid_xyz(f, x * dx, z * dz, dx, dz)
                        write_obj_quad_tex_len(quad, z * 4 + (z_grid_count * 4 * x),
                                               fd_main, texture_fd, normal_fd, face_fd,
                                               u_li, v_li)
    # Stitch the temp streams onto the main OBJ once they are closed/flushed.
    append_file(fd_main, in_models('texture_temp.obj'))
    append_file(fd_main, in_models('normal_temp.obj'))
    append_file(fd_main, in_models('face_temp.obj'))
# linear map of plane to uv plane for texture
def write_obj_quad(quad, v_i, fd_v, fd_t, fd_n, fd_f, u_adjustment, v_adjustment):
    """Emit one quad as two OBJ triangles; UVs come from mapping each corner's
    x/z coordinates through the supplied adjustment callables.

    :param quad: four corner points, each an [x, y, z] triple
    :param v_i: number of vertices already written (running 0-based offset)
    :param u_adjustment: callable mapping a corner's x coordinate to U
    :param v_adjustment: callable mapping a corner's z coordinate to V
    """
    for corner in quad:
        fd_v.write('v {:.6f} {:.6f} {:.6f}\n'.format(corner[0], corner[1], corner[2]))
    # One face normal per triangle of the quad.
    tri_normals = (
        cross_product(vec_from_to(quad[3], quad[1]), vec_from_to(quad[3], quad[0])),
        cross_product(vec_from_to(quad[0], quad[2]), vec_from_to(quad[0], quad[3])),
    )
    for normal in tri_normals:
        fd_n.write('vn {:.6f} {:.6f} {:.6f}\n'.format(normal[0], normal[1], normal[2]))
    for corner in quad:
        fd_t.write('vt {:.6f} {:.6f}\n'.format(u_adjustment(corner[0]), v_adjustment(corner[2])))
    # Two normals per four vertices, so the normal index advances half as fast.
    f_i = v_i // 2 + 1
    # Triangle over corners 3, 1, 0 (OBJ indices are 1-based).
    fd_f.write(
        'f {:d}/{:d}/{:d} {:d}/{:d}/{:d} {:d}/{:d}/{:d}\n'.format(
            v_i + 4, v_i + 4, f_i, v_i + 2, v_i + 2, f_i, v_i + 1, v_i + 1, f_i))
    f_i += 1
    # Triangle over corners 0, 2, 3.
    fd_f.write(
        'f {:d}/{:d}/{:d} {:d}/{:d}/{:d} {:d}/{:d}/{:d}\n'.format(
            v_i + 1, v_i + 1, f_i, v_i + 3, v_i + 3, f_i, v_i + 4, v_i + 4, f_i))
def generate_noise_file(f, x_max, x_grid_count, z_max, z_grid_count, fd_main):
    """Tessellate the height field ``f`` over [0, x_max] x [0, z_max] into an
    OBJ mesh, mapping UVs linearly from the xz-plane onto [0, 1] x [0, 1]."""
    cell_dx = x_max / x_grid_count
    cell_dz = z_max / z_grid_count
    with open(in_models('face_temp.obj'), 'w') as face_fd, \
            open(in_models('normal_temp.obj'), 'w') as normal_fd, \
            open(in_models('texture_temp.obj'), 'w') as texture_fd:
        for xi in range(x_grid_count):
            for zi in range(z_grid_count):
                quad = grid_xyz(f, xi * cell_dx, zi * cell_dz, cell_dx, cell_dz)
                vertex_offset = zi * 4 + (z_grid_count * 4 * xi)
                write_obj_quad(quad, vertex_offset,
                               fd_main, texture_fd, normal_fd, face_fd,
                               lambda u: u / x_max,
                               lambda v: v / z_max)
    # Append the finished temp streams to the main OBJ file.
    for temp_name in ('texture_temp.obj', 'normal_temp.obj', 'face_temp.obj'):
        append_file(fd_main, in_models(temp_name))
|
from unittest import TestCase
import arteria
from arteria.web.routes import RouteService
import mock
class RoutesServiceTest(TestCase):
    """Tests for RouteService help-document generation."""

    def test_help_doc_generated(self):
        # The service only needs an app object to hold on to; mock it out.
        app_svc = mock.MagicMock()
        route_svc = RouteService(app_svc, debug=False)
        routes = [
            ("/route0", TestHandler),
            ("/route1", TestHandler),
        ]
        route_svc.set_routes(routes)
        base_url = "http://self"
        doc_entries = route_svc.get_help(base_url).get("doc")
        # One help entry per configured route, in route order.
        self.assertEqual(len(doc_entries), len(routes))
        for index, entry in enumerate(doc_entries):
            self.assertEqual(entry["route"],
                             "{base_url}/route{index}".format(base_url=base_url,
                                                              index=index))
            methods = entry["methods"]
            self.assertEqual(len(methods), 2)
            # Only handler methods whose docstring starts with "True" are kept.
            actual = {(name, doc.split(':')[0] == "True")
                      for name, doc in methods.items()}
            self.assertEqual(actual, {("get", True), ("delete", True)})
class TestHandler:
    """Used in RoutesServiceTest.test_help_doc_generated"""
    # NOTE: the method docstrings below are test fixtures — the help
    # generation under test reads them, so their exact "True:"/"False:"
    # prefixes and the deliberate absence of a docstring on put() are part
    # of the test. Do not "improve" them.
    def get(self):
        """True: Documentation should show up"""
        pass
    def put(self):
        pass # Documentation should not show up
    @arteria.undocumented
    def post(self):
        """False: Documentation should not show up"""
        pass
    def delete(self):
        """True: Documentation should show up"""
        pass
|
from .context import Description, Context, SharedExamples
from .registry import get_registry
def describe(described, **kwargs):
    """Create a Description of ``described`` under the currently open context."""
    active = get_registry().current_context()
    return Description(described, parent=active, **kwargs)
def context(description, **kwargs):
    """Create a nested Context under the currently open context."""
    active = get_registry().current_context()
    return Context(description, parent=active, **kwargs)
def shared(description, **kwargs):
    """Create a SharedExamples group under the currently open context."""
    active = get_registry().current_context()
    return SharedExamples(description, parent=active, **kwargs)
def it_behaves_like(name, **kwargs):
    """Apply the shared example group ``name`` to the current context
    (delegates to the context's ``behaves_like``)."""
    get_registry().current_context().behaves_like(name, **kwargs)
|
# -*- coding: utf-8 -*-
from ._base import Feature
from ._state_vars import SVF
from ._tabular import (ClassicTabularNovelty, DepthBasedTabularNovelty,
                       DepthBasedTabularNoveltyOptimised, TabularNovelty)
# Registry mapping public product keys to their implementation classes.
_factory_entries = {
    'TabularNovelty': TabularNovelty,
    'ClassicTabularNovelty': ClassicTabularNovelty,
    'DepthBasedTabularNovelty': DepthBasedTabularNovelty,
    'DepthBasedTabularNoveltyOptimised': DepthBasedTabularNoveltyOptimised,
}
def create(product_key, **kwargs):
    """Instantiate the class registered under ``product_key``, forwarding
    any keyword arguments to its constructor.

    :raises KeyError: if ``product_key`` is not a registered entry.
    """
    product_cls = _factory_entries[product_key]
    return product_cls(**kwargs)
|
from db_config import db_init as db
# User model
# Data-model class for the application's users
class User(db.Model):
    """ORM model backing the ``user`` table."""
    # Name of the database table for this model.
    __tablename__ = 'user'
    # Column definitions.
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    username = db.Column(db.String(255), nullable=False)
    # NOTE(review): stored as a plain string — confirm it is hashed upstream.
    password = db.Column(db.String(255), nullable=False)
    phone = db.Column(db.String(255), nullable=True)
    others = db.Column(db.String(255), nullable=True)
    def __repr__(self):
        # Printable representation: identify the object by username.
        return '<User %s>' % self.username
|
from django.db import models
# Create your models here.
class User(models.Model):
    """Application user account."""
    first_name = models.CharField(max_length=50)
    last_name = models.CharField(max_length=50)
    # NOTE(review): consider EmailField (and possibly unique=True) — a plain
    # CharField performs no address validation; confirm before changing the
    # schema, as it requires a migration.
    email = models.CharField(max_length=50)
    # NOTE(review): confirm the password is hashed before being stored here.
    password = models.CharField(max_length=255)
    # camelCase name kept as-is: renaming would require a migration.
    isAdmin = models.BooleanField(default=False)
    created_at = models.DateTimeField(auto_now_add=True)  # set once on insert
    updated_at = models.DateTimeField(auto_now=True)      # refreshed on every save
    def __repr__(self):
        """Debug representation showing name and email."""
        return f'first_name: {self.first_name}, last_name: {self.last_name}, email: {self.email}'
class File(models.Model):
    """A stored file owned by a User."""
    name = models.CharField(max_length=50)
    path = models.CharField(max_length=255)
    created_at = models.DateTimeField(auto_now_add=True)
    # Owning user; CASCADE deletes the user's files along with the user.
    user = models.ForeignKey(User, related_name="files", on_delete=models.CASCADE)
class Report(models.Model):
    """A report produced from a File, belonging to a User."""
    name = models.CharField(max_length=50)
    # NOTE(review): max_length=50 here but 255 on File/Message paths —
    # confirm the shorter limit is intentional.
    path = models.CharField(max_length=50)
    user = models.ForeignKey(User, related_name="reports", on_delete=models.CASCADE)
    file = models.ForeignKey(File, related_name="reports", on_delete=models.CASCADE)
    created_at = models.DateTimeField(auto_now_add=True)
class Message(models.Model):
    """A message sent by a User."""
    content = models.TextField()
    path = models.CharField(max_length=255)
    created_at = models.DateTimeField(auto_now_add=True)
    # Sending user; CASCADE deletes a user's messages along with the user.
    sender = models.ForeignKey(User, related_name="messages", on_delete=models.CASCADE)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
The :mod:`samplesizelib.linear.bayesian` contains classes:
- :class:`samplesizelib.linear.bayesian.APVCEstimator`
- :class:`samplesizelib.linear.bayesian.ACCEstimator`
- :class:`samplesizelib.linear.bayesian.ALCEstimator`
- :class:`samplesizelib.linear.bayesian.MaxUtilityEstimator`
- :class:`samplesizelib.linear.bayesian.KLEstimator`
"""
from __future__ import print_function
__docformat__ = 'restructuredtext'
from multiprocessing import Pool
import numpy as np
import scipy.stats as sps
from scipy.optimize import minimize_scalar
from ..shared.estimator import SampleSizeEstimator
from ..shared.utils import Dataset
class APVCEstimator(SampleSizeEstimator):
    r"""
    Description of APVC Method (presumably Average Posterior Variance
    Criterion — confirm against the accompanying paper).

    ``forward`` reports as ``m*`` the smallest candidate subsample size whose
    averaged posterior-spread statistic falls below ``epsilon``.

    :param statmodel: the machine learning algorithm
    :type statmodel: RegressionModel or LogisticModel
    :param averaging: number of random subsamples scored per candidate size
        (default 100)
    :type averaging: int
    :param epsilon: spread threshold defining a sufficient sample size
        (default 0.5)
    :type epsilon: float
    :param begin: smallest candidate size (default: twice the feature count)
    :type begin: int
    :param end: largest candidate size (default: dataset size - 1)
    :type end: int
    :param num: step between consecutive candidate sizes (default 5)
    :type num: int
    :param multiprocess: score subsamples in a multiprocessing.Pool
        (default False)
    :type multiprocess: bool
    :param progressbar: show a progress bar over candidate sizes
        (default False)
    :type progressbar: bool
    """
    def __init__(self, statmodel, **kwards):
        r"""Constructor method
        """
        super().__init__()
        self.statmodel = statmodel
        # Validate every keyword eagerly so bad configurations fail fast.
        self.averaging = int(kwards.pop('averaging', 100))
        if self.averaging <= 0:
            raise ValueError(
                "The averaging should be positive but get {}".format(
                    self.averaging))
        self.epsilon = kwards.pop('epsilon', 0.5)
        if self.epsilon <= 0:
            raise ValueError(
                "The epsilon must be positive value but get {}".format(
                    self.epsilon))
        self.begin = kwards.pop('begin', None)
        if self.begin is not None and self.begin < 0:
            raise ValueError(
                "The begin must be positive value but get {}".format(
                    self.begin))
        self.end = kwards.pop('end', None)
        if self.end is not None and self.end < 0:
            raise ValueError(
                "The end must be positive value but get {}".format(
                    self.end))
        if self.end is not None and self.begin is not None and self.end <= self.begin:
            raise ValueError(
                "The end value must be greater than the begin value but {}<={}".format(
                    self.end, self.begin))
        self.num = kwards.pop('num', 5)
        if self.num <=0:
            raise ValueError(
                "The num must be positive value but get {}".format(
                    self.num))
        if self.end is not None and self.begin is not None and self.num >= self.end - self.begin:
            raise ValueError(
                "The num value must be smaler than (end - begin) but {}>={}".format(
                    self.num, self.end - self.begin))
        self.multiprocess = kwards.pop('multiprocess', False)
        if not isinstance(self.multiprocess, bool):
            raise ValueError(
                "The multiprocess must be bool value but get {}".format(
                    self.multiprocess))
        self.progressbar = kwards.pop('progressbar', False)
        if not isinstance(self.progressbar, bool):
            raise ValueError(
                "The progressbar must be bool value but get {}".format(
                    self.progressbar))
        # Anything left over is an unknown option.
        if kwards:
            raise ValueError("Invalid parameters: %s" % str(kwards))
        # Set by forward(); kept on the instance so Pool workers can reach it.
        self.dataset = None
    def _hDispersion(self, dataset):
        r"""
        Return a scalar spread measure of the Laplace-approximated posterior
        over model parameters fitted on ``dataset``.
        """
        X, y = dataset.sample()
        w_hat = self.statmodel(y, X).fit()
        # Posterior covariance via Laplace approximation; the 0.01*I term
        # acts as a regulariser on the (negated) Hessian.
        cov = np.linalg.inv(
            0.01*np.eye(w_hat.shape[0]) - self.statmodel(y, X).hessian(w_hat))
        return np.sqrt(np.sum((np.linalg.eigvals(cov)/2)**2))
    def _score_subsample(self, m):
        r"""
        Return the spread statistic for one random subsample of size ``m``.
        """
        X_m, y_m = self.dataset.sample(m)
        dataset_m = Dataset(X_m, y_m)
        return self._hDispersion(dataset_m)
    def forward(self, features, target):
        r"""
        Returns sample size prediction for the given dataset.
        :param features: The tensor of shape
        `num_elements` :math:`\times` `num_feature`.
        :type features: array.
        :param target: The tensor of shape `num_elements`.
        :type target: array.
        :return: sample size estimation for the given dataset.
        :rtype: dict
        """
        self.dataset = Dataset(features, target)
        # Resolve the candidate-size range, using data-driven defaults.
        if self.end is None:
            end = len(self.dataset) - 1
        else:
            end = self.end
        if self.begin is None:
            begin = 2*self.dataset.n
        else:
            begin = self.begin
        if end <= begin:
            raise ValueError(
                "The end value must be greater than the begin value but {}<={}".format(
                    end, begin))
        if self.num >= end - begin:
            raise ValueError(
                "The num value must be smaler than (end - begin) but {}>={}".format(
                    self.num, end - begin))
        # Candidate subsample sizes: begin, begin+num, ... (num is the step).
        subset_sizes = np.arange(begin, end, self.num, dtype=np.int64)
        list_of_answers = []
        points_one = np.ones(self.averaging, dtype=np.int64)
        if self.multiprocess:
            pool = Pool()
            mapping = pool.map
        else:
            mapping = map
        if self.progressbar:
            iterator = self._progressbar(subset_sizes)
        else:
            iterator = subset_sizes
        for i, m in enumerate(iterator):
            # Score `averaging` independent subsamples of size m.
            list_of_answers.append(
                np.asarray(
                    list(mapping(self._score_subsample, m*points_one))))
            self._set_status(100.*(i+1)/len(subset_sizes))
        if self.multiprocess:
            pool.close()
            pool.join()
        list_of_answers = np.asarray(list_of_answers)
        # Mean and standard deviation of the statistic per candidate size.
        list_of_E = np.mean(list_of_answers, axis = 1)
        list_of_S = np.std(list_of_answers, axis = 1)
        # Scan from the largest size down; m* ends up as the smallest size
        # whose mean spread is below epsilon.
        # NOTE(review): there is no `break`, so one small-spread size wins
        # even if some larger size exceeds epsilon — confirm this is intended.
        m_size = end
        for m, mean in zip(reversed(subset_sizes), reversed(list_of_E)):
            if mean < self.epsilon:
                m_size = m
        return {'m*': m_size,
                'E': np.array(list_of_E),
                'S': np.array(list_of_S),
                'm': np.array(subset_sizes),
                }
class ACCEstimator(SampleSizeEstimator):
    r"""
    Description of ACC Method (presumably Average Coverage Criterion —
    confirm against the accompanying paper).

    ``forward`` reports as ``m*`` the smallest candidate subsample size whose
    averaged posterior coverage of the radius ``3*length`` ball exceeds
    ``1 - alpha``.

    :param statmodel: the machine learning algorithm
    :type statmodel: RegressionModel or LogisticModel
    :param averaging: number of random subsamples scored per candidate size
        (default 100)
    :type averaging: int
    :param alpha: significance level; coverage must exceed ``1 - alpha``
        (default 0.05)
    :type alpha: float
    :param length: ball-radius scale used in the coverage test (default 0.25)
    :type length: float
    :param begin: smallest candidate size (default: twice the feature count)
    :type begin: int
    :param end: largest candidate size (default: dataset size - 1)
    :type end: int
    :param num: step between consecutive candidate sizes (default 5)
    :type num: int
    :param multiprocess: score subsamples in a multiprocessing.Pool
        (default False)
    :type multiprocess: bool
    :param progressbar: show a progress bar over candidate sizes
        (default False)
    :type progressbar: bool
    """
    def __init__(self, statmodel, **kwards):
        r"""Constructor method
        """
        super().__init__()
        self.statmodel = statmodel
        # Validate every keyword eagerly so bad configurations fail fast.
        self.averaging = int(kwards.pop('averaging', 100))
        if self.averaging <= 0:
            raise ValueError(
                "The averaging should be positive but get {}".format(
                    self.averaging))
        self.length = kwards.pop('length', 0.25)
        if self.length <= 0:
            raise ValueError(
                "The length must be positive value but get {}".format(
                    self.length))
        self.alpha = kwards.pop('alpha', 0.05)
        if self.alpha < 0 or self.alpha > 1:
            raise ValueError(
                "The alpha must be between 0 and 1 but get {}".format(
                    self.alpha))
        self.begin = kwards.pop('begin', None)
        if self.begin is not None and self.begin < 0:
            raise ValueError(
                "The begin must be positive value but get {}".format(
                    self.begin))
        self.end = kwards.pop('end', None)
        if self.end is not None and self.end < 0:
            raise ValueError(
                "The end must be positive value but get {}".format(
                    self.end))
        if self.end is not None and self.begin is not None and self.end <= self.begin:
            raise ValueError(
                "The end value must be greater than the begin value but {}<={}".format(
                    self.end, self.begin))
        self.num = kwards.pop('num', 5)
        if self.num <=0:
            raise ValueError(
                "The num must be positive value but get {}".format(
                    self.num))
        if self.end is not None and self.begin is not None and self.num >= self.end - self.begin:
            raise ValueError(
                "The num value must be smaler than (end - begin) but {}>={}".format(
                    self.num, self.end - self.begin))
        self.multiprocess = kwards.pop('multiprocess', False)
        if not isinstance(self.multiprocess, bool):
            raise ValueError(
                "The multiprocess must be bool value but get {}".format(
                    self.multiprocess))
        self.progressbar = kwards.pop('progressbar', False)
        if not isinstance(self.progressbar, bool):
            raise ValueError(
                "The progressbar must be bool value but get {}".format(
                    self.progressbar))
        # Anything left over is an unknown option.
        if kwards:
            raise ValueError("Invalid parameters: %s" % str(kwards))
        # Set by forward(); kept on the instance so Pool workers can reach it.
        self.dataset = None
    def _iDistribution(self, dataset):
        r"""
        Return the estimated posterior mass of the radius ``3*length`` ball
        around zero, using 1000 Monte-Carlo draws from the zero-mean Laplace
        posterior approximation fitted on ``dataset``.
        """
        X, y = dataset.sample()
        w_hat = self.statmodel(y, X).fit()
        # Posterior covariance via Laplace approximation (0.01*I regularises).
        cov = np.linalg.inv(
            0.01*np.eye(w_hat.shape[0]) - self.statmodel(y, X).hessian(w_hat))
        W = sps.multivariate_normal(mean=np.zeros(w_hat.shape[0]), cov = cov).rvs(size=1000)
        return (np.sqrt((W**2).sum(axis=1)) < 3*self.length).mean()
    def _score_subsample(self, m):
        r"""
        Return the coverage statistic for one random subsample of size ``m``.
        """
        X_m, y_m = self.dataset.sample(m)
        dataset_m = Dataset(X_m, y_m)
        return self._iDistribution(dataset_m)
    def forward(self, features, target):
        r"""
        Returns sample size prediction for the given dataset.
        :param features: The tensor of shape
        `num_elements` :math:`\times` `num_feature`.
        :type features: array.
        :param target: The tensor of shape `num_elements`.
        :type target: array.
        :return: sample size estimation for the given dataset.
        :rtype: dict
        """
        self.dataset = Dataset(features, target)
        # Resolve the candidate-size range, using data-driven defaults.
        if self.end is None:
            end = len(self.dataset) - 1
        else:
            end = self.end
        if self.begin is None:
            begin = 2*self.dataset.n
        else:
            begin = self.begin
        if end <= begin:
            raise ValueError(
                "The end value must be greater than the begin value but {}<={}".format(
                    end, begin))
        if self.num >= end - begin:
            raise ValueError(
                "The num value must be smaler than (end - begin) but {}>={}".format(
                    self.num, end - begin))
        # Candidate subsample sizes: begin, begin+num, ... (num is the step).
        subset_sizes = np.arange(begin, end, self.num, dtype=np.int64)
        list_of_answers = []
        points_one = np.ones(self.averaging, dtype=np.int64)
        if self.multiprocess:
            pool = Pool()
            mapping = pool.map
        else:
            mapping = map
        if self.progressbar:
            iterator = self._progressbar(subset_sizes)
        else:
            iterator = subset_sizes
        for i, m in enumerate(iterator):
            # Score `averaging` independent subsamples of size m.
            list_of_answers.append(
                np.asarray(
                    list(mapping(self._score_subsample, m*points_one))))
            self._set_status(100.*(i+1)/len(subset_sizes))
        if self.multiprocess:
            pool.close()
            pool.join()
        list_of_answers = np.asarray(list_of_answers)
        # Mean and standard deviation of the statistic per candidate size.
        list_of_E = np.mean(list_of_answers, axis = 1)
        list_of_S = np.std(list_of_answers, axis = 1)
        # Scan from the largest size down; m* ends up as the smallest size
        # whose mean coverage exceeds 1 - alpha.
        # NOTE(review): no `break` — see the matching note in APVCEstimator.
        m_size = end
        for m, mean in zip(reversed(subset_sizes), reversed(list_of_E)):
            if mean > 1 - self.alpha:
                m_size = m
        return {'m*': m_size,
                'E': np.array(list_of_E),
                'S': np.array(list_of_S),
                'm': np.array(subset_sizes),
                }
class ALCEstimator(SampleSizeEstimator):
    r"""
    Description of ALC Method (presumably Average Length Criterion —
    confirm against the accompanying paper).

    ``forward`` reports as ``m*`` the smallest candidate subsample size whose
    averaged credible-ball radius falls below ``length``.

    :param statmodel: the machine learning algorithm
    :type statmodel: RegressionModel or LogisticModel
    :param averaging: number of random subsamples scored per candidate size
        (default 100)
    :type averaging: int
    :param alpha: tail mass used when searching for the credible-ball radius
        (default 0.05)
    :type alpha: float
    :param length: radius threshold defining a sufficient sample size
        (default 0.5)
    :type length: float
    :param begin: smallest candidate size (default: twice the feature count)
    :type begin: int
    :param end: largest candidate size (default: dataset size - 1)
    :type end: int
    :param num: step between consecutive candidate sizes (default 5)
    :type num: int
    :param multiprocess: score subsamples in a multiprocessing.Pool
        (default False)
    :type multiprocess: bool
    :param progressbar: show a progress bar over candidate sizes
        (default False)
    :type progressbar: bool
    """
    def __init__(self, statmodel, **kwards):
        r"""Constructor method
        """
        super().__init__()
        self.statmodel = statmodel
        # Validate every keyword eagerly so bad configurations fail fast.
        self.averaging = int(kwards.pop('averaging', 100))
        if self.averaging <= 0:
            raise ValueError(
                "The averaging should be positive but get {}".format(
                    self.averaging))
        self.length = kwards.pop('length', 0.5)
        if self.length <= 0:
            raise ValueError(
                "The length must be positive value but get {}".format(
                    self.length))
        self.alpha = kwards.pop('alpha', 0.05)
        if self.alpha < 0 or self.alpha > 1:
            raise ValueError(
                "The alpha must be between 0 and 1 but get {}".format(
                    self.alpha))
        self.begin = kwards.pop('begin', None)
        if self.begin is not None and self.begin < 0:
            raise ValueError(
                "The begin must be positive value but get {}".format(
                    self.begin))
        self.end = kwards.pop('end', None)
        if self.end is not None and self.end < 0:
            raise ValueError(
                "The end must be positive value but get {}".format(
                    self.end))
        if self.end is not None and self.begin is not None and self.end <= self.begin:
            raise ValueError(
                "The end value must be greater than the begin value but {}<={}".format(
                    self.end, self.begin))
        self.num = kwards.pop('num', 5)
        if self.num <=0:
            raise ValueError(
                "The num must be positive value but get {}".format(
                    self.num))
        if self.end is not None and self.begin is not None and self.num >= self.end - self.begin:
            raise ValueError(
                "The num value must be smaler than (end - begin) but {}>={}".format(
                    self.num, self.end - self.begin))
        self.multiprocess = kwards.pop('multiprocess', False)
        if not isinstance(self.multiprocess, bool):
            raise ValueError(
                "The multiprocess must be bool value but get {}".format(
                    self.multiprocess))
        self.progressbar = kwards.pop('progressbar', False)
        if not isinstance(self.progressbar, bool):
            raise ValueError(
                "The progressbar must be bool value but get {}".format(
                    self.progressbar))
        # Anything left over is an unknown option.
        if kwards:
            raise ValueError("Invalid parameters: %s" % str(kwards))
        # Set by forward(); kept on the instance so Pool workers can reach it.
        self.dataset = None
    def _aDistribution(self, dataset):
        r"""
        Return (approximately) the radius ``r`` for which the posterior mass
        outside the radius ``3*r`` ball equals ``alpha`` — i.e. a
        ``1 - alpha`` credible-ball radius — found by a bounded scalar search
        over (0.01, 1).
        """
        X, y = dataset.sample()
        w_hat = self.statmodel(y, X).fit()
        # Posterior covariance via Laplace approximation (0.01*I regularises).
        cov = np.linalg.inv(
            0.01*np.eye(w_hat.shape[0]) - self.statmodel(y, X).hessian(w_hat))
        W = sps.multivariate_normal(mean=np.zeros(w_hat.shape[0]), cov = cov).rvs(size=1000)
        # |P(||w|| > 3r) - alpha| is minimised over r; maxiter is tiny, so
        # the result is a coarse approximation.
        function = lambda r: np.abs( (np.sqrt((W**2).sum(axis=1)) > 3*r).mean() - self.alpha)
        return minimize_scalar(function, bounds=(0.01, 1), method='Bounded', options={'maxiter':10})['x']
    def _score_subsample(self, m):
        r"""
        Return the credible-radius statistic for one random subsample of
        size ``m``.
        """
        X_m, y_m = self.dataset.sample(m)
        dataset_m = Dataset(X_m, y_m)
        return self._aDistribution(dataset_m)
    def forward(self, features, target):
        r"""
        Returns sample size prediction for the given dataset.
        :param features: The tensor of shape
        `num_elements` :math:`\times` `num_feature`.
        :type features: array.
        :param target: The tensor of shape `num_elements`.
        :type target: array.
        :return: sample size estimation for the given dataset.
        :rtype: dict
        """
        self.dataset = Dataset(features, target)
        # Resolve the candidate-size range, using data-driven defaults.
        if self.end is None:
            end = len(self.dataset) - 1
        else:
            end = self.end
        if self.begin is None:
            begin = 2*self.dataset.n
        else:
            begin = self.begin
        if end <= begin:
            raise ValueError(
                "The end value must be greater than the begin value but {}<={}".format(
                    end, begin))
        if self.num >= end - begin:
            raise ValueError(
                "The num value must be smaler than (end - begin) but {}>={}".format(
                    self.num, end - begin))
        # Candidate subsample sizes: begin, begin+num, ... (num is the step).
        subset_sizes = np.arange(begin, end, self.num, dtype=np.int64)
        list_of_answers = []
        points_one = np.ones(self.averaging, dtype=np.int64)
        if self.multiprocess:
            pool = Pool()
            mapping = pool.map
        else:
            mapping = map
        if self.progressbar:
            iterator = self._progressbar(subset_sizes)
        else:
            iterator = subset_sizes
        for i, m in enumerate(iterator):
            # Score `averaging` independent subsamples of size m.
            list_of_answers.append(
                np.asarray(
                    list(mapping(self._score_subsample, m*points_one))))
            self._set_status(100.*(i+1)/len(subset_sizes))
        if self.multiprocess:
            pool.close()
            pool.join()
        list_of_answers = np.asarray(list_of_answers)
        # Mean and standard deviation of the statistic per candidate size.
        list_of_E = np.mean(list_of_answers, axis = 1)
        list_of_S = np.std(list_of_answers, axis = 1)
        # Scan from the largest size down; m* ends up as the smallest size
        # whose mean credible radius is below the length threshold.
        # NOTE(review): no `break` — see the matching note in APVCEstimator.
        m_size = end
        for m, mean in zip(reversed(subset_sizes), reversed(list_of_E)):
            if mean < self.length:
                m_size = m
        return {'m*': m_size,
                'E': np.array(list_of_E),
                'S': np.array(list_of_S),
                'm': np.array(subset_sizes),
                }
class MaxUtilityEstimator(SampleSizeEstimator):
    r"""
    Description of Utility Maximisation Method.

    ``forward`` reports as ``m*`` the candidate subsample size whose averaged
    utility (posterior log-joint per element minus a linear cost in the
    sample size) is maximal.

    :param statmodel: the machine learning algorithm
    :type statmodel: RegressionModel or LogisticModel
    :param averaging: number of random subsamples scored per candidate size
        (default 100)
    :type averaging: int
    :param c: per-element cost of acquiring data (default 0.005)
    :type c: float
    :param begin: smallest candidate size (default: twice the feature count)
    :type begin: int
    :param end: largest candidate size (default: dataset size - 1)
    :type end: int
    :param num: step between consecutive candidate sizes (default 5)
    :type num: int
    :param multiprocess: score subsamples in a multiprocessing.Pool
        (default False)
    :type multiprocess: bool
    :param progressbar: show a progress bar over candidate sizes
        (default False)
    :type progressbar: bool
    """
    def __init__(self, statmodel, **kwards):
        r"""Constructor method
        """
        super().__init__()
        self.statmodel = statmodel
        # Validate every keyword eagerly so bad configurations fail fast.
        self.averaging = int(kwards.pop('averaging', 100))
        if self.averaging <= 0:
            raise ValueError(
                "The averaging should be positive but get {}".format(
                    self.averaging))
        self.c = kwards.pop('c', 0.005)
        if self.c <= 0:
            raise ValueError(
                "The c must be positive value but get {}".format(
                    self.c))
        self.begin = kwards.pop('begin', None)
        if self.begin is not None and self.begin < 0:
            raise ValueError(
                "The begin must be positive value but get {}".format(
                    self.begin))
        self.end = kwards.pop('end', None)
        if self.end is not None and self.end < 0:
            raise ValueError(
                "The end must be positive value but get {}".format(
                    self.end))
        if self.end is not None and self.begin is not None and self.end <= self.begin:
            raise ValueError(
                "The end value must be greater than the begin value but {}<={}".format(
                    self.end, self.begin))
        self.num = kwards.pop('num', 5)
        if self.num <=0:
            raise ValueError(
                "The num must be positive value but get {}".format(
                    self.num))
        if self.end is not None and self.begin is not None and self.num >= self.end - self.begin:
            raise ValueError(
                "The num value must be smaler than (end - begin) but {}>={}".format(
                    self.num, self.end - self.begin))
        self.multiprocess = kwards.pop('multiprocess', False)
        if not isinstance(self.multiprocess, bool):
            raise ValueError(
                "The multiprocess must be bool value but get {}".format(
                    self.multiprocess))
        self.progressbar = kwards.pop('progressbar', False)
        if not isinstance(self.progressbar, bool):
            raise ValueError(
                "The progressbar must be bool value but get {}".format(
                    self.progressbar))
        # Anything left over is an unknown option.
        if kwards:
            raise ValueError("Invalid parameters: %s" % str(kwards))
        # Set by forward(); kept on the instance so Pool workers can reach it.
        self.dataset = None
    def _uFunction(self, dataset):
        r"""
        Return the estimated expected utility of ``dataset``: the averaged
        posterior log-joint per element minus a linear cost ``c`` per element.
        """
        X, y = dataset.sample()
        model = self.statmodel(y, X)
        w_hat = model.fit()
        # Posterior covariance via Laplace approximation (0.01*I regularises).
        cov = np.linalg.inv(
            0.01*np.eye(w_hat.shape[0]) - model.hessian(w_hat))
        # NOTE(review): the prior here has covariance 0.01*I while the
        # Hessian regulariser above adds 0.01*I as a *precision* term —
        # confirm the two are meant to describe the same prior.
        prior = sps.multivariate_normal(mean = np.zeros(w_hat.shape[0]), cov = 0.01*np.eye(w_hat.shape[0]))
        # 100 Monte-Carlo draws from the Laplace posterior around w_hat.
        W = sps.multivariate_normal(mean=w_hat, cov = cov).rvs(size=100)
        u = []
        for w in W:
            u.append(model.loglike(w) + prior.logpdf(w))
        return np.mean(u)/y.shape[0] - self.c*y.shape[0]
    def _score_subsample(self, m):
        r"""
        Return the utility statistic for one random subsample of size ``m``.
        """
        X_m, y_m = self.dataset.sample(m)
        dataset_m = Dataset(X_m, y_m)
        return self._uFunction(dataset_m)
    def forward(self, features, target):
        r"""
        Returns sample size prediction for the given dataset.
        :param features: The tensor of shape
        `num_elements` :math:`\times` `num_feature`.
        :type features: array.
        :param target: The tensor of shape `num_elements`.
        :type target: array.
        :return: sample size estimation for the given dataset.
        :rtype: dict
        """
        self.dataset = Dataset(features, target)
        # Resolve the candidate-size range, using data-driven defaults.
        if self.end is None:
            end = len(self.dataset) - 1
        else:
            end = self.end
        if self.begin is None:
            begin = 2*self.dataset.n
        else:
            begin = self.begin
        if end <= begin:
            raise ValueError(
                "The end value must be greater than the begin value but {}<={}".format(
                    end, begin))
        if self.num >= end - begin:
            raise ValueError(
                "The num value must be smaler than (end - begin) but {}>={}".format(
                    self.num, end - begin))
        # Candidate subsample sizes: begin, begin+num, ... (num is the step).
        subset_sizes = np.arange(begin, end, self.num, dtype=np.int64)
        list_of_answers = []
        points_one = np.ones(self.averaging, dtype=np.int64)
        if self.multiprocess:
            pool = Pool()
            mapping = pool.map
        else:
            mapping = map
        if self.progressbar:
            iterator = self._progressbar(subset_sizes)
        else:
            iterator = subset_sizes
        for i, m in enumerate(iterator):
            # Score `averaging` independent subsamples of size m.
            list_of_answers.append(
                np.asarray(
                    list(mapping(self._score_subsample, m*points_one))))
            self._set_status(100.*(i+1)/len(subset_sizes))
        if self.multiprocess:
            pool.close()
            pool.join()
        list_of_answers = np.asarray(list_of_answers)
        # Mean and standard deviation of the statistic per candidate size.
        list_of_E = np.mean(list_of_answers, axis = 1)
        list_of_S = np.std(list_of_answers, axis = 1)
        # m* is simply the candidate size with the highest mean utility.
        return {'m*': subset_sizes[np.argmax(np.array(list_of_E))],
                'E': np.array(list_of_E),
                'S': np.array(list_of_S),
                'm': np.array(subset_sizes),
                }
class KLEstimator(SampleSizeEstimator):
r"""
Description of KL based Method
:param statmodel: the machine learning algorithm
:type statmodel: RegressionModel or LogisticModel
:param averaging: to do
:type averaging: float
:param epsilon: to do
:type epsilon: float
:param begin: to do
:type begin: int
:param end: to do
:type end: int
:param num: to do
:type num: int
:param multiprocess: to do
:type multiprocess: bool
:param progressbar: to do
:type progressbar: bool
"""
    def __init__(self, statmodel, **kwards):
        r"""Constructor method
        """
        super().__init__()
        self.statmodel = statmodel
        # Validate every keyword eagerly so bad configurations fail fast.
        # Default averaging is 5 (not 100 as in the other estimators),
        # because each score performs a full leave-one-out refit sweep.
        self.averaging = int(kwards.pop('averaging', 5))
        if self.averaging <= 0:
            raise ValueError(
                "The averaging should be positive but get {}".format(
                    self.averaging))
        self.epsilon = kwards.pop('epsilon', 0.01)
        if self.epsilon <= 0:
            raise ValueError(
                "The epsilon must be positive value but get {}".format(
                    self.epsilon))
        self.begin = kwards.pop('begin', None)
        if self.begin is not None and self.begin < 0:
            raise ValueError(
                "The begin must be positive value but get {}".format(
                    self.begin))
        self.end = kwards.pop('end', None)
        if self.end is not None and self.end < 0:
            raise ValueError(
                "The end must be positive value but get {}".format(
                    self.end))
        if self.end is not None and self.begin is not None and self.end <= self.begin:
            raise ValueError(
                "The end value must be greater than the begin value but {}<={}".format(
                    self.end, self.begin))
        self.num = kwards.pop('num', 5)
        if self.num <=0:
            raise ValueError(
                "The num must be positive value but get {}".format(
                    self.num))
        if self.end is not None and self.begin is not None and self.num >= self.end - self.begin:
            raise ValueError(
                "The num value must be smaler than (end - begin) but {}>={}".format(
                    self.num, self.end - self.begin))
        self.multiprocess = kwards.pop('multiprocess', False)
        if not isinstance(self.multiprocess, bool):
            raise ValueError(
                "The multiprocess must be bool value but get {}".format(
                    self.multiprocess))
        self.progressbar = kwards.pop('progressbar', False)
        if not isinstance(self.progressbar, bool):
            raise ValueError(
                "The progressbar must be bool value but get {}".format(
                    self.progressbar))
        # Anything left over is an unknown option.
        if kwards:
            raise ValueError("Invalid parameters: %s" % str(kwards))
        # Set by forward(); kept on the instance so Pool workers can reach it.
        self.dataset = None
@staticmethod
def D_KL_normal(m_0, cov_0, m_1, cov_1, cov_0_inv, cov_1_inv):
m_0 = np.array(m_0, ndmin=1)
m_1 = np.array(m_1, ndmin=1)
cov_0 = np.array(cov_0, ndmin=2)
cov_1 = np.array(cov_1, ndmin=2)
D_KL_1 = np.sum(np.diagonal(cov_1@cov_0_inv))
D_KL_2 = float(np.reshape((m_1 - m_0), [1, -1])@cov_1@np.reshape((m_1 - m_0), [-1, 1]))
D_KL_3 = -m_0.shape[0]
D_KL_4 = float(np.log(np.linalg.det(cov_0)/np.linalg.det(cov_1)))
return 0.5*(D_KL_1 + D_KL_2 + D_KL_3 + D_KL_4)
def _klFunction(self, dataset):
r"""
Return ...
"""
X, y = dataset.sample()
model_0 = self.statmodel(y, X)
m_0 = model_0.fit()
cov_0_inv = 0.01*np.eye(m_0.shape[0]) - model_0.hessian(m_0)
cov_0 = np.linalg.inv(cov_0_inv)
# ind = np.random.randint(0, X.shape[0])
indexes = np.random.permutation(X.shape[0])
list_of_res = []
for ind in indexes:
X_new = np.delete(X, ind, axis = 0)
y_new = np.delete(y, ind, axis = 0)
model_1 = self.statmodel(y_new, X_new)
m_1 = model_1.fit()
cov_1_inv = 0.01*np.eye(m_1.shape[0]) - model_1.hessian(m_1)
cov_1 = np.linalg.inv(cov_1_inv)
list_of_res.append(
self.D_KL_normal(m_0, cov_0, m_1, cov_1, cov_0_inv, cov_1_inv))
return np.mean(list_of_res)
def _score_subsample(self, m):
r"""
Return ...
"""
X_m, y_m = self.dataset.sample(m)
dataset_m = Dataset(X_m, y_m)
return self._klFunction(dataset_m)
    def forward(self, features, target):
        r"""
        Returns sample size prediction for the given dataset.

        Tries candidate subsample sizes from ``begin`` to ``end`` in steps of
        ``num``; each size is scored ``averaging`` times via
        ``_score_subsample`` and the smallest size whose mean score is below
        ``epsilon`` is reported as ``m*``.

        :param features: The tensor of shape
            `num_elements` :math:`\times` `num_feature`.
        :type features: array.
        :param target: The tensor of shape `num_elements`.
        :type target: array.
        :return: sample size estimation for the given dataset
            (keys ``m*``, ``E`` mean scores, ``S`` score std-devs,
            ``m`` the tried sizes).
        :rtype: dict
        """
        self.dataset = Dataset(features, target)
        # Fill in bounds that were left as None at construction time.
        if self.end is None:
            end = len(self.dataset) - 1
        else:
            end = self.end
        if self.begin is None:
            begin = 2*self.dataset.n  # presumably n = number of features — TODO confirm
        else:
            begin = self.begin
        # Re-validate now that defaults are known (mirrors the __init__ checks).
        if end <= begin:
            raise ValueError(
                "The end value must be greater than the begin value but {}<={}".format(
                    end, begin))
        if self.num >= end - begin:
            raise ValueError(
                "The num value must be smaler than (end - begin) but {}>={}".format(
                    self.num, end - begin))
        subset_sizes = np.arange(begin, end, self.num, dtype=np.int64)
        list_of_answers = []
        # m*points_one repeats the same size `averaging` times for the mapper.
        points_one = np.ones(self.averaging, dtype=np.int64)
        if self.multiprocess:
            # NOTE(review): the pool is only closed on the success path below;
            # an exception during scoring leaks it — consider a with-block.
            pool = Pool()
            mapping = pool.map
        else:
            mapping = map
        if self.progressbar:
            iterator = self._progressbar(subset_sizes)
        else:
            iterator = subset_sizes
        for i, m in enumerate(iterator):
            list_of_answers.append(
                np.asarray(
                    list(mapping(self._score_subsample, m*points_one))))
            self._set_status(100.*(i+1)/len(subset_sizes))
        if self.multiprocess:
            pool.close()
            pool.join()
        list_of_answers = np.asarray(list_of_answers)
        # Mean and spread of the `averaging` scores at each candidate size.
        list_of_E = np.mean(list_of_answers, axis = 1)
        list_of_S = np.std(list_of_answers, axis = 1)
        m_size = end
        # Walk sizes from largest to smallest; the final assignment leaves
        # m_size at the SMALLEST size whose mean score is under epsilon.
        # NOTE(review): there is no `break` on the first mean >= epsilon, so
        # intermediate sizes above epsilon do not stop the scan — confirm
        # whether a break-on-failure was intended.
        for m, mean in zip(reversed(subset_sizes), reversed(list_of_E)):
            if mean < self.epsilon:
                m_size = m
        return {'m*': m_size,
                'E': np.array(list_of_E),
                'S': np.array(list_of_S),
                'm': np.array(subset_sizes),
                }
|
from near import near_group
def split2ships(cells):
    """Partition *cells* into connected groups ("ships").

    For every cell, grow its component by repeatedly absorbing any remaining
    cell whose ``near_group`` neighbourhood (diagonals disabled) touches the
    component; each finished component is stored as a frozenset.

    Returns the set of all components (duplicates collapse automatically).
    """
    ships = set()
    for seed in cells:
        component = {seed}
        grew = True
        while grew:
            grew = False
            for candidate in cells - component:
                if near_group({candidate}, base=False, diagonals=False) & component:
                    component.add(candidate)
                    grew = True
        ships.add(frozenset(component))
    return ships
if __name__ == '__main__':
    # Demo run: print the connected groups found in a small set of board cells.
    cells = {(0, 1), (3, 2), (1, 5), (1, 6), (2, 3), (2, 2), (1, 0), (1, 1)}
    print(split2ships(cells))
|
from PIL import Image,ImageFilter
# Open a JPEG image from the current working directory.
im = Image.open('thumbnail.jpg')
# Current size as (width, height).
w,h=im.size
print('Original image size : %sx%s' %(w,h))
# NOTE(review): Image.thumbnail resizes in place but only ever SHRINKS the
# image (preserving aspect ratio), so passing (w*2, h*2) leaves the size
# unchanged and the "Resize image to" message below is misleading.
# Image.resize would be needed to actually enlarge — confirm intent.
im.thumbnail((w*2,h*2))
print('Resize image to :%sx%s' %(w*2,h*2))
# Blur the image and overwrite the source file in place.
im2 = im.filter(ImageFilter.BLUR)
im2.save('thumbnail.jpg','jpeg')
|
'''Container class for pre-processed content parts.'''
class PreprocessedContents:
    """Holds the pre-processed parts of a piece of content.

    The length of every part is cached once at construction so the
    ``get_*_count`` accessors never recompute it.
    """

    # (raw attribute name, cached count attribute name)
    _COUNTED_PARTS = (
        ("title_text", "title_text_content_count"),
        ("normal_text", "normal_text_content_count"),
        ("media", "media_content_count"),
        ("embedded_content", "embedded_content_count"),
        ("quoted_content", "quoted_content_count"),
    )

    def __init__(self, title_text, normal_text, media,
                 embedded_content, quoted_content):
        self.title_text = title_text
        self.normal_text = normal_text
        self.media = media
        self.embedded_content = embedded_content
        self.quoted_content = quoted_content
        self._calculate_content_counts()

    def _calculate_content_counts(self):
        # Cache all lengths up front so they need not be recomputed on
        # every accessor call.
        for part_attr, count_attr in self._COUNTED_PARTS:
            setattr(self, count_attr, len(getattr(self, part_attr)))

    def get_title_text_content_count(self):
        return self.title_text_content_count

    def get_normal_text_content_count(self):
        return self.normal_text_content_count

    def get_media_content_count(self):
        return self.media_content_count

    def get_embedded_content_count(self):
        return self.embedded_content_count

    def get_quoted_content_count(self):
        return self.quoted_content_count
|
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 15 21:13:36 2019
@author: leonwebs
"""
"""
Generate random regions
Randomly form regions given various types of constraints on cardinality and
composition.
This is edited from pysal.region.randomregion to allow weighted cardinality.
For example, total population in each region can be constrained rather than the
number of areas.
"""
__author__ = "David Folch dfolch@fsu.edu, Serge Rey srey@asu.edu"
import numpy as np
from pysal.region.components import check_contiguity
from pysal.common import copy
__all__ = ["Random_RegionsWt", "Random_RegionWt"]
class Random_RegionsWt:
    """Generate a list of Random_RegionWt instances.

    Weighted variant of pysal's ``Random_Regions``: a region's "cardinality"
    is a target total area weight (e.g. population) rather than a count of
    areas.

    Parameters
    ----------
    area_ids : list
        IDs indexing the areas to be grouped into regions (must
        be in the same order as spatial weights matrix if this
        is provided)
    num_regions : integer
        number of regions to generate (if None then this is
        chosen randomly)
    cardinality : list
        list of target total weights, one per region
        (if num_regions is also provided then len(cardinality)
        must equal num_regions; if cardinality=None then a list
        of length num_regions will be generated randomly)
    contiguity : W
        spatial weights object (if None then contiguity will be
        ignored)
    maxiter : int
        maximum number attempts (for each permutation) at finding
        a feasible solution (only affects contiguity constrained
        regions)
    compact : boolean
        attempt to build compact regions (only affects
        contiguity constrained regions)
    max_swaps : int
        maximum number of swaps to find a feasible solution
        (only affects contiguity constrained regions)
    permutations : int
        number of Random_RegionWt instances to generate
    area_wts : list
        weight of each area, e.g. its population (defaults to 1 per area)
    tol : float
        relative tolerance on each region's total weight

    Attributes
    ----------
    solutions : list
        every Random_RegionWt instance generated (length == permutations)
    solutions_feas : list
        the subset of instances that produced a feasible solution

    Notes
    -----
    The doctest examples of the original pysal ``Random_Regions`` class were
    removed: they exercised the unweighted class (not this one) and relied on
    Python 2 ``range(...) + [...]`` arithmetic, which fails on Python 3.
    """

    def __init__(
            self, area_ids, num_regions=None, cardinality=None, contiguity=None,
            maxiter=100, compact=False, max_swaps=1000000, permutations=99,
            area_wts=None, tol=0):
        # Draw `permutations` independent candidate solutions.
        self.solutions = [
            Random_RegionWt(area_ids, num_regions, cardinality, contiguity,
                            maxiter, compact, max_swaps, area_wts, tol)
            for _ in range(permutations)]
        # Idiom fix: truthiness test instead of `== True` comparison.
        self.solutions_feas = [sol for sol in self.solutions if sol.feasible]
class Random_RegionWt:
    """Randomly combine a given set of areas into two or more regions,
    optionally weighted per area.

    Extends pysal's ``Random_Region`` so that each region's "cardinality" is
    a target total weight (e.g. population) instead of a number of areas.

    Parameters
    ----------
    area_ids : list
        IDs indexing the areas to be grouped into regions (must
        be in the same order as spatial weights matrix if this
        is provided)
    num_regions : integer
        number of regions to generate (if None then this is
        chosen randomly)
    cardinality : list
        list of target total weights, one per region
        (if num_regions is also provided then len(cardinality)
        must equal num_regions; if cardinality=None then a list
        of length num_regions will be generated randomly)
    contiguity : W
        spatial weights object (if None then contiguity will be
        ignored)
    maxiter : int
        maximum number attempts at finding a feasible solution
        (only affects contiguity constrained regions)
    compact : boolean
        attempt to build compact regions (only affects
        contiguity constrained regions)
    max_swaps : int
        maximum number of swaps to find a feasible solution
        (only affects contiguity constrained regions)
    area_wts : list
        weight to use for each area, for instance the population
        of the area (defaults to 1 per area)
    tol : float
        relative tolerance on each region's total weight

    Attributes
    ----------
    feasible : boolean
        if True then solution was found
    regions : list
        list of lists of regions (each list has the ids of areas
        in that region)
    """

    def __init__(
            self, area_ids, num_regions=None, cardinality=None, contiguity=None,
            maxiter=1000, compact=False, max_swaps=1000000,
            area_wts=None, tol=0):
        if area_wts is None:
            # Unweighted case: each area counts 1, so weights reduce to counts.
            area_wts = np.ones(len(area_ids))
        self.n = sum(area_wts)  # total weight of all areas (may be a float)
        ids = copy.copy(area_ids)
        self.wtdict = dict(zip(ids, area_wts))
        self.ids = list(np.random.permutation(ids))
        self.area_ids = area_ids
        self.regions = []
        self.feasible = True
        self.tol = tol
        self.cd_dev = {}  # region index -> [total weight, deviation from target]

        # tests for input argument consistency
        if cardinality is not None:
            # Allow 0.1% slack because weights may be floats.
            if np.greater_equal(abs(self.n - sum(cardinality)),
                                .001 * abs(sum(cardinality))):
                self.feasible = False
                raise Exception('Sum of cardinalities does not equal total weight')
        if contiguity is not None:
            if area_ids != contiguity.id_order:
                self.feasible = False
                raise Exception('order of area_ids must match order in contiguity')
        if num_regions and cardinality is not None:
            if num_regions != len(cardinality):
                self.feasible = False
                raise Exception('number of regions does not match cardinality')

        # dispatches the appropriate algorithm
        if num_regions and cardinality is not None and contiguity is not None:
            # conditioning on cardinality and contiguity (number of regions implied)
            self.build_contig_regions(num_regions, cardinality, contiguity,
                                      maxiter, compact, max_swaps)
        elif num_regions and cardinality is not None:
            # conditioning on cardinality (number of regions implied)
            region_breaks = self.cards2breaks(cardinality)
            self.build_noncontig_regions(num_regions, region_breaks)
        elif num_regions and contiguity is not None:
            # conditioning on number of regions and contiguity
            cards = self.get_cards(num_regions)
            self.build_contig_regions(num_regions, cards, contiguity,
                                      maxiter, compact, max_swaps)
        elif cardinality is not None and contiguity is not None:
            # conditioning on cardinality and contiguity
            num_regions = len(cardinality)
            self.build_contig_regions(num_regions, cardinality, contiguity,
                                      maxiter, compact, max_swaps)
        elif num_regions:
            # conditioning on number of regions only
            region_breaks = self.get_region_breaks(num_regions)
            self.build_noncontig_regions(num_regions, region_breaks)
        elif cardinality is not None:
            # conditioning on cardinality only
            num_regions = len(cardinality)
            region_breaks = self.cards2breaks(cardinality)
            self.build_noncontig_regions(num_regions, region_breaks)
        elif contiguity is not None:
            # conditioning on contiguity only
            num_regions = self.get_num_regions()
            cards = self.get_cards(num_regions)
            self.build_contig_regions(num_regions, cards, contiguity,
                                      maxiter, compact, max_swaps)
        else:
            # unconditioned
            num_regions = self.get_num_regions()
            region_breaks = self.get_region_breaks(num_regions)
            self.build_noncontig_regions(num_regions, region_breaks)

    def get_num_regions(self):
        """Random region count drawn inclusively from [2, n]."""
        # FIX: np.random.random_integers has been removed from NumPy;
        # randint with an exclusive upper bound reproduces the inclusive draw.
        return np.random.randint(2, int(self.n) + 1)

    def get_region_breaks(self, num_regions):
        """num_regions-1 distinct, sorted break points in [1, n-1]."""
        region_breaks = set([])
        while len(region_breaks) < num_regions - 1:
            # inclusive draw from [1, n-1] (see get_num_regions note)
            region_breaks.add(np.random.randint(1, int(self.n - 1) + 1))
        region_breaks = list(region_breaks)
        region_breaks.sort()
        return region_breaks

    def get_cards(self, num_regions):
        """Random weight targets for num_regions regions summing to self.n."""
        region_breaks = self.get_region_breaks(num_regions)
        cards = []
        start = 0
        for brk in region_breaks:
            cards.append(brk - start)
            start = brk
        cards.append(self.n - start)
        return cards

    def cards2breaks(self, cards):
        """Cumulative-sum the targets into break points (final total dropped)."""
        region_breaks = []
        break_point = 0
        for card in cards:
            break_point += card
            region_breaks.append(break_point)
        region_breaks.pop()
        return region_breaks

    def build_noncontig_regions(self, num_regions, region_breaks):
        """Slice the shuffled id list at the break points."""
        start = 0
        for brk in region_breaks:
            self.regions.append(self.ids[start:brk])
            start = brk
        self.regions.append(self.ids[start:])

    def grow_compact(self, w, test_card, region, candidates, potential, regpop):
        # try to build a compact region by exhausting all existing
        # potential areas before adding new potential areas
        add_areas = []
        sizecheck = self.region_size_check
        while potential and sizecheck(regpop, test_card) == -1:
            pot_index = np.random.randint(0, len(potential))  # inclusive [0, len-1]
            add_area = potential[pot_index]
            region.append(add_area)
            regpop += self.wtdict[add_area]
            candidates.remove(add_area)
            potential.remove(add_area)
            add_areas.append(add_area)
        for i in add_areas:
            # NOTE(review): the size guard below uses wtdict[i] (the already
            # added area) rather than wtdict[j] (the candidate neighbour);
            # preserved as-is but it looks suspicious — confirm intent.
            potential.extend([j for j in w.neighbors[i]
                              if j not in region and
                              j not in potential and
                              j in candidates and
                              self.region_size_check(regpop + self.wtdict[i], test_card) != 1])
        return region, candidates, potential, regpop

    def grow_free(self, w, test_card, region, candidates, potential, regpop):
        # increment potential areas after each new area is
        # added to the region (faster than the grow_compact)
        pot_index = np.random.randint(0, len(potential))  # inclusive [0, len-1]
        add_area = potential[pot_index]
        region.append(add_area)
        regpop += self.wtdict[add_area]
        candidates.remove(add_area)
        potential.remove(add_area)
        potential.extend([i for i in w.neighbors[add_area]
                          if i not in region and
                          i not in potential and
                          i in candidates and
                          self.region_size_check(regpop + self.wtdict[i], test_card) != 1])
        return region, candidates, potential, regpop

    def region_size_check(self, regsize, testsize):
        """Compare a region weight to its target within self.tol:
        1 = too big, -1 = too small, 0 = within tolerance."""
        diff = regsize - testsize
        if diff > self.tol * testsize:
            return 1
        elif diff < -self.tol * testsize:
            return -1
        else:
            return 0

    def build_contig_regions(self, num_regions, cardinality, w,
                             maxiter, compact, max_swaps):
        if compact:
            grow_region = self.grow_compact
        else:
            grow_region = self.grow_free
        sizecheck = self.region_size_check
        attempt = 0  # renamed from `iter` to avoid shadowing the builtin
        while attempt < maxiter:
            # regionalization setup
            regions = []
            size_pre = 0
            counter = -1  # consecutive seeds that failed to grow a region
            area2region = {}
            self.feasible = False
            swap_count = 0
            cards = copy.copy(cardinality)
            cards.sort()  # try to build largest regions first (pop from end of list)
            candidates = copy.copy(self.ids)  # these are already shuffled
            # begin building regions
            while candidates and swap_count < max_swaps:
                # track whether the previous seed produced a region
                if size_pre == len(regions):
                    counter += 1
                else:
                    counter = 0
                    size_pre = len(regions)
                # NOTE(review): the swap-based repair step of pysal's original
                # implementation is disabled in this weighted port (so
                # swap_count never increases); see VCS history for the draft.
                # setup to build a single region
                building = True
                seed = candidates.pop(0)
                region = [seed]
                regpop = self.wtdict[seed]
                if not cards:
                    # no weight targets left to assign
                    break
                test_card = cards.pop()
                # test_card is the weight (e.g. population) target
                potential = [i for i in w.neighbors[seed]
                             if (i in candidates
                                 and sizecheck(regpop + self.wtdict[i], test_card) != 1)]
                # begin building single region
                while building and sizecheck(regpop, test_card) == -1:
                    if potential:
                        region, candidates, potential, regpop = grow_region(
                            w, test_card,
                            region, candidates, potential, regpop)
                    else:
                        # not enough potential neighbors to reach test_card size
                        building = False
                        cards.append(test_card)
                        if regpop in cards:
                            # constructed region matches another candidate region size
                            cards.remove(regpop)
                        else:
                            # constructed region doesn't match a candidate region size
                            candidates.extend(region)
                            region = []
                # cleanup when successful region built
                if region:
                    region_index = len(regions)
                    for i in region:
                        area2region[i] = region_index  # needed by the (disabled) swap step
                    regions.append(region)
                    self.cd_dev[region_index] = np.array([regpop, regpop - test_card])
            # handling of regionalization result
            if len(regions) < num_regions:
                # regionalization failed; reshuffle the areas and retry
                self.ids = list(np.random.permutation(self.ids))
                regions = []
                attempt += 1
            else:
                # regionalization successful
                self.feasible = True
                attempt = maxiter
        self.regions = regions
class RegStruct(dict):
    """dict mapping area -> region index (or None when unassigned), plus the
    per-region membership lists and total weights.

    With no ``area_wts`` the "weight" of a region is simply its area count.
    """

    def __init__(self, areas, region_list=None, area_wts=None):
        # BUG FIX: mutable default argument ([]) replaced by None sentinel.
        if region_list is None:
            region_list = []
        # BUG FIX: the original called dict.__init__ on a temporary dict
        # literal instead of self, so areas outside region_list never became
        # keys and area_into_region silently ignored them.
        dict.__init__(self, {area: None for area in areas})
        self.regions = [i.copy() for i in region_list]
        self.update({area: regionind
                     for regionind in range(len(region_list))
                     for area in region_list[regionind]})
        if area_wts is None:
            self.totalfun = len  # total weight of a list of areas
            self.wtfun = lambda x: 1  # weight of a single area
        else:
            self.wtdict = dict(zip(areas, area_wts))
            self.totalfun = lambda x: sum([self.wtdict[i] for i in x])
            self.wtfun = self.wtdict.get
        self.regionwts = [self.totalfun(i) for i in region_list]

    def area_into_region(self, area, regionind):
        """Move area(s) into region ``regionind``, appending a new empty
        region when ``regionind == len(self.regions)``; membership lists and
        region weights are kept in sync. Unknown areas are ignored."""
        if regionind == len(self.regions):
            self.regions.append([])
            self.regionwts.append(0)
        if isinstance(area, int):  # single area id -> one-element batch
            area = [area]
        for a in area:
            if a in self:
                # remove from the old region, if any
                if self[a] is not None:
                    self.regions[self[a]].remove(a)
                    self.regionwts[self[a]] -= self.wtfun(a)
                # include in the new region
                self[a] = regionind
                self.regions[regionind].append(a)
                self.regionwts[regionind] += self.wtfun(a)
|
import torch.nn as nn
from src.utils.layers import *
scale = 3
class SuperResolutionTransformer(torch.nn.Module):
    """U-Net-style encoder/decoder with additive skip connections.

    Seven stride-2 conv downsamplings, a 1x1-conv bottleneck, then seven
    nearest-neighbour upsamplings; each decoder stage adds the matching
    encoder activation (cropped to shape) before convolving.
    """

    def __init__(self):
        super(SuperResolutionTransformer, self).__init__()

        def conv_pair(c_in, c_out, kernel=3):
            # Two ConvLayer/InstanceNorm/LeakyReLU stages. Module order and
            # Sequential indices match the original hand-written blocks, so
            # state_dict keys are unchanged.
            return nn.Sequential(
                ConvLayer(c_in, c_out, kernel),
                nn.InstanceNorm2d(c_out),
                nn.LeakyReLU(0.02),
                ConvLayer(c_out, c_out, kernel),
                nn.InstanceNorm2d(c_out),
                nn.LeakyReLU(0.02),
            )

        def conv_single(c_in, c_out):
            # One ConvLayer/InstanceNorm/LeakyReLU stage.
            return nn.Sequential(
                ConvLayer(c_in, c_out, 3),
                nn.InstanceNorm2d(c_out),
                nn.LeakyReLU(0.02),
            )

        def halve(channels):
            # Strided convolution used in place of pooling.
            return nn.Conv2d(channels, channels, 3, stride=2, padding=1)

        widths = [8 * scale, 16 * scale, 32 * scale, 64 * scale,
                  128 * scale, 256 * scale, 256 * scale]

        # Encoder
        self.enc_conv0 = conv_pair(3, widths[0])
        self.pool0 = halve(widths[0])
        self.enc_conv1 = conv_pair(widths[0], widths[1])
        self.pool1 = halve(widths[1])
        self.enc_conv2 = conv_pair(widths[1], widths[2])
        self.pool2 = halve(widths[2])
        self.enc_conv3 = conv_pair(widths[2], widths[3])
        self.pool3 = halve(widths[3])
        self.enc_conv4 = conv_pair(widths[3], widths[4])
        self.pool4 = halve(widths[4])
        self.enc_conv5 = conv_pair(widths[4], widths[5])
        self.pool5 = halve(widths[5])
        self.enc_conv6 = conv_pair(widths[5], widths[6])
        self.pool6 = halve(widths[6])

        # Bottleneck (1x1 convolutions)
        self.bottleneck_conv = conv_pair(256 * scale, 256 * scale, kernel=1)

        # Decoder. upsample0 is unused by forward() but kept so the module's
        # attribute set stays identical.
        self.upsample0 = nn.Upsample(scale_factor=2, mode='nearest')
        self.de_conv0 = conv_pair(256 * scale, 256 * scale)
        self.upsample = nn.Upsample(scale_factor=2, mode='nearest')
        self.de_conv1 = conv_single(256 * scale, 128 * scale)
        self.de_conv2 = conv_single(128 * scale, 64 * scale)
        self.de_conv3 = conv_single(64 * scale, 32 * scale)
        self.de_conv4 = conv_single(32 * scale, 16 * scale)
        self.de_conv5 = conv_single(16 * scale, 8 * scale)
        self.de_conv6 = nn.Sequential(
            ConvLayer(8 * scale, 8 * scale, 3),
            nn.InstanceNorm2d(8 * scale),
            nn.LeakyReLU(0.02),
            ConvLayer(8 * scale, 8 * scale, 3),
            nn.LeakyReLU(0.02),
        )
        self.de_conv7 = nn.Sequential(
            ConvLayer(8 * scale, 8 * scale, 3),
            nn.InstanceNorm2d(8 * scale),
            nn.LeakyReLU(0.02),
            ConvLayer(8 * scale, 3, 3),
        )

    def forward(self, x):
        def crop_to(tensor, reference):
            # Trim so the additive skip connection shapes agree.
            n, c, h, w = reference.shape
            return tensor[:n, :c, :h, :w]

        # Encoder: keep each pre-pool activation for its skip connection.
        skip0 = self.enc_conv0(x)
        skip1 = self.enc_conv1(self.pool0(skip0))
        skip2 = self.enc_conv2(self.pool1(skip1))
        skip3 = self.enc_conv3(self.pool2(skip2))
        skip4 = self.enc_conv4(self.pool3(skip3))
        skip5 = self.enc_conv5(self.pool4(skip4))
        skip6 = self.enc_conv6(self.pool5(skip5))

        # Bottleneck
        bottom = self.bottleneck_conv(self.pool6(skip6))

        # Decoder: upsample, crop, add skip, convolve.
        up = self.de_conv0(crop_to(self.upsample(bottom), skip6) + skip6)
        up = self.de_conv1(crop_to(self.upsample(up), skip5) + skip5)
        up = self.de_conv2(crop_to(self.upsample(up), skip4) + skip4)
        up = self.de_conv3(crop_to(self.upsample(up), skip3) + skip3)
        up = self.de_conv4(crop_to(self.upsample(up), skip2) + skip2)
        up = self.de_conv5(crop_to(self.upsample(up), skip1) + skip1)
        up = self.de_conv6(crop_to(self.upsample(up), skip0) + skip0)
        return self.de_conv7(self.upsample(up))
|
import requests
from bs4 import BeautifulSoup
baseUrl = "http://www.imdb.com"
# Build the IMDb search URL for a movie name.
def create_url(query):
    """Return the IMDb find-URL for *query* (spaces become '+')."""
    slug = query.replace(" ", "+")
    return "http://www.imdb.com/find?ref_=nv_sr_fn&q=" + slug + '&s=all'
#extract all the links for a particular movie
def extract_all_items(url):
    """Return {absolute_url: title_text} for every row of the 'Titles'
    section of an IMDb find-results page; empty dict when the page has no
    such section."""
    global baseUrl
    page = requests.get(url).text
    soup = BeautifulSoup(page, "html.parser")
    sections = soup.find("div", {"id": "main"}).findAll("div", {"class": "findSection"})
    # Locate the section headed "Titles" (other sections list people, etc.).
    titles_section = None
    for section in sections:
        if section.find("h3").text == "Titles":
            titles_section = section
            break
    if titles_section is None:
        return {}
    links = {}
    for row in titles_section.find("table", {"class": "findList"}).findAll("tr"):
        title_cell = row.findAll("td")[1]
        links[baseUrl + title_cell.find("a").get("href")] = title_cell.text
    return links
#return all the movie details
def get_movie_details(url):
    """Scrape one IMDb title page and return its details as a list.

    NOTE(review): the element ids/classes here ("title-overview-widget",
    "title_wrapper", ...) match an older IMDb page layout; on the current
    site most lookups may return None and the fields stay at their defaults.

    :param url: absolute URL of an IMDb title page.
    :return: [movie_name, movie_director, ratings, certificate, duration,
              movie_image_url, movie_video_url, genre, release_date,
              movie_credit_summary]. movie_director, genre and release_date
              are never populated by this function and remain ''.
    """
    # Defaults so missing page elements leave empty fields instead of crashing.
    movie_name, movie_director,certificate, duration,movie_image_url,movie_video_url, genre, release_date,movie_credit_summary = ['']*9
    ratings = 0
    res = requests.get(url)
    con = res.text
    soup = BeautifulSoup(con, "html.parser")
    div = soup.find("div", {"id":"title-overview-widget"})
    movie_name_div = div.find("div", {"class" : "title_wrapper"})
    if movie_name_div is not None:
        movie_name = movie_name_div.h1.text
    ratings_div = div.find("div", {"class" : "ratingValue"})
    if ratings_div is not None:
        # NOTE(review): ratings is a string when present but 0 otherwise —
        # callers get mixed types; confirm that is acceptable.
        ratings = ratings_div.span.text
    movie_image_url_div = div.find("div" , {"class" : "poster"})
    if movie_image_url_div is not None:
        movie_image_url = movie_image_url_div.img['src']
    movie_video_url_div = div.find("div" , {"class" : "slate"})
    if movie_video_url_div is not None:
        # Trailer links are relative; prefix the site root.
        movie_video_url = baseUrl + movie_video_url_div.a['href']
    movie_details_div = div.find("div", {"class" : "subtext"})
    if movie_details_div is not None:
        movie_details = movie_details_div.text
        if '|' in movie_details:
            # First two '|'-separated chunks are taken as certificate and duration.
            movie_details_info = movie_details.split('|')
            if movie_details_info[0] is not None:
                certificate = movie_details_info[0].strip()
            if movie_details_info[1] is not None:
                duration = movie_details_info[1].strip()
    movie_credit_summary_div = div.find("div", {"class" : "summary_text"})
    if movie_credit_summary_div is not None:
        movie_credit_summary = movie_credit_summary_div.text.strip()
    movie_details = [movie_name, movie_director, ratings, certificate, duration,
                     movie_image_url, movie_video_url,genre, release_date,
                     movie_credit_summary]
    return movie_details
|
from dateutil import parser
from rest_framework.test import APITestCase
from .models import Message
from .factories import MessageFactory
class MessageListTestCase(APITestCase):
    def test_unauthenticated_user_can_list_all_messages(self):
        # Anonymous clients must see every stored message body.
        batch = MessageFactory.create_batch(20)
        self.client.force_authenticate(user=None)
        response = self.client.get('/api/messages/')
        self.assertEqual(200, response.status_code)
        bodies = {item['body'] for item in response.data}
        for created in batch:
            self.assertTrue(created.body in bodies)
class MessageCreateTestCase(APITestCase):
    def test_unauthenticated_user_can_create_message(self):
        # Anonymous POST should create the message and echo it back.
        self.client.force_authenticate(user=None)
        body = 'Tis but a scratch!'
        response = self.client.post('/api/messages/', {'body': body}, format='json')
        self.assertEqual(201, response.status_code)
        data = response.data
        self.assertTrue(data.get('url'))
        self.assertTrue(data.get('created'))
        self.assertEqual(body, data.get('body'))
class MessageRetrieveTestCase(APITestCase):
    def test_unauthenticated_user_can_retrieve_existing_message(self):
        # Anonymous GET on a detail URL returns the stored fields.
        stored = MessageFactory.create()
        self.client.force_authenticate(user=None)
        response = self.client.get('/api/messages/{}/'.format(stored.id))
        self.assertEqual(200, response.status_code)
        self.assertEqual(stored.body, response.data.get('body'))
        # Compare datetimes rather than raw strings so the serialisation
        # format does not matter.
        self.assertEqual(stored.created,
                         parser.parse(response.data.get('created')))
        self.assertTrue(response.data.get('url'))
class MessageModifyTestCase(APITestCase):
    """Update endpoint: anonymous clients may PATCH an existing message."""

    def test_unauthenticated_user_can_modify_existing_message(self):
        message = MessageFactory.create(body='Good afternoon!')
        payload = {'body': 'Good night!'}
        self.client.force_authenticate(user=None)
        url = '/api/messages/{}/'.format(message.id)
        response = self.client.patch(url, payload, format='json')
        self.assertEqual(200, response.status_code)
        # The change must be persisted, not just echoed back.
        message.refresh_from_db()
        self.assertEqual(payload['body'], message.body)
class MessageDestroyTestCase(APITestCase):
    """Delete endpoint: anonymous clients may remove a message."""

    def test_unauthenticated_user_can_destroy_existing_message(self):
        message = MessageFactory.create()
        self.client.force_authenticate(user=None)
        response = self.client.delete('/api/messages/{}/'.format(message.id))
        self.assertEqual(204, response.status_code)
        # The row is gone: reloading the instance must now fail.
        self.assertRaises(Message.DoesNotExist, message.refresh_from_db)
|
from ellipticCurve import ellipticCurveSolver
from primeChecker import primeChecker
from new_simple import *
from power import powerCongruence
from simply_exp import *
from numsix import *
def show_menu():
    """Print the list of commands the calculator understands."""
    options = (
        '"simple": to calculate a simple linear congruence of style ax = b mod(n)',
        '"power": to calculate a powered congruence of style x^e = b mod(n)',
        '"elliptic": to calculate an elliptic curve of style Y=y^2 = x^3 + ax + b (mod p)',
        '"prime check": check if an extremely large integer is prime',
        '"new_simple": problem like 13x+5= 15(mod 23)',
        "\"simplify\": 'Simplify the following expressions",
        '"menu": to see all available options',
        '"ecadd": To calculate #P using the double-and-add algorithm',
        '"order_num": Find the possible orders of number in Zp',
        '"end": to close program\n',
    )
    for line in options:
        print(line)
def main():
    """Run the interactive Modulus Calculator command loop.

    Repeatedly prompts for a command, reads that command's operands, and
    prints a worked solution. Terminates when the user enters "end".
    """
    print("welcome to the Modulus Calculator")
    # Seed the loop variable so the while-condition is defined on entry.
    option = "placeholder"
    print("type \"menu\" to see all available options")
    while option != "end":
        option = input("\nWhat would you like to do?: ")
        if option == "simplify":
            print("a^b (mod p)")
            base = int(input("a = "))
            exponent = int(input("b = "))
            modulus = int(input("p = "))
            simplify_base(base, exponent, modulus)
        elif option == "simple":
            print("ax = b mod(n)")
            coeff = int(input("a = "))
            rhs = int(input("b = "))
            modulus = int(input("n = "))
            # Solve by multiplying both sides by the modular inverse of a.
            inverse = minv(coeff, modulus)
            print("First find the modulus inverse of a")
            print("modulus inverse of", coeff, "=", inverse)
            solution = rhs * inverse % modulus
            print("multiply the mod inverse on both sides")
            print(inverse, "*", coeff, " = ", inverse, "*", rhs, " (mod ", modulus, ")", sep='')
            print("therefore: ", rhs * inverse, " (mod ", modulus, ")", sep='')
            print("solution =", solution)
        elif option == "order_num":
            numbers_of_order_base()
        elif option == "power":
            print("x^e = b mod(n)")
            rhs = int(input("b = "))
            modulus = int(input("n = "))
            exponent = int(input("e = "))
            powerCongruence(rhs, exponent, modulus, False)
        elif option == "power show":
            print("x^e = b mod(n)")
            rhs = int(input("b = "))
            modulus = int(input("n = "))
            exponent = int(input("e = "))
            answer = powerCongruence(rhs, exponent, modulus, True)
            print("x =", answer)
        elif option == "ecadd":
            print("Y=y^2 = x^3 + ax + b (mod p)")
            a = int(input("a = "))
            b = int(input("b = "))
            x = int(input("x = "))
            p = int(input("p = "))
            print("P = (x1, y1)")
            x1 = int(input("x1 = "))
            y1 = int(input("y1 = "))
            print("REMEMBER: If using double and add, type in the same point as (x1, y1)")
            print("P = (x2, y2)")
            x2 = int(input("x2 = "))
            y2 = int(input("y2 = "))
            print("Calculate #P using...")
            np = int(input("#P = "))
            # Repeatedly add P, printing each multiple 2P .. npP.
            for step in range(1, np):
                (x2, y2) = ecadd(p, a, x1, y1, x2, y2)
                print(step + 1, "P = (", x2, ",", y2, ")", sep='')
        elif option == "elliptic":
            print("Y=y^2 = x^3 + ax + b (mod p)")
            x = int(input("x = "))
            modulus = int(input("p = "))
            a = int(input("a = "))
            b = int(input("b = "))
            values = ellipticCurveSolver(x, modulus, a, b, False)
            print("y values are", values)
        elif option == "elliptic show":
            print("Y=y^2 = x^3 + ax + b (mod p)")
            x = int(input("x = "))
            modulus = int(input("p = "))
            a = int(input("a = "))
            b = int(input("b = "))
            values = ellipticCurveSolver(x, modulus, a, b, True)
            print("y values are", values)
        elif option == "prime check":
            candidate = int(input("prime = "))
            print(primeChecker(candidate))
        elif option == "menu":
            show_menu()
        elif option == "new_simple":
            print("ax = b mod(n)")
            coeff = int(input("a = "))
            rhs = int(input("b = "))
            modulus = int(input("n = "))
            new_simple(coeff, rhs, modulus)


if __name__ == "__main__":
    main()
|
from typing import Set
from unittest.mock import Mock
from warnings import warn
import spellbot
from spellbot.assets import load_strings
from .constants import REPO_ROOT
S_SPY = Mock(wraps=spellbot.s)
SNAPSHOTS_USED: Set[str] = set()
class TestMeta:
    """Session-wide bookkeeping checks for strings.yaml keys and snapshots."""

    # Tracks the usage of string keys over the entire test session. It can
    # fail for two reasons:
    #   1. A key in strings.yaml is not used at all -> remove it.
    #   2. A key in strings.yaml is never exercised by the tests -> add a test.
    def test_strings(self):
        """Assures that there are no missing or unused strings data."""
        used_keys = {call[0][0] for call in S_SPY.call_args_list}
        config_keys = set(load_strings().keys())
        if "did_you_mean" not in used_keys:
            warn('strings.yaml key "did_you_mean" is unused in test suite')
            used_keys.add("did_you_mean")
        assert config_keys - used_keys == set()

    # Tracks the usage of snapshot files over the entire test session; a
    # failure means stale snapshot files should be deleted.
    def test_snapshots(self):
        """Checks that all of the snapshots files are being used."""
        snapshots_dir = REPO_ROOT / "tests" / "snapshots"
        snapshot_files = {path.name for path in snapshots_dir.glob("*.txt")}
        assert snapshot_files == SNAPSHOTS_USED
|
"""
This script runs the signal_timestamps function, that gets the duration of each
recording + all the timestamps of the recoring, and saves the info in the
database.
"""
import os
import sqlite3
from birdsong.data_preparation.audio_conversion.signal_extraction import signal_timestamps
# Pick storage paths depending on where the script runs: the server exposes
# HOSTNAME and mounts /storage, local runs use a relative storage/ dir.
if 'HOSTNAME' in os.environ:
    # script runs on server
    STORAGE_DIR = '/storage/step1_wav/'
    DATABASE_DIR = '/storage/db.sqlite'
else:
    # script runs locally
    STORAGE_DIR = 'storage/step1_wav/'
    DATABASE_DIR = 'storage/db.sqlite'

# Get a list of files that are downloaded.
downloaded_files = os.listdir(STORAGE_DIR)
print('list with downloaded files made')
print(len(downloaded_files))

# Recording IDs are the filenames minus the '.wav' suffix.
downloaded_ids = [int(x[:-4]) for x in downloaded_files]

# Get all the recordings that were already processed before.
conn = sqlite3.connect(DATABASE_DIR)
print('database loaded')
c = conn.cursor()
q = '''
    SELECT id FROM recordings
    WHERE step1 = 1 AND duration IS NOT NULL
    '''
c.execute(q)
processed_ids = [i[0] for i in c.fetchall()]
print('list of already processed recordings')
print(len(processed_ids))

# Remove the already processed recordings from the ones we want to process.
to_process = [x for x in downloaded_ids if x not in processed_ids]
print('list of files to process')
print(len(to_process))

# Processing: analyse each wav file and persist its duration, signal sum and
# timestamps, committing in batches of 50 to limit transaction overhead.
q = '''
    UPDATE recordings
    SET duration = ?, sum_signal = ?, timestamps = ?
    WHERE id = ?
    '''
batch = []
for i, rec_id in enumerate(to_process):
    rec = str(rec_id) + '.wav'
    print(rec)
    try:
        duration, sum_signal, timestamps = signal_timestamps(
            STORAGE_DIR + rec)
        batch.append((duration, sum_signal, timestamps, rec_id))
        if len(batch) == 50:
            print(f"batch {i} full")
            c.executemany(q, batch)
            conn.commit()
            batch = []
    except Exception as err:
        # Was a bare "except:", which also swallowed KeyboardInterrupt and
        # hid why a recording failed; keep going but report the cause.
        print(f'could not get info of recording {rec}: {err}')

# Flush the final partial batch before closing.
c.executemany(q, batch)
conn.commit()
conn.close()
|
# importing irregular nouns data from irregular_nouns_dict.py (should be in the same folder)
from irregular_nouns_dict import irregular_nouns, nouns_in_plurals
# following English language rules to form plural forms of provided nouns
def plurals(lst):
    """Return a new list where every noun in lst is followed by its plural.

    Each word is appended first, then its plural form, so the result
    alternates singular/plural. Relies on irregular_nouns (singular ->
    plural map) and nouns_in_plurals (nouns whose plural equals the
    singular) imported from irregular_nouns_dict. Branch order matters:
    dictionary lookups take precedence over suffix rules.
    """
    lst_with_plurals = []
    for word in lst:
        # always keep the singular form
        lst_with_plurals.append(word)
        if word in nouns_in_plurals:
            # plural is identical to the singular, so append the word again
            lst_with_plurals.append(word)
        elif word in irregular_nouns:
            # fully irregular plural taken from the imported mapping
            lst_with_plurals.append(irregular_nouns[word])
        elif word in ['addendum', 'bacterium', 'datum', 'erratum', 'medium']:
            # Latin -um -> -a (datum -> data)
            lst_with_plurals.append(word[0:-2] + 'a')
        elif word[-2:] == 'us' and word not in ['bus', 'apparatus', 'corpus', 'genus']:
            # Latin -us -> -i (cactus -> cacti), minus listed exceptions
            lst_with_plurals.append(word[0:-2] + 'i')
        elif word[-2:] == 'is':
            # -is -> -es (analysis -> analyses)
            lst_with_plurals.append(word[0:-2] + 'es')
        elif word[-2:] == 'on' and word not in ['python']:
            # Greek -on -> -a (criterion -> criteria)
            lst_with_plurals.append(word[0:-2] + 'a')
        elif word[-1:] in ['s', 'x', 'z'] or word[-2:] in ['sh', 'ch']:
            # sibilant endings take -es; fez/gas double the final consonant
            if word == 'fez':
                lst_with_plurals.append('fezzes')
                continue
            if word == 'gas':
                lst_with_plurals.append('gasses')
                continue
            lst_with_plurals.append(word + 'es')
        elif word[-1:] == 'f':
            # -f -> -ves (leaf -> leaves) except nouns that just take -s
            exceptions_f = ['roof', 'belief', 'chef', 'chief']
            if word in exceptions_f:
                for item in exceptions_f:
                    if word == item:
                        lst_with_plurals.append(item + 's')
                        break
                continue
            lst_with_plurals.append(word[0:-1] + 'ves')
        elif word[-2:] == 'fe':
            # -fe -> -ves (knife -> knives)
            lst_with_plurals.append(word[0:-2] + 'ves')
        elif word[-1:] == 'y':
            # vowel + y -> -ys; consonant + y -> -ies
            if word[-2:-1] in ['a', 'e', 'i', 'o', 'u']:
                lst_with_plurals.append(word + 's')
                continue
            lst_with_plurals.append(word[0:-1] + 'ies')
        elif word[-1:] == 'o':
            # -o -> -oes (tomato -> tomatoes) except nouns that take plain -s
            exceptions_o = ['photo', 'piano', 'halo']
            if word in exceptions_o:
                for item in exceptions_o:
                    if word == item:
                        lst_with_plurals.append(item + 's')
                        break
                continue
            lst_with_plurals.append(word + 'es')
        else:
            # default rule: just add -s
            lst_with_plurals.append(word + 's')
    return lst_with_plurals
|
import time
from kafka.client import KafkaClient
from kafka.consumer import SimpleConsumer
import os
class Consumer(object):
def __init__(self, addr, group, topic):
self.client = KafkaClient(addr)
self.consumer = SimpleConsumer(self.client, group, topic, max_buffer_size=1310720000)
self.temp_file_path = None
self.temp_file = None
self.topic = topic
self.group = group
self.block_cnt = 0
def consume_topic(self, output_dir):
timestamp = time.strftime('%Y%m%d%H%M%S')
#open file for writing
self.temp_file_path = "/home/ubuntu/FantasyFootball/ingestion/kafka_%s_%s_%s.dat" % (self.topic, self.group, timestamp)
self.temp_file = open(self.temp_file_path,"w")
one_entry = False
while True:
try:
messages = self.consumer.get_messages(count=100, block=False)
#OffsetAndMessage(offset=43, message=Message(magic=0,
# attributes=0, key=None, value='some message'))
for message in messages:
one_entry = True
self.tempfile.write(message.message.value + "\n")
if self.tempfile.tell() > 2000:
self.save_to_hdfs(output_dir)
self.consumer.commit()
except:
self.consumer.seek(0, 2)
if one_entry:
self.save_to_hdfs(output_dir, self.topic)
self.consumer.commit()
def save_to_hdfs(self, output_dir):
self.tempfile.close()
timestamp = time.strftime('%Y%m%d%H%M%S')
hadoop_path = "/user/solivero/playerpoints/history/%s_%s_%s.dat" % (self.group, self.topic, timestamp)
cached_path = "/user/solivero/playerpoints/cached/%s_%s_%s.dat" % (self.group, self.topic, timestamp)
print "Block " + str(self.block_cnt) + ": Saving file to HDFS " + hadoop_path
self.block_cnt += 1
# place blocked messages into history and cached folders on hdfs
os.system("sudo -u hdfs hdfs dfs -put %s %s" % (self.temp_file_path,hadoop_path))
os.system("sudo -u hdfs hdfs dfs -put %s %s" % (self.temp_file_path,cached_path))
os.remove(self.temp_file_path)
timestamp = time.strftime('%Y%m%d%H%M%S')
self.temp_file_path = "/home/ubuntu/fantasyfootball/ingestion/kafka_%s_%s_%s.dat" % (self.topic, self.group, timestamp)
self.temp_file = open(self.temp_file_path, "w")
if __name__ == '__main__':
    # NOTE(review): group/output/topic are assigned here but the Consumer is
    # constructed with literal "hdfs"/"playplay" and `output` is never used --
    # confirm which values are actually intended.
    group = "hdfs"
    output = "/data"
    topic = "hdfs"
    print "\nConsuming topic: [%s] into HDFS" % topic
    # Consume from topic "playplay" on the local broker, landing files under
    # the given HDFS directory (note: the path has no leading slash).
    cons = Consumer(addr="localhost:9092", group="hdfs", topic="playplay")
    cons.consume_topic("user/fantasyfootball")
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
def main():
    """Read MNIST_100.csv, split labels from pixels, and display the first
    digit as a 28x28 image with matplotlib."""
    print("Hello from MNIST_100.csv reader!")
    # Create Dataframe
    frame = pd.read_csv("MNIST_100.csv")
    print(frame)
    # First column holds the labels, the remaining columns are pixels.
    y = frame.iloc[:, 0]
    X = frame.drop('label', axis=1)
    # Reshape the first row into a 28x28 image scaled into [0, 1].
    img = np.array(X[0:1]).reshape(28, 28) / 255
    plt.imshow(img)
    plt.show()


if __name__ == "__main__":
    main()
|
#!/usr/bin/python3
def print_reversed_list_integer(my_list=[]):
    """Print the integers of my_list in reverse order, one per line."""
    if my_list:
        for value in my_list[::-1]:
            print("{:d}".format(value))
|
# Ask for a sentence and print it with its words in reverse order.
input_string = input('Which sentence would you like to be reversed?')
split_string = input_string.split()[::-1]
result = " ".join(split_string)
print(result)
import urllib2
import json
from operator import itemgetter
import datetime
instructorList = ["weesun", "knmnyn", "wgx731", "Leventhan", "franklingu", "Limy", "Muhammad-Muneer"]
"""
This function fetches the json data for a given username
"""
def fetchJSON(userName):
urlString = "http://osrc.dfm.io/%s.json" % userName
jsonData = json.load(urllib2.urlopen(urlString))
return jsonData
"""
This function extracts the push event data for a given osrc user json
"""
def getPushEvents(jsonData):
pushDict = [eventData for eventData in jsonData["usage"]["events"] if eventData["type"] == "PushEvent"]
return pushDict
"""
Creates a dictionary mapping instructors to their push event dictionary
"""
def getPushEventDictionary():
pushEventDictionary = {}
for instructor in instructorList:
jsonData = fetchJSON(instructor)
pushEvents = getPushEvents(jsonData)
if (pushEvents):
pushEventDictionary[instructor] = pushEvents[0]
else:
print instructor, " push events not found"
return pushEventDictionary;
"""
Sorts the instructor activity for a given hour from max to min, and resolves conflicts lexicographically
Returns a list of tuples of the form (instructor name, hours)
"""
def getSortedHourActivityList(pushEventDictionary, hour):
#First argument creates a tuple list sorted lexicographically
hourActivityList = sorted([(instructor, pushEventDictionary[instructor]["day"][hour]) for instructor in pushEventDictionary],
key = itemgetter(0), reverse = False)
#Sorts the tuple list by hours
#Note that since we first sorted by strings, and both sorts are stable, lexicographic ordering for
#conflicts are preserved
hourActivityList.sort(key = itemgetter(1), reverse = True)
return hourActivityList
"""
Sorts the instructor activity for a given day from max to min, and resolves conflicts lexicographically
Returns a list of tuples of the form (instructor name, hours)
"""
def getSortedDayActivityList(pushEventDictionary, day):
dayActivityList = sorted([(instructor, pushEventDictionary[instructor]["week"][day]) for instructor in pushEventDictionary],
key = itemgetter(0), reverse = False)
dayActivityList.sort(key = itemgetter(1), reverse = True)
return dayActivityList
def outputHourHighScores(pushEventDictionary):
print "\nHour High Scores: \n"
for i in range(0, 24):
hourActivityList = getSortedHourActivityList(pushEventDictionary, i)
print "%d:00 - %d:00 => %s" % (i, i+1, hourActivityList[0][0])
def outputDayHighScores(pushEventDictionary):
print "\nDay High Scores: \n"
for i in range(0, 7):
dayActivityList = getSortedDayActivityList(pushEventDictionary, i)
#1990 started on a Monday, and the json data day 0 is Sunday, and python day starts from 1, so we offset by 7
#For why %A prints day, look up the datetime documentation for python
print "%s => %s" % (datetime.date(day = i + 7, year = 1990, month = 1).strftime("%A"), dayActivityList[0][0])
def run():
    """Fetch instructor push data, then print the hour and day high scores."""
    data = getPushEventDictionary()
    outputHourHighScores(data)
    outputDayHighScores(data)
run()
from .Parameter import Parameter, VerifyFailed
import datetime
__all__ = ['Datetime', 'Date']
class Datetime(Parameter):
    """Convert a timestamp (int / float) parameter value to a datetime.datetime."""
    rule_order = ['type']

    def rule_type(self, value):
        # Exact type checks (not isinstance) to match the original contract.
        kind = type(value)
        if kind is datetime.datetime:
            return value
        if kind in (int, float):
            return datetime.datetime.fromtimestamp(value)
        raise VerifyFailed('参数 {} 的值必须是 timestamp (int / float / datetime.datetime),got {} {}'.format(
            self.name, type(value), value))
class Date(Parameter):
    """Convert a timestamp (int / float) parameter value to a datetime.date."""
    rule_order = ['type']

    def rule_type(self, value):
        # Exact type checks (not isinstance) to match the original contract.
        kind = type(value)
        if kind is datetime.date:
            return value
        if kind in (int, float):
            return datetime.date.fromtimestamp(value)
        raise VerifyFailed('参数 {} 的值必须是 timestamp (int / float / datetime.date),got {} {}'.format(
            self.name, type(value), value))
|
# Generated by Django 2.1.4 on 2019-02-07 18:21
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django (see header comment); edit with care.

    dependencies = [
        ('coreapp', '0080_auto_20190130_0336'),
    ]

    operations = [
        # Re-declares ShippingAddress.phone_number: CharField(max_length=17)
        # validated against an international-format regex (optional '+',
        # optional leading 1, then 10-15 digits).
        migrations.AlterField(
            model_name='shippingaddress',
            name='phone_number',
            field=models.CharField(help_text='Enter your 10 digit phone number without any prefix code.', max_length=17, validators=[django.core.validators.RegexValidator(message="Phone number must be entered in the format: '+9999999999'. Up to 15 digits allowed.", regex='^\\+?1?\\d{10,15}$')]),
        ),
    ]
|
#!/usr/bin/env python
###############################################################################
#
# Purpose: Create a browse image with 1000 pixels wide from RGB TIF input files.
# Paul G: inclusion of basig FAST format handling
#
# Date: 2012-06-01
# Author: Simon.Oliver@ga.gov.au and Fei.Zhang@ga.gov.au
# Revisions:
# 2012-07-01: Fei Zhang refactored the program module into a class
# and fixed all tab-space mixed issues
# 2012-07-05: Fei and Paul make the create() method to take either
# "LSTRETCH" or "CONVERT" parameters, to choose an available jpeg creation algorithm
# The LSTRETCH is fast and make better images. But it requires GDAL and numpy installed with the python
# 2012-08-06: Paul: Inclusion of FAST format code from GISTA packaging
# 2010-08-07: Paul and Simon O: Merged FAST format handling into main GDAL code utalizing
# same stretching algorithm
#
#
###############################################################################
import logging
import sys
import subprocess
import math as math
import os
import tempfile
_log = logging.getLogger(__name__)
class BrowseImgCreator(object):
    """Builds a browse (thumbnail) JPEG from three single-band RGB inputs.

    Works with GeoTIFF and FAST-format bands. Scene geometry (rows, columns,
    pixel size, orientation) is read from the USGS *_MTL.txt header found
    beside the input bands.
    """

    def __init__(self):
        # Scene geometry; populated by create() from the MTL header.
        self.incols = None
        self.inrows = None
        self.inpixelx = None

    def setInput_RGBfiles(self, rfile, gfile, bfile, nodata_value):
        """Record the red/green/blue input band paths and the no-data value."""
        self.red_file = rfile
        self.green_file = gfile
        self.blue_file = bfile
        self.nodata = nodata_value
        return

    def setOut_Browsefile(self, browsefile, cols):
        """Record the output browse path and target width in pixels.

        cols == 0 means produce a full-size image. Temporary working files
        are written into the output file's directory.
        """
        self.outthumb = browsefile
        self.outcols = cols
        self.tempDir = os.path.dirname(browsefile)
        return

    def create(self, alg):
        """Read scene geometry from the MTL header, then build the browse
        image with algorithm alg ("LSTRETCH").

        Returns the output pixel resolution in metres, or a negative error
        code on failure.
        """
        # next "Path" or North Up from the MTL file
        baseDir = os.path.dirname(self.green_file)
        mtl_header_path = None
        for root, dirs, files in os.walk(baseDir):
            for f in files:
                if f.find('_MTL.txt') > 1:
                    mtl_header_path = os.path.join(baseDir, f)
        # BUG FIX: previously an absent MTL header caused an obscure
        # NameError; fail with an explicit message instead.
        if mtl_header_path is None:
            raise Exception("No MTL header found in dir %s" % baseDir)
        fp = open(mtl_header_path)
        try:
            input_data = fp.read()
        finally:
            fp.close()  # BUG FIX: the file handle was previously leaked
        for line in input_data.split('\n'):
            if line.find('ORIENTATION') > -1:
                self.orientation = line.split('=')[1].strip().strip('"')
            if line.find('PRODUCT_SAMPLES_REF') > -1:
                self.incols = int(line.split('=')[1].strip())
            if line.find('PRODUCT_LINES_REF') > -1:
                self.inrows = int(line.split('=')[1].strip())
            if line.find('GRID_CELL_SIZE_REF') > -1:
                self.inpixelx = float(line.split('=')[1].strip())
            # Different tags for Landsat-8:
            if line.find('REFLECTIVE_SAMPLES') > -1:
                self.incols = int(line.split('=')[1].strip())
            if line.find('REFLECTIVE_LINES') > -1:
                self.inrows = int(line.split('=')[1].strip())
            if line.find('GRID_CELL_SIZE_REFLECTIVE') > -1:
                self.inpixelx = float(line.split('=')[1].strip())
        # Special case for TIRS data set: THERMAL only
        if self.incols is None:
            for line in input_data.split('\n'):
                if line.find('THERMAL_SAMPLES') > -1:
                    self.incols = int(line.split('=')[1].strip())
                if line.find('THERMAL_LINES') > -1:
                    self.inrows = int(line.split('=')[1].strip())
                if line.find('GRID_CELL_SIZE_THERMAL') > -1:
                    self.inpixelx = float(line.split('=')[1].strip())
        _log.info("Orientation: %s", self.orientation)
        _log.info("Lines/pixels: %s, %s", self.inrows, self.incols)
        _log.info("Pixel Size: %s", self.inpixelx)
        if alg == "LSTRETCH":
            # Path (NOM) products get a simple scaled conversion; North-Up
            # products get the full linear stretch.
            if self.orientation == 'NOM':
                return self.generate_Path_browse_image()
            else:
                return self.create_linear_stretch()
        else:
            _log.error("Unrecognised algorithm '%s'", alg)
            return -1

    def initial_fast(self, green_file):
        """Locate the FAST-format *_HRF.FST header beside the inputs and
        return (header_path, red_band_no, green_band_no, blue_band_no).

        Band numbers are derived from the header's "BANDS PRESENT" entry and
        the band suffix (_B<n>) of each input file name.
        """
        # need to locate the HRF fast format header
        baseDir = os.path.dirname(self.green_file)
        fast_header_path = None
        for root, dirs, files in os.walk(baseDir):
            for f in files:
                if f.find('_HRF.FST') > 1:
                    fast_header_path = os.path.join(baseDir, f)
        if not fast_header_path:
            raise Exception("No fast header path found in dir %s" % baseDir)
        # must determine correct band numbers from "BANDS PRESENT"
        IredB = self.red_file.split('_B')[-1][0]
        IgreenB = self.green_file.split('_B')[-1][0]
        IblueB = self.blue_file.split('_B')[-1][0]
        p = subprocess.Popen(['grep', 'PRESENT', fast_header_path],
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        (stdout_msg, stderr_msg) = p.communicate()
        if p.returncode != 0:
            raise RuntimeError("Failure reading BANDS from file '%s'. Exit status %d: %s" %
                               (fast_header_path, p.returncode, stderr_msg))
        bandsPresent = stdout_msg.strip().split('=')[1]
        # Band number = 1-based position of the band digit in BANDS PRESENT.
        for i in range(len(bandsPresent)):
            if bandsPresent[i] == IredB:
                redB = i + 1
            if bandsPresent[i] == IgreenB:
                greenB = i + 1
            if bandsPresent[i] == IblueB:
                blueB = i + 1
        return fast_header_path, redB, greenB, blueB

    ###############################################################################################
    def create_linear_stretch(self):
        """Linear contrast stretch using GDAL: build an RGB VRT, warp it to
        the output size, stretch each band to 0-255 clipping 1% tails, and
        write the result as a JPEG. Returns the output resolution in metres.
        """
        import gdal
        import gdalconst
        import numpy.ma as ma
        _log.info("Generating Browse Image for Ortho/Map product")
        tempDir = self.tempDir
        _log.info("Temp directory: %s", tempDir)
        # working files
        tmpPrefix = "temp_" + os.path.basename(self.blue_file).split('_B')[0]
        file_to = os.path.join(tempDir, tmpPrefix + "_RGB.vrt")
        warp_to_file = os.path.join(tempDir, tmpPrefix + "_RGBwarped.vrt")
        outtif = os.path.join(tempDir, tmpPrefix + "_browseimg.tif")
        # Different initial step for FAST vs TIFF
        if self.green_file.find("FST") > 1:
            file_to_R = os.path.join(tempDir, tmpPrefix + "_band_R.vrt")
            file_to_G = os.path.join(tempDir, tmpPrefix + "_band_G.vrt")
            file_to_B = os.path.join(tempDir, tmpPrefix + "_band_B.vrt")
            (fast_header_path, redB, greenB, blueB) = self.initial_fast(self.green_file)
            # Extract each band of the FAST file into its own VRT first.
            _log.info("Processing red band")
            subprocess.call(['gdal_translate', '-b', str(redB), fast_header_path, file_to_R])
            _log.info("... green band")
            subprocess.call(['gdal_translate', '-b', str(greenB), fast_header_path, file_to_G])
            _log.info("... and blue band")
            subprocess.call(['gdal_translate', '-b', str(blueB), fast_header_path, file_to_B])
            # Build the RGB Virtual Raster at full resolution
            step1 = ["gdalbuildvrt",
                     "-overwrite",
                     "-separate",
                     file_to,
                     file_to_R,
                     file_to_G,
                     file_to_B]
            _log.info("First step: %s", step1)
            # (previously the same argument list was duplicated inline)
            subprocess.call(step1, cwd=tempDir)
        else:
            # Build the RGB Virtual Raster at full resolution
            step1 = ["gdalbuildvrt",
                     "-overwrite",
                     "-separate",
                     file_to,
                     self.red_file,
                     self.green_file,
                     self.blue_file]
            _log.info("First step: %s", step1)
            subprocess.call(step1, cwd=tempDir)
        if not os.path.isfile(file_to):
            _log.error("error creating .vrt file '%s'", file_to)
            return -9
        # Determine the pixel scaling to get the outcols (1024 usually) wide thumbnail
        vrt = gdal.Open(file_to)
        intransform = vrt.GetGeoTransform()
        inpixelx = intransform[1]
        inrows = vrt.RasterYSize
        incols = vrt.RasterXSize
        _log.info("incols %s, inrows %s", incols, inrows)
        outcols = self.outcols
        if outcols == 0:
            # ZERO indicates full size so just convert
            outresx = inpixelx
            outrows = inrows
            outcols = incols
        else:
            outresx = inpixelx * incols / self.outcols  # output peg resolution
            outrows = int(math.ceil((float(inrows) / float(incols)) * self.outcols))
        _log.info("pixels: %s,%s,%s", outresx, outcols, outrows)
        subprocess.call(["gdalwarp",
                         "-of", "VRT",
                         "-tr", str(outresx), str(outresx),
                         "-r", "near",
                         "-overwrite",
                         file_to, warp_to_file])
        # Open VRT file to array
        vrt = gdal.Open(warp_to_file)
        bands = (1, 2, 3)
        driver = gdal.GetDriverByName("GTiff")
        outdataset = driver.Create(outtif, outcols, outrows, 3, gdalconst.GDT_Byte)
        # Loop through bands and apply Scale and Offset
        for bandnum, band in enumerate(bands):
            vrtband = vrt.GetRasterBand(band)
            vrtband_array = vrtband.ReadAsArray()
            nbits = gdal.GetDataTypeSize(vrtband.DataType)
            dataTypeName = gdal.GetDataTypeName(vrtband.DataType)
            # BUG FIX: self.nodata may arrive as a string (e.g. from argv);
            # %d on a string raised TypeError.
            _log.info("nbits = %d, type = %s, self.nodata = %d" % (nbits, dataTypeName, int(self.nodata)))
            dfScaleDstMin, dfScaleDstMax = 0.0, 255.0
            # Determine scale limits by clipping the bottom/top 1% of the
            # histogram, excluding the no-data bin.
            if dataTypeName == "Int16":  # signed integer
                count = 32767 + int(self.nodata)  # for 16 bits 32767-999 = 31768
                histogram = vrtband.GetHistogram(-32767, 32767, 65536)
            elif dataTypeName == "UInt16":  # unsigned integer
                count = 0
                histogram = vrtband.GetHistogram(-0.5, 65535.5, 65536)
            else:
                count = 0
                histogram = vrtband.GetHistogram()
            total = 0
            cliplower = int(0.01 * (sum(histogram) - histogram[count]))
            clipupper = int(0.99 * (sum(histogram) - histogram[count]))
            dfScaleSrcMin = count
            while total < cliplower and count < len(histogram) - 1:
                count = count + 1
                total = total + int(histogram[count])
                dfScaleSrcMin = count
            if dataTypeName == "Int16":
                count = 32767 + int(self.nodata)
            else:
                count = 0
            total = 0
            dfScaleSrcMax = count
            while total < clipupper and count < len(histogram) - 1:
                count = count + 1
                total = total + int(histogram[count])
                dfScaleSrcMax = count
            if dataTypeName == "Int16":
                # Int16 histogram bins start at -32768; shift back to values.
                dfScaleSrcMin = dfScaleSrcMin - 32768
                dfScaleSrcMax = dfScaleSrcMax - 32768
            # GEMDOPS-1040 need to trap possible divide by zero in the stats
            srcDiff = dfScaleSrcMax - dfScaleSrcMin
            if srcDiff == 0:
                _log.warn("dfScaleSrc Min and Max are equal! Applying correction")
                srcDiff = 1
            # Determine gain and offset
            dfScale = (dfScaleDstMax - dfScaleDstMin) / srcDiff
            dfOffset = -1 * dfScaleSrcMin * dfScale + dfScaleDstMin
            # Apply gain and offset; no-data pixels are masked out first.
            outdataset.GetRasterBand(band).WriteArray(
                (ma.masked_less_equal(vrtband_array, int(self.nodata)) * dfScale) + dfOffset)
        outdataset = None
        vrt = None  # necessary to close the files so warp_to_file can be removed below
        # GDAL Create doesn't support JPEG so we need to make a copy of the GeoTIFF
        subprocess.call(["gdal_translate", "-of", "JPEG", outtif, self.outthumb])
        # Cleanup working VRT files
        os.remove(file_to)
        os.remove(warp_to_file)
        os.remove(outtif)
        if self.green_file.find("FST") > 1:
            os.remove(file_to_R)
            os.remove(file_to_G)
            os.remove(file_to_B)
        # Done
        return outresx  # output jpeg resolution in meters

    def generate_Path_browse_image(self):
        """Uses GDAL to create a JPEG browse image for a Path dataset."""
        _log.info("Generating Browse Image for Path product")
        # calculate scale factor. required for TIF and FAST
        outcols = self.outcols
        if outcols == 0:
            # ZERO indicates full size so just convert
            outresx = self.inpixelx
            outrows = self.inrows
            outcols = self.incols
        else:
            outresx = self.inpixelx * self.incols / self.outcols  # output peg resolution
            outrows = int(math.ceil((float(self.inrows) / float(self.incols)) * self.outcols))
        _log.info("pixels: %s,%s,%s", outresx, outcols, outrows)
        # Different initial step for FAST vs TIFF
        if self.green_file.find("FST") > 1:
            (fast_header_path, redB, greenB, blueB) = self.initial_fast(self.green_file)
            args = ['gdal_translate',
                    '-b', str(redB),
                    '-b', str(greenB),
                    '-b', str(blueB),
                    '-outsize', str(outcols), str(outrows), '-of', 'JPEG', '-scale',
                    fast_header_path, self.outthumb]
            subprocess.call(args)
            gdal_xml_file = self.outthumb + '.aux.xml'
            if os.path.exists(gdal_xml_file):
                os.remove(gdal_xml_file)
            return outresx
        fd, outtif = tempfile.mkstemp(suffix="_browseimg.tif")
        os.close(fd)  # BUG FIX: the mkstemp descriptor was leaked
        # BUG FIX: gdal_merge's option is spelled '-separate'; the previous
        # '-seperate' was rejected by gdal_merge.py.
        args = ['gdal_merge.py', '-separate', self.blue_file, self.green_file, self.red_file,
                '-o', outtif]
        _log.info("Running: '%s'", args)
        subprocess.call(args)
        # convert tiff to jpeg
        args = ['gdal_translate', '-b', '1', '-b', '2', '-b', '3',
                '-outsize', str(outcols), str(outrows), '-of', 'JPEG', '-scale',
                outtif, self.outthumb]
        _log.info("Running: '%s'", args)
        subprocess.call(args)
        # BUG FIX: was os.path.remove(), which does not exist (AttributeError)
        os.remove(outtif)
        gdal_xml_file = self.outthumb + '.aux.xml'
        if os.path.exists(gdal_xml_file):
            os.remove(gdal_xml_file)
        return outresx
def createThumbnail(red, green, blue, nodata_val, outBrowseFile, width=0):
    """Convenience wrapper: build a browse JPEG from three band files using
    the linear-stretch algorithm; returns the output pixel resolution."""
    creator = BrowseImgCreator()
    creator.setInput_RGBfiles(red, green, blue, nodata_val)
    creator.setOut_Browsefile(outBrowseFile, int(width))
    return creator.create("LSTRETCH")
#############################################################################################
# Example Usage:
# python browseimg_creator.py
# /home/fzhang/SoftLabs/geolab/data/LandsatImages/LT5_20080310_091_077/L5091077_07720080310_B70.TIF
# /home/fzhang/SoftLabs/geolab/data/LandsatImages/LT5_20080310_091_077/L5091077_07720080310_B40.TIF
# /home/fzhang/SoftLabs/geolab/data/LandsatImages/LT5_20080310_091_077/L5091077_07720080310_B10.TIF
# 0 LT5_20080310_091_077_test.jpg 1024
# nodata=0 for 8-bits images
# nodata=-999 for 16-bits images
############################################################################################
if __name__ == "__main__":
# check for correct usage - if not prompt user
print "Browse Image Creation using GDAL"
if len(sys.argv) < 6:
print "*----------------------------------------------------------------*"
print ""
print " thumbnail.py computes a linear stretch and applies to input image"
print ""
print "*----------------------------------------------------------------*"
print ""
print " usage: thumbnail.py <red image file> <green image file>"
print " <blue image file> <output image file> <input null value>"
print " <optional output width in pixels>"
print "NOTE output width = 0 indicates full size browse image"
sys.exit(1)
jpeg_pixel_size = createThumbnail(sys.argv[1],
sys.argv[2],
sys.argv[3],
sys.argv[4],
sys.argv[5],
0 if len(sys.argv) < 7 else int(sys.argv[6]))
print "Output jpeg pixel size (resolution) = %s" % (jpeg_pixel_size)
|
import argparse
import glob
import logging
import os
import random
import timeit
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
import collections
from transformers import (
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
WEIGHTS_NAME,
AdamW,
AutoConfig,
AutoTokenizer,
get_linear_schedule_with_warmup,
)
from model import SentimentClassification, Baseline
from utils import AscProcessor, convert_examples_to_features, get_label2id
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def set_seed(args):
    """Seed python, numpy and torch RNGs from args.seed for reproducibility.

    Also seeds every CUDA device when args.n_gpu > 0.
    """
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
def to_list(tensor):
    """Detach *tensor* from the autograd graph, move it to the CPU, and
    return its values as a (nested) Python list."""
    detached = tensor.detach()
    return detached.cpu().tolist()
def train(args, train_dataset, model, tokenizer):
    """Fine-tune `model` on `train_dataset`.

    Handles single-/multi-GPU and distributed setup, optional fp16 via apex,
    gradient accumulation, resuming from a ``checkpoint-<step>`` directory,
    periodic TensorBoard logging/evaluation and checkpoint saving.

    Returns:
        tuple: (global_step, average training loss per optimization step).
    """
    if args.local_rank in [-1, 0]:
        # Only the main process writes TensorBoard summaries.
        tb_writer = SummaryWriter()
    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
    train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
    if args.max_steps > 0:
        # max_steps overrides num_train_epochs; derive the epoch count from it.
        t_total = args.max_steps
        args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
    else:
        t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
    # Prepare optimizer and schedule (linear warmup and decay).
    # Parameters whose name contains these substrings get no weight decay.
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {
            "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
            "weight_decay": args.weight_decay,
        },
        {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
    )
    # Check if saved optimizer or scheduler states exist (checkpoint resume).
    if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile(
        os.path.join(args.model_name_or_path, "scheduler.pt")
    ):
        # Load in optimizer and scheduler states
        optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt")))
        scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt")))
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
    # multi-gpu training (should be after apex fp16 initialization)
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)
    # Distributed training (should be after apex fp16 initialization)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    # Train!
    logger.info("***** Running training *****")
    logger.info(" Num examples = %d", len(train_dataset))
    logger.info(" Num Epochs = %d", args.num_train_epochs)
    logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
    logger.info(
        " Total train batch size (w. parallel, distributed & accumulation) = %d",
        args.train_batch_size
        * args.gradient_accumulation_steps
        * (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
    )
    logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info(" Total optimization steps = %d", t_total)
    global_step = 1
    epochs_trained = 0
    steps_trained_in_current_epoch = 0
    # Check if continuing training from a checkpoint
    if os.path.exists(args.model_name_or_path):
        try:
            # set global_step to gobal_step of last saved checkpoint from model path
            # (expects a path of the form .../checkpoint-<step>)
            checkpoint_suffix = args.model_name_or_path.split("-")[-1].split("/")[0]
            global_step = int(checkpoint_suffix)
            epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
            steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps)
            logger.info(" Continuing training from checkpoint, will skip to saved global_step")
            logger.info(" Continuing training from epoch %d", epochs_trained)
            logger.info(" Continuing training from global step %d", global_step)
            logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)
        except ValueError:
            # Path does not end in a step number: start fresh.
            logger.info(" Starting fine-tuning.")
    tr_loss, logging_loss = 0.0, 0.0
    model.zero_grad()
    train_iterator = trange(
        epochs_trained, int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]
    )
    # Added here for reproductibility
    set_seed(args)
    for _ in train_iterator:
        epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
        for step, batch in enumerate(epoch_iterator):
            # Skip past any already trained steps if resuming training
            if steps_trained_in_current_epoch > 0:
                steps_trained_in_current_epoch -= 1
                continue
            model.train()
            batch = tuple(t.to(args.device) for t in batch)
            inputs = {
                "input_ids": batch[0],
                "attention_mask": batch[1],
                "token_type_ids": batch[2],
                "sentiment_id": batch[-1],
            }
            if args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
                # These model families have no token-type (segment) embeddings.
                del inputs["token_type_ids"]
            if args.model_type in ["xlnet", "xlm"]:
                inputs.update({"cls_index": batch[5], "p_mask": batch[6]})
                if args.version_2_with_negative:
                    inputs.update({"is_impossible": batch[7]})
                if hasattr(model, "config") and hasattr(model.config, "lang2id"):
                    inputs.update(
                        {"langs": (torch.ones(batch[0].shape, dtype=torch.int64) * args.lang_id).to(args.device)}
                    )
            outputs = model(**inputs)
            # model outputs are always tuple in transformers (see doc)
            loss = outputs[0]
            if args.n_gpu > 1:
                loss = loss.mean()  # mean() to average on multi-gpu parallel (not distributed) training
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps
            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()
            tr_loss += loss.item()
            # Only step the optimizer every gradient_accumulation_steps batches.
            if (step + 1) % args.gradient_accumulation_steps == 0:
                if args.fp16:
                    torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
                else:
                    torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                optimizer.step()
                scheduler.step()  # Update learning rate schedule
                model.zero_grad()
                global_step += 1
                # Log metrics
                if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
                    # Only evaluate when single GPU otherwise metrics may not average well
                    if args.local_rank == -1 and args.evaluate_during_training:
                        results = evaluate(args, model, tokenizer)
                        for key, value in results.items():
                            tb_writer.add_scalar("eval_{}".format(key), value, global_step)
                    tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step)
                    tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step)
                    logging_loss = tr_loss
                # Save model checkpoint
                if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
                    output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step))
                    # Take care of distributed/parallel training
                    model_to_save = model.module if hasattr(model, "module") else model
                    model_to_save.save_pretrained(output_dir)
                    tokenizer.save_pretrained(output_dir)
                    torch.save(args, os.path.join(output_dir, "training_args.bin"))
                    logger.info("Saving model checkpoint to %s", output_dir)
                    torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
                    torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
                    logger.info("Saving optimizer and scheduler states to %s", output_dir)
            if args.max_steps > 0 and global_step > args.max_steps:
                epoch_iterator.close()
                break
        if args.max_steps > 0 and global_step > args.max_steps:
            train_iterator.close()
            break
    if args.local_rank in [-1, 0]:
        tb_writer.close()
    return global_step, tr_loss / global_step
def evaluate(args, model, tokenizer, prefix="", test=False, enriched=False):
dataset, examples, features = load_and_cache_examples(args, tokenizer, evaluate=True,
test=test, output_examples=True,
enriched=enriched)
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(dataset)
eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# multi-gpu evaluate
if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
start_time = timeit.default_timer()
num_true = 0
num_all = 0
for batch in tqdm(eval_dataloader, desc="Evaluating"):
model.eval()
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
inputs = {
"input_ids": batch[0],
"attention_mask": batch[1],
"token_type_ids": batch[2],
'sentiment_id': batch[-1]
}
outputs = model(**inputs)
logits = outputs['logits']
label_id = outputs['sentiment_id']
predict_result = torch.max(logits, dim=-1)[1]
compare = (label_id == predict_result).long()
num_true = num_true + sum(compare).tolist()
num_all = num_all + compare.shape.numel()
evalTime = timeit.default_timer() - start_time
logger.info(" Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(dataset))
precision = 100.0 * num_true / num_all
results = collections.OrderedDict(
[
("precision", precision),
]
)
logger.info("Precision = %f", precision / 100.0)
return results
def load_and_cache_examples(args, tokenizer, evaluate=False, test=False, output_examples=False, enriched=False):
    """Build (or load from a torch cache file) the tensor dataset for a split.

    Split selection: test=True -> 'test' (or 'test_enriched' when enriched),
    else evaluate=True -> 'dev', else 'train'. The cache file name encodes
    dataset, split, model name and max sequence length.

    Returns:
        dataset, or (dataset, examples, features) when output_examples=True.
    """
    if args.local_rank not in [-1, 0] and not evaluate:
        # Make sure only the first process in distributed training process the dataset, and the others will use the cache
        torch.distributed.barrier()
    # Load data features from cache or dataset file
    input_dir = args.data_dir if args.data_dir else "."
    if test:
        if enriched:
            name = 'test_enriched'
        else:
            name = 'test'
    elif evaluate:
        name = 'dev'
    else:
        name = "train"
    cached_features_file = os.path.join(
        input_dir,
        "{}_{}_{}_{}.cached".format(
            args.dataset,
            name,
            # Last non-empty path component of the model name/path.
            list(filter(None, args.model_name_or_path.split("/"))).pop(),
            str(args.max_seq_length),
        ),
    )
    # Init features and dataset from cache if it exists
    if os.path.exists(cached_features_file) and not args.overwrite_cache:
        logger.info("Loading features from cached file %s", cached_features_file)
        features_and_dataset = torch.load(cached_features_file)
        features, dataset, examples = (
            features_and_dataset["features"],
            features_and_dataset["dataset"],
            features_and_dataset["examples"],
        )
    else:
        logger.info("Creating features from dataset file at %s", input_dir)
        processor = AscProcessor()
        if test:
            examples = processor.get_test_examples(args.data_dir, filename=args.dataset, enriched=enriched)
        elif evaluate:
            examples = processor.get_dev_examples(args.data_dir, filename=args.dataset)
        else:
            examples = processor.get_train_examples(args.data_dir, filename=args.dataset)
        features, dataset = convert_examples_to_features(
            examples=examples,
            tokenizer=tokenizer,
            max_seq_length=args.max_seq_length,
            doc_stride=args.doc_stride,
            max_query_length=args.max_query_length,
            is_training=not evaluate,
            return_dataset="pt",
            threads=args.threads,
        )
        if args.local_rank in [-1, 0]:
            logger.info("Saving features into cached file %s", cached_features_file)
            torch.save({"features": features, "dataset": dataset, "examples": examples}, cached_features_file)
    if args.local_rank == 0 and not evaluate:
        # Release the non-main processes waiting at the barrier above.
        torch.distributed.barrier()
    if output_examples:
        return dataset, examples, features
    return dataset
def training_arguments():
    """Build and return the argparse parser for this training script.

    Required: --dataset, --model_type, --model_name_or_path, --output_dir.
    The remaining flags control data locations, tokenization limits,
    train/eval/test modes, optimizer hyperparameters, logging/saving cadence,
    fp16 and distributed settings.
    """
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--dataset",
        default=None,
        type=str,
        required=True,
        help="dataset"
    )
    parser.add_argument(
        "--model_type",
        default=None,
        type=str,
        required=True,
        help="Model type selected in the list: " + ", ".join(MODEL_TYPES),
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model checkpoints and predictions will be written.",
    )
    # Other parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        help="The input data dir. Should contain the .json files for the task."
        + "If no data dir or train/predict files are specified, will run with tensorflow_datasets.",
    )
    parser.add_argument(
        "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument(
        "--cache_dir",
        default="",
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--version_2_with_negative",
        action="store_true",
        help="If true, the SQuAD examples contain some that do not have an answer.",
    )
    parser.add_argument(
        "--null_score_diff_threshold",
        type=float,
        default=0.0,
        help="If null_score - best_non_null is greater than the threshold predict null.",
    )
    parser.add_argument(
        "--max_seq_length",
        default=384,
        type=int,
        help="The maximum total input sequence length after WordPiece tokenization. Sequences "
        "longer than this will be truncated, and sequences shorter than this will be padded.",
    )
    parser.add_argument(
        "--doc_stride",
        default=128,
        type=int,
        help="When splitting up a long document into chunks, how much stride to take between chunks.",
    )
    parser.add_argument(
        "--max_query_length",
        default=64,
        type=int,
        help="The maximum number of tokens for the question. Questions longer than this will "
        "be truncated to this length.",
    )
    # Mode flags: which phases to run.
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument("--do_test", action="store_true", help="Whether to run testing.")
    parser.add_argument("--do_test_enriched", action="store_true", help="Whether to use ARTS for testing.")
    parser.add_argument("--do_intervention", action="store_true", help="Whether to do intervention.")
    parser.add_argument(
        "--evaluate_during_training", action="store_true", help="Run evaluation during training at each logging step."
    )
    parser.add_argument(
        "--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model."
    )
    # Optimizer / schedule hyperparameters.
    parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.")
    parser.add_argument(
        "--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation."
    )
    parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
    parser.add_argument(
        "--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform."
    )
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument(
        "--n_best_size",
        default=20,
        type=int,
        help="The total number of n-best predictions to generate in the nbest_predictions.json output file.",
    )
    parser.add_argument(
        "--max_answer_length",
        default=30,
        type=int,
        help="The maximum length of an answer that can be generated. This is needed because the start "
        "and end predictions are not conditioned on one another.",
    )
    parser.add_argument(
        "--verbose_logging",
        action="store_true",
        help="If true, all of the warnings related to data processing will be printed. "
        "A number of warnings are expected for a normal SQuAD evaluation.",
    )
    parser.add_argument(
        "--lang_id",
        default=0,
        type=int,
        help="language id of input for language-specific xlm models (see tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)",
    )
    # Logging / checkpointing cadence.
    parser.add_argument("--logging_steps", type=int, default=500, help="Log every X updates steps.")
    parser.add_argument("--save_steps", type=int, default=500, help="Save checkpoint every X updates steps.")
    parser.add_argument(
        "--eval_all_checkpoints",
        action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
    )
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    # Distributed / mixed-precision settings.
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
        "See details at https://nvidia.github.io/apex/amp.html",
    )
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--threads", type=int, default=1, help="multiple threads for converting example to features")
    return parser
def get_biased_feature(biased_model_path, args, tokenizer):
    """Compute (or load from cache) per-class mean hidden-state vectors.

    Runs the biased Baseline model over the training set and averages its
    hidden states grouped by the model's *predicted* class (0/1/2).
    The result is cached as "bias.features" inside biased_model_path.

    Returns:
        list of three tensors [part0, part1, part2], one mean vector per class.
    """
    cached_features_path = os.path.join(biased_model_path, "bias.features")
    if os.path.exists(cached_features_path) and not args.overwrite_cache:
        bias_feature = torch.load(cached_features_path)
    else:
        dataset, examples, features = load_and_cache_examples(args, tokenizer, evaluate=False,
                                                              test=False, output_examples=True,
                                                              enriched=False)
        print('load biased features...... ...... ')
        config = AutoConfig.from_pretrained(
            biased_model_path,
            num_labels=len(get_label2id().keys()),
            max_length=args.max_seq_length
        )
        model = Baseline.from_pretrained(
            biased_model_path,
            config=config,
        )
        model.to(args.device)
        model.eval()
        sampler = SequentialSampler(dataset)
        dataloader = DataLoader(dataset, sampler=sampler, batch_size=32)
        # One bucket of hidden-state rows per predicted class.
        part0_feature = []
        part1_feature = []
        part2_feature = []
        for batch in tqdm(dataloader, desc="Evaluating"):
            model.eval()
            batch = tuple(t.to(args.device) for t in batch)
            with torch.no_grad():
                inputs = {
                    "input_ids": batch[0],
                    "attention_mask": batch[1],
                    "token_type_ids": batch[2],
                    'sentiment_id': batch[-1]
                }
                outputs = model(**inputs)
                logits = outputs['logits']  # 32 * 3
                label_id = outputs['sentiment_id']
                # assumed shape (batch, 768) given the expand(-1, 768) below — TODO confirm
                hidden_states = outputs['hidden_states']  # 32 * 768
                predict_id = torch.max(logits, dim=-1)[1]
                # Rows the biased model predicted as class 0.
                select_vec = (predict_id == 0)
                select_part0 = torch.masked_select(hidden_states, (select_vec.unsqueeze(dim=-1)).expand(-1, 768))
                select_part0 = select_part0.reshape(-1, 768)
                part0_feature.append(select_part0)
                # Rows predicted as class 1.
                select_vec = (predict_id == 1)
                select_part1 = torch.masked_select(hidden_states, (select_vec.unsqueeze(dim=-1)).expand(-1, 768))
                select_part1 = select_part1.reshape(-1, 768)
                part1_feature.append(select_part1)
                # Rows predicted as class 2.
                select_vec = (predict_id == 2)
                select_part2 = torch.masked_select(hidden_states, (select_vec.unsqueeze(dim=-1)).expand(-1, 768))
                select_part2 = select_part2.reshape(-1, 768)
                part2_feature.append(select_part2)
        part0 = torch.cat(part0_feature, dim=0)  # num0 * 768
        part1 = torch.cat(part1_feature, dim=0)  # num1 * 768
        part2 = torch.cat(part2_feature, dim=0)  # num2 * 768
        # Mean over the example axis -> one prototype vector per class.
        part0 = torch.mean(part0, dim=0)
        part1 = torch.mean(part1, dim=0)
        part2 = torch.mean(part2, dim=0)
        bias_feature = [part0, part1, part2]
        torch.save(bias_feature, cached_features_path)
    return bias_feature
# get_biased_feature returns the mean feature vector for each class.
def load_biased_model(biased_model_path, args):
    """Load the frozen biased Baseline model from *biased_model_path*.

    The model is moved to ``args.device`` and switched to eval mode.
    """
    print('正在读取向量')
    num_labels = len(get_label2id())
    cfg = AutoConfig.from_pretrained(
        biased_model_path,
        num_labels=num_labels,
        max_length=args.max_seq_length
    )
    biased = Baseline.from_pretrained(
        biased_model_path,
        config=cfg,
    )
    biased.to(args.device)
    biased.eval()
    return biased
def main():
    """Entry point: parse arguments, set up device/logging/seed, then run
    training and/or testing, optionally with the intervention model.

    Returns:
        dict of evaluation results (possibly empty when --do_test is off).
    """
    parser = training_arguments()
    args = parser.parse_args()
    if args.doc_stride >= args.max_seq_length - args.max_query_length:
        logger.warning(
            "WARNING - You've set a doc stride which may be superior to the document length in some "
            "examples. This could result in errors when building features from the examples. Please reduce the doc "
            "stride or increase the maximum length to ensure the features are correctly built."
        )
    if args.do_intervention:
        # The original output dir holds the biased (baseline) model; the
        # intervention model is written to a sibling "_intervention" dir.
        biased_model_dir = args.output_dir
        args.output_dir = args.output_dir + '_intervention'
    if (
        os.path.exists(args.output_dir)
        and os.listdir(args.output_dir)
        and args.do_train
        and not args.overwrite_output_dir
    ):
        raise ValueError(
            "Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
                args.output_dir
            )
        )
    # Setup distant debugging if needed
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()
    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend="nccl")
        args.n_gpu = 1
    # Force single-GPU training regardless of how many devices are visible.
    args.n_gpu = 1
    args.device = device
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        args.local_rank,
        device,
        args.n_gpu,
        bool(args.local_rank != -1),
        args.fp16,
    )
    # Set seed
    set_seed(args)
    # Load pretrained model and tokenizer
    if args.local_rank not in [-1, 0]:
        # Make sure only the first process in distributed training will download model & vocab
        torch.distributed.barrier()
    if args.local_rank == 0:
        # Make sure only the first process in distributed training will download model & vocab
        torch.distributed.barrier()
    args.model_type = args.model_type.lower()
    logger.info("Training/evaluation parameters %s", args)
    if args.fp16:
        try:
            import apex
            apex.amp.register_half_function(torch, "einsum")
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
    config = AutoConfig.from_pretrained(
        args.config_name if args.config_name else args.model_name_or_path,
        cache_dir=args.cache_dir if args.cache_dir else None,
        max_length=args.max_seq_length
    )
    tokenizer = AutoTokenizer.from_pretrained(
        args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
        do_lower_case=args.do_lower_case,
        cache_dir=args.cache_dir if args.cache_dir else None,
        use_fast=False
    )
    # Choose the model class: intervention uses SentimentClassification with
    # the pre-computed bias features; otherwise plain Baseline.
    Model_dict = {'Baseline': Baseline, 'SentimentClassification': SentimentClassification}
    if args.do_intervention:
        Model = Model_dict['SentimentClassification']
        biased_feature = get_biased_feature(biased_model_dir, args, tokenizer)
    else:
        Model = Model_dict['Baseline']
    # Training
    if args.do_train:
        if args.do_intervention:
            model = Model.from_pretrained(
                args.model_name_or_path,
                from_tf=bool(".ckpt" in args.model_name_or_path),
                config=config,
                cache_dir=args.cache_dir if args.cache_dir else None,
                biased_feature=biased_feature
            )
        else:
            model = Model.from_pretrained(
                args.model_name_or_path,
                from_tf=bool(".ckpt" in args.model_name_or_path),
                config=config,
                cache_dir=args.cache_dir if args.cache_dir else None,
            )
        model.to(args.device)
        train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False)
        global_step, tr_loss = train(args, train_dataset, model, tokenizer)
        logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
    # Save the trained model and the tokenizer
    if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
        logger.info("Saving model checkpoint to %s", args.output_dir)
        model_to_save = model.module if hasattr(model, "module") else model
        model_to_save.save_pretrained(args.output_dir)
        tokenizer.save_pretrained(args.output_dir)
        # Good practice: save your training arguments together with the trained model
        torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
    # Test
    results = {}
    if args.do_test and args.local_rank in [-1, 0]:
        logger.info("Loading checkpoints saved during training for testing")
        # Only the final checkpoint is tested here (intermediate checkpoints
        # under output_dir are intentionally not globbed).
        checkpoints = [args.output_dir]
        for checkpoint in checkpoints:
            # Reload the model
            global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
            if args.do_intervention:
                Model = Model_dict['SentimentClassification']
                biased_feature = get_biased_feature(biased_model_dir, args, tokenizer)
                model = Model.from_pretrained(
                    checkpoint,
                    from_tf=bool(".ckpt" in args.model_name_or_path),
                    config=config,
                    cache_dir=args.cache_dir if args.cache_dir else None,
                    biased_feature=biased_feature
                )
            else:
                Model = Model_dict['Baseline']
                model = Model.from_pretrained(checkpoint, config=config)
            model.to(args.device)
            # Test
            result = evaluate(args, model, tokenizer, prefix=global_step, test=True, enriched=args.do_test_enriched)
            result = dict((k + ("_{}".format(global_step) if global_step else ""), v) for k, v in result.items())
            results.update(result)
    logger.info("Results: {}".format(results))
    return results
if __name__ == "__main__":
    main()
|
# -*- coding: utf-8 -*-
# Downloads K_MIN-second bars for one futures contract via the gmsdk
# market-data API and writes them to "<sec_id>_<K_MIN>.csv".
# NOTE(review): Python 2 print statement — this script requires Python 2.
import datetime
from gmsdk import *
import pandas as pd
import DATA_CONSTANTS as DC
# Candidate bar sizes in seconds; only K_MIN below is actually used.
K_MIN_set=[60,300,600,900]
K_MIN=60
exchange_id='DCE'
sec_id='i1801'
# Log in to the market-data service.
# NOTE(review): credentials are hard-coded — move them to config/env.
md.init(username="smartgang@126.com", password="39314656a")
contractlist=pd.read_excel(DC.PUBLIC_DATA_PATH+'Contract.xlsx')['Contract']
symbol=exchange_id+'.'+sec_id
starttime="2017-11-01 00:00:00"
endtime="2017-11-10 09:51:00"
nextFlag=False
databuf=[]
# Page through the time range: keep fetching until a non-full buffer
# signals that the final page has been reached.
while(nextFlag is not True):
    bars = md.get_bars(symbol, K_MIN, starttime, endtime)
    for bar in bars:
        databuf.append([
            bar.exchange, ## exchange code
            bar.sec_id , ## security ID
            bar.bar_type, ## bar type in seconds, e.g. a 1-minute bar has bar_type=60
            bar.strtime , ## bar start time (string)
            bar.utc_time, ## bar start time (UTC timestamp)
            bar.strendtime, ## bar end time (string)
            bar.utc_endtime, ## bar end time (UTC timestamp)
            bar.open , ## open price
            bar.high , ## high price
            bar.low , ## low price
            bar.close, ## close price
            bar.volume, ## traded volume
            bar.amount, ## turnover
            bar.pre_close, ## previous close price
            bar.position, ## open interest
            bar.adj_factor, ## adjustment (re-weighting) factor
            bar.flag]) ## ex-rights / ex-dividend flag
    # 33000 presumably matches the API's per-request bar limit — TODO confirm.
    # A full buffer means more data remains: advance starttime to the last
    # bar's utc_endtime (index 6) and fetch the next page.
    if len(databuf)==33000:
        starttime=datetime.datetime.fromtimestamp(databuf[-1][6]).strftime('%Y-%m-%d %H:%M:%S')
    else:nextFlag=True
df = pd.DataFrame(databuf, columns=[
    'exchange',
    'sec_id',
    'bar_type',
    'strtime',
    'utc_time',
    'strendtime',
    'utc_endtime',
    'open','high','low','close','volume','amount','pre_close','position','adj_factor','flag'])
df.to_csv(sec_id+'_'+str(K_MIN)+'.csv')
print symbol+' raw data collection finished!'
|
from datetime import datetime
from airflow import DAG
from airflow.operators.python_operator import PythonOperator
from airflow.operators.dummy_operator import DummyOperator
import json
import pandas as pd
import numpy as np
import datetime
from dateutil.relativedelta import relativedelta
from bs4 import BeautifulSoup
import urllib.request as rq
import urllib
import time
import re
# Default operator arguments applied to every task in this DAG.
# provide_context=True passes the Airflow context (incl. `ti`) as kwargs.
args = {
    'owner': 'airflow',
    'provide_context': True
}
def parse_date(fecha, date_map):
    """Convert a scraped Spanish listing date into a pandas Timestamp.

    Args:
        fecha: raw string such as "12 enero 10:30" or "Hoy 10:30"; the last
            6 characters (the " HH:MM" time portion) are discarded.
        date_map: maps month tokens (e.g. " enero") to "-MM" suffixes, and
            "Hoy"/"Ayer" to full ISO dates; date_map["Hoy"] supplies the year.

    Returns:
        pandas.Timestamp for the listing date.
    """
    day_only = fecha[:-6]  # drop the trailing " HH:MM"
    # BUGFIX: raw string for the regex — "\d" in a plain string is an
    # invalid escape sequence (SyntaxWarning on modern Python).
    only_months = re.sub(r"\d*", "", day_only)  # strip digits, keep the month token
    only_months_replaced = date_map[only_months]
    scrape_year = date_map["Hoy"][:4]
    if scrape_year in only_months_replaced:
        # "Hoy"/"Ayer" already map to a complete ISO date.
        return pd.to_datetime(only_months_replaced)
    else:
        # Month-token case: assemble "YYYY-MM-DD" from the scrape year,
        # the mapped "-MM" suffix, and the leading day digits.
        fecha_num = day_only[:2]
        nueva_fecha = pd.to_datetime("{}{}-{}".format(scrape_year, only_months_replaced, fecha_num))
        return nueva_fecha
def scrape_basic_info(date_limit, date_map):
    """Scrape listing summaries until a listing older than date_limit appears.

    Pages through the corotos.com.do real-estate index, collecting name, URL,
    category, municipality, date and price per listing.

    Args:
        date_limit: pandas Timestamp; stop when a listing is older than this.
        date_map: month/Hoy/Ayer mapping consumed by parse_date.

    Returns:
        DataFrame with columns Nombre/URLs/Categoria/Municipio/Fecha/Precio,
        rows with an empty Precio removed.
    """
    prices = []
    urls = []
    nombres = []
    categorias = []
    municipios = []
    fechas = []
    base_url = 'http://www.corotos.com.do/santo_domingo/bienes_ra%C3%ADces-en_venta?f=c'
    new_url = base_url
    limit_reached = False
    i = 0
    today_dt = pd.to_datetime(date_map["Hoy"])
    while not limit_reached:
        print("Scraping page {}".format(i))
        req = rq.Request(new_url)
        # Spoof a browser user agent so the site serves the normal page.
        req.add_header('User-Agent', 'Mozilla/5.0 (X11; Fedora; Linux x86_64; rv:38.0) Gecko/20100101 Firefox/38.0')
        a = rq.urlopen(req).read()
        parser = BeautifulSoup(a, 'lxml')
        for item in parser.find_all("div", attrs={'class': 'item relative '}):
            fecha = item.find("div", attrs={"class": "time"}).get_text().strip()
            fecha_iso = parse_date(fecha, date_map)
            if fecha_iso > today_dt:
                # Listing dates carry no year; a "future" date must belong
                # to the previous year.
                fecha_iso = fecha_iso - relativedelta(years=1)
            if fecha_iso < date_limit:
                print("Date limit reached.")
                limit_reached = True
                break
            else:
                fechas.append(fecha_iso.isoformat())
                link = item.find("a", attrs={'class': "history", "href": True})
                urls.append(link["href"])
                nombres.append(link.get_text())
                try:
                    prices.append(item.find("span", attrs={'class': "price"}).get_text().strip())
                except:
                    # No price element: keep row alignment with a blank
                    # (filtered out below).
                    prices.append("")
                otra_info = (item.find("div", attrs={"class": "item-cat-region"}).get_text().strip().split(','))
                categorias.append(otra_info[0])
                municipios.append(otra_info[1])
        # "&o=<n>" is the site's page-offset query parameter.
        new_url = base_url + "&o={}".format(i + 2)
        i += 1
    bienes_raices_df = pd.DataFrame(
        {"Nombre": nombres, "URLs": urls, "Categoria": categorias, "Municipio": municipios, "Fecha": fechas,
         "Precio": prices})
    # Drop listings without a price.
    bienes_raices_df = bienes_raices_df.loc[bienes_raices_df.Precio != ""]
    return bienes_raices_df
def print_scrape(**kwargs):
    """Airflow task: scrape basic listing info newer than 2018-05-02.

    Builds the Spanish month/Hoy/Ayer -> date mapping expected by
    parse_date and returns the resulting DataFrame (pushed to XCom).
    """
    # Map the site's Spanish month abbreviations to "-MM" suffixes.
    date_map = {' dic.': '-12', ' nov.': '-11', ' oct.': '-10', ' sept.': '-09',
                ' agosto': '-08', ' jul.': '-07', ' jun.': '-06', ' mayo': '-05',
                ' abr.': '-04', ' marzo': '-03', ' feb.': '-02', ' enero': '-01'}
    today = datetime.date.today()
    date_map['Hoy'] = today.isoformat()
    date_map['Ayer'] = (today - relativedelta(days=1)).isoformat()
    return scrape_basic_info(pd.to_datetime("2018-05-02"), date_map)
def scrape_detailed_info(bienes_raices_df, characteristics):
    """Visit each listing URL and collect per-listing characteristic columns.

    Args:
        bienes_raices_df: frame with a URLs column (from scrape_basic_info).
        characteristics: names of columns to collect; values default to
            "Ninguno" when a listing does not provide them.

    Returns:
        A copy of the input frame with one extra column per characteristic
        that could be filled.
    """
    detailed_df = bienes_raices_df.copy()
    characteristics_data = {}
    for key in characteristics:
        characteristics_data[key] = ["Ninguno"] * detailed_df.shape[0]
    print("Scraping {} URLs".format(detailed_df.shape[0]))
    for i, url in enumerate(detailed_df.URLs):
        # Percent-encode the path so non-ASCII listing URLs are valid.
        spl = url.split("://")
        full_url = spl[0] + "://" + urllib.parse.quote(spl[1])
        req = rq.Request(full_url)
        req.add_header('User-Agent', 'Mozilla/5.0 (X11; Fedora; Linux x86_64; rv:38.0) Gecko/20100101 Firefox/38.0')
        skipped_details = set(['Tipo de anuncio', 'Tipo de Uso', 'Número de Oficinas'])
        # BUGFIX: initialise `detail` — the except branch below printed it,
        # raising NameError when the failure happened before any detail was read.
        detail = None
        try:
            a = rq.urlopen(req).read()
            parser = BeautifulSoup(a, 'lxml')
            characteristics_data["Vendedor"][i] = parser.find("h3", attrs={"class": "shops-name"}).get_text()
            characteristics_data["Descripción"][i] = parser.find("div", attrs={"class": "fs16"}).get_text()
            try:
                table = parser.find("div", attrs={"class": "param-table"})
                for column in table.find_all("div", attrs={"class": "param-table-column"}):
                    detail = column.find("span", attrs={"class": "adparam_label float-left prm"}).get_text()
                    if detail not in skipped_details:
                        characteristics_data[detail][i] = column.find("strong").get_text()
            # BUGFIX: narrowed from bare `except:` so KeyboardInterrupt/SystemExit
            # are no longer swallowed; listing is skipped on parse failure.
            except Exception:
                print(detail)
                print("Could not find params for {}".format(i))
        except Exception:
            # Network or top-level parse failure: skip this listing entirely.
            print("Failed to open {}".format(i))
    for key in characteristics_data.keys():
        try:
            detailed_df[key] = characteristics_data[key]
        except Exception:
            # Column could not be assigned (e.g. unexpected key): leave it out.
            continue
    return detailed_df
def get_detailed_info(**kwargs):
    """Airflow task callable: pull the basic DataFrame from XCom and
    enrich it with per-listing detail fields."""
    wanted_fields = [
        "Construcción", "Número de Baños", "Número de Habitaciones", "Sector",
        "Solar", "Tipo",
        "Vendedor", "Descripción",
    ]
    basic_df = kwargs['ti'].xcom_pull(task_ids='hello_task')
    return scrape_detailed_info(basic_df, wanted_fields)
def puller(**kwargs):
    """Airflow task callable: pull results from both upstream tasks.

    NOTE(review): the detail-task pull is evaluated but its result is
    discarded -- presumably kept for its side effect of consuming the
    XCom; confirm before removing.
    """
    task_instance = kwargs['ti']
    basic_result = task_instance.xcom_pull(task_ids='extract_task')
    task_instance.xcom_pull(task_ids='extract_details_task')
    return basic_result
# DAG wiring: run daily at 12:00; catchup=False means missed intervals are
# not back-filled.  `args` must be defined earlier in this module.
dag = DAG('hello_world', description='Simple scraping DAG',
          schedule_interval='0 12 * * *',
          start_date=datetime.datetime(2017, 3, 20), catchup=False,
          default_args=args)
# NOTE(review): get_detailed_info pulls XCom from task_ids='hello_task',
# but no task with that id is defined here -- verify the intended id
# (likely 'extract_task'); as written that pull probably returns None.
push1 = PythonOperator(task_id='extract_task', python_callable=print_scrape, dag=dag)
push2 = PythonOperator(task_id='extract_details_task', python_callable=get_detailed_info, dag=dag)
pull = PythonOperator(
    task_id='puller', dag=dag, python_callable=puller)
# The puller task runs only after both extract tasks complete.
pull.set_upstream([push1,push2])
from ast import Call
import itertools
import json
from typing import (
List,
Any,
Iterable,
TypeVar,
Union,
Dict,
Optional,
Callable,
)
from queue import LifoQueue
from six import string_types
# python 2 to 3 compatibility imports
try:
from itertools import imap as map
from itertools import ifilter as filter
from itertools import izip as zip
except ImportError:
pass
from builtins import range
from .core import Key, OrderingDirection, RepeatableIterable, Node
from .decorators import deprecated
from .exceptions import (
NoElementsError,
NoMatchingElement,
NullArgumentError,
MoreThanOneMatchingElement,
)
TEnumerable = TypeVar("TEnumerable", bound="Enumerable")
TGrouping = TypeVar("TGrouping", bound="Grouping")
TSortedEnumerable = TypeVar("TSortedEnumerable", bound="SortedEnumerable")
TGroupedEnumerable = TypeVar("TGroupedEnumerable", bound="GroupedEnumerable")
Number = Union[float, int]
class Enumerable(object):
    """LINQ-style lazy query wrapper over any iterable."""

    def __init__(self, data=None):
        """
        Constructor
        ** Note: no type checking of the data elements are performed during
        instantiation. **
        :param data: iterable object
        :return: None
        """
        self._iterable = RepeatableIterable(data)

    def __iter__(self) -> Iterable[Any]:
        return iter(self._iterable)

    def __reversed__(self) -> Iterable[Any]:
        return reversed(self._iterable)

    def next(self) -> Any:
        return next(self._iterable)

    def __next__(self) -> Any:
        return self.next()

    def __getitem__(self, n) -> Any:
        """
        Gets item in iterable at specified zero-based index
        :param n: the index of the item to get
        :returns the element at the specified index.
        :raises IndexError if n > number of elements in the iterable
        """
        # NOTE: returns None (does not raise) when n is out of range;
        # element_at converts that None into an IndexError.
        for i, e in enumerate(self):
            if i == n:
                return e

    def __len__(self) -> int:
        """
        Gets the number of elements in the collection
        """
        return len(self._iterable)

    def __repr__(self) -> str:
        return list(self).__repr__()

    def to_list(self) -> List[Any]:
        """
        Converts the iterable into a list
        :return: list object
        """
        return [x for x in self]

    def count(self, predicate=None) -> int:
        """
        Returns the number of elements in iterable
        :param predicate: optional filter applied before counting
        :return: integer object
        """
        if predicate is not None:
            return sum(1 for element in self.where(predicate))
        return sum(1 for element in self)

    def select(self, func=lambda x: x) -> TEnumerable:
        """
        Transforms data into different form
        :param func: lambda expression on how to perform transformation
        :return: new Enumerable object containing transformed data
        """
        return Enumerable(map(func, self))

    def sum(self, func=lambda x: x) -> Number:
        """
        Returns the sum of af data elements
        :param func: lambda expression to transform data
        :return: sum of selected elements
        """
        return sum(func(x) for x in self)

    def min(self, func=lambda x: x) -> Number:
        """
        Returns the min value of data elements
        :param func: lambda expression to transform data
        :return: minimum value
        :raises NoElementsError: if the iterable is empty
        """
        if len(self) == 0:
            raise NoElementsError(u"Iterable contains no elements")
        return func(min(self, key=func))

    def max(self, func=lambda x: x) -> Number:
        """
        Returns the max value of data elements
        :param func: lambda expression to transform data
        :return: maximum value
        :raises NoElementsError: if the iterable is empty
        """
        if len(self) == 0:
            raise NoElementsError(u"Iterable contains no elements")
        return func(max(self, key=func))

    def avg(self, func=lambda x: x) -> Number:
        """
        Returns the average value of data elements
        :param func: lambda expression to transform data
        :return: average value as float object
        :raises NoElementsError: if the iterable is empty
        """
        if len(self) == 0:
            raise NoElementsError(u"Iterable contains no elements")
        return float(self.sum(func)) / float(self.count())

    def median(self, func=lambda x: x) -> Number:
        """
        Return the median value of data elements
        :param func: lambda expression to project and sort data
        :return: median value
        :raises NoElementsError: if the iterable is empty
        """
        if len(self) == 0:
            raise NoElementsError(u"Iterable contains no elements")
        result = self.order_by(func).select(func).to_list()
        length = len(result)
        i = int(length / 2)
        # Odd length: middle element; even length: mean of the two middles.
        return (
            result[i]
            if length % 2 == 1
            else (float(result[i - 1]) + float(result[i])) / float(2)
        )

    def element_at(self, n) -> Any:
        """
        Returns element at given index.
        * Raises IndexError if no element found at specified position
        :param n: index as int object
        :return: Element at given index
        """
        if not isinstance(n, int):
            raise TypeError("Must be an integer")
        result = self[n]
        # NOTE(review): a legitimately stored None element is treated as
        # "not found" here -- a known limitation of the None sentinel.
        if result is None:
            raise IndexError
        return result

    def element_at_or_default(self, n) -> Optional[Any]:
        """
        Returns element at given index or None if no element found
        * Raises IndexError if n is greater than the number of elements in
        enumerable
        :param n: index as int object
        :return: Element at given index
        """
        try:
            return self.element_at(n)
        except IndexError:
            return None

    def first(self, func=None) -> Any:
        """
        Returns the first element in a collection
        :func: predicate as lambda expression used to filter collection
        :return: data element as object or NoElementsError if transformed data
        contains no elements
        """
        if func is not None:
            return self.where(func).element_at(0)
        return self.element_at(0)

    def first_or_default(self, func=None) -> Optional[Any]:
        """
        Return the first element in a collection. If collection is empty, then returns None
        :func: predicate as lambda expression used to filter collection
        :return: data element as object or None if transformed data contains no
        elements
        """
        if func is not None:
            return self.where(func).element_at_or_default(0)
        return self.element_at_or_default(0)

    def last(self, func=None) -> Any:
        """
        Return the last element in a collection
        :func: predicate as a lambda expression used to filter collection
        :return: data element as object or NoElementsError if transformed data
        contains no elements
        """
        if func is not None:
            # Bug fix: the filtered result was previously computed but not
            # returned, so the *unfiltered* last element was returned instead.
            return self.reverse().where(func).first()
        return self.reverse().first()

    def last_or_default(self, func=None) -> Optional[Any]:
        """
        Return the last element in a collection or None if the collection is empty
        :func: predicate as a lambda expression used to filter collection
        :return: data element as object or None if transformed data contains no
        elements
        """
        if func is not None:
            return self.reverse().where(func).first_or_default()
        return self.reverse().first_or_default()

    def order_by(self, key):
        """
        Returns new Enumerable sorted in ascending order by given key
        :param key: key to sort by as lambda expression
        :return: new Enumerable object
        :raises NullArgumentError: if key is None
        """
        if key is None:
            raise NullArgumentError(u"No key for sorting given")
        kf = [OrderingDirection(key, reverse=False)]
        return SortedEnumerable(Enumerable(iter(self)), key_funcs=kf)

    def order_by_descending(self, key):
        """
        Returns new Enumerable sorted in descending order by given key
        :param key: key to sort by as lambda expression
        :return: new Enumerable object
        :raises NullArgumentError: if key is None
        """
        if key is None:
            raise NullArgumentError(u"No key for sorting given")
        kf = [OrderingDirection(key, reverse=True)]
        return SortedEnumerable(Enumerable(iter(self)), key_funcs=kf)

    def skip(self, n) -> TEnumerable:
        """
        Returns new Enumerable where n elements have been skipped
        :param n: Number of elements to skip as int
        :return: new Enumerable object
        """
        return Enumerable(data=itertools.islice(self, n, None, 1))

    def take(self, n) -> TEnumerable:
        """
        Return new Enumerable where first n elements are taken
        :param n: Number of elements to take
        :return: new Enumerable object
        """
        return Enumerable(data=itertools.islice(self, 0, n, 1))

    def where(self, predicate) -> TEnumerable:
        """
        Returns new Enumerable where elements matching predicate are selected
        :param predicate: predicate as a lambda expression
        :return: new Enumerable object
        :raises NullArgumentError: if predicate is None
        """
        if predicate is None:
            raise NullArgumentError("No predicate given for where clause")
        return Enumerable(filter(predicate, self))

    def single(self, predicate=None) -> Any:
        """
        Returns single element that matches given predicate.
        Raises:
            * NoMatchingElement error if no matching elements are found
            * MoreThanOneMatchingElement error if more than one matching
            element is found
        :param predicate: predicate as a lambda expression
        :return: Matching element as object
        """
        result = self.where(predicate) if predicate is not None else self
        if len(result) == 0:
            raise NoMatchingElement("No matching elements are found")
        if result.count() > 1:
            raise MoreThanOneMatchingElement("More than one matching element is found")
        return result.to_list()[0]

    def single_or_default(self, predicate=None) -> Optional[Any]:
        """
        Return single element that matches given predicate. If no matching
        element is found, returns None
        Raises:
            * MoreThanOneMatchingElement error if more than one matching
            element is found
        :param predicate: predicate as a lambda expression
        :return: Matching element as object or None if no matches are found
        """
        try:
            return self.single(predicate)
        except NoMatchingElement:
            return None

    def select_many(self, func=lambda x: x) -> TEnumerable:
        """
        Flattens an iterable of iterables returning a new Enumerable
        :param func: selector as lambda expression
        :return: new Enumerable object
        """
        selected = self.select(func)
        return Enumerable(data=itertools.chain.from_iterable(selected))

    def add(self, element) -> TEnumerable:
        """
        Adds an element to the enumerable.
        :param element: An element
        :return: new Enumerable object
        The behavior here is no longer in place to conform more to typical
        functional programming practices. This is a breaking change from
        1.X versions.
        """
        if element is None:
            return self
        return self.concat(Enumerable([element]))

    def concat(self, enumerable) -> TEnumerable:
        """
        Adds enumerable to an enumerable
        :param enumerable: An iterable object
        :return: new Enumerable object
        :raises TypeError: if enumerable is not an Enumerable
        """
        if not isinstance(enumerable, Enumerable):
            raise TypeError(u"enumerable argument must be an instance of Enumerable")
        return Enumerable(data=itertools.chain(self._iterable, enumerable._iterable))

    def group_by(
        self, key_names=None, key=lambda x: x, result_func=lambda x: x
    ) -> TEnumerable:
        """
        Groups an enumerable on given key selector. Index of key name
        corresponds to index of key lambda function.
        Usage:
            Enumerable([1,2,3]).group_by(key_names=['id'], key=lambda x: x) _
                .to_list() -->
            Enumerable object [
                Grouping object {
                    key.id: 1,
                    _data: [1]
                },
                Grouping object {
                    key.id: 2,
                    _data: [2]
                },
                Grouping object {
                    key.id: 3,
                    _data: [3]
                }
            ]
        Thus the key names for each grouping object can be referenced
        through the key property. Using the above example:
            Enumerable([1,2,3]).group_by(key_names=['id'], key=lambda x: x) _
                .select(lambda g: { 'key': g.key.id, 'count': g.count() }
        :param key_names: list of key names
        :param key: key selector as lambda expression
        :param result_func: transformation function as lambda expression
        :return: Enumerable of grouping objects
        """
        # Bug fix: the default was a shared mutable list ([]); use a fresh
        # list per call instead. Behavior for callers is unchanged.
        if key_names is None:
            key_names = []
        return GroupedEnumerable(self, key, key_names, result_func)

    def distinct(self, key=lambda x: x) -> TEnumerable:
        """
        Returns enumerable containing elements that are distinct based on
        given key selector
        :param key: key selector as lambda expression
        :return: new Enumerable object
        """
        return GroupedEnumerable(self, key, ["distinct"], lambda g: g.first())

    def join(
        self,
        inner_enumerable,
        outer_key=lambda x: x,
        inner_key=lambda x: x,
        result_func=lambda x: x,
    ):
        """
        Return enumerable of inner equi-join between two enumerables
        :param inner_enumerable: inner enumerable to join to self
        :param outer_key: key selector of outer enumerable as lambda expression
        :param inner_key: key selector of inner enumerable as lambda expression
        :param result_func: lambda expression to transform result of join
        :return: new Enumerable object
        :raises TypeError: if inner_enumerable is not an Enumerable
        """
        if not isinstance(inner_enumerable, Enumerable):
            raise TypeError(
                u"inner_enumerable parameter must be an instance of Enumerable"
            )
        # Cartesian product filtered on key equality == inner equi-join.
        return (
            Enumerable(data=itertools.product(self, inner_enumerable))
            .where(lambda r: outer_key(r[0]) == inner_key(r[1]))
            .select(lambda r: result_func(r))
        )

    def default_if_empty(self, value=None):
        """
        Returns an enumerable containing a single None element if enumerable is
        empty, otherwise the enumerable itself
        :return: an Enumerable object
        """
        if len(self) == 0:
            return Enumerable([value])
        return self

    def group_join(
        self,
        inner_enumerable: TEnumerable,
        outer_key: Callable = lambda x: x,
        inner_key: Callable = lambda x: x,
        result_func: Callable = lambda x: x,
    ):
        """
        Return enumerable of group join between two enumerables
        :param inner_enumerable: inner enumerable to join to self
        :param outer_key: key selector of outer enumerable as lambda expression
        :param inner_key: key selector of inner enumerable as lambda expression
        :param result_func: lambda expression to transform the result of group
        join
        :return: new Enumerable object
        :raises TypeError: if inner_enumerable is not an Enumerable
        """
        if not isinstance(inner_enumerable, Enumerable):
            raise TypeError(
                u"inner enumerable parameter must be an instance of Enumerable"
            )
        group_joined = self.join(
            inner_enumerable=inner_enumerable.group_by(key_names=["id"], key=inner_key),
            outer_key=outer_key,
            inner_key=lambda g: g.key.id,
            result_func=lambda gj: (gj[0], Enumerable(gj[1]._iterable)),
        ).select(result_func)
        return group_joined

    def any(self, predicate: Callable = None):
        """
        Returns true if any elements that satisfy predicate are found
        :param predicate: condition to satisfy as lambda expression
        :return: boolean True or False
        """
        # NOTE(review): inherits the None-sentinel limitation -- a matching
        # element that *is* None reports False.
        return self.first_or_default(predicate) is not None

    def intersect(self, enumerable: TEnumerable, key: Callable):
        """
        Returns enumerable that is the intersection between given enumerable
        and self
        :param enumerable: enumerable object
        :param key: key selector as lambda expression
        :return: new Enumerable object
        :raises TypeError: if enumerable is not an Enumerable
        """
        if not isinstance(enumerable, Enumerable):
            raise TypeError(u"enumerable parameter must be an instance of Enumerable")
        # Build the membership set once for O(1) per-element lookups.
        membership = set((key(j) for j in enumerable))
        intrsct = (i for i in self if key(i) in membership)
        return Enumerable(data=intrsct).distinct(key)

    def aggregate(self, func: Callable, seed: Any = None) -> Any:
        """
        Perform a calculation over a given enumerable using the initial seed
        value
        :param func: calculation to perform over every the enumerable.
        This function will ingest (aggregate_result, next element) as parameters
        :param seed: initial seed value for the calculation. If None, then the
        first element is used as the seed
        :return: result of the calculation
        """
        result = seed
        for i, e in enumerate(self):
            if i == 0 and seed is None:
                result = e
                continue
            result = func(result, e)
        return result

    def union(self, enumerable: TEnumerable, key: Callable):
        """
        Returns enumerable that is a union of elements between self and given
        enumerable
        :param enumerable: enumerable to union self to
        :param key: key selector used to determine uniqueness
        :return: new Enumerable object
        :raises TypeError: if enumerable is not an Enumerable
        """
        if not isinstance(enumerable, Enumerable):
            raise TypeError(u"enumerable parameter must be an instance of Enumerable")
        return Enumerable(data=self.concat(enumerable)).distinct(key)

    def except_(self, enumerable: TEnumerable, key: Callable):
        """
        Returns enumerable that subtracts given enumerable elements from self
        :param enumerable: enumerable object
        :param key: key selector as lambda expression
        :return: new Enumerable object
        :raises TypeError: if enumerable is not an Enumerable
        """
        if not isinstance(enumerable, Enumerable):
            raise TypeError(u"enumerable parameter must be an instance of Enumerable")
        membership = set((key(j) for j in enumerable))
        exc = (i for i in self if key(i) not in membership)
        return Enumerable(data=exc).distinct(key)

    def contains(self, element, key=lambda x: x):
        """
        Returns True if element is found in enumerable, otherwise False
        :param element: the element being tested for membership in enumerable
        :param key: key selector to use for membership comparison
        :return: boolean True or False
        """
        return self.select(key).any(lambda x: x == key(element))

    def all(self, predicate: Callable) -> bool:
        """
        Determines whether all elements in an enumerable satisfy the given
        predicate
        :param predicate: the condition to test each element as lambda function
        :return: boolean True or False
        """
        return all(predicate(e) for e in self)

    def append(self, element):
        """
        Appends an element to the end of an enumerable
        :param element: the element to append to the enumerable
        :return: Enumerable object with appended element
        """
        return self.concat(Enumerable([element]))

    def prepend(self, element):
        """
        Prepends an element to the beginning of an enumerable
        :param element: the element to prepend to the enumerable
        :return: Enumerable object with the prepended element
        """
        return Enumerable([element]).concat(self)

    @staticmethod
    def empty():
        """
        Returns an empty enumerable
        :return: Enumerable object that contains no elements
        """
        return Enumerable()

    @staticmethod
    def range(start, length):
        """
        Generates a sequence of integers starting from start with length of length
        :param start: the starting value of the sequence
        :param length: the number of integers in the sequence
        :return: Enumerable of the generated sequence
        """
        return Enumerable(range(start, start + length, 1))

    @staticmethod
    def repeat(element, length):
        """
        Generates an enumerable containing an element repeated length times
        :param element: the element to repeat
        :param length: the number of times to repeat the element
        :return: Enumerable of the repeated elements
        """
        return Enumerable(data=itertools.repeat(element, length))

    def reverse(self):
        """
        Inverts the order of the elements in a sequence
        :return: Enumerable with elements in reversed order
        """
        return Enumerable(data=reversed(self))

    def skip_last(self, n):
        """
        Skips the last n elements in a sequence
        :param n: the number of elements to skip
        :return: Enumerable with n last elements removed
        """
        return self.take(self.count() - n)

    def skip_while(self, predicate):
        """
        Bypasses elements in a sequence while the predicate is True. After predicate fails
        remaining elements in sequence are returned
        :param predicate: a predicate as a lambda expression
        :return: Enumerable
        """
        return SkipWhileEnumerable(Enumerable(iter(self)), predicate)

    def take_last(self, n):
        """
        Takes the last n elements in a sequence
        :param n: the number of elements to take
        :return: Enumerable containing last n elements
        """
        return self.skip(self.count() - n)

    def take_while(self, predicate):
        """
        Includes elements in a sequence while the predicate is True. After predicate fails
        remaining elements in a sequence are removed
        :param predicate: a predicate as a lambda expression
        :return: Enumerable
        """
        return TakeWhileEnumerable(Enumerable(iter(self)), predicate)

    def to_dictionary(self, key=lambda x: x, value=lambda x: x):
        """
        Converts the enumerable into a dictionary
        :param key: key selector to use to create dictionary keys
        :param value: optional value selector to use to assign values to dictionary keys
        :return: dict
        """
        result = {}
        for i, e in enumerate(self):
            result[key(e)] = value(e)
        return result

    def zip(self, enumerable, func=lambda x: x):
        """
        Merges 2 Enumerables using the given function. If the 2 collections are of unequal length, then
        merging continues until the end of one of the collections is reached
        :param enumerable: Enumerable collection to merge with
        :param func: a function to perform the merging
        :return: Enumerable
        :raises TypeError: if enumerable is not an Enumerable
        """
        if not isinstance(enumerable, Enumerable):
            # Consistency fix: give the error a message like sibling methods.
            raise TypeError(u"enumerable parameter must be an instance of Enumerable")
        return ZipEnumerable(Enumerable(iter(self)), enumerable, func)
class SkipWhileEnumerable(Enumerable):
    """Lazy view that drops leading elements for as long as the predicate
    holds, then yields everything that remains."""

    def __init__(self, enumerable, predicate):
        # The parent stores the underlying iterable; we only keep the test.
        super(SkipWhileEnumerable, self).__init__(enumerable)
        self.predicate = predicate

    def __iter__(self):
        skipping = True
        for item in self._iterable:
            if skipping and self.predicate(item):
                continue
            skipping = False
            yield item
class TakeEnumerable(Enumerable):
    """Lazy view yielding only the first ``n`` elements of a collection."""

    def __init__(self, enumerable, n):
        super(TakeEnumerable, self).__init__(enumerable)
        self.n = n

    def __iter__(self):
        # Keep walking the whole underlying iterable (matching the original
        # behavior) but only yield while the quota lasts.
        remaining = self.n
        for element in self._iterable:
            if remaining > 0:
                remaining -= 1
                yield element
class TakeWhileEnumerable(Enumerable):
    """Lazy view yielding elements only while the predicate stays true."""

    def __init__(self, enumerable, predicate):
        super(TakeWhileEnumerable, self).__init__(enumerable)
        self.predicate = predicate

    def __iter__(self):
        # Stop at the first element that fails the predicate.
        for item in self._iterable:
            if not self.predicate(item):
                break
            yield item
class GroupedEnumerable(Enumerable):
    # NOTE: intentionally does NOT call super().__init__ -- the grouped
    # iterable assigned below already provides the RepeatableIterable
    # behavior the base class expects in self._iterable.
    def __init__(
        self,
        data: Iterable,
        key: Callable,
        key_names: List[str],
        func: Callable = lambda x: x,
    ) -> None:
        """
        Constructor for GroupedEnumerable class
        :param data: Iterable of grouped data obtained by itertools.groupby. The data structure is
        (key, grouper) where grouper is an iterable of items that match the key
        :param key: function to get the key
        :param key_names: list of names to use for the keys
        :param func: function to transform the Grouping result.
        """
        self._iterable = GroupedRepeatableIterable(key, key_names, func, data)
class Grouping(Enumerable):
    """A single group produced by Enumerable.group_by: a Key plus the
    enumerable of elements that share it."""

    def __init__(self, key, data):
        """
        :param key: Key instance naming this group
        :param data: iterable of the group's members
        :raises Exception: when key is not a Key instance
        """
        if not isinstance(key, Key):
            raise Exception("key argument should be a Key instance")
        self.key = key
        super(Grouping, self).__init__(data)

    def __repr__(self):
        description = {
            "key": self.key.__repr__(),
            "enumerable": list(self._iterable).__repr__(),
        }
        return repr(description)
class GroupedRepeatableIterable(RepeatableIterable):
    """Repeatable iterable that groups its data lazily on first iteration
    and caches the Grouping results in a linked list for later passes."""
    def __init__(
        self,
        key: Callable,
        key_names: List[str],
        func: Callable,
        data: Iterable[Any] = None,
    ):
        # key: grouping key selector; key_names: names exposed on each
        # group's Key object; func: transform applied to each Grouping.
        self.key = key
        self.key_names = key_names
        self.func = func
        super().__init__(data)
    def _can_enumerate(self, key_value):
        # True when the key is a non-empty, non-string sized container, i.e.
        # a composite key whose components map onto key_names positionally.
        return (
            hasattr(key_value, "__len__")
            and len(key_value) > 0
            and not isinstance(key_value, string_types)
        )
    def __iter__(self) -> Any:
        # First pass (_root unset, presumably initialized by the
        # RepeatableIterable base -- confirm): group, yield, and build the
        # node cache. Subsequent passes replay the cached linked list.
        if self._root is None:
            # groupby requires sorted input to group equal keys together.
            grouped_iterable = (
                (k, list(g))
                for k, g in itertools.groupby(
                    sorted(self._data, key=self.key), self.key
                )
            )
            i = 0
            for key, group in grouped_iterable:
                key_prop = {}
                for j, prop in enumerate(self.key_names):
                    # Composite keys are unpacked positionally; scalar keys
                    # are assigned to every key name as-is.
                    key_prop.setdefault(
                        prop, key[j] if self._can_enumerate(key) else key
                    )
                key_object = Key(key_prop)
                node = Node(value=self.func(Grouping(key_object, list(group))))
                if i == 0:
                    self._root = node
                    self._current = self._root
                else:
                    self._current.next = node
                    self._current = self._current.next
                yield node.value
                i += 1
            self._len = i
        else:
            # Replay the cached nodes, then rewind for the next iteration.
            while self._current is not None:
                yield self._current.value
                self._current = self._current.next
            self._current = self._root
class SortedEnumerable(Enumerable):
    """Enumerable sorted by one or more OrderingDirection keys."""

    def __init__(self, data: Iterable, key_funcs):
        """
        Constructor
        :param key_funcs: list of OrderingDirection instances in order of primary key --> less important keys
        :param data: data as iterable
        :raises NullArgumentError: if key_funcs is None
        :raises TypeError: if key_funcs is not a list
        """
        if key_funcs is None:
            raise NullArgumentError(u"key_funcs argument cannot be None")
        if not isinstance(key_funcs, list):
            raise TypeError(u"key_funcs should be a list instance")
        self._key_funcs = [f for f in key_funcs if isinstance(f, OrderingDirection)]
        # Sort by the least significant key first; Python's sort is stable,
        # so the earlier (primary) keys dominate the final ordering.
        for o in reversed(self._key_funcs):
            data = sorted(data, key=o.key, reverse=o.descending)
        super(SortedEnumerable, self).__init__(data)

    def then_by(self, func):
        """
        Subsequent sorting function in ascending order
        :param func: lambda expression for secondary sort key
        :return: SortedEnumerable instance
        :raises NullArgumentError: if func is None
        """
        if func is None:
            raise NullArgumentError(u"then by requires a lambda function arg")
        # Bug fix: previously appended to self._key_funcs in place, mutating
        # the receiver -- repeated/branched then_by calls on the same
        # instance accumulated stale keys. Build a fresh list instead.
        return SortedEnumerable(
            self, self._key_funcs + [OrderingDirection(key=func, reverse=False)]
        )

    def then_by_descending(self, func):
        """
        Subsequent sorting function in descending order
        :param func: lambda function for secondary sort key
        :return: SortedEnumerable instance
        :raises NullArgumentError: if func is None
        """
        if func is None:
            raise NullArgumentError(
                u"then_by_descending requires a lambda function arg"
            )
        # Same non-mutating construction as then_by (see above).
        return SortedEnumerable(
            self, self._key_funcs + [OrderingDirection(key=func, reverse=True)]
        )
class ZipEnumerable(Enumerable):
    """Lazy pairwise merge of two collections; stops when the shorter of
    the two is exhausted."""

    def __init__(self, enumerable1, enumerable2, result_func):
        super(ZipEnumerable, self).__init__(enumerable1)
        self.enumerable = enumerable2
        self.result_func = result_func

    def __iter__(self):
        # Each yielded value is result_func applied to one (left, right) pair.
        for pair in zip(iter(self._iterable), self.enumerable):
            yield self.result_func(pair)
|
#!/usr/bin/python
"""Catalogue a few pets and pretty-print what we know about each one."""
# Build the catalogue as a single literal instead of appending one by one.
pets = [
    {
        'animal type': 'cat',
        'name': 'muffin',
        'owner': 'adya',
        'weight': '2.3 kg',
        'eats': 'milk',
    },
    {
        'animal type': 'chicken',
        'name': 'kukudu ku',
        'owner': 'abhilash',
        'weight': '2.8 kg',
        'eats': 'seeds',
    },
    {
        'animal type': 'dog',
        'name': 'lol',
        'owner': 'abhishek',
        'weight': '9.35 kg',
        'eats': 'pedigree',
    },
]
# Display information about each pet.
for pet in pets:
    print("\nHere's what I know about " + pet['name'].title() + ":")
    for key, value in pet.items():
        print("\t" + key + ": " + str(value))
|
# Accepted
# Python 3
#!/bin/python3
import sys

s = input().strip()
n = int(input().strip())

# Count the letter 'a' in the string s repeated out to length n:
# full repetitions of s, plus the leftover prefix of length n % len(s).
l = len(s)
full_repeats, prefix_len = divmod(n, l)
a_per_copy = sum(1 for ch in s if ch == 'a')
a_in_prefix = sum(1 for ch in s[:prefix_len] if ch == 'a')
print(a_per_copy * full_repeats + a_in_prefix)
|
from .columns import Column
class MetaBase(type):
    """Metaclass that equips each model class with:

      * ``__table__``     -- mapping of attribute name -> Column instance
      * ``__tablename__`` -- the lowercased class name

    The ``__table__`` idea follows the declarative style popularized by
    SQLAlchemy; the attribute-collection approach follows the custom
    metaclass patterns described at realpython.com.
    """

    def __new__(cls, name, bases, dct):
        new_cls = super().__new__(cls, name, bases, dct)
        new_cls.__tablename__ = name.lower()
        # Collect only the declared Column attributes into the table map.
        new_cls.__table__ = {
            attr: value
            for attr, value in new_cls.__dict__.items()
            if isinstance(value, Column)
        }
        return new_cls
class Base(metaclass=MetaBase):
    """Minimal active-record base: CRUD over a sqlite-style connection.

    Expects a ``__session__`` attribute (providing ``connection``,
    ``table_exists`` and ``create_table``) to be attached elsewhere --
    TODO confirm where it is set.  Table and column names are interpolated
    into the SQL directly (they come from the class definition, not user
    input); cell values are always passed as bound parameters.
    """
    def __init__(self, **kwargs):
        # _primary_dict holds the row identity as last read from/written to
        # the DB; None means this instance is not bound to a stored row yet.
        self._primary_dict = None
        tab_colums = set(self.__table__.keys())
        given_columns = set(kwargs.keys())
        # Require exactly the declared columns -- no extras, none missing.
        if given_columns != tab_colums:
            diff1 = given_columns.difference(tab_colums)
            diff2 = tab_colums.difference(given_columns)
            s1 = f'columns do not exist: {diff1}' \
                if len(diff1) > 0 else ''
            s2 = f'you missed to fill columns: {diff2}' \
                if len(diff2) > 0 else ''
            raise AttributeError(' ;'.join((_ for _ in
                                            [s1, s2] if len(_) > 0)))
        for name, val in kwargs.items():
            setattr(self, name, val)
    # here we use setattr (set)
    def create(self):
        """
        create new row
        return None
        """
        columns = self.__table__.keys()
        values = tuple(getattr(self, c) for c in columns)
        connection = self.__session__.connection
        # Lazily create the backing table on first insert.
        if not self.__session__.table_exists(self.__tablename__):
            self.__session__.create_table(self.__tablename__, columns)
        if self.row_exists(columns, values):
            raise RuntimeError("Can't create already existing row")
        connection.execute(f"INSERT INTO {self.__tablename__} "
                           f"({', '.join(columns)}) VALUES"
                           f"({', '.join('?'*len(columns))});", values)
        # Remember the stored identity so update()/delete() can find the row.
        self._primary_dict = {k: v for k, v in zip(columns, values)}
        connection.commit()
    @classmethod
    def read(cls, **kwargs):
        """
        read existing rows matching the given column filters
        return: List[Instance]
        """
        connection = cls.__session__.connection
        tab_colums = list(cls.__table__.keys())
        columns, values = zip(*kwargs.items())
        command = f"SELECT {', '.join(tab_colums)}" + \
                  f" FROM {cls.__tablename__} WHERE " + \
                  ' AND '.join(f'{c}=?' for c in columns) + ";"
        rows = list(connection.execute(command, values))
        primary_dicts = [{k: v for k, v in zip(tab_colums, r)} for r in rows]
        res = []
        for d in primary_dicts:
            # Bind each instance to the row it was read from.
            obj = cls(**d)
            obj._primary_dict = d
            res.append(obj)
        return res
    @classmethod
    def all(cls):
        """
        read all existing rows
        return: List[Instance]
        """
        connection = cls.__session__.connection
        tab_colums = list(cls.__table__.keys())
        command = f"SELECT {', '.join(tab_colums)}" + \
                  f" FROM {cls.__tablename__};"
        rows = list(connection.execute(command))
        primary_dicts = [{k: v for k, v in zip(tab_colums, r)} for r in rows]
        res = []
        for d in primary_dicts:
            obj = cls(**d)
            obj._primary_dict = d
            res.append(obj)
        return res
    def update(self):
        """
        update existing row (identified by the previously read/created state)
        return None
        """
        connection = self.__session__.connection
        if self._primary_dict is None:
            raise RuntimeError("Can't update non-readen or non-created row")
        # WHERE clause targets the old identity; SET applies current attrs.
        wh_columns, wh_values = zip(*self._primary_dict.items())
        set_columns = list(self.__table__.keys())
        set_values = list(getattr(self, c) for c in set_columns)
        if not self.row_exists(wh_columns, wh_values):
            raise RuntimeError("Can't update non-existing row")
        # Refuse updates that would make this row a duplicate of another.
        if self.row_exists(set_columns, set_values):
            raise RuntimeError("Can't update row, update will create copy")
        command = f"UPDATE {self.__tablename__} SET " + \
                  ', '.join(f'{c}=?' for c in set_columns) + " WHERE " + \
                  ' AND '.join(f'{c}=?' for c in wh_columns) + ";"
        connection.execute(command, list(set_values) + list(wh_values))
        self._primary_dict = {c: v for c, v in zip(set_columns, set_values)}
        connection.commit()
    def delete(self):
        """
        delete existing row
        return None
        """
        connection = self.__session__.connection
        if self._primary_dict is None:
            raise RuntimeError("Can't delete non-readen or non-created row")
        columns, values = zip(*self._primary_dict.items())
        if not self.row_exists(columns, values):
            raise RuntimeError("Can't delete non-existing row")
        command = f"DELETE FROM {self.__tablename__} WHERE " + \
                  ' AND '.join(f'{c}=?' for c in columns) + ";"
        connection.execute(command, values)
        # Unbind: the instance no longer corresponds to a stored row.
        self._primary_dict = None
        connection.commit()
    # here we use getattr (get)
    def row_exists(self, columns, values):
        # True when at least one row matches all given column/value pairs.
        connection = self.__session__.connection
        command = f"SELECT {', '.join(columns)}" + \
                  f" FROM {self.__tablename__} WHERE " + \
                  ' AND '.join(f'{c}=?' for c in columns) + ";"
        res = list(connection.execute(command, values))
        return len(res) != 0
|
from five import grok
from ilo.missionreportstats.content.mission_report_statistics import (
IMissionReportStatistics
)
from ilo.missionreportstats.interfaces import IStatsCache
from zope.lifecycleevent import IObjectModifiedEvent, IObjectAddedEvent
@grok.subscribe(IMissionReportStatistics, IObjectModifiedEvent)
def refresh_cache_on_save(obj, event):
    # Rebuild the cached statistics whenever an existing report is modified.
    IStatsCache(obj).update()
@grok.subscribe(IMissionReportStatistics, IObjectAddedEvent)
def refresh_cache_on_create(obj, event):
    # Populate the statistics cache as soon as a report object is added.
    IStatsCache(obj).update()
|
#packages
import json
import requests

# Fetch the weather report for Darmstadt as JSON from wttr.in.
url = 'https://wttr.in/Darmstadt?format=j1'
r = requests.get(url)
wttr = r.json()
#print(wttr)

# First forecast date encountered; that day is skipped in the printout.
currentdate = None
for key in wttr.keys():
    if key == 'current_condition':
        for entry in wttr[key]:
            if 'weatherDesc' in entry:
                pass
                #print(f"{key}: {entry['weatherDesc'][0]['value']}")
    elif key == 'weather':
        for day in wttr[key]:
            if not currentdate:
                currentdate = day['date']
            if 'hourly' in day and currentdate != day['date']:
                # Slash-joined hourly series (each value keeps a trailing '/').
                feels = "".join(f"{h['FeelsLikeC']}/" for h in day['hourly'])
                humidity = "".join(f"{h['humidity']}/" for h in day['hourly'])  # built but not printed
                rain = "".join(f"{h['chanceofrain']}/" for h in day['hourly'])
                desc = "".join(f"{h['weatherDesc'][0]['value']}/" for h in day['hourly'])
                print(f"{day['date']}: with temperature {feels}°C, and {rain} % chance of rain and it's {desc}")
|
from django.db import models
from django.conf import settings
from autoslug import AutoSlugField
from nucleo.models import User, Tag, Dependencia, AreaConocimiento, ProgramaLicenciatura, ProgramaMaestria, ProgramaDoctorado, Proyecto
# Choice tuples for CursoEspecializacion fields; each can be overridden via
# the Django settings module under the same name.
CURSO_ESPECIALIZACION_TIPO = getattr(settings, 'CURSO_ESPECIALIZACION_TIPO', (('CURSO', 'Curso'), ('DIPLOMADO', 'Diplomado'), ('CERTIFICACION', 'Certificación'), ('OTRO', 'Otro')))
CURSO_ESPECIALIZACION_MODALIDAD = getattr(settings, 'CURSO_ESPECIALIZACION_MODALIDAD', (('PRESENCIAL', 'Presencial'), ('EN_LINEA', 'En línea'), ('MIXTO', 'Mixto'), ('OTRO', 'Otro')))
# Create your models here.
class CursoEspecializacion(models.Model):
    """A specialization course (curso/diplomado/certificación) taken by a user."""
    nombre_curso = models.CharField(max_length=255, verbose_name='Nombre del curso')
    slug = AutoSlugField(populate_from='nombre_curso', unique=True)
    descripcion = models.TextField(verbose_name='Descripción', blank=True)
    tipo = models.CharField(max_length=20, choices=CURSO_ESPECIALIZACION_TIPO, verbose_name='Tipo de curso')
    horas = models.PositiveIntegerField(verbose_name='Número de horas')
    fecha_inicio = models.DateField('Fecha de inicio')
    fecha_fin = models.DateField('Fecha de finalización', blank=True, null=True)
    modalidad = models.CharField(max_length=20, choices=CURSO_ESPECIALIZACION_MODALIDAD)
    # on_delete is explicit: required from Django 2.0 onward; CASCADE matches
    # the pre-2.0 implicit default, so behavior is unchanged.
    area_conocimiento = models.ForeignKey(AreaConocimiento, verbose_name='Área de conocimiento', on_delete=models.CASCADE)
    dependencia = models.ForeignKey(Dependencia, on_delete=models.CASCADE)
    usuario = models.ForeignKey(User, related_name='cursos_especializacion', on_delete=models.CASCADE)
    tags = models.ManyToManyField(Tag, related_name='curso_especializacion_tags', blank=True)

    def __str__(self):
        return "{} : {} : {}".format(self.nombre_curso, self.fecha_inicio, self.usuario)

    class Meta:
        ordering = ['fecha_inicio']
        verbose_name = 'Curso de especialización'
        verbose_name_plural = 'Cursos de especialización'
        unique_together = ['nombre_curso', 'fecha_inicio', 'usuario']
class Licenciatura(models.Model):
    """An undergraduate degree (licenciatura) earned by a user."""
    # on_delete explicit: required from Django 2.0; CASCADE matches the old default.
    carrera = models.ForeignKey(ProgramaLicenciatura, on_delete=models.CASCADE)
    # Label typo fixed: 'Descripición' -> 'Descripción'.
    descripcion = models.TextField(verbose_name='Descripción', blank=True)
    dependencia = models.ForeignKey(Dependencia, on_delete=models.CASCADE)
    titulo_tesis = models.CharField(max_length=255)
    slug = AutoSlugField(populate_from='titulo_tesis', unique=True)
    #tesis = models.FileField(blank=True)
    tesis_url = models.URLField(blank=True)
    fecha_inicio = models.DateField('Fecha de inicio de licenciatura')
    fecha_fin = models.DateField('Fecha de terminación de licenciatura')
    fecha_grado = models.DateField('Fecha de obtención de grado licenciatura')
    usuario = models.ForeignKey(User, related_name='licenciaturas', on_delete=models.CASCADE)
    tags = models.ManyToManyField(Tag, related_name='licenciatura_tags', blank=True)

    def __str__(self):
        return "{} : {} : {}".format(self.dependencia, str(self.carrera.programa), self.titulo_tesis)

    class Meta:
        ordering = ['dependencia', 'carrera', 'titulo_tesis']
class Maestria(models.Model):
    """A master's degree (maestría) earned by a user."""
    # on_delete explicit: required from Django 2.0; CASCADE matches the old default.
    programa = models.ForeignKey(ProgramaMaestria, on_delete=models.CASCADE)
    # Label typo fixed: 'Descripición' -> 'Descripción'.
    descripcion = models.TextField(verbose_name='Descripción', blank=True)
    dependencia = models.ForeignKey(Dependencia, on_delete=models.CASCADE)
    titulo_tesis = models.CharField(max_length=255)
    slug = AutoSlugField(populate_from='titulo_tesis', unique=True)
    #tesis = models.FileField(blank=True)
    tesis_url = models.URLField(blank=True)
    fecha_inicio = models.DateField('Fecha de inicio de maestría')
    fecha_fin = models.DateField('Fecha de terminación de maestría')
    fecha_grado = models.DateField('Fecha de obtención de grado de maestría')
    usuario = models.ForeignKey(User, related_name='maestrias', on_delete=models.CASCADE)
    tags = models.ManyToManyField(Tag, related_name='maestria_tags', blank=True)

    def __str__(self):
        return "{} : {} : {}".format(self.dependencia, self.programa.programa, self.titulo_tesis)

    class Meta:
        ordering = ['dependencia', 'programa', 'titulo_tesis']
class Doctorado(models.Model):
    """A doctoral degree earned (or in progress) by a user."""
    # on_delete explicit: required from Django 2.0; CASCADE matches the old default.
    programa = models.ForeignKey(ProgramaDoctorado, on_delete=models.CASCADE)
    # Label typo fixed: 'Descripición' -> 'Descripción'.
    descripcion = models.TextField(verbose_name='Descripción', blank=True)
    dependencia = models.ForeignKey(Dependencia, on_delete=models.CASCADE)
    titulo_tesis = models.CharField(max_length=255)
    slug = AutoSlugField(populate_from='titulo_tesis', unique=True)
    #tesis = models.FileField(blank=True)
    tesis_url = models.URLField(blank=True)
    fecha_inicio = models.DateField('Fecha de inicio de doctorado', auto_now=False)
    fecha_fin = models.DateField('Fecha de terminación de doctorado', auto_now=False, blank=True, null=True)
    # NOTE(review): blank=True without null=True on a DateField means an empty
    # form value cannot be stored; confirm whether null=True was intended
    # (changing it needs a migration, so it is left as-is here).
    fecha_grado = models.DateField('Fecha de obtención de grado de doctorado', auto_now=False, blank=True)
    usuario = models.ForeignKey(User, related_name='doctorados', on_delete=models.CASCADE)
    tags = models.ManyToManyField(Tag, related_name='doctorado_tags', blank=True)

    def __str__(self):
        return "{} : {} : {}".format(self.dependencia, self.programa.programa, self.titulo_tesis)

    class Meta:
        ordering = ['fecha_grado', 'dependencia', 'titulo_tesis']
# NOTE(review): appears to be placeholder/seed data; its purpose and intended
# scope (module-level vs class attribute) are not evident from this file —
# confirm before relying on or removing it.
proyectos = [
    ('Ninguno', 1900, 1, 1, 2900, 1, 1, 'OTRO', 'OTRO', 'INDIVIDUAL', 'OTRO', )
]
class PostDoctorado(models.Model):
    """A postdoctoral stay linked to a project and knowledge area."""
    titulo = models.CharField(max_length=255)
    # Label typo fixed: 'Descripición' -> 'Descripción'.
    descripcion = models.TextField(verbose_name='Descripción', blank=True)
    # on_delete explicit: required from Django 2.0; CASCADE matches the old default.
    area_conocimiento = models.ForeignKey(AreaConocimiento, related_name='postdoctorado_area_conocimiento', verbose_name='Área de conocimiento', on_delete=models.CASCADE)
    dependencia = models.ForeignKey(Dependencia, on_delete=models.CASCADE)
    proyecto = models.ForeignKey(Proyecto, on_delete=models.CASCADE)
    fecha_inicio = models.DateField('Fecha de inicio de postdoctorado', auto_now=False)
    fecha_fin = models.DateField('Fecha de terminación de postdoctorado', auto_now=False)
    usuario = models.ForeignKey(User, related_name='postdoctorados', on_delete=models.CASCADE)
    tags = models.ManyToManyField(Tag, related_name='post_doctorado_tags', blank=True)

    def __str__(self):
        return "{} : {} : {}".format(self.usuario, self.dependencia, self.area_conocimiento)

    class Meta:
        ordering = ['fecha_fin', 'dependencia']
# -*- encoding: utf-8 -*-
from PyQt4 import QtGui, QtCore
class ListWidgetSpecial(QtGui.QListWidget):
    """Popup-style list widget that emits the clicked/chosen value as a float.

    Signals:
        valueSelected(float) -- emitted when an item is clicked or confirmed
            with Enter/Return.
        closeMe() -- emitted when Up is pressed while the first row is current.
    """

    valueSelected = QtCore.pyqtSignal(float)
    closeMe = QtCore.pyqtSignal()

    def __init__(self, parent=None):
        super(ListWidgetSpecial, self).__init__(parent)
        self.itemClicked.connect(self.theItemClicked)

    def theItemClicked(self, item):
        self.valueSelected.emit(float(item.text()))

    def keyPressEvent(self, QKeyEvent):
        if QKeyEvent.key() == QtCore.Qt.Key_Up and self.currentRow() == 0:
            self.closeMe.emit()
        if QKeyEvent.key() == QtCore.Qt.Key_Enter or QKeyEvent.key() == QtCore.Qt.Key_Return:
            item = self.currentItem()
            # Bug fix: currentItem() returns None when the list has no current
            # row; the original called .text() on it and crashed on Enter.
            if item is not None:
                self.valueSelected.emit(float(item.text()))
        super(ListWidgetSpecial, self).keyPressEvent(QKeyEvent)

    def focusOutEvent(self, *args, **kwargs):
        # The widget behaves like a popup: losing focus hides it.
        self.hide()
def buy_or_pass(stock_price, all_time_high):
    """Recommend "Buy" when the price is at least 20% below the all-time high,
    otherwise "Pass"."""
    discounted_enough = stock_price <= 0.80 * all_time_high
    return "Buy" if discounted_enough else "Pass"
|
from django.shortcuts import render
from voltageapi.models import measurement
from . import forms
from voltageview.forms import voltapiform,UserForm,UserProfileInfoForm
from django.contrib.auth import authenticate,login,logout
from django.http import HttpResponse,HttpResponseRedirect
from django.urls import reverse
from django.contrib.auth.decorators import login_required
#@login_required
def index(request):
    """Render the landing page. The login requirement is currently disabled."""
    #measurements= measurement.objects.all()
    print("hi")  # debug output left in place
    return render(request,"voltageview/index.html")
@login_required
def user_logout(request):
    """End the current session and redirect to the index page."""
    logout(request)
    return HttpResponseRedirect(reverse('voltage_view:index'))
@login_required
def special(request):
    """Minimal endpoint confirming the requester is authenticated."""
    return HttpResponse("You are logged in")
def register(request):
    """Handle user sign-up: create a User plus its linked UserProfileInfo.

    Bug fix: this view was decorated with @login_required, which prevented
    anonymous visitors from ever reaching the registration form; the
    decorator has been removed.
    """
    registered = False
    if request.method == "POST":
        user_form = UserForm(data=request.POST)
        profile_form = UserProfileInfoForm(data=request.POST)
        if user_form.is_valid() and profile_form.is_valid():
            user = user_form.save()
            # Hash the raw password before persisting it.
            user.set_password(user.password)
            user.save()
            profile = profile_form.save(commit=False)
            profile.user = user
            if 'profile_pic' in request.FILES:
                profile.profile_pic = request.FILES['profile_pic']
            profile.save()
            registered = True
        else:
            print(user_form.errors, profile_form.errors)
    else:
        user_form = UserForm()
        profile_form = UserProfileInfoForm()
    return render(request, 'voltageview/registration.html',
                  {'user_form': user_form,
                   'profile_form': profile_form,
                   'registered': registered})
@login_required
def VoltapiForm(request):
    """Render and process the voltage measurement form (login required).

    Bug fix: on a valid POST the original returned index(request) directly,
    leaving the browser on the POST URL so a refresh re-submitted the form.
    It now redirects (POST/redirect/GET).
    """
    form = voltapiform()
    if request.method == "POST":
        form = voltapiform(request.POST)
        if form.is_valid():
            form.save(commit=True)
            return HttpResponseRedirect(reverse('voltage_view:index'))
        else:
            print("Error")
    return render(request, 'voltageview/formindex.html', {'form': form})
def user_login(request):
    """Authenticate POSTed credentials and start a session on success."""
    if request.method != "POST":
        print("last else")
        return render(request, 'voltageview/login.html', {})
    username = request.POST.get('username')
    password = request.POST.get('password')
    user = authenticate(username=username, password=password)
    if not user:
        print("Some one tried to login and failed")
        return HttpResponse("Invalid credentials")
    if not user.is_active:
        return HttpResponse("Account not active")
    login(request, user)
    return HttpResponseRedirect(reverse('voltage_view:index'))
# def form_name_view(request):
# form = forms.FormName()
#
# if request.method == "POST":
# form = forms.FormName(request.POST)
#
# if form.is_valid():
# print("Validation Success")
# print("Name is"+form.cleaned_data['name'])
# print("Email is"+form.cleaned_data['email'])
# print("Text is"+form.cleaned_data['text'])
#
# return render(request,'voltageview/formindex.html',{'form':form})
|
#! python3
# Author: George Gao, gaojz017@163.com
from django import forms
from .models import Comment
import markdown
class CommentForm(forms.ModelForm):
    """ModelForm for submitting a blog comment (name, email, url, text)."""
    class Meta:
        model = Comment
        fields = ['name', 'email', 'url', 'text']
|
"""Define the command-line interface for the datauniquifier program."""
from pathlib import Path
import os
import psutil
from resource import getrusage, RUSAGE_SELF
import typer
from datauniquifier import analyze
from datauniquifier import extract
from datauniquifier import uniquify
# Building blocks for composing the name of the uniquify function to call —
# presumably UNIQUE_FUNCTION_BASE + UNDERSCORE + approach; TODO confirm once
# the lookup TODO in main() is implemented.
UNIQUE_FUNCTION_BASE = "unique"
UNDERSCORE = "_"
def main(
    approach: str = typer.Option(...),
    column: int = typer.Option(...),
    data_file: Path = typer.Option(...),
    display: bool = typer.Option(False, "--display"),
):
    """Create the list of data values in stored in the specified file and then uniquify the file contents."""
    # display the debugging output for the program's command-line arguments
    typer.echo("")
    typer.echo(f"The chosen approach to uniquify the file is: {approach}")
    typer.echo("")
    typer.echo(f"The data file that contains the input is: {data_file}")
    typer.echo("")
    # TODO: construct the full name of the function to call
    # (presumably UNIQUE_FUNCTION_BASE + UNDERSCORE + approach — confirm)
    function = ""
    # TODO: construct the requested function from the compute module
    # Reference: https://stackoverflow.com/questions/3061/calling-a-function-of-a-module-by-using-its-name-a-string
    function_to_call = ""
    # declare the variables that will store file content for a valid file
    data_text = ""
    data_column_text_list = []
    # --> the file was not specified so we cannot continue using program
    if data_file is None:
        typer.echo("No data file specified!")
        raise typer.Abort()
    # --> the file was specified and it is valid so we should read and check it
    if data_file.is_file():
        data_text = data_file.read_text()
        data_line_count = len(data_text.splitlines())
        typer.echo(f"The data file contains {data_line_count} data values in it!")
        typer.echo("Let's do some uniquification! 🚀")
        typer.echo("")
    # display a final message and some extra spacing, asking a question
    # about the efficiency of the approach to computing the number sequence
    typer.echo("So, was this an efficient approach to uniquifying the data? 🔍")
    typer.echo("")
    # NOTE(review): execution continues even when data_file was NOT a valid
    # file; data_text is then empty and everything below runs on no data.
    data_column_text_list = extract.extract_data_given_column(data_text, column)
    # TODO: call the constructed function and capture the result
    # NOTE(review): until this TODO is implemented, unique_result_list stays ""
    # and the reduction statistics below compare against an empty string.
    unique_result_list = ""
    typer.echo("")
    # display debugging information with the function's output
    if display:
        typer.echo(f" This is the output from using the {approach}:")
        typer.echo("")
        typer.echo(" " + str(unique_result_list))
        typer.echo("")
    # Handle to the current process for the memory reporting TODOs below.
    process = psutil.Process(os.getpid())
    # TODO: display the estimated overall memory use as reported by the operating system
    # Reference:
    # https://stackoverflow.com/questions/938733/total-memory-used-by-python-process
    # TODO: display the estimated peak memory use as reported by the operating system
    # Reference:
    # https://pythonspeed.com/articles/estimating-memory-usage/
    # Display the percent reduction that is a result of the uniquification process
    typer.echo("")
    typer.echo("So, did this remove a lot of duplicate data? 🔍")
    typer.echo("")
    reduction = analyze.calculate_reduction(data_column_text_list, unique_result_list)
    percent_reduction = analyze.calculate_percent_reduction(
        data_column_text_list, unique_result_list
    )
    typer.echo(f"The number of values removed from the data: {reduction}")
    typer.echo(f"The percent reduction due to uniquification: {percent_reduction:.2f}%")
    typer.echo("")


if __name__ == "__main__":
    typer.run(main)
|
import json
import re
from pybars import Compiler
# Matches a newline followed by a whitespace-only line; used below to collapse
# blank lines left behind by template logic.
BLANK_LINE_RE = re.compile(r'\n\s*\n')
def _eq(this, options, a, b):
if a == b:
return options['fn'](this)
return []
def _neq(this, options, a, *args):
for b in args:
if a == b:
return []
return options['fn'](this)
# Helper functions exposed to all compiled templates.
helpers = {
    "eq": _eq,
    "neq": _neq,
}
cp = Compiler()
# User config overrides the shipped defaults key-by-key (shallow merge).
with open("./config.json") as f, open("./default-config.json") as df:
    user_config = json.loads(f.read())
    default_config = json.loads(df.read())
    config = {**default_config, **user_config}
template = config['template']
# Compile the JS and HTML templates of the selected template set.
with open("./templates/" + template + "/index.js.hbs") as f:
    js_index = cp.compile(f.read())
with open("./templates/" + template + "/client.html.hbs") as f:
    html_client = cp.compile(f.read())
# Locale strings for the configured language.
with open("./i18n/" + config['locale'] + ".json") as f:
    i18n = json.loads(f.read())
variables = {
    "i18n": i18n,
    **config,
}
# Render both outputs, collapsing blank lines left by template logic.
with open("./static/client.html", "w") as f:
    f.write(BLANK_LINE_RE.sub("\n", html_client(variables, helpers=helpers)))
with open("./static/index.js", "w") as f:
    f.write(BLANK_LINE_RE.sub("\n", js_index(variables, helpers=helpers)))
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from rasa_nlu.training_data import load_data
from rasa_nlu.config import RasaNLUModelConfig
from rasa_nlu.model import Trainer, Metadata, Interpreter
from rasa_nlu import config
def train(data, config_file, model_dir):
    """Train a Rasa NLU model from `data` and persist it under `model_dir`
    with the fixed model name 'chat'."""
    training_data = load_data(data)
    configuration = config.load(config_file)
    trainer = Trainer(configuration)
    trainer.train(training_data)
    # NOTE(review): the returned path is unused; persist() already wrote the model.
    model_directory = trainer.persist(model_dir, fixed_model_name='chat')
def run():
    """Load the persisted 'chat' model and parse a sample utterance."""
    interpreter = Interpreter.load('./models/nlu/default/chat')
    print(interpreter.parse('Fraud money'))


if __name__ == '__main__':
    # Train first, then immediately exercise the freshly persisted model.
    train('./data/training_data.json', './config/config.yml', './models/nlu')
    run()
|
import discord
import os
from keep_alive import keep_alive
from discord.ext import commands
# Discord client instance; the event handlers below are registered on it.
client = discord.Client()
@client.event
async def on_ready():
    """Log the bot identity once the gateway connection is established."""
    print('We have logged in as {0.user}'.format(client))
@client.event
async def on_message(message):
    """Reply to a set of hard-coded text triggers (prefix matches)."""
    # Ignore our own messages to avoid reply loops.
    if message.author == client.user:
        return
    content = message.content
    # The trigger prefixes are mutually exclusive (none is a prefix of
    # another), so an if/elif chain reproduces the original behavior.
    if content.startswith('wipe'):
        await message.channel.send('**Wipe date is Friday March 26th**')
    elif content.startswith('pure'):
        await message.channel.send('**pure is a badmod**')
    elif content.startswith('$botdev'):
        await message.channel.send('𝕃𝕒𝕔𝕚𝕘𝕒𝕞#1495')
    elif content.startswith('who is god'):
        await message.channel.send('taz.')
    elif content.startswith('$invite'):
        await message.channel.send('[INVITE](https://discord.com/api/oauth2/authorize?client_id=821174933182218300&permissions=8&scope=bot)')
    elif content.startswith('evolh'):
        await message.channel.send('evolh is a sus admin')
    elif content.startswith('$help'):
        # The original repeated this condition four times; all four replies
        # are still sent, in the original order.
        await message.channel.send(os.getenv('HELP'))
        await message.channel.send('`$botdev - Shows Bot Devoloper`')
        await message.channel.send('`$wipe - Shows Insidious Wipe Date`')
        await message.channel.send('`$invite - Shows Bot Invite Link`')
    elif content.startswith('hazard'):
        await message.channel.send('I3ioHazard is kinda cute tho')
# Start the keep-alive web server, then connect the bot.
keep_alive()
# Fetch the token once and reuse it; previously `token` was assigned but the
# call site re-read the environment via os.getenv, leaving `token` unused.
token = os.environ.get("TOKEN")
client.run(token)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-21 06:01
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated (Django 1.11.1): adjusts coupon/discountcoupon fields.

    Applied migrations should not be hand-edited; create a new migration
    for further schema changes.
    """

    dependencies = [
        ('user', '0036_auto_20170620_1646'),
    ]
    operations = [
        migrations.AlterField(
            model_name='coupon',
            name='detail_minor',
            field=models.TextField(blank=True, null=True, verbose_name='优惠券详情 - 次'),
        ),
        migrations.AlterField(
            model_name='coupon',
            name='food',
            field=models.ManyToManyField(blank=True, related_name='coupon_food', to='user.Food', verbose_name='打折商品'),
        ),
        migrations.AlterField(
            model_name='discountcoupon',
            name='coupon_type',
            field=models.IntegerField(choices=[(0, '定额'), (1, '折扣'), (2, '满赠')], default=0, verbose_name='折扣类型'),
        ),
        migrations.AlterField(
            model_name='discountcoupon',
            name='discount',
            field=models.IntegerField(blank=True, default=0, verbose_name='折扣'),
        ),
        migrations.AlterField(
            model_name='discountcoupon',
            name='gift',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='user.Food', verbose_name='赠品'),
        ),
        migrations.AlterField(
            model_name='discountcoupon',
            name='more_gift',
            field=models.IntegerField(blank=True, default=0, verbose_name='满赠个数'),
        ),
        migrations.AlterField(
            model_name='discountcoupon',
            name='num',
            field=models.IntegerField(default=0, verbose_name='赠品数量'),
        ),
        migrations.AlterField(
            model_name='discountcoupon',
            name='quota',
            field=models.IntegerField(blank=True, default=0, null=True, verbose_name='定额优惠金额'),
        ),
    ]
|
#!/usr/bin/env python
# coding: utf-8
# In[247]:
import pandas as pd
import os
import numpy as np
import matplotlib.pyplot as plt
# In[275]:
def fetch_housing_data():
    """Load the rental-housing extract from the working directory.

    Bug fix: the original passed 'r' as the second positional argument of
    pd.read_csv, which is the `sep` parameter — rows would be split on the
    letter "r" instead of commas. A file mode does not belong in read_csv.
    """
    return pd.read_csv("converted_rent_only.csv")
# In[274]:
def filter(housingData):
    """Drop rows with missing required fields and rows whose annual rent is
    below half the family income (those households have no deficit).

    NOTE(review): the name shadows the builtin `filter`; it is kept so that
    existing callers (init) continue to work.
    """
    required = ["SERIALNO", "PUMA", "NP", "R18", "R60", "R65", "NOC", "RNTP", "FINCP"]
    # Equivalent to the original chain of per-column notnull() filters.
    housingData = housingData.dropna(subset=required)
    invalid_rows = (12 * housingData["RNTP"] - 0.5 * housingData["FINCP"] < 0)
    # Bug fix: unary `-` on a boolean Series is rejected by modern
    # numpy/pandas; `~` is the supported boolean inversion.
    return housingData[~invalid_rows]
# In[271]:
def getDeficit(rent, income):
    """Return the annual housing deficit: rent beyond half of income,
    floored at zero."""
    return max(rent - 0.5 * income, 0)
# In[272]:
def calcWeights(weights, persons, children, R18, R60, R65):
    """Fill `weights` in place with a need score per household and return it.

    Children count 1.3x, under-18 members 1.1x; a small random jitter
    breaks ties between otherwise identical households.

    Bug fix: the row count came from a module-global `housingData` that is
    never defined at module scope (the one in init() is local), so every
    call raised NameError. The count now comes from `weights` itself.
    """
    for i in range(len(weights)):
        weights[i] = persons[i] + 1.3 * children[i] + 1.1 * R18[i] + R60[i] + R65[i] + 0.1 * np.random.rand()
    return weights
# In[273]:
def calcExpanditure(weights, M, deducts):
    """Allocate budget M across households in ascending order of weight.

    Each household receives its full deficit while budget remains; the
    first household that cannot be fully covered receives the remainder,
    and later households receive nothing.

    Bug fixes relative to the original:
    - the row count came from a nonexistent module-global `housingData`
      (NameError); it is now len(weights);
    - when a deficit met or exceeded the remaining budget, M was paid out
      without being decremented, so the same remainder could be paid out
      repeatedly and total allocations exceeded the budget.
    """
    ct = len(weights)
    if ct == 0:
        return []
    expanditure = [0] * ct
    # Household indices sorted by ascending weight (stable tie-break on index).
    _, inds = zip(*sorted((w, i) for i, w in enumerate(weights)))
    for idx in inds:
        # Pay the full deficit while the budget allows, else the remainder.
        pay = min(deducts[idx], M)
        expanditure[idx] = pay
        M -= pay
    return expanditure
# In[301]:
def init():
    """Load housing data, score households, allocate the budget, and plot
    total expenditure per PUMA area.

    NOTE(review): calcWeights/calcExpanditure read a global `housingData`
    that is never defined at module scope (the variable below is local to
    init), so as written those calls raise NameError — confirm.
    """
    housingData = fetch_housing_data()
    housingData = filter(housingData)
    # NOTE(review): iterating a DataFrame yields its column labels, so this
    # counts COLUMNS, not rows — len(housingData.index) is likely intended.
    ct = sum(1 for row in housingData)
    rents = []
    incomes = []
    persons = []
    children = []
    R18 = []
    R60 = []
    R65 = []
    areas = []
    deducts = []
    weights = [0]*ct
    # NOTE(review): `expenditures` (correct spelling) is never used; the
    # misspelled `expanditures` below holds the actual result.
    expenditures = [0]*ct
    # Total subsidy budget in dollars.
    M = 50000000
    for i in range(ct):
        areas.append(housingData["PUMA"].iloc[i])
    for i in range(ct):
        # Monthly rent converted to annual.
        rents.append(12 * housingData["RNTP"].iloc[i])
    for i in range(ct):
        incomes.append(housingData["FINCP"].iloc[i])
    for i in range(ct):
        persons.append(housingData["NP"].iloc[i])
    for i in range(ct):
        children.append(housingData["NOC"].iloc[i])
    for i in range(ct):
        R18.append(housingData["R18"].iloc[i])
    for i in range(ct):
        R60.append(housingData["R60"].iloc[i])
    for i in range(ct):
        R65.append(housingData["R65"].iloc[i])
    for i in range(ct):
        deducts.append(getDeficit(rents[i], incomes[i]))
    weights = calcWeights(weights, persons, children, R18, R60, R65)
    expanditures = calcExpanditure(weights, M, deducts)
    # Sum allocations per PUMA area code.
    expanditure_by_area = {3301:0., 3302:0., 3303:0., 3304:0., 3305:0., 3306:0., 3400:0.}
    for i, j in zip(areas, expanditures):
        expanditure_by_area[i] += j
    print(expanditure_by_area)
    legend = expanditure_by_area.keys()
    x = np.arange(len(legend))
    y = expanditure_by_area.values()
    plt.bar(x, y)
    plt.xticks(x, ["0{}".format(i) for i in legend])
    plt.show()
# In[302]:
# Entry point: run the full allocation pipeline and show the per-area plot.
init()
# In[ ]:
# In[ ]:
|
# Generated by Django 2.1.3 on 2019-02-18 18:20
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 2.1.3): adds the boolean recibe_alimentos
    field to caracterizacioninicial.

    Applied migrations should not be hand-edited; create a new migration
    for further schema changes.
    """

    dependencies = [
        ('fest_2019', '0032_auto_20190218_1131'),
    ]
    operations = [
        migrations.AddField(
            model_name='caracterizacioninicial',
            name='recibe_alimentos',
            field=models.BooleanField(default=False),
            preserve_default=False,
        ),
    ]
|
from TreeNode import TreeNode
class Solution(object):
    """LeetCode 101 — check whether a binary tree is a mirror of itself."""

    def isSymmetric(self, root):
        """
        :type root: TreeNode
        :rtype: bool
        """
        # Idiom fix: `is None` instead of `== None` identity-style checks.
        if root is None:
            return True
        return self.isSymmetrucLeftRight(root.left, root.right)

    def isSymmetrucLeftRight(self, left, right):
        """Return True when `left` and `right` subtrees mirror each other.

        (The method name keeps the original typo so callers are unaffected.)
        """
        if left is None and right is None:
            return True
        # Exactly one side missing, or root values differ -> not a mirror.
        if left is None or right is None or left.value != right.value:
            return False
        return (self.isSymmetrucLeftRight(left.left, right.right)
                and self.isSymmetrucLeftRight(left.right, right.left))
if __name__ == '__main__':
    # Smoke test: root 1 with only a right child 2 is asymmetric -> prints False.
    sol = Solution()
    p = TreeNode(1)
    p.insertRight(2)
    print(sol.isSymmetric(p))
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayEbppInstserviceTokenCreateResponse(AlipayResponse):
    """Response model for the alipay.ebpp.instservice.token.create API."""

    def __init__(self):
        super(AlipayEbppInstserviceTokenCreateResponse, self).__init__()
        # Populated by parse_response_content().
        self._sign_token = None

    @property
    def sign_token(self):
        # Signing token issued by the service; None until a response is parsed.
        return self._sign_token

    @sign_token.setter
    def sign_token(self, value):
        self._sign_token = value

    def parse_response_content(self, response_content):
        """Decode the raw response and copy known fields onto this object."""
        response = super(AlipayEbppInstserviceTokenCreateResponse, self).parse_response_content(response_content)
        if 'sign_token' in response:
            self.sign_token = response['sign_token']
|
# Demo range for the sample call at the bottom of the file.
left = 1
right = 22
def selfDividingNumbers(left, right):
    """Return every number in [left, right] that is divisible by each of
    its own digits. Numbers containing a 0 digit are excluded."""
    def _is_self_dividing(n):
        for ch in str(n):
            digit = int(ch)
            if digit == 0 or n % digit != 0:
                return False
        return True

    return [n for n in range(left, right + 1) if _is_self_dividing(n)]
# Demo call; the return value is discarded (wrap in print() to display it).
selfDividingNumbers(left,right)
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
from ..context.dataindex import makeindex
from ..context.missing import is_missing
from .. import errors
from ..document import Document
from ..elements.elementbasemeta import ElementBaseMeta
from ..context.expression import Expression
from ..elements import attributetypes
from ..elements.elementproxy import ElementProxy
from ..tools import textual_list, format_element_type, nearest_word
from ..containers import OrderedDict
from ..application import Application
from ..compat import (
implements_to_string,
text_type,
string_types,
with_metaclass,
iteritems,
iterkeys,
)
import inspect
import logging
import weakref
from textwrap import dedent
from collections import deque, namedtuple
# Pairs an element with associated data — NOTE(review): its usage is not
# visible in this chunk.
Closure = namedtuple("Closure", ["element", "data"])
# Logger for library/application startup messages.
startup_log = logging.getLogger("moya.startup")
class MoyaAttributeError(Exception):
    """Raised when an Attribute definition is invalid (e.g. unknown type name)."""
    pass
class Attribute(object):
    """Describes a single XML attribute accepted by a Moya tag.

    Instances are declared as class attributes on element classes and are
    collected by _get_tag_attributes to build the tag's parameter spec.
    """

    def __init__(
        self,
        doc,
        name=None,
        map_to=None,
        type=None,
        required=False,
        default=None,
        example=None,
        metavar=None,
        context=True,
        evaldefault=False,
        oneof=None,
        synopsis=None,
        choices=None,
        translate=None,
        missing=True,
        empty=True,
    ):
        """
        doc -- human-readable description of the attribute.
        name -- attribute name; defaults to the class-attribute name.
        map_to -- name it is exposed under (defaults to `name`).
        type -- type name resolved via attributetypes ("text" when omitted).
        evaldefault -- evaluate the default rather than using it verbatim.
        choices -- permitted values; may be a callable returning them.
        translate -- translate values (falls back to the type's flag).
        missing / empty -- whether missing / empty values are allowed.
        Raises MoyaAttributeError for an unknown type name.
        """
        self.doc = doc
        self.name = name
        self.map_to = map_to or name
        if type is None:
            type = "text"
        self.type_name = type
        try:
            self.type = attributetypes.lookup(type)
        except KeyError:
            raise MoyaAttributeError(
                "Unable to create an attribute of type '{}'".format(type)
            )
        self.required = required
        self.default = default
        self.example = example
        self.metavar = metavar
        self.context = context
        self.evaldefault = evaldefault
        self.oneof = oneof
        self.synopsis = synopsis
        self._choices = choices
        self.translate = translate or self.type.translate
        self.missing = missing
        self.empty = empty
        self.enum = None
        # if translate is not None:
        #     self.translate = translate

    @property
    def choices(self):
        # Choices may be supplied lazily as a callable.
        if callable(self._choices):
            return self._choices()
        return self._choices

    def __getstate__(self):
        # The resolved type object is dropped for pickling and restored from
        # type_name in __setstate__.
        state = self.__dict__.copy()
        del state["type"]
        return state

    def __setstate__(self, state):
        self.__dict__ = state
        self.type = attributetypes.lookup(self.type_name)

    def __moyaelement__(self):
        return self

    @property
    def type_display(self):
        """Human-readable name of the attribute's type, for documentation."""
        # NOTE(review): checks for 'type_display' but calls get_type_display()
        # — confirm the intended attribute/method names match.
        if hasattr(self.type, "type_display"):
            return self.type.get_type_display()
        return getattr(self.type, "name", text_type(self.type.__name__))

    def default_display(self, value):
        """Render the default value for documentation output."""
        # if not isinstance(value, text_type):
        #     return ''
        if self.evaldefault:
            return value
        return self.type.display(value) or ""

    def __repr__(self):
        return "Attribute(name=%r)" % self.name

    def get_param_info(self):
        """Return a dict describing this attribute for documentation tools."""
        param_info = {
            "name": self.name,
            "doc": self.doc,
            "type": self.type_display,
            "required": bool(self.required),
            "default": self.default,
            "default_display": self.default_display(self.default),
            "metavar": self.metavar,
            "choices": self.choices,
            "missing": self.missing,
            "empty": self.empty,
        }
        return param_info
class _Parameters(object):
    """Lazy mapping of attribute names to values evaluated against a context.

    Each entry in `attr_values` is a callable taking the context; results
    are cached after first evaluation. With lazy=False everything is
    evaluated eagerly and the context reference is dropped.
    """

    # Note: double-underscore names are name-mangled (_Parameters__...).
    __slots__ = ["__attr_values", "__context", "__cache"]

    def __init__(self, attr_values, context, lazy=True):
        self.__attr_values = attr_values
        self.__context = context
        self.__cache = {}
        if not lazy:
            for name in self.__attr_values:
                self.__cache[name] = self.__attr_values[name](self.__context)
            self.__context = None

    def __getattr__(self, name):
        # NOTE(review): an unknown name surfaces as KeyError rather than
        # AttributeError here — confirm callers expect that.
        if name not in self.__cache:
            self.__cache[name] = self.__attr_values[name](self.__context)
        return self.__cache[name]

    def __repr__(self):
        return repr(self._get_param_dict())

    def _get_param_dict(self):
        """Evaluate any remaining attributes and return a plain dict."""
        d = {}
        for name in self.__attr_values:
            if name not in self.__cache:
                self.__cache[name] = self.__attr_values[name](self.__context)
            d[name] = self.__cache[name]
        return d

    def keys(self):
        return self.__attr_values.keys()

    def values(self):
        return [self[k] for k in iterkeys(self.__attr_values)]

    def items(self):
        return {k: self[k] for k in self.__attr_values}

    def __iter__(self):
        return iter(self.keys())

    def __getitem__(self, key):
        if key not in self.__cache:
            self.__cache[key] = self.__attr_values[key](self.__context)
        return self.__cache[key]

    def __setitem__(self, key, value):
        # Parameters are read-only.
        raise NotImplementedError

    def __contains__(self, key):
        return key in self.__attr_values

    def __moyaconsole__(self, console):
        console.obj(None, self._get_param_dict())
def _open_tag(tag_name, attrs):
if attrs:
a = " ".join('%s="%s"' % (k, v) for k, v in sorted(attrs.items())).strip()
return "<%s>" % " ".join((tag_name, a))
return "</%s>" % tag_name
def _close_tag(tag_name):
return "</%s>" % tag_name
def _childless_tag(tag_name, attrs):
if attrs:
a = " ".join('%s="%s"' % (k, v) for k, v in sorted(attrs.items())).strip()
return "<%s/>" % " ".join((tag_name, a))
return "<%s/>" % tag_name
class _Eval(object):
__slots__ = ["code", "filename", "compiled_code"]
def __init__(self, code, filename):
self.code = code
self.filename = filename
self.compiled_code = compile(code, filename, "eval")
def __call__(self, context):
return eval(self.compiled_code, dict(context=context))
def __getstate__(self):
return self.code, self.filename
def __setstate__(self, state):
code, filename = state
self.code = state
self.filename = filename
self.compiled_code = compile(code, filename, "eval")
def make_eval(s, filename="unknown"):
    """Create a function that evaluates a Python expression"""
    stripped = s.strip()
    return _Eval(stripped, filename)
class ElementType(tuple):
    """(xmlns, tag_name) pair that also compares equal to the bare tag name.

    Bug fix: for non-string operands the original fell back to
    `self == other`, which re-entered this __eq__ and recursed until
    RecursionError; the fallback now defers to tuple equality.
    """

    def __eq__(self, other):
        if isinstance(other, string_types):
            # Compare against the local tag name only.
            return self[1] == other
        return tuple.__eq__(self, other)

    def __ne__(self, other):
        # Keep != consistent with the custom __eq__.
        result = self.__eq__(other)
        return result if result is NotImplemented else not result

    # Defining __eq__ would otherwise set __hash__ to None on Python 3;
    # keep tuple hashing so instances remain usable as dict/set keys.
    __hash__ = tuple.__hash__
class NoDestination(object):
    """Sentinel type meaning 'no destination was supplied'."""

    __slots__ = []

    def __eq__(self, other):
        # Any two NoDestination instances compare equal.
        return isinstance(other, NoDestination)

    def __repr__(self):
        return "<no destination>"


# Module-level shared sentinel instance.
no_destination = NoDestination()
class NoValue(object):
    """Sentinel type meaning 'no value'; unpickles back to the shared instance."""

    __slots__ = []

    def __reduce__(self):
        """So pickle doesn't create a new instance on unpickling."""
        # Returning the global's *name* makes pickle resolve the module-level
        # `no_value` on load. Bug fix: the original returned b"no_value";
        # Python 3 pickle requires str here and raises on bytes. str() keeps
        # this a native str under `unicode_literals` on Python 2.
        return str("no_value")

    def __eq__(self, other):
        return isinstance(other, NoValue)


no_value = NoValue()
class Getter(object):
    """Callable that ignores its context argument and returns a fixed value."""

    __slots__ = ["value"]

    def __init__(self, value):
        self.value = value

    def __call__(self, context):
        # `context` exists only to satisfy the getter protocol.
        return self.value
class Translator(object):
    """Wrap a getter so that text results pass through lib.translate."""

    __slots__ = ["getter", "lib"]

    def __init__(self, getter, lib):
        self.getter = getter
        # Weak proxy: the translator must not keep the library alive.
        self.lib = weakref.proxy(lib)

    def __call__(self, context):
        result = self.getter(context)
        if not isinstance(result, text_type):
            # Non-text values pass through untranslated.
            return result
        return self.lib.translate(context, result)
class ChoicesChecker(object):
    """Wrap a value callable and reject values outside a fixed set of choices."""

    __slots__ = ["name", "element", "value_callable", "choices"]

    def __init__(self, value_callable, name, element, choices):
        self.name = name
        self.element = element
        self.value_callable = value_callable
        self.choices = choices

    def __call__(self, context):
        value = self.value_callable(context)
        # None (attribute omitted) is always permitted.
        if value is None or value in self.choices:
            return value
        raise errors.ElementError(
            "attribute '{}' must be {} (not '{}') ".format(
                self.name, textual_list(self.choices), value
            ),
            element=self.element,
        )
class MissingChecker(object):
    """Wrap a value callable and reject values that are 'missing' from the context."""

    __slots__ = ["value_callable", "name", "element"]

    def __init__(self, value_callable, name, element):
        self.value_callable = value_callable
        self.name = name
        self.element = element

    def __call__(self, context):
        value = self.value_callable(context)
        if not is_missing(value):
            return value
        raise errors.ElementError(
            "attribute '{}' must not be missing (it is {})".format(
                self.name, context.to_expr(value)
            ),
            diagnosis="The expression has referenced a value on the context which doesn't exist. Check the expression for typos.",
            element=self.element,
        )
class EmptyChecker(object):
    """Wrap a value callable and reject empty or otherwise falsy values."""

    __slots__ = ["value_callable", "name", "element"]

    def __init__(self, value_callable, name, element):
        self.value_callable = value_callable
        self.name = name
        self.element = element

    def __call__(self, context):
        value = self.value_callable(context)
        if value:
            return value
        raise errors.ElementError(
            "attribute '{}' must not be empty or evaluate to false (it is {})".format(
                self.name, context.to_expr(value)
            ),
            diagnosis="Check the expression returns a non-empty result.",
            element=self.element,
        )
@implements_to_string
class ElementBaseType(object):
    """Base type for Moya XML tag elements."""

    # NOTE(review): __metaclass__ is Python 2 metaclass syntax only; on
    # Python 3 this line has no effect — confirm how the metaclass is
    # applied for py3.
    __metaclass__ = ElementBaseMeta

    # Broad category used to group tags.
    _element_class = "data"
    _lib_long_name = None
    _ignore_skip = False
    element_class = "default"
    # XML namespace this tag belongs to.
    xmlns = "http://moyaproject.com"
    # Extra attribute names to preserve — NOTE(review): usage not visible here.
    preserve_attributes = []

    class Meta:
        # Per-tag behavior flags consumed by the runtime.
        logic_skip = False
        virtual_tag = False
        is_call = False
        text_nodes = None
        translate = True

    class Help:
        undocumented = True
    @classmethod
    def _get_tag_attributes(cls):
        """Collect Attribute instances declared on this class, keyed by name.

        Side effect: fills in each Attribute's `name` from its class
        attribute name (leading underscores stripped) when unset.
        """
        attributes = OrderedDict()
        for k in dir(cls):
            v = getattr(cls, k)
            if isinstance(v, Attribute):
                name = v.name or k.lstrip("_")
                v.name = name
                attributes[name] = v
        return attributes
    @classmethod
    def _get_base_attributes(cls):
        """Return tag attributes inherited from the nearest capable base.

        Walks the MRO (skipping cls itself), merges at the first base that
        defines _get_tag_attributes, and stops there.
        """
        attributes = {}
        for el in cls.__mro__[1:]:
            if hasattr(el, "_get_tag_attributes"):
                base_attributes = el._get_tag_attributes()
                # Entries already in `attributes` take precedence.
                base_attributes.update(attributes)
                attributes = base_attributes
                break
        return attributes
    @property
    def document(self):
        # _document is called to dereference — presumably a weakref; confirm.
        return self._document()

    @property
    def lib(self):
        # The library owning this element's document.
        return self._document().lib

    @property
    def priority(self):
        return self.lib.priority
    @classmethod
    def extract_doc_info(cls):
        """Extract information to document this tag"""
        doc = {}
        doc["namespace"] = cls.xmlns
        doc["tag_name"] = cls._tag_name
        # Dedent so docstrings written indented in class bodies render cleanly.
        doc["doc"] = dedent(cls._tag_doc) if cls._tag_doc else cls._tag_doc
        doc["defined"] = getattr(cls, "_definition", "?")
        if hasattr(cls, "Help"):
            example = getattr(cls.Help, "example", None)
            doc["example"] = example
            doc["synopsis"] = getattr(cls.Help, "synopsis", None)
        # Split parameters into those inherited from bases vs declared here.
        base_attributes = cls._get_base_attributes()
        attribs = cls._tag_attributes
        params = {}
        inherited_params = {}
        for name, attrib in base_attributes.items():
            inherited_params[name] = attrib.get_param_info()
        for name, attrib in attribs.items():
            if name not in inherited_params:
                params[name] = attrib.get_param_info()
        doc["params"] = params
        doc["inherited_params"] = inherited_params
        return doc
@classmethod
def _get_help(cls, attribute_name, default):
help = getattr(cls, "Help", None)
if help is None:
return default
return getattr(help, attribute_name, default)
    @property
    def tag_name(self):
        # The XML local name of this tag.
        return self._tag_name

    @property
    def moya_name(self):
        """Return the tag name qualified as '{short-ns}name'; the default
        'http://moyaproject.com/' prefix is shortened to its trailing path."""
        ns = self.xmlns
        if ns.startswith("http://moyaproject.com/"):
            ns = ns[len("http://moyaproject.com/") :]
        return "{{{}}}{}".format(ns, self.tag_name)
def get_app(self, context, app_attribute="from", check=True):
    """Resolve the application this element should run against.

    Resolution order: the tag's own `from` attribute (when supported),
    then the context's current `.app`. String values are resolved via
    the archive. With check=True a missing result raises
    AppMissingError and a non-Application result raises AppError.
    """
    app = None
    if self.supports_parameter(app_attribute):
        app = self.get_parameter(context, app_attribute)
    if not app:
        app = context.get(".app", None)
    if isinstance(app, string_types):
        app = self.archive.find_app(app)
    if check:
        if not app:
            raise errors.AppMissingError()
        if not isinstance(app, Application):
            raise errors.AppError(
                "expected an application object here, not {!r}".format(app)
            )
    return app
# def check_app(self, app):
# if not isinstance(app, Application):
# self.throw("badvalue.notapp", "'app' is not an application")
# return app
def get_proxy(self, context, app):
    """Return an ElementProxy wrapping this element for (context, app)."""
    return ElementProxy(context, app, self)
def get_let_map(self, context, check_missing=False):
    """Gets and evaluates attributes set with the item namespace"""
    if self._let:
        # Compile the let expressions once and cache them on the element.
        if self._let_exp is None:
            self._let_exp = {k: Expression(v).eval for k, v in self._let.items()}
        let_map = {k: v(context) for k, v in self._let_exp.items()}
        if check_missing:
            # Reject evaluated values flagged as 'missing' by the runtime.
            for k, v in iteritems(let_map):
                if getattr(v, "moya_missing", False):
                    raise errors.ElementError(
                        "let:{} must not be missing (it is {!r})".format(k, v)
                    )
        return let_map
    else:
        return {}
def get_let_map_eval(self, context, eval, check_missing=False):
    """Gets and evaluates attributes set with the item namespace, with an alternative eval"""
    if self._let:
        # Unlike get_let_map there is no expression cache: the supplied
        # eval callable may differ per call.
        eval = eval or context.eval
        let_map = {k: eval(v) for k, v in self._let.items()}
        if check_missing:
            for k, v in iteritems(let_map):
                if getattr(v, "moya_missing", False):
                    raise errors.ElementError(
                        "let:{} must not be missing (it is {!r})".format(k, v)
                    )
        return let_map
    else:
        return {}
def get_parameters(self, context, *names):
    """Evaluate tag attributes against `context`.

    With no names, returns a lazy _Parameters view over all attributes;
    otherwise returns a list of evaluated values in `names` order,
    converting BadValueError into a tag-level throw.
    """
    if not names:
        return _Parameters(self._attr_values, context)
    def make_param(name, get=self._attr_values.__getitem__):
        try:
            return get(name)(context)
        except errors.BadValueError as e:
            self.throw(
                "bad-value.attribute",
                "Attribute '{}' -- {}".format(name, text_type(e)),
            )
    return [make_param(n) for n in names]
def compile_expressions(self):
    """Attempt to compile anything that could be an expression (used by precache).

    Best-effort: compilation failures are deliberately ignored because
    the same expressions are compiled (and properly reported) on first
    real use.
    """
    if getattr(self, "_attrs", None):
        for v in self._attrs.values():  # keys were unused in the original loop
            try:
                Expression.compile_cache(v)
            except Exception:  # was a bare except; don't swallow KeyboardInterrupt/SystemExit
                pass
            if "${" in v and "}" in v:
                Expression.extract(v)
    if getattr(self, "_let", None):
        for v in self._let.values():
            try:
                Expression.compile_cache(v)
            except Exception:  # was a bare except
                pass
    if getattr(self, "text", None):
        Expression.extract(self.text)
def get_parameters_nonlazy(self, context):
    """Like get_parameters() with no names, but evaluated eagerly."""
    return _Parameters(self._attr_values, context, lazy=False)
def get_parameter(self, context, name):
    """Evaluate and return a single attribute value."""
    return self.get_parameters(context, name)[0]
def get_parameters_map(self, context, *names):
    """Evaluate the named attributes and return them as a dict."""
    if not names:
        raise ValueError("One or more attribute names must be supplied")
    get = self._attr_values.__getitem__
    return {name: get(name)(context) for name in names}
def get_all_parameters(self, context):
    """Evaluate every declared attribute and return {name: value}."""
    return {k: v(context) for k, v in self._attr_values.items()}
def get_all_data_parameters(self, context):
    """As get_all_parameters, excluding private ('_'-prefixed) attributes."""
    return {
        k: v(context) for k, v in self._attr_values.items() if not k.startswith("_")
    }
def has_parameter(self, name):
    """True if `name` is a declared attribute that was supplied in the XML."""
    return (name in self._tag_attributes) and (name not in self._missing)
def has_parameters(self, *names):
    """True if every name in `names` was declared and supplied."""
    for name in names:
        if name not in self._tag_attributes or name in self._missing:
            return False
    return True
def supports_parameter(self, name):
    """True if this tag declares an attribute called `name` (supplied or not)."""
    return name in self._tag_attributes
def auto_build(self, context, text, attrs, translatable_attrs):
    """Validate the raw XML attributes and bind evaluated values to self.

    Enforces required/unknown/'one-of' attribute constraints, then wraps
    each declared attribute in its type/choices/missing/empty/translation
    checkers and stores the result both as an instance attribute and in
    self._attr_values. Undeclared attributes land raw in self._attributes.
    Raises errors.ElementError on any constraint violation.
    """
    _missing = self._missing = set()
    self._attrs = attrs
    self._translatable_attrs = translatable_attrs
    attrs_keys_set = frozenset(attrs.keys())
    # --- required attributes must all be present -----------------------
    if (
        self._required_tag_attributes
        and not self._required_tag_attributes.issubset(attrs_keys_set)
    ):
        missing = []
        for k in self._required_tag_attributes:
            if k not in attrs:
                missing.append(k)
        if len(missing) == 1:
            raise errors.ElementError(
                "'%s' is a required attribute" % missing[0], element=self
            )
        else:
            raise errors.ElementError(
                "%s are required attributes"
                % ", ".join("'%s'" % m for m in missing),
                element=self,
            )
    # --- 'one of' groups: at least one member of each group ------------
    if hasattr(self._meta, "one_of"):
        for group in self._meta.one_of:
            if not any(name in attrs for name in group):
                raise errors.ElementError(
                    "one of {} is required".format(textual_list(group)),
                    element=self,
                )
    # --- unknown attributes (unless Meta.all_attributes allows any) ----
    if not getattr(
        self._meta, "all_attributes", False
    ) and not self._tag_attributes_set.issuperset(attrs_keys_set):
        unknown_attrs = sorted(attrs_keys_set - self._tag_attributes_set)
        diagnosis = ""
        if len(unknown_attrs) == 1:
            msg = "{} is not a valid attribute on this tag"
            # Suggest the closest valid name when there is one.
            nearest = nearest_word(unknown_attrs[0], self._tag_attributes_set)
            if nearest is not None:
                diagnosis = "Did you mean '{}'?".format(nearest)
            else:
                diagnosis = "Valid attributes on this tag are {}".format(
                    textual_list(sorted(self._tag_attributes_set))
                )
        else:
            msg = "Attributes {} are not valid on this tag"
        diagnosis += "\n\nrun the following for more information:\n\n**$ moya help {}**".format(
            self.moya_name
        )
        raise errors.ElementError(
            msg.format(textual_list(unknown_attrs, "and")),
            element=self,
            diagnosis=diagnosis,
        )
    # --- bind declared attributes --------------------------------------
    self._attr_values = attr_values = {}
    for attribute_name, attribute in self._tag_attributes.items():
        if attribute_name not in attrs:
            _missing.add(attribute_name)
            # Defaults may themselves be expressions to evaluate.
            if attribute.evaldefault and attribute.default is not None:
                value = attribute.type(self, attribute_name, attribute.default)
            else:
                value = Getter(attribute.default)
        else:
            value = attribute.type(self, attribute_name, attrs[attribute_name])
        name = attribute.map_to or attribute_name
        if not attribute.context:
            # Non-context attributes are resolved immediately.
            value = value.value
        # Wrap with validation layers (innermost applied first).
        if attribute.choices:
            value = ChoicesChecker(value, name, self, attribute.choices)
        if not attribute.missing:
            value = MissingChecker(value, name, self)
        if not attribute.empty:
            value = EmptyChecker(value, name, self)
        if attribute_name in self._translatable_attrs:
            value = Translator(value, self.lib)
        setattr(self, name, value)
        attr_values[name] = value
    # Undeclared attributes are kept raw.
    for k, v in attrs.items():
        if k not in self._tag_attributes:
            self._attributes[k] = v
    self.post_build(context)
def __init__(
    self, document, xmlns, tag_name, parent_docid, docid, source_line=None
):
    """Create an element node and register it with its document.

    The document is held via weakref to avoid a reference cycle.
    """
    super(ElementBaseType, self).__init__()
    # Rebind class-level metadata on the instance for fast access.
    self._tag_attributes = self.__class__._tag_attributes
    self._required_tag_attributes = self.__class__._required_tag_attributes
    self._document = weakref.ref(document)
    self.xmlns = xmlns
    self._tag_name = tag_name
    self._element_type = (xmlns, tag_name)
    self.parent_docid = parent_docid
    self._docid = docid
    self._attributes = OrderedDict()  # undeclared XML attributes (raw)
    self._children = []
    self.source_line = source_line
    self.insert_order = 0
    self._libid = None
    self._location = document.location
    # Record where the tag class is defined, once per class (used by docs).
    if not hasattr(self.__class__, "_definition"):
        self.__class__._definition = inspect.getsourcefile(self.__class__)
    document.register_element(self)
def close(self):
    """Lifecycle hook; default no-op (subclasses may override)."""
    pass
def run(self, context):
    """Default run logic: defer execution to this element's children."""
    from moya.logic import DeferNodeContents
    yield DeferNodeContents(self)
# def dumps(self):
# data = {
# 'xmlns': self.xmlns,
# 'tag_name': self._tag_name,
# 'parent_docid': self.parent_docid,
# 'docid': self._docid,
# 'source_line': self.source_line,
# '_attr_values': self._attr_values,
# 'libname': self.libname,
# '_item_attrs': self._let,
# 'docname': self.docname,
# 'text': self.text,
# '_location': self._location,
# '_attributes': self._attributes,
# '_code': self._code,
# '_let': self._let,
# '_missing': self._missing
# }
# for k in self.preserve_attributes:
# data[k] = getattr(self, k)
# data['children'] = [child.dumps() for child in self._children]
# return data
# @classmethod
# def loads(cls, data, document):
# element_type = get_element_type(data['xmlns'],
# data['tag_name'])
# element = element_type(document,
# data['xmlns'],
# data['tag_name'],
# data['parent_docid'],
# data['docid'],
# data['source_line'])
# element._attr_values = data['_attr_values']
# element.libname = data['libname']
# element._let = data['_let']
# element.docname = data['docname']
# element.text = data['text']
# element._location = data['_location']
# element._attributes = data['_attributes']
# element._code = data['_code']
# element._missing = data['_missing']
# for k, v in element._attr_values.items():
# setattr(element, k, v)
# for k in element.preserve_attributes:
# setattr(element, k, data[k])
# element._children = [cls.loads(child, document)
# for child in data['children']]
# if element.docname is not None:
# document.register_named_element(element.docname, element)
# if document.lib:
# document.lib.register_element(element)
# document.lib.register_named_element(element.libname, element)
# return element
def __str__(self):
    """Render as a childless XML tag; must never raise, since it is used
    in error reporting (attributes may not exist mid-construction)."""
    try:
        attrs = self._attributes.copy()
    except Exception:  # was a bare except; keep the best-effort behaviour
        attrs = {}
    return _childless_tag(self._tag_name, attrs)
# repr intentionally matches str: both render the XML tag form.
__repr__ = __str__
def __eq__(self, other):
    """Elements are equal when they share element type and library id.

    BUGFIX: comparing against a non-element used to raise
    AttributeError; now returns NotImplemented per the data model.
    """
    if not hasattr(other, "_element_type"):
        return NotImplemented
    return self._element_type == other._element_type and self.libid == other.libid
def __ne__(self, other):
    """Inverse of __eq__ (kept explicit for Python 2 compatibility).

    BUGFIX: returns NotImplemented for non-elements instead of raising.
    """
    if not hasattr(other, "_element_type"):
        return NotImplemented
    return not (
        self._element_type == other._element_type and self.libid == other.libid
    )
def get_element(self, name, app=None):
    """Look up a named element via the document (relative to our lib)."""
    return self.document.get_element(name, app=app, lib=self.lib)
def get_app_element(self, name, app=None):
    """Like get_element, but also resolves the application owning the result."""
    app, element = self.document.get_element(name, app=app, lib=self.lib)
    if app is None:
        # Fall back to the app registered for the element's own library.
        app = self.archive.find_app(element.lib.long_name)
    return app, element
@property
def archive(self):
    """Archive of the owning document."""
    doc = self.document
    return doc.archive
def __iter__(self):
    """Iterate over direct child elements."""
    for child in self._children:
        yield child
def __reversed__(self):
    """Iterate direct children last-to-first."""
    for child in self._children[::-1]:
        yield child
def is_root(self):
    """True when this element has no parent (i.e. it is the document root)."""
    return self.parent_docid is None
@property
def parent(self):
    """Parent element, or None for the root."""
    docid = self.parent_docid
    return None if docid is None else self.document[docid]
def get_ancestor(self, element_type):
    """Find the first ancestor of a given type.

    Walks up the parent chain; raises ElementNotFoundError when no
    ancestor matches `element_type`.
    """
    node = self.parent
    while node is not None:
        if node._match_element_type(element_type):
            return node
        node = node.parent
    raise errors.ElementNotFoundError(
        element_type,
        msg="{} has no ancestor of type {}".format(
            self, format_element_type(element_type)
        ),
    )
@property
def docid(self):
    """Document-unique id of this element."""
    return self._docid
@property
def libid(self):
    """Library-qualified id ('<lib long name>#<libname>'), or None."""
    if self._libid is not None:
        return self._libid  # explicit override wins
    if not hasattr(self, "libname") or not self.document.lib:
        return None
    return "%s#%s" % (self.document.lib.long_name or "", self.libname)
def get_appid(self, app=None):
    """Application-qualified id ('<app name>#<libname>'); libid if app is None."""
    if app is None:
        return self.libid
    return "%s#%s" % (app.name or "", self.libname)
def render_tree(self, indent=0):
    """Print this element and its descendants as an indented XML-ish tree.

    Debugging aid; writes to stdout and returns None.
    """
    indent_str = " " * indent
    attrs = self._attributes.copy()
    if not self._children:
        print(indent_str + _childless_tag(self._tag_name, attrs))
    else:
        print(indent_str + _open_tag(self._tag_name, attrs))
        for child in self._children:
            child.render_tree(indent + 1)
        print(indent_str + _close_tag(self._tag_name))
def process_attrs(self, attrs, attr_types):
    """Coerce raw string attribute values in-place according to attr_types.

    attr_types maps attribute name -> bool/int/float (the type object or
    its name) or an arbitrary callable; names absent from attr_types are
    left untouched. Raises errors.AttributeTypeError on bad int/float.
    """
    def asint(name, s):
        # Accept an optional leading minus followed by digits.
        if s.isdigit() or (s.startswith("-") and s[1:].isdigit()):
            return int(s)
        raise errors.AttributeTypeError(self, name, s, "int")
    def asfloat(name, s):
        try:
            return float(s)
        except (TypeError, ValueError):  # was a bare except
            raise errors.AttributeTypeError(self, name, s, "float")
    for k in attrs.keys():
        if k not in attr_types:
            continue
        attr_type = attr_types[k]
        if attr_type in (bool, "bool"):
            # Anything other than yes/true (case-insensitive) is False.
            attrs[k] = attrs[k].strip().lower() in ("yes", "true")
        elif attr_type in (int, "int"):
            attrs[k] = asint(k, attrs[k])
        elif attr_type in (float, "float"):
            attrs[k] = asfloat(k, attrs[k])
        else:
            attrs[k] = attr_type(attrs[k])
def _build(self, context, text, attrs, translatable_attrs):
    # NOTE(review): when Meta.text_nodes is truthy the raw text is
    # discarded here -- looks inverted at first glance; confirm intent.
    if self._meta.text_nodes:
        text = ""
    self._text = self.text = text
    self.auto_build(context, text, attrs, translatable_attrs)
def _add_child(self, element):
    """Append `element` to this element's children."""
    self._children.append(element)
def _match_element_type(self, element_type):
    """Match None (anything), an (xmlns, name) tuple, or a bare tag name."""
    if element_type is None:
        return True
    if isinstance(element_type, tuple):
        return element_type == self._element_type
    return self._tag_name == element_type
# Public alias for _match_element_type (retained for compatibility).
check_type = _match_element_type
def find(self, element_type=None, element_class=None, **attrs):
    """Yield direct children matching the type/class/attribute filters.

    BUGFIX: the original attrs filter yielded a child once *per*
    matching attribute (duplicates) and effectively matched when *any*
    attribute agreed; a child is now yielded at most once, when all
    requested attributes are present and equal.
    """
    def attrs_match(child):
        child_attrs = child._attributes
        return all(
            k in child_attrs and child_attrs[k] == v for k, v in attrs.items()
        )
    # 'fast' path -- no type/class filtering required
    if not element_type and not element_class:
        for child in self._children:
            if not attrs or attrs_match(child):
                yield child
    # 'slow' path
    else:
        for child in self._children:
            if element_type is not None and not child._match_element_type(
                element_type
            ):
                continue
            if element_class is not None and child._element_class != element_class:
                continue
            if not attrs or attrs_match(child):
                yield child
def replace(self, element):
    """replace this node with a different element"""
    siblings = self.parent._children
    for i, sibling in enumerate(siblings):
        if sibling is self:
            siblings[i] = element
            break  # identity match is unique; no need to keep scanning
    element.parent_docid = self.parent_docid
def children(self, element_type=None, element_class=None, **attrs):
    """Alias for find(); yields matching direct children."""
    return self.find(element_type, element_class, **attrs)
def get_child(self, element_type=None):
    """First child matching `element_type`, or None."""
    for child in self.find(element_type):
        return child
    return None
@property
def has_children(self):
    """True when this element has at least one child."""
    return len(self._children) > 0
def any_children(self, element_type=None, element_class=None, **attrs):
    """Check if there is at least one child that matches"""
    sentinel = object()
    matches = self.children(element_type, element_class, **attrs)
    return next(iter(matches), sentinel) is not sentinel
def get_children(self, element_type=None, element_class=None, **attrs):
    """Like find(), but returns the matches as a list."""
    return list(self.find(element_type, element_class, **attrs))
def find_siblings(self, element_type=None, element_class=None, **attrs):
    """Yield children of this element's parent matching the filters.

    Includes self when it matches; yields nothing at the root.
    BUGFIX: as with find(), a sibling is now yielded at most once and
    only when *all* requested attributes match (the original yielded
    once per individually-matching attribute).
    """
    parent = self.parent
    if parent is None:
        return
    for child in parent._children:
        if element_type is not None and not child._match_element_type(element_type):
            continue
        if element_class is not None and child._element_class != element_class:
            continue
        if not attrs:
            yield child
        else:
            child_attrs = child._attributes
            if all(k in child_attrs and child_attrs[k] == v for k, v in attrs.items()):
                yield child
@property
def siblings(self):
    """All children of our parent (including self); [] at the root."""
    parent = self.parent
    return [] if parent is None else parent._children
def younger_siblings(self, element_type=None, element_class=None, **attrs):
    """Yield matching siblings that come after this element.

    BUGFIX: the original drove the sibling iterator with bare next()
    inside a generator; under PEP 479 (Python 3.7+) the terminating
    StopIteration becomes a RuntimeError. This version also simply
    yields nothing when self is absent from the filtered sibling list.
    """
    seen_self = False
    for sibling in self.find_siblings(element_type, element_class, **attrs):
        if seen_self:
            yield sibling
        elif sibling is self:
            seen_self = True
def younger_siblings_of_type(self, element_type):
    """Yield younger siblings that have a given type, directly after this element

    Stops at the first following sibling of a different type, as before.
    BUGFIX: rewritten without bare next() calls, whose escaping
    StopIteration becomes a RuntimeError under PEP 479 (Python 3.7+).
    """
    seen_self = False
    for sibling in self.find_siblings():
        if not seen_self:
            if sibling is self:
                seen_self = True
            continue
        if sibling._element_type == element_type:
            yield sibling
        else:
            break
@property
def younger_sibling(self):
    """The sibling immediately after this element, or None."""
    siblings = self.siblings
    try:
        index = siblings.index(self)
        return siblings[index + 1]
    except (ValueError, IndexError):
        return None
@property
def older_sibling(self):
    """The sibling immediately before this element, or None.

    BUGFIX: when this element is first, index-1 used to wrap around and
    return siblings[-1] (the last sibling); it now returns None.
    """
    try:
        index = self.siblings.index(self)
    except ValueError:
        return None
    if index == 0:
        return None
    return self.siblings[index - 1]
@property
def next_sibling(self):
    """Nearest following element: this node's next sibling, else the
    closest ancestor's next sibling, else None.

    BUGFIX: the original returned unconditionally on the first loop
    iteration and always consulted `self` rather than `node`, so the
    ancestor walk never ran. It also used older_sibling (the *previous*
    sibling) -- presumed a typo for the following sibling; confirm.
    """
    node = self
    while node is not None:
        sibling = node.younger_sibling
        if sibling is not None:
            return sibling
        node = node.parent
    return None
def get(self, element_type=None, element_class=None, **attrs):
    """First matching child; raises ElementNotFoundError when none match."""
    for child in self.find(element_type, element_class, **attrs):
        return child
    raise errors.ElementNotFoundError(text_type(element_type))
def safe_get(self, element_type=None, element_class=None, **attrs):
    """First matching child, or None."""
    return next(self.find(element_type, element_class, **attrs), None)
def build(self, text, **attrs):
    """Build hook; default no-op (subclasses may override)."""
    pass
def finalize(self, context):
    """Finalization hook; default no-op (subclasses may override)."""
    pass
def document_finalize(self, context):
    """Document-level finalization hook; default no-op."""
    pass
def lib_finalize(self, context):
    """Library-level finalization hook; default no-op."""
    pass
def get_all_children(self):
    """Recursively get all children"""
    # Breadth-first walk; self is included as the first entry (as before).
    queue = deque([self])
    result = []
    while queue:
        node = queue.popleft()
        result.append(node)
        queue.extend(node._children)
    return result
def post_build(self, context):
    """Hook run after attribute binding (see auto_build); default no-op."""
    pass
def __moyaconsole__(self, console):
    """Render this element as XML on the moya debug console."""
    console.xml(text_type(self))
def throw(self, exc_type, msg, info=None, diagnosis=None, **kwargs):
    """Throw a Moya exception

    Extra keyword arguments are merged into the exception's info dict.
    """
    from moya.logic import MoyaException
    if info is None:
        info = {}
    info.update(kwargs)
    raise MoyaException(exc_type, msg, diagnosis=diagnosis, info=info)
def get_closure(self, context, element=None, extra=None):
    """Get element with a snapshot of data in the local context scope"""
    if element is None:
        element = self
    # Snapshot the current scope, dropping private ('_'-prefixed) names.
    data = {k: v for k, v in context.items("") if not k.startswith("_")}
    if extra is not None:
        data.update(extra)
    return Closure(element, data)
def on_logic_exception(self, callstack, exc_node, logic_exception):
    """Hook invoked for an unhandled logic exception; renders a moya traceback."""
    from moya.logic import render_moya_traceback
    render_moya_traceback(callstack, exc_node, logic_exception)
class ElementBase(with_metaclass(ElementBaseMeta, ElementBaseType)):
    """Concrete base class for elements (ElementBaseMeta registers the tag)."""
    pass
class DynamicElementMixin(object):
    """Mixin providing a lighter-weight auto_build for dynamic elements.

    Unlike ElementBaseType.auto_build there is no required/unknown/'one-of'
    validation and no Missing/Empty/Translator wrapping.
    NOTE(review): attribute.type is invoked here without the attribute
    name argument (cf. auto_build above) -- confirm the dynamic Attribute
    type really takes the two-argument signature.
    """
    def auto_build(self, context, text, attrs, translatable_attrs):
        self._attrs = attrs
        _missing = self._missing = set()
        self._attr_values = attr_values = {}
        for attribute_name, attribute in self._tag_attributes.items():
            if attribute_name not in attrs:
                _missing.add(attribute_name)
                # Defaults may themselves be expressions to evaluate.
                if attribute.evaldefault and attribute.default is not None:
                    value = attribute.type(self, attribute.default)
                else:
                    value = Getter(attribute.default)
            else:
                value = attribute.type(self, attrs[attribute_name])
            name = attribute.map_to or attribute_name
            if not attribute.context:
                value = value.value
            if attribute.choices:
                value = ChoicesChecker(value, name, self, attribute.choices)
            setattr(self, name, value)
            attr_values[name] = value
            if name in translatable_attrs:
                value.translate = True
        # Undeclared attributes are kept raw.
        for k, v in attrs.items():
            if k not in self._tag_attributes:
                self._attributes[k] = v
        self.post_build(context)
class RenderableElement(ElementBase):
    """Element in the default moyaproject.com namespace."""
    xmlns = "http://moyaproject.com"
@implements_to_string
class FunctionCallParams(object):
    """Stores parameters for a function call

    Hybrid positional/keyword container: string keys address the keyword
    arguments, integer keys address the positional arguments.
    """
    # BUGFIX: was `__slots` (missing trailing underscores), which silently
    # created a stray class attribute instead of suppressing __dict__.
    __slots__ = ["_args", "_kwargs"]
    def __init__(self, *args, **kwargs):
        self._args = list(args)
        self._kwargs = dict(kwargs)
        super(FunctionCallParams, self).__init__()
    def __str__(self):
        args_str = ", ".join(repr(a) for a in self._args)
        # BUGFIX: was "%s=%r" % (k.encode('ascii', 'replace'), repr(v)),
        # which double-repr'd the value and rendered the key as bytes on
        # Python 3.
        kwargs_str = ", ".join(
            "%s=%r" % (k, v) for k, v in self._kwargs.items()
        )
        return "(%s)" % ", ".join((args_str.strip(), kwargs_str.strip()))
    def __repr__(self):
        return "FunctionCallParams%s" % text_type(self)
    def append(self, value):
        """Append a positional argument."""
        self._args.append(value)
    def __len__(self):
        # Length counts positional arguments only.
        return len(self._args)
    def __setitem__(self, key, value):
        # None key appends; str key sets a kwarg; int key sets a positional.
        if key is None:
            self._args.append(value)
        elif isinstance(key, string_types):
            self._kwargs[key] = value
        else:
            self._args[key] = value
    def __getitem__(self, key):
        if isinstance(key, string_types):
            return self._kwargs[key]
        return self._args[key]
    def update(self, map):
        """Merge `map` into the keyword arguments."""
        self._kwargs.update(map)
    def __contains__(self, key):
        if isinstance(key, string_types):
            return key in self._kwargs
        try:
            self._args[key]
        except IndexError:
            return False
        return True
    def get(self, key, default=None):
        """dict.get-style lookup over kwargs (str keys) or args (int keys)."""
        try:
            if isinstance(key, string_types):
                return self._kwargs[key]
            return self._args[key]
        except (KeyError, IndexError):
            return default
    def get_value(self, key):
        """Keyword lookup only; raises KeyError when absent."""
        return self._kwargs[key]
    def get_call_params(self):
        """Return (args, kwargs) ready for fn(*args, **kwargs)."""
        return self._args, self._kwargs
    def keys(self):
        return self._kwargs.keys()
# Sentinel: distinguishes "no return value set" from a returned None.
_no_return = object()
@implements_to_string
class ReturnContainer(object):
    """A container that stores a single return value"""
    # This has the interface of a dict, but only stores the last value
    __slots__ = ["_key", "_value"]
    def __init__(self, value=None):
        self._key = "return"
        self._value = value
        super(ReturnContainer, self).__init__()
    def get_return_value(self):
        # The stored value, regardless of the key it was set under.
        return self._value
    def __moyarepr__(self, context):
        return "<return {}>".format(self.get_return_value())
    def __setitem__(self, k, v):
        # Setting any key overwrites both key and value (single slot).
        self._key = k
        self._value = v
    def __getitem__(self, k):
        if k == self._key:
            return self._value
        raise KeyError(k)
    def keys(self):
        return [self._key]
    def values(self):
        return [self._value]
    def items(self):
        return [(self._key, self._value)]
    def __iter__(self):
        # NOTE(review): iterates the *value*, unlike a real dict which
        # iterates keys -- confirm consumers rely on this.
        return iter([self._value])
    def __len__(self):
        return 1
    def append(self, value):
        # List-style append also collapses into the single slot (key 0).
        self._key = 0
        self._value = value
    def update(self, value):
        # NOTE(review): unlike dict.update, takes the new value directly.
        self._value = value
class CallStackEntry(dict):
    """A single frame on the Moya call stack.

    Behaves as the dict of call parameters, with the calling element,
    application and optional yield bookkeeping attached as attributes.
    """
    __slots__ = ["element", "app", "yield_element", "yield_frame"]
    def __init__(self, element, app, yield_element=None, yield_frame=None):
        super(CallStackEntry, self).__init__()
        self.element = element
        self.app = app
        self.yield_element = yield_element
        self.yield_frame = yield_frame
class _CallContext(object):
    """Context manager bracketing a call: pushes a call-stack frame on
    enter, pops it on exit, and captures the frame's return value (or
    the exception info) for the caller to inspect."""
    def __init__(self, logic_element, context, app, params):
        self.logic_element = logic_element
        self.context = context
        self.app = app
        self.params = params
        self.has_return = False   # True once the call set a '_return'
        self.return_value = None
        self.error = None         # (exc_type, exc_val, exc_tb) on failure
    def __enter__(self):
        self._call = self.logic_element.push_call(
            self.context, self.params, app=self.app
        )
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        call = self.logic_element.pop_call(self.context)
        if exc_type:
            # Record the error; returning None still propagates it.
            self.error = (exc_type, exc_val, exc_tb)
        else:
            if "_return" in call:
                self.has_return = True
                return_value = call.get("_return")
                # Unwrap ReturnContainer-style wrappers.
                if hasattr(return_value, "get_return_value"):
                    return_value = return_value.get_return_value()
                self.return_value = return_value
class _DeferContext(object):
def __init__(self, logic_element, context, app):
self.logic_element = logic_element
self.context = context
self.app = app
def __enter__(self):
self.logic_element.push_defer(self.context, app=self.app)
def __exit__(self, exc_type, exc_val, exc_tb):
self.logic_element.pop_defer(self.context)
class LogicElement(ElementBase):
    """Base class for executable ('logic') tags.

    Provides the implicit `if` attribute, default child execution, and
    call/defer stack management on the context.
    """
    _element_class = "logic"
    # Every logic tag supports an 'if' conditional; bound as self._if.
    _if = Attribute(
        "Conditional expression", type="expression", map_to="_if", default=True
    )
    class Meta:
        is_loop = False
        is_try = False
    def check(self, context):
        """Evaluate the tag's `if` condition against the context."""
        return self._if(context)
    def logic(self, context):
        """Default logic: run child logic elements."""
        yield iter(self.children(element_class="logic"))
    def call(self, context, app, **params):
        """Context manager that runs this element as a call."""
        return _CallContext(self, context, app, params)
    def closure_call(self, context, app, closure_data, **params):
        """Like call(), with closure data merged beneath explicit params."""
        call_params = closure_data.copy()
        call_params.update(params)
        return _CallContext(self, context, app, call_params)
    def push_call(
        self, context, params, app=None, yield_element=None, yield_frame=None
    ):
        """Push a new frame on ._callstack and make it the '.call' scope."""
        callstack = context.set_new_call("._callstack", list)
        call = CallStackEntry(
            self, app, yield_element=yield_element, yield_frame=yield_frame
        )
        call.update(params)
        callstack.append(call)
        context.root["call"] = call
        context.push_frame(".call")
        return call
    def pop_call(self, context):
        """Pop the current frame; restore the previous '.call' (if any)."""
        callstack = context.set_new_call("._callstack", list)
        call = callstack.pop()
        context.pop_frame()
        if callstack:
            context.root["call"] = callstack[-1]
        else:
            del context[".call"]
        return call
    def defer(self, context, app=None):
        """Context manager that runs this element as a deferred block."""
        return _DeferContext(self, context, app)
    def push_defer(self, context, app=None):
        # Like push_call but with no params and no new frame.
        callstack = context.set_new_call("._callstack", list)
        call = CallStackEntry(self, app)
        callstack.append(call)
        context.root["call"] = call
        return call
    def pop_defer(self, context):
        # NOTE(review): deletes context.root[".call"] here but
        # context[".call"] in pop_call -- confirm both spellings are
        # equivalent for this context type.
        callstack = context.set_new_call("._callstack", list)
        call = callstack.pop()
        if callstack:
            context.root["call"] = callstack[-1]
        else:
            del context.root[".call"]
        return call
    def push_funccall(self, context):
        """Push a FunctionCallParams collector and its scope onto the context."""
        funccalls = context.set_new_call("._funccalls", list)
        params = FunctionCallParams()
        funccalls.append(params)
        context.push_scope(makeindex("._funccalls", len(funccalls) - 1))
        return params
    def pop_funccall(self, context):
        """Pop the current FunctionCallParams collector and its scope."""
        funccalls = context["._funccalls"]
        context.pop_scope()
        params = funccalls.pop()
        return params
if __name__ == "__main__":
    # Ad-hoc smoke test for the element machinery.
    # NOTE(review): ElementBase._build takes (context, text, attrs,
    # translatable_attrs); these calls pass only (text, attrs) and would
    # raise TypeError if actually run -- update before relying on this.
    class TestElement(RenderableElement):
        def build(self, text, p=5, w=11):
            print(p, w)
    document = Document()
    t = TestElement(document, None, "tag", None, "foo")
    ElementBase._build(t, "", dict(apples=1))
    t2 = TestElement(document, None, "tag2", "foo", "bar")
    ElementBase._build(t2, "", dict(p=1))
    t3 = TestElement(document, None, "tag2", "foo", "baz")
    ElementBase._build(t3, "", dict(p=2))
    print(ElementBaseMeta.element_namespaces)
    # render_tree prints and returns None, so this also prints 'None'.
    print(t.render_tree())
|
import socket
import struct
from uuid import getnode as get_mac
from random import randint
class DHCPDiscover:
    """Builds a raw DHCPDISCOVER message (RFC 2131 fixed header + options)."""
    def buildPacket(self):
        """Return the DISCOVER packet as bytes."""
        fields = [
            b'\x01',              # OP: BOOTREQUEST
            b'\x01',              # HTYPE: Ethernet
            b'\x06',              # HLEN
            b'\x00',              # HOPS
            b'\x39\x03\xF3\x26',  # XID (transaction id)
            b'\x00\x00',          # SECS
            b'\x00\x00',          # FLAGS
            b'\x00\x00\x00\x00',  # CIADDR (client IP)
            b'\x00\x00\x00\x00',  # YIADDR (your IP)
            b'\x00\x00\x00\x00',  # SIADDR (server IP)
            b'\x00\x00\x00\x00',  # GIADDR (gateway IP)
            b'\x00\x05\x3C\x04'   # CHADDR (client hardware address, 16 bytes)
            b'\x8D\x59\x00\x00'
            b'\x00\x00\x00\x00'
            b'\x00\x00\x00\x00',
            b'\x00' * 192,        # BOOTP legacy padding
            b'\x63\x82\x53\x63',  # magic cookie
            b'\x35\x01\x01',      # option 53: DHCP Discover
        ]
        return b''.join(fields)
class DHCPRequest:
    """Builds a raw DHCPREQUEST message (RFC 2131 fixed header + options)."""
    def buildPacket(self):
        """Return the REQUEST packet as bytes."""
        fields = [
            b'\x01',              # OP: BOOTREQUEST
            b'\x01',              # HTYPE: Ethernet
            b'\x06',              # HLEN
            b'\x00',              # HOPS
            b'\x39\x03\xF3\x26',  # XID
            b'\x00\x00',          # SECS
            b'\x00\x00',          # FLAGS
            b'\x00\x00\x00\x00',  # CIADDR (client IP)
            b'\x00\x00\x00\x00',  # YIADDR (your IP)
            b'\xC0\xA8\x01\x01',  # SIADDR: 192.168.1.1
            b'\x00\x00\x00\x00',  # GIADDR (gateway IP)
            b'\x00\x05\x3C\x04'   # CHADDR (16 bytes)
            b'\x8D\x59\x00\x00'
            b'\x00\x00\x00\x00'
            b'\x00\x00\x00\x00',
            b'\x00' * 192,        # BOOTP legacy padding
            b'\x63\x82\x53\x63',  # magic cookie
            b'\x35\x01\x03',      # option 53: DHCP Request
        ]
        return b''.join(fields)
class DHCPOffer:
    """Builds a raw DHCPOFFER message (RFC 2131 fixed header + options)."""
    def buildPacket(self):
        """Return the OFFER packet as bytes."""
        fields = [
            b'\x02',              # OP: BOOTREPLY
            b'\x01',              # HTYPE: Ethernet
            b'\x06',              # HLEN
            b'\x00',              # HOPS
            b'\x39\x03\xF3\x26',  # XID
            b'\x00\x00',          # SECS
            b'\x00\x00',          # FLAGS
            b'\x00\x00\x00\x00',  # CIADDR
            b'\xC0\xA8\x01\x64',  # YIADDR: 192.168.1.100 (offered address)
            b'\xC0\xA8\x01\x01',  # SIADDR: 192.168.1.1
            b'\x00\x00\x00\x00',  # GIADDR
            b'\x00\x05\x3C\x04'   # CHADDR (16 bytes)
            b'\x8D\x59\x00\x00'
            b'\x00\x00\x00\x00'
            b'\x00\x00\x00\x00',
            b'\x00' * 192,        # BOOTP legacy padding
            b'\x63\x82\x53\x63',  # magic cookie
            b'\x35\x01\x02',      # option 53: DHCP Offer
        ]
        return b''.join(fields)
class DHCPAck:
    """Builds a raw DHCPACK message (RFC 2131 fixed header + options)."""
    def buildPacket(self):
        """Return the ACK packet as bytes."""
        packet = b''
        packet += b'\x02'              # OP: BOOTREPLY
        packet += b'\x01'              # HTYPE: Ethernet
        # BUGFIX: HLEN was \x05; an Ethernet MAC is 6 bytes and every
        # other packet class in this file uses \x06.
        packet += b'\x06'              # HLEN
        packet += b'\x00'              # HOPS
        packet += b'\x39\x03\xF3\x26'  # XID
        packet += b'\x00\x00'          # SECS
        packet += b'\x00\x00'          # FLAGS
        packet += b'\x00\x00\x00\x00'  # CIADDR
        packet += b'\xC0\xA8\x01\x64'  # YIADDR: 192.168.1.100
        packet += b'\xC0\xA8\x01\x01'  # SIADDR: 192.168.1.1
        packet += b'\x00\x00\x00\x00'  # GIADDR
        packet += b'\x00\x05\x3C\x04'  # CHADDR (16 bytes)
        packet += b'\x8D\x59\x00\x00'
        packet += b'\x00\x00\x00\x00'
        packet += b'\x00\x00\x00\x00'
        packet += b'\x00' * 192        # BOOTP legacy padding
        packet += b'\x63\x82\x53\x63'  # magic cookie
        packet += b'\x35\x01\x05'      # option 53: DHCP ACK (comment said 'Offer' before)
        return packet
if __name__ == '__main__':
    def bind_or_quit(sock, port):
        """Bind `sock` to `port` on all interfaces; close and quit on failure."""
        try:
            sock.bind(('', port))
        except OSError:
            print('Port %d in use.' % port)
            sock.close()
            input('Press any key to quit.')
            exit()
    def wait_for_packet(sock):
        """Block (up to the socket timeout) for a single datagram."""
        try:
            data = sock.recv(1024)
            # BUGFIX: recv() returns bytes; the original compared against
            # the str '' (always False on Python 3). An empty payload is
            # now actually reported.
            if not data:
                print('False\n')
        except socket.timeout:
            print()
    # Client socket (port 68) -----------------------------------------------
    dhcps_C = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    dhcps_C.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    print("Discover begin!\n")
    bind_or_quit(dhcps_C, 68)
    # Server socket (port 67) -----------------------------------------------
    dhcps_S = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    dhcps_S.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    bind_or_quit(dhcps_S, 67)
    # Client broadcasts DISCOVER --------------------------------------------
    discoverPacket = DHCPDiscover()
    dhcps_C.sendto(discoverPacket.buildPacket(), ('<broadcast>', 67))
    print('Send DISCOVER message!\n')
    dhcps_C.settimeout(5)
    wait_for_packet(dhcps_C)
    # Server receives DISCOVER, replies with OFFER --------------------------
    dhcps_S.settimeout(5)
    wait_for_packet(dhcps_S)
    offerPacket = DHCPOffer()
    dhcps_S.sendto(offerPacket.buildPacket(), ('<broadcast>', 68))
    # Client broadcasts REQUEST ---------------------------------------------
    requestPackage = DHCPRequest()
    dhcps_C.sendto(requestPackage.buildPacket(), ('<broadcast>', 67))
    print('Send DHCPREQUEST message\n')
    dhcps_C.close()
    # Server receives REQUEST, replies with ACK -----------------------------
    dhcps_S.settimeout(5)
    wait_for_packet(dhcps_S)
    ACK = DHCPAck()
    dhcps_S.sendto(ACK.buildPacket(), ('<broadcast>', 68))
    dhcps_S.close()
    print('Network Program Design hw1 finish!\n')
    input('Press any key to exit.')
    exit()
|
import os
import pickle
import requests
import youtube_dl
import datetime
from bookmark import FileBookmark
from google.auth.transport.requests import Request
from google_auth_oauthlib.flow import InstalledAppFlow
from googleapiclient.discovery import build
from pprint import pprint
class YoutubeVideos:
    """Fetches a user's liked YouTube videos (newer than a stored bookmark)
    and extracts (song, artist) metadata from each via youtube_dl."""
    def __init__(self, client_secrets_filename):
        self.youtube_client = self.get_youtube_client(client_secrets_filename)
        request = self.youtube_client.channels().list(
            part="snippet,contentDetails,statistics", mine=True
        )
        channel_list = request.execute()
        my_channel = channel_list["items"][0]
        # Playlist id of the channel's automatic "Liked videos" playlist.
        self.liked_pid = my_channel["contentDetails"]["relatedPlaylists"]["likes"]
        self.bookmark_object = FileBookmark()
        self.bookmark = self.bookmark_object.read()
    def get_youtube_client(self, client_secrets_filename):
        """Build an authenticated YouTube Data API client.

        Credentials are cached in a local pickle; expired tokens are
        refreshed, otherwise a local OAuth browser flow is run.
        """
        # Allow plain-HTTP redirect URIs during the local OAuth flow.
        os.environ["OAUTHLIB_INSECURE_TRANSPORT"] = "1"
        scopes = ["https://www.googleapis.com/auth/youtube.readonly"]
        api_service_name = "youtube"
        api_version = "v3"
        cache_name = "youtube_credentials.pickle"
        creds = None
        if os.path.exists(cache_name):
            # NOTE: unpickling is only acceptable because we wrote this
            # cache file ourselves; never unpickle untrusted data.
            with open(cache_name, "rb") as f:
                creds = pickle.load(f)
        if (
            not creds or not creds.valid
        ):  # creds.valid checks if stored auth token can be used as is.
            if creds and creds.expired and creds.refresh_token:
                # creds.expired checks if auth token needs to be refreshed.
                creds.refresh(Request())
            else:
                # Get credentials and create an API client
                flow = InstalledAppFlow.from_client_secrets_file(
                    client_secrets_filename, scopes
                )
                creds = flow.run_local_server(port=0)
            with open(cache_name, "wb") as f:
                pickle.dump(creds, f)
        # from the Youtube DATA API
        youtube_client = build(api_service_name, api_version, credentials=creds)
        return youtube_client
    def get_liked_videos(self):
        """Return [(song, artist), ...] for liked videos newer than the
        bookmark, advancing the bookmark to the newest video seen."""
        vids = list()
        max_vid_published = datetime.datetime.min
        for item, vid_date in self._get_set_of_videos(self.bookmark):
            video_title = item["snippet"]["title"]
            item_id = item["contentDetails"]["videoId"]
            youtube_url = f"https://www.youtube.com/watch?v={item_id}"
            print(video_title)
            vid_items = self._process_vid(youtube_url, video_title)
            max_vid_published = max(max_vid_published, vid_date)
            if vid_items:
                vids.append(vid_items)
        if max_vid_published != datetime.datetime.min:
            # Only move the bookmark when at least one new video was seen.
            self.bookmark_object.write(max_vid_published)
        return vids
    def _iter_playlist_items(self):
        """Yield raw playlistItems for the liked-videos playlist, following
        nextPageToken pagination. (Extracted to remove the duplicated
        first-page/next-page loops of the original.)"""
        response = (
            self.youtube_client.playlistItems()
            .list(part="snippet,contentDetails,id", playlistId=self.liked_pid)
            .execute()
        )
        while True:
            for item in response["items"]:
                yield item
            if "nextPageToken" not in response:
                return
            response = (
                self.youtube_client.playlistItems()
                .list(
                    part="snippet,contentDetails,id",
                    pageToken=response["nextPageToken"],
                    playlistId=self.liked_pid,
                )
                .execute()
            )
    def _get_set_of_videos(self, bookmark):
        """Yield (item, published_datetime) pairs, stopping at the first
        video published at or before `bookmark`.

        (The original also created an unused local `vids` list and
        duplicated the pagination loop; both cleaned up here.)
        """
        for item in self._iter_playlist_items():
            vid_date_str = item["snippet"]["publishedAt"]
            vid_date = datetime.datetime.strptime(
                vid_date_str, self.bookmark_object.datetime_format
            )
            if vid_date <= bookmark:
                return
            yield item, vid_date
    def _process_vid(self, youtube_url, video_title):
        """Return (song, artist) from youtube_dl metadata, or None when
        either field is missing (not a music video).

        `video_title` is unused but kept for interface compatibility.
        """
        # use youtube_dl to collect the song name & artist name
        video = youtube_dl.YoutubeDL({}).extract_info(youtube_url, download=False)
        song_name = video["track"]
        artist = video["artist"]
        if song_name is not None and artist is not None:
            return song_name, artist
|
from unittest.mock import *
from unittest import TestCase, main
from assertpy import assert_that
from src.serviceOrders import Order
from src.dataOrders import OrdersData
from src.dataProductsOrders import OrdersProductsData
class testDeleteOrder(TestCase):
    """Unit tests for Order.deleteOrder and Order.deleteOrderProduct.

    Storage access (self.temp.OrderStorage) is replaced per test with
    MagicMock/Mock objects, or with the FakeDeleteOrder stub for the
    positive-path cases.
    """

    def setUp(self):
        # Fresh service object and canned fixture data before every test.
        self.temp = Order()
        self.orders = OrdersData().orders
        self.productsOrders = OrdersProductsData().productsOrders

    def test_delete_order_bad_id_order(self):
        # A non-int order id must raise TypeError("Bad type id order").
        self.temp.deleteOrder = MagicMock()
        self.temp.deleteOrder.side_effect = TypeError("Bad type id order")
        result = self.temp.deleteOrder
        self.assertRaisesRegex(TypeError, "Bad type id order", result, "1")

    def test_delete_order_not_exist_order(self):
        # An id absent from the fixture orders raises a generic Exception.
        self.temp.OrderStorage.getAllOrders = MagicMock()
        self.temp.OrderStorage.getAllOrders.return_value = self.orders
        result = self.temp.deleteOrder
        self.assertRaisesRegex(Exception, "This order not exist in data base", result, 101)

    def test_delete_order_positive(self):
        # With the fake storage, deleting an existing id returns the confirmation string.
        self.temp.OrderStorage = FakeDeleteOrder()
        self.temp.OrderStorage.getAllOrders = MagicMock()
        self.temp.OrderStorage.getAllOrders.return_value = self.orders
        result = self.temp.deleteOrder(8)
        self.assertEqual(result, "Deleted order id:8")

    def test_delete_order_positive_verification_mock(self):
        # deleteOrder must consult storage exactly once.
        self.temp.OrderStorage = FakeDeleteOrder()
        self.temp.OrderStorage.getAllOrders = MagicMock()
        self.temp.OrderStorage.getAllOrders.return_value = self.orders
        self.temp.deleteOrder(8)
        self.temp.OrderStorage.getAllOrders.assert_called_once()

    def test_delete_orderProduct_bad_id_order(self):
        # A non-int order id for product deletion raises TypeError.
        self.temp.deleteOrder = MagicMock()
        self.temp.deleteOrder.side_effect = TypeError("Bad type order id")
        result = self.temp.deleteOrderProduct
        self.assertRaisesRegex(TypeError, "Bad type order id", result, "two", int)

    def test_delete_orderProduct_bad_id_product(self):
        # A non-int product id raises TypeError.
        result = self.temp.deleteOrderProduct
        self.assertRaisesRegex(TypeError, "Bad type product id", result, 3, "seven")

    def test_delete_orderProduct_not_exist_order(self):
        # An (order, product) pair absent from the fixtures raises Exception.
        self.temp.OrderStorage.getAllOrdersProducts = Mock()
        self.temp.OrderStorage.getAllOrdersProducts.return_value = self.productsOrders
        result = self.temp.deleteOrderProduct
        self.assertRaisesRegex(Exception, "This order not exist in data base", result, 1, 5)

    def test_delete_orderProduct_positive(self):
        # Successful deletion surfaces the ids returned by storage.
        self.temp.OrderStorage.getAllOrdersProducts = Mock()
        self.temp.OrderStorage.getAllOrdersProducts.return_value = self.productsOrders
        self.temp.OrderStorage.delOrderProduct = Mock()
        self.temp.OrderStorage.delOrderProduct.return_value = {
            "product_id": 8,
            "order_id": 4
        }
        result = self.temp.deleteOrderProduct(4, 8)
        assert_that(result).contains_value(8, 4)

    def test_delete_orderProduct_positive_verification_mock(self):
        # delOrderProduct must be called exactly once with the given ids.
        self.temp.OrderStorage.getAllOrdersProducts = Mock()
        self.temp.OrderStorage.getAllOrdersProducts.return_value = self.productsOrders
        self.temp.OrderStorage.delOrderProduct = Mock()
        self.temp.OrderStorage.delOrderProduct.return_value = {
            "product_id": 8,
            "order_id": 4
        }
        self.temp.deleteOrderProduct(4, 8)
        self.temp.OrderStorage.delOrderProduct.assert_called_once_with(4, 8)

    def tearDown(self):
        # Drop the service object so tests stay isolated.
        self.temp = None
class FakeDeleteOrder:
    """Minimal stand-in for order storage used by the positive-path tests."""

    def __init__(self):
        # Message prefix returned by delOrder before the id is appended.
        self.deleted = "Deleted order"

    def delOrder(self, id_order):
        """Return the confirmation message for deleting *id_order*."""
        return f"{self.deleted} id:{id_order}"
if __name__ == '__main__':
    # Run this test module's suite when executed directly.
    main()
|
import pandas as pd
import re
def cast_as_bool(df):
    """Convert float64 columns whose only values are exactly {0, 1} to bool.

    Mutates *df* in place and returns it.
    """
    for column in df.columns.values:
        series = df[column]
        if series.dtype != "float64":
            continue
        distinct = series.unique()
        # Exactly two distinct values and both 0 and 1 present -> binary flag.
        if len(distinct) == 2 and 0 in distinct and 1 in distinct:
            df[column] = series.astype(bool)
    return df
def fill_na(df):
    """Impute nulls and drop uninformative columns; returns the cleaned frame.

    Response columns are filled first (quartile -> 1, counts/percentages -> 0).
    Then any column that is more than 10% null, or has a single unique value,
    is dropped; remaining nulls are filled with the column mode (object
    columns) or median (numeric columns).

    Args:
        df: DataFrame that must contain all response columns listed below.

    Returns:
        The cleaned DataFrame (response fills mutate *df* in place; column
        drops produce a new frame). Dead commented-out predictor/response
        splitting code from the original has been removed.
    """
    response_vars = ["num_testtakers", "num_offered", "pct_8th_graders_offered",
                     "perc_testtakers", "perc_testtakers_quartile"]
    for response in response_vars:
        # Quartile 1 is the lowest bucket, so missing quartiles default to 1;
        # the other responses are counts/percentages where missing means 0.
        na_fill_val = 1 if response == "perc_testtakers_quartile" else 0
        df[response] = df[response].fillna(value=na_fill_val)
    nobs = float(len(df))
    for col in df.columns.values:
        num_nulls = float(df[col].isnull().sum())
        if num_nulls / nobs > .1 or len(df[col].unique()) == 1:
            # Too sparse or constant: carries no modeling signal.
            df = df.drop(col, axis=1)
        elif num_nulls > 0:
            # Mode for categoricals, median for numerics.
            if df[col].dtype == "object":
                na_fill = df[col].value_counts().idxmax()
            else:
                na_fill = df[col].median()
            df[col] = df[col].fillna(value=na_fill)
    return df
def transform_pct(col_string):
    """Convert a percentage string such as '45%' to a fraction (0.45).

    NaN inputs pass through unchanged so pandas null handling is preserved.
    """
    if pd.isnull(col_string):
        return col_string
    match = re.search('(.*)%', col_string)
    return float(match.group(1)) / 100
def transform_pct_diff(col_string):
    """Convert a signed percentage string like '+5%' or '-3.2%' to a signed fraction.

    NaN inputs pass through unchanged. Fixes in this version: the regex is a
    raw string (the original '\\+' triggers an invalid-escape warning on
    modern Python), the dead commented-out code is removed, and the sign
    handling is simplified.
    """
    if pd.isnull(col_string):
        return col_string
    match = re.search(r'^(\+|-)+(.*)%', col_string)
    magnitude = float(match.group(2))
    if match.group(1) == '-':
        magnitude = -magnitude
    return magnitude / 100
def clean_percentage_cols(modeling_df):
    """Convert object columns holding percentage strings to float fractions.

    A column is converted when its dtype is object and at least one entry
    contains '%'. Columns whose entries carry explicit '+'/'-' signs are
    treated as signed percentage diffs, the rest as plain percentages.
    Fix: the '+' detection pattern is now a raw string (the original '\\+'
    triggers an invalid-escape warning on modern Python).
    """
    for col in modeling_df.columns.values:
        df_col = modeling_df[col]
        # Only object columns with at least one '%' entry need conversion.
        if df_col.dtype != object or not df_col.str.contains('%').any():
            continue
        # '+' must stay escaped: Series.str.contains treats the pattern as a regex.
        perc_diff_flg = df_col.str.contains(r'\+').any() and df_col.str.contains('-').any()
        converter = transform_pct_diff if perc_diff_flg else transform_pct
        modeling_df[col] = df_col.apply(converter)
    return modeling_df
def find_grade_8_flg(df):
    """Add a boolean 'grade_8_flg' column: True when 'Grades' mentions an 8.

    Vectorized replacement for the original row-wise apply(axis=1); the
    astype(str) mirrors the original's str(row['Grades']) so non-string and
    null values behave identically ('nan'/'None' contain no '8').
    """
    df['grade_8_flg'] = df['Grades'].astype(str).str.contains('8', regex=False)
    return df
def clean_rows_and_cols(df):
    """Drop unusable rows/columns and coerce incoming state-score counts to float.

    Steps: remove schools too new to have 8th graders, replace the suppressed
    "N < 5" count marker with 0 and cast those columns to float, keep only
    schools with an 8th grade, drop columns more than 25% null (measured
    against the pre-filter row count, as in the original), and drop rows
    lacking an 8th-grade enrollment figure.

    Fix: filtered frames are copied before column assignment, which silences
    pandas' SettingWithCopyWarning and guarantees the writes land.
    """
    # these schools were established in last year or two, and do not yet have 8th graders
    dbns_to_remove = ["15K839", "03M291", "84X492", "84X460", "28Q358"]
    df = df[~df['dbn'].isin(dbns_to_remove)].copy()
    # TODO: use config to pull years and create incoming_state_score_cols in a better way
    incoming_state_score_cols = [
        "incoming_ela_level_1_2017_n",
        "incoming_ela_level_2_2017_n",
        "incoming_ela_level_3_2017_n",
        "incoming_ela_level_4_2017_n",
        "incoming_math_level_1_2017_n",
        "incoming_math_level_2_2017_n",
        "incoming_math_level_3_2017_n",
        "incoming_math_level_4_2017_n"
    ]
    for state_score_col in incoming_state_score_cols:
        # "N < 5" marks suppressed small counts; treat them as zero.
        df[state_score_col] = df[state_score_col].replace(to_replace="N < 5", value=0)
        df[state_score_col] = df[state_score_col].astype('float')
    nobs = float(len(df))
    # remove schools that don't have 8th graders taking the SHSAT
    df = df[df["grade_8_flg"] == True].copy()
    # remove columns with > 25% nulls
    for col_name in df.columns.values:
        col_nulls = float(df[col_name].isnull().sum())
        if col_nulls / nobs > 0.25:
            df = df.drop(col_name, axis=1)
    # remove schools that don't have 8th grade enrollment
    df = df.dropna(axis=0, subset=["grade_8_2017_enrollment"])
    return df
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from IPython.display import display
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn import metrics
# Load the student-performance dataset and take a quick look.
customers = pd.read_csv('StudentsPerformance.csv')
display(customers.head())
customers.info()
display(customers.describe())

# Exploratory plots. seaborn >= 0.12 removed positional x/y arguments,
# so they are passed as keywords here.
sns.jointplot(x='reading score', y='writing score', data=customers)
sns.pairplot(customers)
sns.lmplot(x='reading score', y='writing score', data=customers)

# BUG FIX: the original feature matrix included 'math score', which is also
# the regression target -- the model trivially learned the identity mapping
# (coefficients ~[1, 0, 0]). Predict math score from the other two scores.
X = customers[['writing score', 'reading score']]
y = customers[['math score']]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=101)

lm = LinearRegression()
lm.fit(X_train, y_train)
print(lm.coef_)

# Evaluate on the held-out split.
predictions = lm.predict(X_test)
plt.scatter(y_test, predictions)
plt.xlabel('Y Test')
plt.ylabel('Predicted Y')
mae = metrics.mean_absolute_error(y_test, predictions)
mse = metrics.mean_squared_error(y_test, predictions)
rmse = np.sqrt(mse)
print(mae, mse, rmse)

coeffs = pd.DataFrame(data=lm.coef_.transpose(), index=X.columns, columns=['Coefficient'])
coeffs.plot()
display(coeffs)
plt.show()
# -*- coding: utf-8 -*-
import sys
import datetime
import logging
import pymysql
from dbhelper import DBHelper
sys.path.append('../utils/')
from fileutil import FileUtil
class RESHelper():
    """Loads disease names from a text file into the `resitem` DB table."""

    logger = logging.getLogger('RESHelper')

    def __init__(self):
        self.dbHelper = DBHelper()
        self.resFileName = '../../data/res12.txt'  # disease-name source file

    def save_data(self):
        """Clear `resitem`, then bulk-insert every line of the source file.

        Rows go in batches of 1000 through a parameterized executemany,
        which fixes two problems in the original: SQL built by string
        interpolation with pymysql.escape_string (removed from the pymysql
        top level in 1.0, and injection-prone), and the convoluted manual
        batch bookkeeping around the final batch.
        """
        lines = FileUtil.readlines(self.resFileName)
        self._clear_data()
        total = len(lines)
        print('Res data storing...')
        batch_size = 1000
        sql = "insert into resitem(name) values(%s)"
        for start in range(0, total, batch_size):
            batch = lines[start:start + batch_size]
            try:
                conn = self.dbHelper.connectDatabase()
                print('storing: %d / %d' % (min(start + batch_size, total), total))
                cur = conn.cursor()
                # executemany lets the driver handle quoting/escaping.
                cur.executemany(sql, [(value,) for value in batch])
                conn.commit()
                cur.close()
                conn.close()
            except Exception as error:
                self.logger.log(logging.ERROR, error)

    def _clear_data(self):
        """Delete all existing rows from the `resitem` table."""
        params = ('resitem',)
        self.dbHelper.clear(*params)
if __name__=='__main__':
    # Time the full load and report the elapsed wall-clock duration.
    _begin = datetime.datetime.now()
    _zresHelper = RESHelper()
    _zresHelper.save_data()
    _end = datetime.datetime.now()
    _time = _end - _begin
    # self.logger.log(logging.INFO, "Res data stored in db: %s" % _time)\
    print("Res data stored in db: %s" % _time)
|
# Generated by Django 2.2.7 on 2019-11-24 23:04
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 2.2.7): make `bids.bid_amount` a FloatField defaulting to 0."""

    dependencies = [
        ('auctions_service', '0002_bids_most_current_bid'),
    ]

    operations = [
        migrations.AlterField(
            model_name='bids',
            name='bid_amount',
            field=models.FloatField(default=0),
        ),
    ]
|
from datetime import datetime
import unittest
from home_eye.model.sensor import Sensor
class SensorTest(unittest.TestCase):
    """Unit tests for Sensor construction and its JSON serialization."""

    def test_parameters_in_to_json(self):
        """to_json() must expose every expected top-level key."""
        probe = Sensor('indoor', 12.0, 55.5, datetime.now())
        payload = probe.to_json()
        for key in ('name', 'temperature', 'humidity', 'updated', 'age'):
            self.assertIn(key, payload)

    def test_to_json(self):
        """The name round-trips and temperature carries value/display_value."""
        probe = Sensor('indoor', 12.0, 55.5, datetime.now())
        payload = probe.to_json()
        self.assertEqual('indoor', payload['name'])
        for key in ('value', 'display_value'):
            self.assertIn(key, payload['temperature'])

    def test_create(self):
        """Constructor arguments land on the expected attributes."""
        created = datetime(year=2021, month=3, day=21)
        probe = Sensor('test', 20, 30, created)
        self.assertEqual(probe.name, 'test')
        self.assertEqual(probe.temperature.value, 20)
        self.assertEqual(probe.humidity.value, 30)
        self.assertEqual(probe.updated.value, created)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
|
"""Registers torch optimizer classes (Adam, AdamW, SGD) with the project config mapper."""
from torch.optim import Adam, AdamW, SGD
from src.utils.mapper import configmapper

# Expose each optimizer under a string key so configs can select it by name.
configmapper.map("optimizers", "adam")(Adam)
configmapper.map("optimizers", "adam_w")(AdamW)
configmapper.map("optimizers", "sgd")(SGD)
|
from pyspark.sql import SparkSession
from pyspark.sql import SQLContext, HiveContext
from pyspark import SparkContext
from pyspark.sql.functions import udf
from pyspark.sql.functions import *
from pyspark.sql.types import *
from pyspark.sql.window import Window
import pyspark.sql.functions as F
import numpy as np
import pandas as pd
import time
from pyspark.ml.feature import ChiSqSelector
from pyspark.ml.linalg import Vectors
from pyspark.ml import Pipeline
from pyspark.ml.classification import GBTClassifier
from pyspark.ml.classification import RandomForestClassifier
from pyspark.ml.feature import StringIndexer, VectorIndexer
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
from pyspark.ml.evaluation import BinaryClassificationEvaluator
import pyspark.ml.feature as ft
# Exploratory PySpark session: DataFrame aggregation, RDD mapping, and a
# chi-square feature test on a toy dataset.
spark = SparkSession.builder.appName("pyspark_test").enableHiveSupport().getOrCreate()
sentenceData = spark.createDataFrame([
    (0.0, "I like Spark",2),
    (1.0, "Pandas is useful",3),
    (2.0, "They are coded by Python",1)
], ["label", "sentence","a"])
# print(type(sentenceData))
#
# Max of column "a", pulled to the driver via pandas.
print(sentenceData.agg(F.max("a")).toPandas().iloc[0,0])
#print(sentenceData.show())
pandas_df = sentenceData.toPandas()
#print(pandas_df)
# Project columns 0 and 2 through the RDD API and back to a DataFrame.
d = sentenceData.rdd
d = d.map(lambda row: (row[0],row[2]))
d = d.toDF(["a","b"])
print(d.show())
from pyspark.ml.stat import *
# NOTE(review): this second builder call returns the SAME session created
# above (getOrCreate), so appName("dataFrame") has no effect here.
spark= SparkSession\
    .builder \
    .appName("dataFrame") \
    .getOrCreate()
data = [(0.0, Vectors.dense(0.5, 10.0)),
        (0.0, Vectors.dense(1.5, 20.0)),
        (1.0, Vectors.dense(1.5, 30.0)),
        (0.0, Vectors.dense(3.5, 30.0)),
        (0.0, Vectors.dense(3.5, 40.0)),
        (1.0, Vectors.dense(3.5, 40.0))]
df = spark.createDataFrame(data, ["label", "features"])
# Pearson chi-square independence test of each feature against the label.
r = ChiSquareTest.test(df, "features", "label").head()
# print("pValues: " + str(r.pValues))
# print("degreesOfFreedom: " + str(r.degreesOfFreedom))
# print("statistics: " + str(r.statistics))
|
import matplotlib.pyplot as plt
# Bar chart of water-monomer adsorption energies per dispersion method.
methods = ['PBE', 'PBE-D2', 'PBE-D3', 'RPBE', 'RPBE-D2', 'RPBE-D3',
           'revPBE', 'rPW86', 'optPBE', 'optB88', 'optB86b']
energies = [0.209, 0.652, 0.456, 0.036, 0.440, 0.417,
            0.255, 0.364, 0.383, 0.427, 0.458]

plt.bar(methods, energies, color='r', width=0.35)
plt.xlabel('Dispersion Method')
plt.ylabel('Energy (eV)')
plt.title('Water monomer adsorption energy')
plt.rcParams['font.serif'] = "Times New Roman"
plt.xticks(fontsize=8)
plt.yticks(fontsize=12)
plt.show()
|
import requests
# NOTE(security): the OpenWeatherMap API key is hard-coded; move it to an
# environment variable or secrets store before sharing this script.
API_KEY = '8793610e7c4019ccd6189b9bc7bad61e'

parameters = {
    "lat": 44.4323,
    "lon": 26.1063,
    "appid": API_KEY,  # fix: reuse the constant instead of duplicating the literal
    "exclude": 'current,minutely,daily',
}

response = requests.get(
    url='https://api.openweathermap.org/data/2.5/onecall', params=parameters)
response.raise_for_status()
weather_data = response.json()

# OpenWeatherMap condition codes below 700 are precipitation
# (thunderstorm/drizzle/rain/snow); check the next 12 hours.
weather_slice = weather_data['hourly'][:12]
will_rain = any(int(hour["weather"][0]['id']) < 700 for hour in weather_slice)
if will_rain:
    print('Bring an umbrella')
|
import sys
from _typeshed import BytesPath, StrOrBytesPath, StrPath
from genericpath import (
commonprefix as commonprefix,
exists as exists,
getatime as getatime,
getctime as getctime,
getmtime as getmtime,
getsize as getsize,
isdir as isdir,
isfile as isfile,
samefile as samefile,
sameopenfile as sameopenfile,
samestat as samestat,
)
from os import PathLike
from typing import AnyStr, Sequence, overload
# Type-stub declarations only: every body is "..."; runtime behavior lives in
# the interpreter's posixpath/genericpath modules.
supports_unicode_filenames: bool
# aliases (also in os)
curdir: str
pardir: str
sep: str
altsep: str | None
extsep: str
pathsep: str
defpath: str
devnull: str
# Overloads are necessary to work around python/mypy#3644.
@overload
def abspath(path: PathLike[AnyStr]) -> AnyStr: ...
@overload
def abspath(path: AnyStr) -> AnyStr: ...
@overload
def basename(p: PathLike[AnyStr]) -> AnyStr: ...
@overload
def basename(p: AnyStr) -> AnyStr: ...
@overload
def dirname(p: PathLike[AnyStr]) -> AnyStr: ...
@overload
def dirname(p: AnyStr) -> AnyStr: ...
@overload
def expanduser(path: PathLike[AnyStr]) -> AnyStr: ...
@overload
def expanduser(path: AnyStr) -> AnyStr: ...
@overload
def expandvars(path: PathLike[AnyStr]) -> AnyStr: ...
@overload
def expandvars(path: AnyStr) -> AnyStr: ...
@overload
def normcase(s: PathLike[AnyStr]) -> AnyStr: ...
@overload
def normcase(s: AnyStr) -> AnyStr: ...
@overload
def normpath(path: PathLike[AnyStr]) -> AnyStr: ...
@overload
def normpath(path: AnyStr) -> AnyStr: ...
@overload
def commonpath(paths: Sequence[StrPath]) -> str: ...
@overload
def commonpath(paths: Sequence[BytesPath]) -> bytes: ...
# First parameter is not actually pos-only,
# but must be defined as pos-only in the stub or cross-platform code doesn't type-check,
# as the parameter name is different in ntpath.join()
@overload
def join(__a: StrPath, *paths: StrPath) -> str: ...
@overload
def join(__a: BytesPath, *paths: BytesPath) -> bytes: ...
# realpath gained its keyword-only `strict` parameter in Python 3.10.
if sys.version_info >= (3, 10):
    @overload
    def realpath(filename: PathLike[AnyStr], *, strict: bool = ...) -> AnyStr: ...
    @overload
    def realpath(filename: AnyStr, *, strict: bool = ...) -> AnyStr: ...
else:
    @overload
    def realpath(filename: PathLike[AnyStr]) -> AnyStr: ...
    @overload
    def realpath(filename: AnyStr) -> AnyStr: ...
@overload
def relpath(path: BytesPath, start: BytesPath | None = ...) -> bytes: ...
@overload
def relpath(path: StrPath, start: StrPath | None = ...) -> str: ...
@overload
def split(p: PathLike[AnyStr]) -> tuple[AnyStr, AnyStr]: ...
@overload
def split(p: AnyStr) -> tuple[AnyStr, AnyStr]: ...
@overload
def splitdrive(p: PathLike[AnyStr]) -> tuple[AnyStr, AnyStr]: ...
@overload
def splitdrive(p: AnyStr) -> tuple[AnyStr, AnyStr]: ...
@overload
def splitext(p: PathLike[AnyStr]) -> tuple[AnyStr, AnyStr]: ...
@overload
def splitext(p: AnyStr) -> tuple[AnyStr, AnyStr]: ...
def isabs(s: StrOrBytesPath) -> bool: ...
def islink(path: StrOrBytesPath | int) -> bool: ...
def ismount(path: StrOrBytesPath | int) -> bool: ...
def lexists(path: StrOrBytesPath | int) -> bool: ...
|
from django.conf.urls import url
from . import views
# NOTE(review): django.conf.urls.url() is deprecated since Django 2.0 and
# removed in 4.0; migrate to django.urls.re_path when upgrading.
urlpatterns = [
    # Registration / login / session
    url(r'^$', views.index),
    url(r'^process_reg$',views.process_reg),
    url(r'^process_log$',views.process_log),
    url(r'^dashboard$', views.dashboard),
    url(r'^logout$', views.logout),
    # Trip CRUD
    url(r'^trips/new$', views.new),
    url(r'^process_new_trip$', views.process_new_trip),
    url(r'^trip/(?P<trip_id>\d+)/remove$', views.remove_trip),
    url(r'^trip/(?P<trip_id>\d+)$', views.view_trip),
    url(r'^edit/(?P<trip_id>\d+)$', views.view_edit_trip),
    url(r'^process_edit_trip/(?P<trip_id>\d+)$', views.process_edit_trip),
]
# Created by MechAviv
# Map ID :: 402000620
# Sandstorm Zone : Refuge Border
# Spawn the refugee NPCs only while quest 34929 is active.
if sm.hasQuest(34929):
    # (template id, x, y) for each NPC; every one plays the "summon" action.
    # Replaces 14 duplicated spawnNpc/showNpcSpecialActionByTemplateId pairs.
    NPC_SPAWNS = [
        (3001509, 298, 200),
        (3001512, 374, 200),
        (3001513, 431, 200),
        (3001510, 550, 200),
        (3001514, -181, 200),
        (3001515, -330, 200),
        (3001516, -275, 200),
        (3001517, -487, -5),
        (3001518, -330, -5),
        (3001519, -435, -5),
        (3001520, -380, -5),
        (3001521, -331, 132),
        (3001522, -439, 93),
        (3001511, -439, 200),
    ]
    for npc_id, x, y in NPC_SPAWNS:
        sm.spawnNpc(npc_id, x, y)
        sm.showNpcSpecialActionByTemplateId(npc_id, "summon", 0)
from lstm_model import LSTM_Model,CHECKPOINT_PATH,log_dir,NUM_EPOCH,run_epoch
from transform2Tfrecord import makeDataSet
import tensorflow as tf
# TFRecord file used as input; despite the name this run trains on test data.
train_files = './result/TEST/try_multiple.tfrecords'

if __name__ == '__main__':
    # Build the TF1 graph, restore the latest checkpoint, then run
    # NUM_EPOCH passes over the batched dataset.
    # init = tf.random_uniform_initializer(-2,2)
    with tf.variable_scope('LSTM_model', reuse=None):
        model = LSTM_Model()
    batched_dataset = makeDataSet(train_files, 30)  # Use test data
    # Train
    _it = batched_dataset.make_initializable_iterator()
    (s, t, l) = _it.get_next()
    ops = model.forward(s, t, l, type='train')
    saver = tf.train.Saver()
    step = 0
    with tf.Session() as sess:
        # Variables come from the checkpoint, so no global initializer here.
        saver.restore(sess, CHECKPOINT_PATH)
        writer = tf.summary.FileWriter(log_dir, sess.graph)
        # tf.global_variables_initializer().run()
        for i in range(NUM_EPOCH):
            print("In Iteration: %d" % (i + 1))
            sess.run(_it.initializer)  # rewind the dataset each epoch
            step = run_epoch(sess, ops, saver, step, writer,type='train')
        writer.close()
|
# -*- coding: utf-8 -*-
"""
@author: carlos
"""
import csv
from collections import defaultdict
# La siguiente función ayuda al usuario a definir que reporte quiere
# generar
def elegir_reporte():
    """Prompt the user to pick one of the three reports; return its number (1-3)."""
    divider = "-----------------------------------------------"
    print("¿Qué reporte desea generar?")
    print(divider)
    print("1) Rutas de importación y exportación")
    print("2) Medio de transporte utilizado")
    print("3) Valor total de importaciones y exportaciones")
    print(divider)
    seleccion = int(input("Indique con número: "))
    print(divider)
    print(divider)
    return seleccion
reporte = elegir_reporte()

# Build the sorted list of unique (origin, destination) routes from the CSV.
# A->B and B->A are treated as the same route: only the first orientation
# encountered is kept.
rutas = set()
with open("synergy_logistics_database.csv","r") as archivo_madre:
    lector = csv.DictReader(archivo_madre)
    for linea in lector:
        if (linea["destination"],linea["origin"]) not in rutas:
            tupla_de_ruta = (linea["origin"],linea["destination"])
            rutas.add(tupla_de_ruta)
rutas = list(rutas)
rutas.sort()
# Definimos función que calcule el total de exportaciones o importaciones
# por año para cada ruta
def demanda_anual(ano, tipo):
    """Total traded value per route for one year and direction.

    Args:
        ano: year to filter on (int).
        tipo: "Exports" or "Imports".

    Returns:
        List of ((origin, destination), total_value) tuples sorted by total
        value, descending. Rows are matched against the canonical route
        orientation stored in the global `rutas` in either direction.

    Fix: replaces the original's sort-then-scan group-by, which raised
    IndexError whenever fewer than two matching rows existed, with a
    defaultdict accumulation.
    """
    totales = defaultdict(int)
    with open("synergy_logistics_database.csv", "r") as archivo_madre:
        for linea in csv.DictReader(archivo_madre):
            if int(linea["year"]) != ano or linea["direction"] != tipo:
                continue
            par = (linea["origin"], linea["destination"])
            invertido = (par[1], par[0])
            # Only one orientation of each route exists in `rutas`.
            if par in rutas:
                totales[par] += int(linea["total_value"])
            elif invertido in rutas:
                totales[invertido] += int(linea["total_value"])
    suma_final = sorted(totales.items(), key=lambda x: x[1], reverse=True)
    return suma_final
# suma_final contiene todas las exportaciones o importaciones por rutas
# del año indicado.
# Build the sorted list of distinct transport modes present in the CSV.
transportes = set()
with open("synergy_logistics_database.csv","r") as archivo_madre:
    lector = csv.DictReader(archivo_madre)
    for linea in lector:
        if linea["transport_mode"] not in transportes:
            valor_de_ruta = linea["transport_mode"]
            transportes.add(valor_de_ruta)
transportes = list(transportes)
transportes.sort()
# La siguiente función ayuda a calcular el total de exportaciones más
# importaciones por año para cada medio de transporte
def total_medios(ano):
    """Combined import+export value per transport mode for one year.

    Args:
        ano: year to filter on (int).

    Returns:
        List of (transport_mode, total_value) tuples sorted by total value,
        descending, restricted to the modes in the global `transportes`.

    Fix: replaces the original's sort-then-scan group-by, which raised
    IndexError whenever fewer than two matching rows existed.
    """
    totales = defaultdict(int)
    with open("synergy_logistics_database.csv", "r") as archivo_madre:
        for linea in csv.DictReader(archivo_madre):
            if int(linea["year"]) != ano:
                continue
            if linea["transport_mode"] in transportes:
                totales[linea["transport_mode"]] += int(linea["total_value"])
    suma_final = sorted(totales.items(), key=lambda x: x[1], reverse=True)
    return suma_final
# suma_final contiene todas las exportaciones e importaciones por medio
# de transporte para el año indicado.
# Build the sorted list of every country appearing as an origin or a
# destination anywhere in the CSV.
paises = set()
with open("synergy_logistics_database.csv","r") as archivo_madre:
    lector = csv.DictReader(archivo_madre)
    for linea in lector:
        if linea["origin"] not in paises:
            valor_de_pais = linea["origin"]
            paises.add(valor_de_pais)
        if linea["destination"] not in paises:
            valor_de_pais = linea["destination"]
            paises.add(valor_de_pais)
paises = list(paises)
paises.sort()
# Definimos función que calcule el total de importaciones y exportaciones
# para un año determinado.
def total_exp_imp(ano):
    """Return the combined value of all imports and exports recorded for *ano*."""
    with open("synergy_logistics_database.csv", "r") as archivo_madre:
        return sum(
            int(linea["total_value"])
            for linea in csv.DictReader(archivo_madre)
            if int(linea["year"]) == ano
        )
# Definimos funcion que calcule, para un año determinado, el porcentaje
# que contribuyó cada país al total de exportaciones e importaciones.
def contribucion(ano):
    """Percentage contribution of each country to the year's total trade value.

    A country is credited with a row's value whenever it appears as origin
    or destination (once per row even if it is both).

    Args:
        ano: year to filter on (int).

    Returns:
        List of (pais, percentage) tuples sorted by percentage, descending.

    Fix: replaces the original's sort-then-scan group-by, which raised
    IndexError whenever fewer than two matching rows existed.
    """
    expos_total = total_exp_imp(ano)
    totales = defaultdict(int)
    with open("synergy_logistics_database.csv", "r") as archivo_madre:
        for linea in csv.DictReader(archivo_madre):
            if int(linea["year"]) != ano:
                continue
            # The set dedupes rows where origin == destination.
            for pais in {linea["origin"], linea["destination"]}:
                if pais in paises:
                    totales[pais] += int(linea["total_value"])
    suma_final = sorted(
        ((pais, total * 100 / expos_total) for pais, total in totales.items()),
        key=lambda x: x[1], reverse=True)
    return suma_final
# suma_final contiene todas las exportaciones o importaciones por rutas
# del año indicado.
# Generates and prints the chosen report, looping until the user declines.
login = True
while login == True:
    if reporte == 1:
        print("RUTAS DE IMPORTACIÓN Y EXPORTACIÓN")
        print("-----------------------------------------------")
        # Top-10 export routes for each year in the dataset.
        for i in [2015,2016,2017,2018,2019,2020]:
            print("-----------------------------------------------")
            print("Las siguientes son las diez rutas más demandadas de exportación del ",i)
            print("-----------------------------------------------")
            expos = demanda_anual(i,"Exports")
            print("Ruta Cantidad")
            for j in range(10):
                print(f"{expos[j][0][0]}-{expos[j][0][1]} {expos[j][1]}")
        # Top-10 import routes for each year.
        for i in [2015,2016,2017,2018,2019,2020]:
            print("-----------------------------------------------")
            print("Las siguientes son las diez rutas más demandadas de importación del ",i)
            print("-----------------------------------------------")
            imports = demanda_anual(i,"Imports")
            print("Ruta Cantidad")
            for j in range(10):
                print(f"{imports[j][0][0]}-{imports[j][0][1]} {imports[j][1]}")
    if reporte == 2:
        print("METODO DE TRANSPORTE UTILIZADO")
        print("-----------------------------------------------")
        # Yearly totals per transport mode (imports + exports combined).
        for i in [2015,2016,2017,2018,2019,2020]:
            print("-----------------------------------------------")
            print("Medios de transporte por volumen de importación y exportación para el ",i)
            print("-----------------------------------------------")
            transpos = total_medios(i)
            print("Transporte Exportación+Importación")
            for j in range(4):
                print(f"{transpos[j][0]} {transpos[j][1]}")
    if reporte == 3:
        print("VALOR TOTAL DE IMPORTACIONES Y EXPORTACIONES")
        print("-----------------------------------------------")
        # Countries that together account for 80% of the yearly trade value.
        for i in [2015,2016,2017,2018,2019,2020]:
            print("-----------------------------------------------")
            print("Lista de países que contribuyeron con el 80% de todas las importaciones y exportaciones para el año ",i)
            print("-----------------------------------------------")
            contribuciones = contribucion(i)
            print("País Contribución (%)")
            lim = contribuciones[0][1]
            # NOTE(review): `lim` accumulates the running percentage while
            # `contribuciones[w+1]` looks one entry ahead; if the 80%
            # threshold is never reached before the list is exhausted this
            # indexes past the end (IndexError) — confirm against the data.
            for w in range(len(contribuciones)):
                if lim < 80.0:
                    print(f"{contribuciones[w][0]} {round(contribuciones[w][1],2)}")
                    lim += contribuciones[w+1][1]
    print("--------------------------------------------")
    print("¿Desea generar otro reporte? (si/no)")
    respuesta = input("Indique su respuesta: ")
    print("--------------------------------------------")
    if respuesta == "si":
        reporte = elegir_reporte()
    elif respuesta == "no":
        print("Programa cerrado exitosamente")
        login = False
|
def largest_power_of_two_divisor(n):
    """Return the largest power of two dividing *n* (its lowest set bit).

    `n & -n` isolates the lowest set bit in O(1); for odd n this is 1,
    matching the original's special case. The original's halving loop also
    crashed with ZeroDivisionError for n == 0, which this closed form avoids
    (it returns 0).
    """
    return n & -n


if __name__ == "__main__":
    n = int(input())
    print(largest_power_of_two_divisor(n))
|
import os
import random
import cv2
import struct
import numpy as np
import tensorflow as tf
from .utilize import semantic_down_sample_voxel
np.seterr(divide='ignore', invalid='ignore')
DATA_DIR = os.path.join(os.environ['HOME'], 'datasets', 'SUNCG')
RECORD_DIR = os.path.join(os.environ['HOME'], 'datasets', 'SUNCG-TF-60')
def details_and_fov(img_height, img_width, img_scale, vox_scale):
    """Return (voxel details, scaled 3x3 intrinsics as a flat array), both float32.

    The focal length 518.8579 and the image center are divided by *img_scale*;
    the voxel edge length 0.02 is multiplied by *vox_scale*.
    """
    details = np.array([0.02 * vox_scale, 0.24], np.float32)
    focal = 518.8579 / img_scale
    intrinsics = np.array([focal, 0., img_width / (2 * img_scale),
                           0., focal, img_height / (2 * img_scale),
                           0., 0., 1.], dtype=np.float32)
    return details, intrinsics
def _diff_vec(img, axis=0):
img_diff = np.diff(img, 1, axis)
img_diff_l = img_diff[1:, :] if axis == 0 else img_diff[:, 1:]
img_diff_h = img_diff[:-1, :] if axis == 0 else img_diff[:, :-1]
img_diff = img_diff_l + img_diff_h
pad_tuple = ((1, 1), (0, 0), (0, 0)) if axis == 0 else ((0, 0), (1, 1), (0, 0))
padded = np.lib.pad(img_diff, pad_tuple, 'edge')
return padded
def _gen_normal(depth_path, file_path='tmp.png'):
    """Derive a surface-normal map from a depth PNG and return its PNG bytes.

    The normal image is written to *file_path* (overwritten on every call)
    and the file's raw bytes are returned.
    """
    depth = cv2.imread(depth_path, cv2.IMREAD_UNCHANGED)
    # NOTE(review): this 3-bit rotate appears to undo the dataset's depth
    # bit-packing before converting millimetres to metres — confirm format.
    lower_depth = depth >> 3
    higher_depth = (depth % 8) << 13
    real_depth = (lower_depth | higher_depth).astype(np.float32) / 1000
    _, fov = details_and_fov(*real_depth.shape, 1, 1)
    # Back-project every pixel to a camera-space 3D point using the intrinsics.
    img_x = np.repeat(np.expand_dims(np.arange(real_depth.shape[0]), axis=1), real_depth.shape[1], axis=1)
    img_y = np.repeat(np.expand_dims(np.arange(real_depth.shape[1]), axis=0), real_depth.shape[0], axis=0)
    point_cam_x = (img_x - fov[2]) * real_depth / fov[0]
    point_cam_y = (img_y - fov[5]) * real_depth / fov[4]
    points = np.stack([point_cam_x, point_cam_y, real_depth], axis=2)
    # Normal = cross product of the central differences along the two axes.
    diff_y = _diff_vec(points, axis=0)
    diff_x = _diff_vec(points, axis=1)
    normal = np.cross(diff_x, diff_y)
    normal_factor = np.expand_dims(np.linalg.norm(normal, axis=2), axis=-1)
    # Zero-length / NaN normals become (0, 0, 0) instead of dividing by zero.
    normal = np.where((normal_factor == 0.) | np.isnan(normal_factor), (0, 0, 0), normal / normal_factor)
    # Pack the [-1, 1] components into a 16-bit image.
    normal = (np.clip((normal + 1) / 2, 0, 1) * 65535).astype(np.uint16)
    cv2.imwrite(file_path, normal)
    # cooked_normal = cv2.imread(file_path, cv2.IMREAD_UNCHANGED)
    return open(file_path, 'rb').read()
def _gen_zip_voxel(meta_path, vox_size=None, scaled_vox_size=None):
    """Read a voxel .bin file, down-sample its labels, and return re-packed bytes.

    The .bin payload after a 76-byte header is interpreted as (label, run
    length) integer pairs — run-length encoding over the flattened voxel
    grid (default 240x144x240). Returns the original header plus the
    down-sampled grid as one uint8 label per voxel, with class 12 stored
    as 255.
    """
    # Maps the 38 raw categories onto the smaller segmentation class set.
    seg_class_map = [0, 1, 2, 3, 4, 11, 5, 6, 7, 8, 8, 10, 10, 10, 11, 11, 9, 8, 11, 11, 11, 11, 11, 11, 11, 11, 11, 10,
                     10, 11, 8, 10, 11, 9, 11, 11, 11, 12]
    if scaled_vox_size is None:
        scaled_vox_size = np.array([60, 36, 60])
    if vox_size is None:
        vox_size = np.array([240, 144, 240])
    meta = open(meta_path, 'rb')
    header_bytes = meta.read(76)
    vox_meta_info = meta.read()
    # Remaining payload: pairs of platform-sized ints (label, run length).
    vox_info = struct.unpack('%di' % int(len(vox_meta_info) / 4), vox_meta_info)
    labels, vox_nums = [np.squeeze(x) for x in np.split(np.array(vox_info).reshape([-1, 2]), 2, axis=1)]
    # 37 is the fill label; runs labeled 255 keep the fill value but still
    # advance the write offset.
    full_voxel = np.full(vox_size, 37, np.uint8).reshape([-1])
    offset = 0
    for label, vox_num in zip(labels, vox_nums):
        if label != 255:
            full_voxel[offset:offset+vox_num] = label
        offset += vox_num
    full_voxel = np.take(seg_class_map, full_voxel)
    full_voxel = np.reshape(full_voxel, vox_size)
    final_voxel = semantic_down_sample_voxel(full_voxel, scaled_vox_size)
    # Class 12 (the catch-all bucket) is stored as 255 in the output bytes.
    final_voxel = np.expand_dims(np.where(final_voxel == 12, np.full(final_voxel.shape, 255, dtype=final_voxel.dtype),
                                          final_voxel), axis=-1)
    meta_bytes = np.reshape(final_voxel, [-1]).astype(np.uint8).tobytes()
    return header_bytes + meta_bytes
def prepare_data(target_path, shuffle=False, normal=False, zip_voxel=False):
    """Serialize depth/voxel sample pairs under ``target_path*`` folders into
    one TFRecord file in RECORD_DIR.

    Each sample contributes these features:
      - 'img':    raw bytes of the depth PNG
      - 'bin':    raw voxel .bin bytes, or (when ``zip_voxel``) the
                  re-encoded, down-sampled voxel payload
      - 'alias':  the sample's base filename (utf-8 bytes)
      - 'normal': optional surface-normal image rendered from the depth map

    Args:
        target_path: path prefix; every sibling folder whose name starts with
            its basename is scanned for '*.bin' samples.
        shuffle: shuffle the sample list (fixed seed 0, reproducible).
        normal: also store a normal map per sample.
        zip_voxel: store the down-sampled semantic voxel grid instead of the
            raw .bin contents.
    """
    if not os.path.exists(RECORD_DIR):
        os.mkdir(RECORD_DIR)
    print('write samples from %s' % target_path)
    dir_name = os.path.dirname(target_path)
    target_folders = [folder for folder in os.listdir(dir_name) if folder.startswith(os.path.basename(target_path))]
    samples_path = []
    # Get the total samples list
    for target_folder in target_folders:
        folder_path = os.path.join(dir_name, target_folder)
        if not os.path.isdir(folder_path):
            continue
        sub_samples = sorted([os.path.splitext(f)[0] for f in os.listdir(folder_path) if f.endswith('bin')])
        samples_path.extend([os.path.join(folder_path, sub_sample) for sub_sample in sub_samples])
    if shuffle:
        random.seed(0)  # fixed seed: deterministic shuffle across runs
        random.shuffle(samples_path)
    # FIX: a ZLIB TFRecordOptions object used to be constructed here but was
    # never passed to the writer (options=None), so records were always
    # written uncompressed; the dead construction is removed.  To actually
    # compress, pass options=tf.python_io.TFRecordOptions(
    # tf.python_io.TFRecordCompressionType.ZLIB) below — readers must match.
    writer = tf.python_io.TFRecordWriter(os.path.join(RECORD_DIR, os.path.split(target_path)[-1] + '.tfrecord'),
                                         options=None)
    try:
        current_index = 0
        for sample in samples_path:
            print('--%07d write %s in TFRECORDS' % (current_index, sample))
            current_index += 1
            depth_path = sample + '.png'
            bin_path = sample + '.bin'
            if not os.path.exists(depth_path) or not os.path.exists(bin_path):
                continue
            features_dict = dict()
            # FIX: read files via context managers so handles close promptly
            # (the old bare open(...).read() calls leaked descriptors).
            with open(depth_path, 'rb') as depth_file:
                depth_bytes = depth_file.read()
            features_dict['img'] = tf.train.Feature(bytes_list=tf.train.BytesList(value=[depth_bytes]))
            if zip_voxel:
                bin_meta = _gen_zip_voxel(bin_path, scaled_vox_size=[60, 36, 60])
            else:
                with open(bin_path, 'rb') as bin_file:
                    bin_meta = bin_file.read()
            features_dict['bin'] = tf.train.Feature(bytes_list=tf.train.BytesList(value=[bin_meta]))
            alias = os.path.split(sample)[-1].encode('utf-8')
            features_dict['alias'] = tf.train.Feature(bytes_list=tf.train.BytesList(value=[alias]))
            if normal:
                normal_img = _gen_normal(depth_path)
                features_dict['normal'] = tf.train.Feature(bytes_list=tf.train.BytesList(value=[normal_img]))
            example = tf.train.Example(features=tf.train.Features(feature=features_dict))
            writer.write(example.SerializeToString())
    finally:
        # FIX: close the writer even if serialization raises mid-way.
        writer.close()
if __name__ == '__main__':
    # Build compressed-voxel TFRecords for both splits; only the training
    # split is shuffled so the test ordering stays deterministic.
    prepare_data(os.path.join(DATA_DIR, 'SUNCGtrain'), shuffle=True, normal=True,zip_voxel=True)
    prepare_data(os.path.join(DATA_DIR, 'SUNCGtest'), shuffle=False, normal=True, zip_voxel=True)
|
###
# DP
# State: dp[i]: min total cost to reach index i
# Function: dp[i] = min(dp[j] + A[j]) for j in [max(i - B, 0), i), skipping j with A[j] == -1
# Initialization: dp[0] = 0
# Answer: dp[n - 1], path reconstructed via parent links (ties broken toward longer paths)
# Time Complexity: O(n * B), worst case O(n^2)
# Space Complexity: O(n)
###
class Solution(object):
    def cheapestJump(self, A, B):
        """
        :type A: List[int]
        :type B: int
        :rtype: List[int]

        Coin Path: starting at index 0 you may jump forward 1..B indices,
        paying A[j] to leave index j; A[i] == -1 marks an unusable index.
        Returns the cheapest 1-indexed path from index 1 to index n, or []
        if the end is unreachable.  On cost ties the longer path is kept
        (intended to yield the lexicographically smallest route).
        """
        if not A or A[0] == -1 or A[-1] == -1:
            return []
        n = len(A)
        dp = [float('inf')] * n   # dp[i]: min cost to reach index i
        dp[0] = 0
        length = [0] * n          # length[i]: jumps on the chosen path to i
        parent = [-1] * n         # parent[i]: predecessor on that path
        # FIX: Python-2-only `xrange` replaced with `range` (identical
        # iteration, works on both interpreters); local `re` renamed so it
        # no longer shadows the stdlib `re` module.
        for i in range(1, n):
            if A[i] == -1:
                continue
            for j in range(max(i - B, 0), i):
                if A[j] != -1:
                    cost = dp[j] + A[j]
                    if cost < dp[i] or (cost == dp[i] and length[i] < length[j] + 1):
                        dp[i] = cost
                        parent[i] = j
                        length[i] = length[j] + 1
        path = []
        cur = n - 1
        while cur >= 0:
            path.append(cur + 1)  # convert to 1-indexed
            cur = parent[cur]
        # If the backward walk did not end at index 1, n-1 was unreachable.
        return path[::-1] if path[-1] == 1 else []
|
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from site_utility.models import Counties, Cities
from school.models import Facultati, Specializari
from prof.models import Profesori
from school.views import counties
from django.http import HttpResponse, JsonResponse
# Create your views here.
@csrf_exempt
def ajax_cities(request):
    """AJAX endpoint: return {city name -> city id} for the county whose
    name was POSTed as 'selected_county'."""
    selected = request.POST['selected_county']
    matching = Cities.objects.filter(county_id=counties[selected]).order_by('name_simple')
    payload = {}
    for city in matching:
        payload[city.name] = city.id
    return JsonResponse(payload)
# Create your views here.
@csrf_exempt
def ajax_departments(request):
    """AJAX endpoint: return {department name -> department id} for the
    faculty whose id was POSTed as 'school_id'."""
    school = request.POST['school_id']
    matching = Specializari.objects.filter(id_facultate=school).order_by('specializare')
    return JsonResponse({dept.specializare: dept.id_specializare for dept in matching})
# @csrf_exempt
# def ajax_load_professors(request):
# school_id = request.POST['school_id']
# departments = {department.specializare : department.id_specializare for department in Specializari.objects.filter(id_facultate = school_id).order_by('specializare')}
# return JsonResponse(departments)
|
import unittest
class TestRow(unittest.TestCase):
    """Behavioural tests for Row: construction, item get/set, iteration,
    delete, and equality.  Each test uses its own DB file, removing any
    stale copy from a previous run first."""
    def test___init__(self):
        from row import Row
        from database import Database
        # Remove any leftover DB file from a previous run.
        try:
            import os
            os.remove('test_Row.__init__.db')
        except OSError:
            pass
        db = Database('test_Row.__init__.db')
        with self.assertRaises(TypeError):
            Row(None, 1)
        table = db.create_table('people', [('name', 'TEXT'), ('age', 'INTEGER', 0)])
        from exceptions import MissingRowError
        # rowid 1 does not exist yet.
        with self.assertRaises(MissingRowError):
            Row(table, 1)
        expectedRow = table.insert({'name':'Albert', 'age':13})
        self.assertEqual(Row(table, 1), expectedRow)
    def test___setitem__(self):
        from row import Row
        from database import Database
        try:
            import os
            os.remove('test_Row.__setitem__.db')
        except OSError:
            pass
        db = Database('test_Row.__setitem__.db')
        table = db.create_table('people', [('name', 'TEXT'), ('age', 'INTEGER', 0)])
        row = table.insert({'name':'Albert', 'age':13})
        row['name'] = 'Joran'
        self.assertEqual(row['name'], 'Joran')
        row['age'] = 14
        self.assertEqual(row['age'], 14)
        from exceptions import MissingColumnError
        with self.assertRaises(MissingColumnError):
            row['nonexistentcolumn'] = 'A very boring value...'
        # Writing through a handle to a deleted row must fail.
        row.delete()
        from exceptions import MissingRowError
        with self.assertRaises(MissingRowError):
            row['age'] = 14
    def test___getitem__(self):
        from row import Row
        from database import Database
        try:
            import os
            os.remove('test_Row.__getitem__.db')
        except OSError:
            pass
        db = Database('test_Row.__getitem__.db')
        table = db.create_table('people', [('name', 'TEXT'), ('age', 'INTEGER', 0)])
        row = table.insert({'name':'Albert', 'age':13})
        self.assertEqual(row['name'], 'Albert')
        self.assertEqual(row['age'], 13)
        from exceptions import MissingColumnError
        with self.assertRaises(MissingColumnError):
            row['nonexistentcolumn']
        row.delete()
        from exceptions import MissingRowError
        with self.assertRaises(MissingRowError):
            row['age']
    def test___iter__(self):
        from row import Row
        from database import Database
        try:
            import os
            os.remove('test_Row.__iter__.db')
        except OSError:
            pass
        db = Database('test_Row.__iter__.db')
        table = db.create_table('people', [('name', 'TEXT'), ('age', 'INTEGER', 0)])
        row = table.insert({'name':'Albert', 'age':13})
        # Iterating a row yields its field values in column order.
        expected_list = ['Albert', 13]
        actual_list = [field for field in row]
        self.assertEqual(expected_list, actual_list)
        row.delete()
        from exceptions import MissingRowError
        with self.assertRaises(MissingRowError):
            for field in row:
                pass
    def test_delete(self):
        from row import Row
        from database import Database
        try:
            import os
            os.remove('test_Row.delete.db')
        except OSError:
            pass
        db = Database('test_Row.delete.db')
        table = db.create_table('people', [('name', 'TEXT'), ('age', 'INTEGER', 0)])
        row = table.insert({'name':'Albert', 'age':13})
        row.delete()
        from exceptions import MissingRowError
        with self.assertRaises(MissingRowError):
            Row(table, 1)
    def test___ne__(self):
        from row import Row
        from database import Database
        try:
            import os
            os.remove('test_Row.__ne__.db')
        except OSError:
            pass
        db = Database('test_Row.__ne__.db')
        table = db.create_table('people', [('name', 'TEXT'), ('age', 'INTEGER', 0)])
        row = table.insert({'name':'Albert', 'age':13})
        row2 = table.insert({'name':'Joran', 'age':13})
        self.assertTrue(row != row2)
        self.assertFalse(row != row)
    def test___eq__(self):
        from row import Row
        from database import Database
        try:
            import os
            os.remove('test_Row.__eq__.db')
        except OSError:
            pass
        db = Database('test_Row.__eq__.db')
        table = db.create_table('people', [('name', 'TEXT'), ('age', 'INTEGER', 0)])
        row = table.insert({'name':'Albert', 'age':13})
        row2 = table.insert({'name':'Joran', 'age':13})
        self.assertFalse(row == row2)
        self.assertTrue(row == row)
class TestTable(unittest.TestCase):
    """Behavioural tests for Table.  Several tests delete the backing DB
    file mid-test and re-open the Database to verify that stale Table
    handles raise MissingTableError."""
    def test___init__(self):
        from table import Table
        from database import Database
        try:
            import os
            os.remove('test_Table.__init__.db')
        except OSError:
            pass
        db = Database('test_Table.__init__.db')
        from exceptions import MissingTableError
        with self.assertRaises(MissingTableError):
            Table(db, 'people')
        actual_table = db.create_table('people', [('name', 'TEXT'), ('age', 'INTEGER', 0)])
        expected_table = Table(db, 'people')
        self.assertEqual(expected_table, actual_table)
    def test_columns(self):
        from table import Table
        from database import Database
        try:
            import os
            os.remove('test_Table.columns.db')
        except OSError:
            pass
        db = Database('test_Table.columns.db')
        table = db.create_table('people', [('name', 'TEXT'), ('age', 'INTEGER', 0)])
        self.assertEqual(table.columns, ['name', 'age'])
        # Recreate the DB file empty; the old Table handle is now stale.
        try:
            import os
            os.remove('test_Table.columns.db')
        except OSError:
            pass
        db = Database('test_Table.columns.db')
        from exceptions import MissingTableError
        with self.assertRaises(MissingTableError):
            table.columns
    def test_rows(self):
        from table import Table
        from database import Database
        try:
            import os
            os.remove('test_Table.rows.db')
        except OSError:
            pass
        db = Database('test_Table.rows.db')
        table = db.create_table('people', [('name', 'TEXT'), ('age', 'INTEGER', 0)])
        table.insert({'name':'Albert', 'age':13})
        table.insert({'name':'Joran', 'age':13})
        self.assertEqual(table.rows, [table[1], table[2]])
        try:
            import os
            os.remove('test_Table.rows.db')
        except OSError:
            pass
        db = Database('test_Table.rows.db')
        from exceptions import MissingTableError
        with self.assertRaises(MissingTableError):
            table.rows
    def test___contains__(self):
        from table import Table
        from database import Database
        try:
            import os
            os.remove('test_Table.__contains__.db')
        except OSError:
            pass
        db = Database('test_Table.__contains__.db')
        table = db.create_table('people', [('name', 'TEXT'), ('age', 'INTEGER', 0)])
        row = table.insert({'name':'Albert', 'age':13})
        self.assertIn(row, table)
        row2 = table.insert({'name':'Joran', 'age':13})
        row2.delete()
        self.assertNotIn(row2, table)
        try:
            import os
            os.remove('test_Table.__contains__.db')
        except OSError:
            pass
        db = Database('test_Table.__contains__.db')
        from exceptions import MissingTableError
        with self.assertRaises(MissingTableError):
            row in table
    def test___iter__(self):
        from table import Table
        from database import Database
        try:
            import os
            os.remove('test_Table.__iter__.db')
        except OSError:
            pass
        db = Database('test_Table.__iter__.db')
        table = db.create_table('people', [('name', 'TEXT'), ('age', 'INTEGER', 0)])
        row = table.insert({'name':'Albert', 'age':13})
        row2 = table.insert({'name':'Joran', 'age':13})
        # NOTE(review): the comprehension variable rebinds `row` in Python 2
        # (list comprehensions leak their loop variable there); expected_list
        # is built afterwards, so this test is order-sensitive on py2.
        actual_list = [row for row in table]
        expected_list = [row, row2]
        self.assertEqual(actual_list, expected_list)
        try:
            import os
            os.remove('test_Table.__iter__.db')
        except OSError:
            pass
        db = Database('test_Table.__iter__.db')
        from exceptions import MissingTableError
        with self.assertRaises(MissingTableError):
            for row in table:
                pass
    def test___getitem__(self):
        from table import Table
        from database import Database
        try:
            import os
            os.remove('test_Table.__getitem__.db')
        except OSError:
            pass
        db = Database('test_Table.__getitem__.db')
        table = db.create_table('people', [('name', 'TEXT'), ('age', 'INTEGER', 0)])
        row = table.insert({'name':'Albert', 'age':13})
        self.assertEqual(table[1], row)
        row.delete()
        from exceptions import MissingRowError
        with self.assertRaises(MissingRowError):
            table[1]
        # NOTE(review): unlike the other tests, the Database is NOT re-opened
        # after removing the file here — verify that is intentional.
        try:
            import os
            os.remove('test_Table.__getitem__.db')
        except OSError:
            pass
        from exceptions import MissingTableError
        with self.assertRaises(MissingTableError):
            table[1]
    def test___eq__(self):
        from table import Table
        from database import Database
        try:
            import os
            os.remove('test_Table.__eq__.db')
        except OSError:
            pass
        db = Database('test_Table.__eq__.db')
        table = db.create_table('people', [('name', 'TEXT'), ('age', 'INTEGER', 0)])
        table2 = db.create_table('companies', [('name', 'TEXT'), ('address', 'TEXT')])
        self.assertTrue(table == table)
        self.assertFalse(table == table2)
    def test___ne__(self):
        from table import Table
        from database import Database
        try:
            import os
            os.remove('test_Table.__ne__.db')
        except OSError:
            pass
        db = Database('test_Table.__ne__.db')
        table = db.create_table('people', [('name', 'TEXT'), ('age', 'INTEGER', 0)])
        table2 = db.create_table('companies', [('name', 'TEXT'), ('address', 'TEXT')])
        self.assertFalse(table != table)
        self.assertTrue(table != table2)
    def test_insert(self):
        from table import Table
        from database import Database
        try:
            import os
            os.remove('test_Table.insert.db')
        except OSError:
            pass
        db = Database('test_Table.insert.db')
        table = db.create_table('people', [('name', 'TEXT'), ('age', 'INTEGER', 0)])
        row = table.insert({'name':'Albert', 'age':13})
        self.assertEqual(row, table[1])
        # Omitted column falls back to its declared default (age -> 0).
        row2 = table.insert({'name':'Joran'})
        self.assertEqual(row2, table[2])
        self.assertEqual(row2['age'], 0)
        from exceptions import MissingColumnError
        with self.assertRaises(MissingColumnError):
            table.insert({'name':'Albert', 'address':'Somewhere'})
class TestDatabase(unittest.TestCase):
    """Behavioural tests for Database: construction, connections, table
    creation/lookup, containment, iteration, and equality."""
    def test___init__(self):
        from database import Database
        try:
            import os
            os.remove('test_Database.__init__.db')
        except OSError:
            pass
        db = Database('test_Database.__init__.db')
        self.assertIsInstance(db, Database)
        with self.assertRaises(TypeError):
            Database(None)
    def test_get_connection(self):
        from database import Database
        try:
            import os
            os.remove('test_Database.get_connection.db')
        except OSError:
            pass
        db = Database('test_Database.get_connection.db')
        from sqlite3 import Connection
        self.assertIsInstance(db.get_connection(), Connection)
    def test_tables(self):
        from database import Database
        try:
            import os
            os.remove('test_Database.tables.db')
        except OSError:
            pass
        db = Database('test_Database.tables.db')
        table = db.create_table('people', [('name', 'TEXT'), ('age', 'INTEGER', 0)])
        table2 = db.create_table('companies', [('name', 'TEXT'), ('address', 'TEXT')])
        self.assertEqual(db.tables, [table, table2])
    def test_create_table(self):
        from database import Database
        try:
            import os
            os.remove('test_Database.create_table.db')
        except OSError:
            pass
        db = Database('test_Database.create_table.db')
        table = db.create_table('people', [('name', 'TEXT'), ('age', 'INTEGER', 0)])
        self.assertEqual(table, db['people'])
        self.assertEqual(table.database, db)
        self.assertEqual(table.name, 'people')
        # Verify the physical schema via sqlite's PRAGMA table_info:
        # (column name, declared type, default value).
        with db.get_connection() as connection:
            cursor = connection.execute('PRAGMA table_info(\'people\');')
            actual_data = [(row[1], row[2], row[4]) for row in cursor]
        self.assertEqual([('name', 'TEXT', None), ('age', 'INTEGER', '0')], actual_data)
        from exceptions import DuplicateTableError
        with self.assertRaises(DuplicateTableError):
            db.create_table('people', [('name', 'TEXT'), ('age', 'INTEGER', 0)])
    def test___contains__(self):
        from database import Database
        try:
            import os
            os.remove('test_Database.__contains__.db')
        except OSError:
            pass
        db = Database('test_Database.__contains__.db')
        table = db.create_table('people', [('name', 'TEXT'), ('age', 'INTEGER', 0)])
        self.assertIn(table, db)
        # Deleting the DB file makes the table no longer contained.
        try:
            import os
            os.remove('test_Database.__contains__.db')
        except OSError:
            pass
        self.assertNotIn(table, db)
    def test___getitem__(self):
        from database import Database
        try:
            import os
            os.remove('test_Database.__getitem__.db')
        except OSError:
            pass
        db = Database('test_Database.__getitem__.db')
        table = db.create_table('people', [('name', 'TEXT'), ('age', 'INTEGER', 0)])
        self.assertEqual(table, db['people'])
        from exceptions import MissingTableError
        with self.assertRaises(MissingTableError):
            db['companies']
    def test___iter__(self):
        from database import Database
        try:
            import os
            os.remove('test_Database.__iter__.db')
        except OSError:
            pass
        db = Database('test_Database.__iter__.db')
        table1 = db.create_table('people', [('name', 'TEXT'), ('age', 'INTEGER', 0)])
        table2 = db.create_table('companies', [('name', 'TEXT'), ('address', 'TEXT')])
        self.assertEqual([table for table in db], [table1, table2])
    def test___eq__(self):
        from database import Database
        # NOTE(review): if the first remove raises OSError the second file is
        # never removed — two separate try-blocks would be more robust.
        try:
            import os
            os.remove('test_Database.__eq__1.db')
            os.remove('test_Database.__eq__2.db')
        except OSError:
            pass
        db1 = Database('test_Database.__eq__1.db')
        db2 = Database('test_Database.__eq__2.db')
        self.assertTrue(db1 == db1)
        self.assertFalse(db1 == db2)
    def test___ne__(self):
        from database import Database
        try:
            import os
            os.remove('test_Database.__ne__1.db')
            os.remove('test_Database.__ne__2.db')
        except OSError:
            pass
        db1 = Database('test_Database.__ne__1.db')
        db2 = Database('test_Database.__ne__2.db')
        self.assertFalse(db1 != db1)
        self.assertTrue(db1 != db2)
class TestQuery(unittest.TestCase):
    """Tests for the Query builder: SQL fragment generation (columns,
    filters, parameters), full statement rendering, and execution."""
    def test___init__(self):
        from database import Database
        try:
            import os
            os.remove('test_Query.__init__.db')
        except OSError:
            pass
        db = Database('test_Query.__init__.db')
        table = db.create_table('people', [('name', 'TEXT'), ('age', 'INTEGER')])
        from query import Query
        query = Query(table)
        self.assertIsInstance(query, Query)
    def test_column_string(self):
        from database import Database
        try:
            import os
            os.remove('test_Query.column_string.db')
        except OSError:
            pass
        db = Database('test_Query.column_string.db')
        table = db.create_table('people', [('name', 'TEXT'), ('age', 'INTEGER')])
        from query import Query
        # rowid is always selected, and never duplicated.
        query = Query(table)
        self.assertEqual(query.column_string, 'rowid')
        query = Query(table, ['rowid'])
        self.assertEqual(query.column_string, 'rowid')
        query = Query(table, ['name','age'])
        self.assertEqual(query.column_string, 'rowid,name,age')
    def test_filter_string(self):
        from database import Database
        try:
            import os
            os.remove('test_Query.filter_string.db')
        except OSError:
            pass
        db = Database('test_Query.filter_string.db')
        table = db.create_table('people', [('name', 'TEXT'), ('age', 'INTEGER')])
        from query import Query
        query = Query(table)
        self.assertEqual(query.filter_string, '')
        from query import EQUAL
        query = Query(table, filters=[('name', EQUAL, 'A nonexistent name!')])
        self.assertEqual(query.filter_string, 'name = ?')
        from query import GREATER_THAN
        # A tuple of conditions renders as a parenthesized AND group.
        query = Query(table, filters=[(('name', EQUAL, 'A nonexistent name!'), ('age', GREATER_THAN, 18))])
        self.assertEqual(query.filter_string, '(name = ? AND age > ?)')
        from query import NOT, AND, OR
        query = Query(table, filters=[(NOT, (('name', EQUAL, 'Albert'), AND, ('age', EQUAL, 13))), OR, ('name', EQUAL, 'Joran')])
        self.assertEqual(query.filter_string, '(NOT (name = ? AND age = ?)) OR name = ?')
        query1 = Query(table, columns=['name'] ,filters=[('age', GREATER_THAN, 13)])
        from query import IN
        # A nested Query renders as a correlated sub-select.
        query2 = Query(table, filters=[(NOT, (('name', EQUAL, 'Albert'), AND, ('age', EQUAL, 13))), OR, ('name', IN, query1)])
        self.assertEqual(query2.filter_string, '(NOT (name = ? AND age = ?)) OR name IN (SELECT name FROM people WHERE age > ?)')
    def test_parameters(self):
        from database import Database
        try:
            import os
            os.remove('test_Query.parameters.db')
        except OSError:
            pass
        db = Database('test_Query.parameters.db')
        table = db.create_table('people', [('name', 'TEXT'), ('age', 'INTEGER')])
        from query import Query
        query = Query(table)
        self.assertEqual(query.parameters, [])
        from query import EQUAL, GREATER_THAN
        query1 = Query(table, filters=[(('name', EQUAL, 'A nonexistent name!'), ('age', GREATER_THAN, 18))])
        self.assertEqual(query1.parameters, ['A nonexistent name!', 18])
        from query import IN
        # Parameters of a nested Query are flattened into the outer list.
        query2 = Query(table, filters=[(('name', EQUAL, 'A nonexistent name!'), ('age', GREATER_THAN, 18)), ('name', IN, query1)])
        self.assertEqual(query2.parameters, ['A nonexistent name!', 18, 'A nonexistent name!', 18])
        query = Query(table, filters=[('name', IN, ['Albert', 'Joran'])])
        self.assertEqual(query.parameters, ['Albert', 'Joran'])
    def test___str__(self):
        from database import Database
        try:
            import os
            os.remove('test_Query.__str__.db')
        except OSError:
            pass
        db = Database('test_Query.__str__.db')
        table = db.create_table('people', [('name', 'TEXT'), ('age', 'INTEGER')])
        from query import Query
        query = Query(table)
        self.assertEqual(str(query), 'SELECT rowid FROM people;')
        query = Query(table, ['name'])
        self.assertEqual(str(query), 'SELECT rowid,name FROM people;')
        from query import GREATER_THAN
        query = Query(table, ['name'], [('age', GREATER_THAN, 18)])
        self.assertEqual(str(query), 'SELECT rowid,name FROM people WHERE age > ?;')
    def test_execute(self):
        from database import Database
        # NOTE(review): this test reuses the '__str__' DB filename — likely a
        # copy/paste slip; harmless, but a dedicated name would be clearer.
        try:
            import os
            os.remove('test_Query.__str__.db')
        except OSError:
            pass
        db = Database('test_Query.__str__.db')
        table = db.create_table('people', [('name', 'TEXT'), ('age', 'INTEGER')])
        from query import Query
        query = Query(table)
        rs = query.execute()
        from resultset import ResultSet
        self.assertIsInstance(rs, ResultSet)
        self.assertEqual([r for r in rs], [])
if __name__ == '__main__':
    # Discover and run every test case defined in this module.
    unittest.main()
|
#!/usr/bin/env python
#coding:utf-8
import numpy as np
from visualdl import LogWriter
class MyLog():
    """Small VisualDL logging helper: lazily creates one scalar stream per
    tag (under ``mode``) and appends (step, value) records to it."""
    def __init__(self,mode="train",logDir="../log"):
        self.mode=mode
        # tag -> VisualDL scalar writer, created on first use in add_scalar().
        self.varDic={}
        self.log_writer = LogWriter(logDir, sync_cycle=10)
    def add_scalar(self,tag,scalar_value,global_step):
        # Create the scalar stream for this tag on first sight.
        if tag not in self.varDic:
            with self.log_writer.mode(self.mode) as writer:
                self.varDic[tag]=writer.scalar(tag)
        self.varDic[tag].add_record(global_step,scalar_value)
def pad_batch_datasp(insts,
                     pad_idx=0,
                     pad_c=2,
                     max_seq_len=128,
                     return_pos=False,
                     return_input_mask=False,
                     return_max_len=False,
                     return_num_token=False,
                     return_seq_lens=False):
    """
    Pad the instances to the max sequence length in batch, and generate the
    corresponding position data and input mask.

    Pads every instance to the fixed ``max_seq_len`` (not the batch max —
    the batch-max line is deliberately commented out below) and returns a
    list of the requested arrays, or the single array when only one was
    requested.

    NOTE(review): each instance is reshaped to [-1, max_seq_len, pad_c],
    which assumes the padded per-instance data flattens to exactly
    max_seq_len * pad_c values — i.e. instances appear to be sequences of
    pad_c-wide items.  TODO confirm the expected element shape of ``insts``;
    scalar ``pad_idx`` padding mixed with list elements would otherwise
    produce a ragged array.
    """
    return_list = []
    #max_len = max(len(inst) for inst in insts)
    # Fixed-length padding: every instance is padded to max_seq_len.
    max_len = max_seq_len
    # Any token included in dict can be used to pad, since the paddings' loss
    # will be masked out by weights and make no effect on parameter gradients.
    inst_data = np.array([
        list(inst) + list([pad_idx] * (max_len - len(inst))) for inst in insts
    ])
    #print(inst_data)
    #print("inst_data",len(inst_data),inst_data)
    return_list += [inst_data.astype("float32").reshape([-1, max_len, pad_c])]
    # position data
    if return_pos:
        # NOTE(review): positions are 1-D per instance but reshaped with the
        # same pad_c channel axis as the data — verify this is intentional.
        inst_pos = np.array([
            list(range(0, len(inst))) + [pad_idx] * (max_len - len(inst))
            for inst in insts
        ])
        return_list += [inst_pos.astype("float32").reshape([-1, max_len, pad_c])]
    if return_input_mask:
        # This is used to avoid attention on paddings.
        input_mask_data = np.array(
            [[1] * len(inst) + [0] * (max_len - len(inst)) for inst in insts])
        input_mask_data = np.expand_dims(input_mask_data, axis=-1)
        return_list += [input_mask_data.astype("float32")]
    if return_max_len:
        return_list += [max_len]
    if return_num_token:
        num_token = 0
        for inst in insts:
            num_token += len(inst)
        return_list += [num_token]
    if return_seq_lens:
        seq_lens = np.array([len(inst) for inst in insts])
        return_list += [seq_lens.astype("int64").reshape([-1, 1])]
    return return_list if len(return_list) > 1 else return_list[0]
def getBIOs(labels):
    """Expand a label set into BIO tags: one 'O' plus 'B-'/'I-' per label,
    preserving the input order."""
    tags = ["O"]
    tags.extend(prefix + label for label in labels for prefix in ("B-", "I-"))
    return tags
class OneHot(object):
    """One-hot encoder over a fixed label vocabulary.

    Public attributes relied on by callers: ``labels`` (ordered label list),
    ``labelDic`` (label -> index), ``count`` (vocabulary size).
    """
    def __init__(self, labels, toBIO=False):
        # Optionally expand the vocabulary to its BIO form first.
        if toBIO:
            labels = getBIOs(labels)
        self.labels = labels
        self.labelDic = {label: index for index, label in enumerate(labels)}
        self.count = len(labels)
    def getLabelByID(self, id):
        """Inverse lookup: index -> label string."""
        return self.labels[id]
    def getLabelID(self, label):
        """Lookup: label string -> index (KeyError for unknown labels)."""
        return self.labelDic[label]
    def getLabelIDs(self, labels):
        """Map a sequence of labels to their indices."""
        return [self.getLabelID(label) for label in labels]
    def oneHot(self, label):
        """Return the one-hot vector (list of 0/1 ints) for ``label``."""
        vector = [0] * self.count
        vector[self.getLabelID(label)] = 1
        return vector
    def getDefault(self):
        """All-zero vector (no label set)."""
        return [0] * self.count
    def getOneHots(self, labels):
        """One-hot encode a sequence of labels."""
        return [self.oneHot(label) for label in labels]
def label_data(data, start, l, _type):
    """Write BIO tags for a span in place: ``B-<type>`` at ``start`` and
    ``I-<type>`` for the remaining ``l - 1`` positions.  Returns ``data``."""
    end = start + l
    for position in range(start, end):
        tag_prefix = u"B-" if position == start else u"I-"
        data[position] = u"{}{}".format(tag_prefix, _type)
    return data
def label_dataOT(data, start, l, _type):
    """Write the bare tag (no BIO prefix) over ``data[start:start+l]`` in
    place.  Returns ``data``."""
    for position in range(start, start + l):
        data[position] = u"{}{}".format(u"", _type)
    return data
import paddlehub as hub
class LACTager(object):
    """Wrapper around the PaddleHub 'lac' lexical-analysis module exposing
    word/tag pairs and per-character labels."""
    def __init__(self):
        self.module = hub.Module(name="lac")
    def getTagResult(self, text):
        """Run LAC on one text; return its raw result dict."""
        results = self.module.lexical_analysis(data={"text": [text]})
        return results[0]
    def getTag(self, text):
        """Return [[word, tag], ...] for the text."""
        analysis = self.getTagResult(text)
        return [[word, tag] for word, tag in zip(analysis["word"], analysis["tag"])]
    def getLabels(self, text):
        """Return one bare tag per character, expanded from LAC's word spans."""
        analysis = self.getTagResult(text)
        labels = [""] * len(text)
        offset = 0
        for word, tag in zip(analysis["word"], analysis["tag"]):
            label_dataOT(labels, offset, len(word), tag)
            offset += len(word)
        return labels
class LACOneHot(object):
    """Per-character one-hot BIO feature extractor backed by PaddleHub's
    'lac' lexical analyzer."""
    def __init__(self):
        self.module = hub.Module(name="lac")
        # Full LAC tag inventory: POS tags plus PER/LOC/ORG/TIME entities.
        labelStr="n,nr,nz,a,m,c,PER,f,ns,v,ad,q,u,LOC,s,nt,vd,an,r,xc,ORG,t,nw,vn,d,p,w,TIME"
        labels=labelStr.split(",")
        # Encoder over the BIO expansion of those tags (O + B-/I- per tag).
        self.oneHot=OneHot(getBIOs(labels))
        self.count=self.oneHot.count
        print("LAC_D:",self.count)
    def getDefault(self):
        # All-zero feature vector for padding positions.
        return self.oneHot.getDefault()
    def getTextOneHot(self,text):
        #print("textOneHot",text)
        inputs = {"text": [text]}
        results = self.module.lexical_analysis(data=inputs)
        result=results[0]
        # One BIO label slot per character; each LAC word covers a char span.
        labels=[""]*len(text)
        start=0
        for word,ner in zip(result["word"],result["tag"]):
            #print(word,ner)
            label_data(labels,start,len(word),ner)
            start+=len(word)
        #print("labels",labels)
        rst=self.oneHot.getOneHots(labels)
        return rst
    def getFeature(self,example):
        # Returns the example's precomputed .lac attribute (built elsewhere).
        return example.lac
    def getFeature1(self,example):
        # NOTE(review): str.split(u"") raises ValueError (empty separator) —
        # the separator character was probably lost in an encoding
        # round-trip (e.g. a control char between tokens of text_a).
        # TODO restore the original separator before using this path.
        return self.getTextOneHot("".join(example.text_a.split(u"")))
class FeatureList(object):
    """Concatenates per-token feature vectors produced by several feature
    creators into one combined vector per token.

    Public attributes: ``featureCreatorList``, ``count`` (total feature
    width), ``defaultFeature`` (concatenated default vectors).
    """
    def __init__(self, featureCreatorList):
        self.featureCreatorList = featureCreatorList
        self.count = 0
        self.defaultFeature = []
        for creator in featureCreatorList:
            self.count += creator.count
            self.defaultFeature += creator.getDefault()
    def getDefault(self):
        """Combined all-zero/default feature vector."""
        return self.defaultFeature
    def getFeature(self, example):
        """Build the per-token combined feature matrix for one example.

        Token count is derived from text_a's length (tokens appear to be
        separated by single characters, hence the (len + 1) // 2)."""
        fCount = (len(example.text_a) + 1) // 2
        merged = [[] for _ in range(fCount)]
        for creator in self.featureCreatorList:
            per_token = creator.getFeature(example)
            for index, token_features in enumerate(merged):
                token_features += per_token[index]
        # Sanity check: every token must end up with the same feature width.
        tl = len(merged[0])
        for index, token_features in enumerate(merged):
            ttl = len(token_features)
            if ttl != tl:
                print("notsame",tl,ttl,example.text_a)
        return merged
|
import sys
# Competitive-programming harness: read the judge input from a local file.
sys.stdin = open("D3_17319_input.txt", "r")
case_count = int(input())
for case_index in range(case_count):
    length = int(input())
    text = input()
    half = length // 2
    # "Yes" iff the string has even length and its two halves are identical.
    verdict = "Yes" if length % 2 == 0 and text[:half] == text[half:] else "No"
    print("#{} {}".format(case_index + 1, verdict))
# coding = utf-8
"""
@author: zhou
@time:2019/2/15 15:41
"""
from sklearn.cluster import KMeans
from sklearn.preprocessing import MinMaxScaler
import pandas as pd
def zuqiu_kmeans(n):
    """Cluster national football teams into ``n`` groups with K-Means.

    Reads 'data.csv' (GBK-encoded), clusters on three ranking columns,
    and prints the original data with an appended cluster-label column.
    """
    data = pd.read_csv('data.csv', encoding='gbk')
    # print(data)
    # Feature columns: 2019 FIFA ranking, 2018 World Cup, 2015 Asian Cup.
    train_x = data[['2019年国际排名', '2018世界杯', '2015亚洲杯']]
    # Initialize KMeans
    kmeans = KMeans(n_clusters=n)
    # Normalize the features to [0, 1]
    min_max_scaler = MinMaxScaler()
    train_x = min_max_scaler.fit_transform(train_x)
    kmeans.fit(train_x)
    predict_y = kmeans.predict(train_x)
    # Append the cluster assignments back onto the original data
    result = pd.concat((data, pd.DataFrame(predict_y)), axis=1)
    # Rename the appended column 0 to the Chinese label "clustering result".
    result.rename({0: u'聚类结果'}, axis=1, inplace=True)
    print(result)
if __name__ == "__main__":
    # Cluster the teams into 4 groups when run as a script.
    zuqiu_kmeans(4)
|
# code has been inferred from https://github.com/aGIToz/kFineTuning/blob/master/finetune.py
# https://keras.io/preprocessing/image/
import os
import seaborn as sns
import itertools
import numpy as np
import sys
import pandas as pd
import xml.etree.ElementTree as ET
from PIL import Image
from collections import Counter
from keras.models import Model
from keras import backend as K
from keras.callbacks import Callback
from keras.layers import Dense
from pathlib import Path
from keras.applications.resnet50 import ResNet50
import matplotlib.pyplot as plt
from sklearn.metrics import (f1_score,
precision_score,
recall_score)
# NOTE(review): '%matplotlib inline' is IPython/Jupyter magic, not valid
# plain Python — this file only runs inside a notebook environment.
%matplotlib inline
from google.colab import drive
# Mount Google Drive so the Pascal VOC data under gdrive is reachable.
drive.mount('/content/gdrive')
def get_labels(annotations_dir, unique_labels=True):
    """Yield xml_to_labels(...) for every annotation file in the directory.

    ``unique_labels`` is forwarded to xml_to_labels unchanged."""
    for annotation_path in annotations_dir.iterdir():
        with open(annotation_path) as handle:
            yield xml_to_labels(handle.read(), unique_labels)
# import cv2
# Sanity-check: display one known image from the mounted dataset.
path = '/content/gdrive/My Drive/Colab Notebooks/project_phase2/Pascal_VOC/JPEGImages/2008_007057.jpg'
plt.imshow(plt.imread(Path(path).expanduser()))
plt.axis('off');
path1 = '/content/gdrive/My Drive/Colab Notebooks/project_phase2/Pascal_VOC/Annotations'
annotations_dir = Path(path1).expanduser()
images_dir = Path('/content/gdrive/My Drive/Colab Notebooks/project_phase2/Pascal_VOC/JPEGImages').expanduser()
# One row per image: (filename, labels) parsed from the VOC annotation XML.
img_metadata = pd.DataFrame(get_labels(annotations_dir), columns=['filename', 'labels'])
img_metadata.sample(7)
image_directory = Path('VOC2012/JPEGImages')
observation = img_metadata.sample(n=1).to_dict(orient='records')[0]
img = plt.imread(images_dir.joinpath(observation['filename']))
# NOTE(review): ImageDataGenerator is imported only further down (in the
# tensorflow.keras import block) — this works in a notebook where that cell
# ran first, but fails if this file is executed top-to-bottom.
img_gen = ImageDataGenerator(rescale=1/255, validation_split=0.2)
# 80/20 train/validation split over the same dataframe; the training
# iterator shuffles, the validation one does not (so predictions stay
# aligned with labels downstream).
img_iter = img_gen.flow_from_dataframe(
    img_metadata,
    shuffle=True,
    directory=images_dir,
    x_col='filename',
    y_col='labels',
    class_mode='categorical',
    target_size=(128, 128),
    batch_size=20,
    subset='training'
)
img_iter_val = img_gen.flow_from_dataframe(
    img_metadata,
    shuffle=False,
    directory=images_dir,
    x_col='filename',
    y_col='labels',
    class_mode='categorical',
    target_size=(128, 128),
    batch_size=20,
    subset='validation'
)
# Reverse mapping: class index -> label string.
label_to_class = {v: k for k, v in img_iter.class_indices.items()}
def array_to_labels(onehot_array, label_to_class):
    """Map a one-hot/multi-hot indicator vector back to its class names.

    Args:
        onehot_array: 1-D array of 0/1 indicator values.
        label_to_class: mapping from integer index to class name.

    Returns:
        List of class names whose indicator equals 1, in index order.
    """
    # FIX: removed the unused `labels = []` local from the original.
    hot_indices = np.where(onehot_array == 1)[0]
    return [label_to_class[i] for i in hot_indices]
from tensorflow.keras import layers
from tensorflow.keras import models
from tensorflow.keras import optimizers
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.preprocessing.image import img_to_array, load_img
from tensorflow.keras.applications import ResNet50
from tensorflow.keras.applications.densenet import DenseNet121
# Backbone: ImageNet-pretrained ResNet50 without its classification head.
E_model_Res = ResNet50(weights = 'imagenet', include_top = False, input_shape=(128, 128, 3))
print('Number of trainable weights before freezing the conv base:', len(E_model_Res.trainable_weights))
E_model_Res.trainable = False
print('Number of trainable weights after freezing the conv base:', len(E_model_Res.trainable_weights))
E_model1 = models.Sequential()
E_model1.add(E_model_Res)
E_model1.add(layers.Flatten())
E_model1.add(layers.Dense(256, activation='relu'))
# Sigmoid + binary cross-entropy: 20 independent labels (multi-label VOC).
E_model1.add(layers.Dense(20, activation='sigmoid'))
E_model1.compile(loss='binary_crossentropy', optimizer='adam', metrics=['acc'])
# NOTE(review): 'Metrics' is not defined anywhere in this file — presumably
# a custom Callback from the source notebook; this line fails as-is.
metrics = Metrics(img_iter_val, validation_steps=10)
# NOTE(review): class_weights is referenced here but only assigned below,
# and labels_count is never defined in this file at all — notebook cell
# ordering artifacts that break top-to-bottom execution.
history = E_model1.fit_generator(
    img_iter,
    epochs=1,
    steps_per_epoch=1,
    class_weight=class_weights,
    callbacks=[metrics]
)
total_counts = sum(labels_count.values())
# Inverse-frequency class weights: rarer labels get larger weight.
class_weights = {img_iter.class_indices[cls]: total_counts / count for cls, count in labels_count.items()}
nr_batches = 10
threshold = 0.5
# tee() the validation iterator so ground truth and predictions consume
# identical batches in the same order.
img_iter_val_0, img_iter_val_1 = itertools.tee(img_iter_val, 2)
y_true = np.vstack(next(img_iter_val_0)[1] for _ in range(nr_batches)).astype('int')
# NOTE(review): 'model' is undefined here, and the next line recomputes
# y_pred from E_model1 anyway — this line is dead/broken.
y_pred = (model.predict_generator(img_iter_val_1, steps=nr_batches) > threshold).astype('int')
y_pred = (E_model1.predict_generator(img_iter_val_1, steps=nr_batches) > threshold).astype('int')
print(y_pred)
|
import os
import logging
from flask import Flask, url_for as _url_for
from flask.ext.oauth import OAuth
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.migrate import Migrate
from elasticsearch import Elasticsearch
from celery import Celery
from grano import default_settings
# Verbose root logging for the app itself, but silence the chatty HTTP and
# Elasticsearch client loggers down to WARNING.
logging.basicConfig(level=logging.DEBUG)
requests_log = logging.getLogger("requests")
requests_log.setLevel(logging.WARNING)
urllib3_log = logging.getLogger("urllib3")
urllib3_log.setLevel(logging.WARNING)
elasticsearch_log = logging.getLogger("elasticsearch")
elasticsearch_log.setLevel(logging.WARNING)
#sqlalchemy_log = logging.getLogger("sqlalchemy")
#sqlalchemy_log.setLevel(logging.INFO)
# Flask application: package defaults first, then optional overrides from
# the file named by the GRANO_SETTINGS environment variable.
app = Flask(__name__)
app.config.from_object(default_settings)
app.config.from_envvar('GRANO_SETTINGS', silent=True)
app_name = app.config.get('APP_NAME', 'grano')
db = SQLAlchemy(app)
# Alembic migrations live next to the package, one directory up.
ALEMBIC_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '../alembic'))
migrate = Migrate(app, db, directory=ALEMBIC_DIR)
es = Elasticsearch()
celery = Celery(app.config.get('CELERY_APP_NAME', app_name),
                broker=app.config['CELERY_BROKER_URL'])
celery.config_from_object(app.config)
es_index = app.config.get('ES_INDEX', app_name)
oauth = OAuth()
def url_for(*a, **kw):
    """Like Flask's url_for, but builds absolute (external) URLs by default.

    FIX: uses setdefault instead of always injecting ``_external=True`` —
    the original raised TypeError ("got multiple values for keyword
    argument") whenever a caller passed ``_external`` explicitly.  A caller
    may now opt out with ``_external=False``.
    """
    kw.setdefault('_external', True)
    return _url_for(*a, **kw)
|
# TrackAnalysisWidget.py
# (C)2014
# Scott Ernst
from __future__ import print_function, absolute_import, unicode_literals, division
from pyglass.threading.FunctionRemoteExecutionThread import FunctionRemoteExecutionThread
from pyglass.widgets.PyGlassWidget import PyGlassWidget
#_______________________________________________________________________________
class TrackAnalysisWidget(PyGlassWidget):
    """ User interface class for handling track data IO from any of the possible sources and
        saving them to, or loading them from the database. """

    #===========================================================================
    #                                                                 C L A S S

    # Resource sub-folder (under the app resource root) for this widget's UI assets.
    RESOURCE_FOLDER_PREFIX = ['tools']

    #___________________________________________________________________________
    def __init__(self, parent, **kwargs):
        """Initialize the widget and wire the integrity-test button.

        runIntegrityBtn is created by the PyGlass UI loader from resources.
        """
        super(TrackAnalysisWidget, self).__init__(parent, **kwargs)
        self.runIntegrityBtn.clicked.connect(self._handleRunIntegrityTests)

    #===========================================================================
    #                                                         P R O T E C T E D

    #___________________________________________________________________________
    def _activateWidgetDisplayImpl(self, **kwargs):
        """PyGlass hook called when the widget is displayed; nothing to do."""
        pass

    #___________________________________________________________________________
    @classmethod
    def _runIntegrityTests(cls):
        """Placeholder for the integrity test suite (currently disabled).

        Executed on a worker thread by FunctionRemoteExecutionThread.
        """
        # tester = DataIntegrityTester()
        # return tester.run()
        pass

    #===========================================================================
    #                                                         H A N D L E R S

    #___________________________________________________________________________
    def _handleRunIntegrityTests(self):
        """Button handler: show status and run the tests on a remote thread."""
        self.mainWindow.showStatus(
            self,
            u'Integrity Testing',
            u'Running integrity test suite')
        thread = FunctionRemoteExecutionThread(self, self._runIntegrityTests)
        thread.execute(
            callback=self._handleIntegrityTestsComplete,
            logCallback=self._handleThreadLog)

    #___________________________________________________________________________
    def _handleThreadLog(self, event):
        """Append a worker-thread log message to the status display."""
        self.mainWindow.appendStatus(self, event.get('message'))

    #___________________________________________________________________________
    def _handleIntegrityTestsComplete(self, event):
        """Completion callback: mark the status display as finished."""
        self.mainWindow.showStatusDone(self)
|
"""
Ax_Metrics - Query component for time frame specification
------------------------------------------------------------------------------
Author: Dan Kamins <dos at axonchisel dot net>
Copyright (c) 2014 Dan Kamins, AxonChisel.net
"""
# ----------------------------------------------------------------------------
from axonchisel.metrics.foundation.ax.obj import AxObj
from axonchisel.metrics.foundation.chrono.framespec import FrameSpec
# ----------------------------------------------------------------------------
class QTimeFrame(AxObj):
    """
    Query component for time frame specification.

    Thin wrapper around a FrameSpec object, which defines the query
    period, granularity, smoothing, and more.
    """

    def __init__(self):
        self._tmfrspec = FrameSpec()

    #
    # Public Properties
    #

    @property
    def tmfrspec(self):
        """Wrapped FrameSpec."""
        return self._tmfrspec

    @tmfrspec.setter
    def tmfrspec(self, val):
        self._assert_type("tmfrspec", val, FrameSpec)
        self._tmfrspec = val

    #
    # Internal Methods
    #

    def __unicode__(self):
        return u"QTimeFrame({0})".format(self._tmfrspec)
|
from search import *
loopFlag = True  # keeps the menu loop in main() running; Quit() sets it False
def printMenu():
    """Print the main menu (exit / shelter search / stray-animal search)."""
    menu_lines = (
        "=================== 메 뉴 ==================",
        "1. 종료",
        "2. 보호소 검색",
        "3. 유기동물 검색",
        "===========================================",
    )
    for line in menu_lines:
        print(line)
def launcherFunction(menu):
    """Run the action for the chosen menu entry; report unknown choices."""
    if menu == '1':
        Quit()
        return
    if menu == '2':
        searchShelter()
        return
    if menu == '3':
        searchAnimal()
        return
    print("Error: menu")
def Quit():
    """Stop the menu loop by clearing the module-level loopFlag."""
    global loopFlag
    loopFlag = False
def main():
    """Show the menu and dispatch choices until Quit() clears loopFlag."""
    while loopFlag:
        printMenu()
        choice = str(input('select menu: '))
        launcherFunction(choice)
    print("다음에 또 만나요.")


main()
def standaardtarief(afstandKM):
    """Standard fare: 0.8/km up to 50 km, else a 15 base plus 0.6/km.

    A negative distance is invalid and costs 0.
    """
    if afstandKM < 0:
        return 0
    if afstandKM > 50:
        return 15 + afstandKM * .6
    return afstandKM * .8
def ritprijs(leeftijd, weekendrit, afstandKM):
    """Trip price: the standard fare with weekend and age discounts.

    Weekend ('ja'): 35% off for under-12s and 65+, otherwise 40% off.
    Weekday: 30% off for under-12s and 65+, otherwise full fare.
    """
    basis = standaardtarief(afstandKM)
    leeftijdskorting = leeftijd < 12 or leeftijd >= 65
    if weekendrit == "ja":
        return basis * (0.65 if leeftijdskorting else 0.6)
    if leeftijdskorting:
        return basis * 0.7
    return basis
# Read trip parameters; parse the numbers explicitly rather than eval()-ing
# raw input (eval would execute arbitrary expressions typed by the user).
leeftijd = int(input('Hoe oud bent u: '))
weekendrit = input('Is het weekend? (ja, nee): ')
afstandKM = float(input('Hoe ver rijst u(in kilometer): '))
print('het kost U', round(ritprijs(leeftijd, weekendrit, afstandKM), 2), 'euro, voor een rit van', afstandKM, 'kilometer')
|
import turtle

smart = turtle.Turtle()

# Loop 4 times. Everything I want to repeat is
# *indented* by four spaces.
for i in range(4):
    # One square side: advance 50 units, then turn 90 degrees clockwise.
    smart.forward(50)
    smart.right(90)

# This isn't indented, so we aren't repeating it.
turtle.done()  # keep the drawing window open until the user closes it
def count_matches(a, b, rounds=40000000):
    """Advent of Code 2017 day 15: count rounds where the generators agree.

    Each round advances both linear congruential generators and compares the
    lowest 16 bits. The original compared bin(a)[-16:] strings, which is
    wrong whenever a value is below 2**15 — the slice then includes the '0b'
    prefix and fewer than 16 digits, so genuine matches are missed. A 16-bit
    mask compares exactly the low 16 bits (and is far faster).
    """
    total = 0
    for _ in range(rounds):
        a = (a * 16807) % 2147483647
        b = (b * 48271) % 2147483647
        if (a ^ b) & 0xFFFF == 0:
            total += 1
    return total


if __name__ == "__main__":
    # Input lines look like "Generator A starts with NNN".
    a = int(input().split()[4])
    b = int(input().split()[4])
    print(count_matches(a, b))
#enter display message
print("welcome to YOU CAN VOTE program")
#enter input from user
# NOTE(review): comparisons below are case-sensitive ("indian", "n" exactly).
usr_name = str(input("enter your nationality :"))
usr_age = int(input("enter your age :"))
usr_anti = str(input("are you anti nationalist ? : (y/n)"))
# using condition
if usr_name =="indian" and usr_age >= 18 and usr_anti == "n":
    print("you can vote")
elif usr_age <= 18 and usr_name =="indian":
    # NOTE(review): age exactly 18 only reaches here when usr_anti != "n",
    # so the "wait till 18" message is then misleading — confirm intent.
    print ("wait till 18")
else:
    print("you cannot vote")
#end the program
input("press enter to exit")
|
import random, os
from time import sleep

# The three valid moves; BEATEN_BY[x] is the move that defeats x.
CHOICES = ('rock', 'paper', 'scissor')
BEATEN_BY = {'rock': 'paper', 'paper': 'scissor', 'scissor': 'rock'}


def _finish(message):
    """Show the round result, wait for Enter, and quit (matching the
    original's print / input / exit sequence)."""
    print(message)
    input(' ')
    exit()


def _play_round(player_choice):
    """Pick the computer's move, reveal it, and report the outcome.

    Replaces three copy-pasted branches (one per player move) from the
    original with a single win-table lookup.
    """
    computer_choice = random.choice(CHOICES)
    sleep(1)
    print(computer_choice)
    sleep(1)
    if computer_choice == player_choice:
        _finish('Tie !')
    elif BEATEN_BY[player_choice] == computer_choice:
        _finish('i won! ')
    else:
        _finish('You won ')


v = 'rock', 'paper', 'scissor'
print(v)
sleep(1)
thing = input('Enter your things : ')
sleep(1)
# As in the original, an unrecognized entry simply ends the program silently.
if thing in CHOICES:
    _play_round(thing)
|
'''
Class Interactions
Class User
'''
from collections import Counter
import pickle
from pymongo import MongoClient
from Tweet import Tweet
class Interactions:
    """Builds per-user interaction profiles from tweets stored in MongoDB.

    NOTE(review): this module is Python 2 (see the bare `print count, ...`
    statement below) and will not run unmodified on Python 3.
    """

    # Input file: one numeric user id per line.
    FILE = "../data/TheGoodPlace/TheGoodPlace.csv"
    COLLECTION = "old_tweets"
    # Pickle output: one User object dumped per processed user.
    OUTPUT_FILE = "TheGoodPlace_interactions.p"
    HOST = "10.1.10.96" # default localhost

    def __init__(self, read_collection, query="user.id"):
        """read_collection: pymongo collection to query; query: id field name."""
        self.users = self.get_unique_users_fromfile(Interactions.FILE)
        self.db = self.get_mongo_connection(host=Interactions.HOST)
        self.read_collection = read_collection
        self.query = query

    def get_interactions_for_user(self, user_id):
        """Return a User populated from all of user_id's tweets in read_collection."""
        user_obj = User(user_id)
        query_string = {self.query: int(user_id)}
        tweets = self.read_collection.find(query_string)
        print(tweets.count())
        for t in tweets:
            tweet = Tweet(t)
            user_obj.add_connections(tweet)
        return user_obj

    def get_interactions_and_hashtags(self):
        """Pickle a User profile for every known user into OUTPUT_FILE.

        NOTE(review): queries self.db.old_tweets directly rather than
        self.read_collection — confirm that is intended.
        """
        self.file_out = open(Interactions.OUTPUT_FILE, 'wb')
        count = 0
        remains = len(self.users)
        for user in self.users:
            user_obj = User(user)
            query_string = {"user.id": int(user)}
            tweets = self.db.old_tweets.find(query_string)
            print(tweets.count())
            for t in tweets:
                tweet = Tweet(t)
                user_obj.add_connections(tweet)
            count += 1
            pickle.dump(user_obj, self.file_out)
            if count % 10 == 0:
                # Python 2 print statement: "processed / remaining" progress.
                print count, " /", (remains-count)
        self.file_out.close()

    def get_length(self):
        """Number of unique users loaded from FILE."""
        return len(self.users)

    def get_hashtags(users, db):
        """Unimplemented. NOTE(review): missing `self`; as written, `users`
        is bound to the instance when called as a method."""
        pass

    def get_unique_users_fromfile(self, filename):
        """Read one integer user id per line from filename."""
        users = []
        with open(filename, 'rb') as f:
            users = map(int, f.readlines())
        return users

    def get_mongo_connection(self, host="localhost", port=27017, db_name="stream_store"):
        """Return the named MongoDB database handle."""
        return MongoClient(host=host, port=port)[db_name]
class User:
    """Accumulates one user's interaction counts and hashtags."""

    def __init__(self, user):
        self.id = user
        self.mentions = Counter()
        self.replies = Counter()
        self.quotes = Counter()
        self.retweets = Counter()
        self.interactions = Counter()
        self.hashtags = set()

    @classmethod
    def create_from_dict(cls, dict):
        """Rebuild a User from the plain-dict form produced by toJson()."""
        user = cls(dict['id'])
        user.hashtags = set(dict['hashtags'])
        user.mentions = dict['mentions']
        user.replies = dict['replies']
        user.retweets = dict['retweets']
        user.interactions = dict['interactions']
        user.quotes = dict['quotes']
        return user

    def _bump(self, counter, user):
        """Count one interaction of a specific kind plus the overall total."""
        counter[user] += 1
        self.interactions[user] += 1

    def add_connections(self, tweet):
        """Record every interaction and hashtag present on a Tweet object."""
        if tweet.retweet_author_id:
            self.add_retweet(tweet.retweet_author_id)
        if tweet.quote_author_id:
            self.add_quotes(tweet.quote_author_id)
        if tweet.mentions:
            self.add_mention(tweet.mentions)
        if tweet.reply_id:
            self.add_replies(tweet.reply_id)
        if tweet.hashtags:
            self.add_hashtags(tweet.hashtags)

    def add_mention(self, users):
        """Count a mention (and overall interaction) for each user in users."""
        for mentioned in users:
            self._bump(self.mentions, mentioned)

    def add_replies(self, user):
        """Count one reply to user."""
        self._bump(self.replies, user)

    def add_retweet(self, user):
        """Count one retweet of user."""
        self._bump(self.retweets, user)

    def add_quotes(self, user):
        """Count one quote of user."""
        self._bump(self.quotes, user)

    def add_hashtags(self, hashtags):
        """Collect the text of each hashtag entity dict."""
        for tag in hashtags:
            self.hashtags.add(tag['text'])

    def toJson(self):
        """Serialize to JSON-friendly builtins (str keys, list of hashtags)."""
        return {
            'id': self.id,
            'mentions': self.change_to_str(self.mentions),
            'retweets': self.change_to_str(self.retweets),
            'quotes': self.change_to_str(self.quotes),
            'interactions': self.change_to_str(self.interactions),
            'hashtags': list(self.hashtags),
            'replies': self.change_to_str(self.replies),
        }

    def change_to_str(self, d):
        """Return a copy of mapping d whose keys are coerced to str."""
        converted = {}
        for key, value in d.items():
            converted[str(key)] = value
        return converted
#!/usr/bin/env python
import unittest
import logging
from testsmtpd import SSLSMTPServer, TestCredentialValidator
import os
import secure_smtpd
from datetime import datetime, timedelta
class RpiSecurityCamTest(unittest.TestCase):
    """Integration tests for sendemail.py, run against a local SSL SMTP test
    server (started in the __main__ block below) that captures every message
    instead of delivering it."""

    _smtp_server = None
    TEST_DIR = 'test_dir'

    def _cleanup_file(self, file_name):
        """Remove file_name if it exists."""
        if os.path.exists(file_name):
            os.unlink(file_name)

    def setUp(self):
        """Reset the test dir, settings files, timestamp file, and inbox."""
        if os.path.exists(self.TEST_DIR):
            os.system('rm -rf %s' % self.TEST_DIR)
        os.mkdir(self.TEST_DIR)
        os.system('cp mail_settings.test.json mail_settings.json')
        os.system('cp settings.template.json settings.json')
        self._cleanup_file('sendemail.py.time')
        # Drain any messages left over from a previous test.
        messages = self._smtp_server.pop_messages()

    def tearDown(self):
        """Remove the timestamp file and the temporary test directory."""
        self._cleanup_file('sendemail.py.time')
        if os.path.exists(self.TEST_DIR):
            os.system('rm -rf %s' % self.TEST_DIR)

    def testMotionDetectedSendsEmail(self):
        """-d (motion detected) should send exactly one notification email."""
        # test
        os.system('./sendemail.py -q -d')
        # test validation
        messages = self._smtp_server.pop_messages()
        self.assertEqual(1, len(messages))
        self.assertTrue('Motion was detected on' in messages[0])

    def testMotionRecordedSendsEmail(self):
        """-m FILE should email the recording as an attachment, then delete it."""
        # setup
        TEST_ATTACHMENT_FILE_NAME = os.path.join(self.TEST_DIR, 'test.mkv')
        with open(TEST_ATTACHMENT_FILE_NAME, 'wt') as file:
            file.write('This is a test file\n')
        self.assertTrue(os.path.exists(TEST_ATTACHMENT_FILE_NAME))
        # test
        os.system('./sendemail.py -q -m %s' % TEST_ATTACHMENT_FILE_NAME)
        # test validation
        messages = self._smtp_server.pop_messages()
        self.assertEqual(1, len(messages))
        self.assertTrue('Subject: Motion Video Captured' in messages[0])
        self.assertTrue('Motion was recorded on' in messages[0])
        self.assertTrue('Content-Disposition: attachment' in messages[0])
        self.assertTrue('filename="test.mkv"' in messages[0])
        # make sure the file was deleted
        self.assertFalse(os.path.exists(TEST_ATTACHMENT_FILE_NAME))

    def testCatchupOldMovies(self):
        """-c DIR should email and delete only recordings older than 5 minutes."""
        NUM_TEST_FILES = 10
        FILES_TO_SEND = 8
        # create some movie files with old dates
        five_minutes_ago = datetime.now() - timedelta(minutes=5)
        for i in range(NUM_TEST_FILES):
            file_name = os.path.join(self.TEST_DIR, 'test_file%u.mkv' % i)
            # mtime in seconds-since-epoch, i minutes older than the cutoff.
            file_time = ((five_minutes_ago - timedelta(minutes=i)) - datetime.fromtimestamp(0)).total_seconds()
            with open(file_name, 'wt') as file:
                file.write('This is test file %u.\n' % i)
            if i < FILES_TO_SEND:
                os.utime(file_name, (file_time, file_time))
        # Run catch up
        os.system('./sendemail.py -q -c %s' % self.TEST_DIR)
        # Make sure an email was received for each file
        messages = self._smtp_server.pop_messages()
        self.assertEqual(FILES_TO_SEND, len(messages))
        for message in messages:
            self.assertTrue('Subject: Motion Video Captured' in message)
        # Make sure the test files older than 5 minutes were deleted
        for i in range(NUM_TEST_FILES):
            file_name = os.path.join(self.TEST_DIR, 'test_file%u.mkv' % i)
            if i < FILES_TO_SEND:
                self.assertFalse(os.path.exists(file_name))
            else:
                self.assertTrue(os.path.exists(file_name))


if __name__ == '__main__':
    # Silence the SMTP server's logging, then start it before running tests.
    logger = logging.getLogger( secure_smtpd.LOG_NAME )
    logger.setLevel(logging.CRITICAL)
    logging.basicConfig(level=logging.CRITICAL)
    RpiSecurityCamTest._smtp_server = SSLSMTPServer(
        ('0.0.0.0', 1025),
        None,
        require_authentication=True,
        ssl=True,
        certfile='server.crt',
        keyfile='server.key',
        credential_validator=TestCredentialValidator(),
        maximum_execution_time = 1.0
    )
    RpiSecurityCamTest._smtp_server.start()
    unittest.main()
|
''' Insert heading comments here.'''
import math
EPSILON = 1.0e-7
def display_options():
    '''Print the menu of program options to stdout.'''
    print('''\nPlease choose one of the options below:
A. Display the sum of squares of the first N natural numbers.
B. Display the approximate value of Pi.
C. Display the approximate value of the sine of X.
D. Display the approximate value of the cosine of X.
M. Display the menu of options.
X. Exit from the program.''')
def sum_natural_squares(N):
    '''Return 1^2 + 2^2 + ... + N^2 for a positive integer N, else None.

    Uses the closed form N(N+1)(2N+1)/6 with integer (floor) division,
    which is exact — the product is always divisible by 6 — unlike the
    original float division, which loses precision for large N.
    '''
    N = int(N)
    if N > 0:
        return (N * (N + 1) * (2 * N + 1)) // 6
    return None
def approximate_pi():
    '''Approximate Pi with the Nilakantha series
    3 + 4/(2*3*4) - 4/(4*5*6) + ..., stopping once the next term's
    magnitude drops below EPSILON (so the truncation error is < EPSILON).
    '''
    total = 3.0
    sign = 1.0
    n = 2
    term = 4.0 / (n * (n + 1) * (n + 2))
    while term >= EPSILON:
        total += sign * term
        sign = -sign
        n += 2
        term = 4.0 / (n * (n + 1) * (n + 2))
    return total
def approximate_sin(x):
    '''Approximate sin(x) with its Maclaurin series
    x - x^3/3! + x^5/5! - ..., stopping when a term's magnitude
    drops below EPSILON.
    '''
    term = float(x)
    total = 0.0
    k = 0
    while abs(term) >= EPSILON:
        total += term
        k += 1
        # Next term: multiply by -x^2 / ((2k)(2k+1)).
        term *= -x * x / ((2 * k) * (2 * k + 1))
    return total
def approximate_cos(x):
    '''Approximate cos(x) with its Maclaurin series
    1 - x^2/2! + x^4/4! - ..., stopping when a term's magnitude
    drops below EPSILON.
    '''
    term = 1.0
    total = 0.0
    k = 0
    while abs(term) >= EPSILON:
        total += term
        k += 1
        # Next term: multiply by -x^2 / ((2k-1)(2k)).
        term *= -x * x / ((2 * k - 1) * (2 * k))
    return total
def main():
    '''Program entry point: menu loop (not yet implemented).'''
    pass # insert your code here

if __name__ == "__main__":
    main()
import dash
import dash_core_components as dcc
import dash_html_components as html
import matplotlib.pyplot as plt
from dash.dependencies import Input, Output
import plotly.offline as py
from plotly.graph_objs import *
import plotly.graph_objs as go
import dash_bootstrap_components as dbc
import folium
from folium import IFrame, FeatureGroup
import os
import base64
import glob
import pandas as pd
from folium.plugins import MarkerCluster
### Data
import pandas as pd
import pickle
### Graphing
import plotly.graph_objects as go
### Dash
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from dash.dependencies import Output, Input
## Navbar
from navbar import Navbar
import base64
####################
# Inline style applied to the currently-selected dcc.Tab.
tab_selected_style = {
    'borderTop': '1px solid #d6d6d6',
    'borderBottom': '1px solid #d6d6d6',
    'backgroundColor': '#119DFF',
    'color': 'white',
    'font-size': '70%',
    'padding': '6px'
}
tab_style = { # Estilos das Tabs -> base style for unselected tabs
    'borderBottom': '1px solid #d6d6d6',
    'padding': '6px',
    'font-size': '75%',
    'fontWeight': 'bold',
    'fontSize' : '13'
}
##################################
# Centre codes. ListaCentros_variacao entries keep a trailing " -" (first
# entry is a select-all label); Lista_Centros holds the bare codes.
ListaCentros_variacao = ['Todos os centros', 'CCS -','CEAR -','CCEN -','CT -','CCM -','CBIOTEC -','CTDR -','CCHLA -','CCTA -','CCHSA -','CCSA -','CI -','CCAE -','CCJ -','CCA -','CE -']
Lista_Centros = ['CCS','CEAR','CCEN','CT','CCM','CBIOTEC','CTDR','CCHLA','CCTA','CCHSA','CCSA','CI','CCAE','CCJ','CCA','CE']
anos = ['Todos os anos','2017','2018','2019']
nav = Navbar()
# Venn diagram image, base64-encoded at import time for a data-URI <img>.
venn = base64.b64encode(open('Apoio/venn.png', 'rb').read())
# Explanation card; its paragraph text is filled in by a callback via id.
card_content = [
    dbc.CardHeader("Entendendo o gráfico",style={'font-size':24, 'textAlign':'center'}),
    dbc.CardBody(
        [
            html.P(
                className="card-text",id='relatorio_estudo_vocabular',style={'text-align':'justify'}
            ),
        ]
    ),
]
jumbotron = dbc.Card(card_content, outline=True)
# The four tabs below originally repeated near-identical Dropdown/Checklist
# literals (16 centre entries each, four times over). Small factory helpers
# build identical components from the same source lists, so new centres or
# years only need to be added in one place.

def _dropdown_anos(dd_id, todos_label):
    """Multi-select year dropdown; 'todos' is the select-all sentinel value.

    todos_label is passed through verbatim because the original tabs used
    slightly different capitalizations for the select-all entry.
    """
    opcoes = [{'label': todos_label, 'value': 'todos'}]
    opcoes += [{'label': a, 'value': a} for a in anos[1:]]
    return dcc.Dropdown(
        id=dd_id,
        options=opcoes,
        value=None,
        multi=True,
        placeholder="Selecione os anos",
        searchable=False,
        style={'margin-bottom': '10px'},
    )


def _dropdown_centros(dd_id, valores, placeholder):
    """Multi-select centre dropdown.

    valores may be ListaCentros_variacao[1:] (values keep the trailing " -")
    or Lista_Centros (bare codes); labels always show the bare centre code.
    """
    opcoes = [{'label': 'Todos os Centros', 'value': 'todos'}]
    opcoes += [{'label': v.replace(' -', ''), 'value': v} for v in valores]
    return dcc.Dropdown(
        id=dd_id,
        options=opcoes,
        value=None,
        multi=True,
        placeholder=placeholder,
        searchable=False,
        style={'margin-bottom': '10px'},
    )


def _hidden_checklist(chk_id, label, value):
    """Visually hidden checklist used by callbacks as a 'select all' flag."""
    return dcc.Checklist(
        options=[{'label': label, 'value': value}],
        id=chk_id,
        labelStyle={'display': 'none'},
    )


def _dropdown_campo(dd_id, placeholder):
    """Single-select dropdown for the proposal text field to analyse."""
    return dcc.Dropdown(
        id=dd_id,
        options=[
            {'label': "Resumo", 'value': "Resumo"},
            {'label': "Justificativa", 'value': "Justificativa"},
            {'label': "Metodologia", 'value': "Metodologia"},
            {'label': "Objetivos", 'value': "Objetivos"},
            {'label': "Fundamentação Teórica", 'value': "Fundamentacao"},
        ],
        value=None,
        multi=False,
        placeholder=placeholder,
        searchable=False,
        style={'margin-bottom': '10px'},
    )


# Filter card: one tab per chart type, each with its own filter controls.
card_content_2 = [
    dbc.CardHeader("Opções de Filtro", style={'font-size': 24, 'textAlign': 'center'}),
    dbc.CardBody(
        [
            dcc.Tabs(id='tab_escolha_grafico', value='variabilidade_vocabular', children=[
                dcc.Tab(label='Variabilidade Vocabular', value='variabilidade_vocabular', children=[
                    html.Div(html.Br()),
                    html.H4("Escolha os anos que deseja analisar:", style={'font-size': 19}),
                    _dropdown_anos('dropdown_anos_variacao', "Todos os anos"),
                    _hidden_checklist('checklist_anos_variacao', 'Selecionar Todos os Anos', 'ta'),
                    html.Br(),
                    html.H4("Escolha os centros desejados:", style={'font-size': 19}),
                    _dropdown_centros('dropdown_centros_variacao', ListaCentros_variacao[1:], "Selecione os centros"),
                    _hidden_checklist('checklist_centros_variacao', 'Selecionar Todos os Centros', 'tc'),
                    html.Br(),
                    html.H4("Escolha o campo desejado:", style={'font-size': 19}),
                    _dropdown_campo('dropdown_modalidades_variacao', "Selecione a modalidade"),
                ], style=tab_style, selected_style=tab_selected_style),
                dcc.Tab(label='Análise Gramatical', value='analise_gramatical', children=[
                    html.Div(html.Br()),
                    html.H4("Escolha as classes gramaticais que deseja analisar:", style={'font-size': 18}),
                    dcc.Dropdown(
                        id='dropdown_classes_analise',
                        options=[
                            {'label': "Todos as classes", 'value': 'todos'},
                            {'label': "Substantivos", 'value': "Substantivos"},
                            {'label': "Adjetivos", 'value': "Adjetivos"},
                            {'label': "Verbos", 'value': "Verbos"},
                        ],
                        value=None,
                        multi=True,
                        placeholder="Selecione as classes",
                        searchable=False,
                        style={'margin-bottom': '10px'},
                    ),
                    _hidden_checklist('checklist_classes_analise', 'Selecionar Todas as classes', 'tc'),
                    html.Br(),
                    html.H4("Escolha os centros desejados:", style={'font-size': 19}),
                    _dropdown_centros('dropdown_centros_analise', ListaCentros_variacao[1:], "Selecione os centros"),
                    _hidden_checklist('checklist_centros_analise', 'Selecionar Todos os Centros', 'tc'),
                    html.Br(),
                    html.H4("Escolha o campo desejado:", style={'font-size': 19}),
                    _dropdown_campo('dropdown_modalidades_analise', "Selecione o campo"),
                ], style=tab_style, selected_style=tab_selected_style),
                dcc.Tab(label='Nuvem de Palavras', value='nuvem_palavras', children=[
                    html.Div(html.Br()),
                    html.H4("Escolha os anos que deseja analisar:", style={'font-size': 19}),
                    _dropdown_anos('dropdown_anos_nuvem', "Todos os Anos"),
                    _hidden_checklist('checklist_anos_nuvem', 'Selecionar Todos os Anos', 'ta'),
                    html.Br(),
                    html.H4("Escolha os centros desejados:", style={'font-size': 19}),
                    _dropdown_centros('dropdown_centros_nuvem', ListaCentros_variacao[1:], "Selecione o centro"),
                    _hidden_checklist('checklist_centros_nuvem', 'Selecionar Todos os Centros', 'tc'),
                    html.Br(),
                    html.H4("Escolha o campo desejado:", style={'font-size': 19}),
                    _dropdown_campo('dropdown_modalidades_nuvem', "Selecione a modalidade"),
                ], style=tab_style, selected_style=tab_selected_style),
                dcc.Tab(label='Contagem de Palavras', value='contagem_palavras', children=[
                    html.Div(html.Br()),
                    html.H4("Escolha os anos que deseja analisar:", style={'font-size': 19}),
                    _dropdown_anos('dropdown_anos_contagem', "Todos os anos"),
                    _hidden_checklist('checklist_anos_contagem', 'Selecionar Todos Anos', 'ta'),
                    html.Br(),
                    html.H4("Escolha os centros desejados:", style={'font-size': 19}),
                    _dropdown_centros('dropdown_centros_contagem', Lista_Centros, "Selecione os centros"),
                    _hidden_checklist('checklist_centros_contagem', 'Selecionar Todos os Centros', 'tc'),
                    html.Br(),
                    html.H4("Digite a(s) palavra(s) que deseja contar\nseparadas por vírgula:", style={'font-size': 19}),
                    html.H4("Para plurais com o mesmo radical da palavra no singular exemplo 'pesquisa - pesquisas' deve ser considerada apenas a versão da palavra no singular, caso contrário o plural será contabilizado duas vezes", style={'font-size': 13}),
                    dcc.Input(
                        id='palavra_contagem',
                        placeholder='Escreva a(s) palavra(s)',
                        type='text',
                        value='',
                        style={'margin-bottom': '10px'},
                    ),
                ], style=tab_style, selected_style=tab_selected_style),
            ]),
        ]
    ),
]
jumbotron_2 = dbc.Card(card_content_2, outline=True)

# Chart card: header text and one container per chart type are filled in by
# callbacks; only the relevant container is populated for the active tab.
card_content_3 = [
    dbc.CardHeader(id='texto_grafico_nuvem',style={'font-size':24, 'textAlign':'center'}),
    dbc.CardBody(
        [
            html.Div(id='grafico_variacao_vocabular'),
            html.Div(id='grafico_analise_gramatical'),
            html.Div(id='grafico_contagem_palavras'),
            # Word-cloud image, centred and scaled to fit the card.
            html.Img(id='grafico_nuvem_palavras', style={'display':'block', 'max-width': '100%', 'margin-left': 'auto', 'margin-right': 'auto','width':'90%','height':'90%'})
        ]
    ),
]
jumbotron_3 = dbc.Card(card_content_3, outline=True)

# Page body: filter card and explanation on the left (4/12), chart on the right (8/12).
body_1 =html.Div([
    dbc.Row(
        [
            dbc.Col(
                [jumbotron_2,
                jumbotron]
                , md=4
            ),
            dbc.Col([
                jumbotron_3
            ], md=8 ),
        ],no_gutters=True
    ),
])
# Error dialog: shown when no centre was selected.
modal = html.Div(
    [
        dbc.Modal(
            [
                dbc.ModalHeader("ERROR"),
                dbc.ModalBody("Escolha pelo menos um centro como parâmetro de entrada de centros"),
                dbc.ModalFooter(
                    dbc.Button("Close", id="close", className="ml-auto")
                ),
            ],
            id="modal",
        ),
    ]
)

# Error dialog: shown when fewer than two years were selected.
modal_2 = html.Div(
    [
        dbc.Modal(
            [
                dbc.ModalHeader("ERROR"),
                dbc.ModalBody("Escolha pelo menos dois anos como parâmetro de entrada de anos"),
                dbc.ModalFooter(
                    dbc.Button("Close", id="close_2", className="ml-auto")
                ),
            ],
            id="modal_2",
        ),
    ]
)
def variacao():
    """Return the assembled page layout: navbar, body, and error modals."""
    page_children = [nav, body_1, modal, modal_2]
    return html.Div(page_children)
|
import re
import pandas
import os
import file_manager as fm
MAX_SIZE = 200  # inclusive upper bound on a squashed method-body length
MIN_SIZE = 180  # inclusive lower bound
progress = 1  # module-level default; the functions below pass progress explicitly
def save_csv(filename_in, filename_out, data, mode='a'):
    """Write one CSV row (data) to filename_out, appending by default.

    filename_in is unused here; it is kept for interface compatibility with
    existing callers. mode is forwarded to DataFrame.to_csv — the original
    hard-coded 'a' and silently ignored the parameter.
    """
    df = pandas.DataFrame(data=[data])
    df.to_csv(filename_out, sep=',', index=False, header=False, mode=mode)
    # print(f"File saved: {filename_in}")
def parse():
    """ Deprecated: scan a fixed directory of Java sources and append every
    method whose squashed length is within [MIN_SIZE, MAX_SIZE] to a fixed
    CSV. Superseded by parse_file / create_dataset below. """
    progress = 1
    # Method matcher (approximate: breaks on nested braces, generics, annotations).
    regex_method = r"(?!class)(public|private)\s+([A-Za-z,\s*]){2,}\s*\([A-Za-z,\s*]*\)\s*\{[^\}]*\}"
    # regex_whitespace = r"\t|\n"
    regex_whitespace = r"\n"
    directory = "/Users/martinholecek/Desktop/Datasets/Small/Dataset_2000/"
    filename_out = "/Users/martinholecek/Desktop/Datasets/dataset1.csv"
    for filename in os.listdir(directory):
        if filename.startswith('.'):
            continue  # skip hidden files such as .DS_Store
        try:
            filename = os.path.join(directory, filename)
            with open(filename) as file:
                content = file.read()
            matches = re.finditer(regex_method, content, re.MULTILINE)
            for _, match in enumerate(matches, start=1):
                # Strip newlines so each method occupies a single CSV row.
                method = re.sub(regex_whitespace, '', match.group())
                print(method)
                # print(len(method))
                if len(method) <= MAX_SIZE and len(method) >= MIN_SIZE:
                    save_csv(filename, filename_out, method)
                    print(progress)
                    progress += 1
        except Exception as e:
            print(e)
            print("Error: ", filename, "could not be opened!")
def parse_file(filename_in, filename_out, progress):
    """Extract Java method bodies from one file; append the ones whose
    squashed length is within [MIN_SIZE, MAX_SIZE] to filename_out.

    Returns the updated progress counter. Files that are abstract classes,
    interfaces, non-ASCII, or unreadable are skipped and progress is
    returned unchanged.
    """
    # Method matcher: group(3) captures the body between the braces
    # (approximate: breaks on nested braces, generics, annotations).
    regex_method = r"(?!class)(public|private)\s+([A-Za-z,\s*]){2,}\s*\([A-Za-z,\s*]*\)\s*\{([^\}]*)\}"
    regex_whitespace = r"\s\s+|\n|\t"
    try:
        with open(filename_in) as file:
            content = file.read()
        content = fm.remove_comments(content).strip()
        if "abstract" in content:
            # Abstract class: skip the whole file.
            return progress
        if "interface" in content:
            # Interface: no concrete method bodies.
            return progress
        if b"\x00" in content.encode('ascii'):
            # Binary content; non-ASCII text raises UnicodeEncodeError here,
            # which the except below treats as a skip.
            return progress
        matches = re.finditer(regex_method, content, re.MULTILINE)
        for _, match in enumerate(matches, start=1):
            # Collapse whitespace runs in the method body to single spaces.
            method = re.sub(regex_whitespace, " ", match.group(3))
            if len(method) <= MAX_SIZE and len(method) >= MIN_SIZE:
                save_csv(filename_in, filename_out,
                         method)
                if progress % 10 == 0:
                    print("Progress :", progress)
                progress += 1
        return progress
    except Exception:
        # Unreadable / undecodable files are skipped by design.
        return progress
def parse_file_obffuscation(filename_in, filename_out, progress):
    """Like parse_file, but also emits a variant of each method with its
    assigned local-variable names replaced by "XXXXX".

    Each saved CSV row is [1, method, method_obf]. Returns updated progress.
    NOTE(review): the 'obffuscation' spelling is kept because create_dataset
    below calls the function by this name.
    """
    regex_method = r"(?!class)(public|private)\s+([A-Za-z,\s*]){2,}\s*\([A-Za-z,\s*]*\)\s*\{([^\}]*)\}"
    regex_whitespace = r"\s\s+|\n|\t"
    # Matches "<type> <name> =" declarations; group(1) is the variable name.
    regex_variables = r"\w+\s+(\b\w+\b)\s+\="
    try:
        with open(filename_in) as file:
            content = file.read()
        content = fm.remove_comments(content).strip()
        if "abstract" in content:
            return progress
        if "interface" in content:
            return progress
        if b"\x00" in content.encode('ascii'):
            # Binary content; non-ASCII raises UnicodeEncodeError -> skipped below.
            return progress
        matches = re.finditer(regex_method, content, re.MULTILINE)
        for _, match in enumerate(matches, start=1):
            method = re.sub(regex_whitespace, " ", match.group(3))
            if len(method) <= MAX_SIZE and len(method) >= MIN_SIZE:
                method_obf = method
                # Obfuscate: replace every declared-and-assigned variable name.
                matches_obf = re.finditer(
                    regex_variables, method_obf, re.MULTILINE)
                for _, match_v in enumerate(matches_obf, start=1):
                    method_obf = re.sub(
                        match_v.group(1), "XXXXX", method_obf)
                save_csv(filename_in, filename_out,
                         [1, method, method_obf])
                if progress % 10 == 0:
                    print("Progress :", progress)
                progress += 1
        return progress
    except Exception:
        # Unreadable / undecodable files are skipped by design.
        return progress
def create_dataset(source, destination, num_methods):
progress = 0
while progress != num_methods:
# Directories
directory = fm.folder_random(source)
filename = fm.file_random(directory)
filename_in = os.path.join(directory, filename)
# print(filename)
# print(filename_in)
# break
# filename_out = os.path.join(destination, filename)
if filename.startswith('.'):
continue
progress = parse_file_obffuscation(filename_in, destination, progress)
# if progress % 10 == 0:
# print("Progress :", progress)
# Input/output locations for the dataset-building run below.
DIRECTORY = "/Users/martinholecek/Desktop/Datasets/Small/Dataset_2000/"
FILENAME_OUT = "/Users/martinholecek/Desktop/Datasets/dataset_obfus.csv"
FILENAME_IN = "/Users/martinholecek/Desktop/Datasets/Small/Dataset_20000/TestHandle.java"
# Root of the Java source batches the dataset is sampled from.
DATASET_FOLDER = "/Volumes/Untitled/Datasets/source-codes/batches2"
# Target number of methods to collect.
num_methods = 3000
# NOTE(review): runs at import time; DIRECTORY and FILENAME_IN appear unused here.
create_dataset(DATASET_FOLDER, FILENAME_OUT, num_methods)
|
from enum import Enum
# noinspection PyCompatibility
def objTypeCheck(obj, parentType, objName):
    """Validate that obj is acceptable for parentType.

    For Enum types, obj must equal one of the members' values (ValueError
    otherwise). For any other type, obj must be an instance of parentType
    (TypeError otherwise).
    """
    if issubclass(parentType, Enum):
        allowed = [member.value for member in parentType]
        if obj not in allowed:
            raise ValueError(f"Invalid value for {objName}:{obj}")
    elif not isinstance(obj, parentType):
        raise TypeError(f"Argument {objName} must be of type {parentType.__name__}")
def enumTuples(enumType, valueFirst=True):
    """Return (value, name) pairs for every member of enumType, in member
    order; with valueFirst=False the pairs are (name, value) instead.
    """
    pairs = []
    for member in enumType:
        if valueFirst:
            pairs.append((member.value, member.name))
        else:
            pairs.append((member.name, member.value))
    return pairs
|
import pandas as pd
import numpy as np
from scipy.optimize import minimize
def van(tir, cf):
    """Return the net present value (VAN) of a cash-flow series.

    :param tir: discount/interest rate per period
    :param cf: np.ndarray of cash flows, one per period (index 0 = today)
    :return: float, sum of discounted cash flows
    :raises TypeError: if cf is not a numpy array
    """
    if not isinstance(cf, np.ndarray):
        # Raise instead of returning an error string: a string return value
        # silently propagates into numeric callers such as fitness()/tir().
        raise TypeError('cf must be an array')
    discount = 1 / (1 + tir) ** np.arange(cf.size)
    return (cf * discount).sum()
x = van(0.05, np.array([-100, 5, 5, 5, 105]))
def fitness(tir, cf):
    """Objective for IRR search: the absolute value of the NPV at rate tir."""
    npv = van(tir, cf)
    return np.abs(npv)
# Sanity check of the objective at the true IRR (result ~0, discarded).
fitness(0.05, np.array([-100, 5, 5, 5, 105]))
# Minimize |NPV| over the rate; the extra positional arg is forwarded to fitness.
x = minimize(fitness, 0, np.array([-100, 5, 5, 5, 105]), method='BFGS')
def tir(cf):
    """Return the internal rate of return of cash flows cf.

    Minimizes |NPV(rate, cf)| with BFGS starting from rate 0.

    :param cf: np.ndarray of cash flows
    :return: np.ndarray holding the optimal rate found by the optimizer
    :raises TypeError: if cf is not a numpy array
    """
    if not isinstance(cf, np.ndarray):
        raise TypeError('cf must be an array')
    fit = lambda rate, flows: np.abs(van(rate, flows))
    # Bug fix: optimize over the *given* cash flows and return this call's
    # own result (the original minimized a hard-coded array and returned the
    # unrelated module-level `x`).
    result = minimize(fit, 0, args=(cf,), method='BFGS')
    return result.x
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-06-18 16:04
from __future__ import unicode_literals
import DjangoUeditor.models
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: converts Article.body to a UEditor
    rich-text field (nullable in the DB, optional in forms)."""
    # Must be applied after firstapp migration 0015.
    dependencies = [
        ('firstapp', '0015_auto_20170615_1344'),
    ]
    operations = [
        migrations.AlterField(
            model_name='article',
            name='body',
            field=DjangoUeditor.models.UEditorField(blank=True, null=True),
        ),
    ]
|
from selenium import webdriver
import yaml
# Fetch the page's cookies
def test_get_cookies():
    """Open the Feishu calendar in Chrome, wait for a manual login, then
    dump the session cookies to cookie_data.yaml.
    """
    browser = webdriver.Chrome()
    browser.get('https://e1sm0k24i2.feishu.cn/calendar/week')
    # Block until the user has logged in by hand in the browser window.
    input("please input enter to continue")
    session_cookies = browser.get_cookies()
    with open("cookie_data.yaml", "w", encoding="UTF-8") as out:
        yaml.dump(session_cookies, out)
import math
import random
import time
from collections import deque, defaultdict, Counter
from typing import Tuple, List, Union
import numpy as np
import torch
from agents.belief_agent import BeliefBasedAgent
from agents.models.model_based_models import RewardModel, TransitionModel
from agents.models.multitask_models import MultitaskRewardModel, MultitaskTransitionModel
from environments.trick_taking_game import TrickTakingGame
from util import Card
# Run models on the GPU when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Maps num_cards -> identity matrix of one-hot action encodings, built once
# per game size (populated in ModelBasedAgent.__init__).
action_tensor_cache = {}
def mcts(executor, num_workers, belief, game, transition_model, reward_model, task_name,
         timeout: float = 0.5,
         horizon: int = 4,
         inverse_discount=1.2) -> int:
    """
    Given models and state, outputs action.
    :param executor: worker pool for parallel rollouts (currently unused; runs serially)
    :param num_workers: number of independent MCTS runs to aggregate
    :param belief: current belief-state vector
    :param game: game instance providing num_cards (the action-space size)
    :param transition_model: predicts the next belief from belief+action
    :param reward_model: predicts reward from belief+action
    :param task_name: task identifier forwarded to the multitask models
    :param timeout: wall-clock budget per worker, in seconds
    :param horizon: search depth in plies
    :param inverse_discount: factor (>1) weighting later rewards more heavily
    :return: index of the selected card/action
    """
    mcts_helper = _MCTSRunner(game, transition_model, reward_model, task_name, timeout, horizon,
                              inverse_discount)
    # thread_results = executor.map(mcts_helper, [belief] * num_workers)
    thread_results = [mcts_helper(belief) for _ in range(num_workers)]
    thread_scores, thread_plays = list(map(list, zip(*thread_results)))
    # Combine the per-worker score totals keyed by action path.
    scores_counter = Counter()
    for d in thread_scores:
        scores_counter.update(d)
    scores = dict(scores_counter)
    # Combine the per-worker visit counts keyed by action path.
    plays_counter = Counter()
    for d in thread_plays:
        plays_counter.update(d)
    plays = dict(plays_counter)
    # Best move = highest mean score among root actions. Use .get so actions
    # that were never expanded in any rollout score -inf instead of raising
    # KeyError on the plain dicts built above.
    list_actions = list(range(game.num_cards))
    card_index = max(list_actions,
                     key=lambda a: scores.get((a,), 0) / plays[(a,)]
                     if plays.get((a,), 0) else -float('inf'))
    return card_index
class _MCTSRunner:
    """
    Helper class for mcts(): a callable instance runs one time-budgeted MCTS
    loop over the learned transition/reward models. Search nodes are tuples
    of action indices (the action path taken from the root belief).
    """
    def __init__(self, game, transition_model, reward_model, task_name,
                 timeout: float = 0.5,
                 horizon: int = 4,
                 inverse_discount=1.2):
        # game supplies num_cards (the action-space size).
        self._game = game
        self._transition_model = transition_model
        self._reward_model = reward_model
        self._task_name = task_name
        # Wall-clock search budget in seconds.
        self._timeout = timeout
        # Maximum depth of a simulated action path.
        self._horizon = horizon
        # Factor > 1 applied to accumulated reward each step, so later
        # rewards weigh more than earlier ones.
        self._inverse_discount = inverse_discount
    def __call__(self, belief):
        # Callable so instances can be mapped over a worker pool.
        return self._mcts_helper(belief)
    def get_transition_reward(self, current, selected_action, reward_cache, nodes, actions):
        """Predicted reward for taking selected_action at path `current`.

        Beliefs are computed lazily through the transition model and cached
        in `nodes`; rewards are memoized in `reward_cache` keyed by the
        extended action path.
        """
        new_node = current + (selected_action,)
        if new_node in reward_cache:
            reward = reward_cache[new_node]
        else:
            if current in nodes:
                belief = nodes[current]
            else:
                # Belief not cached yet: roll the parent belief forward
                # through the transition model using the path's last action.
                a = current[-1]
                ba = torch.cat([nodes[current[:-1]], actions[a:a + 1]], dim=1)
                belief = self._transition_model.forward(ba, self._task_name)
                nodes[current] = belief
            belief_action = torch.cat([belief, actions[selected_action:selected_action + 1]], dim=1)
            reward = self._reward_model.forward(belief_action, self._task_name)
            reward_cache[new_node] = reward
        return reward
    def _mcts_helper(self, belief):
        """Run MCTS iterations from `belief` until the time budget expires.

        :return: (scores, plays) keyed by action-path tuples; scores hold
            accumulated discounted rewards, plays hold visit counts.
        """
        # Monte Carlo
        t0 = time.time()
        timeout = self._timeout
        horizon = self._horizon
        inverse_discount = self._inverse_discount
        start_belief = torch.FloatTensor([belief]).to(device)
        # One-hot encoding of each action, reused for all model inputs.
        actions = torch.eye(self._game.num_cards).float().to(device)
        num_actions = self._game.num_cards
        list_actions = list(range(num_actions))
        nodes = {tuple(): start_belief}
        plays = defaultdict(int)
        reward_cache = {}
        scores = defaultdict(float)
        lowest_score = 1
        while time.time() - t0 < timeout:
            current = tuple()
            plays[current] += 1
            total_reward = 0
            # Selection: descend through already-expanded nodes by UCB.
            while len(current) < horizon and current + (0,) in plays:
                action_values = [MonteCarloAgent.ucb(scores[current + (a,)],
                                                     plays[current + (a,)],
                                                     plays[current],
                                                     lowest_score)
                                 for a in list_actions]
                selected_action = max(list_actions, key=lambda a: action_values[a])
                reward = self.get_transition_reward(current, selected_action, reward_cache, nodes,
                                                    actions)
                total_reward = inverse_discount * total_reward + reward
                current = current + (selected_action,)
                plays[current] += 1
            # Expansion: mark this node expanded by seeding a child count.
            if len(current) < horizon and current + (0,) not in plays:
                plays[current + (0,)] = 0
                selected_action = random.randint(0, num_actions - 1)  # TODO: only expand legally
                reward = self.get_transition_reward(current, selected_action, reward_cache, nodes,
                                                    actions)
                total_reward = inverse_discount * total_reward + reward
                current = current + (selected_action,)
                plays[current] += 1
            final_current = current
            # Simulation: random rollout to the horizon (not recorded in plays).
            while len(current) < horizon:
                selected_action = random.randint(0, num_actions - 1)  # TODO: only expand legally
                reward = self.get_transition_reward(current, selected_action, reward_cache, nodes,
                                                    actions)
                total_reward = inverse_discount * total_reward + reward
                current = current + (selected_action,)
            # Backpropagation: credit every prefix of the expanded path.
            for i in range(horizon + 1):
                scores[final_current[:i]] += total_reward.item()
            lowest_score = min(lowest_score, total_reward)
            # detach tensors
        return scores, plays
class ModelBasedAgent(BeliefBasedAgent):
    """Agent that plans with learned transition/reward models via a
    depth-limited breadth-first search over its belief state."""
    def __init__(self, game: TrickTakingGame,
                 player_number: int,
                 transition_model: Union[TransitionModel, MultitaskTransitionModel],
                 reward_model: Union[RewardModel, MultitaskRewardModel]):
        super().__init__(game, player_number)
        self._task_name = game.name
        self._transition_model = transition_model
        self._reward_model = reward_model
        self._current_observation = None
        # Build the one-hot action matrix once per game size and share it.
        if self._game.num_cards not in action_tensor_cache:
            action_tensor_cache[self._game.num_cards] = torch.eye(self._game.num_cards).to(device)

    def observe(self, action: Tuple[int, int], observation: List[int], reward: int):
        """Record the latest observation after an action is applied."""
        super().observe(action, observation, reward)
        self._current_observation = observation

    def act(self, epsilon: float = 0) -> Card:
        """Choose a card: random with probability epsilon, otherwise the
        first action of the highest-value path found by searching the
        learned models to depth `horizon`."""
        if np.random.rand() <= epsilon:
            return self._game.index_to_card(random.randint(0, self._game.num_cards - 1))
        # valid_cards = self._get_hand(self._current_observation, valid_only=True)
        # return random.sample(valid_cards, 1)[0]
        # search
        horizon = 1
        inverse_discount = 1.1
        actions = self._game.num_cards
        nodes = deque()
        nodes.append((torch.FloatTensor([self._belief]).to(device), None, 0,
                      0))  # belief, first_action, reward, steps
        best_first_action = 0
        best_score = -float('inf')
        while len(nodes):
            belief, first_action, reward, steps = nodes.popleft()
            if steps == horizon: break
            # Score every action from this belief in one batched forward pass.
            x = torch.cat([belief.repeat(actions, 1), action_tensor_cache[actions]], dim=1)
            action_values = self._reward_model.forward(x, self._task_name)
            next_beliefs = None
            if steps < horizon - 1:
                next_beliefs = self._transition_model.forward(x, self._task_name)
            for i in range(actions):
                new_reward = inverse_discount * reward + action_values[i].item()
                if steps < horizon - 1:
                    # Bug fix: compare against None explicitly -- action index
                    # 0 is falsy, so `if first_action` wrongly replaced a
                    # root action of 0 with the current child index.
                    nodes.append((next_beliefs[i:i + 1],
                                  first_action if first_action is not None else i,
                                  new_reward,
                                  steps + 1))
                elif steps == horizon - 1:
                    if new_reward > best_score:
                        best_score = new_reward
                        best_first_action = i
        return self._game.index_to_card(best_first_action)
class MonteCarloAgent(ModelBasedAgent):
    """Model-based agent that selects actions with Monte Carlo tree search
    over the learned transition and reward models (see mcts())."""
    def __init__(self, game: TrickTakingGame,
                 player_number: int,
                 transition_model: Union[TransitionModel, MultitaskTransitionModel],
                 reward_model: Union[RewardModel, MultitaskRewardModel],
                 timeout: float = 0.5,
                 horizon: int = 4,
                 inverse_discount=1.2):
        # timeout: wall-clock budget per rollout worker (seconds);
        # horizon: search depth; inverse_discount: >1 factor weighting
        # later rewards more heavily.
        super().__init__(game, player_number, transition_model, reward_model)
        self._timeout = timeout
        self._horizon = horizon
        self._inverse_discount = inverse_discount
    @staticmethod
    def ucb(score, plays, parent_plays, lowest_score, c=1.4):
        """Upper-confidence-bound value of an action node.

        Unvisited nodes (plays == 0) get infinite exploration value.
        NOTE(review): exploitation divides by abs(lowest_score) / 5, which
        raises ZeroDivisionError if lowest_score ever reaches 0 -- confirm
        rewards keep it nonzero.
        """
        exploitation = score / plays if plays else 0
        exploitation /= abs(lowest_score) / 5  # normalization
        exploration = c * math.sqrt(math.log(parent_plays) / plays) if plays else float('inf')
        return exploitation + exploration
    def get_transition_reward(self, current, selected_action, reward_cache, nodes, actions):
        """Predicted reward for taking selected_action at path `current`;
        beliefs are computed lazily and cached in `nodes`, rewards memoized
        in `reward_cache`. (Duplicates _MCTSRunner.get_transition_reward.)
        """
        new_node = current + (selected_action,)
        if new_node in reward_cache:
            reward = reward_cache[new_node]
        else:
            if current in nodes:
                belief = nodes[current]
            else:
                # Roll the parent belief forward using the path's last action.
                a = current[-1]
                ba = torch.cat([nodes[current[:-1]], actions[a:a + 1]], dim=1)
                belief = self._transition_model.forward(ba, self._task_name)
                nodes[current] = belief
            belief_action = torch.cat([belief, actions[selected_action:selected_action + 1]], dim=1)
            reward = self._reward_model.forward(belief_action, self._task_name)
            reward_cache[new_node] = reward
        return reward
    def act(self, epsilon: float = 0) -> Card:
        """Return the card to play: random with probability epsilon,
        otherwise the best action chosen by mcts() using 2 workers."""
        if np.random.rand() <= epsilon:
            return self._game.index_to_card(random.randint(0, self._game.num_cards - 1))
        # valid_cards = self._get_hand(self._current_observation, valid_only=True)
        # return random.sample(valid_cards, 1)[0]
        # Monte Carlo
        card_index = mcts(torch.multiprocessing.Pool(2), 2, self._belief, self._game,
                          self._transition_model, self._reward_model, self._task_name)
        return self._game.index_to_card(card_index)
|
# Advent of Code 2019 Day 5: Intcode interpreter supporting parameter modes
# (0 = position, 1 = immediate), opcodes 1-8, and 99 (halt).
def _read_params(code, ptr, modes, count):
    """Return `count` parameter values for the instruction at `ptr`,
    honoring each parameter's mode (missing modes default to position)."""
    values = []
    for i in range(count):
        mode = modes[i] if i < len(modes) else 0
        raw = code[ptr + 1 + i]
        values.append(raw if mode == 1 else code[raw])
    return values

with open('Day 5/input.txt') as f:
    for intcode in f:
        original_intcode = list(map(int, intcode.strip().split(',')))
        intcode = original_intcode.copy()
        program_input = 5  # puzzle part 2 system ID
        p = 0  # instruction pointer
        while True:
            # Decode: the last two digits are the opcode, remaining digits
            # are per-parameter modes read right-to-left. (Fixes the original
            # `intcode[p] == (3 or 4 or 99)` check, which evaluated to
            # `intcode[p] == 3` and misparsed every other instruction.)
            instruction = str(intcode[p])
            opcode = int(instruction[-2:])
            modes = list(map(int, instruction[:-2]))[::-1]
            if opcode == 99:
                break
            if opcode == 1:  # add
                a, b = _read_params(intcode, p, modes, 2)
                intcode[intcode[p + 3]] = a + b
                p += 4
            elif opcode == 2:  # multiply
                a, b = _read_params(intcode, p, modes, 2)
                intcode[intcode[p + 3]] = a * b
                p += 4
            elif opcode == 3:  # input; write targets are always positional
                # (fixes the original `intcode[p] == program_input`, a
                # comparison where an assignment was intended)
                intcode[intcode[p + 1]] = program_input
                p += 2
            elif opcode == 4:  # output
                value, = _read_params(intcode, p, modes, 1)
                print("OUT: ", value)
                p += 2
            elif opcode == 5:  # jump-if-true
                a, b = _read_params(intcode, p, modes, 2)
                p = b if a != 0 else p + 3
            elif opcode == 6:  # jump-if-false
                a, b = _read_params(intcode, p, modes, 2)
                p = b if a == 0 else p + 3
            elif opcode == 7:  # less-than
                a, b = _read_params(intcode, p, modes, 2)
                intcode[intcode[p + 3]] = 1 if a < b else 0
                p += 4
            elif opcode == 8:  # equals
                a, b = _read_params(intcode, p, modes, 2)
                intcode[intcode[p + 3]] = 1 if a == b else 0
                p += 4
# https://github.com/Star-Clouds/CenterFace
from centerface.centerface_model import CenterFace
from common.det_face import DetFace
# Human-readable name of this detector backend.
Name = 'CenterFace'
def __load_model():
    # Instantiate the CenterFace detector (weights handled by the class).
    return CenterFace()
# Single shared model instance, created once at import time.
__model = __load_model()
def detect_faces(frame, thresh=0.2):
    """Run CenterFace on an image frame and wrap detections as DetFace objects.

    Each detection row supplies four box coordinates (indices 0-3) and a
    fifth value passed as the DetFace's first argument -- presumably the
    confidence score; confirm against the CenterFace output format.
    """
    height, width = frame.shape[:2]
    detections, _ = __model(frame, height, width, threshold=thresh)
    faces = []
    for box in detections:
        faces.append(DetFace(box[4], (box[0], box[1], box[2], box[3])))
    return faces
|
import re
import requests
import json
def getObjectsFromAPI(sentence):
    """Parse a sentence with the remote K-Parser web service.

    :param sentence: plain-text sentence to parse
    :return: decoded JSON parse result, or None when the response is not
        valid JSON
    """
    # Put a space before sentence periods (but keep decimal points intact)
    # so the parser tokenizes them separately.
    sentence = re.sub(r'(?<!\d)\.(?!\d)', ' .', sentence)
    text = '+'.join(sentence.split(' '))
    url = 'http://bioai8core.fulton.asu.edu/kparser/ParserServlet?text='+text+'&useCoreference=false'
    r1 = requests.get(url)
    try:
        return json.loads(r1.text)
    except ValueError:
        # json.JSONDecodeError is a ValueError subclass; catching only it
        # avoids masking unrelated bugs the way `except Exception` did.
        print('Parsing Error removing the sentence')
        return None
if __name__ == '__main__':
    # Quick manual check against the live service.
    parse_result = getObjectsFromAPI('John loves mia')
    print(parse_result)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.