text stringlengths 38 1.54M |
|---|
from django.shortcuts import render, redirect
from django.http import HttpResponse, request, HttpResponseRedirect, HttpResponseForbidden
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
from django.views.generic.base import View
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.views import AuthenticationForm, LoginView, LogoutView
from django.views.generic import CreateView, TemplateView
from hyperjob.forms import MyForm
from resume.models import Resume
from vacancy.models import Vacancy
from django.core.exceptions import PermissionDenied
def menu(request):
    """Render the static main-menu page (no context data needed)."""
    return render(request, 'menu/main.html')
class MyHome(View):
    """Home page: lists every resume and vacancy and accepts submissions.

    GET renders the lists with an empty ``MyForm``.
    POST (authenticated users only) creates a ``Vacancy`` for staff users
    or a ``Resume`` for regular users from the submitted description.
    """

    def __init__(self, **kwargs):
        # Accept and forward as_view() initkwargs; the original bare
        # __init__(self) violated Django's View constructor contract.
        super().__init__(**kwargs)
        # Querysets are lazy: they are (re)evaluated when the template
        # iterates them, not at view instantiation.
        self.all_resumes = Resume.objects.all()
        self.all_vacancies = Vacancy.objects.all()

    def get(self, request, *args, **kwargs):
        context = {
            'resumes': self.all_resumes,
            'vacancies': self.all_vacancies,
            'is_staff': request.user.is_staff,
            'form': MyForm(),
            'is_authenticated': request.user.is_authenticated,
        }
        return render(request, 'menu/home.html', context=context)

    def post(self, request, *args, **kwargs):
        form = MyForm(data=request.POST)
        user = request.user
        if not user.is_authenticated:
            raise PermissionDenied
        if form.is_valid():
            description = form.cleaned_data['description']
            # Staff submissions become vacancies; everyone else posts resumes.
            if user.is_staff:
                Vacancy(description=description, author=user).save()
            else:
                Resume(description=description, author=user).save()
            # Post/Redirect/Get: prevents duplicate records on page refresh
            # (the original left this as a commented-out `redirect('/')`).
            return redirect('/')
        # Invalid form: re-render with the bound form so errors are shown.
        context = {
            'resumes': self.all_resumes,
            'vacancies': self.all_vacancies,
            'is_staff': request.user.is_staff,
            'form': form,
        }
        return render(request, 'menu/home.html', context=context)

    def index(self, request, *args, **kwargs):
        # Legacy helper kept for backward compatibility; not dispatched by
        # any HTTP verb.
        context = {
            'resumes': self.all_resumes,
            'vacancies': self.all_vacancies,
            'is_staff': request.user.is_staff,
        }
        return render(request, 'menu/home.html', context=context)
class MyLoginView(LoginView):
    """Login page backed by Django's stock AuthenticationForm."""
    form_class = AuthenticationForm
    # Users who are already logged in are sent straight to the success URL.
    redirect_authenticated_user = True
    template_name = "menu/login.html"
class MySignupView(CreateView):
    """User registration using Django's built-in UserCreationForm."""
    form_class = UserCreationForm
    # After a successful signup, send the new user to the login page.
    success_url = "/login"
    template_name = "menu/signup.html"
class MyLogOutView(LogoutView):
    """Thin alias over Django's LogoutView; default behavior is sufficient."""
    pass
# NOTE(review): dead code — an earlier hand-rolled implementation of the
# signup/login/logout views, kept as a module-level string literal. It is
# superseded by the class-based views above and should eventually be deleted.
'''
class SignUpView(View):
    def get(self, request, *args, **kwargs):
        f = UserCreationForm()
        return render(request, 'signup.html', context={'form': f})
    def post(self, request, *args, **kwargs):
        f = UserCreationForm(request.POST)
        if f.is_valid():
            User.objects.create_user(username=f.cleaned_data['username'], password=f.cleaned_data['password1'])
            return redirect('/')
        return render(request, 'signup.html', {'form': f})
class LoginView(View):
    def get(self, request, *args, **kwargs):
        f = AuthenticationForm()
        return render(request, 'menu/login.html', context={'form': f})
    def post(self, request, *args, **kwargs):
        f = AuthenticationForm(data=request.POST)
        if f.is_valid():
            user = authenticate(username=f.cleaned_data['username'], password=f.cleaned_data['password'])
            if user is not None:
                login(request, user)
                return redirect('/')
            else:
                print("The username and password were incorrect.")
        return render(request, 'login.html', {'form': f})
class LogoutView(View):
    def get(self, request, *args, **kwargs):
        logout(request)
        return redirect('/')
'''
|
from daq import daqDevice
from daqh import DgainX1, DafBipolar, DafUnsigned

# Read one sample from a DaqBoard/2000 ADC and print it in volts.
dev = daqDevice('DaqBoard2K0')
try:
    chan = 0                          # ADC channel to sample
    gain = DgainX1                    # unity gain
    flags = DafBipolar | DafUnsigned  # bipolar range, unsigned raw counts
    raw = dev.AdcRd(chan, gain, flags)
    # Scale the 16-bit unsigned reading onto the +/-10 V bipolar range.
    volts = (20.0 / 2 ** 16) * raw - 10
    print(volts)
finally:
    # Always release the device, even when the read raises.
    dev.Close()
|
#!/usr/bin/python
import paramiko, time, sys
class SetFirewall:
    """Push block/unblock rules for a single bad IP to a firewall over SSH.

    Supported brands: FG (FortiGate), SW (SonicWall), WG (WatchGuard),
    PA (Palo Alto), DP (Radware DefensePro). Each brand has a builder
    method that turns (badip, mode) into the CLI script sent to the box;
    mode is 'edit' to block the IP and 'delete' to unblock it.
    """

    def __init__(self, brand, ssh_ip, ssh_port, ssh_username, ssh_password):
        self.brand = brand
        self.ssh_ip = ssh_ip
        self.ssh_port = int(ssh_port)
        self.ssh_username = ssh_username
        self.ssh_password = ssh_password

    def _fg_ip_to_config(self, badip, _mode):
        """FortiGate: maintain the IP in the 'LastLine_Deny' address group."""
        _addr_cmds = ['', 'config firewall address', 'end', '']
        _adgp_cmds = ['', 'config firewall addrgrp', 'edit "LastLine_Deny"', 'end', '']
        _ip = badip
        _llip = 'll_' + _ip
        if _mode == 'edit':
            _set_host = 'edit ' + _llip
            _set_subnet = 'set subnet ' + _ip + '/32'
            _addr_cmds.insert(2, _set_subnet)
            _addr_cmds.insert(2, _set_host)
            _set_member = 'set member ' + _llip
            _adgp_cmds.insert(3, _set_member)
            # Create the address object first, then attach it to the group.
            _cmds = _addr_cmds + _adgp_cmds
        elif _mode == 'delete':
            _delete_host = 'delete ' + _llip
            _set_member = 'set member _tmp'
            _addr_cmds.insert(2, _delete_host)
            _adgp_cmds.insert(3, _set_member)
            # Detach from the group first, then delete the address object.
            _cmds = _adgp_cmds + _addr_cmds
            # Log out at the end of the delete flow (mirrors the SW builder).
            # NOTE(review): original indentation was ambiguous here; assumed
            # this applies to delete mode only — confirm.
            _cmds.insert(-1, 'exit')
        _scmds = '\n'.join(_cmds)
        return _scmds

    def _sw_ip_to_config(self, badip, _mode):
        """SonicWall: credentials are sent in-band at the start of the script."""
        _cmds = [self.ssh_username, self.ssh_password, 'configure', 'y']
        _ip = badip
        _llip = 'll_' + _ip
        if _mode == 'edit':
            _addobj = 'address-object ' + _llip
            _host = 'host ' + _ip
            _zone = 'zone WAN'
            _end = 'end'
            _addgrp = 'address-group LastLine_Deny'
            _cmds.append(_addobj)
            _cmds.append(_host)
            _cmds.append(_zone)
            _cmds.append(_end)
            _cmds.append(_addgrp)
            _cmds.append(_addobj)
            _cmds.append(_end)
        elif _mode == 'delete':
            _noobj = 'no ' + 'address-object ' + _llip
            _cmds.append(_noobj)
        _cmds.append('end\nexit\n')
        _scmds = '\n'.join(_cmds)
        return _scmds

    def _wg_ip_to_config(self, badip, _mode):
        """WatchGuard: single blocked-site command inside configure mode."""
        _cmds = ['', 'configure', 'exit', '']
        if _mode == 'edit':
            _block_host = 'ip blocked-site host '
        elif _mode == 'delete':
            _block_host = 'no ip blocked-site host '
        _ip = badip
        _block_host = _block_host + _ip
        _cmds.insert(2, _block_host)
        _scmds = '\n'.join(_cmds)
        return _scmds

    def _pa_ip_to_config(self, badip, _mode):
        """Palo Alto: address object + LastLine_Deny group, then commit."""
        _cmds = ['', 'configure', 'save config', 'commit', 'exit', 'exit', '']
        _ip = badip
        _llip = 'll_' + _ip
        if _mode == 'edit':
            _set_host = 'set address ' + _llip + ' ip-netmask ' + _ip + '/32'
            _set_group = 'set address-group LastLine_Deny ' + _llip
            _cmds.insert(2, _set_group)
            _cmds.insert(2, _set_host)
        elif _mode == 'delete':
            _delete_group = 'delete address-group LastLine_Deny ' + _llip
            _delete_host = 'delete address ' + _llip
            _cmds.insert(2, _delete_host)
            _cmds.insert(2, _delete_group)
        _scmds = '\n'.join(_cmds)
        return _scmds

    def _dp_ip_to_config(self, badip, _mode):
        """Radware DefensePro: network class + black-list table entry."""
        _ip_cmds = ['classes modify network ', ' 1']
        _dp_table = ['dp black-list table ']
        _ip = badip
        _llip = 'll_' + _ip
        # Policies must be refreshed after every class change.
        _update = 'classes update-policies set 1\n'
        if _mode == 'edit':
            _ip_cmds.append(' -a ' + _ip + ' -s 255.255.255.255 -m "IP Mask"')
            _ip_cmds.insert(1, 'create ' + _llip)
            _dp_table.append('create ' + _llip + ' -dn ' + _llip)
            _ip_cmds = ''.join(_ip_cmds)
            _dp_table = ''.join(_dp_table)
            _scmds = _ip_cmds + '\n' + _dp_table + '\n' + ' '
        elif _mode == 'delete':
            _dp_table.append('del ' + _llip)
            _ip_cmds.insert(-1, 'del ' + _llip)
            _dp_table = ''.join(_dp_table)
            _ip_cmds = ''.join(_ip_cmds)
            _scmds = _dp_table + '\n' + _ip_cmds + '\n'
        return _scmds + _update

    def _build_cmds(self, badip, _mode):
        """Dispatch to the brand-specific command builder."""
        builders = {
            'FG': self._fg_ip_to_config,
            'WG': self._wg_ip_to_config,
            'PA': self._pa_ip_to_config,
            'DP': self._dp_ip_to_config,
            'SW': self._sw_ip_to_config,
        }
        return builders[self.brand](badip, _mode)

    def _push_config(self, badip, _mode):
        """Build the command script for *badip* and send it over SSH.

        Shared implementation of do_config/clean_config (the originals were
        duplicated verbatim apart from the mode).
        """
        _ssh = paramiko.SSHClient()
        _ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        _scmds = self._build_cmds(badip, _mode)
        if self.brand == 'SW':
            # SonicWall receives its credentials in-band (see the builder),
            # so the SSH session itself logs in with blank credentials.
            # NOTE(review): original indentation was ambiguous; assumed this
            # applied to the SW branch only — confirm.
            self.ssh_username = ''
            self.ssh_password = ''
        print(_scmds)
        _sleep_time = 3
        try:
            _ssh.connect(self.ssh_ip, port=self.ssh_port,
                         username=self.ssh_username, password=self.ssh_password)
            _chan = _ssh.invoke_shell()
            _chan.sendall(_scmds)
            # Give the device time to process before draining the output.
            time.sleep(_sleep_time)
            print(_chan.recv(1024000))
            _chan.close()
        except Exception:
            # Best-effort by design: log and carry on (was a bare except).
            print('WARNING: Unable ssh to firewall !!!')
        _ssh.close()

    def do_config(self, badip):
        """Block *badip* on the firewall."""
        self._push_config(badip, 'edit')

    def clean_config(self, badip):
        """Remove the block for *badip* from the firewall."""
        self._push_config(badip, 'delete')
def main():
    """Smoke test: print the DefensePro block script for a sample IP.

    Builds the commands only; nothing is sent to a device.
    """
    setupfw = SetFirewall('DP', '1.1.1.1', '2222', 'sshusername', 'sshpassword')
    scmds = setupfw._dp_ip_to_config('5.5.5.5', 'edit')
    print(scmds)


if __name__ == '__main__':
    main()
|
from tkinter import *
from tkinter import ttk
# Demo: a ttk.Notebook with two tabs and buttons to hide/show/select tab 2.
root = Tk()
root.title("Learn To Code")
root.iconbitmap('favicon.ico')  # raises TclError if favicon.ico is missing
root.geometry("500x500")

my_notebook = ttk.Notebook(root)
my_notebook.pack()


def hide():
    """Hide the red tab (index 1); its state is kept so it can be re-added."""
    my_notebook.hide(1)


def show():
    """Re-attach the red frame as a tab."""
    my_notebook.add(my_frame2, text="Red Tab")


def select():
    """Jump to the red tab."""
    my_notebook.select(1)


my_frame1 = Frame(my_notebook, width=500, height=500, bg="blue")
my_frame2 = Frame(my_notebook, width=500, height=500, bg="red")
my_frame1.pack(fill="both", expand=1)
my_frame2.pack(fill="both", expand=1)

my_notebook.add(my_frame1, text="Blue Tab")
my_notebook.add(my_frame2, text="Red Tab")

# Keep the widget references: the original chained Button(...).pack(...),
# which binds None (pack returns None) to each my_button* variable.
my_button = Button(my_frame1, text="Hide Tab 2", command=hide)
my_button.pack(pady=10)
my_button2 = Button(my_frame1, text="show Tab 2", command=show)
my_button2.pack(pady=10)
my_button3 = Button(my_frame1, text="Navigate to Tab 2", command=select)
my_button3.pack(pady=10)

root.mainloop()
|
# Copyright 2016 Brocade Communications Systems, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from ne_base import NosDeviceAction
from ne_base import log_exceptions
from execute_cli import CliCMD
class CreateVRF(NosDeviceAction):
    """
    Implements the logic to create a VRF on VDX switches.

    This action achieves the below functionality:
        1. Create the VRF (per rbridge id), and optionally its address
           family with a route distinguisher.
    """
    def run(self, mgmt_ip, username, password, vrf_name, rbridge_id, afi, rd):
        """Run helper methods to implement the desired state.

        Connects to the device, then delegates to switch_operation.
        Exits the process with -1 if the connection cannot be set up.
        """
        try:
            self.setup_connection(host=mgmt_ip, user=username, passwd=password)
        except Exception as e:
            # NOTE(review): `e.message` is Python-2 only (AttributeError on
            # Python 3) — confirm the target runtime.
            self.logger.error(e.message)
            sys.exit(-1)
        changes = {}
        # `changes` is filled with the operations performed and returned.
        return self.switch_operation(afi, changes, rbridge_id, vrf_name, rd)
    @log_exceptions
    def switch_operation(self, afi, changes, rbridge_id, vrf_name, rd):
        # pmgr is a context manager that opens/closes the management session.
        with self.pmgr(conn=self.conn, auth_snmp=self.auth_snmp) as device:
            self.logger.info('successfully connected to %s to Create VRF '
                             'for tenants',
                             self.host)
            # When the platform supports rbridges and none was given, target
            # the vLAG pair. NOTE: `suports_rbridge` [sic] is the attribute
            # name exposed by the device API.
            if device.suports_rbridge and rbridge_id is None:
                rbridge_id = self.vlag_pair(device)
            if rbridge_id:
                # rbridge_id is iterable here: repeat the creation steps on
                # each rbridge in turn.
                for rb_id in rbridge_id:
                    self.validate_supports_rbridge(device, rb_id)
                    validation_VRF = self._check_requirements_VRF(device, rb_id,
                                                                  vrf_name)
                    if validation_VRF:
                        changes['Create_VRF'] = self._create_VRF(device, rb_id,
                                                                 vrf_name)
                    validate_vrf_afi = self._validate_vrf_afi(device, rb_id,
                                                              vrf_name, afi)
                    if validate_vrf_afi:
                        changes['Create_address_family'] = self._create_vrf_afi(
                            device, rb_id,
                            vrf_name, afi, rd)
                self.logger.info('closing connection to %s after Create VRF '
                                 '- all done!',
                                 self.host)
                if 'Create_VRF' in changes:
                    self._fetch_VRF_state(device, vrf_name)
            else:
                # Single rbridge id (possibly None on platforms without
                # rbridge support): same flow without the loop.
                self.validate_supports_rbridge(device, rbridge_id)
                validation_VRF = self._check_requirements_VRF(device, rbridge_id,
                                                              vrf_name)
                if validation_VRF:
                    changes['Create_VRF'] = self._create_VRF(device, rbridge_id,
                                                             vrf_name)
                validate_vrf_afi = self._validate_vrf_afi(device, rbridge_id,
                                                          vrf_name, afi)
                if validate_vrf_afi:
                    changes['Create_address_family'] = self._create_vrf_afi(
                        device, rbridge_id,
                        vrf_name, afi, rd)
                self.logger.info('closing connection to %s after Create VRF '
                                 '- all done!',
                                 self.host)
                if 'Create_VRF' in changes:
                    self._fetch_VRF_state(device, vrf_name)
            return changes
    def _check_requirements_VRF(self, device, rbridge_id, vrf_name):
        """Pre-checks to identify the existing VRF configurations.

        Returns False (skip creation) when a VRF with this name already
        exists on the rbridge, True otherwise.
        """
        vrf_output = device.interface.vrf(get=True, rbridge_id=rbridge_id)
        if vrf_output is not None:
            for each_vrf in vrf_output:
                if each_vrf['vrf_name'] == vrf_name:
                    self.logger.info('VRF %s already exists',
                                     vrf_name)
                    return False
        return True
    def _validate_vrf_afi(self, device, rbridge_id, vrf_name, afi):
        """Pre-checks to identify VRF address-family configurations.

        Returns False when the address family is already configured.
        """
        afi_status = device.interface.vrf_afi(
            get=True, rbridge_id=rbridge_id, vrf_name=vrf_name)
        if afi_status[afi]:
            # NOTE(review): the args look swapped relative to the message
            # placeholders (prints the afi where the VRF name belongs).
            self.logger.info('VRF %s address family already configured for %s',
                             afi, vrf_name)
            return False
        return True
    def _create_vrf_afi(self, device, rbridge_id, vrf_name, afi, rd):
        """Create the address family (and RD) under the VRF."""
        try:
            self.logger.info(
                'Creating %s address family for VRF %s ',
                afi,
                vrf_name)
            device.interface.vrf_afi(
                vrf_name=vrf_name, rbridge_id=rbridge_id, afi=afi, rd=rd)
        except (ValueError, KeyError) as e:
            # NOTE(review): `e.message` is Python-2 only; also ValueError
            # does not %-format its extra arguments.
            error_message = str(e.message)
            self.logger.error(error_message)
            raise ValueError('Invalid Input types while creating %s address '
                             'family on VRF %s',
                             afi, vrf_name)
            return False  # unreachable: the raise above always fires
        return True
    def _create_VRF(self, device, rbridge_id, vrf_name):
        """Create the VRF itself on the given rbridge."""
        try:
            self.logger.info('Creating VRF %s ', vrf_name)
            self.logger.info('vrf name type %s', type(vrf_name))
            self.logger.info('Rbridge id type %s', type(rbridge_id))
            device.interface.vrf(vrf_name=vrf_name, rbridge_id=rbridge_id)
        except (ValueError, KeyError) as e:
            # NOTE(review): `e.message` is Python-2 only — confirm runtime.
            error_message = str(e.message)
            self.logger.error(error_message)
            raise ValueError('Invalid Input types while creating VRF %s',
                             vrf_name)
        return True
    def _fetch_VRF_state(self, device, vrf_name):
        """Fetch and log `show vrf` output for the new VRF via the CLI helper.
        """
        exec_cli = CliCMD()
        host_ip = self.host
        # auth_snmp holds (username, password, ...) — positions taken from
        # the usage here.
        host_username = self.auth_snmp[0]
        host_password = self.auth_snmp[1]
        cli_arr = []
        cli_cmd = 'show vrf "' + vrf_name + '"'
        cli_arr.append(cli_cmd)
        # NetIron devices use the 'ni' CLI flavor; everything else is NOS.
        device_type = 'ni' if device.os_type == 'NI' else 'nos'
        raw_cli_output = exec_cli.execute_cli_command(mgmt_ip=host_ip,
                                                      username=host_username,
                                                      password=host_password,
                                                      cli_cmd=cli_arr, device_type=device_type)
        output = str(raw_cli_output)
        self.logger.info(output)
        return True
|
'''
create binary tree:
https://www.youtube.com/watch?v=XV1ADVV6FbQ&list=PL-Jc9J83PIiHYxUk8dSu2_G7MR1PaGXN4&index=3
display binary tree:
https://www.youtube.com/watch?v=sYU6AnSJyjo&list=PL-Jc9J83PIiHYxUk8dSu2_G7MR1PaGXN4&index=4
'''
class Node:
    """One node of a binary tree: a payload plus left/right child links."""

    def __init__(self, data, left, right):
        # Children may be None for missing subtrees.
        self.left = left
        self.right = right
        self.data = data
class Pair:
    """Stack frame for iterative tree construction: a node plus visit state.

    Per create_binary_tree: state 1 means the left child is still pending,
    state 2 means the right child is pending, anything else means done.
    """

    def __init__(self, node, state):
        self.state = state
        self.node = node
def create_binary_tree(arr):
    """Build a binary tree from a pre-order list where None marks a missing child.

    Iterative construction using an explicit stack of Pair(node, state):
    state 1 consumes the next array value as the left child, state 2 as the
    right child, and state 3 pops the finished node. Returns the root Node.
    """
    st = []
    # Create the root node and its stack frame.
    root = Node(arr[0], None, None)
    st.append(Pair(root, 1))
    idx = 0
    while st:
        top = st[-1]
        if top.state == 1:
            # Next array entry is this node's left child.
            idx += 1
            if arr[idx] is not None:  # `is not None` (was `!= None`)
                left_node = Node(arr[idx], None, None)
                top.node.left = left_node
                st.append(Pair(left_node, 1))
            else:
                top.node.left = None
            top.state += 1
        elif top.state == 2:
            # Next array entry is this node's right child.
            idx += 1
            if arr[idx] is not None:
                right_node = Node(arr[idx], None, None)
                top.node.right = right_node
                st.append(Pair(right_node, 1))
            else:
                top.node.right = None
            top.state += 1
        else:
            # Both children handled: this node is complete.
            st.pop()
    return root
def display_binary_tree(root):
    """Print the tree rooted at `root`, one line per node, in pre-order.

    Each line shows `left <- value -> right`, with "." standing in for a
    missing child. No-op for an empty (None) tree.
    """
    if root is None:  # `is None` (was `== None`)
        return
    val = ""
    val += str(root.left.data) if root.left is not None else "."
    val += " <- " + str(root.data) + " -> "
    val += str(root.right.data) if root.right is not None else "."
    print(val)
    display_binary_tree(root.left)
    display_binary_tree(root.right)
if __name__ == "__main__":
    # Pre-order serialization of a sample tree; None marks a missing child.
    arr = [50, 25, 12, None, None, 37, 30, None, None, None, 75, 62, None, 70, None, None, 87, None, None]
    root = create_binary_tree(arr)
    print(root.data)
    display_binary_tree(root)
|
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 27 18:07:30 2020
@author: willi
"""
#Test file for generalized SSA
#import rSNAPsim as rss
import numpy as np
import time
import matplotlib.pyplot as plt
import ssa_translation_generic_lowmem
def generate_additional_ks(enters, pauses, jumps, stops, L):
    """Map per-frame reaction specs onto a single concatenated coordinate axis.

    Each spec row gives a position and a reading frame (jumps give two
    position/frame pairs); positions in frame f are shifted so the three
    frames tile one axis of length ~3*L. Inputs are copied, never mutated.
    Returns (k_enters, k_pauses, k_stops, k_jumps, frames_used) where
    frames_used is 0, 1 or 2 depending on the highest frame touched.
    """

    def _frame_shift_c1(L, a):
        # Offset implied by the frame stored in column 1.
        return (L - a[:, 1] + 1) * (a[:, 1] > 0) + L * (a[:, 1] > 1)

    def _frame_shift_c3(L, a):
        # Offset implied by the frame stored in column 3 (jump target).
        return (L - a[:, 3] + 1) * (a[:, 3] > 0) + L * (a[:, 3] > 1)

    def _collapse_single(L, a):
        # (pos, frame, rate) -> (absolute pos, rate); also report the max pos.
        a[:, 0] = a[:, 0] + _frame_shift_c1(L, a)
        a[:, 1] = a[:, 2]
        a = a[:, 0:2]
        return a, np.max(a[:, 0])

    def _collapse_jump(L, a):
        # (pos, frame, pos2, frame2, rate) -> (abs pos, abs pos2, rate).
        a[:, 0] = a[:, 0] + _frame_shift_c1(L, a)
        a[:, 1] = a[:, 2] + _frame_shift_c3(L, a)
        a[:, 2] = a[:, 4]
        a = a[:, 0:3]
        return a, max([np.max(a[:, 0]), np.max(a[:, 1])])

    # Work on copies so the caller's arrays are left untouched.
    k_jumps = np.copy(jumps)
    k_pauses = np.copy(pauses)
    k_stops = np.copy(stops)
    k_enters = np.copy(enters)

    max_enter = max_pause = max_stop = max_jump = 0
    if len(k_enters) != 0:
        k_enters, max_enter = _collapse_single(L, k_enters)
    if len(k_pauses) != 0:
        k_pauses, max_pause = _collapse_single(L, k_pauses)
    if len(k_stops) != 0:
        k_stops, max_stop = _collapse_single(L, k_stops)
    if len(k_jumps) != 0:
        k_jumps, max_jump = _collapse_jump(L, k_jumps)

    # Highest absolute location decides how many extra frames are in play.
    max_loc = max(max_jump, max_stop, max_pause, max_enter)
    if max_loc > 2 * L - 1:
        frames_used = 2
    elif max_loc > L:
        frames_used = 1
    else:
        frames_used = 0
    return k_enters, k_pauses, k_stops, k_jumps, frames_used
#rsnap = rss.rSNAPsim()
#rsnap.open_seq_file('gene_files/H2B_withTags.txt')
#rsnap.run_default()
# ---------------------------------------------------------------------------
# Scenario 1: 100-codon construct, entries in frames 0 and +2, one
# frameshift site and three stop sites; uses the low-memory SSA signature
# (with a probe matrix, no ribtimes/coltimes/nribs outputs).
# ---------------------------------------------------------------------------
k = np.ones((1,300)).flatten()
kelong = k[1:-1]
# Zero elongation rate at the stop position of each reading frame.
kelong[49] = 0
kelong[149]= 0
kelong[248] = 0
#k_fss = np.array([[200,0,200,1,.3]])
k_pause = np.array([[30,0,.005]])
# Spec rows are (position, frame, rate); jumps are (pos, frame, pos2, frame2, rate).
k_enters = np.array([[5,0,.02],[5,2,.04]],dtype=np.float64)
k_stops = np.array([[50,0,10],[50,1,10],[50,2,10]],dtype=np.float64)
k_fss = np.array([[20,0,20,1,1]],dtype=np.float64)
#k_pause = np.array([[30,2,100],[40,2,100]],dtype=np.float64)
k_enters,k_pauses,k_stops,k_jumps,frames_used = generate_additional_ks(k_enters,[],k_fss,k_stops,100)
t_array = np.array([0,100,500],dtype=np.float64)
t0 = 15
t_array = np.linspace(0,400,400,dtype=np.float64)
N_rib = 200
result = np.zeros((len(t_array)*N_rib),dtype=np.int32 )
#kelong = np.array([3.1,3.2,3.3,3.4,3.5,3.1,3.2,3.3,3.4,3.5],dtype=np.float64)
n_trajectories = 10
start = time.time()
all_results = np.zeros((n_trajectories,2,len(t_array)),dtype=np.int32)
lenfrap = len(np.intersect1d(np.where(t_array>0)[0],np.where(t_array<20)[0]))
all_frapresults = np.zeros((n_trajectories,N_rib*len(t_array)),dtype=np.int32)
all_ribtimes = np.zeros((n_trajectories,400),dtype=np.float64)
all_coltimes = np.zeros((n_trajectories,400),dtype=np.int32)
nribs = np.array([0],dtype=np.int32)
all_ribs = np.zeros((n_trajectories,1))
seeds = np.random.randint(0,0x7FFFFFF,n_trajectories)
# Flattened extra-reaction parameters handed to the C/Cython SSA core.
k_add = np.hstack((k_enters.flatten(),k_pauses.flatten(),k_stops.flatten(),k_jumps.flatten() ))
t_array_copy = np.copy(t_array)
# Stack the time axis to N_rib rows for the per-ribosome scatter plots below.
while t_array_copy.shape[0] != 200:
    t_array_copy = np.vstack((t_array_copy,t_array))
# Probe (fluorescent tag) positions, one row per color channel.
probe = np.zeros((298,2)).T
probe[0,10] = 1
probe[0,20] = 1
probe[1,225] = 1
probe[1,215] = 1
#probe = np.cumsum(probe,axis=1)
probe = probe.astype(int).copy(order='C')
for i in range(n_trajectories):
    result = np.zeros((2,len(t_array)),dtype=np.int32)
    frapresult = np.zeros((len(t_array)*N_rib),dtype=np.int32)
    # NOTE(review): this call signature (probe matrix, counts 2,0,3,1)
    # differs from the later sections — assumed to match the low-mem build.
    ssa_translation_generic_lowmem.run_SSA_generic(result, kelong,frapresult,t_array, np.array([0,0,0],dtype=np.float64), seeds[i], k_add.flatten() ,2,0,3,1, probe, 2,200 )
    all_results[i,:,:] = result
    all_frapresults[i,:] = frapresult
    print(result.shape)
    print(kelong.shape),
    print(frapresult.shape)
    print(k_add.flatten().shape)
    print(probe.T.astype(int).shape)
traj = all_results[0,:,:].reshape((2,len(t_array))).T
f,ax = plt.subplots(2,1)
ax[0].set_ylim([0,300])
# Shade the three reading-frame bands (0 / +1 / +2).
ax[0].fill_between([0,400],[100,100],color='red',alpha=.2)
ax[0].fill_between([0,400],[200,200],color='green',alpha=.2)
ax[0].fill_between([0,400],[300,300],color='blue',alpha=.2)
ax[0].plot(traj,'.')
ax[0].set_xlabel('Time')
ax[0].set_ylabel('Ribosome Location')
ax[0].set_title(' 100 codons, enters: 0,10 and +2,10 FSS: 0,20 to +1,20 Stops: 50 0,1,2' )
# Fold the concatenated coordinate back into a 0-99 spatial position.
spatial_x = (traj + (traj > 100) + (traj > 199))%100
ax[1].set_ylim([0,100])
#ax[1].plot(t_array,spatial_x,'.')
ax[1].plot(t_array_copy.T[traj<=100],spatial_x[traj <= 100],'r.')
ax[1].plot(t_array_copy.T[traj>100],spatial_x[traj > 100],'g.')
ax[1].plot(t_array_copy.T[traj>199],spatial_x[traj > 199],'b.')
ax[1].set_xlabel('Time')
ax[1].set_ylabel('Ribosome Location')
ax[1].set_title(' spatial location ' )
ax[1].legend(['0','+1','+2'])
# NOTE(review): deliberate ZeroDivisionError — halts the script here, so
# everything below never runs in this version of the file.
1/0
###################################################################
# Scenario 2: single frame-0 entry with two frame-0 stops; full SSA
# signature (ribtimes/coltimes/nribs outputs).
k = np.ones((1,300)).flatten()
kelong = k[1:-1]
kelong[49] = 3
kelong[79] = 0
k_enters = np.array([[10,0,.04]],dtype=np.float64)
k_stops = np.array([[50,0,10],[80,0,10]],dtype=np.float64)
k_fss = []
k_pause = []
#k_pause = np.array([[30,2,100],[40,2,100]],dtype=np.float64)
k_enters,k_pauses,k_stops,k_jumps,frames_used = generate_additional_ks(k_enters,k_pause,k_fss,k_stops,100)
t_array = np.array([0,100,500],dtype=np.float64)
t0 = 15
t_array = np.linspace(0,400,400,dtype=np.float64)
N_rib = 200
result = np.zeros((len(t_array)*N_rib),dtype=np.int32 )
#kelong = np.array([3.1,3.2,3.3,3.4,3.5,3.1,3.2,3.3,3.4,3.5],dtype=np.float64)
n_trajectories = 1
start = time.time()
all_results = np.zeros((n_trajectories,N_rib*len(t_array)),dtype=np.int32)
lenfrap = len(np.intersect1d(np.where(t_array>0)[0],np.where(t_array<20)[0]))
all_frapresults = np.zeros((n_trajectories,N_rib*len(t_array)),dtype=np.int32)
all_ribtimes = np.zeros((n_trajectories,400),dtype=np.float64)
all_coltimes = np.zeros((n_trajectories,400),dtype=np.int32)
nribs = np.array([0],dtype=np.int32)
all_ribs = np.zeros((n_trajectories,1))
seeds = np.random.randint(0,0x7FFFFFF,n_trajectories)
k_add = np.hstack((k_enters.flatten(),k_pauses.flatten(),k_stops.flatten(),k_jumps.flatten() ))
t_array_copy = np.copy(t_array)
while t_array_copy.shape[0] != 200:
    t_array_copy = np.vstack((t_array_copy,t_array))
for i in range(n_trajectories):
    result = np.zeros((len(t_array)*N_rib),dtype=np.int32)
    frapresult = np.zeros((len(t_array)*N_rib),dtype=np.int32)
    ribtimes = np.zeros((400),dtype=np.float64)
    coltimes = np.zeros((400),dtype=np.int32)
    ssa_translation_generic_lowmem.run_SSA_generic(result,ribtimes,coltimes, kelong,frapresult,t_array, np.array([0,0,0],dtype=np.float64), seeds[i],nribs, k_add.flatten() ,len(k_enters),len(k_pauses),len(k_stops),len(k_jumps) )
    all_results[i,:] = result
    all_frapresults[i,:] = frapresult
    all_coltimes[i,:] = coltimes
    all_ribtimes[i,:] = ribtimes
    all_ribs[i,:] = nribs[0]
traj = all_results[0,:].reshape((N_rib,len(t_array))).T
f,ax = plt.subplots(2,1)
ax[0].set_ylim([0,300])
ax[0].fill_between([0,400],[100,100],color='red',alpha=.2)
ax[0].fill_between([0,400],[200,200],color='green',alpha=.2)
ax[0].fill_between([0,400],[300,300],color='blue',alpha=.2)
ax[0].plot(traj,'.')
ax[0].set_xlabel('Time')
ax[0].set_ylabel('Ribosome Location')
ax[0].set_title(' 100 codons, enters: 0,10 stops: 0,50 and 0,80' )
spatial_x = (traj + (traj > 100) + (traj > 199))%100
ax[1].set_ylim([0,100])
#ax[1].plot(t_array,spatial_x,'.')
ax[1].plot(t_array_copy.T[traj<=100],spatial_x[traj <= 100],'r.')
ax[1].plot(t_array_copy.T[traj>100],spatial_x[traj > 100],'g.')
ax[1].plot(t_array_copy.T[traj>199],spatial_x[traj > 199],'b.')
ax[1].set_xlabel('Time')
ax[1].set_ylabel('Ribosome Location')
ax[1].set_title(' spatial location ' )
ax[1].legend(['0','+1','+2'])
#####################################################
# Scenario 3: frame-0 entry, stops in frames 0 and +1, one 0->+1
# frameshift site (hard-coded reaction counts 1,0,2,1).
k = np.ones((1,300)).flatten()
kelong = k[1:-1]
kelong[49] = 0
kelong[179] = 0
k_enters = np.array([[10,0,.04]],dtype=np.float64)
k_stops = np.array([[50,0,10],[80,1,10]],dtype=np.float64)
k_fss = np.array([[30,0,30,1,1]],dtype=np.float64)
k_pause = []
#k_pause = np.array([[30,2,100],[40,2,100]],dtype=np.float64)
k_enters,k_pauses,k_stops,k_jumps,frames_used = generate_additional_ks(k_enters,k_pause,k_fss,k_stops,100)
t_array = np.array([0,100,500],dtype=np.float64)
t0 = 15
t_array = np.linspace(0,400,400,dtype=np.float64)
N_rib = 200
result = np.zeros((len(t_array)*N_rib),dtype=np.int32 )
#kelong = np.array([3.1,3.2,3.3,3.4,3.5,3.1,3.2,3.3,3.4,3.5],dtype=np.float64)
n_trajectories = 1
start = time.time()
all_results = np.zeros((n_trajectories,N_rib*len(t_array)),dtype=np.int32)
lenfrap = len(np.intersect1d(np.where(t_array>0)[0],np.where(t_array<20)[0]))
all_frapresults = np.zeros((n_trajectories,N_rib*len(t_array)),dtype=np.int32)
all_ribtimes = np.zeros((n_trajectories,400),dtype=np.float64)
all_coltimes = np.zeros((n_trajectories,400),dtype=np.int32)
nribs = np.array([0],dtype=np.int32)
all_ribs = np.zeros((n_trajectories,1))
seeds = np.random.randint(0,0x7FFFFFF,n_trajectories)
k_add = np.hstack((k_enters.flatten(),k_pauses.flatten(),k_stops.flatten(),k_jumps.flatten() ))
t_array_copy = np.copy(t_array)
while t_array_copy.shape[0] != 200:
    t_array_copy = np.vstack((t_array_copy,t_array))
for i in range(n_trajectories):
    result = np.zeros((len(t_array)*N_rib),dtype=np.int32)
    frapresult = np.zeros((len(t_array)*N_rib),dtype=np.int32)
    ribtimes = np.zeros((400),dtype=np.float64)
    coltimes = np.zeros((400),dtype=np.int32)
    ssa_translation_generic_lowmem.run_SSA_generic(result,ribtimes,coltimes, kelong,frapresult,t_array, np.array([0,0,0],dtype=np.float64), seeds[i],nribs, k_add.flatten() ,1,0,2,1 )
    all_results[i,:] = result
    all_frapresults[i,:] = frapresult
    all_coltimes[i,:] = coltimes
    all_ribtimes[i,:] = ribtimes
    all_ribs[i,:] = nribs[0]
traj = all_results[0,:].reshape((N_rib,len(t_array))).T
f,ax = plt.subplots(2,1)
ax[0].set_ylim([0,300])
ax[0].fill_between([0,400],[100,100],color='red',alpha=.2)
ax[0].fill_between([0,400],[200,200],color='green',alpha=.2)
ax[0].fill_between([0,400],[300,300],color='blue',alpha=.2)
ax[0].plot(traj,'.')
ax[0].set_xlabel('Time')
ax[0].set_ylabel('Ribosome Location')
ax[0].set_title(' 100 codons, enters: 0,10 stops: 0,50 and 1,80 FSS: 0,30 to 1,30' )
spatial_x = (traj + (traj > 100) + (traj > 199))%100
ax[1].set_ylim([0,100])
#ax[1].plot(t_array,spatial_x,'.')
ax[1].plot(t_array_copy.T[traj<=100],spatial_x[traj <= 100],'r.')
ax[1].plot(t_array_copy.T[traj>100],spatial_x[traj > 100],'g.')
ax[1].plot(t_array_copy.T[traj>199],spatial_x[traj > 199],'b.')
ax[1].set_xlabel('Time')
ax[1].set_ylabel('Ribosome Location')
ax[1].set_title(' spatial location ' )
ax[1].legend(['0','+1','+2'])
######################
# Scenario 4: entries in frames 0 and +2, stops in frames 0 and +2,
# no frameshift or pause (counts 2,0,2,0).
k = np.ones((1,300)).flatten()
kelong = k[1:-1]
kelong[49] = 0
kelong[278] = 0
k_enters = np.array([[10,0,.04],[10,2,.02]],dtype=np.float64)
k_stops = np.array([[50,0,10],[80,2,10]],dtype=np.float64)
k_fss = []
k_pause = []
#k_pause = np.array([[30,2,100],[40,2,100]],dtype=np.float64)
k_enters,k_pauses,k_stops,k_jumps,frames_used = generate_additional_ks(k_enters,k_pause,k_fss,k_stops,100)
t_array = np.array([0,100,500],dtype=np.float64)
t0 = 15
t_array = np.linspace(0,400,400,dtype=np.float64)
N_rib = 200
result = np.zeros((len(t_array)*N_rib),dtype=np.int32 )
#kelong = np.array([3.1,3.2,3.3,3.4,3.5,3.1,3.2,3.3,3.4,3.5],dtype=np.float64)
n_trajectories = 1
start = time.time()
all_results = np.zeros((n_trajectories,N_rib*len(t_array)),dtype=np.int32)
lenfrap = len(np.intersect1d(np.where(t_array>0)[0],np.where(t_array<20)[0]))
all_frapresults = np.zeros((n_trajectories,N_rib*len(t_array)),dtype=np.int32)
all_ribtimes = np.zeros((n_trajectories,400),dtype=np.float64)
all_coltimes = np.zeros((n_trajectories,400),dtype=np.int32)
nribs = np.array([0],dtype=np.int32)
all_ribs = np.zeros((n_trajectories,1))
seeds = np.random.randint(0,0x7FFFFFF,n_trajectories)
k_add = np.hstack((k_enters.flatten(),k_pauses.flatten(),k_stops.flatten(),k_jumps.flatten() ))
t_array_copy = np.copy(t_array)
while t_array_copy.shape[0] != 200:
    t_array_copy = np.vstack((t_array_copy,t_array))
for i in range(n_trajectories):
    result = np.zeros((len(t_array)*N_rib),dtype=np.int32)
    frapresult = np.zeros((len(t_array)*N_rib),dtype=np.int32)
    ribtimes = np.zeros((400),dtype=np.float64)
    coltimes = np.zeros((400),dtype=np.int32)
    ssa_translation_generic_lowmem.run_SSA_generic(result,ribtimes,coltimes, kelong,frapresult,t_array, np.array([0,0,0],dtype=np.float64), seeds[i],nribs, k_add.flatten() ,2,0,2,0 )
    all_results[i,:] = result
    all_frapresults[i,:] = frapresult
    all_coltimes[i,:] = coltimes
    all_ribtimes[i,:] = ribtimes
    all_ribs[i,:] = nribs[0]
traj = all_results[0,:].reshape((N_rib,len(t_array))).T
f,ax = plt.subplots(2,1)
ax[0].set_ylim([0,300])
ax[0].fill_between([0,400],[100,100],color='red',alpha=.2)
ax[0].fill_between([0,400],[200,200],color='green',alpha=.2)
ax[0].fill_between([0,400],[300,300],color='blue',alpha=.2)
ax[0].plot(traj,'.')
ax[0].set_xlabel('Time')
ax[0].set_ylabel('Ribosome Location')
ax[0].set_title(' 100 codons, enters: 0,10 2,20 stops: 0,50 and 2,80' )
spatial_x = (traj + (traj > 100) + (traj > 199))%100
ax[1].set_ylim([0,100])
#ax[1].plot(t_array,spatial_x,'.')
ax[1].plot(t_array_copy.T[traj<=100],spatial_x[traj <= 100],'r.')
ax[1].plot(t_array_copy.T[traj>100],spatial_x[traj > 100],'g.')
ax[1].plot(t_array_copy.T[traj>199],spatial_x[traj > 199],'b.')
ax[1].set_xlabel('Time')
ax[1].set_ylabel('Ribosome Location')
ax[1].set_title(' spatial location ' )
ax[1].legend(['0','+1','+2'])
###########
# Scenario 5: as scenario 4 plus a slow codon and an explicit pause site;
# reaction counts derived from the spec array lengths.
k = np.ones((1,300)).flatten()
kelong = k[1:-1]
kelong[49] = 0
kelong[39] = 0.1
kelong[278] = 0
k_enters = np.array([[10,0,.04],[10,2,.02]],dtype=np.float64)
k_stops = np.array([[50,0,10],[80,2,10]],dtype=np.float64)
k_fss = []
k_pause = np.array([[40,0,100]],dtype=np.float64)
#k_pause = np.array([[30,2,100],[40,2,100]],dtype=np.float64)
k_enters,k_pauses,k_stops,k_jumps,frames_used = generate_additional_ks(k_enters,k_pause,k_fss,k_stops,100)
t_array = np.array([0,100,500],dtype=np.float64)
t0 = 15
t_array = np.linspace(0,400,400,dtype=np.float64)
N_rib = 200
result = np.zeros((len(t_array)*N_rib),dtype=np.int32 )
#kelong = np.array([3.1,3.2,3.3,3.4,3.5,3.1,3.2,3.3,3.4,3.5],dtype=np.float64)
n_trajectories = 1
start = time.time()
all_results = np.zeros((n_trajectories,N_rib*len(t_array)),dtype=np.int32)
lenfrap = len(np.intersect1d(np.where(t_array>0)[0],np.where(t_array<20)[0]))
all_frapresults = np.zeros((n_trajectories,N_rib*len(t_array)),dtype=np.int32)
all_ribtimes = np.zeros((n_trajectories,400),dtype=np.float64)
all_coltimes = np.zeros((n_trajectories,400),dtype=np.int32)
nribs = np.array([0],dtype=np.int32)
all_ribs = np.zeros((n_trajectories,1))
seeds = np.random.randint(0,0x7FFFFFF,n_trajectories)
k_add = np.hstack((k_enters.flatten(),k_pauses.flatten(),k_stops.flatten(),k_jumps.flatten() ))
t_array_copy = np.copy(t_array)
while t_array_copy.shape[0] != 200:
    t_array_copy = np.vstack((t_array_copy,t_array))
for i in range(n_trajectories):
    result = np.zeros((len(t_array)*N_rib),dtype=np.int32)
    frapresult = np.zeros((len(t_array)*N_rib),dtype=np.int32)
    ribtimes = np.zeros((400),dtype=np.float64)
    coltimes = np.zeros((400),dtype=np.int32)
    ssa_translation_generic_lowmem.run_SSA_generic(result,ribtimes,coltimes, kelong,frapresult,t_array, np.array([0,0,0],dtype=np.float64), seeds[i],nribs, k_add.flatten() ,len(k_enters),len(k_pauses),len(k_stops),len(k_jumps))
    all_results[i,:] = result
    all_frapresults[i,:] = frapresult
    all_coltimes[i,:] = coltimes
    all_ribtimes[i,:] = ribtimes
    all_ribs[i,:] = nribs[0]
traj = all_results[0,:].reshape((N_rib,len(t_array))).T
f,ax = plt.subplots(2,1)
ax[0].set_ylim([0,300])
ax[0].fill_between([0,400],[100,100],color='red',alpha=.2)
ax[0].fill_between([0,400],[200,200],color='green',alpha=.2)
ax[0].fill_between([0,400],[300,300],color='blue',alpha=.2)
ax[0].plot(traj,'.')
ax[0].set_xlabel('Time')
ax[0].set_ylabel('Ribosome Location')
ax[0].set_title(' 100 codons, enters: 0,10 2,20 stops: 0,50 and 2,80' )
spatial_x = (traj + (traj > 100) + (traj > 199))%100
ax[1].set_ylim([0,100])
#ax[1].plot(t_array,spatial_x,'.')
ax[1].plot(t_array_copy.T[traj<=100],spatial_x[traj <= 100],'r.')
ax[1].plot(t_array_copy.T[traj>100],spatial_x[traj > 100],'g.')
ax[1].plot(t_array_copy.T[traj>199],spatial_x[traj > 199],'b.')
ax[1].set_xlabel('Time')
ax[1].set_ylabel('Ribosome Location')
ax[1].set_title(' spatial location ' )
ax[1].legend(['0','+1','+2'])
|
import sys
from PyQt5.QtWidgets import QApplication, QWidget, QMainWindow, QSpinBox
from design import Ui_MainWindow
class MyWidget(QMainWindow, Ui_MainWindow):
    """Function plotter: draws the elementary function selected by the checked
    radio button and prints a textual analysis (in Russian) of the function.

    Widgets (from Ui_MainWindow):
      spinBox   -- half-width ``a`` of the plotted integer grid [-a, a] (or [0, a])
      spinBox_2 -- exponent ``n`` for y = b*x**n (radioButton_10)
      spinBox_3 -- coefficient ``b`` for the parametrized plots (buttons 9/10/11)

    The original implementation repeated the clear/plot/describe sequence in
    every branch; it is factored into ``_show``/``_describe`` plus shared
    description constants.  All user-visible strings are unchanged.
    """

    # --- shared description lines -------------------------------------
    _DOM_ALL = "D(f) = (-∞; +∞) \nE(f) = (-∞; +∞)"
    _ZERO = "y = 0, при x = 0"
    _ODD = "Функция является нечётной"
    _EVEN = "Функция является чётной"
    _NEITHER = "Функция является ни нечётной, ни чётной"
    _NO_EXTREMA = "y наиб - не существует \ny наим - не существует"

    # --- per-function descriptions (first line goes to setPlainText) --
    _LINE_UP = ("График функции - Прямая",
                "Функция возрастает при x(-∞; +∞)",
                _DOM_ALL, _ZERO, _ODD, _NO_EXTREMA)
    _LINE_DOWN = ("График функции - Прямая",
                  "Функция убывает при x(-∞; +∞)",
                  _DOM_ALL, _ZERO, _ODD, _NO_EXTREMA)
    _LINE_CONST = ("График функции - Прямая",
                   _DOM_ALL,
                   "y = 0, при любом значении x",
                   _ODD,
                   "y наиб = 0 \ny наим = 0")
    _PARAB_UP = ("График функции - парабола\nВетви направлены вверх",
                 "Функция убывает при x(-∞; 0)\nФункция возрастает при x(0; +∞)",
                 _DOM_ALL, _ZERO, _EVEN,
                 "y наиб - не существует \ny наим = 0")
    _PARAB_DOWN = ("График функции - парабола\nВетви направлены вниз",
                   "Функция возрастает при x(-∞; 0)\nФункция убывает при x(0; +∞)",
                   _DOM_ALL, _ZERO, _EVEN,
                   "y наиб = 0 \ny наим - не существует")
    _CUBIC_UP = ("График функции - кубическая парабола",
                 "Функция возрастает при x(-∞; +∞)",
                 _DOM_ALL, _ZERO, _ODD, _NO_EXTREMA)
    _CUBIC_DOWN = ("График функции - кубическая парабола",
                   "Функция убывает при x(-∞; +∞)",
                   _DOM_ALL, _ZERO, _ODD, _NO_EXTREMA)
    _ROOT_POS = ("График функции",
                 "Функция возрастает при x(0; +∞)",
                 "D(f) = (0; +∞) \nE(f) = (0; +∞)",
                 _ZERO, _NEITHER,
                 "y наиб = не существует \ny наим = 0")
    _ROOT_NEG = ("График функции",
                 "Функция убывает при x(-∞; 0)",
                 "D(f) = (-∞; 0) \nE(f) = (-∞; 0)",
                 _ZERO, _NEITHER,
                 "y наиб = не существует \ny наим = 0")

    def __init__(self):
        super().__init__()
        self.setupUi(self)
        self.pushButton.clicked.connect(self.run)

    def _describe(self, lines):
        """Replace the analysis text with the given description lines."""
        self.plainTextEdit.setPlainText(lines[0])
        for line in lines[1:]:
            self.plainTextEdit.appendPlainText(line)

    def _show(self, xs, ys, pen, lines):
        """Redraw the plot with (xs, ys) in the given pen color and describe it."""
        self.graphicsView.clear()
        self.graphicsView.plot(xs, ys, pen=pen)
        self._describe(lines)

    def run(self):
        """Plot the function selected by the checked radio button."""
        self.plainTextEdit.setPlainText("")
        # spinBox always holds an integer, so int() is safe here.
        a = int(self.spinBox.text())
        xs = list(range(-a, a + 1))   # symmetric integer grid -a..a
        half = list(range(a + 1))     # non-negative grid 0..a
        if self.radioButton.isChecked():            # y = x
            self._show(xs, xs, 'r', self._LINE_UP)
        elif self.radioButton_2.isChecked():        # y = x**2
            self._show(xs, [i ** 2 for i in xs], 'g', self._PARAB_UP)
        elif self.radioButton_3.isChecked():        # y = x**3
            self._show(xs, [i ** 3 for i in xs], 'b', self._CUBIC_UP)
        elif self.radioButton_4.isChecked():        # x = y**2 (right half-parabola)
            self._show([i ** 2 for i in half], half, 'b', self._ROOT_POS)
        elif self.radioButton_5.isChecked():        # y = -x
            self._show(xs, [-i for i in xs], 'r', self._LINE_DOWN)
        elif self.radioButton_6.isChecked():        # y = -x**2
            self._show(xs, [-(i ** 2) for i in xs], 'g', self._PARAB_DOWN)
        elif self.radioButton_7.isChecked():        # y = -x**3
            self._show(xs, [-(i ** 3) for i in xs], 'b', self._CUBIC_DOWN)
        elif self.radioButton_8.isChecked():        # x = -y**2 (left half-parabola)
            self._show([-(i ** 2) for i in half], half, 'b', self._ROOT_NEG)
        elif self.radioButton_9.isChecked():        # y = b*x
            b = int(self.spinBox_3.text())
            desc = self._LINE_UP if b > 0 else (self._LINE_DOWN if b < 0 else self._LINE_CONST)
            self._show(xs, [b * i for i in xs], 'r', desc)
        elif self.radioButton_10.isChecked():       # y = b*x**n
            self.graphicsView.clear()
            b = int(self.spinBox_3.text())
            n = int(self.spinBox_2.text())
            if b != 0:
                self.graphicsView.plot(xs, [b * i ** n for i in xs], pen='g')
            # As in the original code, nothing is printed for b == 0 or n < 1.
            if b > 0 and n > 1:
                self._describe(self._PARAB_UP if n % 2 == 0 else self._CUBIC_UP)
            elif b < 0 and n > 1:
                self._describe(self._PARAB_DOWN if n % 2 == 0 else self._CUBIC_DOWN)
            elif b > 0 and n == 1:
                self._describe(self._LINE_UP)
            elif b < 0 and n == 1:
                self._describe(self._LINE_DOWN)
        elif self.radioButton_11.isChecked():       # x = b*y**2
            self.graphicsView.clear()
            b = int(self.spinBox_3.text())
            if b != 0:
                self.graphicsView.plot([b * (i ** 2) for i in half], half, pen='b')
            # Nothing is printed for b == 0, matching the original behaviour.
            if b > 0:
                self._describe(self._ROOT_POS)
            elif b < 0:
                self._describe(self._ROOT_NEG)
# Script entry point: create the Qt application, show the main window and
# hand control to the event loop (exec_ returns the app's exit status).
app = QApplication(sys.argv)
ex = MyWidget()
ex.show()
sys.exit(app.exec_())
import csv
import os
# Load game/product rows from 1000.csv into a list of dicts.
# The file is GB18030-encoded; each row is (product name, abbreviation, nickname).
gameName = []
with open("./1000.csv", "r", newline='', encoding="gb18030") as csvfile:
    # csv.reader yields one list of column strings per row
    read = csv.reader(csvfile)
    for inx, item in enumerate(read):
        print('item', item)
        print('inx', inx)
        # Skip blank names and the header row ('产品名称' == "product name").
        if item[0] and item[0] != '产品名称':
            gameName.append({
                "text": item[0],
                "abbr": item[1],
                "nickName": item[2]
            })
            print(item)
print('gameName', gameName)
print(len(gameName))
|
#!/usr/bin/python
#
# $Id: kfsshell.py 24 2007-09-27 07:17:06Z sriramsrao $
#
# Copyright 2007 Kosmix Corp.
#
# This file is part of Kosmos File System (KFS).
#
# Licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# Script that launches KfsShell: get the location of the metaserver
# from the machines.cfg file and launch KfsShell
#
# Look for <bin-dir>/tools/KfsShell
#
# Use machines.cfg
#
import os,os.path,sys,getopt
from ConfigParser import ConfigParser
def usage():
    """Print command-line usage for this script."""
    # Parenthesized single-argument print parses identically on Python 2 and 3
    # (the original bare print statement was Python-2-only).
    print("%s [-f, --file <machines.cfg>] [ -b, --bin ]\n" % sys.argv[0])
if __name__ == '__main__':
    # Parse -f/--file (machines.cfg path) and -b/--bin (KFS bin directory).
    (opts, args) = getopt.getopt(sys.argv[1:], "b:f:h",
                                 ["bin=", "file=", "help"])
    filename = ""
    bindir = ""
    for (o, a) in opts:
        if o in ("-h", "--help"):
            usage()
            sys.exit(2)
        if o in ("-f", "--file"):
            filename = a
        elif o in ("-b", "--bin"):
            bindir = a
    # Validate both paths before doing any work.
    if not os.path.exists(filename):
        print("%s : config file doesn't exist\n" % filename)
        sys.exit(-1)
    if not os.path.exists(bindir):
        print("%s : bindir doesn't exist\n" % bindir)
        sys.exit(-1)
    config = ConfigParser()
    config.readfp(open(filename, 'r'))
    if not config.has_section('metaserver'):
        # BUGFIX: NoSectionError lives in the ConfigParser module, not on the
        # parser instance — `raise config.NoSectionError, ...` would have
        # failed with AttributeError (and used Python-2-only raise syntax).
        from ConfigParser import NoSectionError
        raise NoSectionError("No metaserver section")
    # Launch KfsShell pointed at the metaserver's node/port.
    node = config.get('metaserver', 'node')
    port = config.getint('metaserver', 'baseport')
    cmd = "%s/tools/KfsShell -s %s -p %d" % (bindir, node, port)
    os.system(cmd)
|
from django.db import models
from django.db import transaction
from .settings import THUNDERING_FRAME_WIDTH
import time
class ImportProcessManager(models.Manager):
    """
    Manager used to create import process and handling thundering herd problem.
    """
    def create_process(self):
        """
        Method used to create Process object.

        At most one process is created per "thundering frame": concurrent
        callers within the same frame all race for the same row, and only
        the one that actually created it schedules the import task.

        :return: the created object, or None if a process already exists
                 in the current thundering frame
        """
        # Quantize "now" down to the start of the current frame so every
        # caller inside one frame computes the same id.
        thunder_frame_id = int(time.time() / THUNDERING_FRAME_WIDTH) * THUNDERING_FRAME_WIDTH
        with transaction.atomic():
            obj, created = self.get_or_create(thunder_id=thunder_frame_id)
            if created:
                from .tasks import start_import_process
                # BUGFIX: dispatch the task only after the transaction
                # commits; firing .delay() inside the atomic block let the
                # worker run before the row was visible in the database.
                transaction.on_commit(lambda: start_import_process.delay(obj.id))
        return obj if created else None
|
#!/usr/bin/python
import unittest
from parquet.schema import SchemaParser, SchemaHelper, RecordAssembler, RecordDissector
d1 = {
'DocId': 10,
'Links': {
'Forward': [20, 40, 60]
},
'Name': [
{'Language':[
{'Code': 'en-us', 'Country': 'us'},
{"Code": "en"}
],
'Url': 'http://A'},
{'Url': 'http://B'},
{'Language':[
{'Code': 'en-gb', 'Country': 'gb'}
]}
]
}
d2 = {
'DocId': 20,
'Links': {
'Backward': [10, 30],
'Forward': [80]
},
'Name': [
{'Url': 'http://C'}
]
}
field_values = {
'DocId': [
[0,0,10],
[0,0,20]
],
'Links.Backward': [
[0,1,None],
[0,2,10],
[1,2,30]
],
'Links.Forward': [
[0,2,20],
[1,2,40],
[1,2,60],
[0,2,80]
],
'Name.Language.Code': [
[0,2,"en-us"],
[2,2,"en"],
[1,1,None],
[1,2,"en-gb"],
[0,1,None]
],
'Name.Language.Country': [
[0,3,"us"],
[2,2,None],
[1,1,None],
[1,3,"gb"],
[0,1,None],
],
'Name.Url': [
[0,2,"http://A"],
[1,2,"http://B"],
[1,1,None],
[0,2,"http://C"],
]
}
schema_text = """
message Document {
required int64 DocId;
optional group Links {
repeated int64 Backward;
repeated int64 Forward;
}
repeated group Name {
repeated group Language {
required string Code;
optional string Country;
}
optional string Url;
}
}
"""
class TestSchemaParser(unittest.TestCase):
    """Checks that the example schema parses into the expected element count."""

    def test_parser(self):
        p = SchemaParser()
        s = p.parse(schema_text)
        # 10 schema elements: the Document root plus 9 fields/groups.
        self.assertTrue(len(s) == 10)
        # Parenthesized print is valid on both Python 2 and 3 (the original
        # bare print statement was Python-2-only).
        print("test_parser done")
class FieldEmitter:
    """Test sink for RecordDissector: accumulates every emitted field value
    as a ``[fid, repetition_level, definition_level, value]`` row."""

    def __init__(self, schema_elements):
        self.schema_elements = schema_elements
        self.values = list()

    def emit(self, fid, rep_lvl, def_lvl, value):
        # Record the column-striped value for later inspection by the tests.
        row = [fid, rep_lvl, def_lvl, value]
        self.values += [row]
class TestRecordDissector(unittest.TestCase):
    """Dissects the two sample documents and counts the emitted field values."""

    def test_dissect(self):
        p = SchemaParser()
        s = p.parse(schema_text)
        emitter = FieldEmitter(s)
        rd = RecordDissector(s, emitter)
        rd.dissect(d1)
        rd.dissect(d2)
        # d1 + d2 stripe into 23 column values (see field_values above).
        self.assertTrue(23 == len(emitter.values))
        # Parenthesized print is valid on both Python 2 and 3.
        print("test_dissect done")
class ListReader:
    """Minimal in-memory column reader used by the assembly tests.

    Wraps a list of ``[repetition_level, definition_level, value]`` triples
    and exposes the reader interface RecordAssembler expects: the current
    repetition/definition levels plus ``consume()`` to advance.
    """

    def __init__(self, id, values):
        self.values = values
        self.id = id
        self.pos = 0
        # Levels of the first entry form the initial state.
        self.repetition_level = self.values[0][0]
        self.definition_level = self.values[0][1]

    def dump(self):
        """Debug helper: print the reader id and its raw triples."""
        # BUGFIX: was a Python-2-only `print a, b` statement; this produces
        # the same space-separated output and also parses under Python 3.
        print("%s %s" % (self.id, self.values))

    def consume(self):
        """Advance to the next triple; past the end both levels become 0."""
        if self.pos + 1 < len(self.values):
            self.pos += 1
            self.repetition_level = self.values[self.pos][0]
            self.definition_level = self.values[self.pos][1]
        else:
            self.repetition_level = 0
            self.definition_level = 0
class TestRecordAssemble(unittest.TestCase):
    """Exercises RecordAssembler FSM construction and record assembly.

    All prints are parenthesized (valid on Python 2 and 3); the FSM loops no
    longer shadow the schema variable ``s``.
    """

    def test_partials_fsm(self):
        p = SchemaParser()
        s = p.parse(schema_text)
        ra = RecordAssembler(s, {})
        fsm = ra.select_fields(('DocId', 'Name.Language.Country'))
        # Count transitions out of every non-root state.
        count = 0
        for state, transitions in fsm.items():
            if state != SchemaHelper.ROOT_NODE:
                count += len(transitions.keys())
        self.assertTrue(count == 4)
        print("test_partial_fsm done")

    def test_full_fsm(self):
        p = SchemaParser()
        s = p.parse(schema_text)
        ra = RecordAssembler(s, {})
        fsm = ra.select_fields()
        count = 0
        for state, transitions in fsm.items():
            if state != SchemaHelper.ROOT_NODE:
                count += len(transitions.keys())
        self.assertTrue(count == 13)
        print("test_full_fsm done")

    def test_partial_assemble(self):
        column_readers = dict([(id, ListReader(id, vl)) for id, vl in field_values.items()])
        p = SchemaParser()
        s = p.parse(schema_text)
        ra = RecordAssembler(s, column_readers)
        fsm = ra.select_fields(('DocId', 'Name.Language.Country'))
        # Two sample records were striped into field_values, so assemble twice.
        ra.assemble()
        ra.assemble()
        print("test_partial_assemble done")

    def test_full_assemble(self):
        column_readers = dict([(id, ListReader(id, vl)) for id, vl in field_values.items()])
        p = SchemaParser()
        s = p.parse(schema_text)
        ra = RecordAssembler(s, column_readers)
        fsm = ra.select_fields()
        ra.assemble()
        ra.assemble()
        print("test_full_assemble done")
if __name__ == '__main__':
    # Discover and run all TestCase classes defined in this module.
    unittest.main()
|
from flask import Flask, session
from .config import Config
from datetime import timedelta
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_login import LoginManager
# Application factory-style wiring at import time: app, DB, migrations, login.
app = Flask(__name__)
app.config.from_object(Config)
db = SQLAlchemy(app)
migrate = Migrate(app, db)
login = LoginManager(app)
# Unauthenticated and stale sessions are both redirected to the auth login view.
login.login_view = "auth.login"
login.refresh_view = "auth.login"
login.needs_refresh_message = (u"Your Session timedout, please re-login")
login.needs_refresh_message_category = "info"
# Imported after `app`/`db` exist so the blueprints can use them
# (avoids a circular import at module load).
from . import views
from . import models
app.register_blueprint(views.auth.bp)
app.register_blueprint(views.dashboard.bp)
app.add_url_rule("/", endpoint="dashboard")
app.register_blueprint(views.watchlist.bp)
app.register_blueprint(views.charts.bp)
# before a request is made check if the session should be timedout
@app.before_request
def before_request():
    # Marking the session permanent makes the lifetime below apply;
    # presumably intended as a 10-minute idle timeout — confirm, since the
    # lifetime is (re)assigned on every request.
    session.permanent = True
    app.permanent_session_lifetime = timedelta(minutes=10)
|
from init_database import *
def user_id_exist(user_id):
    """Return True if a User row with the given user_id exists."""
    cursor = session.query(User).filter(User.user_id == user_id).first()
    # first() yields None when no row matched; the if/else was redundant.
    return cursor is not None
# Check whether the store stocks a given kind of book.
def book_id_exist(store_id, book_id):
    """Return True if (store_id, book_id) exists in Store_detail."""
    cursor = session.query(Store_detail).filter(Store_detail.store_id == store_id,
                                                Store_detail.book_id == book_id).first()
    # first() yields None when no row matched; the if/else was redundant.
    return cursor is not None
def store_id_exist(store_id):
    """Return True if a Store row with the given store_id exists."""
    cursor = session.query(Store).filter(Store.store_id == store_id).first()
    # first() yields None when no row matched; the if/else was redundant.
    return cursor is not None
|
"""
Module proving a standard way to carry various options
"""
import typing
try:
from . import constants as _constants
except ImportError:
import constants as _constants
class Namespace(dict):
    """A ``dict`` subclass whose items double as attributes.

    Keys are restricted to strings that are valid Python identifiers so
    that every stored item is reachable through attribute syntax.
    """

    def __getattr__(self, key: str):
        # Attribute reads fall through to item lookup.
        return self[key]

    def __setattr__(self, key: str, value: typing.Any):
        # Attribute writes are stored as items (and validated below).
        self[key] = value

    def __setitem__(self, key: str, value: typing.Any):
        # Reject anything that could not also be used as an attribute name.
        if not isinstance(key, str):
            raise TypeError(f"Keys must be str, not {type(key)}")
        if not key.isidentifier():
            raise KeyError(f"Keys must be valid identifiers, not '{key}'")
        super().__setitem__(key, value)
class Options(Namespace):
    """Namespace of options that falls back to module-level defaults.

    Intended for any function, method or class that takes an ``options``
    argument: missing keys are resolved against ``DEFAULT_<KEY>`` attributes
    of the ``constants`` module instead of raising KeyError immediately.

    ``copy`` returns an ``Options`` instance. ``popitem`` and ``setdefault``
    are deliberate no-ops. ``items``/``keys``/``values`` behave exactly like
    their dict counterparts, and ``update`` validates (via ``__setitem__``)
    that every key is a string, raising TypeError otherwise.
    """

    def __repr__(self):
        if not self:
            return "Options()"
        try:
            width = max(len(name) for name in self)
        except TypeError as exc:
            raise TypeError("Do not use non-string keys!") from exc
        body = "\n".join(
            f" {key:<{width}} = {self[key]}" for key in sorted(self.keys())
        )
        return "Options(\n" + body + "\n)"

    def __getitem__(self, item: str):
        # Explicitly-set values win; otherwise look for a module default.
        if item in self:
            return super().__getitem__(item)
        if isinstance(item, str):
            default_name = f"DEFAULT_{item.upper()}"
            if hasattr(_constants, default_name):
                return getattr(_constants, default_name)
        raise KeyError(item)

    def copy(self) -> "Options":
        return Options(super().copy())

    def popitem(self, *args, **kwargs):
        """
        Don't do anything, on purpose
        """
        return None

    def setdefault(self, *args, **kwargs):
        """
        Don't do anything, on purpose
        """
        return None

    def update(self, mapping: typing.Mapping = None, **kwargs: dict):
        if mapping is not None:
            if isinstance(mapping, typing.Mapping):
                for key in mapping:
                    self[key] = mapping[key]
            elif isinstance(mapping, typing.Iterable):
                for key, value in mapping:
                    self[key] = value
        for key, value in kwargs.items():
            self[key] = value
|
#-------INCIANDO---------
#Importa e incia as biblietecas
import pygame
import random
from config import *
from assets import *
from sprites import *
def battle_screen(window):
    """Run the turn-based boss-fight loop; returns QUIT, OVER or WIN.

    The player acts on keys 1-4 (attack / defend / heal / flee), then the
    boss takes its turn. Depends on config/assets/sprites globals (FPS,
    QUIT, OVER, WIN, colors, asset keys, Hero2, Boss, load_assets).
    """
    # Frame-rate clock
    clock = pygame.time.Clock()
    assets = load_assets()
    all_sprites = pygame.sprite.Group()
    groups = {}
    groups['all_sprites'] = all_sprites
    # Create the player
    player = Hero2(groups, assets)
    all_sprites.add(player)
    # Create the boss
    boss = Boss(assets)
    all_sprites.add(boss)
    DONE = 0
    PLAYING = 1
    state = PLAYING
    SUA_VEZ = True   # "your turn": True while the player may act
    keys_down = {}
    guard = False    # set when the player chose to defend this round
    # ====== Main loop =======
    pygame.mixer.music.play(loops=-1)
    # NOTE(review): state is never set to DONE; the loop only exits through
    # the return statements below — confirm whether that is intended.
    while state != DONE:
        clock.tick(FPS)
        # Handle events
        for event in pygame.event.get():
            # Window close always ends the screen
            if event.type == pygame.QUIT:
                return QUIT
            # Only read the keyboard while actually playing
            if state == PLAYING:
                # Is it the player's turn?
                if SUA_VEZ:
                    # A key was pressed
                    if event.type == pygame.KEYDOWN:
                        # Combat-menu selection
                        keys_down[event.key] = True
                        # 1 - attack action
                        if event.key == pygame.K_1:
                            player.attack()
                            boss.health -= player.damage
                            SUA_VEZ = False
                        # 2 - defend action
                        if event.key == pygame.K_2:
                            guard = True
                            SUA_VEZ = False
                        # 3 - heal action (only below full health)
                        if event.key == pygame.K_3:
                            if player.health<100:
                                assets[HEAL_MUSIC].play()
                                player.health += (random.randint(10,15))
                                SUA_VEZ = False
                                # clamp at full health
                                if player.health > 100:
                                    player.health = 100
                        # 4 - flee action
                        if event.key == pygame.K_4:
                            pygame.mixer.music.stop()
                            return OVER
                else:
                    # Boss turn
                    pygame.time.delay(500)
                    boss.attack()
                    if guard:
                        # Defending soaks 10 points of damage (never negative)
                        guard = False
                        damage= boss.damage-10
                        if damage < 0:
                            damage = 0
                    else:
                        damage = boss.damage
                    player.health -= damage
                    SUA_VEZ = True
        # ====== Update game state =======
        # Update all character sprites
        all_sprites.update()
        if state == PLAYING:
            # Did the player die?
            if player.health <= 0:
                # Play the death sound
                assets[DYING_SOUND].play()
                pygame.mixer.music.stop()
                return OVER
            # Did the boss die?
            if boss.health <= 0:
                # Play the death sound
                assets[DYING_SOUND].play()
                pygame.mixer.music.stop()
                return WIN
        # ==== Render ====
        window.fill(BLACK)  # clear with black
        window.blit(assets[BACKGROUND], (0, 0))
        # Draw the characters
        all_sprites.draw(window)
        # Draw health readouts
        # Hero
        text_surface = assets[SCORE_FONT].render("{:03}".format(player.health), True, RED)
        text_rect = text_surface.get_rect()
        text_rect.bottomleft = (player.rect.centerx, player.rect.top)
        window.blit(text_surface, text_rect)
        # Dracula (the boss)
        text_surface = assets[SCORE_FONT].render("{:03}".format(boss.health), True, WHITE)
        text_rect = text_surface.get_rect()
        text_rect.bottomleft = (boss.rect.right, boss.rect.top)
        window.blit(text_surface, text_rect)
        pygame.display.update()  # present the new frame
    return state
# encoding: utf-8
from __future__ import print_function
import sys
import datetime
def write(message):
time = datetime.datetime.now().strftime("%Y/%m/%d %H:%M:%S")
print('[{}] {}'.format(time, message))
def debug(message):
    """Log *message* to stdout at DEBUG level with a timestamp prefix."""
    stamp = datetime.datetime.now().strftime("%Y/%m/%d %H:%M:%S")
    print('[{}] [DEBUG] {}'.format(stamp, message))
def warning(message):
    """Log *message* to stderr at WARNING level with a timestamp prefix."""
    stamp = datetime.datetime.now().strftime("%Y/%m/%d %H:%M:%S")
    print('[{}] [WARNING] {}'.format(stamp, message), file=sys.stderr)
def error(message):
    """Log *message* to stderr at ERROR level with a timestamp prefix."""
    stamp = datetime.datetime.now().strftime("%Y/%m/%d %H:%M:%S")
    print('[{}] [ERROR] {}'.format(stamp, message), file=sys.stderr)
def error_exit(status, message):
    """Log *message* to stderr at ERROR level, then terminate with *status*."""
    stamp = datetime.datetime.now().strftime("%Y/%m/%d %H:%M:%S")
    print('[{}] [ERROR] {}'.format(stamp, message), file=sys.stderr)
    sys.exit(status)
|
class BaseMatrix(object):
    """Dense rows x columns matrix holding a fixed numeric type
    (int, float or complex), stored as a list of row lists."""

    def __init__(self, type1, rows, columns):
        self.n = rows      # number of rows
        self.m = columns   # number of columns
        self.t = type1     # element type
        self.data = []
        types = [int, complex, float]
        if type1 in types:
            # BUGFIX: storage was built transposed (`columns` rows of `rows`
            # entries each), so get/set raised IndexError on any non-square
            # matrix. Build `rows` rows of `columns` zeros to match the
            # data[row][column] indexing used by get/set.
            for a in range(rows):
                row = []
                for b in range(columns):
                    row.append(self.t(0))
                self.data.append(row)
        else:
            raise TypeError("Please enter a valid type")

    def __str__(self):
        """Header line plus one space-separated line per row."""
        s = self.__class__.__name__ + "({})". format((self.n, self.m))+"\n"
        for i in range(0, self.n):
            for j in range(0, self.m):
                s += str(self.get(i, j)) + " "
            s += "\n"
        result = "{} x {} matrix of type {}: ".format(self.n, self.m, self.t)
        return result + s

    def get(self, row, column):
        """Return the element at (row, column); raises IndexError out of range."""
        if row <= (self.n - 1) and column <= (self.m - 1):
            val = self.data[row][column]
            return val
        else:
            raise IndexError("the position you are looking for was not found")

    def set(self, row, column, value):
        """Store value at (row, column); raises IndexError out of range."""
        if row <= (self.n - 1) and column <= (self.m - 1):
            self.data[row][column] = value
        else:
            raise IndexError("the index you are looking for is out of range")

    def __add__(self, other):
        """Element-wise sum; prints a message and returns None on
        shape/type mismatch (preserved from the original interface)."""
        if self.n == other.n and self.m == other.m and self.t == other.t:
            m = BaseMatrix(self.t, self.n, self.m)
            for i in range(0, self.n):
                for j in range(0, self.m):
                    s = self.get(i, j) + other.get(i, j)
                    m.set(i, j, s)
            return m
        else:
            print("the matrices' dimensions / type do not match")

    def __eq__(self, other):
        """Matrices are equal when shape, type and every element match."""
        condition = True
        if self.n == other.n and self.m == other.m and self.t == other.t:
            for i in range(0, self.n):
                for j in range(0, self.m):
                    if not self.get(i, j) == other.get(i, j):
                        condition = False
                        return condition
            return condition
        else:
            condition = False
            return condition
class MySparseMatrix(BaseMatrix):
    """Sparse variant of BaseMatrix: only explicitly-set entries are stored,
    in a dict keyed by (row, column); unset entries read as the element
    type's zero value."""

    def __init__(self, t, n, m):
        if t not in (int, complex, float):
            raise TypeError("Please enter a valid type")
        self.n = n
        self.m = m
        self.t = t
        self.data = {}

    def set(self, i, j, v):
        # Guard clause: reject indices beyond the declared shape.
        if i > self.n - 1 or j > self.m - 1:
            raise IndexError("the index you are looking for is out of range")
        self.data[(i, j)] = v

    def get(self, i, j):
        if i > self.n - 1 or j > self.m - 1:
            raise IndexError("the position you are looking for was not found")
        # Missing entries default to the type's zero (e.g. int() == 0).
        return self.data.get((i, j), self.t())
|
#%% 09 - Odd numbers 1 to 50
"""
Program that prints only the odd numbers between 1 and 50,
one per line.
"""
numero = 50
# Step by 2 starting from 1 so only odd values are visited.
print("\n".join(str(impar) for impar in range(1, numero + 1, 2)))
|
# -*- coding: utf-8 -*-
# code for console Encoding difference. Dont' mind on it
import imp
import sys
from popbill import EasyFinBankService, PopbillException
import testValue
imp.reload(sys)
try:
    # Python 2 only: force UTF-8 as the default encoding; absent (and
    # harmlessly skipped) on Python 3.
    sys.setdefaultencoding("UTF8")
except Exception as E:
    pass
# Configure the Popbill EasyFinBank client from the shared test settings.
easyFinBankService = EasyFinBankService(testValue.LinkID, testValue.SecretKey)
easyFinBankService.IsTest = testValue.IsTest
easyFinBankService.IPRestrictOnOff = testValue.IPRestrictOnOff
easyFinBankService.UseStaticIP = testValue.UseStaticIP
easyFinBankService.UseLocalTimeYN = testValue.UseLocalTimeYN
"""
수집 요청(RequestJob API) 함수를 통해 반환 받은 작업 아이디의 상태를 확인합니다.
- 거래 내역 조회(Search API) 함수 또는 거래 요약 정보 조회(Summary API) 함수를 사용하기 전에
수집 작업의 진행 상태, 수집 작업의 성공 여부를 확인해야 합니다.
- 작업 상태(jobState) = 3(완료)이고 수집 결과 코드(errorCode) = 1(수집성공)이면
거래 내역 조회(Search) 또는 거래 요약 정보 조회(Summary) 를 해야합니다.
- 작업 상태(jobState)가 3(완료)이지만 수집 결과 코드(errorCode)가 1(수집성공)이 아닌 경우에는
오류메시지(errorReason)로 수집 실패에 대한 원인을 파악할 수 있습니다.
- https://developers.popbill.com/reference/easyfinbank/python/api/job#GetJobState
"""
# (Block above, kept verbatim: checks the state of a collection job id
# returned by RequestJob before calling the Search/Summary APIs.)
try:
    print("=" * 15 + " 수집 상태 확인 " + "=" * 15)
    # Popbill member's business registration number
    CorpNum = testValue.testCorpNum
    # Job id issued by the earlier requestJob call
    jobID = "022080215000000325"
    # Fetch and dump the job's state fields.
    response = easyFinBankService.getJobState(CorpNum, jobID)
    print("jobID (작업아이디) : %s" % response.jobID)
    print("jobState (수집상태) : %s" % response.jobState)
    print("startDate (시작일자) : %s" % response.startDate)
    print("endDate (종료일자) : %s" % response.endDate)
    print("errorCode (오류코드) : %s" % response.errorCode)
    print("errorReason (오류메시지) : %s" % response.errorReason)
    print("jobStartDT (작업 시작일시) : %s" % response.jobStartDT)
    print("jobEndDT (작업 종료일시) : %s" % response.jobEndDT)
    print("regDT (수집 요청일시) : %s" % response.regDT)
except PopbillException as PE:
    print("Exception Occur : [%d] %s" % (PE.code, PE.message))
|
# Copyright (C) 2017 Leandro Lisboa Penz <lpenz@lpenz.org>
# This file is subject to the terms and conditions defined in
# file 'LICENSE', which is part of this source code package.
"""Common functions used by tests"""
import tempfile
import omnilint
import omnilint.reporters as reporters
def checkthis(checkername, fileext, contents):
    """Run a single omnilint checker over *contents* and return its reports.

    *contents* is written to a temporary file whose suffix is *fileext* so
    the checker is selected by extension. The transient "file" key is
    stripped from each report so callers can compare reports across runs.
    """
    ol = omnilint.Omnilint()
    ol.checker_load(checkername)
    with tempfile.NamedTemporaryFile(suffix=fileext) as tmp:
        tmp.write(contents.encode("utf-8"))
        tmp.flush()  # make sure the checker sees the bytes on disk
        with reporters.ReporterList() as reporter:
            ol.analyse_file(reporter, tmp.name)
        for e in reporter.list:
            e.pop("file")
        return reporter.list
|
async def test1(x):
    """Coroutine that awaits *x*; used below to demo inspect.iscoroutine."""
    await x
def test2():
    """Ordinary function (not a coroutine); always returns 3."""
    value = 3
    return value
import inspect
def test3():
    """Generator function that yields the single value 1."""
    yield from (1,)
print(inspect.iscoroutine(test1(1))) |
import geargrip
import cv2
import numpy as np
from networktables import NetworkTable
# Open the MJPEG network stream (presumably an Axis camera on a robot
# network — confirm) and grab a single grayscale frame.
cam = cv2.VideoCapture("http://10.14.3.24/mjpg/video/mjpg")
if cam.isOpened():
    print("yes")
ret, frame = cam.read()
# NOTE(review): when the stream is unreachable, `ret` is False and `frame`
# is None, so cvtColor below would raise — consider guarding on `ret`.
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
cam.release()
|
# import Autoencoder
import operator
import pickle
import itertools
import math
import numpy
import logging
logger = logging.getLogger(__name__)
from sklearn.feature_extraction.text import CountVectorizer
from .DBN import train_DBN
from ..Utils import preprocess as pr
class SAmodel(object):
    """Topic model trained as a DBN-based autoencoder over binary bag-of-words features."""

    def __init__(self, hidden, max_iterations, num_topics, documents=None, file_path=None, input_data=None,
                 architecture='dbn-auto', sparsity=0.03, selectivity=0.03, df=2, max_f=2000):
        """Initialize and train the autoencoder topic model.

        hidden          -- sizes of the hidden layers
        max_iterations  -- epochs used for both pretraining and finetuning
        num_topics      -- size of the topmost softmax layer (one unit per topic)
        documents / file_path / input_data -- alternative corpus sources
        architecture    -- 'dbn-auto' (supported) or 'stacked-auto' (disabled)
        """
        # Binary bag-of-words representation shared by every transform below.
        self.vectorizer = CountVectorizer(min_df=df, max_features=max_f, binary=True)
        if file_path:
            with open(file_path) as f:
                documents = f.read().splitlines()
            # Keep only sentences longer than 3 tokens for later display.
            self.raw_documents = [document for document in documents if len(document.split()) > 3]
            documents = pr.process_sentences(documents)
            self.documents = [" ".join(document) for document in documents]
            self.features = self.vectorizer.fit_transform(self.documents)
        else:
            self.documents = documents
            self.features = self.vectorizer.fit_transform(self.documents)
        self.df = df
        self.max_iterations = max_iterations
        self.sparsity = sparsity
        self.num_topics = num_topics
        self.architecture = architecture
        self.hidden = hidden
        self.encoders = []
        self.dbn = 0
        logger.debug(self.getName())
        if self.architecture == 'stacked-auto':
            # The stacked-autoencoder path was disabled along with the
            # commented-out Autoencoder import.
            logger.error("Autoencoder not installed")
            return  # TODO: unreachable autoencoder
        elif self.architecture == 'dbn-auto':
            logger.debug('dbn-auto')
            # NOTE(review): truth-testing raises for a non-empty numpy array,
            # so callers are expected to pass None (or a plain list) here.
            if not input_data:
                input_data = self.features.toarray()
            # Inverse document-frequency style weighting, scaled up for the DBN.
            frequency_weights = float(input_data.shape[0])/input_data.sum(axis=0) * 1000
            numpy.random.shuffle(input_data)
            self.dbn = train_DBN(input_data, finetune_lr=0.1, pretraining_epochs=max_iterations,
                                 pretrain_lr=0.1, k=1, training_epochs=max_iterations, batch_size=100,
                                 hidden_layers_sizes=hidden, softmax_size=num_topics, sparsity=sparsity,
                                 selectivity=selectivity, frequency_weights=frequency_weights)
        else:
            logger.error("NO SUCH ARCHITECTURE!!!!")

    def print_topics(self, index, num_words, word_to_idx=None, filepath=None):
        """ Returns a number of words (as set by num_words) that correspond to a topic (defined by index) """
        # activate one of the softmax units
        t = numpy.zeros((1, self.num_topics))
        t[0][index] = 1000
        activations = self.reconstruct_given_hidden(t)[0]
        topics = {}
        i = 0
        # map vector to the correct words
        if not word_to_idx:
            word_to_idx = self.vectorizer.get_feature_names()
        for a in activations:
            topics[word_to_idx[i]] = a
            i += 1
        sorted_topics = sorted(topics.items(), key=operator.itemgetter(1), reverse=True)
        logger.debug("Topic %s", index)
        topic = []
        for j in range(num_words):
            logger.debug("%s:%s", sorted_topics[j][0], sorted_topics[j][1])
            topic.append(sorted_topics[j][0].__str__())
        if filepath is not None:
            with open(filepath, 'a') as f:
                # BUG FIX: `print>> f, s` is Python-2-only syntax and a
                # SyntaxError under Python 3 (which this file otherwise
                # targets, e.g. pickle.load(..., encoding='latin1')).
                s = "Topic " + index.__str__()
                print(s, file=f)
                for j in range(num_words):
                    s = sorted_topics[j][0].__str__() + ":" + sorted_topics[j][1].__str__()
                    print(s, file=f)
        return topic

    def get_topic(self, index, num_words):
        """ Returns a number of words (as set by num_words) that correspond to a topic (defined by index) """
        # activate one of the softmax units
        t = numpy.zeros((1, self.num_topics))
        t[0][index] = 1
        activations = self.reconstruct_given_hidden(t)[0]
        topics = {}
        i = 0
        # map vector to the correct words
        words = self.vectorizer.get_feature_names()
        for a in activations:
            topics[words[i]] = a
            i += 1
        sorted_topics = sorted(topics.items(), key=operator.itemgetter(1), reverse=True)
        topic = []
        for j in range(num_words):
            topic.append(sorted_topics[j][0].__str__())
        return topic

    def reconstruct_given_hidden(self, hidden):
        """ Computes output of network (i.e. reconstructed input) given the hidden activations of the network"""
        if self.architecture == 'stacked-auto':
            # recursively reconstruct the probable input given the activations of the hidden softmax layer
            for encoder in reversed(self.encoders):
                hidden = encoder.outputgivenhidden(hidden)
        elif self.architecture == 'dbn-auto':
            hidden = self.dbn.decode(hidden)
        return hidden

    def feed_forward(self, input_data):
        """ Computes the hidden layer activations given the input to the network"""
        if self.architecture == 'stacked-auto':
            # recursively compute the output of the stacked autoencoder
            for encoder in self.encoders:
                input_data = encoder.get_hidden(input_data)
        elif self.architecture == 'dbn-auto':
            input_data = self.dbn.encode(input_data)
        return input_data

    def get_topic_distribution(self, sentence):
        """ Returns the topic distribution of a single sentence """
        sent_tfidf = self.vectorizer.transform([" ".join(pr.stem_doc(sentence.split(' ')))])
        dist = self.feed_forward(sent_tfidf.toarray())[0]
        return dist

    def get_topic_distribution_batch(self, sentences):
        """ Returns the topic distribution of a batch of sentences """
        stemmed_sentences = [" ".join(pr.stem_doc(sentence.split(' '))) for sentence in sentences]
        sent_tfidf = self.vectorizer.transform(stemmed_sentences)
        dist = self.feed_forward(sent_tfidf.toarray())
        return dist

    def save(self, filepath):
        """Saves the model to the designated file path"""
        with open(filepath, 'wb') as output:
            pickle.dump(self, output, pickle.HIGHEST_PROTOCOL)

    def getIntrinisicCoherence(self, topics=None):
        """Returns the average coherence of all the topics generated by the model"""
        if not topics:
            topics = []
            for x in range(0, self.num_topics):
                topics.append(self.get_topic(x, 20))
        coherence = 0
        for topic in topics:
            for pair in itertools.combinations(topic, 2):
                coherence += self.logProbability(pair)
        return float(coherence) / len(topics)

    def logProbability(self, pair):
        """Symmetric log co-occurrence score of a word pair (add-one smoothed)."""
        num_pair = float(self.numBigrams(pair) + 1)
        num_uni_one = float(self.numUnigrams(pair[0]))
        num_uni_two = float(self.numUnigrams(pair[1]))
        return (math.log10(num_pair / num_uni_one) + math.log10(num_pair / num_uni_two)) / 2

    def numUniBigrams(self, pair):
        """Counts unigram and (smoothed) co-occurrence frequencies in one pass.

        Returns [count(pair[0]), count(pair[1]), co-occurrence count + 1].
        """
        countunione = 0
        countunitwo = 0
        countbi = 0
        for d in self.documents:
            splitdoc = d.split()
            if pair[0] in splitdoc and pair[1] in splitdoc:
                countbi += 1
            if pair[0] in d.split():
                countunione += 1
            if pair[1] in d.split():
                countunitwo += 1
        return [countunione, countunitwo, countbi + 1]

    def numBigrams(self, pair):
        """ Counts the number of times a pair of words appear in an entire corpus """
        count = 0
        for d in self.documents:
            splitdoc = d.split()
            if pair[0] in splitdoc and pair[1] in splitdoc:
                count += 1
        return count

    def numUnigrams(self, word):
        """ Counts the number of times a word appears in the entire corpus """
        count = 0
        for d in self.documents:
            if word in d.split():
                count += 1
        return count

    def getName(self):
        """ Returns paramters of the model concatenated as a string"""
        return str(self.hidden) + ',' + str(self.df) + ',' + str(self.max_iterations) + ',' + str(
            self.sparsity) + ',' + str(self.num_topics) + ',' + self.architecture

    @staticmethod
    def load(filepath):
        """Loads a pickled model; latin1 encoding works around py2-era pickles."""
        dl = None
        with open(filepath, 'rb') as input:
            dl = pickle.load(input, encoding='latin1')  # hacky 2to3
        return dl

    def sigmoid(self, z):
        """Elementwise logistic sigmoid."""
        s = 1.0 / (1.0 + numpy.exp(-1.0 * z))
        return s
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
#import json
import mysql.connector
from scrapy import item
class ShengouPipeline(object):
    """Writes scraped subscription items into the MySQL `info` table.

    Each item field arrives from the spider as a one-element list.
    """

    def __init__(self):
        # One connection/cursor per pipeline instance; failures are logged
        # to stdout rather than aborting the spider.
        try:
            self.conn = mysql.connector.connect(
                host='localhost', user='root', database='stocks', port='3306', password='123456',
                use_unicode=True)
            self.cur = self.conn.cursor()
        except Exception as e:
            print(e)

    def close_spider(self, spider):
        """Release the cursor and connection when the spider finishes."""
        try:
            print('--------关闭数据库资源----------')
            self.cur.close()
            self.conn.close()
        except Exception as e:
            print(e)

    def process_item(self, item, spider):
        """Insert one scraped row into `info`, committing immediately."""
        try:
            row = [item[key][0] for key in
                   ('xh', 'sgdm', 'zqdm', 'name', 'wsfxr', 'ssr',
                    'fxl', 'wsfxl', 'sgsx', 'fxj', 'syl', 'zql')]
            self.cur.execute("insert into info values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)", row)
            self.conn.commit()
            print('正在插入数据')
        except Exception as e:
            print(e)
class StockListPipeline(object):
    """Writes scraped stock-code items into the MySQL `stock_code` table."""

    def __init__(self):
        # One connection/cursor per pipeline instance; failures are logged
        # to stdout rather than aborting the spider.
        try:
            self.conn = mysql.connector.connect(
                host='localhost', user='root', database='stocks', port='3306', password='123456',
                use_unicode=True)
            self.cur = self.conn.cursor()
        except Exception as e:
            print(e)

    def close_spider(self, spider):
        """Release the cursor and connection when the spider finishes."""
        try:
            print('--------关闭数据库资源----------')
            self.cur.close()
            self.conn.close()
        except Exception as e:
            print(e)

    def process_item(self, item, spider):
        """Normalize name/region fields and insert one (name, code, region) row."""
        # Drop anything after the first '(' in the display name.
        name = item['name'][0].split('(')[0]
        raw_region = item['region'][0]
        # NOTE(review): find() > 0 misses a match at index 0, so a region
        # string that *starts* with 'sh'/'sz' falls through to '0'.  Confirm
        # against real spider output before changing this to startswith().
        if raw_region.find('sh') > 0:
            region = 'sh'
        elif raw_region.find('sz') > 0:
            region = 'sz'
        else:
            region = '0'
        try:
            self.cur.execute("insert into stock_code values (%s,%s,%s)",
                             [name, item['code'][0], region])
            self.conn.commit()
            print('正在插入数据')
        except Exception as e:
            print(e)
#!/usr/bin/env python3
import os.path as osp
import argparse
from baselines.common.cmd_util import mujoco_arg_parser
from baselines import bench, logger
def train(env_id, num_timesteps, seed, network, r_ex_coef, r_in_coef, lr, reward_freq,
          begin_iter, model_train_num, K_model_num, regularize, selection_type):
    """Train the PPO2 ensemble variant on one normalized MuJoCo environment."""
    from baselines.common import set_global_seeds
    from baselines.common.vec_env.vec_normalize import VecNormalize
    from baselines.ppo2 import ppo2_ensemble
    from baselines.ppo2.policies import MlpPolicy
    import gym
    import tensorflow as tf
    from baselines.common.vec_env.dummy_vec_env import DummyVecEnv

    # Single-threaded TF session; GPU memory is grown on demand.
    n_cpu = 1
    tf_config = tf.ConfigProto(allow_soft_placement=True,
                               intra_op_parallelism_threads=n_cpu,
                               inter_op_parallelism_threads=n_cpu)
    tf_config.gpu_options.allow_growth = True
    tf.Session(config=tf_config).__enter__()

    def make_env():
        # Monitor writes episode stats into the logger directory.
        return bench.Monitor(gym.make(env_id), logger.get_dir())

    vec_env = VecNormalize(DummyVecEnv([make_env]))
    set_global_seeds(seed)
    if network != 'mlp':
        raise NotImplementedError
    ppo2_ensemble.learn(network=MlpPolicy, env=vec_env, nsteps=2048, nminibatches=32,
                        lam=0.95, gamma=0.99, noptepochs=10, log_interval=1,
                        ent_coef=0.0,
                        lr=lr,
                        cliprange=0.2,
                        total_timesteps=num_timesteps,
                        r_ex_coef=r_ex_coef,
                        beta=r_in_coef,
                        reward_freq=reward_freq,
                        begin_iter=begin_iter,
                        model_train=model_train_num,
                        K=K_model_num,
                        alpha=regularize,
                        index_type=selection_type)
def main():
    """Parse command-line arguments, configure the logger, and run training."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--env', help='Environment ID', default='Walker2d-v2')
    parser.add_argument('--seed', help='RNG seed', type=int, default=0)
    parser.add_argument('--network', help='Policy architecture', default='mlp')
    parser.add_argument('--num-timesteps', type=int, default=int(1E6))
    parser.add_argument('--r-ex-coef', type=float, default=0)
    parser.add_argument('--r-in-coef', type=float, default=1)
    parser.add_argument('--lr', type=float, default=3E-4)
    parser.add_argument('--reward-freq', type=int, default=40)
    parser.add_argument('--begin-iter', type=int, default=40)
    parser.add_argument('--model-train-num', type=int, default=4)
    parser.add_argument('--K-model-num', type=int, default=1)
    parser.add_argument('--regularize', type=float, default=0.01)
    parser.add_argument('--selection-type', choices=['min', 'max', 'avg', 'ens'], default='min')
    args = parser.parse_args()
    # FIX: dropped the redundant bare logger.configure() call (it set up a
    # default log directory that was immediately reconfigured below) and the
    # duplicate `import argparse` (already imported at module level).
    logger.configure(osp.join(osp.abspath(osp.dirname(__file__)), 'Results_ensemble', 'begin_iter=' + str(args.begin_iter),
                              args.env + '_freq' + str(args.reward_freq),
                              'r_ex_coef=' + str(args.r_ex_coef) + ', r_in_coef=' + str(args.r_in_coef)
                              + ', K_model_num=' + str(args.K_model_num) + ', model_train_num=' + str(args.model_train_num)
                              + ', regularize=' + str(args.regularize) + ', selection_type=' + str(args.selection_type),
                              'iter' + str(args.seed)))
    train(args.env, num_timesteps=args.num_timesteps, seed=args.seed, network=args.network,
          r_ex_coef=args.r_ex_coef, r_in_coef=args.r_in_coef,
          lr=args.lr, reward_freq=args.reward_freq,
          begin_iter=args.begin_iter, model_train_num=args.model_train_num, K_model_num=args.K_model_num,
          regularize=args.regularize, selection_type=args.selection_type)
if __name__ == '__main__':
    main()
|
from movements.forms import MovementsForm
from sqlite3.dbapi2 import connect
from flask_wtf import form
from movements import app
from flask import render_template, request, redirect, url_for
import csv
import sqlite3
DBFILE = app.config['DBFILE']
def DBconsulta(query, params=()):
    """Execute *query* against DBFILE and return its rows.

    Returns [] for statements that produce no rows (INSERT/UPDATE/DELETE, or
    an empty SELECT); otherwise a list of {column_name: value} dicts.
    """
    conn = sqlite3.connect(DBFILE)
    try:
        c = conn.cursor()
        c.execute(query, params)
        conn.commit()
        filas = c.fetchall()
        # FIX: read the column names while the connection is still open; the
        # original accessed c.description after conn.close(), which relies on
        # an implementation detail of the sqlite3 module.
        columnNames = [col[0] for col in c.description] if c.description else []
    finally:
        # FIX: the connection now closes even when execute() raises.
        conn.close()
    if len(filas) == 0:
        return filas
    return [dict(zip(columnNames, fila)) for fila in filas]
@app.route('/')
def listaIngresos():
    """Show every movement together with the running total."""
    ingresos = DBconsulta('SELECT fecha, concepto, cantidad, id FROM Movimientos;')
    total = sum(float(ingreso['cantidad']) for ingreso in ingresos)
    return render_template('movementsList.html', datos=ingresos, total=total)
@app.route('/creaalta', methods=['GET','POST'])
def nuevoIngreso():
    """GET: show the new-movement form.  POST: insert the movement and redirect."""
    form = MovementsForm()
    if request.method != 'POST':
        return render_template("alta.html", form=form)
    nuevo = (
        float(request.form.get('cantidad')),
        request.form.get('concepto'),
        request.form.get('fecha'),
    )
    DBconsulta('INSERT INTO Movimientos (cantidad, concepto, fecha) VALUES (?, ?, ?)', nuevo)
    return redirect(url_for('listaIngresos'))
@app.route('/modifica/<identificador>', methods=['GET','POST'])
def modificaIngresos(identificador):
    """GET: show the edit form for one movement.  POST: persist the changes."""
    if request.method == 'GET':
        filas = DBconsulta('SELECT fecha, concepto, cantidad, id FROM Movimientos WHERE id=?', (identificador,))
        return render_template("modifica.html", registro=filas[0])
    DBconsulta('UPDATE Movimientos SET fecha=?, concepto=?, cantidad=? WHERE id=?',
               (request.form.get('fecha'),
                request.form.get('concepto'),
                float(request.form.get('cantidad')),
                identificador))
    return redirect(url_for('listaIngresos'))
@app.route('/delete/<identificador>', methods=['GET', 'POST'])
def deleteRegistro(identificador):
    """GET: show the delete-confirmation page.  POST: delete the row and redirect."""
    conn = sqlite3.connect(DBFILE)
    c = conn.cursor()
    if request.method == 'GET':
        c.execute('SELECT fecha, concepto, cantidad, id FROM Movimientos WHERE id=?', (identificador,))
        datos = c.fetchone()
        conn.close()
        return render_template('delete.html', registro = datos)
    else:
        # BUG FIX: the statement was missing FROM (a sqlite syntax error) and
        # the parameter was passed as a bare string instead of a 1-tuple, so
        # the delete could never succeed.
        c.execute('DELETE FROM Movimientos WHERE id=?', (identificador,))
        conn.commit()
        conn.close()
        return redirect(url_for('listaIngresos'))
class Constants(object):
    """Application-wide constant names for configuration files and sessions."""
    # Configuration file names.
    CONFIG_FILE_NAME = 'config.ini'
    DEFAULT_CONFIG_FILE_NAME = 'default_config.ini'
    # Section/option names inside the config file.
    CONFIG_SECTION_GLOBAL = 'Global'
    CONFIG_OPTION_ACTIVE_USER = 'ActiveUser'
    CONFIG_OPTION_ACTIVE_MODE = 'ActiveMode'
    # Per-user entries are stored as '<prefix><username>'.
    CONFIG_USERNAME_PREFIX = 'User_'
    CONFIG_OPTION_PASSWORD_PREFIX = 'Password_'
    # Accepted username length range.
    USERNAME_MIN_LENGTH = 1
    USERNAME_MAX_LENGTH = 10
    # presumably an iteration count for session key stretching — TODO confirm
    SESSION_ITERATIONS = 50
    # strftime format used when timestamping session files.
    SESSION_FILE_DATETIME_FORMAT = '%Y-%m-%d-%H-%M-%S'
|
# by Kush (more added in debugging by Stuart)
from main import *
import pickle
def askForPeriod(periodNum):
    """Prompt for (and return) the class the user has in the given period."""
    print("What class is your period "+periodNum)
    return input()
def setup(adminYN):
    """Interactively create an account and persist it to userinfo.txt.

    adminYN -- 'yes'/'y' creates an administrator; anything else creates a
    regular user with a 7-period class schedule.
    """
    userInfo = pickle.load(open('userinfo.txt', 'rb'))
    if adminYN == 'yes' or adminYN == 'y':
        print('You are setting up as an administrator')
    # BUG FIX: the username/password prompts used to live inside the admin
    # branch above, so non-admin setup crashed with NameError when building
    # the user object below.
    print("What do you want to be your username")
    username = input()
    print("What do you want to be your password")
    password = input()
    if adminYN == "yes" or adminYN == "y":
        userInfo[username] = admin(username, password)
    else:
        # Ask for all seven periods and bundle them into a schedule.
        periods = [askForPeriod(str(num)) for num in range(1, 8)]
        userSchedule = schedule(*periods)
        userInfo[username] = user(username, password, userSchedule)
    print('Saving...')
    pickle.dump(userInfo, open('userinfo.txt', 'wb'))
|
class Solution(object):
    # Most recently visited node during the pre-order walk.
    prev = None

    def flatten(self, root):
        """
        :type root: TreeNode
        :rtype: void Do not return anything, modify root in-place instead.
        """
        if not root:
            return
        # Pre-order: remember this node, flatten the left subtree, then splice
        # the saved right subtree after the left chain's tail (self.prev).
        self.prev = root
        self.flatten(root.left)
        detached_right = root.right
        root.right, root.left = root.left, None
        self.prev.right = detached_right
        self.flatten(detached_right)
|
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 28 14:28:36 2019
@author: ZhuangChi
"""
"""
给定 n 个非负整数 a1,a2,...,an,每个数代表坐标中的一个点 (i, ai) 。
在坐标内画 n 条垂直线,垂直线 i 的两个端点分别为 (i, ai) 和 (i, 0)。找出其中的两条线,
使得它们与 x 轴共同构成的容器可以容纳最多的水。
说明:你不能倾斜容器,且 n 的值至少为 2。
示例:
输入: [1,8,6,2,5,4,8,3,7]
输出: 49
"""
height = [1,8,6,2,5,4,8,3,7]
"""
下面这种应该是没错的 仅仅是太慢了...
"""
class Solution(object):
    def maxArea(self, height):
        """Brute force: try every pair of lines — O(n^2) time, O(1) space."""
        best = 0
        n = len(height)
        for left in range(n):
            for right in range(left + 1, n):
                area = (right - left) * min(height[left], height[right])
                if area > best:
                    best = area
        return best
"""
有啥更快的办法呢....头疼...
自己没想出来 参考官方的解题思路
"""
class Solution(object):
    def maxArea(self, height):
        """
        :type height: List[int]
        :rtype: int

        Two-pointer scan: always advance the shorter side, since moving the
        taller one can never increase the contained area — O(n) time.
        """
        lo, hi = 0, len(height) - 1
        best = 0
        while lo < hi:
            width = hi - lo
            if height[lo] < height[hi]:
                depth = height[lo]
                lo += 1
            else:
                depth = height[hi]
                hi -= 1
            best = max(best, width * depth)
        return best
from functools import reduce
# Read n integers and sum the elements kept by the filter below.
n=int(input("n="))
a=[int(input("a="))for i in range(n)]
# NOTE(review): the name says "positive elements", but the predicate keeps any
# x whose floor-quotient x//3 is nonzero (it drops only 0, 1 and 2, and keeps
# negatives) — confirm whether `lambda x: x > 0` was intended.
positive_el=filter(lambda x: x//3,a)
# NOTE(review): shadows the builtin `sum`; reduce() raises TypeError when the
# filter yields no elements.
sum=reduce((lambda x,sum: x+sum ),positive_el)
print(sum)
|
#!/usr/bin/python
#-*- encoding:utf-8 -*-
import jieba
def splitSentence(inputFile, outputFile):
    """Segment each line of *inputFile* with jieba and write the words,
    separated by '/', one line per input line, to *outputFile*."""
    # BUG FIX: the original called str.decode()/str.encode() on text-mode file
    # data (a Python-2 idiom) which raises AttributeError/TypeError on
    # Python 3, and never closed the files on error.  Opening with an explicit
    # UTF-8 encoding reproduces the old decode('utf-8', 'ignore') behaviour.
    with open(inputFile, 'r', encoding='utf-8', errors='ignore') as fin, \
         open(outputFile, 'w', encoding='utf-8') as fout:
        for eachLine in fin:
            line = eachLine.strip()
            wordList = list(jieba.cut(line))
            outStr = ''
            for word in wordList:
                outStr += word
                outStr += '/'
            fout.write(outStr.strip() + '\n')
splitSentence('myInput.txt','myOutput.txt') |
# -*- coding: utf-8 -*-
###
# Copyright (c) 2010 by Elián Hanisch <lambdae2@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
###
###
#
#
# History:
# 2010-01-14
# version 0.1: new script!
#
###
# WeeChat injects the `weechat` module only when it loads the script; record
# whether the import worked so the registration guard below can skip cleanly
# when the file is run outside WeeChat.
try:
    import weechat
    WEECHAT_RC_OK = weechat.WEECHAT_RC_OK
    import_ok = True
except ImportError:
    import_ok = False
# Script metadata consumed by weechat.register() in the main guard below.
SCRIPT_NAME = "flip"
SCRIPT_AUTHOR = "Elián Hanisch <lambdae2@gmail.com>"
SCRIPT_VERSION = "0.1"
SCRIPT_LICENSE = "GPL3"
SCRIPT_DESC = "Flips text upside down."
SCRIPT_COMMAND = "flip"
# Maps each character to its visual upside-down Unicode counterpart; wrapped
# in TwoWayDict at load time so lookups work in both directions, and unmapped
# characters fall through unchanged.
fliptable = {
    # Upper case
    u'A' : u'\N{FOR ALL}',
    u'B' : u'\N{GREEK SMALL LETTER XI}',
    u'C' : u'\N{ROMAN NUMERAL REVERSED ONE HUNDRED}',
    u'D' : u'\N{LEFT HALF BLACK CIRCLE}',
    u'E' : u'\N{LATIN CAPITAL LETTER REVERSED E}',
    u'F' : u'\N{TURNED CAPITAL F}',
    u'G' : u'\N{TURNED SANS-SERIF CAPITAL G}',
    u'J' : u'\N{LATIN SMALL LETTER LONG S}',
    u'K' : u'\N{RIGHT NORMAL FACTOR SEMIDIRECT PRODUCT}',
    u'L' : u'\N{LATIN CAPITAL LETTER TURNED L}',
    u'M' : u'W',
    u'N' : u'\N{LATIN LETTER SMALL CAPITAL REVERSED N}',
    #u'P' : u'\N{CYRILLIC CAPITAL LETTER KOMI DE}',
    u'P' : u'd',
    u'Q' : u'\N{GREEK CAPITAL LETTER OMICRON WITH TONOS}',
    u'R' : u'\N{LATIN LETTER SMALL CAPITAL TURNED R}',
    u'T' : u'\N{UP TACK}',
    u'U' : u'\N{INTERSECTION}',
    u'V' : u'\N{LATIN CAPITAL LETTER TURNED V}',
    u'Y' : u'\N{TURNED SANS-SERIF CAPITAL Y}',
    # Lower case
    u'a' : u'\N{LATIN SMALL LETTER TURNED A}',
    u'b' : u'q',
    u'c' : u'\N{LATIN SMALL LETTER OPEN O}',
    u'd' : u'p',
    u'e' : u'\N{LATIN SMALL LETTER TURNED E}',
    u'f' : u'\N{LATIN SMALL LETTER DOTLESS J WITH STROKE}',
    u'g' : u'\N{LATIN SMALL LETTER B WITH TOPBAR}',
    u'h' : u'\N{LATIN SMALL LETTER TURNED H}',
    u'i' : u'\N{LATIN SMALL LETTER DOTLESS I}',
    u'j' : u'\N{LATIN SMALL LETTER R WITH FISHHOOK}',
    u'k' : u'\N{LATIN SMALL LETTER TURNED K}',
    u'l' : u'\N{LATIN SMALL LETTER ESH}',
    u'm' : u'\N{LATIN SMALL LETTER TURNED M}',
    u'n' : u'u',
    u'r' : u'\N{LATIN SMALL LETTER TURNED R}',
    u't' : u'\N{LATIN SMALL LETTER TURNED T}',
    u'v' : u'\N{LATIN SMALL LETTER TURNED V}',
    u'w' : u'\N{LATIN SMALL LETTER TURNED W}',
    u'y' : u'\N{LATIN SMALL LETTER TURNED Y}',
    # Numbers
    u'3' : u'\N{LATIN CAPITAL LETTER OPEN E}',
    u'4' : u'\N{CANADIAN SYLLABICS YA}',
    u'6' : u'9',
    u'7' : u'\N{LATIN CAPITAL LETTER L WITH MIDDLE TILDE}',
    # Misc
    u'!' : u'\N{INVERTED EXCLAMATION MARK}',
    u'"' : u'\N{DOUBLE LOW-9 QUOTATION MARK}',
    u'&' : u'\N{TURNED AMPERSAND}',
    u'\'': u',',
    u'(' : u')',
    u'.' : u'\N{DOT ABOVE}',
    u'/' : u'\\',
    u';' : u'\N{ARABIC SEMICOLON}',
    u'<' : u'>',
    u'?' : u'\N{INVERTED QUESTION MARK}',
    u'[' : u']',
    u'_' : u'\N{OVERLINE}',
    u'{' : u'}',
    u'\N{UNDERTIE}' : u'\N{CHARACTER TIE}',
    u'\N{LEFT SQUARE BRACKET WITH QUILL}' : u'\N{RIGHT SQUARE BRACKET WITH QUILL}',
    u'\N{THEREFORE}' : u'\N{BECAUSE}',
    # Spanish
    u'\N{LATIN SMALL LETTER N WITH TILDE}' : u'\N{LATIN SMALL LETTER U WITH TILDE BELOW}',
    }
### Classes ###
class TwoWayDict(dict):
    """Dict that also resolves reverse lookups; unknown keys map to themselves."""
    def __init__(self, d):
        dict.__init__(self, d)
        # Snapshot the forward keys first so a reverse entry never clobbers an
        # explicit forward mapping.
        keys = set(d.keys())
        # FIX: items() instead of the Python-2-only iteritems(), so the script
        # also loads under Python 3; behaviour is identical on Python 2.
        for k, v in d.items():
            if v not in keys:
                self[v] = k
    def __getitem__(self, key):
        try:
            return dict.__getitem__(self, key)
        except KeyError:
            # Characters without a flipped counterpart pass through unchanged.
            return key
### Commands
def cmd_flip(data, buffer, args):
    """Flips text."""
    if not args:
        return WEECHAT_RC_OK
    # Python-2 semantics: `args` arrives as a UTF-8 byte string from WeeChat.
    # Flip each character through the table, then reverse the whole string.
    flipped_chars = [fliptable[ch] for ch in args.decode('utf-8')]
    flipped_chars.reverse()
    flipped = u''.join(flipped_chars)
    # Replace the buffer's input line with the flipped text.
    weechat.buffer_set(buffer, 'input', flipped.encode('utf-8'))
    return WEECHAT_RC_OK
### Main ###
# Register with WeeChat only when actually loaded by it (import_ok).
if __name__ == '__main__' and import_ok and \
        weechat.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, SCRIPT_LICENSE, \
        SCRIPT_DESC, '', ''):
    # cmd_flip.__doc__ becomes the /help text shown for the command.
    weechat.hook_command(SCRIPT_COMMAND, cmd_flip.__doc__, "text", "", '', 'cmd_flip', '')
    #for test all chars, change False to True
    if False:
        L = []
        for k, v in fliptable.iteritems():
            L.append(u'%s %s' %(k, v))
        u = u' '.join(L)
        s = u.encode('utf-8')
        weechat.prnt('', s)
    # NOTE(review): the two-way wrapping happens only inside this guard, so
    # fliptable stays one-directional when the module is merely imported.
    fliptable = TwoWayDict(fliptable)
# vim:set shiftwidth=4 tabstop=4 softtabstop=4 expandtab textwidth=100:
|
import random, os, sys
import numpy as np
from tensorflow.keras import backend as K
from tensorflow.keras.models import *
from tensorflow.keras.layers import *
from tensorflow.keras.callbacks import *
from tensorflow.keras.initializers import *
import tensorflow as tf
from tensorflow.python.keras.layers import Layer
try:
from dataloader import TokenList, pad_to_longest
# for transformer
except: pass
#
from app.drl.scaled_dot_product_attention import ScaledDotProductAttention
from app.drl.layer_normalization import LayerNormalization
class MultiHeadAttention(Layer):
    """Multi-head attention layer.

    mode 0 - one big projection matrix per Q/K/V shared by all heads, faster;
    mode 1 - one projection per head, clearer implementation.
    """
    def __init__(self, n_head, d_model, d_k, d_v, dropout, mode=0, use_norm=True):
        # FIX: Keras Layer bookkeeping must be initialised before sub-layers
        # are assigned as attributes.
        super(MultiHeadAttention, self).__init__()
        self.mode = mode
        self.n_head = n_head
        self.d_k = d_k
        self.d_v = d_v
        self.dropout = dropout
        if mode == 0:
            # Project all heads at once: [..., d_model] -> [..., n_head*d_k].
            self.qs_layer = Dense(n_head*d_k, use_bias=False)
            self.ks_layer = Dense(n_head*d_k, use_bias=False)
            self.vs_layer = Dense(n_head*d_v, use_bias=False)
        elif mode == 1:
            self.qs_layers = []
            self.ks_layers = []
            self.vs_layers = []
            for _ in range(n_head):
                self.qs_layers.append(TimeDistributed(Dense(d_k, use_bias=False)))
                self.ks_layers.append(TimeDistributed(Dense(d_k, use_bias=False)))
                self.vs_layers.append(TimeDistributed(Dense(d_v, use_bias=False)))
        self.attention = ScaledDotProductAttention(d_model)
        self.layer_norm = LayerNormalization() if use_norm else None
        self.w_o = TimeDistributed(Dense(d_model))

    def __call__(self, q, k, v, mask=None):
        """Returns (output, attention_weights)."""
        d_k, d_v = self.d_k, self.d_v
        n_head = self.n_head
        if self.mode == 0:
            qs = self.qs_layer(q)  # [batch_size, len_q, n_head*d_k]
            ks = self.ks_layer(k)
            vs = self.vs_layer(v)

            def reshape1(x):
                # Fold heads into the batch dimension:
                # [batch, len, n_head*d_k] -> [n_head*batch, len, d_k]
                s = tf.shape(x)
                x = tf.reshape(x, [s[0], s[1], n_head, d_k])
                x = tf.transpose(x, [2, 0, 1, 3])
                x = tf.reshape(x, [-1, s[1], d_k])
                return x
            qs = Lambda(reshape1)(qs)
            ks = Lambda(reshape1)(ks)
            vs = Lambda(reshape1)(vs)
            if mask is not None:
                # Repeat the mask once per head along the batch axis.
                mask = Lambda(lambda x: K.repeat_elements(x, n_head, 0))(mask)
            # BUG FIX: a debugging leftover printed head/attn here and then
            # overwrote them with the raw qs/ks projections, discarding the
            # attention result entirely.
            head, attn = self.attention(qs, ks, vs, mask=mask)

            def reshape2(x):
                # Unfold heads back out of the batch dimension:
                # [n_head*batch, len, d_v] -> [batch, len, n_head*d_v]
                s = tf.shape(x)
                x = tf.reshape(x, [n_head, -1, s[1], s[2]])
                x = tf.transpose(x, [1, 2, 0, 3])
                x = tf.reshape(x, [-1, s[1], n_head*d_v])
                return x
            head = Lambda(reshape2)(head)
        elif self.mode == 1:
            heads = []
            attns = []
            for i in range(n_head):
                qs = self.qs_layers[i](q)
                ks = self.ks_layers[i](k)
                vs = self.vs_layers[i](v)
                head, attn = self.attention(qs, ks, vs, mask)
                heads.append(head)
                attns.append(attn)
            head = Concatenate()(heads) if n_head > 1 else heads[0]
            attn = Concatenate()(attns) if n_head > 1 else attns[0]
        outputs = self.w_o(head)
        outputs = Dropout(self.dropout)(outputs)
        if not self.layer_norm:
            return outputs, attn
        return self.layer_norm(outputs), attn
from abc import ABC, abstractmethod
from typing import List, Tuple
import copy
from moviepy.editor import VideoClip,CompositeVideoClip
import mugen.utility as util
import mugen.video.sizing as v_sizing
import mugen.video.effects as v_effects
from mugen.mixins.Filterable import Filterable
from mugen.mixins.Persistable import Persistable
from mugen.video.effects import VideoEffectList
from mugen.video.constants import LIST_3D
from mugen.video.sizing import Dimensions
import cv2
class Segment(Filterable, Persistable, ABC):
"""
A segment of content in a video.
Simulates a wrapper for moviepy's VideoClip class.
Attributes
----------
effects
A list of effects to apply to the segment when composed
"""
effects: VideoEffectList
DEFAULT_VIDEO_FPS = 24
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.effects = VideoEffectList()
def __repr__(self):
return f"<{self.__class__.__name__}: {self.name}>, duration: {self.duration}>"
def __copy__(self):
"""
Override copy to avoid causing conflicts with custom pickling
"""
cls = self.__class__
new_segment = cls.__new__(cls)
new_segment.__dict__.update(self.__dict__)
return new_segment
def __deepcopy__(self, memo):
return self.copy()
def copy(self) -> 'Segment':
new_segment = super().copy()
# Deepcopy effects
new_segment.effects = copy.deepcopy(self.effects)
return new_segment
def ipython_display(self, *args, **kwargs):
"""
Fixes inheritance naming issue with moviepy's ipython_display
"""
seg_copy = self.copy()
# Class should also always be set to VideoClip for expected video display
seg_copy.__class__ = VideoClip().__class__
return seg_copy.ipython_display(*args, **kwargs)
@property
def dimensions(self) -> Dimensions:
return Dimensions(self.w, self.h)
@property
def aspect_ratio(self) -> float:
return self.w / self.h
@property
def resolution(self) -> int:
return self.w * self.h
@property
def duration_time_code(self) -> str:
return util.seconds_to_time_code(self.duration)
@property
def first_frame(self) -> LIST_3D:
return self.get_frame(t=0)
@property
def middle_frame(self) -> LIST_3D:
return self.get_frame(t=self.duration / 2)
@property
def last_frame(self) -> LIST_3D:
return self.get_frame(t=self.duration)
@property
def first_last_frames(self) -> List[LIST_3D]:
return [self.first_frame, self.last_frame]
@property
def first_middle_last_frames(self) -> List[LIST_3D]:
return [self.first_frame, self.middle_frame, self.last_frame]
def crop_to_aspect_ratio(self, aspect_ratio: float) -> 'Segment':
"""
Returns
-------
A new Segment, cropped as necessary to reach specified aspect ratio
"""
segment = self.copy()
if segment.aspect_ratio != aspect_ratio:
# Crop video to match desired aspect ratio
x1, y1, x2, y2 = v_sizing.crop_coordinates_for_aspect_ratio(segment.dimensions,
aspect_ratio)
segment = segment.crop(x1=x1, y1=y1, x2=x2, y2=y2)
return segment
def crop_scale(self, dimensions: Tuple[int, int]) -> 'Segment':
    """
    Fit this segment into the given (width, height), filling leftover space
    with blurred, stretched copies of the segment itself ("blurred bars").

    Returns
    -------
    A new Segment, cropped and/or scaled as necessary to reach specified dimensions

    NOTE(review): this file's indentation was lost; the nesting of the
    aspect-ratio branches below was reconstructed and should be confirmed
    against version control.
    """
    segment = self.copy()
    dimensions = Dimensions(*dimensions)

    # Box blur applied to the background padding; a Gaussian variant was
    # tried earlier and kept for reference.
    def blur(image):
        #return cv2.GaussianBlur(image.astype(float),(99,99),0)
        return cv2.blur(image.astype(float), (30, 30) , 0)

    #if segment.aspect_ratio != dimensions.aspect_ratio:
    # Crop segment to match aspect ratio
    #segment = segment.crop_to_aspect_ratio(dimensions.aspect_ratio)
    #if segment.dimensions != dimensions:
    # Resize segment to reach final dimensions
    #segment = segment.resize(dimensions)
    replace_width = dimensions.width
    replace_height = dimensions.height
    # NOTE(review): exact float comparison of aspect ratios -- near-equal
    # ratios still take the padding path.
    if segment.aspect_ratio != replace_width/replace_height:
        ##########################################Below 1 AR##################################################
        # Portrait / square sources: pad left and right with blurred halves.
        if segment.aspect_ratio <= 1:
            #print("below 1")
            if segment.size[0] != replace_width:
                segment = segment.resize(width=replace_width)
            if segment.size[1] != replace_height:
                segment = segment.resize(height=replace_height)
            segment = segment.set_position("center")
            # Left and right halves of the clip become the side padding.
            background1 = segment.crop(x1=0,width = (segment.w/2))
            background2 = segment.crop(x1=(segment.w/2),width = (segment.w/2))
            if segment.aspect_ratio != 1:
                #print("Not 1:1")
                # Stretch each half to fill the remaining horizontal space
                # (the +1 avoids a 1px gap from integer rounding).
                background1 = background1.resize(width=(replace_width-segment.w)/2)
                background2 = background2.resize(width=((replace_width-segment.w)/2)+1)
            background1 = background1.set_position(("left",'center')).fl_image( blur )
            background2 = background2.set_position(("right",'center')).fl_image( blur )
            segment = CompositeVideoClip([background1,background2,segment], size=(replace_width,replace_height))
            # CompositeVideoClip drops the custom attribute; restore it.
            segment.effects = self.effects
        #########################################Above 1080 ratio###############################################
        # Wider than the target ratio: pad top and bottom instead.
        elif segment.aspect_ratio > round(replace_width/replace_height,2):
            #print("above 1.7")
            if segment.size[1] != replace_height:
                segment = segment.resize(height=replace_height)
            if segment.size[0] != replace_width:
                segment = segment.resize(width=replace_width)
            test = (replace_height-segment.h)/2
            segment = segment.set_position("center")
            # Top and bottom slivers of the clip become the padding bars.
            background1 = segment.crop(y1=0,height = ((replace_height-segment.h)/2))
            background2 = segment.crop(y1=segment.h-test,height = test)
            background1 = background1.set_position(('center','top')).fl_image( blur )
            background2 = background2.set_position(('center','bottom')).fl_image( blur )
            segment = CompositeVideoClip([background1,background2,segment], size=(replace_width,replace_height))
            segment.effects = self.effects
        ######################################other#######################################################
        # Landscape but narrower than the target ratio: side padding again.
        elif segment.aspect_ratio > 1 and segment.aspect_ratio < round(replace_width/replace_height,2):
            #print('midway between')
            if segment.size[0] != replace_width:
                segment = segment.resize(width=replace_width)
            if segment.size[1] != replace_height:
                segment = segment.resize(height=replace_height)
            segment = segment.set_position("center")
            background1 = segment.crop(x1=0,width = (segment.w/2))
            background2 = segment.crop(x1=(segment.w/2),width = (segment.w/2))
            background1 = background1.set_position(("left",'center')).fl_image( blur )
            background2 = background2.set_position(("right",'center')).fl_image( blur )
            segment = CompositeVideoClip([background1,background2,segment], size=(replace_width,replace_height))
            segment.effects = self.effects
    #############################################################################################
    # Aspect already matches: just scale to the exact target size if needed.
    if segment.w != replace_width and segment.h != replace_height:
        segment = segment.resize((replace_width,replace_height))
        #print("On Aspect, too big or small")
    return segment
def apply_effects(self) -> 'Segment':
    """
    Compose the segment by applying every queued effect in order.

    Returns
    -------
    A new segment with all effects applied
    """
    result = self.copy()
    for fx in self.effects:
        if isinstance(fx, v_effects.FadeIn):
            result = result.fadein(fx.duration, fx.rgb_color)
            if result.audio:
                # Fade the audio track in step with the video.
                result.audio = result.audio.audio_fadein(fx.duration)
        elif isinstance(fx, v_effects.FadeOut):
            result = result.fadeout(fx.duration, fx.rgb_color)
            if result.audio:
                result.audio = result.audio.audio_fadeout(fx.duration)
    return result


# Give composited clips the same effect pipeline as plain segments.
CompositeVideoClip.apply_effects = apply_effects
@property
@abstractmethod
def name(self) -> str:
    """
    Human-readable name for the segment
    """
    pass

@abstractmethod
def trailing_buffer(self, duration) -> 'Segment':
    """
    Build a short continuation of this segment, used to overlap with the
    following segment during a crossfade.

    Parameters
    ----------
    duration
        duration of the buffer

    Returns
    -------
    A new segment picking up where this one left off, for use in crossfades
    """
    pass
|
# -*- coding: utf-8 -*-
#!/usr/bin/python
import sys
from PyQt4 import QtGui, QtCore
from datetime import datetime, date
class Window(QtGui.QMainWindow):
    """Main window: three hero-name labels plus a countdown button."""

    def __init__(self):
        super(Window, self).__init__()
        self.setGeometry(50, 50, 500, 500)
        self.setWindowTitle(" ¡Viva Mexico!")
        self.setWindowIcon(QtGui.QIcon('banderaMexico.png'))
        self.label()
        self.creaBoton()
        self.setVisible(True)

    def creaBoton(self):
        """Create the countdown button at a fixed position."""
        self.boton = Boton("Oprimeme", 10, 150, self)

    def label(self):
        """Stack the three 200x40 name labels at the top-left corner."""
        captions = (
            ("Juan Aldama Gonzalez", 0),
            ("Miguel Hidalgo y Costilla", 20),
            ("Jose Maria Morelos y Pavon", 40),
        )
        for text, y_pos in captions:
            lbl = QtGui.QLabel(text, self)
            lbl.move(0, y_pos)
            lbl.resize(200, 40)
            lbl.show()
class Boton(QtGui.QPushButton):
    """Push button that shows the countdown to September 15 when clicked."""

    def __init__(self, texto, ancho, alto, padre):
        QtGui.QPushButton.__init__(self, texto, padre)
        self.move(ancho, alto)
        self.clicked.connect(self.apretar)

    def apretar(self):
        """Slot: swap the caption for the remaining time and enlarge."""
        self.setText("Faltan " + calculaDias() + " para el 15 de septiembre")
        self.resize(300, 100)
def calculaDias():
    """Return, as a string, the time remaining until the next September 15.

    Returns "0" on September 15 itself; otherwise the str() of a datetime
    subtraction, e.g. "12 days, 3:04:05".
    """
    hoy = datetime.now()  # current local date/time
    if hoy.month == 9 and hoy.day == 15:
        return "0"
    # Target is Sept 15 of this year if it is still ahead, else next year.
    if hoy.month < 9 or (hoy.month == 9 and hoy.day < 15):
        objetivo = datetime(hoy.year, 9, 15)
    else:
        objetivo = datetime(hoy.year + 1, 9, 15)
    return str(objetivo - hoy)
def run():
    """Create the Qt application and main window, then enter the event loop."""
    app = QtGui.QApplication(sys.argv)
    GUI = Window ()
    # exec_() blocks until the window closes; its status becomes ours.
    sys.exit(app.exec_())

# Launch the GUI when the module is executed.
run()
# Read two labels and their counts, then decrement the count of the label
# named on the third input line and print both resulting counts.
s, t = map(str,input().split())
num_s, num_t = map(int,input().split())
del_target = input()
data = {s:num_s,t:num_t}
data[del_target] = data[del_target]-1
print(data[s],data[t])
|
#!/usr/bin/env python2
from decimal import Decimal
from enum import Enum
import random
SAPLING_ACTIVATION_HEIGHT = 400
SIMULATION_END_HEIGHT = 600
ZATOSHIS_PER_ZEC = 100000000
ZATOSHIS_PER_BLOCK = 10 * ZATOSHIS_PER_ZEC
MIN_COINBASE_DISTRIBUTION = ZATOSHIS_PER_ZEC // 20
SHIELDED_PROBABILITY = 0.2 # Assume 20% of coinbase txs are shielded
# TODO Add tiers of users
NUM_USERS = 10
class TxType(Enum):
    # Single-character pool tags used in the serialized CSV output.
    transparent = 't'
    sprout = 'x'
    sapling = 'z'
class TxInput(object):
    """Reference to a spendable prevout: (pool type, source txid, index)."""

    def __init__(self, tx_type, prev_txid, prev_index):
        self.tx_type = tx_type
        self.prev_txid = prev_txid
        self.prev_index = prev_index

    def __str__(self):
        # JSON-ish rendering used in the blockchain CSV dump.
        fields = (self.tx_type.value, self.prev_txid, self.prev_index)
        return "{{\"tx_type\":\'{}\',\"prev_tx\":{},\"prev_index\":{}}}".format(*fields)
class TxOutput(object):
    """Value note/UTXO created by a transaction; `spent` tracks consumption."""

    def __init__(self, tx_type, index, amount):
        # The simulation never creates empty outputs.
        assert(amount > 0)
        self.tx_type = tx_type
        self.index = index
        self.amount = amount
        self.spent = False

    def __str__(self):
        # JSON-ish rendering used in the blockchain CSV dump.
        fields = (self.tx_type.value, self.index, self.amount, self.spent)
        return "{{\"tx_type\":\'{}\',\"index\":{},\"amount\":{},\"spent\":{}}}".format(*fields)
class Transaction(object):
    """A simulated transaction: spends prevouts and creates new outputs."""

    # A map from txid to transaction
    tx_map = dict()
    # Used to calculate a unique txid
    next_txid = 0

    def __init__(self, inputs, outputs):
        """Register the tx, mark its inputs' prevouts spent, check balance.

        Coinbase transactions (empty `inputs`) skip the balance check.
        Raises AssertionError on a double-spend or value imbalance.
        """
        self.txid = Transaction.get_next_txid()
        self.inputs = inputs
        self.outputs = outputs
        Transaction.tx_map[self.txid] = self
        # Check total_in == total_out for non-coinbase transactions
        if len(inputs) > 0:
            # FIX: removed a stray `global tx_map` -- the lookups below use
            # the Transaction.tx_map class attribute; no module-level
            # tx_map exists. Also renamed the loop var that shadowed the
            # built-in input().
            total_in = 0
            for tx_input in inputs:
                prevout = Transaction.tx_map[tx_input.prev_txid].get_prevout(tx_input)
                assert(not prevout.spent)
                prevout.spent = True
                total_in += prevout.amount
            total_out = sum(output.amount for output in outputs)
            assert(total_in == total_out)

    def get_prevout(self, input):
        """Return this tx's output matching the input's (pool type, index)."""
        for output in self.outputs:
            if output.tx_type == input.tx_type and output.index == input.prev_index:
                return output
        raise Exception('Prevout not found')

    @staticmethod
    def get_next_txid():
        """Return a fresh, monotonically increasing txid."""
        txid = Transaction.next_txid
        Transaction.next_txid += 1
        return txid
class User(object):
    """Holds a user's id and the outputs credited to them."""

    def __init__(self, user_id):
        self.user_id = user_id
        # List of (Transaction.txid, TxOutput) pairs.
        self.outputs = []

    def get_balance(self, tx_type):
        """Sum this user's unspent outputs of the given pool type."""
        return sum(output.amount
                   for _, output in self.outputs
                   if output.tx_type == tx_type and not output.spent)

    def add_output(self, txid, output):
        """Credit `output` (created in transaction `txid`) to this user."""
        self.outputs.append((txid, output))
class UserMigrationStrategy(object):
    """Base strategy: migrates nothing."""

    def migrate_funds(self, user, block_height):
        """Return the list of migration transactions for `user` (none here)."""
        return list()
class UniformRandomDistributionStrategy(UserMigrationStrategy):
    """Each block, tries to move a uniformly random Sprout amount to Sapling."""

    def __init__(self, lowerbound, upperbound):
        # Inclusive bounds (in zatoshis) for the per-block migration target.
        self.lowerbound = lowerbound
        self.upperbound = upperbound

    def migrate_funds(self, user, block_height):
        """Build at most one Sprout->Sapling tx for `user`; return [] or [tx]."""
        target_amount = random.randint(self.lowerbound, self.upperbound)
        actual_amount = 0
        inputs = []
        # TODO: sort amounts?
        # Greedily gather unspent Sprout notes until the target is covered.
        for txid, output in user.outputs:
            if not output.spent and output.tx_type == TxType.sprout:
                inputs.append(TxInput(output.tx_type, txid, output.index))
                actual_amount += output.amount
                if actual_amount >= target_amount:
                    break
        if len(inputs) == 0:
            return []
        # Migrate up to the target; any excess returns to Sprout as change
        # in output index 1.
        out_amount = min(target_amount, actual_amount)
        outputs = [TxOutput(TxType.sapling, 0, out_amount)]
        if actual_amount > target_amount:
            outputs.append(TxOutput(TxType.sprout, 1, actual_amount - target_amount))
        tx = Transaction(inputs, outputs)
        for output in outputs:
            user.add_output(tx.txid, output)
        return [tx]
def distribute_coinbase_transactions(users, is_sapling):
    """Create one coinbase tx splitting the block reward into randomly
    sized outputs handed to random users.

    `is_sapling` picks the shielded pool tag used for shielded outputs.
    Returns the coinbase Transaction.
    """
    outputs = []
    out_index = 0
    coinbase_amount = ZATOSHIS_PER_BLOCK
    while coinbase_amount > 0:
        # Distribute random amount to random user
        if coinbase_amount > MIN_COINBASE_DISTRIBUTION:
            distibution_amount = random.randint(0, coinbase_amount)
        else:
            # Small remainder: pay it all out so the loop terminates.
            distibution_amount = coinbase_amount
        # FIX: randint(0, n) can return 0, which previously tripped
        # TxOutput's `amount > 0` assertion -- skip empty distributions.
        if distibution_amount == 0:
            continue
        is_shielded = random.uniform(0, 1) <= SHIELDED_PROBABILITY
        tx_type = (TxType.sapling if is_sapling else TxType.sprout) if is_shielded else TxType.transparent
        outputs.append(TxOutput(tx_type, out_index, distibution_amount))
        coinbase_amount -= distibution_amount
        out_index += 1
    tx = Transaction([], outputs)
    for output in outputs:
        user = users[random.randint(0, NUM_USERS - 1)]
        user.add_output(tx.txid, output)
    return tx
def write_user_balance_file(users, is_sapling):
    """Write per-user balances, sorted by shielded balance (desc), to CSV.

    Writes user_balance_sapling.csv after Sapling activation, otherwise
    user_balance_sprout.csv; ends with a totals row and a grand total.
    """
    if is_sapling:
        sorted_users = sorted(users, key=lambda user: user.get_balance(TxType.sapling), reverse=True)
        user_balance_file_name = "user_balance_sapling.csv"
    else:
        sorted_users = sorted(users, key=lambda user: user.get_balance(TxType.sprout), reverse=True)
        user_balance_file_name = "user_balance_sprout.csv"
    # FIX: context manager guarantees the handle is closed even on error.
    with open(user_balance_file_name, "w+") as user_balance_file:
        user_balance_file.write("user_id,sprout_balance,sapling_balance,transparent_balance\n")
        total_sprout = 0
        total_sapling = 0
        total_transparent = 0
        for user in sorted_users:
            sprout = user.get_balance(TxType.sprout)
            sapling = user.get_balance(TxType.sapling)
            transparent = user.get_balance(TxType.transparent)
            # Decimal division converts zatoshis to whole ZEC exactly.
            user_balance_file.write("{},{},{},{}\n".format(
                user.user_id,
                Decimal(sprout) / ZATOSHIS_PER_ZEC,
                Decimal(sapling) / ZATOSHIS_PER_ZEC,
                Decimal(transparent) / ZATOSHIS_PER_ZEC
            ))
            total_sprout += sprout
            total_sapling += sapling
            total_transparent += transparent
        user_balance_file.write("{},{},{},{}\n".format(
            "total",
            Decimal(total_sprout) / ZATOSHIS_PER_ZEC,
            Decimal(total_sapling) / ZATOSHIS_PER_ZEC,
            Decimal(total_transparent) / ZATOSHIS_PER_ZEC
        ))
        user_balance_file.write("grand_total:{}\n".format(Decimal(total_sprout+total_sapling+total_transparent) / ZATOSHIS_PER_ZEC))
        user_balance_file.flush()
def main():
    """Run the simulation: Sprout era, then Sapling era with migration,
    then dump balances and the full blockchain CSV.

    NOTE: Python 2 module (xrange).
    """
    # Generate some users
    users = []
    for user_id in xrange(0, NUM_USERS):
        users.append(User(user_id))
    block_chain = []
    # Generate Sprout block data
    for block_height in xrange(0, SAPLING_ACTIVATION_HEIGHT):
        cb_tx = distribute_coinbase_transactions(users, False)
        block_chain.append([cb_tx])
    write_user_balance_file(users, False)
    strategy = UniformRandomDistributionStrategy(ZATOSHIS_PER_ZEC, 10 * ZATOSHIS_PER_ZEC) # Try to migrate 1 - 10 zec
    # Generate Sapling block data
    for block_height in xrange(SAPLING_ACTIVATION_HEIGHT, SIMULATION_END_HEIGHT):
        cb_tx = distribute_coinbase_transactions(users, True)
        block_txs = [cb_tx]
        for user in users:
            # NOTE(review): the whole chain is passed where a block height
            # is expected; harmless today (parameter unused) but fragile.
            for tx in strategy.migrate_funds(user, block_chain):
                block_txs.append(tx)
        block_chain.append(block_txs)
    write_user_balance_file(users, True)
    # Dump every transaction with its inputs/outputs serialized inline.
    blockchain_file = open("blockchain.csv", "w+")
    blockchain_file.write("block_height,txid,inputs,outputs\n")
    for block_height, txs in enumerate(block_chain):
        for tx in txs:
            blockchain_file.write("{},{},{},{}\n".format(
                block_height,
                tx.txid,
                "[{}]".format(','.join(str(i) for i in tx.inputs)),
                "[{}]".format(','.join(str(o) for o in tx.outputs))
            ))
    blockchain_file.flush()
    blockchain_file.close()

if __name__ == '__main__':
    main()
|
def sum(*MyData):
    """Return the total of all positional arguments (0 when none given).

    NOTE: intentionally shadows the built-in sum() -- module API.
    """
    total = 0
    for value in MyData:
        total = total + value
    return total
def average(*MyData):
    """Return the arithmetic mean of the positional arguments.

    Raises ZeroDivisionError when called with no arguments (unchanged
    from the original behavior).
    """
    total = 0
    for value in MyData:
        total += value
    # FIX(idiom): len() replaces the hand-rolled element counter.
    return total / len(MyData)
def maks(*MyData):
    """Return the largest argument; -inf when called with no arguments."""
    #maks = 0
    largest = float("-inf")
    for value in MyData:
        largest = value if value > largest else largest
    return largest
def min(*MyData):
    """Return the smallest argument; +inf when called with no arguments.

    NOTE: intentionally shadows the built-in min() -- module API.
    """
    #min = 99999999
    smallest = float("inf")
    for value in MyData:
        smallest = value if value < smallest else smallest
    return smallest
# -*- coding: utf-8 -*-
#-------------------------------------------------
# File Name: ExportUserDict
# Author : fengge
# date: 2019/3/27
# Description : 将词和原始的字典合并成新的词典
#-------------------------------------------------
from config import data_dir
import os
def ExporUserDict(words,infile=data_dir+"/Data/user.txt",outfile=data_dir+"/Data/user.txt",flag=True,n="n",v=100):
    '''
    Merge new words with the existing dictionary file into a new dictionary.

    Parameters:
    - words:   new words to add
    - infile:  path of the existing dictionary
    - outfile: path the merged dictionary is written to
    - flag:    when True append the default frequency and POS tag to each
               new word; when False write the bare word
    - n: default part-of-speech tag
    - v: default word frequency
    '''
    userWord = []
    if os.path.exists(infile):
        # FIX: also close the input file deterministically (was already a
        # with-block) and stream lines instead of readlines().
        with open(infile, 'r', encoding='utf8') as old_file:
            userWord.extend(line.strip() for line in old_file)
    suffix = ' ' + str(v) + ' ' + str(n) if flag else ""
    for w in words:
        userWord.append(w + suffix)
    # De-duplicate; note output order is unspecified (same as before).
    newWord = set(userWord)
    # FIX: context manager guarantees the output file is closed.
    with open(outfile, 'w', encoding='utf8') as new_file:
        for w in newWord:
            new_file.write(w + "\n")
|
#!/usr/bin/env python3
import struct
from scapy.packet import Packet, Raw, RawVal
from scapy.compat import raw
from scapy.fields import IntField, XIntField, StrFixedLenField, Field
class PacketFieldOffset(Packet):
    """Packet subclass that records each field's byte offset during build,
    then lets `post_value` lambdas backfill length-style fields in place.

    NOTE(review): `post_value` must be defined by subclasses; this class
    only consumes it in post_build.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Maps field descriptor -> byte offset within the built packet.
        self.fields_off = {}

    def self_build(self, field_pos_list=None):
        # Mirrors scapy's Packet.self_build, recording each field's offset
        # in self.fields_off before serializing it.
        self.raw_packet_cache = None
        p = b""
        for f in self.fields_desc:
            val = self.getfieldval(f.name)
            self.fields_off[f] = len(p)
            if isinstance(val, RawVal):
                sval = raw(val)
                p += sval
                if field_pos_list is not None:
                    # NOTE(review): bytes.encode("string_escape") is Python 2
                    # era -- this branch looks unreachable/untested; confirm.
                    field_pos_list.append((f.name, sval.encode("string_escape"), len(p), len(sval))) # noqa: E501
            else:
                p = f.addfield(self, p, val)
        # The locally built `p` is discarded; the superclass rebuilds the
        # bytes while self.fields_off keeps the offsets for post_build.
        return super().self_build(field_pos_list)

    def post_build(self, pkt, pay):
        # Fill in any `post_value` field still unset, splicing the computed
        # value at the field's recorded offset.
        for f_name, lbd in self.post_value.items():
            if not getattr(self, f_name, None):
                f = self.get_field(f_name)
                off = self.fields_off[f]
                start = f.addfield(pkt, pkt[:off], lbd(pkt, pay))
                pkt = start + pkt[len(start):]
        return pkt+pay
class AsciiIntField(StrFixedLenField):
    """
    Field containing an ASCII encoded int
    (wire form: fixed-width, zero-padded, lowercase hex string).
    """
    __slots__ = ["length"]

    def __init__(self, *args, **kwargs):
        # Width in characters of the encoded hex string; must be passed as
        # a keyword argument (KeyError otherwise).
        self.length = kwargs["length"]
        super().__init__(*args, **kwargs)

    def m2i(self, pkt, x):
        # Machine -> internal: parse the hex string.
        return int(x, 16)

    def i2m(self, pkt, x):
        # Internal -> machine: zero-padded hex of fixed width; values that
        # are already str/bytes pass through unchanged.
        if x is None:
            return b""
        elif isinstance(x, int):
            return "{:0{width}x}".format(x, width=self.length).encode()
        else:
            return x
class TestPacket(PacketFieldOffset):
    """Demo packet: full_len/self_len are backfilled by post_build."""
    fields_desc = [
        AsciiIntField("full_len", 0, length=8),
        IntField("self_len", 0),
        XIntField("dummy", default=10)
    ]
    # Lambdas consumed by PacketFieldOffset.post_build to compute lengths.
    post_value = {"full_len" : (lambda pkt, pay: len(pkt)+len(pay)),
                  "self_len" : (lambda pkt, pay: len(pkt))}
# Smoke test: build packets and show the auto-computed length fields.
test = TestPacket(dummy=19)
test.show2()
print(repr(bytes(test)))
# Same again with a payload, so full_len != self_len.
test = TestPacket(dummy=0xFFFFFFFF)/Raw(b"testtest")
test.show2()
print(repr(bytes(test)))
|
import logging
import json
from itertools import chain
import pandas as pd
from wash import Washer
class AggregateBar(Washer):
    """
    1. Aggregate bars of various periods and persist them.
    2. Aggregation is based on the cleaned 1-minute bars.
    3. This normally runs together with the washer, so there is no need to
       report to slavem again.
    """

    def run(self):
        """Load cleaned local data, aggregate daily bars, then stop."""
        self.log.info('isTradingDay: {}'.format(self.isTradingDay))
        self.log.info('聚合 {} 的数据'.format(self.tradingDay.date()))
        # Reporting (disabled):
        # self.slavemReport.lanuchReport()
        # Start loops (disabled):
        # self.drDataLocal.start()
        # self.drDataRemote.start()
        # Loading locally is enough: by now both sites' data have been
        # reconciled and are identical.
        self.drDataLocal.loadOriginData()
        # TODO: aggregate 5-minute bars
        # TODO: aggregate 15-minute bars
        # TODO: aggregate 30-minute bars
        # TODO: aggregate 1-hour bars
        # Aggregate daily bars
        self.aggregatedAllDayBar()
        # TODO: re-aggregate all contracts' 1-minute bars and persist
        self.stop()
        self.log.info('聚合结束')

    def aggregatedAllDayBar(self):
        """
        Aggregate daily bars for every contract seen locally or remotely.
        :return:
        """
        vtSymbolsChain = list(chain(self.drDataLocal.originData.keys(), self.drDataRemote.originData.keys()))
        self.log.info('共 {} 个合约'.format(len(vtSymbolsChain),))
        for vtSymbol in vtSymbolsChain:
            self.aggregatedDayBar(vtSymbol)

    def aggregatedDayBar(self, vtSymbol):
        """
        Aggregate the daily bar for a single contract.
        :param vtSymbol: contract symbol to aggregate
        :return:
        """
        originData = self.drDataLocal.originData.get(vtSymbol)
        if originData is None:
            # No local data for this symbol; nothing to aggregate.
            # self.log.warning('symbol {} local 没有数据可以聚合'.format(symbol))
            return
        assert isinstance(originData, pd.DataFrame)
        df = originData.set_index('tradingDay').sort_index()
        # Resample the minute bars into one daily bar.
        ndf = self.resample1DayBar(df)
        # Persist to both the local and remote stores.
        self.drDataLocal.updateDayData(ndf)
        self.drDataRemote.updateDayData(ndf)
if __name__ == '__main__':
    settingFile = 'conf/kwarg.json'
    loggingConfigFile = 'conf/logconfig.json'
    serverChanFile = 'conf/serverChan.json'
    # Debug runs read their config from tmp/ instead of conf/.
    if __debug__:
        settingFile = 'tmp/kwarg.json'
        loggingConfigFile = 'tmp/logconfig.json'
    with open(serverChanFile, 'r') as f:
        serverChanUrls = json.load(f)['serverChanSlaveUrls']
    with open(settingFile, 'r') as f:
        kwargs = json.load(f)
    with open(loggingConfigFile, 'r') as f:
        loggingConfig = json.load(f)
    import datetime
    a = AggregateBar(loggingConfig=loggingConfig, **kwargs)
    import arrow
    # Manual overrides used during debugging:
    # a.tradingDay = arrow.get('2017-08-24 00:00:00+08:00').datetime
    # print(a.tradingDay)
    a.start()
|
# for loop
# users=[
# ['ram','sita','gita','hari'],
# ['jonish','gauri','alisha','anuj'],
# ['q','w','e','r']
# ]
# for user in users:
# for name in user:
# print(name)
# while Loop
# i=1
# while i<=50:
# if i%2==0:
# print(i)
# i+=1
# while loop result
# Prompt the user for a number (the loop examples above are kept as notes).
num=int(input("Enter your number"))
|
#Sako Haji
#04/21/2019
# Adjacency list of the flow network; node "0" is the source, "7" the sink.
avail_edges = {"0": ["1", "2", "3"],"1": ["4", "5"], "2": ["5"], "3": ["6"], "4": ["7"], "5": ["7"], "6": ["7"], "7": []}
# NOTE(review): this module-level `graph` is never used -- flow() rebinds a
# local name to avail_edges instead.
graph = {}
def flow():
    """Push unit flows from "0" to "7", reversing each traversed edge
    (Ford-Fulkerson-style residuals), until BFS finds no path to the sink.

    NOTE(review): indentation was lost in this file; the placement of the
    final `if counter == 3` check was reconstructed -- confirm against VCS.
    """
    counter = 0
    # NOTE(review): this aliases (does not copy) avail_edges, so the global
    # adjacency list is mutated below -- confirm that is intentional.
    graph = avail_edges
    pointer = "0"
    while BFS(pointer, graph):
        # Walk the first available edge, then reverse it (residual edge).
        next_child = graph[pointer][0]
        #print(graph,"pointer",pointer, "child",next_child)
        graph[pointer].remove(next_child)
        graph[next_child].append(pointer)
        pointer = next_child
        if pointer == "7":
            # Completed one source->sink path: count it and restart.
            pointer = "0"
            counter += 1
    if counter == 3:
        print("You have reached optimal flow!")
    print(graph)
def BFS(start, graph, sink="7"):
    """Breadth-first search: return True iff `sink` appears in the adjacency
    list of any node reachable from `start`.

    Parameters
    ----------
    start : hashable
        Node to begin the search from.
    graph : dict
        Adjacency list mapping node -> list of neighbour nodes.
    sink : hashable, optional
        Target node. Generalized from the previously hard-coded "7"; the
        default preserves the original behavior.

    Note: the sink itself is never expanded; the function answers "is there
    an edge into the sink from the reachable set".
    """
    visited = []
    next_node = [start]
    while next_node != []:
        pointer = next_node.pop(0)
        visited.append(pointer)
        for neighbour in graph[pointer]:
            if neighbour == sink:
                return True
            if neighbour not in visited and neighbour not in next_node:
                next_node.append(neighbour)
    return False
# Run the max-flow demo on module execution.
flow()
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class CoreNetwork(nn.Module):
    """RNN core of a recurrent attention model: fuses the glimpse features
    with the previous recurrent state via two linear maps and a ReLU."""

    def __init__(self, glimpse_hid_dim, location_hid_dim, recurrent_hid_dim):
        super().__init__()
        # Input projection takes the concatenated glimpse+location vector.
        self.i2h = nn.Linear(glimpse_hid_dim + location_hid_dim, recurrent_hid_dim)
        # Recurrent (state-to-state) projection.
        self.h2h = nn.Linear(recurrent_hid_dim, recurrent_hid_dim)

    def forward(self, glimpse_hidden, recurrent_hidden):
        # glimpse_hidden = [batch size, glimpse_hid_dim+location_hid_dim]
        # recurrent_hidden = [batch size, recurrent_hid_dim]
        combined = self.i2h(glimpse_hidden) + self.h2h(recurrent_hidden)
        # returns [batch size, recurrent_hid_dim]
        return F.relu(combined)
|
''' lesson 2 - building a movie website
build a site that hosts movie trailers and their info
steps:
1. we need:
title
synopsis
release date
ratings
this means we need a template for this data,
ex: avatar.show_info(), toyStory.show_trailer()
but we dont want to use separate modules/files for each movie
CLASSES
learning classes by drawing shapes
classes is a lot like a blueprint
same blueprint can be used to make similar buildings
objects are examples of the class blueprint
turtle module
Turtle class
a neatly packaged box
turtle.Turtle()
calls the function __init__ w/in class Turtle
creates a new instance of Turtle and inherits all of the methods
webbrowser.open() vs turtle.Turtle()
the first calls a function
the second calls the init func, which allocates memory and creates an obj
steps to make multiple squares that make a circle
1. build a square
2. for n < 360/tiltangle, tilt by some amount and build another square
'''
import turtle
def drawThings():
    """Set up a red screen and a turtle, draw the initials, await a click."""
    redbox = turtle.Screen()
    redbox.bgcolor("red")
    # brad = turtle.Turtle()
    # brad.shape("turtle")
    # brad.color("blue")
    # brad.speed(60)
    sunny = turtle.Turtle()
    sunny.shape("turtle")
    sunny.color("black")
    sunny.speed("3")
    ''' draws a series of squares that make a circle
    num = 1
    tiltAngle = 6
    tilts = 360 / tiltAngle
    drawSquare(brad)
    while num < tilts:
    brad.right(tiltAngle)
    drawSquare(brad)
    num += 1
    '''
    drawSunnysName(sunny)
    # Keep the window open until the user clicks it.
    redbox.exitonclick()
    # angie = turtle.Turtle()
    # angie.shape("arrow")
    # angie.color("yellow")
    # drawCircle(angie)
def drawSunnysName(person):
    """Draw the initials 'S' and 'G' with the given turtle."""
    # draws the letter 'S'
    person.left(90)
    person.circle(100, 270)
    person.circle(-100, 270)
    # moves the turtle to draw the next letter
    person.penup()
    person.forward(100)
    person.right(90)
    person.forward(600)
    person.pendown()
    # draws the letter 'G'
    person.left(90)
    person.penup()
    person.circle(200, 45)
    person.pendown()
    person.circle(200, 315)
    person.left(90)
    person.forward(200)
def drawSquare(someTurtle):
    """Draw a 100-unit square, turning clockwise at each corner."""
    for _ in range(4):
        someTurtle.forward(100)
        someTurtle.right(90)
def drawCircle(someTurtle):
    # Draw a circle of radius 100 with the given turtle.
    someTurtle.circle(100)
# FIX: the entry point defined in this module is drawThings(); calling the
# undefined main() raised NameError at startup.
drawThings()
|
import os
import sys
from radon.complexity import cc_rank, cc_visit
from radon.visitors import Function, Class

# Get a list of all python files in the project
print('Compiling list of modules...')
MODULES = []
# Walk the directory containing this script.
for i in os.walk(os.path.abspath(os.path.join(os.path.abspath(__file__), '..'))):
    path, folders, files = i
    for file in files:
        if file.endswith('.py'):
            MODULES.append(os.path.join(path, file))
# Analyze each module with Radon
print('Analyzing modules with Radon...')
RESULTS = {}
for mod in MODULES:
    with open(mod, 'r') as py_file:
        RESULTS[mod] = cc_visit(py_file.read())
# Generate a report and save it to a file
print('Generating report...')
TEMPLATE = "{0:8} {1:9} {2:6} {3:20}" # column widths: 7, 10, 12, 20
# Raw complexity scores accumulated across all modules.
SCORES = []
def add_to_report(scores, chunk):
    """Print one report line for `chunk` (a radon Function or Class) and
    append its complexity to `scores`; recurses into a class's methods."""
    lines = "{}-{}".format(chunk.lineno, chunk.endline)
    scores.append(chunk.complexity)
    complexity = cc_rank(chunk.complexity)
    if isinstance(chunk, Function):
        if chunk.is_method is False:
            print(TEMPLATE.format(lines, 'Function', complexity, chunk.name))
        else:
            # Methods are qualified with their class name.
            print(TEMPLATE.format(lines, 'Method', complexity, '{}.{}'.format(chunk.classname, chunk.name)))
    elif isinstance(chunk, Class):
        print(TEMPLATE.format(lines, 'Class', complexity, chunk.name))
        for method in chunk.methods:
            add_to_report(scores, method)
# Emit a per-module report; methods are printed via their owning class.
for mod in RESULTS:
    if not RESULTS[mod]:
        continue
    print(mod + '\n' + TEMPLATE.format('Lines', 'Type', 'Score', 'Name'))
    for chonk in RESULTS[mod]:
        if isinstance(chonk, Function) and chonk.is_method:
            continue
        add_to_report(SCORES, chonk)
# Create a summary of the analysis
# FIX: guard against an empty SCORES list (no analyzable python files),
# which previously crashed on division by zero and max() of an empty list.
if not SCORES:
    print('\nNo analyzable code found.')
    sys.exit(0)
print('\nFiles analyzed: {}\nAverage score: {}\n\n'.format(str(len(SCORES)), str(sum(SCORES)/len(SCORES))))
# If the complexity of a module is above 10 (a 'c' grade), return a non-zero exit code
if max(SCORES) > 10:
    print('Analysis score too low, exiting with error code 1')
    sys.exit(1)
sys.exit(0)
|
# Letter-frequency dictionaries for the English digit words 0-9.
one={'O':1,'N':1,'E':1}
two={"T":1,"W":1,"O":1}
three={"T":1,"H":1,"R":1,"E":2}
four={"F":1,"O":1,"U":1,"R":1}
five={"F":1,"I":1,"V":1,"E":1}
six={"S":1,"I":1,"X":1}
seven={"S":1,"E":2,"V":1,"N":1}
eight={"E":1,"I":1,"G":1,"H":1,"T":1}
nine={"N":2,"I":1,"E":1}
zero={"Z":1,"E":1,"R":1,"O":1}
# NOTE(review): `zero` appears at both ends; the trailing duplicate is
# harmless for building `alpha` below but looks unintended.
lst=[zero,one,two,three,four,five,six,seven,eight,nine,zero]
# `alpha` collects every letter that can occur in any digit word.
alpha={}
for i in lst:
    for j in i:
        if j not in alpha:
            alpha[j]=1
##fin={}
##huge={}
##def count(s):
## dic={}
## for i in s:
## nu=lst[int(i)]
## for w in nu:
## if w in dic:
## dic[w]+=nu[w]
## else:
## dic[w]=nu[w]
## return(dic)
##
##for i in range(1000000):
## num=[]
## for j in str(i):
## num.append(int(j))
## num.sort()
## s=''
## for j in num:
## s+=str(j)
## if s in fin:
## pass
## else:
## fin[s]=1
## huge[s]=count(s)
def sub(dic1, dic2, x):
    """Subtract x occurrences of each letter-count in dic2 from dic1 (in place)."""
    for letter in dic2:
        dic1[letter] = dic1[letter] - dic2[letter] * x
# Solve the "recover the digits from shuffled letters" puzzle: certain
# letters uniquely identify a digit (X->6, Z->0, W->2, ...); peeling their
# counts off in this order keeps every remaining step unambiguous.
# FIX: context managers close both files; the input handle no longer
# shadows the built-in `file`; output uses the required "Case #N:" casing.
with open("A-large.in") as in_file, open("output.txt", "w") as out_file:
    case_count = int(in_file.readline()[:-1])
    for case_index in range(case_count):
        line = in_file.readline()[:-1]
        # Letter histogram, zero-filled over every relevant letter so the
        # sub() calls below never hit a missing key.
        counts = {}
        for ch in line:
            counts[ch] = counts.get(ch, 0) + 1
        for ch in alpha:
            if ch not in counts:
                counts[ch] = 0
        digits = []
        digits += [6] * counts["X"]   # X appears only in SIX
        sub(counts, six, counts["X"])
        digits += [0] * counts["Z"]   # Z appears only in ZERO
        sub(counts, zero, counts["Z"])
        digits += [7] * counts["S"]   # S now remains only in SEVEN
        sub(counts, seven, counts["S"])
        digits += [5] * counts["V"]   # V now remains only in FIVE
        sub(counts, five, counts["V"])
        digits += [4] * counts["F"]   # F now remains only in FOUR
        sub(counts, four, counts["F"])
        digits += [8] * counts["G"]   # G appears only in EIGHT
        sub(counts, eight, counts["G"])
        digits += [3] * counts["H"]   # H now remains only in THREE
        sub(counts, three, counts["H"])
        digits += [2] * counts["W"]   # W appears only in TWO
        sub(counts, two, counts["W"])
        digits += [9] * counts["I"]   # I now remains only in NINE
        sub(counts, nine, counts["I"])
        digits += [1] * counts["O"]   # O now remains only in ONE
        sub(counts, one, counts["O"])
        digits.sort()
        result = "".join(str(d) for d in digits)
        # FIX: Code Jam requires the capitalized "Case #N:" prefix.
        out_line = "Case #" + str(case_index + 1) + ": " + result + "\n"
        out_file.write(out_line)
        print(out_line)
|
import tkinter as tk
from tkinter.messagebox import showwarning
import subprocess

# Demo: embed an xterm terminal inside a Tk frame via xterm's -into flag.
# Relies on X11 window ids, so this only works on X11 systems.
root = tk.Tk()
root.geometry("600x500")
label = tk.Label(root, text="Example of xterm embedded in frame")
label.pack(fill=tk.X)
xterm_frame = tk.Frame(root)
xterm_frame.pack(fill=tk.BOTH, expand=True)
# X11 window id of the frame, handed to xterm as its parent window.
xterm_frame_id = xterm_frame.winfo_id()
try:
    p = subprocess.Popen(
        ["xterm", "-into", str(xterm_frame_id), "-geometry", "80x20"],
        stdin=subprocess.PIPE, stdout=subprocess.PIPE)
except FileNotFoundError:
    # xterm binary missing: tell the user and abort.
    showwarning("Error", "xterm is not installed")
    raise SystemExit
root.mainloop()
|
#if __name__ == '__main__':
#    print('Hello Heimid!')
# Log the FritzBox's currently active hosts into a SQL Server table.
import fritzconnection as fc
#print(fc.get_version())
# SECURITY(review): credentials are hard-coded; move the password to a
# config file or environment variable.
connection = fc.FritzConnection(password='butt2740')
info = connection.call_action('WANIPConnection', 'GetInfo')
Uptime = info['NewUptime']
#print(Uptime)
import fritzhosts as fh
#print(fh.get_version())
host = fh.FritzHosts(password='butt2740')
#fh.print_hosts(host)
import pyodbc
cnxn = pyodbc.connect('DRIVER={SQL Server};SERVER=JULIA\SQLEXPRESS;DATABASE=WifiMonitor')
cursor = cnxn.cursor()
hosts = host.get_hosts_info()
# Insert one row per currently-active host.
for index, host in enumerate(hosts):
    if host['status'] == '1':
        ip = '-' if host['ip'] is None else host['ip']
        mac = '-' if host['mac'] is None else host['mac']
        name = '-' if host['name'] is None else host['name']
        # FIX: parameterized query instead of string concatenation --
        # device names are untrusted input (SQL injection / quoting bugs).
        insertstatement = 'INSERT INTO HOSTS VALUES (getdate(), ?, ?, ?)'
        print(insertstatement, name, ip, mac)
        cursor.execute(insertstatement, name, ip, mac)
        cnxn.commit()
import fritzstatus as fs
#fs.print_status()
status = fs.FritzStatus(fc=connection)
#print(status.external_ip)
#print(status.max_byte_rate)
|
# NOTE: Python 2 module (print statement syntax).
def time_for_playing(learning,eating,sleeping,others):
    # Hours left in a 24-hour day after the four listed activities.
    # NOTE(review): the result variable shadows the function's own name.
    time_for_playing=24-learning - eating - sleeping - others
    print "Time for playing today is %d." %time_for_playing

time_for_playing(2,2,8,3)
import zmq
from zmq.log.handlers import PUBHandler
class ZMQPUBHandler(PUBHandler):
    """Custom class for zmq pub handler"""

    def __init__(self, sock, root_topic):
        """
        :param string sock: zmq address
        :param string root_topic: prefix for log message topic
        """
        # Each handler owns its own context and a single PUB socket.
        # NOTE(review): the PUB side connect()s rather than bind()s -- this
        # assumes a central SUB/XSUB endpoint listens at `sock`; confirm.
        context = zmq.Context()
        publisher = context.socket(zmq.PUB)
        publisher.connect(sock)
        super(ZMQPUBHandler, self).__init__(publisher, context)
        self.root_topic = root_topic
|
# Split n into two composite numbers a and b (a + b == n) and print them.
s1=input().split()
n=int(s1[0])
a=4
check=True

def test(a):
    # Return True iff `a` is composite (has a divisor in [2, a)).
    # Primes and values < 4 return False.
    for i in range(2,a):
        if a%i==0:
            return True
    return False

# Scan upward from 4 for the first composite pair summing to n.
while (check==True):
    if test(a)==True:
        b=n-a
        if test(b)==True:
            check=False
            print(a,b)
    a=a+1
|
# This file is part of pyrerp
# Copyright (C) 2012-2013 Nathaniel Smith <njs@pobox.com>
# See file COPYING for license information.
import os.path
import struct
import os
import string
from collections import OrderedDict
import bisect
import numpy as np
import pandas
from pyrerp.data import DataFormat, DataSet
from pyrerp.util import maybe_open
from pyrerp.io._erpss import _decompress_crw_chunk
PAUSE_CODE = 49152
DELETE_CODE = 57344
# There are also read_avg and write_erp_as_avg functions in here, but their
# API probably needs another look before anyone should use them.
__all__ = ["load_erpss"]
# Derived from erp/include/64header.h:
_header_dtype = np.dtype([
("magic", "<u2"),
("epoch_len", "<i2"), # epoch length in msec
("nchans", "<i2"),
("sums", "<i2"), # 0 = ERP, 1 = single trial
# -- 8 bytes --
("tpfuncs", "<i2"), # number of processing funcs
("pp10uv", "<i2"), # points / 10 uV
("verpos", "<i2"), # 1 normally, -1 for sign inversion (I think?)
("odelay", "<i2"), # ms from trigger to stim (usually 8)
# -- 16 bytes --
("totevnt", "<i2"), # "total log events" (0 in mima217.avg)
("10usec_per_tick", "<i2"),
("time", "<i4"), # "time in sample clock ticks" (0 in mima217.avg)
# -- 24 bytes --
("cond_code", "<i2"), # (0 in mima217.avg)
("presam", "<i2"), # pre-event time in epoch in msec
("trfuncs", "<i2"), # "number of rejection functions"
("totrr", "<i2"), # "total raw records including rejects" (0 in mima217.avg)
# -- 32 bytes --
("totrej", "<i2"), # "total raw rejects" (0 in mima217.avg) (0 in mima217.avg)
("sbcode", "<i2"), # "subcondition number ( bin number )" (0 in mima217.avg)
("cprecis", "<i2"), # Our average contains cprecis * 256 samples
("dummy1", "<i2"),
# -- 40 bytes --
("decfact", "<i2"), # "decimation factor used in processing"
("dh_flag", "<i2"), # "see defines - sets time resolution" (0 in mima217.avg)
("dh_item", "<i4"), # "sequential item #" (0 in mima217.avg)
# -- 48 bytes --
("rfcnts", "<i2", (8,)), # "individual rejection counts 8 poss. rfs"
("rftypes", "S8", (8,)), # "8 char. descs for 8 poss. rfs"
("chndes", "S128"),
("subdes", "S40"),
("sbcdes", "S40"),
("condes", "S40"),
("expdes", "S40"),
("pftypes", "S24"),
("chndes2", "S40"),
("flags", "<u2"), # "see flag values below" (0 in mima217.avg)
("nrawrecs", "<u2"), # "# raw records if this is a raw file header"
# (0 in mima217.avg)
("idxofflow", "<u2"), # (0 in mima217.avg)
("idxoffhi", "<u2"), # (0 in mima217.avg)
("chndes3", "S24"),
# -- 512 bytes --
])
# If, say, chndes has trailing null bytes, then rec["chndes"] will give us a
# less-than-128-byte string back. But this function always gives us the full
# 128 byte string, trailing nuls and all.
def _get_full_string(record, key):
val = record[key]
desired_len = record.dtype.fields[key][0].itemsize
return val + (desired_len - len(val)) * "\x00"
# Translation tables for the ad hoc 6-bit character encoding used to encode
# electrode names in the 64-channel format:
# codes 1-26 = a-z, 27-52 = A-Z, 53-62 = 0-9; code 0 means "no character".
# NOTE: Python 2 module (string.lowercase/uppercase, dict.iteritems).
_char2code = {}
for i, char in enumerate(string.lowercase):
    _char2code[char] = i + 1
for i, char in enumerate(string.uppercase):
    _char2code[char] = i + 27
for i, char in enumerate(string.digits):
    _char2code[char] = i + 53
# Inverse table, for decoding.
_code2char = dict([(v, k) for (k, v) in _char2code.iteritems()])
def _read_header(stream):
    """Parse the 512-byte ERPSS header at the start of `stream`.

    Returns (chunk_reader, nchans, hz, channel_names, info, header); the
    chunk reader is chosen from the file's magic number.
    """
    header_str = stream.read(512)
    header = np.fromstring(header_str, dtype=_header_dtype)[0]
    if header["magic"] == 0x17a5:
        # Raw file magic number:
        reader = _read_raw_chunk
    elif header["magic"] == 0x97a5:
        # Compressed file magic number:
        reader = _read_compressed_chunk
    else: # pragma: no cover
        assert False, "Unrecognized file type"
    # The sample rate is stored as a tick period in units of 10 us.
    hz = 1 / (header["10usec_per_tick"] / 100000.0)
    if abs(hz - int(hz)) > 1e-6:
        raise ValueError("file claims weird non-integer sample rate %shz"
                         % hz)
    hz = int(hz)
    channel_names = _channel_names_from_header(header)
    # Also read out the various general informational bits:
    info = {}
    info["subject"] = header["subdes"]
    info["experiment"] = header["expdes"]
    info["odelay"] = header["odelay"]
    # And save the raw header in case anyone wants it later (you never know)
    info["erpss_raw_header"] = header_str
    return (reader, header["nchans"], hz, channel_names, info, header)
def _channel_names_from_header(header):
    """Decode the channel-name table from an ERPSS header record.

    The on-disk layout depends on the montage size: <=16 channels use 8
    ASCII bytes each, <=32 use 4 bytes each, and larger montages pack four
    6-bit characters into every 3 bytes across three header fields.
    """
    if header["nchans"] <= 16:
        # For small montages, each channel gets 8 bytes of ascii, smushed
        # together into a single array:
        return np.fromstring(_get_full_string(header, "chndes"),
                             dtype="S8")[:header["nchans"]]
    elif header["nchans"] <= 32:
        # For mid-size montages, each channel gets 4 bytes:
        return np.fromstring(_get_full_string(header, "chndes"),
                             dtype="S4")[:header["nchans"]]
    else:
        # And for large montages, a complicated scheme is used.
        # First, pull out and combine all the relevant buffers:
        chan_buf = (_get_full_string(header, "chndes")
                    + _get_full_string(header, "chndes2")
                    + _get_full_string(header, "chndes3"))
        # Then, each 3 byte chunk represents 4 characters, each coded in 6
        # bits and packed together:
        channel_names_l = []
        for i in xrange(header["nchans"]):
            chunk = np.fromstring(chan_buf[3*i : 3*i+3], dtype=np.uint8)
            # Unpack the four 6-bit codes from the 24-bit chunk.
            codes = [
                (chunk[0] >> 2) & 0x3f,
                (chunk[0] & 0x03) << 4 | (chunk[1] >> 4) & 0x0f,
                (chunk[1] & 0x0f) << 2 | (chunk[2] >> 6) & 0x03,
                (chunk[2] & 0x3f),
                ]
            # Code 0 is "no character" (names shorter than 4 chars).
            chars = [_code2char[code] for code in codes if code != 0]
            channel_names_l.append("".join(chars))
        return np.array(channel_names_l[:header["nchans"]])
def _channel_names_to_header(channel_names, header):
    """Encode `channel_names` into `header` (inverse of
    _channel_names_from_header), also setting header["nchans"].

    Raises ValueError if the names do not survive a decode round-trip
    (e.g. a name is too long for the chosen layout).
    """
    header["nchans"] = len(channel_names)
    if len(channel_names) <= 16:
        header["chndes"] = np.asarray(channel_names, dtype="S8").tostring()
    elif len(channel_names) <= 32:
        header["chndes"] = np.asarray(channel_names, dtype="S4").tostring()
    else:
        # Large montage: pack four 6-bit codes into each 3-byte group.
        encoded_names = []
        for channel_name in channel_names:
            codes = [_char2code[char] for char in channel_name]
            # Pad to 4 codes; code 0 marks "no character".
            codes += [0] * (4 - len(codes))
            char0 = ((codes[0] << 2) | (codes[1] >> 4)) & 0xff
            char1 = ((codes[1] << 4) | (codes[2] >> 2)) & 0xff
            char2 = ((codes[2] << 6) | codes[3]) & 0xff
            encoded_names += [chr(char0), chr(char1), chr(char2)]
        concat_buf = "".join(encoded_names)
        # Spill the packed bytes across the three header name fields.
        header["chndes"] = concat_buf[:128]
        header["chndes2"] = concat_buf[128:128 + 40]
        header["chndes3"] = concat_buf[128 + 40:]
    # Verify the encoding by round-tripping through the decoder.
    if not np.all(_channel_names_from_header(header) == channel_names):
        raise ValueError("failed to encode channel names in header -- maybe "
                         "some names are too long?")
def test_channel_names_roundtrip():
    """Encode/decode round-trip of channel names at every montage size."""
    # Try 1 char, 2 char, 3 char, 4 char names
    # Try all letters in 6-bit character set (digits, lowercase, uppercase)
    names = ["A", "a", "1", "Aa", "Aa1", "Aa1A"]
    import itertools
    # Pad the list out to 64 names so the large-montage scheme is hit.
    # NOTE: itertools.izip / string.uppercase are Python 2 only.
    for char, digit in itertools.izip(itertools.cycle(string.uppercase),
                                      itertools.cycle(string.digits)):
        names.append(char + char.lower() + digit)
        if len(names) == 64:
            break
    def t(test_names):
        # Write names into a zeroed header, read them back, compare.
        header = np.zeros(1, dtype=_header_dtype)[0]
        _channel_names_to_header(test_names, header)
        got_names = _channel_names_from_header(header)
        assert np.all(got_names == test_names)
    # skip names == [], b/c we hit https://github.com/numpy/numpy/issues/3764
    # and anyway, who cares about the nchans=0 case
    for i in xrange(1, len(names)):
        # Try all lengths
        t(names[:i])
    # Also try some long names for small headers where they're allowed
    long_names = ["a" * i for i in xrange(8)] * 2
    t(long_names)
    from nose.tools import assert_raises
    header = np.zeros(1, dtype=_header_dtype)[0]
    # But even for small headers, only 8 chars are allowed
    assert_raises(ValueError, _channel_names_to_header, ["a" * 9], header)
    # And for larger headers, only 4 chars are allowed
    for i in xrange(17, 64):
        assert_raises(ValueError,
                      _channel_names_to_header, ["a" * 5] * i, header)
def read_raw(stream, dtype):
    """Read a whole ERPSS raw/crw file from *stream*.

    Returns (hz, channel_names, codes, data, info): codes is a 1-d uint16
    array of event codes and data is an (nsamples, nchans) array converted
    to *dtype*.  The per-format chunk reader comes from _read_header.
    """
    (reader, nchans, hz, channel_names, info, header) = _read_header(stream)
    # Data is stored in a series of "chunks" -- each chunk contains 256 s16
    # samples from each channel (the 32/64/whatever analog channels, plus 1
    # channel for codes -- that channel being first.). The code channel
    # contains a "record number" as its first entry in each chunk, which
    # simply increments by 1 each time.
    chunkno = 0
    code_chunks = []
    data_chunks = []
    while True:
        read = reader(stream, nchans)
        if read is None:
            break
        (code_chunk, data_chunk) = read
        assert len(code_chunk) == 256
        assert data_chunk.shape == (256 * nchans,)
        # The first code slot holds the sequential record number; check it
        # and zero it so it can't masquerade as a real event code.
        assert code_chunk[0] == chunkno
        code_chunk[0] = 0
        code_chunk = np.asarray(code_chunk, dtype=np.uint16)
        data_chunk.resize((256, nchans))
        data_chunk = np.asarray(data_chunk, dtype=dtype)
        code_chunks.append(code_chunk)
        data_chunks.append(data_chunk)
        chunkno += 1
    return (hz, channel_names,
            np.concatenate(code_chunks),
            np.row_stack(data_chunks),
            info)
def _read_raw_chunk(stream, nchans):
chunk_bytes = (nchans + 1) * 512
buf = stream.read(chunk_bytes)
# Check for EOF:
if not buf:
return None
codes_list = list(struct.unpack("<256H", buf[:512]))
data_chunk = np.fromstring(buf[512:], dtype="<i2")
return (codes_list, data_chunk)
def _read_compressed_chunk(stream, nchans):
    """Read one compressed (crw) chunk; returns (codes, data) or None at EOF."""
    # Check for EOF:
    ncode_records_minus_one_buf = stream.read(1)
    if not ncode_records_minus_one_buf:
        return None
    # Code track (run length encoded):
    (ncode_records_minus_one,) = struct.unpack("<B",
                                               ncode_records_minus_one_buf)
    ncode_records = ncode_records_minus_one + 1
    code_records = []
    for i in xrange(ncode_records):
        # Each RLE record: u8 repeat-count-minus-one, u16 event code.
        code_records.append(struct.unpack("<BH", stream.read(3)))
    codes_list = []
    for (repeat_minus_one, code) in code_records:
        codes_list += [code] * (repeat_minus_one + 1)
    # The run lengths must expand to exactly one chunk's worth of codes.
    assert len(codes_list) == 256
    # Data bytes (delta encoded and packed into variable-length integers):
    (ncompressed_words,) = struct.unpack("<H", stream.read(2))
    compressed_data = stream.read(ncompressed_words * 2)
    data_chunk = _decompress_crw_chunk(compressed_data, ncompressed_words,
                                       nchans)
    return (codes_list, data_chunk)
def assert_files_match(p1, p2):
    """Assert that two raw/crw files decode to identical recordings."""
    (hz1, channames1, codes1, data1, info1) = read_raw(open(p1), "u2")
    (hz2, channames2, codes2, data2, info2) = read_raw(open(p2), "u2")
    assert hz1 == hz2
    assert (channames1 == channames2).all()
    assert (codes1 == codes2).all()
    assert (data1 == data2).all()
    # The raw headers themselves may legitimately differ (raw vs crw), so
    # compare every other info key.  NOTE: keys() + keys() is Python 2 only.
    for k in set(info1.keys() + info2.keys()):
        if k != "erpss_raw_header":
            assert info1[k] == info2[k]
def test_read_raw_on_test_data():
    """Every .raw test file must decode identically to its .crw sibling."""
    import glob
    from pyrerp.test import test_data_path
    tested = 0
    for rawp in glob.glob(test_data_path("erpss/*.raw")):
        # The compressed sibling has the same name with a .crw extension.
        crwp = rawp[:-3] + "crw"
        print rawp, crwp
        assert_files_match(rawp, crwp)
        tested += 1
    # Cross-check, to make sure it is actually finding the files... (bump up
    # this number if you add more test files):
    assert tested == 5
def test_64bit_channel_names():
    """A 64-channel montage exercises the 6-bit packed name encoding."""
    from pyrerp.test import test_data_path
    stream = open(test_data_path("erpss/two-chunks-64chan.raw"))
    (hz, channel_names, codes, data, info) = read_raw(stream, int)
    # "Correct" channel names as listed by headinfo(1):
    assert (channel_names ==
            ["LOPf", "ROPf", "LMPf", "RMPf", "LTPf", "RTPf", "LLPf", "RLPf",
             "LPrA", "RPrA", "LTFr", "RTFr", "LLFr", "RLFr", "LDPf", "RDPf",
             "LTOc", "RTOc", "LTCe", "RTCe", "LLCe", "RLCe", "LDFr", "RDFr",
             "LMFr", "RMFr", "MiFo", "MiPf", "MiFr", "A2", "LHEy", "RHEy",
             "LIOc", "RIOc", "LLOc", "RLOc", "LLPP", "RLPP", "LLPa", "RLPa",
             "LDCe", "RDCe", "LMCe", "RMCe", "LDOc", "RDOc", "LDPP", "RDPP",
             "LDPa", "RDPa", "LCer", "RCer", "LMOc", "RMOc", "LMPP", "RMPP",
             "LMPa", "RMPa", "MiCe", "MiPa", "MiPP", "MiOc", "LLEy", "RLEy"]
            ).all()
def read_log(file_like):
    """Parse an ERPSS log file into a DataFrame indexed by tick number,
    with code/condition/flag columns plus decoded boolean flag bits."""
    stream = maybe_open(file_like)
    tick_index = []
    rows = []
    while True:
        record = stream.read(8)
        if not record:
            break
        # Each 8-byte record: u16 code, u16 tick-high, u16 tick-low,
        # u8 condition, u8 flag (all little-endian).
        code, hi, lo, condition, flag = struct.unpack("<HHHBB", record)
        tick_index.append((hi << 16) | lo)
        rows.append((code, condition, flag))
    frame = pandas.DataFrame(rows, columns=["code", "condition", "flag"],
                             index=tick_index)
    # Expand the octal flag bits into boolean columns (same order as before).
    for column, mask in [("flag_data_error", 0o100),
                         ("flag_rejected", 0o40),
                         ("flag_polinv", 0o20)]:
        frame[column] = np.asarray(frame["flag"] & mask, dtype=bool)
    return frame
# Little hack useful for testing. AFAIK this is identical to the erpss
# 'makelog' program, except that:
# - 'makelog' throws away some events from the end of the file, including the
# very helpful final "pause" marker
# - 'makelog' "cooks" the log file, i.e., toggles the high bit of all events
# that occur in a span ended by a "delete mark" (see logfile.5). We don't
# bother. (Though could, I guess.)
def make_log(raw, condition=64): # pragma: no cover
    """Synthesize a log file (as a byte string) from a raw file's codes."""
    import warnings; warnings.warn("This code is not tested!")
    # Index 2 of read_raw's return tuple is the event-code array.
    codes = read_raw(maybe_open(raw), np.float64)[2]
    log = []
    for i in codes.nonzero()[0]:
        # 8-byte record: u16 code, u16 tick-high, u16 tick-low,
        # u8 condition, u8 flag (always 0 here).
        log.append(struct.pack("<HHHBB",
                               codes[i], (i & 0xffff0000) >> 16, i & 0xffff,
                               condition,
                               0))
        # Condition number bumps after every pause/delete marker.
        if codes[i] in (PAUSE_CODE, DELETE_CODE):
            condition += 1
    return "".join(log)
def test_read_log():
    """Spot-check read_log against hand-verified output from logexam(1)."""
    def t(data, expected):
        from cStringIO import StringIO
        got = read_log(StringIO(data))
        # .sort() is a trick to make sure columns line up
        from pandas.util.testing import assert_frame_equal
        assert_frame_equal(expected.sort(axis=1), got.sort(axis=1))
    # The first 80 bytes of arquan25.log (from Delong, Urbach & Kutas 2005)
    data = "01000000ec01010001000000e103010001000000f50601004b00000044070100010000007b0701004b000000ca07010001000000010801004b0000004f08010001000000860801004b000000d5080100".decode("hex")
    # From 'logexam arquan25.log 1' (1 means, measure time in ticks)
    # then 'l 0 9'
    expected = pandas.DataFrame(
        {"code": [1, 1, 1, 75, 1, 75, 1, 75, 1, 75],
         "condition": [1] * 10,
         "flag": [0] * 10,
         "flag_data_error": [False] * 10,
         "flag_rejected": [False] * 10,
         "flag_polinv": [False] * 10,
         },
        index=[492, 993, 1781, 1860, 1915, 1994, 2049, 2127, 2182, 2261],
        )
    t(data, expected)
    # 80 bytes from arquan25.log, starting at 8080*8 bytes into the file
    data = "01000e00d39b010000c00e00ff9e010023010e005a9f000023010e00dc9f000023010e005da0000023010e00dea0000023010e005fa1000023010e00e1a1000023010e0062a2000023010e00e3a20000".decode("hex")
    # from logexam, 'l 8080 8089'
    expected = pandas.DataFrame(
        {"code": [1, 49152, 291, 291, 291, 291, 291, 291, 291, 291],
         "condition": [1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
         "flag": [0] * 10,
         "flag_data_error": [False] * 10,
         "flag_rejected": [False] * 10,
         "flag_polinv": [False] * 10,
         },
        index=[957395, 958207, 958298, 958428, 958557,
               958686, 958815, 958945, 959074, 959203],
        )
    t(data, expected)
# XX someday should fix this so that it has the option to delay reading the
# actual data until needed (to avoid the giant memory overhead of loading in
# lots of data sets together). The way to do it for crw files is just to read
# through the file without decompressing to find where each block is located
# on disk, and then we can do random access after we know that.
def load_erpss(raw, log, calibration_events="condition == 0"):
    """Load an ERPSS raw/crw + log file pair into a DataSet.

    *raw* and *log* may be paths or file-like objects.  Events matching the
    *calibration_events* query string are stripped of their attributes and
    tagged with calibration_pulse=True.  Raises ValueError if the raw and
    log files disagree about event codes/positions.
    """
    dtype = np.float64
    metadata = {}
    if isinstance(raw, basestring):
        metadata["raw_file"] = os.path.abspath(raw)
    if isinstance(log, basestring):
        metadata["log_file"] = os.path.abspath(log)
    metadata["calibration_events"] = str(calibration_events)
    raw = maybe_open(raw)
    log = maybe_open(log)
    (hz, channel_names, raw_codes, data, header_metadata) = read_raw(raw, dtype)
    metadata.update(header_metadata)
    data_format = DataFormat(hz, "RAW", channel_names)
    raw_log_events = read_log(log)
    # Cross-check: the log's (tick -> code) mapping must reproduce the
    # code track embedded in the raw data exactly.
    expanded_log_codes = np.zeros(raw_codes.shape, dtype=int)
    try:
        expanded_log_codes[raw_log_events.index] = raw_log_events["code"]
    except IndexError as e:
        raise ValueError("log file claims event at position where there is "
                         "no data: %s" % (e,))
    if np.any(expanded_log_codes != raw_codes):
        raise ValueError("raw and log files have mismatched codes")
    del raw_codes
    del expanded_log_codes
    # Pause/delete events mark the boundaries between recording spans.
    pause_events = (raw_log_events["code"] == PAUSE_CODE)
    delete_events = (raw_log_events["code"] == DELETE_CODE)
    break_events = pause_events | delete_events
    break_ticks = raw_log_events.index[break_events]
    # The pause/delete code appears at the last sample of the old era, so if
    # used directly, adjacent pause ticks give contiguous spans of recording
    # as (pause1, pause2]. (Confirmed by checking by hand in a real recording
    # that the data associated with the sample that has the pause code is
    # contiguous with the sample before, but not the sample after.) Adding +1
    # to each of them then converts this to Python style [pause1, pause2)
    # intervals. There is a pause code at the last record of the file, but not
    # one at the first, so we add that in explicitly.
    break_ticks += 1
    span_edges = np.concatenate(([0], break_ticks))
    assert span_edges[0] == 0
    assert span_edges[-1] == data.shape[0]
    span_slices = [slice(span_edges[i], span_edges[i + 1])
                   for i in xrange(len(span_edges) - 1)]
    dataset = DataSet(data_format)
    for span_slice in span_slices:
        dataset.add_recspan(data[span_slice, :], metadata)
    span_starts = [s.start for s in span_slices]
    # Attach each log event to its recspan, with span-relative tick numbers.
    for tick, row in raw_log_events.iterrows():
        attrs = row.to_dict()
        span_id = bisect.bisect(span_starts, tick) - 1
        span_slice = span_slices[span_id]
        span_start = span_slice.start
        span_stop = span_slice.stop
        assert span_start <= tick < span_stop
        dataset.add_event(span_id,
                          tick - span_start, tick - span_start + 1,
                          attrs)
        # A delete marker flags the whole recspan it terminates as deleted.
        if attrs["code"] == DELETE_CODE:
            dataset.recspan_infos[span_id]["deleted"] = True
    # Replace all attributes of calibration events with a single marker.
    for cal_event in dataset.events_query(calibration_events):
        for key in list(cal_event):
            del cal_event[key]
        cal_event["calibration_pulse"] = True
    return dataset
def test_load_erpss():
    """End-to-end check of load_erpss on a tiny hand-constructed recording."""
    from pyrerp.test import test_data_path
    # This crw/log file is constructed to have a few features:
    # - it only has 3 records, so it's tiny
    # - the first two records are in one recspan, the last is in a second, so
    #   we test the recspan splitting code
    # - the first recspan ends in a PAUSE event, the second ends in a DELETE
    #   event, so we test the deleted event handling.
    # There are some weird things about it too:
    # - several events in the first recspan have condition 0, to test
    #   calibration pulse stuff. In a normal ERPSS file all events within a
    #   single recspan would have the same condition number.
    # - most of the event codes are >32767. In a normal ERPSS file such events
    #   are supposed to be reserved for special stuff and deleted events, but
    #   it happens the file I was using as a basis violated this rule. Oh
    #   well.
    dataset = load_erpss(test_data_path("erpss/tiny-complete.crw"),
                         test_data_path("erpss/tiny-complete.log"))
    assert len(dataset) == 2
    assert dataset[0].shape == (512, 32)
    assert dataset[1].shape == (256, 32)
    assert dataset.data_format.exact_sample_rate_hz == 250
    assert dataset.data_format.units == "RAW"
    assert list(dataset.data_format.channel_names) == [
        "lle", "lhz", "MiPf", "LLPf", "RLPf", "LMPf", "RMPf", "LDFr", "RDFr",
        "LLFr", "RLFr", "LMFr", "RMFr", "LMCe", "RMCe", "MiCe", "MiPa", "LDCe",
        "RDCe", "LDPa", "RDPa", "LMOc", "RMOc", "LLTe", "RLTe", "LLOc", "RLOc",
        "MiOc", "A2", "HEOG", "rle", "rhz",
        ]
    # Metadata from the raw header must be attached to every recspan.
    for recspan_info in dataset.recspan_infos:
        assert recspan_info["raw_file"].endswith("tiny-complete.crw")
        assert recspan_info["log_file"].endswith("tiny-complete.log")
        assert recspan_info["experiment"] == "brown-1"
        assert recspan_info["subject"] == "Subject p3 2008-08-20"
        assert recspan_info["odelay"] == 8
        assert len(recspan_info["erpss_raw_header"]) == 512
    assert dataset.recspan_infos[0].ticks == 512
    assert dataset.recspan_infos[1].ticks == 256
    assert dataset.recspan_infos[1]["deleted"]
    assert len(dataset.events()) == 14
    # 2 are calibration events
    assert len(dataset.events("has code")) == 12
    for ev in dataset.events("has code"):
        assert ev["condition"] in (64, 65)
        assert ev["flag"] == 0
        assert not ev["flag_data_error"]
        assert not ev["flag_polinv"]
        assert not ev["flag_rejected"]
    for ev in dataset.events("calibration_pulse"):
        assert dict(ev) == {"calibration_pulse": True}
    def check_ticks(query, recspan_ids, start_ticks):
        # Helper: every event matching *query* must sit at the expected
        # (recspan, span-relative tick) position with a 1-tick span.
        events = dataset.events(query)
        assert len(events) == len(recspan_ids) == len(start_ticks)
        for ev, recspan_id, start_tick in zip(events, recspan_ids, start_ticks):
            assert ev.recspan_id == recspan_id
            assert ev.start_tick == start_tick
            assert ev.stop_tick == start_tick + 1
    check_ticks("condition == 64",
                [0] * 8, [21, 221, 304, 329, 379, 458, 483, 511])
    check_ticks("condition == 65",
                [1] * 4,
                [533 - 512, 733 - 512, 762 - 512, 767 - 512])
    check_ticks("calibration_pulse", [0, 0], [250, 408])
    # check calibration_events option
    dataset2 = load_erpss(test_data_path("erpss/tiny-complete.crw"),
                          test_data_path("erpss/tiny-complete.log"),
                          calibration_events="condition == 65")
    assert len(dataset2.events("condition == 65")) == 0
    assert len(dataset2.events("condition == 0")) == 2
    assert len(dataset2.events("calibration_pulse")) == 4
    # check that we can load from file handles (not sure if anyone cares but
    # hey you never know...)
    assert len(load_erpss(open(test_data_path("erpss/tiny-complete.crw")),
                          open(test_data_path("erpss/tiny-complete.log")))) == 2
    # check that code/raw mismatch is detected
    from nose.tools import assert_raises
    for bad in ["bad-code", "bad-tick", "bad-tick2"]:
        assert_raises(ValueError,
                      load_erpss,
                      test_data_path("erpss/tiny-complete.crw"),
                      test_data_path("erpss/tiny-complete.%s.log" % (bad,)))
    # test .transform and .copy
    from pyrerp.test_data import check_transforms
    check_transforms(load_erpss(test_data_path("erpss/tiny-complete.crw"),
                                test_data_path("erpss/tiny-complete.log")))
|
from django.conf.urls import url
from . import views
# URL routing table for the authors app (Django 1.x-style url() patterns).
urlpatterns = (
    # urls for Author
    # list and create take no URL parameters
    url(r'^authors/author/$', views.AuthorListView.as_view(), name='authors_author_list'),
    url(r'^authors/author/create/$', views.AuthorCreateView.as_view(), name='authors_author_create'),
    # detail and update capture the object's primary key as <pk>
    url(r'^authors/author/detail/(?P<pk>\S+)/$', views.AuthorDetailView.as_view(), name='authors_author_detail'),
    url(r'^authors/author/update/(?P<pk>\S+)/$', views.AuthorUpdateView.as_view(), name='authors_author_update'),
)
|
from pathlib import Path
from typing import Dict, Union
from pydantic import BaseModel
from cored import CfgObj
class NodeInfo:
    """Placeholder for per-node metadata; currently has no fields."""
    pass
class DirInfo:
    """Snapshot of a directory: its name, absolute path, and child entries."""
    name: str = ''
    path: str = ''
    # number of entries directly inside the directory
    nodes_cnt: int = 0
    # child name -> child name (one key per directory entry)
    nodes_map: Dict[str, str] = {}

    @staticmethod
    def from_path(path):
        """Build and return a DirInfo describing *path* (str or Path).

        BUG FIX: the original constructed ``info`` but never returned it,
        so every caller (e.g. Wdir.get_info) received None.
        """
        path = Path(path)
        info = DirInfo()
        info.name = path.name
        info.path = str(path.absolute())
        info.nodes_map = {it.name: it.name for it in path.iterdir()}
        info.nodes_cnt = len(info.nodes_map)
        return info
class Wdir:
    """Thin wrapper around a filesystem directory path."""
    def __init__(self, path):
        # Normalize to a pathlib.Path regardless of the input type.
        self.path = Path(path)
    def get_info(self, cfg: CfgObj):
        """Return directory metadata; *cfg* is accepted but not used here."""
        info = DirInfo.from_path(self.path)
        return info
|
import pandas as pd
# defines a function for reformating world bank data
def reformatDF(DF, *, feature_name='', round_to=2):
    '''Reshape a wide World Bank table into long format.

    Returns a frame with columns [Country Name, Country Code, Year,
    feature_name] covering 1990-2016.  Note: drops metadata columns from
    the caller's frame in place, as before.
    '''
    # Strip the indicator metadata columns (in-place, mutating the caller).
    DF.drop(columns=['Indicator Name', 'Indicator Code', 'Unnamed: 63'],
            inplace=True)
    # Keep only the 1990-2016 year columns.
    DF.drop(columns=list(DF.loc[:, '1960':'1989'].columns), inplace=True)
    DF = DF.drop(columns=['2017', '2018'])
    # Round every numeric value to the requested precision.
    DF = DF.round(decimals=round_to)
    # Build one slim frame per year, then stack them into long format.
    yearly_frames = []
    for year in range(1990, 2017):
        slim = DF[['Country Name', 'Country Code', f'{year}']]
        slim = slim.rename(columns={f'{year}': feature_name})
        slim['Year'] = year
        slim = slim[['Country Name', 'Country Code', 'Year', feature_name]]
        yearly_frames.append(slim)
    return pd.concat(yearly_frames)
def mergeDFs(list_of_dfs):
    '''Inner-merge a list of frames produced by reformatDF on the shared
    country/year key columns.

    Note: pops the first element, so the caller's list loses its head
    (same mutation behavior as before).
    '''
    merged = list_of_dfs.pop(0)
    for other in list_of_dfs:
        merged = merged.merge(other,
                              on=['Country Code', 'Year', 'Country Name'])
    return merged
# Import the raw World Bank CSV exports (first 3 rows are file metadata).
DF1 = pd.read_csv('forest_percent.csv', skiprows=3)
DF2 = pd.read_csv('forest_area.csv', skiprows=3)
DF3 = pd.read_csv('agriculture_percent.csv', skiprows=3)
DF4 = pd.read_csv('population.csv', skiprows=3)
DF5 = pd.read_csv('gdp.csv', skiprows=3)
# Reformat each into long [Country, Code, Year, feature] shape.
newDF1 = reformatDF(DF1, feature_name='Forest Land Percent')
newDF2 = reformatDF(DF2, feature_name='Forest Area (sq km)', round_to=0)
newDF3 = reformatDF(DF3, feature_name='Agriculture Land Percentage')
newDF4 = reformatDF(DF4, feature_name='Population', round_to=0)
newDF5 = reformatDF(DF5, feature_name='GDP Per Capita (2019 USD)', round_to=0)
# Merge the historical data into a single frame.
DFlist = [newDF1, newDF2, newDF3, newDF4, newDF5]
df0 = mergeDFs(DFlist)
# Import the prediction data (one column per country code).
predDF = pd.read_csv('forest_predictions.csv', index_col=0)
# Build a country code -> country name lookup from any single year.
oneYear = df0[df0['Year'] == 1990]
countries = oneYear[['Country Code', 'Country Name']]
codeList = list(countries['Country Code'])
nameList = list(countries['Country Name'])
countryDict = dict(zip(codeList, nameList))
# First prediction column is not a country; skip it.
countryList = list(predDF.columns)[1:]
# Reshape the prediction frame to append to the historical frame.
newPredDF = pd.DataFrame()
for country in countryList:
    tempDF = predDF[['Year', f'{country}']]
    tempDF = tempDF.rename(columns={f'{country}': 'Forest Land Percent'})
    tempDF['Country Code'] = f'{country}'
    tempDF['Country Name'] = countryDict[f'{country}']
    # BUG FIX: removed the bare no-op expression statement
    # `tempDF['Forest Land Percent']` that had no effect here.
    newPredDF = pd.concat([newPredDF, tempDF])
newPredDF['Forest Land Percent'] = newPredDF['Forest Land Percent'].astype('float') # noqa
newPredDF.sort_values(by='Year', inplace=True)
# Keep a standalone copy of the prediction data.
predictionDF = newPredDF.copy()
# Append predictions to the historical data.
df = pd.concat([df0, newPredDF], sort=False).reset_index(drop=True)
# Fill null values with a sentinel string.
df = df.fillna('NODATA')
df.to_csv('dataframe.csv', index=0)
|
from django.http import HttpResponse
from django.shortcuts import render
def home_view(request, *args, **kwargs):
    """Render the landing page; logs the extra args and the current user."""
    print(args, kwargs)
    print(request.user)
    context = {}
    return render(request, "home.html", context)
def login_view(request, *args, **kwargs):
    """Render the login page; logs the extra args and the current user."""
    print(args, kwargs)
    print(request.user)
    context = {}
    return render(request, "login.html", context)
def signup_view(request, *args, **kwargs):
    """Render the signup page; logs the extra args and the current user."""
    print(args, kwargs)
    print(request.user)
    context = {}
    return render(request, "signup.html", context)
def details_view(request, *args, **kwargs):
    """Render the details page; logs the extra args and the current user."""
    print(args, kwargs)
    print(request.user)
    context = {}
    return render(request, "details.html", context)
|
# In combinatorial mathematics, a derangement is a permutation of the
# elements of a set, such that no element appears in its original
# position.
#
# There's originally an array consisting of n integers from 1 to n in
# ascending order, you need to find the number of derangement it can
# generate.
#
# Also, since the answer may be very large, you should return the
# output mod 109 + 7.
#
# Example 1:
# Input: 3
# Output: 2
# Explanation: The original array is [1,2,3]. The two derangements are [2,3,1] and [3,1,2].
# Note:
# n is in the range of [1, 106].
class Solution:
    def findDerangement(self, n):
        """
        Count the derangements of [1..n], modulo 10**9 + 7.

        Uses the recurrence D(k) = (k-1) * (D(k-1) + D(k-2)) with
        D(1) = 0 and D(2) = 1, iteratively: O(n) time, O(1) space.

        BUG FIX: removed the leftover debug print() inside the loop,
        which spammed stdout and dominated the runtime for large n.

        :type n: int
        :rtype: int
        """
        MOD = 10 ** 9 + 7
        if n in {1, 0}:
            return 0
        elif n == 2:
            return 1
        # first, second track D(i+1), D(i+2) as the loop advances.
        first, second = 0, 1
        for i in range(n - 2):
            # D(i+3) = (i+2) * (D(i+2) + D(i+1))  (mod MOD)
            first, second = second, (first + second) * (i + 2) % MOD
        return second


if __name__ == "__main__":
    # Smoke run (was an unconditional module-level call that also printed
    # the whole iteration trace via the removed debug statement).
    Solution().findDerangement(12)
import sys
from tqdm import tqdm
from pubsub import MessageQueue
# Benchmark raw message throughput on the 'random' channel: block
# indefinitely per message (timeout=None) and let tqdm report msg/s
# over one million received messages.
m = MessageQueue()
m.connect()
m.subscribe('random')
for i in tqdm(range(int(1e6)), desc='Benchmarking', file=sys.stdout, unit_scale=True, unit=' msg'):
    m.get_message(timeout=None)
|
# use ls /dev/tty* to detect the arduino port
import serial
import time

# Stream lines from the Arduino serial port until interrupted.
arduino = serial.Serial("/dev/ttyACM0", baudrate=115200, timeout=3.0)
try:
    # Ctrl-C (KeyboardInterrupt) or a serial error breaks the loop.
    while True:
        val = arduino.readline()
        print(val)
finally:
    # BUG FIX: close() was unreachable after the infinite loop; the
    # finally block guarantees the port is released on exit.
    arduino.close()
#!/usr/bin/env python
from utils import getDatasetPresence
import json
import sys
# Query the CMS web services (via the utils helper) for where the dataset
# named on the command line is hosted, and pretty-print the JSON result.
# NOTE: Python 2 print-statement syntax.
url='cmsweb.cern.ch'
presence = getDatasetPresence(url, sys.argv[1])
print json.dumps( presence, indent=2)
|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 03 19:28:12 2015
Non Intrusive Load Monitoring for Energy Disaggregation for the REDD data
Class project for CS446: Machine Learning @ University of Illinois at Urbana-Champaign
REDD Reference: "J. Zico Kolter and Matthew J. Johnson. REDD: A public data set for
energy disaggregation research. In proceedings of the SustKDD
workshop on Data Mining Applications in Sustainability, 2011."
@authors: Anand Deshmukh, Danny Lohan
University of Illinois at Urbana-Champaign
"""
import numpy as np
import matplotlib.pyplot as plt
import csv
import time, os
from scipy import interpolate
from MLData import createInstances, deviceErrors
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.svm import SVR
#from sklearn.lda import LDA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.ensemble import RandomForestClassifier
from energyCalcs import actDevEnergy,appDevEnergy,energyComp
from sklearn.cluster import KMeans
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
# Input locations and train/test split configuration for the REDD house-3 data.
datapath = '../../../datasets/REDD/low_freq/house_3/'
weatherfile = 'Data/weather/20110405hourly_Boston.txt'
weatherfile_test = 'Data/weather/20110405hourly_Boston.txt'
# BUG FIX: weatherfile names a *file*, so os.path.isdir() was always False
# and silently disabled all weather features; use os.path.isfile instead.
available_weatherdata = os.path.isfile(weatherfile)
fileprefix = 'channel_'
train_vals_start = 0
train_vals_end = 120001
test_vals_start = 120001
test_vals_end = 240002
# specify the timewindow for instances
timewindow = 90
# training data arrays (one column per selected device)
device_timer = np.zeros(shape=(train_vals_end - train_vals_start,9))
device_power = np.zeros(shape=(train_vals_end - train_vals_start,9))
total_device_power = np.zeros(shape=(train_vals_end - train_vals_start))
weather_timer = np.zeros(shape=(1980))
weather_temp = np.zeros(shape=(1980))
weather_data = np.zeros(shape=(train_vals_end - train_vals_start))
# test data arrays
device_timer_test = np.zeros(shape=(test_vals_end - test_vals_start, 9))
device_power_test = np.zeros(shape=(test_vals_end - test_vals_start, 9))
total_device_power_test = np.zeros(shape=(test_vals_end - test_vals_start))
weather_timer_test = np.zeros(shape= (1980,))
weather_temp_test = np.zeros(shape= (1980,))
weather_data_test = np.zeros(shape=(test_vals_end - test_vals_start))
# devices (REDD channel indices minus 3) used in training and testing
use_idx = np.array([3,4,6,7,10,11,13,17,19])
uidx = 0
################################################################
# READ TRANING DATA ##
# read the weather data
if available_weatherdata:
    # Parse the hourly Boston weather CSV into (epoch seconds, temp) arrays.
    wfile = open(weatherfile,'rt')
    rownum = 0
    try:
        wreader = csv.reader(wfile, delimiter=',')
        for row in wreader:
            # row[1] = date YYYYMMDD, row[2] = time HHMM, row[10] = temperature
            wdate = row[1]
            wtime = row[2]
            wdatelist = list(wdate)
            wtimelist = list(wtime)
            timedatestr = ''.join(wdatelist[0:4])+'-'+ ''.join(wdatelist[4:6])+'-'+''.join(wdatelist[6:8]) +'-'+ ''.join(wtimelist[0:2])+'-'+''.join(wtimelist[2:4])+'-'+'00'
            weather_timer[rownum] = int(time.mktime(time.strptime(timedatestr,"%Y-%m-%d-%H-%M-%S")))
            weather_temp[rownum] = int(row[10])
            rownum = rownum + 1
    finally:
        # BUG FIX: was "wfile.close" (a bare attribute access, never
        # called), so the file handle leaked.
        wfile.close()
###############################################################
# read the device data
# Load each channel_<n>.dat file; only channels listed in use_idx
# contribute a column to the training arrays.
print("Generating train data...")
for device in range(0,20):
    channel = device + 3
    filename = 'channel_'+ str(channel) +'.dat'
    filepath = datapath + filename
    xtemp,ytemp = np.loadtxt(filepath,unpack=True)
    #plt.plot(device_timer[:,device],device_power[:,device])
    if (device in use_idx):
        print('Device: {} / Uidx: {}'.format(device, uidx))
        device_timer[:,uidx] = xtemp[train_vals_start:train_vals_end]# - 1302930690
        device_power[:,uidx] = ytemp[train_vals_start:train_vals_end]
        total_device_power += ytemp[train_vals_start:train_vals_end]
        uidx = uidx + 1
if available_weatherdata:
    # Interpolate hourly weather temps onto the device sample timestamps.
    interp_func = interpolate.interp1d(weather_timer, weather_temp)
    weather_data = interp_func(device_timer[:,0])
################################################################
## READ TEST DATA ##
uidx = 0
# read the weather data
if available_weatherdata:
    # Parse the test-period weather CSV into the *_test arrays.
    wfile = open(weatherfile_test,'rt')
    rownum = 0
    try:
        wreader = csv.reader(wfile, delimiter=',')
        for row in wreader:
            # row[1] = date YYYYMMDD, row[2] = time HHMM, row[10] = temperature
            wdate = row[1]
            wtime = row[2]
            wdatelist = list(wdate)
            wtimelist = list(wtime)
            timedatestr = ''.join(wdatelist[0:4])+'-'+ ''.join(wdatelist[4:6])+'-'+''.join(wdatelist[6:8]) +'-'+ ''.join(wtimelist[0:2])+'-'+''.join(wtimelist[2:4])+'-'+'00'
            weather_timer_test[rownum] = int(time.mktime(time.strptime(timedatestr,"%Y-%m-%d-%H-%M-%S")))
            weather_temp_test[rownum] = int(row[10])
            rownum = rownum + 1
    finally:
        # BUG FIX: was "wfile.close" (a bare attribute access, never
        # called), so the file handle leaked.
        wfile.close()
#################################################################
## read the device data
# Load each channel_<n>.dat file; only channels listed in use_idx
# contribute a column to the test arrays.
print("Generating test data...")
uidx = 0
for device in range(0,20):
    channel = device + 3
    filename = 'channel_'+ str(channel) +'.dat'
    filepath = datapath + filename
    xtemp,ytemp = np.loadtxt(filepath,unpack=True)
    if (device in use_idx):
        print('Device: {} / Uidx: {}'.format(device, uidx))
        device_timer_test[:,uidx] = xtemp[test_vals_start:test_vals_end]# - 1302930690
        device_power_test[:,uidx] = ytemp[test_vals_start:test_vals_end]
        total_device_power_test += ytemp[test_vals_start:test_vals_end]
        uidx = uidx + 1
if available_weatherdata:
    # BUG FIX: this copy-pasted section interpolated with the *training*
    # weather arrays and overwrote weather_data / read device_timer; the
    # test section must use the _test arrays throughout.
    interp_func = interpolate.interp1d(weather_timer_test, weather_temp_test)
    weather_data_test = interp_func(device_timer_test[:,0])
#temp_array = range(train_vals_start,train_vals_end)
#plt.plot(temp_array,total_device_power)
################################################################
# create the instances and labels from the training data
# Build (instance, label) pairs from the aggregate power signal and save
# them for reuse.  The classifier id is forwarded to createInstances.
classifier = 3 # 1 - Naive Bayes, 2 - Regression, 3 - SVM, 4 - Linear Discriminant Analysis, 5 - Random Forest Classifier
# Train / Test
train_instances,train_labels,train_labels_binary = createInstances(
    total_device_power, device_timer, device_power, weather_data,
    classifier,
    timewindow)
test_instances,test_labels,test_labels_binary = createInstances(
    total_device_power_test, device_timer_test, device_power_test, weather_data_test,
    classifier,
    timewindow)
# else:
# train_instances,train_labels,train_labels_binary = createInstances(total_device_power, device_timer, device_power,classify,timewindow)
# test_instances,test_labels,test_labels_binary = createInstances(total_device_power_test, device_timer_test, device_power_test,classify,timewindow)
# Persist the generated instances/labels for later runs.
np.save('../datasets/train_instances.npy', train_instances)
np.save('../datasets/train_labels.npy', train_labels)
np.save('../datasets/train_labels_binary.npy', train_labels_binary)
np.save('../datasets/test_instances.npy', test_instances)
np.save('../datasets/test_labels.npy', test_labels)
np.save('../datasets/test_labels_binary.npy', test_labels_binary)
# Train and evaluate each classifier in turn on the same instance data.
for clf_idx in range(1, 7):
    if clf_idx == 1:
        cLabel = 'Naive Bayes'
        clf = MultinomialNB()
    elif clf_idx == 2:
        cLabel = 'Logistic Regression'
        clf = LogisticRegression()
    elif clf_idx == 3:
        cLabel = 'SVM'
        clf = SVC()
    elif clf_idx == 4:
        cLabel = 'Linear Discriminant Analysis'
        clf = LDA()
    elif clf_idx == 5:
        cLabel = 'Random Forest Classifier'
        clf = RandomForestClassifier(n_estimators=10)
    elif clf_idx == 6:
        cLabel = 'K-means clustering'
        clf = KMeans(n_clusters=512, init='random')
    # BUG FIX: removed no-op constructor calls whose results were
    # discarded (MultinomialNB(alpha=1.0, ...), LogisticRegression(C=1.0,
    # penalty='l1', tol=1e-6), SVR(C=1.0, epsilon=0.2)) -- they configured
    # nothing.  If those hyperparameters were intended, pass them to the
    # `clf = ...` constructors above.
    t0 = time.time()
    clf.fit(train_instances, train_labels)
    t1 = time.time()
    nd = len(use_idx)
    # prediction on training and test data
    accuracyTr, dev_acc_train, predicted_labels_binary_train = deviceErrors(
        clf,nd,train_instances,train_labels,train_labels_binary)
    accuracyTs, dev_acc_test, predicted_labels_binary_test = deviceErrors(
        clf,nd,test_instances,test_labels,test_labels_binary)
    # prediction of device energy consumption (column 5 holds the
    # aggregate energy feature)
    agg_energy_train = train_instances[:,5]
    actEnergy_train = actDevEnergy(device_power,device_timer,nd)
    appEnergy_train = appDevEnergy(train_labels_binary,agg_energy_train,nd)
    preEnergy_train = appDevEnergy(predicted_labels_binary_train,agg_energy_train,nd)
    acTap_train, acTpre_train, apTde_train = energyComp(actEnergy_train, appEnergy_train, preEnergy_train)
    agg_energy_test = test_instances[:,5]
    actEnergy_test = actDevEnergy(device_power_test,device_timer_test,nd)
    appEnergy_test = appDevEnergy(test_labels_binary,agg_energy_test,nd)
    preEnergy_test = appDevEnergy(predicted_labels_binary_test,agg_energy_test,nd)
    acTap_test, acTpre_test, apTde_test = energyComp(actEnergy_test, appEnergy_test, preEnergy_test)
    t2 = time.time()
    t3 = time.time()
    trainTime = t1-t0
    test1Time = t2-t1
    test2Time = t3-t2
    print( '================================================================================')
    print( 'Classifier = ' + cLabel)
    print( 'Computational Expense for Training Classifier = ' + str(trainTime) + 's')
    print( '------------------------- Results for Traning Data -----------------------------')
    print( 'Percent Accuracy on Training Data = ' + str(accuracyTr) + '%')
    print( 'Percent Accuracy per device on Training Data = ' + str(dev_acc_train) + '%')
    print( 'Actual Device Energy on Training Data = ' + str(actEnergy_train))
    print( 'Approx Device Energy on Training Data = ' + str(appEnergy_train))
    print( 'Predicted Device Energy on Training Data = ' + str(preEnergy_train))
    print( 'Computational Expense Classifying Training Data = ' + str(test1Time) + 's')
    print( 'Device Accuracy Approx. vs Actual = ' + str(acTap_train))
    print( 'Device Accuracy Pre. vs. Actual = ' + str(acTpre_train))
    print( 'Device Accuracy Pre. vs. approx. = ' + str(apTde_train))
    print( '------------------------- Results for Test Data -----------------------------')
    print( 'Percent Accuracy on Test Data = ' + str(accuracyTs) + '%')
    print( 'Percent Accuracy per device on Test Data = ' + str(dev_acc_test) + '%')
    print( 'Actual Device Energy on Test Data = ' + str(actEnergy_test))
    print( 'Approx Device Energy on Test Data = ' + str(appEnergy_test))
    print( 'Predicted Device Energy on Test Data = ' + str(preEnergy_test))
    print( 'Computational Expense Classifying Test Data = ' + str(test2Time) + 's')
    print( 'Device Accuracy Approx. vs Actual = ' + str(acTap_test))
    print( 'Device Accuracy Pre. vs. Actual = ' + str(acTpre_test))
    print( 'Device Accuracy Pre. vs. approx. = ' + str(apTde_test))
# compute the energy consumption of each device.
################################################################
# plot 4 of the devices for illustration
#fig = plt.figure(0)
#lendev = len(device_timer[:,0])
#ax1 = plt.subplot(221)
#plt.plot((device_timer[:,0]-device_timer[0,0])/(device_timer[lendev-1,0]-device_timer[0,0]),device_power[:,0])
#ax1.set_title('Electronics')
#plt.ylabel('Device Power (W)')
#
#ax2 = plt.subplot(222)
#plt.plot((device_timer[:,0]-device_timer[0,0])/(device_timer[lendev-1,0]-device_timer[0,0]),device_power[:,1])
#ax2.set_title('Refrigerator')
##plt.ylabel('Device Power (W)')
#
#ax3 = plt.subplot(223)
#plt.plot((device_timer[:,0]-device_timer[0,0])/(device_timer[lendev-1,0]-device_timer[0,0]),device_power[:,3])
#ax3.set_title('Furnace')
#plt.xlabel('Normalized Time')
#plt.ylabel('Device Power (W)')
#
#ax4 = plt.subplot(224)
#plt.plot((device_timer[:,0]-device_timer[0,0])/(device_timer[lendev-1,0]-device_timer[0,0]),device_power[:,5])
#ax4.set_title('Washer Dryer 2')
#plt.xlabel('Normalized Time')
##plt.ylabel('Device Power (W)')
#
#fig = plt.figure(1)
#plt.plot((device_timer[0:288,0]-device_timer[0,0])/(device_timer[288-1,0]-device_timer[0,0]),device_power[0:288,0])
#
#
#plt.show()
#plt.ylabel('Mains Power Consumption (W)')
#plt.xlabel('time (s)') |
# Copyright 2015, Pinterest, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Validation tests for master handler."""
import sys
import unittest
from pinball.master.master_handler import MasterHandler
from pinball.master.thrift_lib.ttypes import ArchiveRequest
from pinball.master.thrift_lib.ttypes import GroupRequest
from pinball.master.thrift_lib.ttypes import ModifyRequest
from pinball.master.thrift_lib.ttypes import Query
from pinball.master.thrift_lib.ttypes import QueryAndOwnRequest
from pinball.master.thrift_lib.ttypes import QueryRequest
from pinball.master.thrift_lib.ttypes import Token
from tests.pinball.persistence.ephemeral_store import EphemeralStore
__author__ = 'Pawel Garbacki'
__copyright__ = 'Copyright 2015, Pinterest, Inc.'
__credits__ = [__author__]
__license__ = 'Apache'
__version__ = '2.0'
class MasterHandlerTestCase(unittest.TestCase):
    """Plumbing-level validation of MasterHandler.

    Each test exercises one handler entry point end-to-end against an
    in-memory EphemeralStore; detailed behavior is covered by the
    transaction tests.
    """

    def _insert_token(self, handler):
        """Insert one token via the handler and return the stored copy."""
        request = ModifyRequest()
        token = Token(name='/some_other_dir/some_token', data='some data')
        request.updates = [token]
        response = handler.modify(request)
        self.assertEqual(1, len(response.updates))
        return response.updates[0]

    def test_archive(self):
        handler = MasterHandler(EphemeralStore())
        token = self._insert_token(handler)
        request = ArchiveRequest()
        request.tokens = [token]
        handler.archive(request)
        # The logic handling the request is tested thoroughly in
        # transaction tests.  Here we only make sure that the plumbing is in
        # place.

    def test_group(self):
        request = GroupRequest()
        request.namePrefix = '/'
        handler = MasterHandler(EphemeralStore())
        response = handler.group(request)
        self.assertEqual(1, len(response.counts))
        # BUG FIX: dict.values() returns a non-subscriptable view on
        # Python 3; wrap in list() (also valid on Python 2).
        self.assertEqual(1, list(response.counts.values())[0])

    def test_modify(self):
        handler = MasterHandler(EphemeralStore())
        self._insert_token(handler)

    def test_query(self):
        query = Query()
        query.namePrefix = ''
        query.maxTokens = 10
        request = QueryRequest()
        request.queries = [query]
        handler = MasterHandler(EphemeralStore())
        response = handler.query(request)
        self.assertEqual(1, len(response.tokens))

    def test_query_and_own(self):
        query = Query()
        query.namePrefix = ''
        query.maxTokens = 10
        request = QueryAndOwnRequest()
        request.owner = 'some_owner'
        # BUG FIX: sys.maxint was removed in Python 3; sys.maxsize is the
        # closest equivalent and also exists on Python 2.
        request.expirationTime = sys.maxsize
        request.query = query
        handler = MasterHandler(EphemeralStore())
        response = handler.query_and_own(request)
        self.assertEqual(0, len(response.tokens))
|
from django.shortcuts import render
import requests
from .models import City
# Create your views here.
def index(request):
    """Render the Hawaii info page with wind data for every saved City.

    Queries the OpenWeatherMap API once per City row and passes a list of
    {'windspeed', 'winddirection'} dicts to the template.
    """
    # NOTE(review): the API key is hardcoded in the URL — move it to
    # settings / an environment variable before publishing this code.
    url = 'https://api.openweathermap.org/data/2.5/weather?q={}&appid=8af2aa7fa978da0c3dc608a85406875c'
    cities = City.objects.all()
    weather_data = []
    for city in cities:
        # str(city) is interpolated into the query string; assumes City.__str__
        # returns the city name — TODO confirm against the City model.
        r = requests.get(url.format(city)).json()
        weather_data.append({
            'windspeed': r['wind']['speed'],
            'winddirection': r['wind']['deg'],
        })
    context = {'weather_data': weather_data}
    return render(request, 'info/hawaiiinfo.html', context)
|
import sys, os
import argparse
from Converter import Converter
def main():
    """Parse the command line and convert an IC file to larcv format."""
    parser = argparse.ArgumentParser(description='IC file converter to larcv format')
    parser.add_argument('-i', '--input', required=True,
                        dest='ic_fin',
                        help='Input IC file (Required)')
    parser.add_argument('-nevents', '--num-events',
                        type=int, dest='nevents', default=None,
                        help='integer, Number of events to process (default all)')
    parser.add_argument('-o', '--output', default=None,
                        type=str, dest='larcv_fout',
                        help='string, Output larcv file name (optional)')
    args = parser.parse_args()
    # None for output / nevents lets the Converter use its defaults.
    Converter().convert(_file_in=args.ic_fin,
                        _file_out=args.larcv_fout,
                        max_entries=args.nevents)

if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
#
# Author: Oscar Benjamin
# Date: Feb 2021
# Description:
# Command line script to find integer roots of polynomials with
# integer coefficients.
#-------------------------------------------------------------------#
# #
# Command-line interface #
# #
#-------------------------------------------------------------------#
PROGRAM_EXPLANATION = """
Usage:
$ python roots.py COEFF1 COEFF2 ...
Find integer roots of a polynomial with integer coefficients.
Example:
Find the roots of x^4 - 3x^3 - 75x^2 + 475x - 750.
$ python roots.py 1 -3 -75 475 -750
-10
3
5
"""

def main(*arguments):
    """Entry point: parse coefficients from *arguments* and print the roots.

    Called with no arguments, prints the usage text instead.
    """
    if not arguments:
        print(PROGRAM_EXPLANATION)
        return
    coefficients = parse_coefficients(arguments)
    print_roots(integer_roots(coefficients))
def parse_coefficients(arguments):
    """Convert string arguments to integer

    >>> parse_coefficients(["2", "3"])
    [2, 3]
    """
    return list(map(int, arguments))
def print_roots(roots):
    """Print the roots one per line if there are any

    >>> print_roots([2, 3])
    2
    3
    """
    for root in roots:
        print(root)
#-------------------------------------------------------------------#
# #
# Polynomial functions #
# #
#-------------------------------------------------------------------#
class BadPolynomialError(Exception):
    """Raised by polynomial routines when the polynomial is invalid.

    A valid polynomial is a list of coefficients like [1, 2, 1]
    The first (leading) coefficient must *not* be zero in a valid polynomial.
    """
def integer_roots(poly):
    """ Returns the integer roots of the polynomial p(x).

    This works using the rational root theorem: any non-zero integer root
    must divide the (non-zero) constant term, so its magnitude is bounded
    by it.  Example:

    >>> integer_roots([1, -5, 6])
    [2, 3]

    Raises BadPolynomialError if poly is not a list of ints or has a zero
    leading coefficient.
    """
    if not isinstance(poly, list):
        raise BadPolynomialError("Polynomial should be a list of coefficients")
    roots = []
    if poly != []:
        for coeff in poly:
            if not isinstance(coeff, int):
                raise BadPolynomialError("Polynomial should be a list of coefficients")
        # A valid polynomial must not have a zero leading coefficient.
        if poly[0] == 0:
            raise BadPolynomialError("Polynomial should be a list of coefficients")
        # BUG FIX: the bound was previously taken from poly[-1] directly, so a
        # zero constant term (e.g. x^2 - x == [1, -1, 0]) gave d == 0 and only
        # candidate 0 was tested, missing every non-zero root.  Strip trailing
        # zeros first: p(x) = q(x) * x^k, and every non-zero integer root of p
        # is a root of q, bounded by |q's constant term|.  Candidate 0 is still
        # tested (and found) against the original poly below.
        reduced = list(poly)
        while reduced[-1] == 0:
            reduced.pop()
        d = abs(reduced[-1])
        for candidate in range(-d, d + 1):
            if is_root(poly, candidate):
                roots.append(candidate)
    return roots

def evaluate_polynomial(poly, xval):
    """ Calculates the value of the polynomial p(x) at x.

    For example evaluate_polynomial([1, 2, 1], 3) calculates p(3)
    where p(x) = x^2 + 2x + 1

    >>> evaluate_polynomial([1, 2, 1], 3)
    16

    Raises BadPolynomialError for non-list input, non-int coefficients, or
    the (invalid) polynomial [0].  The empty list evaluates to 0.
    """
    if not isinstance(poly, list):
        raise BadPolynomialError("Polynomial should be a list of coefficients")
    if len(poly) == 0:
        return 0
    if poly == [0]:
        raise BadPolynomialError("Polynomial should be a list of coefficients")
    for coefficient in poly:
        if not isinstance(coefficient, int):
            raise BadPolynomialError("Polynomial should be a list of coefficients")
    # Horner's method: avoids the original's in-place double reverse of the
    # caller's list (a hidden-mutation hazard if an exception interrupts it)
    # and the repeated pow() calls.
    result = 0
    for coefficient in poly:
        result = result * xval + coefficient
    return result

def is_root(poly, xval):
    """ Returns True if x is a root of p(x) e.g.

    >>> is_root([1, 2, 1], 3)
    False
    """
    return evaluate_polynomial(poly, xval) == 0
#-------------------------------------------------------------------#
# #
# Unit tests #
# #
#-------------------------------------------------------------------#
#
# Run these tests with pytest:
#
# $ pytest roots.py
#
def test_evaluate_polynomial():
    """evaluate_polynomial: values on valid input, errors on invalid."""
    assert evaluate_polynomial([], 1) == 0
    assert evaluate_polynomial([1], 2) == 1
    assert evaluate_polynomial([1, 2], 3) == 5
    assert evaluate_polynomial([1, 2, 1], 4) == 25
    # Invalid inputs should raise BadPolynomialError
    from pytest import raises
    with raises(BadPolynomialError):
        evaluate_polynomial([0], 1)
    with raises(BadPolynomialError):
        evaluate_polynomial({}, 1)
    with raises(BadPolynomialError):
        evaluate_polynomial([[1]], 1)
def test_is_root():
    """is_root: truth table on small polynomials, errors on invalid input."""
    assert is_root([], 1) is True
    assert is_root([1], 1) is False
    assert is_root([1, 1], 1) is False
    assert is_root([1, 1], -1) is True
    assert is_root([1, -1], 1) is True
    assert is_root([1, -1], -1) is False
    assert is_root([1, -5, 6], 2) is True
    assert is_root([1, -5, 6], 3) is True
    assert is_root([1, -5, 6], 4) is False
    # Invalid inputs should raise BadPolynomialError
    from pytest import raises
    with raises(BadPolynomialError):
        is_root([0], 1)
    with raises(BadPolynomialError):
        is_root({}, 1)
    with raises(BadPolynomialError):
        is_root([[1]], 1)
def test_integer_roots():
    """integer_roots: roots in ascending order, errors on invalid input."""
    # In the case of the zero polynomial every value is a root but we return
    # the empty list because we can't list every possible value!
    assert integer_roots([]) == []
    assert integer_roots([1]) == []
    assert integer_roots([1, 1]) == [-1]
    assert integer_roots([2, 1]) == []
    assert integer_roots([1, -5, 6]) == [2, 3]
    assert integer_roots([1, 5, 6]) == [-3, -2]
    assert integer_roots([1, 2, 1]) == [-1]
    # BUG FIX: this assertion was accidentally duplicated on two lines.
    assert integer_roots([1, -2, 1]) == [1]
    assert integer_roots([1, -3, -75, 475, -750]) == [-10, 3, 5]
    # Invalid inputs should raise BadPolynomialError
    from pytest import raises
    raises(BadPolynomialError, lambda: integer_roots([0]))
    raises(BadPolynomialError, lambda: integer_roots({}))
    raises(BadPolynomialError, lambda: integer_roots([[1]]))
if __name__ == "__main__":
    # Forward all command-line arguments (minus the program name) to main().
    import sys
    main(*sys.argv[1:])
|
from django.urls import path
from .views import ResultView, ResultDataTable
# URL namespace for this app (enables reverse('event:result') etc.).
app_name = 'event'
urlpatterns = [
    # Results page and the endpoint backing its data table.
    path('', ResultView.as_view(), name='result'),
    path('data/', ResultDataTable.as_view(), name='resultdatatable'),
]
"""
This file is part of the openPMD-updater.
Copyright 2018 openPMD contributors
Authors: Axel Huebl
License: ISC
"""
from openpmd_updater.backends.IBackend import IBackend
import packaging.version
try:
import h5py as h5
except:
h5 = None
class HDF5(IBackend):
    """HDF5 file handling backend.

    Wraps h5py and tracks a "current working directory" (an h5py group)
    inside the open file, exposing IBackend-style navigation and
    group/attribute/dataset manipulation.
    """

    def __init__(self, filename):
        """Open a HDF5 file read-write, positioned at the root group.

        Raises RuntimeError if h5py is missing or the file is not HDF5.
        """
        if h5 is None:
            raise RuntimeError("h5py is not installed!")
        if self.can_handle(filename):
            self.fh = h5.File(filename, 'r+')
            # BUG FIX: the current group used to be stored as `self.pwd`,
            # which shadowed the pwd() method and made it uncallable on
            # instances.  It now lives in `self._cwd`.
            self._cwd = self.fh["/"]
        else:
            raise RuntimeError("HDF5 backend can not open non-HDF5 files!")

    @staticmethod
    def can_handle(filename):
        """Check if filename is a HDF5 file (via the 8-byte magic header)."""
        signature = b'\x89HDF\r\n\x1a\n'
        try:
            with open(filename, 'rb') as f:
                header = f.read(8)
            return header == signature
        except Exception:
            # Unreadable / nonexistent files are simply "not handleable".
            return False

    @property
    def version(self):
        """Return openPMD standard version of the file."""
        # assumes the "openPMD" attribute is stored as bytes — TODO confirm
        # against files written by newer h5py, which may return str.
        ver_string = self.fh.attrs["openPMD"].decode()
        return packaging.version.parse(ver_string)

    def cd(self, path):
        """Change current directory in file (None resets to the root group)."""
        if path is None:
            self._cwd = self.fh["/"]
        else:
            self._cwd = self._cwd[path]

    def pwd(self, path=None):
        """Return the name of the current directory in the file.

        BUG FIX: the original neither returned the value nor was callable
        (shadowed by the same-named attribute).  The unused `path` parameter
        is kept, now with a default, for call compatibility.
        """
        return self._cwd.name

    def list_groups(self, path):
        """Return a list of groups in a path"""
        cur_path = self._cwd[path]
        return list(filter(lambda x: type(cur_path[x]) is h5.Group, cur_path.keys()))

    def list_attrs(self, path):
        """Return a list of attributes on a path"""
        cur_path = self._cwd[path]
        return list(cur_path.attrs.keys())

    def list_data(self, path):
        """Return a list of datasets in a path"""
        cur_path = self._cwd[path]
        return list(filter(lambda x: type(cur_path[x]) is h5.Dataset, cur_path.keys()))

    def is_group(self, path):
        """Return if a path is a group"""
        return type(self._cwd[path]) is h5.Group

    def is_data(self, path):
        """Return if a path is a dataset"""
        return type(self._cwd[path]) is h5.Dataset

    def move(self, old_path, new_path):
        """Move (rename) a group, attribute or dataset."""
        if new_path == old_path:
            raise RuntimeError("old_path and new_path are identical!")
        # Attributes first: h5py has no Attribute object retrievable via
        # item access (the original `h5.Attribute` branch could never match),
        # so detect them through the attrs manager and copy-then-delete.
        if old_path in self._cwd.attrs:
            self._cwd.attrs[new_path] = self._cwd.attrs[old_path]
            # BUG FIX: the original called the nonexistent self.delete().
            self.del_attr(old_path)
            return
        # BUG FIX: the original indexed with the string literal "old_path".
        obj = self._cwd[old_path]
        if isinstance(obj, (h5.Group, h5.Dataset)):
            self._cwd.move(old_path, new_path)
        else:
            # BUG FIX: the exception was constructed but never raised.
            raise NotImplementedError("Move is not implemented for "
                                      "'{0}' at '{1}'!".format(type(obj), old_path))

    def del_group(self, name):
        """Remove a group (or any object reachable by name) from the file."""
        del self._cwd[name]

    # Backwards-compatible alias for the original (misspelled) method name.
    del_goup = del_group

    def del_attr(self, name, path=None):
        """Remove an attribute (on the current group, or on `path`)."""
        if path is None:
            del self._cwd.attrs[name]
        else:
            del self._cwd[path].attrs[name]

    def del_data(self, name):
        """Remove a dataset from the current group."""
        del self._cwd[name]

    def add_group(self, path):
        """Add a new group at path"""
        # BUG FIX: was `self.pwd.create_group[path]` — subscripting a method.
        self._cwd.create_group(path)

    def add_attr(self, name, value, path=None):
        """Add a new attribute at path"""
        if path is None:
            self._cwd.attrs[name] = value
        else:
            self._cwd[path].attrs[name] = value

    def get_attr(self, name, path=None):
        """Read an attribute"""
        if path is None:
            return self._cwd.attrs[name]
        else:
            return self._cwd[path].attrs[name]
|
# import os
# import subprocess
#
# def batch_ping(host):
# result = subprocess.call('ping -c2 %s' % host, shell = True)
# if result:
# print('%s is down' % host)
# else:
# print('%s is up' % host)
#
# if __name__ == '__main__':
# ip_addrs = ['192.168.4.%s' % i for i in range(1,10)]
# for ip in ip_addrs:
# retval = os.fork()
# if not retval:
# batch_ping(ip)
# exit()
import subprocess
import os
def ping_batch(host):
    """Ping *host* twice and print whether it is up or down.

    BUG FIX: the original ran 'ping -c2 HOST &> /dev/null' with shell=True.
    '&>' is a bash extension; under /bin/sh on Debian-family systems (dash)
    it backgrounds the command, so the exit status no longer reflects
    reachability.  Passing an argument list with shell=False also avoids
    interpolating *host* into a shell string.
    """
    rc = subprocess.call(
        ['ping', '-c2', host],
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
    )
    if rc:
        print('%s : down' % host)
    else:
        print('%s : up' % host)
if __name__ == '__main__':
    # One child process per address so the pings run concurrently.
    ips = ('192.168.4.%s' % i for i in range(1,20))
    for ip in ips:
        pid = os.fork()
        if not pid:
            # Child: ping one host, then terminate so it does not fall
            # through and continue the parent's loop.
            # NOTE(review): the parent never wait()s, so children become
            # zombies until the parent exits; os._exit() would also be the
            # safer exit in a forked child — confirm before changing.
            ping_batch(ip)
            exit()
###############################################################################
# Way to use this:
# cmsRun runSens_cfg.py geometry=Run3
#
# Options for geometry Run3, D88, D92, D93
#
###############################################################################
import FWCore.ParameterSet.Config as cms
import os, sys, imp, re
import FWCore.ParameterSet.VarParsing as VarParsing
####################################################################
### SETUP OPTIONS
# Command-line option: which detector geometry to load.
options = VarParsing.VarParsing('standard')
options.register('geometry',
                 "Run3",
                 VarParsing.VarParsing.multiplicity.singleton,
                 VarParsing.VarParsing.varType.string,
                 "geometry of operations: Run3, D88, D92, D93")

### get and parse the command line arguments
options.parseArguments()
print(options)

####################################################################
# Use the options: Run3 uses the DDD-based geometry/era, everything
# else is treated as a Phase-2 (2026) geometry name suffix.
if (options.geometry == "Run3"):
    geomFile = "Configuration.Geometry.GeometryExtended2021Reco_cff"
    from Configuration.Eras.Era_Run3_DDD_cff import Run3_DDD
    process = cms.Process('PrintSensitive',Run3_DDD)
else:
    geomFile = "Configuration.Geometry.GeometryExtended2026" + options.geometry + "Reco_cff"
    from Configuration.Eras.Era_Phase2C11M9_cff import Phase2C11M9
    process = cms.Process('PrintSensitive',Phase2C11M9)

print("Geometry file: ", geomFile)

# Load geometry and message logger; enable Geant4 stdout capture.
process.load(geomFile)
process.load('FWCore.MessageService.MessageLogger_cfi')
process.MessageLogger.G4cout=dict()

# A single event is enough: the watcher only prints sensitive volumes.
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(1)
)

process.load('SimGeneral.HepPDTESSource.pdt_cfi')
process.load('IOMC.EventVertexGenerators.VtxSmearedFlat_cfi')
process.load('GeneratorInterface.Core.generatorSmeared_cfi')

process.source = cms.Source("EmptySource")

# Single muon gun, ~10 GeV pT, full phi and |eta| < 2.5.
process.generator = cms.EDProducer("FlatRandomPtGunProducer",
    PGunParameters = cms.PSet(
        PartID = cms.vint32(13),
        MinEta = cms.double(-2.5),
        MaxEta = cms.double(2.5),
        MinPhi = cms.double(-3.14159265359),
        MaxPhi = cms.double(3.14159265359),
        MinPt  = cms.double(9.99),
        MaxPt  = cms.double(10.01)
    ),
    AddAntiParticle = cms.bool(False),
    Verbosity = cms.untracked.int32(0),
    firstRun = cms.untracked.uint32(1)
)

# Fixed seeds so the (single-event) job is reproducible.
process.RandomNumberGeneratorService = cms.Service("RandomNumberGeneratorService",
    generator = cms.PSet(
        initialSeed = cms.untracked.uint32(123456789),
        engineName = cms.untracked.string('HepJamesRandom')
    ),
    VtxSmeared = cms.PSet(
        engineName = cms.untracked.string('HepJamesRandom'),
        initialSeed = cms.untracked.uint32(98765432)
    ),
    g4SimHits = cms.PSet(
        initialSeed = cms.untracked.uint32(11),
        engineName = cms.untracked.string('HepJamesRandom')
    )
)

process.load('SimG4Core.Application.g4SimHits_cfi')
process.p1 = cms.Path(process.generator*process.VtxSmeared*process.generatorSmeared*process.g4SimHits)

# Attach the PrintSensitive watcher to every volume ('*'); DDD geometry.
process.g4SimHits.UseMagneticField = False
process.g4SimHits.Physics.DefaultCutValue = 10.
process.g4SimHits.Watchers = cms.VPSet(cms.PSet(
    Name = cms.string('*'),
    DD4Hep = cms.bool(False),
    type = cms.string('PrintSensitive')
))
|
# ------------------------------------------------------------------------ #
# Title: Assignment 07
# Description: Research Exception Handling & Pickling in Python
# ChangeLog (Who,When,What):
# Kstevens,11-20-19,Modified code to complete assignment 7
# ------------------------------------------------------------------------ #
# Research topic: Pickle Module
# Code Version: Example code 2.1
# Reference: 11.3. copy_reg — Register pickle support functions
# https://docs.python.org/2/library/copy_reg.html
# Description: The copy_reg module defines functions which are used by pickling specific
# objects while pickling or copying. This module provides configuration
# information about object constructors(may be factory functions or class
# instances) which are not classes.
import copyreg, copy, pickle
class C(object):
    """Minimal class used to demonstrate copyreg-registered pickling."""

    def __init__(self, a):
        self.a = a

def pickle_c(c):
    """Reduction function for C: returns (constructor, args) per the
    copyreg protocol, printing a message so the call is observable."""
    print("pickling a C instance...")
    return (C, (c.a,))

# Register the reduction function; both copy and pickle will now route
# C instances through pickle_c.
copyreg.pickle(C, pickle_c)

c = C(1)
d = copy.copy(c)        # triggers pickle_c (prints once)
#pickling a C instance...
p = pickle.dumps(c)     # triggers pickle_c again
#pickling a C instance...
from django.urls import path
from vkrb.activity.views import (GiListView, SiListView,
FavoriteGiCreateView,
FavoriteSiCreateView,
FavoriteGiDeleteView,
FavoriteSiDeleteView,
GiGetView,
SiGetView)
# URL namespace for the activity app.
app_name = 'activity'
urlpatterns = [
    # NOTE(review): none of these routes pass `name=`, so they cannot be
    # resolved with reverse()/{% url %}; confirm whether names should be added.
    path('list_gi/', GiListView.as_view()),
    path('list_si/', SiListView.as_view()),
    path('get_gi/', GiGetView.as_view()),
    path('get_si/', SiGetView.as_view()),
    path('add_favorite_gi/', FavoriteGiCreateView.as_view()),
    path('add_favorite_si/', FavoriteSiCreateView.as_view()),
    path('delete_favorite_gi/', FavoriteGiDeleteView.as_view()),
    path('delete_favorite_si/', FavoriteSiDeleteView.as_view()),
]
|
class Solution(object):
    def closeStrings(self, word1, word2):
        """Return True if word1 can be transformed into word2 (LeetCode 1657).

        Two strings are "close" when they use exactly the same set of
        characters and the multiset of character frequencies matches.
        """
        # BUG FIX: Counter was never imported anywhere in this file, so the
        # method raised NameError at runtime.
        from collections import Counter
        return (len(word1) == len(word2)
                and set(word1) == set(word2)
                and Counter(Counter(word1).values()) == Counter(Counter(word2).values()))
#coding: utf-8
import math
import heapq
import bisect
import numpy as np
from collections import Counter, deque
#from scipy.misc import comb
# Find the smallest step count whose triangular number 1+2+...+k reaches X.
X = int(input())
total = 0
step = 1
while True:
    total += step
    if X <= total:
        print(step)
        break
    step += 1
# --------------------------------------------------------
# (c) Copyright 2014, 2020 by Jason DeLaat.
# Licensed under BSD 3-clause licence.
# --------------------------------------------------------
""" Adds operators to the Either monad. """
from typing import Any, TypeVar
import pymonad.either
import pymonad.monad
import pymonad.operators.operators
S = TypeVar('S') # pylint: disable=invalid-name
T = TypeVar('T') # pylint: disable=invalid-name
class Either(pymonad.operators.operators.MonadOperators, pymonad.either.Either[S, T]): # pylint: disable=abstract-method
    """ See pymonad.operators.operators and pymonad.either. """

def Left(value: S) -> Either[S, Any]: # pylint: disable=invalid-name
    """ Creates a value of the first possible type in the Either monad. """
    # Monoid slot holds (value, False): False marks the "Left" case.
    return Either(None, (value, False))

def Right(value: T) -> Either[Any, T]: # pylint: disable=invalid-name
    """ Creates a value of the second possible type in the Either monad. """
    # Monoid slot holds (None, True): True marks the "Right" case.
    return Either(value, (None, True))

class _Error(Either[S, T]): # pylint: disable=too-many-ancestors, abstract-method
    # Error/Result flavour of Either: only the repr differs.
    def __repr__(self):
        return f'Result: {self.value}' if self.is_right() else f'Error: {self.monoid[0]}' # pylint: disable=no-member

def Error(value: S) -> _Error[S, Any]: # pylint: disable=invalid-name
    """ Creates an error value as the result of a calculation. """
    return _Error(None, (value, False))

def Result(value: T) -> _Error[Any, T]: # pylint: disable=invalid-name
    """ Creates a value representing the successful result of a calculation. """
    return _Error(value, (None, True))

# Expose the class's insert on the Error factory so Error.insert(x) works
# like the other pymonad constructors.
Error.insert = _Error.insert
|
# -*- coding: utf-8 -*-
import psycopg2
# NOTE(review): credentials are hardcoded — move them to environment
# variables or a config file before sharing/deploying this script.
conn = psycopg2.connect("dbname='ocrjpn' user='siena' host='localhost' password='unicorns'")
cur = conn.cursor()

def main():
    """Look up one dictionary entry by reading and print its sense blob."""
    cur.execute("SELECT entries.kanji, entries.readings, senses.blob FROM entries INNER JOIN senses ON (entries.sense_id = senses.id) where entries.lookup = %s",(u'さびしい',))
    test = cur.fetchone()
    kanji, readings, blob = test
    # BUG FIX: `print blob` is Python 2 syntax (SyntaxError on Python 3);
    # the call form below works on both.
    print(blob)

main()
from rest_framework.views import APIView
from rest_framework import generics
from rest_framework import permissions
from django_filters.rest_framework import DjangoFilterBackend
from django.db.models import Avg, Count, Min, Sum
from rest_framework.response import Response
import datetime
from .models import (
Proveedor,
VisitaProveedor,
Producto,
Lote,
Marca,
PresentacionProducto,
Categoria,
Compra,
Venta,
Transaccion,
DetalleVenta
)
from .serializers import (
ProveedorSerializer,
ProveedorVisitaSerializer,
ProductoSerializer,
ProductoUpdateSerializer,
ProductoCreateSerializer,
LoteSerializer,
LoteDetailedSerializer,
MarcaSerializer,
PresentacionSerializer,
CategoriaSerializer,
CompraCreateSerializer,
CompraSerializer,
CompraDetailSerializer,
CompraUpdateSerializer,
VentaCreateSerializer,
VentaDetailSerializer,
VentaUpdateSerializer,
TransaccionesSerializer,
TransaccionCreateSerializer,
DetalleVentaSerializer,
DetalleVentaDetailedLotesSerializer,
LoteReporteSerializer,
ProductoSimpleSerializer
)
# --- Simple list endpoints -------------------------------------------------

class ProductosSimpleList(generics.ListAPIView):
    # Lightweight product list (active products only: estado == 1).
    permission_classes = (permissions.IsAuthenticated,)
    serializer_class = ProductoSimpleSerializer
    queryset = Producto.objects.filter(estado=1)

class LotesReporte(generics.ListAPIView):
    # Active lots ordered by product, for reporting.
    permission_classes = (permissions.IsAuthenticated,)
    serializer_class = LoteReporteSerializer
    queryset = Lote.objects.filter(estado=1).order_by('producto')

# --- Date-range reports ----------------------------------------------------
# NOTE(review): unlike the views above, the report/total views below declare
# no permission_classes, so they are reachable per the project defaults —
# confirm whether IsAuthenticated should be added for consistency.

class ReporteTransacciones(generics.ListAPIView):
    serializer_class = TransaccionesSerializer

    def get_queryset(self):
        # URL kwargs: ys/ms/ds = start year/month/day, ye/me/de = end date.
        fecha_inicio = datetime.date(
            self.kwargs['ys'],
            self.kwargs['ms'],
            self.kwargs['ds']
        )
        fecha_fin = datetime.date(
            self.kwargs['ye'],
            self.kwargs['me'],
            self.kwargs['de']
        )
        # Expand to full days (midnight..23:59:59.999999), active rows only.
        queryset = Transaccion.objects.filter(fecha_hora_creacion__range=(
            datetime.datetime.combine(fecha_inicio, datetime.time.min),
            datetime.datetime.combine(fecha_fin, datetime.time.max),
        ),
            estado=1
        )
        return queryset

class ReporteVentas(generics.ListAPIView):
    serializer_class = VentaDetailSerializer

    def get_queryset(self):
        # Same date-window pattern as ReporteTransacciones, but over sales.
        fecha_inicio = datetime.date(
            self.kwargs['ys'],
            self.kwargs['ms'],
            self.kwargs['ds']
        )
        fecha_fin = datetime.date(
            self.kwargs['ye'],
            self.kwargs['me'],
            self.kwargs['de']
        )
        queryset = Venta.objects.filter(fecha_hora_creacion__range=(
            datetime.datetime.combine(fecha_inicio, datetime.time.min),
            datetime.datetime.combine(fecha_fin, datetime.time.max),
        ),
            estado=1
        )
        return queryset

# --- Aggregate totals ------------------------------------------------------

class TotalDebitos(APIView):
    def get(self, request, format=None, **kwargs):
        # Sum of active transactions of tipo 1 (debits).
        total = Transaccion.objects.filter(
            estado=1, tipo=1
        ).aggregate(
            total=Sum('monto')
        )
        return Response(total)

class TotalCreditos(APIView):
    def get(self, request, format=None, **kwargs):
        # Sum of active transactions of tipo 2 (credits).
        total = Transaccion.objects.filter(
            estado=1, tipo=2
        ).aggregate(
            total=Sum('monto')
        )
        return Response(total)

class LotesUnidadesVendidas(APIView):
    def get(self, request, format=None, **kwargs):
        # Units sold for one lot (lote_pk from the URL), active detail rows.
        lote_pk = kwargs.get('lote_pk')
        total = DetalleVenta.objects.filter(
            lote=lote_pk, estado=1
        ).aggregate(
            total=Sum('cantidad')
        )
        return Response(total)

class VentasTotales(APIView):
    def get(self, request, format=None, **kwargs):
        # Grand total of active sales.
        total = Venta.objects.filter(estado=1).aggregate(total=Sum('total'))
        return Response(total)

class ComprasTotales(APIView):
    def get(self, request, format=None, **kwargs):
        # Grand total of active purchases.
        total = Compra.objects.filter(estado=1).aggregate(total=Sum('total'))
        return Response(total)
# --- CRUD endpoints --------------------------------------------------------
# Convention throughout: estado == 1 means "active", estado == 2 means
# "deleted" (soft delete); all endpoints require authentication.

class DetalleVentaList(generics.ListAPIView):
    permission_classes = (permissions.IsAuthenticated,)
    queryset = DetalleVenta.objects.filter(estado=1).order_by('lote__producto__id')
    serializer_class = DetalleVentaDetailedLotesSerializer

class DetalleVentaRetrieve(generics.RetrieveAPIView):
    permission_classes = (permissions.IsAuthenticated,)
    queryset = DetalleVenta.objects.filter(estado=1)
    serializer_class = DetalleVentaDetailedLotesSerializer

class DetalleVentaUpdate(generics.UpdateAPIView):
    permission_classes = (permissions.IsAuthenticated,)
    queryset = DetalleVenta.objects.filter(estado=1)
    serializer_class = DetalleVentaSerializer

class TransaccionCreate(generics.CreateAPIView):
    permission_classes = (permissions.IsAuthenticated,)
    queryset = Transaccion.objects.filter(estado=1)
    serializer_class = TransaccionCreateSerializer

class TransaccionesList(generics.ListAPIView):
    permission_classes = (permissions.IsAuthenticated,)
    queryset = Transaccion.objects.filter(estado=1)
    serializer_class = TransaccionesSerializer

class TransaccionRetrieveUpdate(generics.RetrieveUpdateAPIView):
    permission_classes = (permissions.IsAuthenticated,)
    queryset = Transaccion.objects.filter(estado=1)
    serializer_class = TransaccionesSerializer

class VentaCreate(generics.CreateAPIView):
    permission_classes = (permissions.IsAuthenticated,)
    queryset = Venta.objects.filter(estado=1)
    serializer_class = VentaCreateSerializer

class VentaList(generics.ListAPIView):
    permission_classes = (permissions.IsAuthenticated,)
    # Filterable by creation timestamp via query parameters.
    filter_backends = (DjangoFilterBackend,)
    serializer_class = VentaDetailSerializer
    queryset = Venta.objects.filter(estado=1)
    filter_fields = ('fecha_hora_creacion', )

class VentaRetrieve(generics.RetrieveAPIView):
    permission_classes = (permissions.IsAuthenticated,)
    queryset = Venta.objects.filter(estado=1)
    serializer_class = VentaDetailSerializer

class VentaUpdate(generics.UpdateAPIView):
    permission_classes = (permissions.IsAuthenticated,)
    queryset = Venta.objects.filter(estado=1)
    serializer_class = VentaUpdateSerializer

class LoteDetailList(generics.ListAPIView):
    permission_classes = (permissions.IsAuthenticated,)
    queryset = Lote.objects.filter(estado=1)
    serializer_class = LoteDetailedSerializer

class LoteRetrieveDetail(generics.RetrieveAPIView):
    permission_classes = (permissions.IsAuthenticated,)
    queryset = Lote.objects.all().exclude(estado=2)
    serializer_class = LoteDetailedSerializer

class LoteRetrieveUpdate(generics.RetrieveUpdateAPIView):
    permission_classes = (permissions.IsAuthenticated,)
    queryset = Lote.objects.all().exclude(estado=2)
    serializer_class = LoteSerializer

class CompraCreate(generics.CreateAPIView):
    permission_classes = (permissions.IsAuthenticated,)
    queryset = Compra.objects.all().exclude(estado=2)
    serializer_class = CompraCreateSerializer

class CompraList(generics.ListAPIView):
    permission_classes = (permissions.IsAuthenticated,)
    queryset = Compra.objects.all().exclude(estado=2)
    serializer_class = CompraSerializer

class CompraRetrieve(generics.RetrieveAPIView):
    permission_classes = (permissions.IsAuthenticated,)
    queryset = Compra.objects.all().exclude(estado=2)
    serializer_class = CompraDetailSerializer

class CompraUpdate(generics.UpdateAPIView):
    permission_classes = (permissions.IsAuthenticated,)
    queryset = Compra.objects.all().exclude(estado=2)
    serializer_class = CompraUpdateSerializer

class ProveedoresListCreate(generics.ListCreateAPIView):
    permission_classes = (permissions.IsAuthenticated,)
    queryset = Proveedor.objects.all().exclude(estado=2)
    serializer_class = ProveedorSerializer

class ProveedoresUpdateRetrieve(generics.RetrieveUpdateAPIView):
    permission_classes = (permissions.IsAuthenticated,)
    queryset = Proveedor.objects.all().exclude(estado=2)
    serializer_class = ProveedorSerializer

class ProveedorVisitaListCreate(generics.ListCreateAPIView):
    permission_classes = (permissions.IsAuthenticated,)
    queryset = VisitaProveedor.objects.all()
    serializer_class = ProveedorVisitaSerializer

class ProveedorVisitaUpdateRetrieve(generics.RetrieveUpdateAPIView):
    permission_classes = (permissions.IsAuthenticated,)
    queryset = VisitaProveedor.objects.all()
    serializer_class = ProveedorVisitaSerializer

class ProductoCreate(generics.CreateAPIView):
    permission_classes = (permissions.IsAuthenticated,)
    queryset = Producto.objects.all().exclude(estado=2)
    serializer_class = ProductoCreateSerializer

class ProductoList(generics.ListAPIView):
    permission_classes = (permissions.IsAuthenticated,)
    queryset = Producto.objects.all().exclude(estado=2)
    serializer_class = ProductoSerializer

class ProductoUpdate(generics.UpdateAPIView):
    permission_classes = (permissions.IsAuthenticated,)
    queryset = Producto.objects.all()
    serializer_class = ProductoUpdateSerializer

class ProductoRetrieve(generics.RetrieveUpdateAPIView):
    permission_classes = (permissions.IsAuthenticated,)
    queryset = Producto.objects.all()
    serializer_class = ProductoSerializer

class MarcaListCreate(generics.ListCreateAPIView):
    permission_classes = (permissions.IsAuthenticated,)
    queryset = Marca.objects.all().exclude(estado=2)
    serializer_class = MarcaSerializer

class MarcaUpdateRetrieve(generics.RetrieveUpdateAPIView):
    permission_classes = (permissions.IsAuthenticated,)
    queryset = Marca.objects.all()
    serializer_class = MarcaSerializer

class PresentacionListCreate(generics.ListCreateAPIView):
    permission_classes = (permissions.IsAuthenticated,)
    queryset = PresentacionProducto.objects.all().exclude(estado=2)
    serializer_class = PresentacionSerializer

class PresentacionUpdateRetrieve(generics.RetrieveUpdateAPIView):
    permission_classes = (permissions.IsAuthenticated,)
    queryset = PresentacionProducto.objects.all()
    serializer_class = PresentacionSerializer

class CategoriaListCreate(generics.ListCreateAPIView):
    permission_classes = (permissions.IsAuthenticated,)
    queryset = Categoria.objects.all().exclude(estado=2)
    serializer_class = CategoriaSerializer

class CategoriaUpdateRetrieve(generics.RetrieveUpdateAPIView):
    permission_classes = (permissions.IsAuthenticated,)
    queryset = Categoria.objects.all()
    serializer_class = CategoriaSerializer
# class ProveedorCreate(generics.CreateAPIView):
# permission_classes = (permissions.IsAuthenticated,)
# queryset = Consulta.objects.all()
# serializer_class = ConsultaCreateSerializer
# class ConsultasL(generics.ListAPIView):
# permission_classes = (permissions.IsAuthenticated,)
# queryset = Consulta.objects.all().exclude(estado=2)
# serializer_class = ConsultaSerializer
# class ConsultaR(generics.RetrieveAPIView):
# queryset = Consulta.objects.all()
# serializer_class = ConsultaSerializer
# class ConsultaU(generics.UpdateAPIView):
# queryset = Consulta.objects.all()
# serializer_class = ConsultaUpdateSerializer |
import shared_buffer
from shared_buffer import *
import sys
import os
import threading
import logger
from logger import Logger
from defines import *
import socket
from datetime import datetime
class proxyNetworkServiceLayer(threading.Thread) :
    """Network-layer thread of the proxy.

    Listens for UDP packets from the power simulator and hands each one
    to the transport layer.  Shutdown is requested by queueing CMD_QUIT
    via cancelThread().
    """

    def __init__(self,logFile,powerSimIP) :
        threading.Thread.__init__(self)
        # Lock + queue for control commands posted from other threads.
        self.threadCmdLock = threading.Lock()
        self.NetLayerRxLock = threading.Lock()
        self.NetLayerRxBuffer = []
        self.threadCmdQueue = []
        self.powerSimIP = powerSimIP
        self.log = logger.Logger(logFile,"core Network Layer Thread")
        # Set later via setTransportLayer() before the thread is used.
        self.transportLayer = None

    def setTransportLayer(self, transportLayer):
        """Wire in the transport layer this thread forwards packets to."""
        self.transportLayer = transportLayer

    def getTransportLayer(self):
        """Return the transport layer set via setTransportLayer()."""
        return self.transportLayer

    def setPowerSimIdMap(self, powerSimIdMap):
        """Store the host->powerSim id map and build the reverse lookup.

        powerSimIdMap maps a host id to a set of power-sim ids; the inverse
        map assumes each power-sim id belongs to exactly one host.
        """
        self.hostIDtoPowerSimID = powerSimIdMap
        self.powerSimIDtohostID = {}
        for hostID in self.hostIDtoPowerSimID.keys():
            powerSimIdSet = self.hostIDtoPowerSimID[hostID]
            for powerSimId in powerSimIdSet:
                self.powerSimIDtohostID[powerSimId] = hostID

    def getcurrCmd(self) :
        """Pop and return the next queued control command, or None."""
        self.threadCmdLock.acquire()
        try:
            currCmd = self.threadCmdQueue.pop()
        except:
            # Empty queue — no pending command.
            currCmd = None
        self.threadCmdLock.release()
        return currCmd

    def cancelThread(self):
        """Request run() to stop by queueing CMD_QUIT (thread-safe)."""
        self.threadCmdLock.acquire()
        self.threadCmdQueue.append(CMD_QUIT)
        self.threadCmdLock.release()

    def onRxPktFromNetwork(self, pkt):
        """Forward a received packet to the transport layer's thread."""
        self.transportLayer.runOnThread(self.transportLayer.onRxPktFromNetworkLayer, extractPowerSimIdFromPkt(pkt), pkt)

    def run(self) :
        """Receive loop: poll the UDP socket until CMD_QUIT is queued.

        The socket timeout keeps the loop responsive to cancelThread().
        """
        self.log.info("Started listening on Port: " + str(PROXY_UDP_PORT))
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP
        sock.settimeout(SOCKET_TIMEOUT)
        sock.bind(('0.0.0.0', PROXY_UDP_PORT))
        while True :
            currCmd = self.getcurrCmd()
            if currCmd != None and currCmd == CMD_QUIT :
                self.log.info("Stopping ...")
                sock.close()
                break
            if POWERSIM_TYPE == "POWER_WORLD" :
                try:
                    data, addr = sock.recvfrom(MAXPKTSIZE)
                except socket.timeout:
                    # Timed out with nothing received; loop to re-check commands.
                    data = None
                if data != None :
                    self.log.info("%s RECV_FROM=%s:%s PKT=%s"%(datetime.now(), str(addr[0]), str(addr[1]), str(data)))
                    # NOTE(review): str(data) on Python 3 yields "b'...'" for
                    # bytes — confirm the intended encoding/decoding here.
                    self.onRxPktFromNetwork(str(data))
|
# -*- coding: utf-8 -*-
from flask import Flask, render_template, request, flash, redirect, url_for, make_response
from sentimentator.meta import Message, Status
from sentimentator.database import init, get_random_sentence, save_annotation, get_score, get_username, count
from flask_login import LoginManager, current_user, logout_user, login_required, login_user
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField
from wtforms.validators import DataRequired
from functools import wraps, update_wrapper
from werkzeug.http import http_date
from datetime import datetime
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite'  # local SQLite file
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False  # avoid tracking overhead/warning
app.config['SECRET_KEY'] = 'midnight-sun'  # NOTE(review): hard-coded secret -- move to env/config
login = LoginManager()
login.init_app(app)
login.login_view = 'login'  # endpoint @login_required redirects anonymous users to
@login.user_loader
def load_user(id):
    """Flask-Login user loader: fetch a User by primary key (User imported below)."""
    return User.query.get(int(id))
init(app)  # initialize the sentimentator database layer for this app
def disable_cache(view):
    """Decorator that stamps anti-caching headers on a view's response.

    Fix: the original wrapped the view twice (``@wraps`` on the inner
    function plus an extra ``update_wrapper`` call) and reused the outer
    function's name for the inner wrapper; ``@wraps`` alone already
    copies the view's metadata, and a distinct inner name avoids the
    shadowing confusion.  The returned callable behaves identically.
    """
    @wraps(view)
    def no_cache_view(*args, **kwargs):
        resp = make_response(view(*args, **kwargs))
        # Belt-and-braces header set understood by both old and new HTTP caches.
        resp.headers['Last-Modified'] = http_date(datetime.now())
        resp.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
        resp.headers['Pragma'] = 'no-cache'
        resp.headers['Expires'] = '-1'
        return resp
    return no_cache_view
class LoginForm(FlaskForm):
    """Simple username/password sign-in form; both fields are required."""
    username = StringField('Username', validators=[DataRequired()])
    password = PasswordField('Password', validators=[DataRequired()])
    submit = SubmitField('SIGN IN')
from sentimentator.model import User
@app.route('/')
def index():
    """Render the index page for a signed-in user; anonymous users go to login."""
    if not current_user.is_authenticated:
        return redirect(url_for('login'))
    uid = current_user._uid
    return render_template('index.html', score=get_score(uid), username=get_username(uid))
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Sign-in page.

    Already-authenticated visitors are sent straight to the index.  On a
    successful form submission the user is logged in and *redirected*
    (POST/redirect/GET) instead of rendering the index template directly,
    so a browser refresh cannot resubmit the credentials.  Also fixes the
    original's ``\"\"\"\"`` docstring-quote typo.
    """
    if current_user.is_authenticated:
        return redirect(url_for('index'))
    form = LoginForm()
    if form.validate_on_submit():
        user = User.query.filter_by(_user=form.username.data).first()
        if user is None or not user.check_password(form.password.data):
            flash('Invalid username or password...')
            return redirect(url_for('login'))
        login_user(user)
        return redirect(url_for('index'))
    return render_template('login.html', title='SIGN IN', form=form)
@app.route('/language')
@login_required
def language():
    """ Language selection page """
    # @login_required already guarantees an authenticated user, so this
    # check is redundant; if it ever failed the view would fall through
    # and return None (a 500) -- NOTE(review): consider removing it.
    if current_user.is_authenticated:
        user_id = current_user._uid
        return render_template('language.html', score=get_score(user_id), username=get_username(user_id))
@app.route('/annotate/<lang>', methods=['GET', 'POST'])
@disable_cache
@login_required
def annotate(lang):
    """
    Annotation page
    lang -- User selected language
    When annotation page is requested with POST method, there is incoming
    annotation data which needs to be validated and saved to database.
    A sensible use case should not allow invalid input, thus error messages
    are not displayed to user, but logged instead.
    """
    if current_user.is_authenticated:
        user_id = current_user._uid
        # A fresh random sentence is drawn on every request, GET and POST alike.
        sen = get_random_sentence(lang)
        score = get_score(user_id)
        if sen is None:
            flash('There are no sentences for the selected language!')
            return redirect(url_for('language', score=score, username=get_username(user_id)))
        else:
            username = get_username(user_id)
            if request.method == 'POST':
                status = save_annotation(request)
                # NOTE(review): the displayed score is incremented even when
                # save_annotation reports a validation error -- confirm intended.
                score += 1
                if status == Status.ERR_COARSE:
                    app.logger.error(Message.INPUT_COARSE)
                elif status == Status.ERR_FINE:
                    app.logger.error(Message.INPUT_FINE)
                else:
                    pass
            return render_template('annotate.html', lang=lang, sentence=sen, sentence_id=sen.sid, score=score, username=username)
@app.route('/stats')
def stats():
    """Per-user annotation statistics page.

    Fix: anonymous visitors used to fall off the end of the function,
    returning None (a 500 error); they are now redirected to the login
    page, consistent with index().
    """
    if current_user.is_authenticated:
        user_id = current_user._uid
        return render_template('stats.html', score=get_score(user_id), username=get_username(user_id),
                               positive=count(user_id, "%pos%"), negative=count(user_id, "%neg%"),
                               neutral=count(user_id, "%neu%"), anticipation=count(user_id, "%ant%"),
                               joy=count(user_id, "%joy%"), surprise=count(user_id, "%sur%"),
                               trust=count(user_id, "%tru%"), anger=count(user_id, "%ang%"),
                               disgust=count(user_id, "%dis%"), fear=count(user_id, "%fea%"),
                               sadness=count(user_id, "%sad%"))
    return redirect(url_for('login'))
@app.route('/logout')
def logout():
    """Log the current user out and return to the sign-in page."""
    logout_user()
    return redirect(url_for('login'))
|
import asyncio
import logging
import time
import os
import urllib.parse
from asyncio.exceptions import CancelledError
import pyppeteer
import aiofiles
import ipwhois
import tldextract
import hashlib
import dns.resolver
import dns.rdatatype
import whois
import redis
import datetime
import json
from ipwhois import IPWhois
from lxml import etree
from pyppeteer import launch
from asyncio import Queue
from collections import namedtuple
clogger = logging.getLogger(__name__)
# Immutable result records produced by the crawlers below; each one is
# serialized to JSON and persisted to redis by ncrawler.record_statistic().
# (The "URLSstatistic" typename typo is runtime-visible, so it is left as-is.)
URLStatistic = namedtuple("URLSstatistic",
                          ["url",
                           "fqdn",
                           "sld",
                           "redirect",    # final URL if the page redirected, else None
                           "pre_url",     # list of referring URLs
                           "next_url",    # [anchors, stylesheets, scripts, images]
                           "url_nums",    # number of followable anchor links
                           "title",
                           "html",        # path of saved HTML file, or None
                           "screenshot",  # path of saved screenshot, or None
                           "status",      # 200 on success, 400 after exhausted retries
                           "exception",
                           ])
DNSRecordStatistic = namedtuple("DNSRecordStatistic",
                                ["fqdn",
                                 "sld",
                                 "records",  # rdatatype name -> rrset text or error marker
                                 "status",
                                 "exception"
                                 ])
WhoisStatistic = namedtuple("WhoisStatistic",
                            ["fqdn",
                             "sld",
                             "whois",
                             "status",
                             "exception"
                             ])
IPStatistic = namedtuple("IPStatistic",
                         ["ip",
                          "fqdn",
                          "ipinfo",
                          "status",
                          "exception"
                          ])
class ncrawler:
    """Base class shared by all crawlers: redis persistence and dedup helpers."""

    def __init__(self, max_tasks=10, max_tries=4, loop=None, task_queue=None, redis_db=None, redis_set=None, page_timeout=20, timeout_index=1):
        self.max_tasks = max_tasks
        self.max_tries = max_tries
        self.loop = loop or asyncio.get_event_loop()
        self.task_queue = task_queue or Queue(loop=self.loop)
        self.done = []
        # Shared connection pool for the local redis instance.
        self.pool = redis.ConnectionPool(host="127.0.0.1", port=6379, db=redis_db)
        self.t0 = time.time()
        self.set_index = 0  # rank counter for the redis sorted set
        self.redis_set = redis_set

    def default(self, o):
        """json.dumps fallback: ISO-format date/datetime values."""
        if isinstance(o, (datetime.date, datetime.datetime)):
            return o.isoformat()

    def record_statistic(self, key, info_statistic):
        """Persist one result record under md5(key) and rank the key in the sorted set."""
        conn = redis.Redis(connection_pool=self.pool)
        digest = hashlib.md5(key.encode()).hexdigest()
        conn.set(digest, json.dumps(info_statistic._asdict(), default=self.default))
        conn.zadd(self.redis_set, {key: self.set_index})
        self.set_index += 1
        print(info_statistic)
        self.done.append(info_statistic)

    async def is_seen(self, key):
        """Return True when *key* has NOT been collected yet (note the inverted name)."""
        conn = redis.Redis(connection_pool=self.pool)
        digest = hashlib.md5(key.encode()).hexdigest()
        return not conn.get(digest)

    def report(self):
        pass
class IPcrawler(ncrawler):
    """Consumes (ip, fqdn) pairs and records an RDAP/whois lookup per IP."""
    def __init__(self,targets, max_tries=3,max_tasks = 1,loop=None,task_queue=None,redis_db = None,redis_set=None,page_timeout = 20,timeout_index = 1):
        super(IPcrawler,self).__init__(max_tries= max_tries,max_tasks=max_tasks,loop=loop,task_queue=task_queue,redis_db = redis_db,redis_set=redis_set,page_timeout = page_timeout,timeout_index = timeout_index)
        # Seed the queue with the initial (ip, fqdn) work items.
        if targets:
            for target,fqdn in targets:
                self.task_queue.put_nowait((target,fqdn))
    async def fetch(self,ip,fqdn):
        """Whois-look up one IP and persist the result; skips already-seen IPs."""
        if not await self.is_seen(ip):
            print("have done")
            return
        exception = None
        try:
            # Blocking network lookup -- runs on the event loop thread.
            ipinfo = IPWhois(ip).lookup_whois()
        except ipwhois.exceptions.WhoisLookupError as e:
            ipinfo = "failed"
        except Exception as e:
            ipinfo = "unknown"
        # status is always 200 and exception stays None, even on failure;
        # the failure is encoded in the ipinfo marker string instead.
        self.record_statistic(ip,IPStatistic(ip=ip,
                                        fqdn=fqdn,
                                        ipinfo=ipinfo,
                                        status=200,
                                        exception=exception))
    async def work(self):
        """Consume the queue until the 'done' sentinel arrives or we are cancelled."""
        try:
            while True:
                target,fqdn = await self.task_queue.get()
                if target =="done": # end-of-work sentinel
                    return 1
                await self.fetch(target,fqdn)
                self.task_queue.task_done()
                await asyncio.sleep(3)  # pacing between lookups
        except asyncio.CancelledError as e:
            print(e)
            return
    async def crawl(self):
        """Run max_tasks workers; when one sees the sentinel, cancel the rest."""
        self.t0 = time.time()
        workers = [asyncio.Task(self.work(),loop=self.loop) for _ in range(self.max_tasks)]
        for wi in workers:
            done = await wi
            if done:
                for wi in workers:
                    wi.cancel()
        self.t1 = time.time()
class DNSRecordcrawler(ncrawler):
    """Resolves every known DNS record type for queued FQDNs and forwards
    discovered A/AAAA addresses to the IP crawler via ip_queue."""
    def __init__(self,targets, max_tries=3,max_tasks = 1,loop=None,task_queue=None,ip_queue=None,redis_db = None,redis_set=None,page_timeout = 20,timeout_index = 1):
        super(DNSRecordcrawler,self).__init__(max_tries= max_tries,max_tasks=max_tasks,loop=loop,task_queue=task_queue,redis_db = redis_db,redis_set=redis_set,page_timeout = page_timeout,timeout_index = timeout_index)
        if targets:
            for target in targets:
                self.task_queue.put_nowait(target)
        self.ip_queue = ip_queue  # hand-off channel to IPcrawler
    # def record_statistic(self,dns_statistic):
    #     # 将每个URL信息,保存到redis 或 mysql
    #     print(dns_statistic)
    #     self.done.append(dns_statistic)
    # async def find_dns(self,target):
    #     record_types = [r.name for r in list(dns.rdatatype.RdataType)]
    #     return dns.resolver.resolve(target,rti)
    async def fetch(self,target):
        """Resolve every rdatatype for *target* and persist the record map."""
        if not await self.is_seen(target):
            print("have done")
            return
        tries = 0
        tlde = tldextract.extract(target)
        sld = tlde.domain+"."+tlde.suffix  # registrable second-level domain
        exception = None
        # while tries < self.max_tries:
        # try:
        record_types = [r.name for r in list(dns.rdatatype.RdataType)]
        records = {}
        ip_list = []
        for rti in record_types:
            try:
                # Blocking resolution on the event loop thread.
                answers = dns.resolver.resolve(target,rti)
                records[rti]=answers.rrset.to_text()
                if rti=="A" or rti=="AAAA":
                    ip_list.extend([ai.address for ai in answers])
            except dns.resolver.NoAnswer as e:
                # No Answer
                records[rti]="No Answer"
            except dns.resolver.NoMetaqueries as e:
                # Not Allow
                records[rti]="Not Allow"
            except dns.resolver.NoNameservers as e:
                # SERVFAIL
                records[rti]="SERVFAIL"
            except dns.resolver.NXDOMAIN as e:
                records[rti]="NXDOMAIN"
            except Exception as e:
                records[rti]="UNKNOWN"
        self.record_statistic(target,DNSRecordStatistic(fqdn=target,
                                            sld=sld,
                                            records=records,
                                            status=200,
                                            exception=exception))
        # Forward every not-yet-seen IP to the IP crawler.
        for ip in ip_list:
            if await self.is_seen(ip):
                self.ip_queue.put_nowait((ip,target))
        return
    async def work(self):
        """Consume the queue until the 'done' sentinel arrives or we are cancelled."""
        try:
            while True:
                target = await self.task_queue.get()
                if target =="done": # end-of-work sentinel
                    return 1
                await self.fetch(target)
                self.task_queue.task_done()
                await asyncio.sleep(3)  # pacing between lookups
        except asyncio.CancelledError as e:
            print(e)
            return
    async def crawl(self):
        """Run workers; on the sentinel, propagate 'done' to ip_queue and stop."""
        self.t0 = time.time()
        workers = [asyncio.Task(self.work(),loop=self.loop) for _ in range(self.max_tasks)]
        for wi in workers:
            done = await wi
            if done:
                self.ip_queue.put_nowait(("done",""))
                for wi in workers:
                    wi.cancel()
        self.t1 = time.time()
class Whoiscrawler(ncrawler):
    """Fetches domain-registration whois for queued targets, keyed by SLD."""
    def __init__(self,targets,max_tries=3,max_tasks = 1,loop=None,task_queue=None,redis_db = None,redis_set=None,page_timeout = 20,timeout_index = 1):
        super(Whoiscrawler,self).__init__(max_tries= max_tries,max_tasks=max_tasks,loop=loop,task_queue=task_queue,redis_db = redis_db,redis_set=redis_set,page_timeout = page_timeout,timeout_index = timeout_index)
        if targets:
            for target in targets:
                self.task_queue.put_nowait(target)
    async def fetch(self,target):
        """Whois-look up *target* and persist the result under its SLD."""
        if not await self.is_seen(target):
            print("have done")
            return
        tlde = tldextract.extract(target)
        sld = tlde.domain+"."+tlde.suffix
        exception = None
        try:
            # Blocking network lookup on the event loop thread.
            tar_whois = whois.whois(target)
        except whois.parser.PywhoisError as e:
            tar_whois = {target:"unknown"}
        except:
            # NOTE(review): bare except -- also catches KeyboardInterrupt etc.
            tar_whois = {target:"failed"}
        # Keyed by sld (not fqdn): one whois record per registrable domain.
        self.record_statistic(sld,WhoisStatistic(fqdn=target,
                                            sld=sld,
                                            whois=tar_whois,
                                            status=200,
                                            exception=exception))
        return
    async def work(self):
        """Consume the queue until the 'done' sentinel arrives or we are cancelled."""
        try:
            while True:
                target = await self.task_queue.get()
                if target =="done": # end-of-work sentinel
                    return 1
                await self.fetch(target)
                self.task_queue.task_done()
                await asyncio.sleep(5)  # whois servers rate-limit aggressively
        except asyncio.CancelledError as e:
            print(e)
            return
    async def crawl(self):
        """Run max_tasks workers; when one sees the sentinel, cancel the rest."""
        self.t0 = time.time()
        workers = [asyncio.Task(self.work(),loop=self.loop) for _ in range(self.max_tasks)]
        for wi in workers:
            done = await wi
            if done:
                for wi in workers:
                    wi.cancel()
        self.t1 = time.time()
class URLcrawler(ncrawler):
    """Headless-Chromium crawler: loads each URL, saves HTML and a screenshot,
    extracts outgoing links and records a URLStatistic per page."""
    def __init__(self,urls,max_tries=4,max_tasks=10,loop=None, html_out=None,screen_out=None,
                 mul_layer=None,task_queue = None,dns_queue = None,redis_db = None,redis_set=None,
                 page_timeout = 20,timeout_index = 1):
        super(URLcrawler,self).__init__(max_tries= max_tries,max_tasks=max_tasks,loop=loop,task_queue=task_queue,redis_db = redis_db,redis_set = redis_set,page_timeout = page_timeout,timeout_index = timeout_index)
        # self.loop = loop or asyncio.get_event_loop()
        self.urls = urls
        # self.max_tries = max_tries
        # self.max_tasks = max_tasks
        # self.task_queue = task_queue or Queue(loop=self.loop)
        self.seen_urls = set() # intended to hold already-processed URLs (from redis)
        # self.done = []
        self.html_out = html_out      # directory for saved HTML, or None
        self.screen_out = screen_out  # directory for screenshots, or None
        self.dns_queue = dns_queue
        self.mul_layer =mul_layer     # truthy -> follow extracted anchors (multi-layer crawl)
        self.page_timeout = page_timeout
        self.timeout_index = timeout_index  # timeout multiplier applied per retry
        # self.t0 = time.time()
        for ui in urls:
            self.task_queue.put_nowait((ui,None))  # (url, pre_url) pairs
    # def record_statistic(self,url_statistic):
    #     # 将每个URL信息,保存到redis 或 mysql
    #     print(url_statistic)
    #     self.done.append(url_statistic)
    async def is_seen(self,url,link):
        # If *link* was already crawled, register *url* as one of its pre_urls
        # and report "already seen" (False); otherwise True = crawl it.
        if await super().is_seen(link):
            return True
        else:
            resdb = redis.Redis(connection_pool=self.pool)
            res_id = hashlib.md5(link.encode()).hexdigest()
            sdata = json.loads(resdb.get(res_id))
            sdata["pre_url"].append(url)
            # NOTE(review): sdata is mutated but never written back to redis,
            # so the appended pre_url is lost -- a set() call looks missing here.
            return False
    async def put_dns_task(self,links):
        """Queue each link's hostname for DNS resolution (currently unused)."""
        for link in links:
            await self.dns_queue.put(urllib.parse.urlparse(link).netloc)
    def parse_url(self,response):
        """Extract anchor / stylesheet / script / image URLs from an HTML string."""
        dom = etree.HTML(response)
        if dom:
            a_link = dom.xpath('//*/a/@href')
            style_link = dom.xpath('//*/link/@href')
            js_link = dom.xpath('//*/script/@src')
            image_link = dom.xpath('//*/img/@src')
            return a_link,style_link,js_link,image_link
        else:
            return [],[],[],[]
    def link_ver(self,url,links):
        """Deduplicate, absolutize (against *url*) and strip fragments from hrefs."""
        res_links = []
        for link in set(links):
            normalized = urllib.parse.urljoin(url, link)
            defragmented, frag = urllib.parse.urldefrag(normalized)
            res_links.append(defragmented)
        return res_links
    async def fetch(self,url,pre_url):
        """Load *url* in headless Chromium (with retries), persist artifacts and stats."""
        print(url)
        if not await self.is_seen("",url):
            print("have done")
            return
        tries = 0
        title = None
        url_parse = urllib.parse.urlparse(url)
        url_hash = hashlib.md5(url.encode()).hexdigest()
        filename = url_parse.netloc +"_"+ url_hash  # unique per-URL artifact name
        tlde = tldextract.extract(url_parse.netloc)
        sld = tlde.domain+"."+tlde.suffix
        html_out_path = os.path.join(self.html_out,filename+".html") if self.html_out else None
        screen_out_path = os.path.join(self.screen_out,filename+".png") if self.screen_out else None
        exception = None
        redirect = None
        my_timeout = self.page_timeout
        while tries < self.max_tries:
            try:
                # ,'--proxy-server=127.0.0.1:1080'
                browser = await launch(headless=True, args=['--disable-infobars','--proxy-server=127.0.0.1:1080'])
                page = await browser.newPage()
                # Hide the webdriver flag so basic bot detection passes.
                await page.evaluateOnNewDocument('function(){Object.defineProperty(navigator, "webdriver", {get: () => undefined})}')
                await page.setViewport(viewport={'width': 1920, 'height': 1080})
                try:
                    await page.goto(url,timeout=my_timeout)
                    title = await page.title()
                except pyppeteer.errors.NetworkError as ne:
                    # Page navigated mid-evaluation: retry against the final URL.
                    if ne.__str__()=="Execution context was destroyed, most likely because of a navigation.":
                        await page.goto(page.url,timeout=self.page_timeout * self.timeout_index)
                        title = await page.title()
                if url != page.url:
                    redirect = page.url
                content = await page.content()
                a_link,style_link,js_link,image_link = self.parse_url(content)
                a_link = self.link_ver(url,a_link)
                style_link = self.link_ver(url,style_link)
                js_link = self.link_ver(url,js_link)
                image_link = self.link_ver(url,image_link)
                links = a_link  # only anchors are followed
                if self.html_out:
                    async with aiofiles.open(html_out_path,"w",encoding="utf-8") as fp:
                        await fp.write(content)
                    clogger.info('%r collected to %r'%(url,self.html_out))
                # write the screenshot to file
                if self.screen_out:
                    await page.waitFor(3)
                    await page.screenshot({"path":screen_out_path})
                await browser.close()
                self.record_statistic(url,URLStatistic(url=url,
                                            fqdn = url_parse.netloc,
                                            sld =sld,
                                            redirect=redirect,
                                            pre_url = [pre_url],
                                            next_url=[a_link,style_link,js_link,image_link],
                                            url_nums = len(links),
                                            title = title,
                                            html = html_out_path,
                                            screenshot=screen_out_path,
                                            status = 200,
                                            exception=exception))
                if self.mul_layer:
                    # Enqueue unseen anchors for the next crawl layer.
                    for link in links:
                        if await self.is_seen(url,link):
                            self.task_queue.put_nowait((link,url))
                            # self.dns_queue.put_nowait(urllib.parse.urlparse(link).netloc)
                self.seen_urls.update(links)
                # await self.put_dns_task(links)
                return
            except Exception as e:
                # NOTE(review): if launch() itself raised, `browser` is unbound here.
                await browser.close()
                my_timeout = my_timeout *self.timeout_index  # back off the timeout
                clogger.info('try %r for %r raised %r'%(tries,url,e))
                print('try %r for %r raised %r'%(tries,url,e))
                exception = e
                tries += 1
                # Retry once with a www. prefix before giving up.
                if not url.startswith("https://www."):
                    url = url.replace("https://","https://www.")
                    continue
                else:
                    print("________________________")
                    await browser.close()
        clogger.error('%r failed after %r tries',url,self.max_tries)
        self.record_statistic(url,URLStatistic(url=url,
                                    fqdn = url_parse.netloc,
                                    sld = sld,
                                    redirect=redirect,
                                    pre_url = [pre_url],
                                    next_url=[[],[],[],[]],
                                    url_nums = 0,
                                    title = title,
                                    html = html_out_path,
                                    screenshot=screen_out_path,
                                    status = 400,
                                    exception=exception))
        return
    async def work(self):
        """Drain the queue forever; terminated externally via cancellation."""
        try:
            while True:
                url,pre_url = await self.task_queue.get()
                await self.fetch(url,pre_url)
                self.task_queue.task_done()
        except asyncio.CancelledError as e:
            print(e)
    async def crawl(self):
        """Start workers, wait for the queue to drain, then cancel them."""
        workers = [asyncio.Task(self.work(),loop=self.loop) for _ in range(self.max_tasks)]
        self.t0 = time.time()
        await self.task_queue.join()
        self.t1 = time.time()
        for w in workers:
            w.cancel()
|
#! /usr/bin/env python
import sys
import json
import xml.etree.ElementTree as ET
from file_object import file_object
from datetime import datetime
new_files = set()
renamed_files = set()
moved_files = set()
changed_metadata = set()
deleted_files = set()
def get_file_objects_dict(root):
    """Build an inode -> file_object mapping from every <fileobject> element.

    Required tags raise AttributeError when missing (as before); optional
    tags (parent inode, timestamps, x* extensions) fall back to None.
    """
    def opt_text(elem):
        # Optional element: missing tag yields None instead of raising.
        return elem.text if elem is not None else None

    objects = {}
    for fo in root.findall('fileobject'):
        parent_inode = opt_text(fo.find('parent_object').find('i_node'))
        inode = fo.find('inode').text
        objects[inode] = file_object(
            fo.find('filename').text,
            fo.find('name_md5').text,
            fo.find('name_B64').text,
            fo.find('name_type').text,
            fo.find('filesize').text,
            inode,
            parent_inode,
            fo.find('mode').text,
            fo.find('nlink').text,
            fo.find('nameSize').text,
            fo.find('uid').text,
            fo.find('gid').text,
            fo.find('genId').text,
            opt_text(fo.find('mtime')),
            opt_text(fo.find('atime')),
            opt_text(fo.find('ctime')),
            opt_text(fo.find('crtime')),
            opt_text(fo.find('xnonce')),
            opt_text(fo.find('xmaster')),
            opt_text(fo.find('xNameCipher')),
        )
    return objects
def compare_dictionaries (old, new):
    """Classify differences between two inode->file_object dicts.

    Populates the module-level sets moved_files, renamed_files,
    changed_metadata, new_files, and rebinds deleted_files.  Entries
    matched in both dumps are removed from *old* as they are processed,
    so whatever remains afterwards is the set of deleted files.

    Fix: metadata changes were only flagged when ALL four timestamps
    differed -- ``not (a == .. or b == .. or ..)`` requires every
    equality to fail -- so a file whose mtime alone changed was silently
    ignored.  Any changed timestamp now counts.
    """
    for key, n_fo in new.items():
        if key in old:
            o_fo = old[key]
            if n_fo["parent_inode"] != o_fo["parent_inode"]:
                moved_files.add((o_fo, n_fo))
            elif n_fo["filename"] != o_fo["filename"]:
                renamed_files.add((o_fo, n_fo))
            elif not (n_fo["mtime"] == o_fo["mtime"] and
                      n_fo["atime"] == o_fo["atime"] and
                      n_fo["ctime"] == o_fo["ctime"] and
                      n_fo["crtime"] == o_fo["crtime"]):
                changed_metadata.add((o_fo, n_fo))
            # delete already processed key,value from dictionary
            del old[key]
        else:
            # inode is not present in first xml dump, that means it is a new file
            new_files.add(n_fo)
    if len(old) > 0:
        # file objects present in first xml dump and not in second xml dump are the deleted ones
        global deleted_files
        deleted_files = old.values()
def get_datetime (str_datetime):
    """Convert a 'YYYY-MM-DDTHH:MM:SSZ' timestamp to 'YYYY-MM-DD HH:MM:SS'."""
    parsed = datetime.strptime(str_datetime, '%Y-%m-%dT%H:%M:%SZ')
    return parsed.isoformat(' ')
def get_file_objects_data (fileobjects):
    """Map each file_object to (iso mtime, inode) and return the pairs sorted."""
    return sorted({(get_datetime(fo["mtime"]), fo["inode"]) for fo in fileobjects})
def get_file_objects2_data (fileobjects):
    """For each (old, new) file_object pair, emit one tuple per changed field.

    Tuple layouts (all start with datetime, inode):
      + action                                  -- filename change
      + action, previous_datetime               -- timestamp change
      + action, previous_parent, current_parent -- move
    """
    result = set()
    for (before, after) in fileobjects:
        if before["filename"] != after["filename"]:
            result.add((get_datetime(after["mtime"]), after["inode"], "filename changed"))
        if before["parent_inode"] != after["parent_inode"]:
            result.add((get_datetime(after["mtime"]), after["inode"], "file moved, parent inode changed",
                        before["parent_inode"], after["parent_inode"]))
        for field in ("mtime", "atime", "ctime", "crtime"):
            if before[field] != after[field]:
                result.add((get_datetime(after[field]), after["inode"],
                            field + " changed", get_datetime(before[field])))
    return sorted(result)
def append_to_output (key, value, output_data):
    """Store *value* under *key* as a list of {'datetime', 'inode'} dicts."""
    output_data[key] = [
        {'datetime': str(item[0]), 'inode': str(item[1])}
        for item in value
    ]
def append_to_output2 (key, value, output_data):
    """Store *value* under *key*; tuples carry 3 (action), 4 (+previous time)
    or 5 (+old/new parent inode) fields."""
    output_data[key] = []
    for fo in value:
        if len(fo) == 3:
            entry = {'datetime': str(fo[0]), 'inode': str(fo[1]),
                     'action': str(fo[2])}
        elif len(fo) == 4:
            entry = {'datetime': str(fo[0]), 'inode': str(fo[1]),
                     'action': str(fo[2]),
                     'previous_datetime': str(fo[3])}
        elif len(fo) == 5:
            entry = {'datetime': str(fo[0]), 'inode': str(fo[1]),
                     'action': str(fo[2]),
                     'previous_parent_inode': str(fo[3]),
                     'current_parent_inode': str(fo[4])}
        output_data[key].append(entry)
def output_json(filename) :
    """Serialize all module-level change sets to *filename* as one JSON object."""
    report = {}
    append_to_output('New files', get_file_objects_data(new_files), report)
    append_to_output('Deleted files', get_file_objects_data(deleted_files), report)
    append_to_output2("Renamed files:", get_file_objects2_data(renamed_files), report)
    append_to_output2("Moved files:", get_file_objects2_data(moved_files), report)
    append_to_output2("Changed metadata:", get_file_objects2_data(changed_metadata), report)
    with open(filename, 'w+') as outfile:
        json.dump(report, outfile)
if __name__ == "__main__":
    # Fix: the usage message used Python 2 print statements, a SyntaxError
    # on Python 3; print() with a single argument works on both versions.
    if len(sys.argv) < 4:
        print("Usage: python pretty_print.py <first_xml_dump_name> <second_xml_dump_name> <output file name>")
        print("<first_xml_dump_name> xml dump before an action")
        print("<second_xml_dump_name> xml dump after an action")
        exit()
    # Parse both XML dumps into inode-keyed dictionaries and diff them.
    first_xml_dump = sys.argv[1]
    first_root = ET.parse(first_xml_dump).getroot()
    old = get_file_objects_dict(first_root)
    second_xml_dump = sys.argv[2]
    second_root = ET.parse(second_xml_dump).getroot()
    new = get_file_objects_dict(second_root)
    output_filename = sys.argv[3]
    compare_dictionaries(old, new)
    output_json(output_filename + ".json")
|
# import time
# import multiprocessing
#
#
# def basic_func(x):
# if x == 0:
# return 'zero'
# elif x % 2 == 0:
# return 'even'
# else:
# return 'odd'
#
#
# def multiprocessing_func(x):
# y = x * x
# time.sleep(2)
# print('{} squared results in a/an {} number'.format(x, basic_func(y)))
#
#
# if __name__ == '__main__':
# starttime = time.time()
# pool = multiprocessing.Pool()
# pool.map(multiprocessing_func, range(0, 10))
# pool.close()
# print('That took {} seconds'.format(time.time() - starttime))
import multiprocessing
import os
import time
from multiprocessing import Process
from tqdm import tqdm
def basic_func(x):
    """Classify an integer as 'zero', 'even' or 'odd'."""
    if x == 0:
        return 'zero'
    return 'even' if x % 2 == 0 else 'odd'
def multiprocessing_func(x):
    """Square x, simulate two seconds of work, then report the result's parity."""
    squared = x * x
    time.sleep(2)
    print('{} squared results in a/an {} number'.format(x, basic_func(squared)))
def ExtractDicomMetadata(subset, root, file, return_dict):
    """Placeholder worker: record the identifying (subset, root, file) triple.

    The real DICOM tag extraction is currently stubbed out; the sleep
    stands in for the parsing work so the multiprocessing timing can be
    exercised.
    """
    time.sleep(2)
    return_dict['subset'] = subset
    return_dict['root'] = root
    return_dict['file'] = file
if __name__ == '__main__':
    starttime = time.time()
    processes = []
    # Fix: one shared Manager for all workers.  The original called
    # multiprocessing.Manager() once *per file*, spawning a fresh manager
    # server process for every single task.
    manager = multiprocessing.Manager()
    for root, dirs, files in os.walk(r"C:\Users\user\Downloads\AVCutter"):
        processes = []
        i = 0
        return_dict = {}
        for j, file in enumerate(files):
            return_dict[j] = manager.dict()
            # NOTE(review): `dirs` (a list) is passed as the `subset`
            # argument -- confirm that is intended.
            p = Process(target=ExtractDicomMetadata, args=(dirs, root, file, return_dict[j]))
            processes.append(p)
            p.start()
            i += 1
            if i > 3000:  # safety cap on processes spawned per directory
                break
        for process in tqdm(processes):
            process.join()
        print(return_dict.values())
    print('That took {} seconds'.format(time.time() - starttime))
# import time
#
#
# def basic_func(x):
# if x == 0:
# return 'zero'
# elif x % 2 == 0:
# return 'even'
# else:
# return 'odd'
#
#
# starttime = time.time()
# for i in range(0, 10):
# y = i * i
# time.sleep(2)
# print('{} squared results in a/an {} number'.format(i, basic_func(y)))
#
# print('That took {} seconds'.format(time.time() - starttime))
|
# Count unordered triples of pairwise-distinct stick lengths that satisfy
# the strict triangle inequality |a - b| < c < a + b.
N = int(input())
L = list(map(int, input().split()))
cnt = 0
for i in range(N):
    for j in range(i + 1, N):
        longer, shorter = max(L[i], L[j]), min(L[i], L[j])
        for k in range(j + 1, N):
            all_distinct = L[i] != L[j] and L[j] != L[k] and L[k] != L[i]
            if all_distinct and longer - shorter < L[k] < longer + shorter:
                cnt += 1
print(cnt)
|
1. Let _target_ be _F_.[[BoundTargetFunction]].
1. Assert: _target_ has a [[Construct]] internal method.
1. Let _boundArgs_ be _F_.[[BoundArguments]].
1. Let _args_ be a new list containing the same values as the list _boundArgs_ in the same order followed by the same values as the list _argumentsList_ in the same order.
1. If SameValue(_F_, _newTarget_) is *true*, set _newTarget_ to _target_.
1. Return ? Construct(_target_, _args_, _newTarget_). |
from functools import partial
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
from tensorflow.keras import Model
from tensorflow.keras.layers import Embedding, LSTM, Dense
from tensorflow.keras.activations import sigmoid
from tensorflow.keras.losses import BinaryCrossentropy
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.metrics import TopKCategoricalAccuracy, Mean
from .params import fname_wordlist, categories
class GenreModel(Model):
    """Keras model: text -> TextVectorization -> Embedding -> 3 stacked LSTMs
    -> per-genre sigmoid scores."""
    # Hyper-parameters (class-level constants).
    embed_dim = 10
    lstm_units = [30, 30, 30]
    target_dim = len(categories)
    def __init__(self):
        super().__init__()
        # Vocabulary is loaded (deduplicated) from the word-list file at
        # construction time.  NOTE(review): lines keep their trailing
        # newline and the file handle is never closed explicitly.
        vocab = list(set(open(fname_wordlist).readlines()))
        self.vectorizer = TextVectorization(max_tokens=len(vocab), output_mode='int')
        self.vectorizer.adapt(vocab)
        self.emb = Embedding(len(vocab)+1, self.embed_dim)  # +1 for padding/OOV index
        self.lstm1 = LSTM(self.lstm_units[0], return_sequences=True)
        self.lstm2 = LSTM(self.lstm_units[1], return_sequences=True)
        self.lstm3 = LSTM(self.lstm_units[2])  # final LSTM returns the last state only
        self.out = Dense(self.target_dim, activation=sigmoid)
    def call(self, input, training=None, mask=None):
        """Forward pass: raw string tensor -> per-genre sigmoid scores."""
        x = self.vectorizer(input)
        x = self.emb(x)
        x = self.lstm1(x)
        x = self.lstm2(x)
        x = self.lstm3(x)
        return self.out(x)
# Module-level training objects shared by the training loop.
loss_object = BinaryCrossentropy()  # multi-label targets -> binary CE per genre
optimizer = Adam()
# Top-2 accuracy metric factory, instantiated separately for train and test.
accuracy = partial(TopKCategoricalAccuracy, k=2)
train_acc, test_acc = accuracy(name='train_acc'), accuracy(name='test_acc')
train_loss, test_loss = Mean(name='train_loss'), Mean(name='test_loss')
|
def linear_two_points(p1, p2):
    """Return f(x) for the line through points p1 and p2 ((x, y) pairs).

    Raises ZeroDivisionError for a vertical line (equal x coordinates).
    """
    (x1, y1), (x2, y2) = p1, p2
    slope = (y2 - y1) / float(x2 - x1)
    return lambda x: slope * (x - x1) + y1
#!/usr/bin/env python
import sys
# Map nucleotide -> row/column index into the substitution matrix S below.
base_idx = {'A': 0, 'G': 1, 'C': 2, 'T': 3 }
# Traceback pointer codes: stop / gap in seq1 / gap in seq2 / align two bases.
PTR_NONE, PTR_GAP1, PTR_GAP2, PTR_BASE = 0, 1, 2, 3
def multiseqalign1DP(seq1, seq2, subst_matrix, gap_penalty):
    """
    Score the optimal Needleman-Wunsch global alignment of seq1 vs seq2.

    Returns (score, F, TB): F is the DP score matrix, TB the traceback
    pointer matrix.  gap_penalty must be positive (it is subtracted).
    Ties resolve diagonal > gap-in-seq2 > gap-in-seq1, exactly as before.
    """
    rows, cols = len(seq1) + 1, len(seq2) + 1
    F = [[0] * cols for _ in range(rows)]
    TB = [[PTR_NONE] * cols for _ in range(rows)]
    # Boundary rows/columns correspond to pure gap prefixes (Durbin p.20).
    for i in range(1, rows):
        F[i][0] = -i * gap_penalty
        TB[i][0] = PTR_GAP2  # gap in seq2
    for j in range(1, cols):
        F[0][j] = -j * gap_penalty
        TB[0][j] = PTR_GAP1  # gap in seq1
    for i in range(1, rows):
        for j in range(1, cols):
            # Substitution-matrix indices for the two bases being aligned.
            a = base_idx[seq1[i - 1]]
            b = base_idx[seq2[j - 1]]
            diag = F[i - 1][j - 1] + subst_matrix[a][b]
            up = F[i - 1][j] - gap_penalty
            left = F[i][j - 1] - gap_penalty
            best = max(diag, up, left)
            F[i][j] = best
            if best == diag:
                TB[i][j] = PTR_BASE
            elif best == up:
                TB[i][j] = PTR_GAP2
            else:
                TB[i][j] = PTR_GAP1
    return F[len(seq1)][len(seq2)], F, TB
def traceback(seq1, seq2, TB):
    """Rebuild the aligned strings (s1, s2) by walking TB from the bottom-right."""
    aligned1 = []
    aligned2 = []
    i, j = len(seq1), len(seq2)
    while TB[i][j] != PTR_NONE:
        ptr = TB[i][j]
        if ptr == PTR_BASE:        # consume one base from each sequence
            aligned1.append(seq1[i - 1])
            aligned2.append(seq2[j - 1])
            i -= 1
            j -= 1
        elif ptr == PTR_GAP1:      # gap in seq1
            aligned1.append('-')
            aligned2.append(seq2[j - 1])
            j -= 1
        elif ptr == PTR_GAP2:      # gap in seq2
            aligned1.append(seq1[i - 1])
            aligned2.append('-')
            i -= 1
        else:
            assert False
    # Characters were collected back-to-front; reverse on the way out.
    return ''.join(reversed(aligned1)), ''.join(reversed(aligned2))
def readSeq(filename):
    """Read a FASTA file (assumed single-record) and return the sequence uppercased."""
    with open(filename, "r") as handle:
        return "".join(
            line.rstrip().upper()
            for line in handle
            if not line.startswith(">")
        )
# Substituation matrix and gap_penalty
# Score added for aligning two bases (rows/cols ordered A, G, C, T per base_idx):
# 0 for identical bases, 1 for the complementary pairs A/T and G/C, 2 otherwise.
# NOTE(review): larger values for mismatches in a maximizing DP look like
# distance weights -- confirm the intended scoring convention.
S = [
    # A G C T
    [0, 2, 2, 1], # A
    [2, 0, 1, 2], # G
    [2, 1, 0, 2], # C
    [1, 2, 2, 0] # T
]
# Positive penalty subtracted per gap position.
gap_penalty = 4
def main():
    """CLI entry point: align two FASTA files and print the score + alignment."""
    if len(sys.argv) < 3:
        print("Usage: {0} <FASTA 1> <FASTA 2>".format(sys.argv[0]))
        sys.exit(1)
    seq1 = readSeq(sys.argv[1])
    seq2 = readSeq(sys.argv[2])
    score, F, TB = multiseqalign1DP(seq1, seq2, S, gap_penalty)
    print("Score: {0}".format(score))
    s1, s2 = traceback(seq1, seq2, TB)
    print(s1)
    print(s2)
if __name__ == "__main__":
    main()
|
from flask_restful import Resource
from flask import request
from app import api
from flask_jwt_extended import (
get_jwt_identity,
get_jwt_claims,
jwt_required,
get_raw_jwt
)
from app.models.user import UserSchema, UserModel
from app.models.organisation import OrganisationModel, OrganisationSchema
user_schema = UserSchema()
user_schema_list = UserSchema(many=True)
class User(Resource):
    """REST resource for a single user, addressed by UUID.

    Every handler requires a valid JWT.
    """

    @jwt_required
    def get(self, uuid):
        """Return the user with the given UUID, or a 404 message."""
        user = UserModel.find_by_uuid(uuid)
        if user:
            return {"user": user_schema.dump(user)}, 200
        return {"message": "No user found"}, 404

    @jwt_required
    def put(self, uuid):
        # NOTE(review): body is a copy of get() — no request payload is
        # read and nothing is updated. Presumably unfinished; confirm
        # against the UserModel API before shipping.
        user = UserModel.find_by_uuid(uuid)
        if user:
            return {"user": user_schema.dump(user)}, 200
        return {"message": "No user found"}, 404

    @jwt_required
    def delete(self, uuid):
        # NOTE(review): also a copy of get() — the user is returned but
        # never deleted. Confirm intended behavior.
        user = UserModel.find_by_uuid(uuid)
        if user:
            return {"user": user_schema.dump(user)}, 200
        return {"message": "No user found"}, 404


# Register the single-user resource under its URL.
api.add_resource(User, '/api/auth/user/<uuid>')
class Users(Resource):
    """REST resource listing every known user. Requires a valid JWT."""

    @jwt_required
    def get(self):
        """Return all users serialised through the list schema."""
        everyone = UserModel.find_all_users()
        return {"users": user_schema_list.dump(everyone)}


# Register the collection resource under its URL.
api.add_resource(Users, '/api/auth/users')
|
"""
byceps.blueprints.common.authentication.decorators
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2020 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from functools import wraps
from flask import g
from ....util.framework.flash import flash_notice
from ....util.views import redirect_to
def login_required(func):
    """Ensure the current user has logged in."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        # Active user: pass straight through to the wrapped view.
        if g.current_user.is_active:
            return func(*args, **kwargs)
        # Anonymous/inactive: prompt for login and redirect to the form.
        flash_notice('Bitte melde dich an.')
        return redirect_to('authentication.login_form')
    return wrapper
|
'''
Created on Oct 24, 2012
@author: Gary
'''
from Tkinter import Tk, Frame, Label, BOTH, W, E, N, S
class Display(object):
    '''
    Tkinter window showing the most recent sensor readings of the house
    monitor in a three-column grid (time / name / value).
    '''
    TITLE = 'House Monitor'
    TIME_COLUMN = 0
    NAME_COLUMN = 1
    VALUE_COLUMN = 2
    mainframe = None
    proxy = None
    current_row = 0
    current_values = {}

    def __init__(self, current_values):
        '''
        Build the root window and grid frame.

        current_values: nested dict keyed by device, then port; each leaf
        holds 'arrival_time', 'name', 'current_value' and 'units'.
        '''
        self.current_values = current_values
        super(Display, self).__init__()
        self.root = Tk()
        self.root.title(self.TITLE)
        self.mainframe = Frame(self.root)
        self.mainframe.grid(column=0, row=0, sticky=(N, W, E, S))
        self.mainframe.columnconfigure(0, weight=1)
        self.mainframe.rowconfigure(0, weight=1)
        print(current_values)

    def display_header(self):
        '''Render the column headers in the current row.'''
        # Bug fix: Label is a module-level Tkinter class, not an attribute
        # of Tk — the original 'Tk.Label(...)' raised AttributeError.
        Label(self.mainframe, text='Time', width=15, background='lightblue').grid(column=self.TIME_COLUMN, row=self.current_row, sticky=W)
        Label(self.mainframe, text='Name', width=45, background='lightblue').grid(column=self.NAME_COLUMN, row=self.current_row, sticky=W)
        Label(self.mainframe, text='Value', width=15, background='lightblue').grid(column=self.VALUE_COLUMN, row=self.current_row, sticky=W)
        self.current_row = self.current_row + 1

    def update(self):
        '''Redraw one row per device/port from current_values.'''
        self.current_row = 1
        for device in self.current_values.keys():
            for port in self.current_values[device].keys():
                # Hoist the repeated nested lookup once per reading.
                reading = self.current_values[device][port]
                arrival_time = reading['arrival_time']
                Label(self.mainframe, text=arrival_time).grid(column=self.TIME_COLUMN, row=self.current_row, sticky=W)
                name = reading['name']
                Label(self.mainframe, text=name).grid(column=self.NAME_COLUMN, row=self.current_row, sticky=W)
                value = '{}{}'.format(reading['current_value'], reading['units'])
                Label(self.mainframe, text=value).grid(column=self.VALUE_COLUMN, row=self.current_row, sticky=W)
                self.current_row = self.current_row + 1

    def run(self):
        '''Enter the Tk main loop (blocks until the window closes).'''
        self.root.mainloop()
|
import sys
from functools import partial
from returns.contrib.pytest.plugin import _DesiredFunctionFound
class DesiredValueFound(_DesiredFunctionFound):
    # Exception used purely as a control-flow channel: it smuggles the
    # traced function's return value out of the profile hook.
    def __init__(self, value):
        # The captured return value of the searched-for function.
        self.value = value
def trace_func(function_to_search, frame, event, arg):
    """Profile hook: when a frame whose code-object name matches
    function_to_search returns, raise DesiredValueFound carrying the
    return value (arg)."""
    if event != "return":
        return
    if frame.f_code.co_name == function_to_search.__name__:
        raise DesiredValueFound(arg)
def get_return_value(function_to_search, my_flow):
    """Run my_flow() and capture the value returned by the (possibly
    nested) call to function_to_search.

    Returns the captured value, or None if the function never returned
    during the flow.
    """
    # Bug fix: the hook is installed with sys.setprofile, so the previous
    # *profiler* must be saved and restored. The original saved
    # sys.gettrace() and restored it with sys.settrace, which clobbered
    # any active tracer and left the new profiler installed forever.
    old_profiler = sys.getprofile()
    sys.setprofile(partial(trace_func, function_to_search))
    try:
        my_flow()
    except DesiredValueFound as e:
        return e.value
    finally:
        sys.setprofile(old_profiler)
|
from donation import app
from flask import render_template, url_for ,redirect,url_for,flash, request, jsonify, json
from donation.models import user, category, district, govt_pvt, blood_bank, userbank, doctor, blood
from donation.forms import change, registerform , loginform, Blood_bank, registerbankform, registerdoctorform, loginbankform, logindoctorform, finddoner
from donation import db
from flask_login import login_user, logout_user,login_required, current_user
import sqlite3 as sql
from donation.send_email import send
@app.route('/')
def select_page():
    """Landing page: choose the user / bank / doctor role."""
    return render_template('select_page.html')


@app.route('/user/home')
def home_page():
    """Home page for donor users."""
    return render_template('user_home.html')


@app.route('/bank/home')
def bank_home_page():
    # NOTE(review): renders 'user_home.html', same as the user home —
    # possibly a copy-paste leftover; confirm a bank template exists.
    return render_template('user_home.html')


@app.route('/doctor/home')
def doctor_home_page():
    # NOTE(review): also renders 'user_home.html' — confirm intent.
    return render_template('user_home.html')
#aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
#registrtn
@app.route('/register/user', methods=['GET','POST'])
def register_user_page():
    """Register a donor: on a valid POST create the user, log them in and
    redirect to the user home; otherwise re-render the form with errors."""
    form = registerform()
    # District choices come from the DB, so they must be (re)built on
    # every request before validation runs.
    form.district.choices = [(district.id,district.district) for district in district.query.all()]
    if form.validate_on_submit():
        user_to_create= user(username=form.username.data, email_address=form.email_address.data, blood_grp=form.blood_grp.data, mobile_no=form.mobile_no.data, district=form.district.data , password=form.password1.data)
        db.session.add(user_to_create)
        db.session.commit()
        login_user(user_to_create)
        flash(f'Account created sucessfully! You are now logged in as: {user_to_create.username}')
        return redirect(url_for('home_page'))
    if form.errors !={}:
        for err_msg in form.errors.values():
            flash(f'There was an error in creating the user:{err_msg}',category='danger')
    return render_template('register_user.html',form=form)
@app.route('/register/bank', methods=['GET','POST'])
def register_bank_page():
    """Register a blood-bank account: on a valid POST create the userbank
    row and redirect to the home page; otherwise re-render with errors.

    Unlike the donor/doctor registrations, the new account is not logged
    in automatically (the login_user call was already commented out).
    """
    form = registerbankform()
    # District choices are DB-driven and must be rebuilt per request.
    form.district.choices = [(district.id, district.district) for district in district.query.all()]
    if form.validate_on_submit():
        # Improvement: removed the unused local `x = form.name.data` and
        # the commented-out login/flash leftovers.
        user_to_create = userbank(district_id=form.district.data, name_id=form.name.data, password=form.password1.data)
        db.session.add(user_to_create)
        db.session.commit()
        return redirect(url_for('home_page'))
    if form.errors != {}:
        for err_msg in form.errors.values():
            flash(f'There was an error in creating the user:{err_msg}', category='danger')
    return render_template('register_bank.html', form=form)
@app.route('/register/blood_bank/<get_district>')
def bank(get_district):
    """AJAX helper for the registration form: list the blood banks of a
    district as JSON, shaped {'bb': [{'id': ..., 'name': ...}, ...]}."""
    matches = blood_bank.query.filter_by(district_id=get_district).all()
    payload = [{'id': entry.id, 'name': entry.name} for entry in matches]
    return jsonify({'bb': payload})
@app.route('/register/doctor', methods=['GET','POST'])
def register_doctor_page():
    """Register a doctor: on a valid POST create the doctor, log them in
    and redirect home; otherwise re-render the form with errors."""
    form = registerdoctorform()
    if form.validate_on_submit():
        user_to_create = doctor(username=form.username.data, email_address=form.email_address.data, mobile_no=form.mobile_no.data, password=form.password1.data)
        db.session.add(user_to_create)
        db.session.commit()
        login_user(user_to_create)
        flash(f'Account created sucessfully! You are now logged in as: {user_to_create.username}')
        return redirect(url_for('home_page'))
    if form.errors != {}:
        for err_msg in form.errors.values():
            flash(f'There was an error in creating the user:{err_msg}',category='danger')
    return render_template('register_doctor.html', form=form)
#aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
#login
@app.route('/login/user', methods=['GET','POST'])
def user_login_page():
    """Log a donor in by e-mail address and password."""
    form = loginform()
    if form.validate_on_submit():
        attempted_user = user.query.filter_by(email_address=form.email_address.data).first()
        # Same flash for unknown user and wrong password — deliberate,
        # avoids leaking which accounts exist.
        if attempted_user and attempted_user.check_password_correction(attempted_password=form.password.data):
            login_user(attempted_user)
            flash(f'Sucess! You are logged in as: {attempted_user.username}',category = 'success')
            return redirect(url_for('home_page'))
        else:
            flash('Username or Password is incorrect! Please try again', category= 'danger')
    return render_template('login_user.html',form=form)
@app.route('/login/bank', methods=['GET','POST'])
def bank_login_page():
    """Log a blood-bank account in (by bank name) and send it to the
    staff dashboard."""
    form = loginbankform()
    # District choices drive the cascading bank dropdown on the form.
    form.district.choices = [(district.id,district.district) for district in district.query.all()]
    if form.validate_on_submit():
        attempted_user = userbank.query.filter_by(name_id=form.name.data).first()
        if attempted_user and attempted_user.check_password_correction(attempted_password=form.password.data):
            login_user(attempted_user)
            flash(f'Sucess! You are logged in as: {attempted_user.name_id}',category = 'success')
            return redirect(url_for('official_blood_bank'))
        else:
            flash('Username or Password is incorrect! Please try again', category= 'danger')
    return render_template('login_bank.html',form=form)
@app.route('/login/blood_bank/<get_district>')
def logbank(get_district):
    """AJAX helper for the bank login form: list the blood banks of a
    district as JSON, shaped {'bb': [{'id': ..., 'name': ...}, ...]}."""
    matches = blood_bank.query.filter_by(district_id=get_district).all()
    payload = [{'id': entry.id, 'name': entry.name} for entry in matches]
    return jsonify({'bb': payload})
@app.route('/login/doctor', methods=['GET','POST'])
def doctor_login_page():
    """Log a doctor in by e-mail address and password."""
    form = logindoctorform()
    if form.validate_on_submit():
        attempted_user = doctor.query.filter_by(email_address=form.email_address.data).first()
        if attempted_user and attempted_user.check_password_correction(attempted_password=form.password.data):
            login_user(attempted_user)
            flash(f'Sucess! You are logged in as: {attempted_user.username}',category = 'success')
            return redirect(url_for('home_page'))
        else:
            flash('Username or Password is incorrect! Please try again', category= 'danger')
    return render_template('login_doctor.html',form=form)
#aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
#logout
@app.route('/logout')
def logout_page():
    """Log the current user out (any role) and return to the user home."""
    logout_user()
    flash('You have been logged out!',category='info')
    return redirect(url_for('home_page'))
#aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
# User blood bank Page
@app.route('/blood_bank_page', methods=['GET','POST'])
def blood_bank_page():
    """Let a visitor pick a district and a blood bank, then redirect to
    that bank's detail page."""
    form = Blood_bank()
    form.district.choices = [(district.id,district.district) for district in district.query.all()]
    if form.validate_on_submit():
        detailbk = blood_bank.query.filter_by(id=form.name.data).first()
        # NOTE(review): module-level globals carry the selection over to
        # detail_blood_bank. Globals are shared by ALL requests, so
        # concurrent visitors overwrite each other's choice — a query
        # parameter or session entry would be per-user.
        global dt
        global bk
        bk = int(detailbk.id)
        dt = int(form.district.data)
        return redirect(url_for('detail_blood_bank'))
    if form.errors !={}:
        for err_msg in form.errors.values():
            flash(f'There was an error in creating the user:{err_msg}',category='danger')
    return render_template('blood_bank.html', form=form)
@app.route('/blood_bank/<get_district>')
def bloodbank(get_district):
    """AJAX helper for the bank-detail chooser: list the blood banks of a
    district as JSON, shaped {'bb': [{'id': ..., 'name': ...}, ...]}."""
    matches = blood_bank.query.filter_by(district_id=get_district).all()
    payload = [{'id': entry.id, 'name': entry.name} for entry in matches]
    return jsonify({'bb': payload})
@app.route('/detail_of_bank', methods=['GET'])
def detail_blood_bank():
    """Render the detail page for the bank/district last chosen on
    blood_bank_page (passed via the module globals bk / dt)."""
    # NOTE(review): bk/dt are set by blood_bank_page; visiting this URL
    # directly raises NameError, and concurrent users share one pair of
    # globals.
    x = bk
    database = blood_bank.query.filter_by(id= x).first()
    y = database
    s = district.query.filter_by(id=dt).first()
    # NOTE(review): '/n' is a literal slash-n, not a newline ('\n') —
    # these prints look like leftover debugging output.
    print('/n')
    print('/n')
    print('sss')
    print(dt)
    print('/n')
    print('/n')
    return render_template('detail_blood_bank.html', y=y, s=s)
#aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
#blood bank official
@app.route('/official_blood_bank', methods=['GET','POST'])
def official_blood_bank():
    """Blood-bank staff dashboard: show current stock and apply an
    add/subtract adjustment for one blood group.

    NOTE(review): stock is updated through a raw sqlite3 connection while
    the page data comes from SQLAlchemy; the ORM session may serve stale
    rows until refreshed. Only the positive groups trigger the low-stock
    donor e-mail; the negative branches do not — confirm whether that is
    intended.
    """
    form = change()
    happy = current_user.id
    hi = userbank.query.filter_by(id=happy).first()
    # name_id of the account doubles as the id of the managed blood_bank.
    x = hi.name_id
    database = blood_bank.query.filter_by(id= x).first()
    y = database
    # NOTE(review): `b` is loaded BEFORE the UPDATE statements below, so
    # the low-stock value k is computed from the pre-update count.
    b = blood.query.filter_by(id=x).first()
    if form.validate_on_submit():
        con = sql.connect('donation/donation.db')
        c = con.cursor()
        # Leftover debug output.
        print('hello')
        print('hello')
        print('hello')
        print('hello')
        do = form.do.data          # 'add' or 'subtract'
        amount = form.amount.data
        grp = form.blood_grp.data
        if grp == 'A+ve':
            if do == 'add':
                c.execute('UPDATE blood SET A_positive = A_positive + ? WHERE id = ?',(amount,x,))
                print(x)
                con.commit()
            if do == 'subtract':
                c.execute('UPDATE blood SET A_positive = A_positive - ? WHERE id = ?',(amount,x,))
                con.commit()
                k = int(b.A_positive) - int(amount)
                print(k)
                # Stock dropped below the threshold: e-mail matching
                # donors in this bank's district.
                if (k < 14):
                    number = y.name
                    districtid = y.district_id
                    datas = user.query.filter_by(district=districtid, blood_grp=grp).all()
                    emails =[]
                    for data in datas:
                        emails.append(data.email_address)
                    send(emails,number)
        elif grp == 'B+ve':
            if do == 'add':
                c.execute('UPDATE blood SET B_positive = B_positive + ? WHERE id = ?',(amount,x,))
                print(x)
                con.commit()
            if do == 'subtract':
                c.execute('UPDATE blood SET B_positive = B_positive - ? WHERE id = ?',(amount,x,))
                con.commit()
                k = int(b.B_positive) - int(amount)
                print(k)
                if (k < 14):
                    number = y.name
                    districtid = y.district_id
                    datas = user.query.filter_by(district=districtid, blood_grp=grp).all()
                    emails =[]
                    for data in datas:
                        emails.append(data.email_address)
                    send(emails,number)
        elif grp == 'AB+ve':
            if do == 'add':
                c.execute('UPDATE blood SET AB_positive = AB_positive + ? WHERE id = ?',(amount,x,))
                print(x)
                con.commit()
            if do == 'subtract':
                c.execute('UPDATE blood SET AB_positive = AB_positive - ? WHERE id = ?',(amount,x,))
                con.commit()
                k = int(b.AB_positive) - int(amount)
                print(k)
                if (k < 14):
                    number = y.name
                    districtid = y.district_id
                    datas = user.query.filter_by(district=districtid, blood_grp=grp).all()
                    emails =[]
                    for data in datas:
                        emails.append(data.email_address)
                    send(emails,number)
        elif grp == 'O+ve':
            if do == 'add':
                c.execute('UPDATE blood SET O_positive = O_positive + ? WHERE id = ?',(amount,x,))
                print(x)
                con.commit()
            if do == 'subtract':
                c.execute('UPDATE blood SET O_positive = O_positive - ? WHERE id = ?',(amount,x,))
                con.commit()
                k = int(b.O_positive) - int(amount)
                print(k)
                if (k < 14):
                    number = y.name
                    districtid = y.district_id
                    datas = user.query.filter_by(district=districtid, blood_grp=grp).all()
                    emails =[]
                    for data in datas:
                        emails.append(data.email_address)
                    send(emails,number)
        # NEGATIVE groups: adjusted only, no low-stock notification.
        elif grp == 'A-ve':
            if do == 'add':
                c.execute('UPDATE blood SET A_negative = A_negative + ? WHERE id = ?',(amount,x,))
                print(x)
                con.commit()
            if do == 'subtract':
                c.execute('UPDATE blood SET A_negative = A_negative - ? WHERE id = ?',(amount,x,))
                con.commit()
        elif grp == 'B-ve':
            if do == 'add':
                c.execute('UPDATE blood SET B_negative = B_negative + ? WHERE id = ?',(amount,x,))
                print(x)
                con.commit()
            if do == 'subtract':
                c.execute('UPDATE blood SET B_negative = B_negative - ? WHERE id = ?',(amount,x,))
                con.commit()
        elif grp == 'AB-ve':
            if do == 'add':
                c.execute('UPDATE blood SET AB_negative = AB_negative + ? WHERE id = ?',(amount,x,))
                print(x)
                con.commit()
            if do == 'subtract':
                c.execute('UPDATE blood SET AB_negative = AB_negative - ? WHERE id = ?',(amount,x,))
                con.commit()
        elif grp == 'O-ve':
            if do == 'add':
                c.execute('UPDATE blood SET O_negative = O_negative + ? WHERE id = ?',(amount,x,))
                print(x)
                con.commit()
            if do == 'subtract':
                c.execute('UPDATE blood SET O_negative = O_negative - ? WHERE id = ?',(amount,x,))
                con.commit()
        # Redirect-after-POST so a refresh does not resubmit the form.
        return redirect(url_for('official_blood_bank'))
    if form.errors !={}:
        for err_msg in form.errors.values():
            flash(f'There was an error in creating the user:{err_msg}',category='danger')
    return render_template('official_blood_bank.html', y=y, b=b,form=form)
#aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
#donor finder
@app.route('/donor', methods=['GET','POST'])
@login_required
def donor():
    """Donor finder: e-mail every user in the chosen district with the
    chosen blood group, passing the requester's phone number as the
    contact. Requires login."""
    form = finddoner()
    form.district.choices = [(district.id,district.district) for district in district.query.all()]
    if form.validate_on_submit():
        datas = user.query.filter_by(district=form.district.data , blood_grp=form.blood_grp.data).all()
        emails =[]
        number = current_user.mobile_no
        number = str(number)
        for data in datas:
            emails.append(data.email_address)
        send(emails,number)
    if form.errors !={}:
        for err_msg in form.errors.values():
            flash(f'There was an error in creating the user:{err_msg}',category='danger')
    return render_template('donor.html', form=form)
|
# encoding: utf-8
# One-shot maintenance script (Python 2 — note the statement-form print):
# pull race results from the federation RSS feed into the local store.
from api.results import result_service

print 'Importing results from the web...'
result_service.import_results('http://www.cccc.cat/rss-resultats')
print 'Successfully imported'
|
import sys
import unittest

# Discover and run all unit tests under the directory given on the
# command line; exit non-zero when anything fails (CI-friendly).
if len(sys.argv) < 2:
    # Robustness fix: the original crashed with IndexError when the
    # start directory was omitted.
    print("Usage: {0} <test-directory>".format(sys.argv[0]))
    sys.exit(2)
test_suite = sys.argv[1]
loader = unittest.TestLoader()
suite = loader.discover(start_dir=test_suite)
results = unittest.TextTestRunner(verbosity=2).run(suite)
if not results.wasSuccessful():
    sys.exit(1)
|
from selenium import webdriver
from time import sleep
from tiktok import title, description
from video_editor import editing
from downloader import downloader
# Pipeline setup: fetch the TikTok clip, re-edit it, then open a browser
# session on Instagram for the upload steps below.
downloader()
sleep(5)
print("i'm fucking working")
editing()
print("editing done !")

url = "https://www.instagram.com/"
# NOTE(review): credentials are blank — fill in before running.
username = ""
password = ""
video_path = "tiktok-scraper/final_clip.mp4"
# options = webdriver.ChromeOptions()
# options.headless = True
# Hard-coded local chromedriver path; adjust per machine.
driver = webdriver.Chrome(r"C:\Users\LENOVO\PycharmProjects\web scraping\chromedriver.exe")
driver.get(url)
sleep(3)
def login():
    """Fill the Instagram login form with the module-level credentials
    and submit it."""
    try:
        # NOTE(review): positional XPaths are brittle, and the
        # find_element_by_* API was removed in Selenium 4 — confirm the
        # pinned selenium version.
        username_input = driver.find_element_by_xpath('//*[@id="loginForm"]/div/div[1]/div/label/input')
        password_input = driver.find_element_by_xpath('//*[@id="loginForm"]/div/div[2]/div/label/input')
        username_input.send_keys(username)
        password_input.send_keys(password)
        login_button = driver.find_element_by_xpath("//button[@type='submit']")
        login_button.click()
        sleep(5)
        print("login done.")
    except Exception as e:
        # NOTE(review): any failure (bad selector, bad credentials) is
        # only printed; the script then continues and later steps fail.
        print(e)
def home():
    """Dismiss the post-login dialogs (save-info prompt, notifications
    popup) so the feed is usable."""
    driver.find_element_by_xpath('//*[@id="react-root"]/section/main/div/div/div/div/button').click()
    driver.find_element_by_xpath('/html/body/div[5]/div/div/div/div[3]/button[2]').click()
    print("home done.")
def upload():
    """Open the IGTV upload page, attach the edited video, fill in title
    and description (imported from the tiktok module), and submit."""
    driver.get('https://www.instagram.com/tv/upload/')
    driver.find_element_by_xpath(
        '//*[@id="react-root"]/section/main/div/form/div/div[1]/label/input').send_keys(video_path)
    sleep(2)
    driver.find_element_by_xpath(
        '//*[@id="react-root"]/section/main/div/form/div/div[2]/div[4]/div/div/input').send_keys(title)
    sleep(2)
    driver.find_element_by_tag_name('textarea').send_keys(description)
    sleep(3)
    driver.find_element_by_xpath(
        '//*[@id="react-root"]/section/main/div/form/div/div[2]/div[7]/button').click()
    print("upload in progress.")
# Run the full pipeline: authenticate, clear dialogs, upload the clip.
login()
home()
upload()
print("Done thanks for using InstaTik ! <3")
|
import sys
from collections import Counter
from typing import Collection, List, Dict
from traceutils.as2org.as2org import AS2Org
from traceutils.bgp.bgp import BGP
from traceutils.progress.bar import Progress
from traceutils.utils.utils import max_num, peek
from bdrmapit.bdrmapit_parser import Updates
from bdrmapit.bdrmapit_parser import Graph
from bdrmapit.bdrmapit_parser import Router, Interface
# Module-wide debug switch, toggled by the Debug context manager below.
DEBUG = False

# Update-type / heuristic codes recorded alongside annotations. The
# values are identifiers, not ordered scores.
NOTIMPLEMENTED = 0
NODEST = 1
MODIFIED = 3
SINGLE = 4
SINGLE_MODIFIED = 5
HEAPED = 6
HEAPED_MODIFIED = 7
MISSING_NOINTER = 10
MISSING_INTER = 9
REALLOCATED_PREFIX = 500
REALLOCATED_DEST = 1000
SINGLE_SUCC_ORIGIN = 10
SINGLE_SUCC_4 = 11
SUCC_ORIGIN_INTER = 12
SUCC_ORIGIN_CUST = 13
REMAINING_4 = 14
IUPDATE = 15
ALLPEER_SUCC = 16
ALLPEER_ORIGIN = 17
IASN_SUCC_HALF = 18
ALLRELS = 19
VOTE_SINGLE = 50
VOTE_TIE = 70
# NOTE(review): SINGLE_SUCC_RASN shares value 15 with IUPDATE, and
# SINGLE_SUCC_ORIGIN/MISSING_NOINTER both equal 10 — confirm these
# collisions are intentional.
SINGLE_SUCC_RASN = 15
HIDDEN_INTER = 100
HIDDEN_NOINTER = 200
class Debug:
    """Context manager that switches the module-level DEBUG flag on for
    the duration of a with-block and restores the prior value on exit."""
    def __init__(self):
        # NOTE: the previous value is snapshotted at construction time,
        # not at __enter__ — relevant if the instance is built early.
        self.old = DEBUG
    def __enter__(self):
        global DEBUG
        DEBUG = True
    def __exit__(self, exc_type, exc_val, exc_tb):
        global DEBUG
        DEBUG = self.old
        # Never suppress exceptions.
        return False
class Bdrmapit:
    """Driver for the bdrmapit annotation passes over a traceroute graph.

    Holds the graph plus AS-to-organisation and BGP helpers, and keeps
    separate rolling update tables for routers (rupdates) and
    interfaces (iupdates).
    """
    def __init__(self, graph: Graph, as2org: AS2Org, bgp: BGP):
        self.graph = graph
        self.as2org = as2org
        self.bgp = bgp
        self.rupdates = Updates()  # router ASN annotations
        self.iupdates = Updates()  # interface ASN annotations
        # Partition routers: those with successors get the election
        # heuristic; the rest are last-hop routers.
        self.routers_succ: List[Router] = []
        self.lasthops: List[Router] = []
        for router in graph.routers.values():
            if router.succ:
                self.routers_succ.append(router)
            else:
                self.lasthops.append(router)
        self.interfaces_pred: List[Interface] = [i for i in graph.interfaces.values() if i.pred]
        # Snapshots of (router, interface) updates per iteration, used to
        # detect a fixed point / cycle during graph refinement.
        self.previous_updates = []
    def annotate_router(self, router: Router):
        """Elect an ASN for a router: count votes from its own interfaces
        and from each successor interface, preferring the successor's
        interface-level update when the orgs agree. Returns 0 when no
        positive ASN is available; ties fall back to the first
        interface's ASN."""
        isucc: Interface
        if DEBUG:
            print('Nexthop: {}'.format(router.nexthop))
        # Votes from the router's own interfaces (ignore ASN <= 0).
        asns = Counter(i.asn for i in router.interfaces if i.asn > 0)
        if DEBUG: print('IASNS: {}'.format(asns))
        for isucc in router.succ:
            if DEBUG: print('Succ: {}'.format(isucc))
            iupdate = self.iupdates[isucc]
            rupdate = self.rupdates[isucc.router]
            if DEBUG:
                print('\tIUPDATE: {}'.format(iupdate))
                print('\tRUPDATE: {}'.format(rupdate))
            # Use the interface update's ASN only when the successor
            # router's org matches the interface's org; otherwise fall
            # back to the interface's own ASN.
            if iupdate and iupdate.asn > 0:
                if not rupdate:
                    asn = isucc.asn
                else:
                    if rupdate.asn > 0 and isucc.org == rupdate.org:
                        asn = iupdate.asn
                    else:
                        asn = isucc.asn
            else:
                asn = isucc.asn
            if asn > 0:
                asns[asn] += 1
        if not asns:
            return 0
        selections = max_num(asns, key=lambda x: asns[x])
        if len(selections) == 1:
            return selections[0]
        # Tie-break: first interface's ASN.
        return router.interfaces[0].asn
    def annotate_routers(self, routers: Collection[Router], increment=100000):
        """Run annotate_router over a collection, recording each result
        in rupdates with update type 1."""
        pb = Progress(len(routers), 'Annotating routers', increment=increment)
        for router in pb.iterator(routers):
            asn = self.annotate_router(router)
            self.rupdates.add_update(router, asn, self.as2org[asn], 1)
    def annotate_interface(self, interface: Interface):
        """Elect an ASN for an interface from its own ASN plus the
        current annotations of its predecessor routers. Returns
        (asn, num_edges); ties return (interface.asn, 0)."""
        edges: Dict[Router, int] = interface.pred
        asns = Counter()
        if interface.asn > 0:
            asns[interface.asn] += 1
        if DEBUG: print('IASN: {}'.format(asns))
        for rpred in edges:
            rasn = self.rupdates.asn(rpred)
            if DEBUG:
                print('Pred={addr:} RASN={rasn:} {num:}'.format(addr=rpred.name, rasn=rasn, num=len(rpred.succ)))
            if rasn > 0:
                asns[rasn] += 1
        selections = max_num(asns, key=lambda x: asns[x])
        if len(selections) == 1:
            return selections[0], len(edges)
        return interface.asn, 0
    def annotate_interfaces(self, interfaces: Collection[Interface]):
        """Run annotate_interface over a collection (skipping negative
        ASNs), recording results in iupdates."""
        pb = Progress(len(interfaces), 'Adding links', increment=100000)
        for interface in pb.iterator(interfaces):
            if interface.asn >= 0:
                asn, utype = self.annotate_interface(interface)
                self.iupdates.add_update(interface, asn, self.as2org[asn], utype)
    def annotate_stubs(self, routers: Collection[Router]):
        """Stub heuristic: re-annotate single-successor routers with the
        successor's ASN when orgs are consistent and the successor's BGP
        customer cone is small (< 5) and smaller than the router's own."""
        pb = Progress(len(routers), 'Stub Heuristic', increment=100000)
        for router in pb.iterator(routers):
            if len(router.succ) == 1:
                interface = router.interfaces[0]
                iupdate = self.iupdates[interface]
                if iupdate and iupdate.utype > 1:
                    if interface.org != iupdate.org:
                        continue
                isucc = peek(router.succ)
                iupdate = self.iupdates[isucc]
                if iupdate and iupdate.utype > 1:
                    # if isucc.org != iupdate.org:
                    continue
                rupdate = self.rupdates[isucc.router]
                if rupdate:
                    if isucc.org != rupdate.org:
                        continue
                conesize = self.bgp.conesize[isucc.asn]
                if conesize < 5 and conesize < self.bgp.conesize[router.interfaces[0].asn]:
                    self.rupdates.add_update_direct(router, isucc.asn, isucc.org, 2)
    def annotate_lasthops(self, routers: List[Router]):
        """Annotate routers without successors directly from their first
        interface's ASN/org."""
        pb = Progress(len(routers), increment=1000000)
        for router in pb.iterator(routers):
            interface = router.interfaces[0]
            self.rupdates.add_update_direct(router, interface.asn, interface.org, 0)
    def graph_refinement(self, routers: List[Router], interfaces: List[Interface], iterations=-1):
        """Alternate router and interface annotation passes until the
        updates repeat a previous state (fixed point or cycle) or the
        iteration cap is hit (iterations < 0 means unbounded)."""
        iteration = 0
        while iterations < 0 or iteration < iterations:
            Progress.message('********** Iteration {:,d} **********'.format(iteration), file=sys.stderr)
            self.annotate_routers(routers)
            self.rupdates.advance()
            self.annotate_interfaces(interfaces)
            self.iupdates.advance()
            ru = dict(self.rupdates)
            iu = dict(self.iupdates)
            if (ru, iu) in self.previous_updates:
                break
            self.previous_updates.append((ru, iu))
            iteration += 1
|
"""All invalid/not found like url specific errors go here"""
from pygmy.exception.error import PygmyExcpetion
class URLNotFound(PygmyExcpetion):
    """Raised when the requested URL does not exist in the store."""
    def __init__(self, url):
        super().__init__()
        # Keep the offending URL for the error message.
        self.url = url
    def __str__(self):
        message = '{0} url not found'
        return message.format(self.url)
|
import math
class Piramide:
    """Geometry helper for a rectangular-based pyramid.

    Attributes (all default to 0): largura (width), comprimento
    (length), altura (height), area_base (base area) and volume. Each
    operation comes as a pair: get_formula_* returns the formula as
    text, calcula_* evaluates it from the stored attributes.
    """

    largura = 0
    volume = 0
    comprimento = 0
    area_base = 0
    altura = 0

    def __init__(self,
                 largura=None,
                 volume=None,
                 comprimento=None,
                 area_base=None,
                 altura=None):
        # Only overwrite the class-level defaults for the values the
        # caller actually supplied (None means "keep the default 0").
        for attr, given in (('largura', largura),
                            ('volume', volume),
                            ('comprimento', comprimento),
                            ('area_base', area_base),
                            ('altura', altura)):
            if given is not None:
                setattr(self, attr, given)

    # Op 1--------------------------------------------------------------------
    def get_formula_altura(self):
        """Formula for the height given volume and base sides."""
        return '3 * ( V / (w*l))'

    def calcula_altura(self):
        """Height from volume and base sides."""
        return 3 * (self.volume / (self.comprimento * self.largura))

    # Op 2--------------------------------------------------------------------
    def get_formula_area_base(self):
        """Formula for the base area."""
        return 'l * w'

    def calcula_area_base(self):
        """Base area from width and length."""
        return self.largura * self.comprimento

    # Op 3--------------------------------------------------------------------
    def get_formula_area_superficie(self):
        """Formula for the total surface area."""
        return '(l*w) + l*sqrt((w/2)^2 + h^2) + w*sqrt((l/2)^2 + h^2)'

    def calcula_area_superficie(self):
        """Total surface area (base plus the two pairs of slanted faces)."""
        w, l, h = self.largura, self.comprimento, self.altura
        return (l * w) + l * math.sqrt((w / 2) ** 2 + h ** 2) + w * math.sqrt((l / 2) ** 2 + h ** 2)

    # Op 4--------------------------------------------------------------------
    def get_formula_comprimento_base(self):
        """Formula for the base length given area and width."""
        return 'A / w'

    def calcula_comprimento_base(self):
        """Base length from base area and width."""
        return self.area_base / self.largura

    # Op 5--------------------------------------------------------------------
    def get_formula_largura_base(self):
        """Formula for the base width given area and length."""
        return 'A / l'

    def calcula_largura_base(self):
        """Base width from base area and length."""
        return self.area_base / self.comprimento

    # Op 6--------------------------------------------------------------------
    def get_formula_superficie_lateral(self):
        """Formula for the lateral (slanted) surface area."""
        return 'l*sqrt((w/2)^2 + h^2) + w*sqrt((l/2)^2 + h^2)'

    def calcula_superficie_lateral(self):
        """Lateral surface area of the two pairs of slanted faces."""
        w, l, h = self.largura, self.comprimento, self.altura
        return l * math.sqrt((w / 2) ** 2 + h ** 2) + w * math.sqrt((l / 2) ** 2 + h ** 2)

    # Op 7--------------------------------------------------------------------
    def get_formula_volume(self):
        """Formula for the volume."""
        return '(l * w) * (h / 3)'

    def calcula_volume(self):
        """Volume from base sides and height."""
        return (self.comprimento * self.largura) * (self.altura / 3)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.