text stringlengths 38 1.54M |
|---|
class CReader(object):
    """Cursor-style reader for TJA chart files.

    Tracks an explicit byte offset (self._fpos) so that lines can be
    peeked repeatedly without consuming them; skip_line() advances it.
    """

    def __init__(self):
        self._fobj = None  # underlying file object, set by set_file()
        self._fpos = 0     # byte offset of the next unread line

    def is_eof(self):
        """Return True when no line remains at the current position."""
        self._fobj.seek(self._fpos)
        return self._fobj.readline() == ""

    def set_file(self, filename):
        """Open `filename` for reading, closing any previously open file."""
        if self._fobj:
            self._fobj.close()
        self._fobj = open(filename, "r")
        self._fpos = self._fobj.tell()

    def rem_comment(self, line):
        """Strip a trailing `//` comment from `line`; return it unchanged if none."""
        try:
            comment_pos = line.index("//")
            return line[:comment_pos]
        except ValueError:
            return line

    def peek_line(self):
        """Return the next line (comment removed) without consuming it.

        Returns the sentinel "#END" at end of file.
        """
        self._fobj.seek(self._fpos)
        raw_line = self._fobj.readline()
        if raw_line == "":
            return "#END"
        else:
            return self.rem_comment(raw_line)

    def skip_line(self):
        """Consume one line, advancing the stored position."""
        self._fobj.seek(self._fpos)
        self._fobj.readline()  # line content itself is not needed
        self._fpos = self._fobj.tell()

    def read_header(self):
        """Parse the next line as a "Key: Value" header.

        Returns:
            (key, val) with both sides stripped, or (None, None) when the
            line contains no ':' separator.  Does not consume the line.
        """
        line = self.peek_line()
        try:
            colon_pos = line.index(":")
        except ValueError:
            # contains no header
            return None, None
        # split into {Key: Value}
        key = line[:colon_pos].strip()
        val = line[colon_pos + 1:].strip()
        return key, val

    def read_notes(self):
        """Read one bar of notes, restoring the file position afterwards.

        Returns:
            ("", 0) when the upcoming line is not a notes line; otherwise
            (first notes line, total note characters - 1).  The -1
            presumably discounts the trailing ',' bar terminator — confirm.

        NOTE(review): a notes section that never ends with ',' (e.g. EOF
        mid-bar) would loop forever here; callers appear to rely on
        well-formed input.
        """
        notes = ""
        tot_notes = 0
        fpos = self._fpos
        while True:
            notes_line = self._read_notes_line()
            # First line is not a notes line: nothing to read.
            if notes == "" and notes_line == "":
                return "", 0
            self.skip_line()
            # Only return the first notes line.
            if notes == "":
                notes = notes_line
            tot_notes += len(notes_line)
            # ',' terminates the bar.
            if notes_line.endswith(","):
                break
        # Rewind: read_notes only measures; it does not consume lines.
        self._fpos = fpos
        self._fobj.seek(fpos)
        return notes, tot_notes - 1

    def _read_notes_line(self):
        """Return the next line reduced to its note digits.

        Digits are kept, whitespace is ignored, and a ',' ends the bar and
        is returned together with the digits.  Any other character marks
        the line as not-a-notes-line and "" is returned.
        """
        line = self.peek_line()
        ripped = ""
        for c in line:
            if c.isdigit():
                ripped += c
            elif c == ",":
                return ripped + c
            elif c.isspace():
                continue
            else:
                return ""
        return ripped

    def read_command(self):
        """Parse the next line as a "#COMMAND arg1, arg2" command.

        Returns:
            (command name, tuple of stripped args), or ("", ()) when the
            line does not start with '#'.  Does not consume the line.
        """
        line = self.peek_line()
        cmd_name = ""
        args = ()
        if line.startswith("#"):
            parts = line.split(" ", 1)
            cmd_name = parts[0].strip()
            if len(parts) == 1:
                args = ()
            else:
                # Loop variable renamed from `str`, which shadowed the builtin.
                args = tuple(arg.strip() for arg in parts[1].split(","))
        return cmd_name, args

    def check_command(self, cmd):
        """Return True when the next line's first token equals `cmd`."""
        return self.peek_line().split(" ")[0].strip() == cmd

    def check_commands(self, cmd):
        """Return True when the next line's first token is in collection `cmd`."""
        return self.peek_line().split(" ")[0].strip() in cmd

    def close(self):
        """Close the underlying file, if any."""
        if self._fobj:
            self._fobj.close()
            self._fobj = None
if __name__ == "__main__":
    import tja_header
    import sys
    reader = CReader()
    reader.set_file(sys.argv[1])
    header = tja_header.CData()
    header.read(reader)
    header.refresh()
    header.print_out()
    # Dump each line of the note section until the #END command is reached.
    while True:
        if reader.check_command("#END"):
            break
        # BUG FIX: read_notes() returns a (notes, count) tuple, which is
        # always truthy — the original `if notes:` therefore always fired.
        # Unpack so the emptiness test applies to the notes string itself.
        notes, num_notes = reader.read_notes()
        cmd_name, args = reader.read_command()
        if notes:
            # Python-3 print calls (the original used py2 print statements).
            print("[NOTES]", notes)
        elif cmd_name:
            print("[ CMD ]", cmd_name, args)
        else:
            print("[EMPTY]")
        reader.skip_line()
|
"""
Copyright (c) 2016-2020 Keith Sterling http://www.keithsterling.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from sqlalchemy import Column
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy import Boolean
from programy.storage.stores.sql.base import Base
from programy.storage.stores.utils import DAOUtils
class Service(Base):
    """DAO row for a chatbot service definition (table `services`).

    REST-type services additionally carry url/timeout/retry columns,
    which is why __repr__ branches on `type`.
    """
    __tablename__ = 'services'

    id = Column(Integer, primary_key=True)
    type = Column(String(48))
    name = Column(String(48))
    category = Column(String(48))
    service_class = Column(String(512))
    default_response = Column(String(128))
    default_srai = Column(String(48))
    default_aiml = Column(String(512))
    success_prefix = Column(String(128))
    load_default_aiml = Column(Boolean(), nullable=False, default=True)
    url = Column(String(100))
    rest_timeout = Column(Integer)
    rest_retries = Column(String(512))

    def __repr__(self):
        """Debug representation; includes REST fields only for type 'rest'.

        Fix: the non-REST branch formatted the already-stringified boolean
        with '%r', producing quoted output ('False') inconsistent with the
        REST branch — both branches now use '%s'.
        """
        if self.type == 'rest':
            return "<Service(id='%s', type='%s', name='%s', category='%s', service_class='%s', " \
                   "default_response='%s', default_srai='%s', default_aiml='%s', " \
                   "load_default_aiml='%s', " \
                   "success_prefix='%s', " \
                   "url='%s', " \
                   "rest_timeout='%s', rest_retries='%s'" \
                   ")>" % (
                DAOUtils.valid_id(self.id), self.type, self.name, self.category, self.service_class,
                self.default_response, self.default_srai, self.default_aiml,
                "False" if self.load_default_aiml is False else "True",
                self.success_prefix,
                self.url,
                self.rest_timeout, self.rest_retries)
        else:
            return "<Service(id='%s', type='%s', name='%s', category='%s', service_class='%s', " \
                   "default_response='%s', default_srai='%s', default_aiml='%s', load_default_aiml='%s', " \
                   "success_prefix='%s', " \
                   "url='%s' " \
                   ")>" % (
                DAOUtils.valid_id(self.id), self.type, self.name, self.category, self.service_class,
                self.default_response, self.default_srai, self.default_aiml,
                "False" if self.load_default_aiml is False else "True",
                self.success_prefix,
                self.url
            )
|
import math
import sys
from scipy.spatial import distance
import sys
# detect the traid in topic evolution
# according to TKDE15-Traid Closure Pattern Analysis and Prediction
# There are two kinds of open Traid, and 1 kind of close Traid
class TraidDetect(object):
    """Detect triads in a topic-evolution graph.

    Based on TKDE'15 "Triad Closure Pattern Analysis and Prediction".
    There are two kinds of open triad and one kind of closed triad.
    Topic keys are interned to integer ids, and evolution edges are kept
    in both directions:

      * self.evolution:          parent id -> list of child ids
      * self.evolution_reverse:  child id  -> list of parent ids
    """

    def __init__(self, q):
        super(TraidDetect, self).__init__()
        # Query string; spaces become underscores to match on-disk file names.
        self.query = q.replace(' ', '_')
        # key <-> numeric-id mappings; counter is the next free id.
        self.key2num = dict()
        self.num2key = list()
        self.counter = 0
        # build two graphs:
        # self.evolution: check son using parent
        # self.evolution_reverse: check parent using son
        self.evolution = dict()
        self.evolution_reverse = dict()

    def add_line(self, li):
        """Add one evolution edge li[0] -> li[1], assigning ids on first sight."""
        if li[0] not in self.key2num:
            self.key2num[li[0]] = self.counter
            self.counter += 1
        if li[1] not in self.key2num:
            self.key2num[li[1]] = self.counter
            self.counter += 1
        num1 = self.key2num[li[0]]
        num2 = self.key2num[li[1]]
        if num1 not in self.evolution:
            self.evolution[num1] = [num2]
        else:
            self.evolution[num1].append(num2)
        if num2 not in self.evolution_reverse:
            self.evolution_reverse[num2] = [num1]
        else:
            self.evolution_reverse[num2].append(num1)

    def load_labeled_file(self, skip_char):
        """Load labeled edges for this query, skipping rows labeled `skip_char`."""
        filename = '../views/label/label_' + self.query + '.txt'
        # load evolution from one file
        content = open(filename).readlines()
        for i in content:
            li = i.strip().split(' ')
            # Column 2 holds the label; rows matching skip_char are ignored.
            if li[2] == skip_char:
                continue
            self.add_line(li)
        self.gen_num2key()

    def load_unlabeled_file(self, filename):
        """Load every edge from `filename` (no label filtering)."""
        content = open(filename).readlines()
        for i in content:
            li = i.strip().split(' ')
            self.add_line(li)
        self.gen_num2key()

    def gen_num2key(self):
        """Rebuild the id -> key list from the key -> id dict."""
        self.num2key = [''] * len(self.key2num)
        for key in self.key2num:
            self.num2key[self.key2num[key]] = key

    def output_traids(self, skip_char, op0, op1, op3, cp6):
        """Write all detected triads to a result file selected by `skip_char`.

        NOTE(review): a skip_char other than '0'/'1' leaves `filename`
        unbound and raises NameError — callers presumably only pass
        '0' or '1'; confirm.
        """
        if skip_char == '0':
            filename = '../results/' + self.query + '.traid'
            print ('detect traid: %d op0, %d op1, %d op3, %d cp6' % (len(op0), len(op1), len(op3), len(cp6)) )
        elif skip_char == '1':
            filename = '../results/' + self.query + '.utraid'
            print ('detect untraid: %d op0, %d op1, %d op3, %d cp6' % (len(op0), len(op1), len(op3), len(cp6)) )
        output = open(filename, 'w')
        output.write('#----------------------------------------------------------------------\n')
        output.write('#open_traid_0\n')
        output.write('#format is (from_node, to_node1, to_node2)\n')
        for i in op0:
            output.write('%s %s %s \n' % (self.num2key[i[0]], self.num2key[i[1]], self.num2key[i[2]]) )
        output.write('\n\n\n#----------------------------------------------------------------------\n')
        output.write('#open_traid_1\n')
        output.write('#format is (first_node, second_node, thrid_node)\n')
        for i in op1:
            output.write('%s %s %s \n' % (self.num2key[i[0]], self.num2key[i[1]], self.num2key[i[2]]) )
        output.write('\n\n\n#----------------------------------------------------------------------\n')
        output.write('#open_traid_3\n')
        output.write('#format is (from_node1, from_node2, to_node)\n')
        for i in op3:
            output.write('%s %s %s \n' % (self.num2key[i[0]], self.num2key[i[1]], self.num2key[i[2]]) )
        output.write('\n\n\n#----------------------------------------------------------------------\n')
        output.write('#close_traid_6\n')
        output.write('#format is (from_node, to_node1, to_node1_to_node2)\n')
        for i in cp6:
            output.write('%s %s %s \n' % (self.num2key[i[0]], self.num2key[i[1]], self.num2key[i[2]]) )
        output.close()

    def detect_traid(self):
        """Return (open_traid_0, open_traid_1, open_traid_3, close_traid_6).

        Each entry is a list of id triples; see output_traids for their
        layouts.  A->B->C->A cycles (close_traid_7) should not occur in an
        evolution graph and are only printed for debugging.
        """
        # (from, to1, to2)
        open_traid_0 = list()
        # (from, to1, to1_to2)
        close_traid_6 = list()
        for i in self.evolution:
            for num1 in self.evolution[i]:
                for num2 in self.evolution[i]:
                    if num1 in self.evolution and num2 in self.evolution[num1]:
                        close_traid_6.append( (i, num1, num2) )
                    elif num1 > num2:
                        open_traid_0.append( (i, num1, num2) )
        # (first, second, thrid)
        open_traid_1 = list()
        # (A->B->C->A)
        # just for debug, should not occur in evolution
        close_traid_7 = list()
        for first in self.evolution:
            for second in self.evolution[first]:
                if second in self.evolution:
                    for third in self.evolution[second]:
                        if third not in self.evolution[first]:
                            open_traid_1.append( (first, second, third) )
                        if third in self.evolution and first in self.evolution[third]:
                            close_traid_7.append( (first, second, third) )
        if len(close_traid_7) != 0:
            print ('cyclic evolution detected')
            for i in close_traid_7:
                print ('(%s, %s, %s)' % (self.num2key[i[0]], self.num2key[i[1]], self.num2key[i[2]]) )
        # (from1, from2, to)
        open_traid_3 = list()
        for i in self.evolution_reverse:
            for num1 in self.evolution_reverse[i]:
                for num2 in self.evolution_reverse[i]:
                    if num1 in self.evolution_reverse and num2 in self.evolution_reverse[num1]:
                        continue
                    elif num2 in self.evolution_reverse and num1 in self.evolution_reverse[num2]:
                        continue
                    elif num1 > num2:
                        open_traid_3.append( (num1, num2, i) )
        return open_traid_0, open_traid_1, open_traid_3, close_traid_6

    def load_factor_file(self, filename, factor):
        """Append one feature vector per line of `filename` onto `factor`.

        Each line looks like "<id> k:v k:v ..."; only the values are kept.
        (The original reused loop variable `i` for both loops; renamed to
        avoid the shadowing.)
        """
        content = open(filename).readlines()
        for line in content:
            li = line.strip().split(' ')
            fli = list()
            # Field 0 is an identifier; fields 1.. are "key:value" pairs.
            for idx in range(1, len(li)):
                fli.append( float(li[idx].split(':')[1]) )
            factor.append(fli)

    def load_factor(self):
        """Load labeled then unlabeled feature vectors into one list."""
        factor = list()
        filename = '../social_tie/results/' + self.query + '/label.txt'
        self.load_factor_file(filename, factor)
        filename = '../social_tie/results/' + self.query + '/unlabel.txt'
        self.load_factor_file(filename, factor)
        return factor

    def load_mark_file(self, filename, mark):
        """Map each line of `filename` (an edge key) to a running index in `mark`."""
        counter = len(mark)
        content = open(filename).readlines()
        for i in content:
            mark[ i.strip() ] = counter
            counter += 1

    def load_mark(self):
        """Load labeled then unlabeled edge marks into one dict.

        BUG FIX: the original called the bare name `load_mark_file`, which
        raised NameError at runtime — it must be invoked through self.
        """
        mark = dict()
        filename = '../social_tie/results/' + self.query + '/label.mark'
        self.load_mark_file(filename, mark)
        filename = '../social_tie/results/' + self.query + '/unlabel.mark'
        self.load_mark_file(filename, mark)
        return mark

    def output_edge(self, open_traid_0, open_traid_1, open_traid_3, infile, outfile):
        """Write '#edge m1 m2 kind' lines for every open triad whose edges are marked."""
        output = open(outfile, 'w')
        mark = dict()
        self.load_mark_file(infile, mark)
        for i in open_traid_0:
            edge1 = '%s %s' % (self.num2key[ i[0] ], self.num2key[ i[1] ])
            edge2 = '%s %s' % (self.num2key[ i[0] ], self.num2key[ i[2] ])
            if edge1 in mark and edge2 in mark:
                output.write('#edge %d %d %d\n' % (mark[edge1], mark[edge2], 1) )
        for i in open_traid_1:
            edge1 = '%s %s' % (self.num2key[ i[0] ], self.num2key[ i[1] ])
            edge2 = '%s %s' % (self.num2key[ i[1] ], self.num2key[ i[2] ])
            if edge1 in mark and edge2 in mark:
                output.write('#edge %d %d %d\n' % (mark[edge1], mark[edge2], 2))
        for i in open_traid_3:
            edge1 = '%s %s' % (self.num2key[ i[0] ], self.num2key[ i[2] ])
            edge2 = '%s %s' % (self.num2key[ i[1] ], self.num2key[ i[2] ])
            if edge1 in mark and edge2 in mark:
                output.write('#edge %d %d %d\n' % (mark[edge1], mark[edge2], 3))
        output.close()

    def output_triangle(self, close_traid_6, infile, outfile):
        """Write '#triangle m1 m2 m3 1' lines for closed triads with all edges marked."""
        output = open(outfile, 'w')
        mark = dict()
        self.load_mark_file(infile, mark)
        for i in close_traid_6:
            edge1 = '%s %s' % (self.num2key[ i[0] ], self.num2key[ i[1] ])
            edge2 = '%s %s' % (self.num2key[ i[0] ], self.num2key[ i[2] ])
            edge3 = '%s %s' % (self.num2key[ i[1] ], self.num2key[ i[2] ])
            if edge1 in mark and edge2 in mark and edge3 in mark:
                output.write('#triangle %d %d %d %d\n' % (mark[edge1], mark[edge2], mark[edge3], 1) )
        output.close()

    def calc_similarity(self, open_traid_0, open_traid_3):
        """Average the factor vectors over every triad edge found in the mark files.

        NOTE(review): raises ZeroDivisionError when no triad edge matches
        the mark files — presumably never happens on real data; confirm.
        """
        factor = self.load_factor()
        pair_dict = dict()
        counter = 0
        filename = '../social_tie/results/' + self.query + '/label.mark'
        content = open(filename).readlines()
        for i in content:
            pair_dict[i.strip()] = factor[counter]
            counter += 1
        filename = '../social_tie/results/' + self.query + '/unlabel.mark'
        content = open(filename).readlines()
        for i in content:
            pair_dict[i.strip()] = factor[counter]
            counter += 1
        pair_list = list()
        for i in open_traid_0:
            pair_list.append( '%s %s' % (self.num2key[ i[1] ], self.num2key[ i[2] ] ) )
        for i in open_traid_3:
            pair_list.append( '%s %s' % (self.num2key[ i[0] ], self.num2key[ i[1] ] ) )
        length = len(factor[0])
        accumulator = [0.0] * length
        counter = 0
        for i in pair_list:
            if i in pair_dict:
                for j in range(0, length):
                    accumulator[j] += pair_dict[i][j]
                counter += 1
        for i in range(0, length):
            accumulator[i] /= counter
        print (accumulator)
        return accumulator
def test(query, skip_char):
    """Run triad detection for `query` on its labeled file and write results.

    `skip_char` selects which label value to skip ('0' -> .traid output,
    '1' -> .utraid output).  Returns an empty list; similarity averaging
    is currently not performed here.
    """
    detector = TraidDetect(query)
    # the input should be label.txt
    detector.load_labeled_file(skip_char)
    triads = detector.detect_traid()
    detector.output_traids(skip_char, *triads)
    return list()
def main():
    """Compare averaged factors between labeled ('0') and unlabeled ('1') runs."""
    label_avg = test(sys.argv[1], '0')
    unlabel_avg = test(sys.argv[1], '1')
    # NOTE(review): test() currently returns an empty list, so this loop body
    # never executes; rate would be a normalized absolute difference.
    for i in range(0, len(label_avg)):
        rate = math.fabs(label_avg[i] - unlabel_avg[i]) / math.fabs(label_avg[i] + unlabel_avg[i])
        print (rate)
'''
input_file = '../social_tie/results/' + sys.argv[1] + '/' + sys.argv[2] + '.mark'
output_file = '../social_tie/results/' + sys.argv[1] + '/edges.txt'
output_triangle_file = '../social_tie/results/' + sys.argv[1] + '/triangles.txt'
td = TraidDetect(sys.argv[1])
td.load_unlabeled_file(input_file)
op0, op1, op3, cp6 = td.detect_traid()
td.output_edge(op0, op1, op3, input_file, output_file)
td.output_triangle(cp6, input_file, output_triangle_file)
append_filename = '../social_tie/results/' + sys.argv[1] + '/' + sys.argv[2] + '.txt'
append_file = open(append_filename, 'a')
output = open(output_file, 'r')
for line in output:
    append_file.write(line)
append_file.close()
output.close()
append_file = open(append_filename, 'a')
output = open(output_triangle_file, 'r')
for line in output:
    append_file.write(line)
append_file.close()
output.close()
'''
if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
#Author: Cedric Flamant
import numpy as np
from rotmat import rotation_matrix as RM
from meas import *
def ang2pos(head_pos,angles):
    """
    Input =>
    head_pos:
        (x,y,z) position of the head
    angles:
        2d array of theta,phi for each of the following (in order):
        -neck
        -shoulder girdle
        -pelvis girdle
        -right upper arm
        -left upper arm
        -right thigh
        -left thigh
        -right forearm
        -left forearm
        -right foreleg
        -left foreleg
    Returns =>
    pos:
        2d array of positions, where the row index corresponds to:
        -head
        -collarbone
        -tailbone
        -right shoulder
        -left shoulder
        -right hip
        -left hip
        -right elbow
        -left elbow
        -right knee
        -left knee
        -right wrist
        -left wrist
        -right ankle
        -left ankle
    """
    # 15 joints x (x, y, z)
    pos = np.zeros((15,3))
    # initialize default limb vectors
    # NECK, SPINE, SHOULDER, PELVIC, UPPER_ARM, THIGH, FOREARM, FORELEG are
    # segment lengths star-imported from meas — presumably scalars; confirm units.
    neck = NECK*np.array([0.,-1.,0.])
    spine = SPINE*np.array([0.,-1.,0.])
    rcollar = SHOULDER/2.*np.array([-1.,0.,0.])
    rpelvis = PELVIC/2.*np.array([-1.,0.,0.])
    rupperarm = UPPER_ARM*np.array([-1.,0.,0.])
    lupperarm = UPPER_ARM*np.array([1.,0.,0.])
    rthigh = THIGH*np.array([0.,-1.,0.])
    lthigh = THIGH*np.array([0.,-1.,0.])
    rforearm = FOREARM*np.array([-1.,0.,0.])
    lforearm = FOREARM*np.array([1.,0.,0.])
    rforeleg = FORELEG*np.array([0.,-1.,0.])
    lforeleg = FORELEG*np.array([0.,-1.,0.])
    # calculate rotation matrices for each joint
    # RM(axis, angle) — presumably a 3x3 rotation matrix about `axis` by
    # `angle` (see rotmat.rotation_matrix); the axis is built from phi
    # (angles[i,1]) and the rotation amount is theta (angles[i,0]).
    neckRM = RM([-np.cos(angles[0,1]),0.,np.sin(angles[0,1])],angles[0,0])
    collarRM = RM([0.,np.cos(angles[1,1]),-np.sin(angles[1,1])],angles[1,0])
    pelvisRM = RM([0.,np.cos(angles[2,1]),-np.sin(angles[2,1])],angles[2,0])
    rshoulderRM = RM([0.,np.cos(angles[3,1]),-np.sin(angles[3,1])],angles[3,0])
    lshoulderRM = RM([0.,-np.cos(angles[4,1]),-np.sin(angles[4,1])],angles[4,0])
    rhipRM = RM([-np.cos(angles[5,1]),0.,-np.sin(angles[5,1])],angles[5,0])
    lhipRM = RM([-np.cos(angles[6,1]),0.,-np.sin(angles[6,1])],angles[6,0])
    relbowRM = RM([0.,np.cos(angles[7,1]),-np.sin(angles[7,1])],angles[7,0])
    lelbowRM = RM([0.,-np.cos(angles[8,1]),-np.sin(angles[8,1])],angles[8,0])
    rkneeRM = RM([-np.cos(angles[9,1]),0.,-np.sin(angles[9,1])],angles[9,0])
    lkneeRM = RM([-np.cos(angles[10,1]),0.,-np.sin(angles[10,1])],angles[10,0])
    # calculate the rotated vectors
    # Each limb vector is rotated by the chain of its ancestor joints
    # (kinematic chain rooted at the neck).  `@` is numpy matmul.
    neck = neckRM @ neck
    spine = neckRM @ spine
    rcollar = neckRM @ collarRM @ rcollar
    rpelvis = neckRM @ pelvisRM @ rpelvis
    rupperarm = neckRM @ collarRM @ rshoulderRM @ rupperarm
    lupperarm = neckRM @ collarRM @ lshoulderRM @ lupperarm
    rthigh = neckRM @ pelvisRM @ rhipRM @ rthigh
    lthigh = neckRM @ pelvisRM @ lhipRM @ lthigh
    rforearm = neckRM @ collarRM @ rshoulderRM @ relbowRM @ rforearm
    lforearm = neckRM @ collarRM @ lshoulderRM @ lelbowRM @ lforearm
    rforeleg = neckRM @ pelvisRM @ rhipRM @ rkneeRM @ rforeleg
    lforeleg = neckRM @ pelvisRM @ lhipRM @ lkneeRM @ lforeleg
    # set the head position first
    pos[0,:] = head_pos
    # set collar position
    pos[1,:] = pos[0,:] + neck
    # set tailbone position
    pos[2,:] = pos[1,:] + spine
    # set right shoulder position
    pos[3,:] = pos[1,:] + rcollar
    # set left shoulder position (mirrored across the collarbone)
    pos[4,:] = pos[1,:] - rcollar
    # set right hip position
    pos[5,:] = pos[2,:] + rpelvis
    # set left hip position (mirrored across the tailbone)
    pos[6,:] = pos[2,:] - rpelvis
    # set right elbow
    pos[7,:] = pos[3,:] + rupperarm
    # set left elbow
    pos[8,:] = pos[4,:] + lupperarm
    # set right knee
    pos[9,:] = pos[5,:] + rthigh
    # set left knee
    pos[10,:] = pos[6,:] + lthigh
    # set right wrist
    pos[11,:] = pos[7,:] + rforearm
    # set left wrist
    pos[12,:] = pos[8,:] + lforearm
    # set right ankle
    pos[13,:] = pos[9,:] + rforeleg
    # set left ankle
    pos[14,:] = pos[10,:] + lforeleg
    return pos
|
import os
from flask import abort, Flask, jsonify, request
from flask_jwt_extended import (create_access_token, create_refresh_token, jwt_required, jwt_refresh_token_required, get_jwt_identity)
from flask_jwt_extended import JWTManager
import requests
app = Flask(__name__)
##### EMPTY
###
##
#
@app.route('/', methods = ['POST'])
def empty():
    """Placeholder POST endpoint: always returns the fixed JSON {"name": "name"}."""
    message = {
        "name":"name"
    }
    return jsonify( message )
"""
Crie um programa que leia vários números inteiros pelo teclado. O programa só vai parar quando o usuário digitar o valor
999, que é a condição de parada. No final, mostre quantos números foram digitados e qual foi a soma entre eles
(desconsiderando o flag).
"""
"""
controle = 0
acumulador = 0
contador = 0
while controle != 999:
numero = int(input("Digite um número: "))
if numero == 999:
controle = 999
print("Fim do programa")
else:
acumulador += numero
contador += 1
print("Foram somados {} numeros e a soma dos números é igual a {}".format(contador, acumulador))
"""
# Read integers until the sentinel 999 is typed; then report how many
# numbers were entered (excluding the sentinel) and their sum.
cont = soma = 0
while True:
    num = int(input("Digite um número [999 para parar]: "))
    if num == 999:
        break
    soma += num
    cont += 1
print("Você digitou {} números e a soma entre eles foi {}.".format(cont, soma))
|
# Generated by Django 2.1.7 on 2019-03-07 00:28
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adjust user-entered price/quantity fields on Item."""

    dependencies = [
        ('core', '0002_item_rentabilidade'),
    ]

    operations = [
        # User-entered price: up to 10 digits with 2 decimal places.
        migrations.AlterField(
            model_name='item',
            name='preco_digitado_pelo_usuario',
            field=models.DecimalField(decimal_places=2, default=550000, max_digits=10),
        ),
        # User-entered quantity now defaults to 1.
        migrations.AlterField(
            model_name='item',
            name='quantidade_digitadada_pelo_usuario',
            field=models.IntegerField(default=1),
        ),
    ]
|
# we can have two parameters in open or close
# 1st parameter is filename and the other is which operation we have to perform - read,write,append
# file= open("test.txt","r") opening file in a read mode
# x= file.read()
# print(x)
## Writing into the file
# file= open("test.txt","w")
#
# y=file.write("i am nipun gera")
#
#
# file.close()
#
# Appending text in the file
# Append mode ("a") adds to the end of test.txt, creating it if absent.
# Fix: the original never closed the handle; `with` guarantees the file
# is flushed and closed even on error.
with open("test.txt", "a") as file:
    # write() returns the number of characters written.
    z = file.write("I will be in a great world one day.")
print(z)
|
def printCombination(arr, n, r):
    """Print every r-length combination of the first n elements of arr.

    Thin wrapper around combinationUtil with a scratch buffer of size r.
    (Removed the stray C-style semicolon from the original.)
    """
    data = [0] * r
    combinationUtil(arr, data, 0, n - 1, 0, r)
def combinationUtil(arr, data, start, end, index, r):
    """Recursively build combinations in data[0:r] and print each complete one.

    Args:
        arr: source sequence.
        data: scratch buffer of length r holding the partial combination.
        start, end: inclusive index range of arr still available.
        index: next slot of data to fill.
        r: combination length.
    (Removed the dead local `i = start` and the leftover commented-out
    `i += 1` from the original.)
    """
    if index == r:
        # A full combination is ready — print it space-separated.
        for j in range(r):
            print(data[j], end=" ")
        print()
        return
    for j in range(start, end + 1):
        data[index] = arr[j]
        combinationUtil(arr, data, j + 1, end, index + 1, r)
# Demo: print all 3-element combinations of [1, 2, 3, 4, 5].
arr = [1, 2, 3, 4, 5]
r = 3
n = len(arr)
printCombination(arr, n, r)
|
import xlwt
# Create a new .xls workbook with one sheet and write a single cell.
file = xlwt.Workbook()
# cell_overwrite_ok lets the same cell be written more than once without raising.
table = file.add_sheet('sheet name',cell_overwrite_ok=True)
# write(row, col, value): zero-based row 1, column 1 gets 'text'.
table.write(1, 1, 'text')
file.save("test1.xls")
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-09-03 07:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add install/sale status flags to Safe."""

    dependencies = [
        ('pay', '0025_safe_check_status'),
    ]

    operations = [
        # Both new status fields default to 1 — presumably "active"; confirm
        # the status-code meanings against the Safe model.
        migrations.AddField(
            model_name='safe',
            name='install_status',
            field=models.IntegerField(default=1),
        ),
        migrations.AddField(
            model_name='safe',
            name='sale_status',
            field=models.IntegerField(default=1),
        ),
    ]
|
from pyAudioAnalysis import audioBasicIO
from pyAudioAnalysis import ShortTermFeatures
from pyAudioAnalysis import MidTermFeatures
from glob import glob
#TODO
'''
Add number of Features
Add types of Features
'''
# Glob pattern selecting the input .wav files to featurize.
data_dir = "C:/Users/MADHUKAR/Desktop/test/abc/*.wav"
audio_files = glob(data_dir)
# Iterate the matched paths directly (the original indexed by position and
# misleadingly named the integer loop index `filename`).
for audio_file in audio_files:
    [Fs, x] = audioBasicIO.read_audio_file(audio_file)
    Mono_Signal = audioBasicIO.stereo_to_mono(x)
    print(Fs)
    # short term features: 50 ms windows with a 25 ms hop (in samples)
    [Feature, Feature_Names] = ShortTermFeatures.feature_extraction(Mono_Signal, Fs, 0.050 * Fs, 0.025 * Fs, deltas=True)
    # mid term features: 1.0 s windows / 0.75 s hop over 50 ms / 5 ms short windows
    # mid_feature_extraction(signal, sampling_rate, mid_window, mid_step, short_window, short_step)
    [mid_features, short_features, mid_feature_names] = MidTermFeatures.mid_feature_extraction(Mono_Signal, Fs, 1.0 * Fs, 0.75 * Fs, 0.050 * Fs, 0.005 * Fs)
    print(Feature_Names)
    print(Feature)
    print(mid_feature_names)
    print(mid_features)
|
'''
William Cawley Gelling
201077658
Assignment 4 Bank Accounts
'''
from random import randint
import datetime
class BasicAccount():
'''
Basic account is the account to be used for individules for the company/Bank
'''
#incremented when a bank account is made.
noOfAc = 0
#List of card numbers that have already been used.
cardNoList = []
@classmethod
def makeCardNum(cls):
"""
Creates a card number and checks that it does not already exist,
if it does it creates a new card number and checks untill it has an unused number.
Parameters:
Nothing
Returns:
returns the string of the card number
"""
while True:
a = ""
for x in range (0,16):
a += str(randint(0,9))
if a in cls.cardNoList:
continue
else :
return a
@classmethod
def checkStartBalance(cls,theBalance):
"""
to be used during __init__
checks that the start balance is a valid balance, keeps asking until balance of correct form
is entered.
This is designed so only floats are excepted e.g. 50.0 is excepted but 50 is not.
Parameters:
theBalance - this is the balance entered when creating an instance
Returns:
theBalance - this is the balance entered that fullfils the critiria
"""
while True:
if isinstance(theBalance,float) == True:
if theBalance >= 0:
return theBalance
else:
try:
theBalance= float(input("theBalance has to be positive or zero, please enter again: "))
except:
print("please enter a positive/zero float: ")
else:
try:
theBalance = float(input("theBalance has to be a positve/zero float. please input again: "))
except:
print("please enter a positive/zero float: ")
@classmethod
def checkStartName(cls, theName):
"""
to be used during __init__
checks that the start name is a valid name, i have defined this
as being a string that starts with a letter
Parameters:
theName - this is the name entered when creating an instance
Returns:
theBalance - this is the name entered of the correct form
"""
while True:
if isinstance(theName,str):
if theName[0].isalpha():
return theName
break
else :
theName = input("The name has to start with a letter, please input theName again: ")
else:
theName = input("The name has to start with a letter, please input theName again: ")
def __init__(self, theName, theBalance = 0.0) :
self.name = self.checkStartName(theName)
self.balance = self.checkStartBalance(theBalance)
BasicAccount.noOfAc += 1
self.acNum = BasicAccount.noOfAc
self.cardNum = self.makeCardNum()
self.joinDate = datetime.datetime.now()
self.cardExp = (self.joinDate.month , self.joinDate.year - 1997)
def __str__(self):
return '\nYour account name is: ' + self.name +'\nYour balance is : ' + "£{:,.2f}".format(self.balance) + '\n'
def getAvailableBalance(self):
"""
Calculates and returns the total balance available and returns as a float
Parameters:
Nothing
Returns:
The total available balance available as a float
"""
return float(self.balance)
def getBalance(self):
"""
returns the balance available as a float
Parameters:
Nothing
Returns:
The balance as a float
"""
return float(self.balance)
def printBalance(self):
"""
Prints the Balance available
Parameters:
None
Returns:
Nothing
"""
print('Your balance is: ' + "£{:,.2f}".format(self.balance))
def getName(self):
"""
Returns the name of the account holder
Parameters:
None
Returns:
Name of the account holder as a string
"""
return str(self.name)
def getAcNum(self):
"""
Returns the account number as a string
Parameters:
None
Returns:
account number as a sting
"""
return str(self.acNum)
def issueNewCard(self):
"""
Creates a new card with new number and expiry date.
Parameters:
None
Returns:
nothing
"""
self.cardNum = self.makeCardNum()
today = datetime.datetime.now()
self.cardExp = (today.month , today.year - 1997)
def deposit(self,deposits):
"""
Deposits the stated amount into the account and adjusts the balance appropriately.
Deposits must be a positive amount.
Parameters:
deposit: the amount that they wish to deposit
return:
Nothing
"""
if isinstance(deposits, float) or isinstance(deposits, int):
if deposits <= 0:
print ("Deposit not possible, You can only add a positive nonzero ammount to your account.")
else:
self.printBalance()
print ('You are depositing: '+ "£{:,.2f}".format(self.balance))
self.balance += deposits
self.printBalance()
else:
print('Deposit not avalible, You need to enter a float')
def withdraw(self, withdraws):
"""
Withdraws the stated amount from the account and prints information about the withdrawral.
If an invalid amount is requested, then a warning message appears and the process is terminated
Parameters:
withdraws: the amount that they wish to withdraw
return:
Nothing
"""
try:
if isinstance(withdraws, float) == False:
raise TypeError ("\nYour withdrawral has to be od type float")
if withdraws > self.getAvailableBalance():
raise ValueError ("\nYour withdrawral can not be larger then your balance.")
elif withdraws < 0 :
raise ValueError ("\nYour withdrawral can not be negative")
self.balance -= withdraws
print(self.name, "has withdrawn "+ '£{:,.2f}'.format(withdraws))
self.printBalance()
except Exception as exp:
print("Can not Withdraw "+ '£{:,.2f}'.format(withdraws).replace('£-','-£'), exp)
def closeAccount(self):
"""
to be called before deleting of the object instence, Returns remaining balence to the customer
and then returns True. If the person has a negative balance then account will not be able
to close and a message will show. False will be returned
Parameters:
None
return:
retuns TRUE OR FALSE depending on whether it is successful
"""
if self.balance >= 0:
self.withdraw(self.balance)
return True
else :
print("Can not close account due to customer being overdrawn by £", abs(self.balance))
return False
class PremiumAccount(BasicAccount):
'''
Premium account is a basic account with the addition of an overdraft
'''
@classmethod
def checkStartOverdraft(cls,theOverdraftLimit):
"""
to be used during __init__
checks that the start overdraft limit is a valid, keeps asking until limit of correct form
is entered.
This is designed so only floats are excepted e.g. 50.0 is excepted but 50 is not.
Parameters:
theOverdraftLimit - this is the overdraft limit entered when creating an instance
Returns:
theBalance - this is the overdraft limit entered that fullfils the critiria
"""
while True:
if isinstance(theOverdraftLimit,float) == True:
if theOverdraftLimit >= 0:
return theOverdraftLimit
else:
try:
theOverdraftLimit= float(input("theOverdraftLimit has to be positive or zero, please enter again: "))
except:
print("please enter a positive/zero float: ")
else:
try:
theOverdraftLimit = float(input("theOverdraftLimit has to be a positve/zero float. please input again: "))
except:
print("please enter a positive/zero float: ")
def __init__(self, theName, theBalance = 0.0, theOverdraftLimit = 0.0):
super().__init__(theName, theBalance)
self.overdraftLimit = self.checkStartOverdraft(theOverdraftLimit)
self.overdraft = True
def __str__(self):
return '\nYour account name is: ' + self.name + self.printedBalance()
def setOverdraftLimit(self, newLimit):
"""
resets the overdraft limit, checks that this allowed by checking that it is a positive or zero
float. If the person is already in there overdraft it makes sure that the new limit is grater or equal
to the absolute value of their balance
Parameters:
newlimit - the new overdraft to be used, needs to be a float
Returns:
none
"""
if self.overdraft == True:
newLimit = self.checkStartOverdraft(newLimit)
if self.balance < 0:
if newLimit >= abs(self.balance):
self.overdraftLimit = newLimit
else:
print("To much money is owed to be able to change the overdraft limit to this")
else:
self.overdraftLimit = newLimit
else:
print("Your account does not support overdrafts")
def getAvailableBalance(self):
"""
Calculates and returns the total balance available and returns as a float
Parameters:
Nothing
Returns:
The total availble balance availble as a float
"""
return self.balance + self.overdraftLimit
def printedBalance(self):
"""
makes the information to be printed in printBalance in a single string to be easily used in
__str__ as well.
Parameters:
None
Returns:
x - the sting formation of the balance and overdarft information
"""
x = '\nYour balance is: ' + "£{:,.2f}".format(self.balance).replace('£-','-£')
if self.overdraft == True:
x += '\nYour avalible balance is:'+ "£{:,.2f}".format(self.getAvailableBalance()).replace('£-','-£')
x += '\nYour overdraft is '"£{:,.2f}".format(self.overdraftLimit).replace('£-','-£')
if self.balance < 0 :
x += '\nYour remaining overdraft is '"£{:,.2f}".format(self.overdraftLimit+self.balance).replace('£-','-£')
else:
x += '\nYour remaining overdraft is '"£{:,.2f}".format(self.overdraftLimit).replace('£-','-£')
return x
def printBalance(self):
"""
Prints the Balance avalible, as well as the overdraft availble
Parameters:
None
Returns:
Nothing
"""
print(self.printedBalance())
#I have not redone closeAccount as this issue was covered for both in the super class (BasicAccount)
|
# Digit symbols for base 62: values 0-9, then 10-35 as a-z, then 36-61 as A-Z.
base62_alphabet = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"

def base10_to_base62(number, accumulator):
    """Convert a non-negative integer to its base-62 string representation.

    Args:
        number: non-negative int to convert.
        accumulator: suffix of digits built so far; callers pass "".
    Returns:
        The base-62 form of `number` followed by `accumulator`.
    Raises:
        ValueError: if `number` is negative (the original recursed forever
        on negative input because floor division never reaches 0).
    """
    if number < 0:
        raise ValueError("number must be non-negative")
    quotient, remainder = divmod(number, 62)
    base62_remainder = base62_alphabet[remainder]
    if quotient == 0:
        return base62_remainder + accumulator
    return base10_to_base62(quotient, base62_remainder + accumulator)
|
# -*- coding: utf-8 -*-
import json
import scrapy
from locations.items import GeojsonPointItem
from locations.hours import OpeningHours
# US state and territory postal abbreviations (includes DC, the
# territories, and freely-associated states such as FM/MH/PW).
STATES = [
    'AL', 'AK', 'AS', 'AZ', 'AR', 'CA', 'CO', 'CT', 'DE', 'DC', 'FM', 'FL',
    'GA', 'GU', 'HI', 'ID', 'IL', 'IN', 'IA', 'KS', 'KY', 'LA', 'ME', 'MH',
    'MD', 'MA', 'MI', 'MN', 'MS', 'MO', 'MT', 'NE', 'NV', 'NH', 'NJ', 'NM',
    'NY', 'NC', 'ND', 'MP', 'OH', 'OK', 'OR', 'PW', 'PA', 'PR', 'RI', 'SC',
    'SD', 'TN', 'TX', 'UT', 'VT', 'VI', 'VA', 'WA', 'WV', 'WI', 'WY'
]
# Full weekday name -> two-letter code (the format OpeningHours expects).
# NOTE(review): STATES and DAY_MAPPING appear unused by the spider below —
# confirm before removing.
DAY_MAPPING = {
    'Monday': 'Mo',
    'Tuesday': 'Tu',
    'Wednesday': 'We',
    'Thursday': 'Th',
    'Friday': 'Fr',
    'Saturday': 'Sa',
    'Sunday': 'Su'
}
class JiffyLubeSpider(scrapy.Spider):
    """Spider that collects Jiffy Lube store locations from the
    jiffylube.com JSON API (index endpoint plus one detail request per
    store)."""
    name = "jiffylube"
    item_attributes = {'brand': "Jiffy Lube"}
    allowed_domains = ["www.jiffylube.com"]
    start_urls = (
        'https://www.jiffylube.com/api/locations',
    )

    def parse(self, response):
        # The index endpoint returns a JSON list of store stubs; each stub
        # carries a relative link to its detail endpoint.
        for entry in json.loads(response.text):
            detail_url = "https://www.jiffylube.com/api" + entry["_links"]["_self"]
            yield scrapy.Request(detail_url, callback=self.parse_store)

    def parse_store(self, response):
        # The detail endpoint returns a single JSON object for the store.
        store = json.loads(response.text)
        self_link = store["_links"]["_self"]
        yield GeojsonPointItem(
            ref=store["id"],
            addr_full=store["address"],
            city=store["city"],
            state=store["state"],
            postcode=store["postal_code"].strip(),
            country=store["country"],
            phone=store["phone_main"],
            lat=float(store["coordinates"]["latitude"]),
            lon=float(store["coordinates"]["longitude"]),
            website="https://www.jiffylube.com{}".format(self_link),
        )
|
import numpy as np
from nltk.tokenize import wordpunct_tokenize
import math
import collections
from collections import Counter
from models.classic.stopword import load_stopwords
class LMClassifierEx:
    """Unigram language-model text classifier.

    Builds one term-frequency model per class plus a shared background
    model, then scores documents by smoothed log-likelihood. Class 0 is
    treated as the "positive" class by predict()/get_score().
    """
    def __init__(self, tokenizer=wordpunct_tokenize, stemmer=None):
        # tokenizer: callable mapping a string to a list of tokens.
        self.tokenizer = tokenizer
        # Additive per-model priors (length n_lm + 1); set by tune_alpha().
        self.alpha_list = None
        # Total term count per class model (parallel to self.C).
        self.C_ctf = []
        # Term-frequency Counter per class model.
        self.C = []
        # Background model: total term count and term-frequency Counter.
        self.BG_ctf = None
        self.BG = None
        # Interpolation weight of the class model vs the background model.
        self.smoothing = 0.1
        self.stemmer = stemmer
        # If True, score all distinct tokens; otherwise only the top 10.
        self.fulltext = False
        self.supervised = False
        # Number of class language models; overwritten by build().
        self.n_lm = 2
    def build(self, lm_docs_list, bg_tf, bg_ctf):
        """Build the per-class and background language models.

        Args:
            lm_docs_list: list (one entry per class) of iterables of raw
                document strings.
            bg_tf: background term-frequency mapping (token -> count).
            bg_ctf: total token count of the background collection.
        """
        self.n_lm = len(lm_docs_list)
        stopwords = load_stopwords()
        def transform(counter):
            # Optionally fold keys through the stemmer, then drop
            # stopwords and tokens of length <= 3.
            if self.stemmer is None:
                new_tf = counter
            else:
                new_tf = Counter()
                for key in counter:
                    source = key
                    target = self.stemmer(key)
                    new_tf[target] += counter[source]
                counter = new_tf
            new_tf = Counter()
            for key in counter:
                if len(key) <= 3 or key in stopwords:
                    pass
                else:
                    new_tf[key] = counter[key]
            return new_tf
        def remove_stopword(counter):
            # NOTE(review): unused here, and uses a different length
            # threshold (< 3) than transform (<= 3) — confirm intent.
            new_tf = Counter()
            for key in counter:
                if len(key) < 3 or key in stopwords:
                    pass
                else:
                    new_tf[key] = counter[key]
            return new_tf
        self.BG = transform(bg_tf)
        self.BG_ctf = bg_ctf
        self.stopword = stopwords
        for lm_docs in lm_docs_list:
            c_tf = collections.Counter()
            for idx, s in enumerate(lm_docs):
                tokens = self.tokenizer(s)
                for token in tokens:
                    # Only count tokens also present in the background.
                    if token in bg_tf:
                        c_tf[token] += 1
            tf_dict = transform(c_tf)
            self.C.append(tf_dict)
            self.C_ctf.append(sum(tf_dict.values()))
    def tokenize(self, str):
        # Tokenize and optionally stem. NOTE: the parameter name shadows
        # the builtin `str`.
        tokens = self.tokenizer(str)
        if self.stemmer:
            tokens = list([self.stemmer(t) for t in tokens])
        return tokens
    def get_tf10(self, tokens):
        """Return the 10 most frequent non-stopword tokens (len > 2) as
        (token, count) pairs."""
        counter = Counter()
        for t in tokens:
            if t not in self.stopword and len(t) > 2:
                counter[t] += 1
        return counter.most_common(10)
    def log_likelihood_base(self, tokens):
        """Sum per-term log-likelihood vectors over the document.

        Returns an array of length n_lm + 1: one entry per class model
        plus a final background-model entry.
        """
        sum_likeli = np.array([0.0 for _ in range(self.n_lm+1)])
        if self.fulltext:
            for token in set(tokens):
                # term_likely returns scalar 0 for skipped tokens; numpy
                # broadcasting makes `+= 0` a no-op on the vector.
                s = self.term_likely(token)
                sum_likeli += s
        else:
            for token, _ in self.get_tf10(tokens):
                s = self.term_likely(token)
                sum_likeli += s
        return np.array(sum_likeli)
    def log_likelihood(self, tokens):
        """log_likelihood_base() shifted by the tuned alpha priors."""
        list_ll = self.log_likelihood_base(tokens)
        for i in range(self.n_lm+1):
            list_ll[i] += self.alpha_list[i]
        return list_ll
    def predict(self, data):
        """Return a bool array: True where class 0 out-scores all other
        models (including the background entry)."""
        y = []
        for idx, s in enumerate(data):
            tokens = self.tokenize(s)
            list_ll = self.log_likelihood(tokens)
            label = list_ll[0] - max(list_ll[1:]) > 0
            y.append(label)
        return np.array(y)
    def get_score(self, doc):
        """Margin of class 0 over the best competing model for *doc*."""
        tokens = self.tokenize(doc)
        ll = self.log_likelihood(tokens)
        return ll[0] - max(ll[1:])
    def tune_alpha(self, x, y):
        """Grid-search the additive priors on labelled data (x, y).

        NOTE(review): the searches below build 3-element alpha vectors,
        i.e. they assume n_lm == 2 — confirm before using more models.
        """
        # Pre-compute the per-document likelihood vectors once.
        vectors = []
        for idx, s in enumerate(x):
            tokens = self.tokenize(s)
            ll = self.log_likelihood_base(tokens)
            vectors.append((ll, y[idx]))
        def get_acc(alpha_list):
            # Accuracy of "class 0 wins" predictions under alpha_list.
            # (The loop variable `y` shadows the outer argument `y`.)
            p = 0
            n = 0
            for vector in vectors:
                v, y = vector
                z = alpha_list + v
                label = int(z[0] > max(z[1:]))
                if label == y:
                    p += 1
                else:
                    n += 1
            acc = p / (p + n)
            return acc
        # Coarse search over alpha_0 with the other priors fixed at 0.
        param = Counter()
        for k in range(-500, 500, 10):
            alpha_0 = k / 100
            self.alpha_list = np.array([alpha_0] + self.n_lm * [0])
            param[alpha_0] = get_acc(self.alpha_list)
        print(param)
        alpha_0 = param.most_common(1)[0][0]
        # Fine search over alpha_1 with the best alpha_0 fixed.
        param = Counter()
        for k in range(-500, 500, 2):
            alpha_1 = k / 100
            self.alpha_list = np.array([alpha_0, alpha_1, 0])
            param[alpha_1] = get_acc(self.alpha_list)
        print(param)
        alpha_1 = param.most_common(1)[0][0]
        # Fine re-search over alpha_0 with the best alpha_1 fixed.
        param = Counter()
        for k in range(-500, 500, 2):
            alpha_0 = k / 100
            self.alpha_list = np.array([alpha_0, alpha_1, 0])
            param[alpha_0] = get_acc(self.alpha_list)
        print(param)
        alpha_0 = param.most_common(1)[0][0]
        self.alpha_list = np.array([alpha_0, alpha_1, 0])
        print(self.alpha_list)
        print("Train acc : {}".format(param.most_common(1)[0][1]))
    def term_likely(self, token):
        """Per-token log-likelihoods under each model.

        Returns the scalar 0 to skip a token (stopword, or unseen in the
        background model), otherwise a list of n_lm + 1 log-probabilities
        (class models first, background last).
        """
        if token in self.stopword:
            return 0
        def count(LM, token):
            if token in LM:
                return LM[token]
            else:
                return 0
        tf_bg = count(self.BG, token)
        if tf_bg == 0:
            return 0
        # NOTE(review): BG_ctf is the raw collection count passed to
        # build(), while BG itself was filtered by transform() — so
        # P_w_BG is slightly underestimated; confirm this is intended.
        P_w_BG = tf_bg / self.BG_ctf
        if P_w_BG == 0:
            return 0
        assert P_w_BG > 0
        logBG = math.log(P_w_BG)
        list_likely = []
        for i in range(self.n_lm):
            tf_c = count(self.C[i], token)
            # Jelinek-Mercer style interpolation of the class model with
            # the background model.
            P_w_C = tf_c / self.C_ctf[i]
            logC = math.log(P_w_C * self.smoothing + P_w_BG * (1 - self.smoothing))
            list_likely.append(logC)
            assert (math.isnan(logC) == False)
        list_likely.append(logBG)
        return list_likely
|
import os
import eyed3
import hashlib
import time
from db import Master, getitem
# Tag every MP3 in `directory` with genre/comment data looked up from the
# Master table via db.getitem.
directory = 'C:/Users/jordan.oh/Music/music'
files = os.listdir(directory)
total = len(files)
fl = 1  # 1-based progress counter over files
for filename in files:
    path = os.path.join(directory, filename)
    print(path)
    file = eyed3.load(path=path)
    print('file loaded into eyed3')
    try:
        # DB hash key is sha256(title + first artist); the range key is
        # the raw "title + artist" string.
        artists = file.tag.artist
        if ',' in artists:
            artist = artists[0:artists.find(',')]
        else:
            artist = artists
        info = file.tag.title + artist
        hinfo = info.encode(encoding='UTF-8', errors='strict')
        hash = hashlib.sha256(hinfo)
        print('hash created')
    except Exception:
        # BUG FIX: was a bare `except:` — narrowed so Ctrl-C/SystemExit
        # still interrupt the run. Missing/partial tags skip the file.
        print(filename, 'could not get info', fl, '/', total)
        fl += 1
        continue
    at = 0
    # Retry the DB lookup/tagging up to 10 times; `at` is bumped in the
    # finally block, and successful paths set at = 10 to leave the loop.
    while at < 10:
        try:
            res = getitem(table='Master', hash=hash.hexdigest(), range=info, fields=['name', 'items', 'tags'])
            if res == 'not found in db':
                print(hash.hexdigest(), info, res, fl, '/', total)
                fl += 1
                break
            file.tag.genre = ' '.join(res['items'])
            if res['tags'][0] == 'Saved - todo':
                file.tag.comments.set(file.tag.comments[0].text + ' #Saved;')
            file.tag.save()
            print('tagged :', filename, file.tag.genre, fl, '/', total)
            fl += 1
            at = 10
        except Exception:
            # BUG FIX: was a bare `except:`. NOTE(review): this fallback
            # repeats the lookup without the 'tags' field and without the
            # comment tagging above — confirm the asymmetry is intended.
            time.sleep(1)
            print('slept attempt: ', at)
            res = getitem(table='Master', hash=hash.hexdigest(), range=info, fields=['name', 'items'])
            if res == 'not found in db':
                print(hash.hexdigest(), info, res)
                fl += 1
                break
            print(res)
            file.tag.genre = ' '.join(res['items'])
            # file.tag.comments = file.tag.comments + res['tags'][0]
            file.tag.save()
            print('tagged :', filename, file.tag.genre, fl, '/', total)
            fl += 1
            at = 10
        finally:
            at += 1
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Schema changes for the `formacion` app:

    - drops the `participante` FK from RevisionInterventoriaDocenteSoporte,
    - adds `registrado` flags to the docente / escuelatic revision models,
    - links both *Soporte models to their evidencia tables.
    """
    dependencies = [
        ('formacion', '0037_revisioninterventoriadocentesoporte_revisioninterventoriaescuelaticsoporte'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='revisioninterventoriadocentesoporte',
            name='participante',
        ),
        migrations.AddField(
            model_name='revisioninterventoriadocente',
            name='registrado',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='revisioninterventoriadocentesoporte',
            name='evidencia',
            field=models.ForeignKey(blank=True, to='formacion.EvidenciaDocentes', null=True),
        ),
        migrations.AddField(
            model_name='revisioninterventoriaescuelatic',
            name='registrado',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='revisioninterventoriaescuelaticsoporte',
            name='evidencia',
            field=models.ForeignKey(blank=True, to='formacion.EvidenciaEscuelaTic', null=True),
        ),
    ]
|
import tweepy
from tweepy import OAuthHandler
class Myauth:
    """Holds Twitter API credentials and builds tweepy auth/API handles.

    SECURITY: these OAuth credentials are hard-coded in source and must
    be treated as compromised — revoke them and load replacements from
    the environment or a secrets store instead of committing them.
    """
    consumer_key = 'O6SiTAkcuTLBahfSNaESbdjDb'
    consumer_secret = 'WsI4HkMKaKNE2BzHLep7BFckYl9d93onFFTkMqtZsxbn63JCSw'
    access_token = '2868107255-jyI0ASuGgzovt9wGfUMNm0Nsrlx6sM1nDMALrNT'
    access_secret = 'vLjG4JvjE7t27JnhqFEQiZXjawTplVXZImxEY5rfLOGK6'
    # NOTE: the handler is built at class-definition (import) time and is
    # shared by all instances of Myauth.
    auth = OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_secret)
    def get_auth(self):
        # Return the shared OAuthHandler.
        return self.auth
    def get_api(self):
        # Return a tweepy API client bound to the shared auth handler.
        api = tweepy.API(self.auth)
        return api
"""core URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from account import views
urlpatterns = [
    path('admin/', admin.site.urls),
    # Landing page and authentication.
    path('', views.index, name='homepage'),
    path('login/', views.login_view, name='login'),
    path('logout/', views.logout_view, name='logout'),
    # Ticket lifecycle: create, inspect, edit, and state transitions.
    path('add_ticket/', views.add_ticket_view, name='addticket'),
    path('ticket/<str:item_id>', views.ticket_view, name='ticket_detail'),
    path('ticket/edit_ticket/<str:item_id>', views.edit_ticket_view, name='edit_ticket'),
    path('ticket/assign_ticket/<str:item_id>', views.assign_ticket_view, name='assign_ticket'),
    path('ticket/return_ticket/<str:item_id>', views.return_ticket_view, name='return_ticket'),
    path('ticket/finish_ticket/<str:item_id>', views.finished_ticket_view, name='finish_ticket'),
    path('ticket/invalid_ticket/<str:item_id>', views.invalid_ticket_view, name='invalid_ticket'),
    # User profile pages.
    path('profile/<str:item_id>', views.user_view, name='user_profile'),
]
|
import sys
import argparse
from psp import Pv
from PyQt4 import QtCore, QtGui
import time
NBeamSeq = 16  # number of beam-destination bits in the mask PVs
# Choice lists for the PV combo-box widgets defined below.
dstsel = ['Include','DontCare']
bmsel = ['D%u'%i for i in range(NBeamSeq)]
evtsel = ['Fixed Rate','AC Rate','Sequence']
fixedRates = ['929kHz','71.4kHz','10.2kHz','1.02kHz','102Hz','10.2Hz','1.02Hz']
acRates = ['60Hz','30Hz','10Hz','5Hz','1Hz']
acTS = ['TS%u'%(i+1) for i in range(6)]
seqIdxs = ['s%u'%i for i in range(18)]
seqBits = ['b%u'%i for i in range(32)]
class PvDisplay(QtGui.QLabel):
    """Read-only label that shows a PV value pushed via ``valueSet``."""
    valueSet = QtCore.pyqtSignal(QtCore.QString,name='valueSet')

    def __init__(self):
        super(PvDisplay, self).__init__("-")
        self.setMinimumWidth(100)

    def connect_signal(self):
        self.valueSet.connect(self.setValue)

    def setValue(self, value):
        # Slot: mirror the emitted string into the label text.
        self.setText(value)
class PvLabel:
    """A labelled, read-only PV display row added to a parent layout.

    Monitors `pvbase+name` (and optionally `pvbase+dName`) and shows the
    value(s) in a PvDisplay; integers render as "dec (0xhex)".
    """
    def __init__(self, parent, pvbase, name, dName=None, isInt=False):
        layout = QtGui.QHBoxLayout()
        label = QtGui.QLabel(name)
        label.setMinimumWidth(100)
        layout.addWidget(label)
        #layout.addStretch()
        self.__display = PvDisplay()
        self.__display.connect_signal()
        layout.addWidget(self.__display)
        parent.addLayout(layout)
        pvname = pvbase+name
        print pvname
        self.pv = Pv.Pv(pvname)
        self.pv.monitor_start()
        self.pv.add_monitor_callback(self.update)
        # Optional secondary PV whose value is shown in brackets.
        if dName is not None:
            dPvName = pvbase+dName
            self.dPv = Pv.Pv(dPvName)
            self.dPv.monitor_start()
            self.dPv.add_monitor_callback(self.update)
        else:
            self.dPv = None
        self.isInt = isInt
    def update(self, err):
        # Monitor callback: format the current value(s) and emit them to
        # the display via the valueSet signal.
        q = self.pv.value
        if self.dPv is not None:
            dq = self.dPv.value
        else:
            dq = None
        if err is None:
            s = QtCore.QString('fail')
            try:
                if self.isInt:
                    # Scalar int: "dec (0xhex)", plus "[dec (0xhex)]" for dPv.
                    s = QtCore.QString("%1 (0x%2)").arg(QtCore.QString.number(long(q),10)).arg(QtCore.QString.number(long(q),16))
                    if dq is not None:
                        s = s + QtCore.QString(" [%1 (0x%2)]").arg(QtCore.QString.number(long(dq),10)).arg(QtCore.QString.number(long(dq),16))
                else:
                    s = QtCore.QString.number(q)
                    if dq is not None:
                        s = s + QtCore.QString(" [%1]").arg(QtCore.QString.number(dq))
            except:
                # Value is array-like (conversion above failed): render one
                # entry per element, eight entries per line.
                v = ''
                for i in range(len(q)):
                    #v = v + ' %f'%q[i]
                    v = v + ' ' + QtCore.QString.number(q[i])
                    if dq is not None:
                        v = v + QtCore.QString(" [%1]").arg(QtCore.QString.number(dq[i]))
                    #v = v + ' [' + '%f'%dq[i] + ']'
                    if ((i%8)==7):
                        v = v + '\n'
                s = QtCore.QString(v)
            self.__display.valueSet.emit(s)
        else:
            print err
class PvPushButton(QtGui.QPushButton):
    """Button that writes 1 to its PV whenever it is clicked."""
    valueSet = QtCore.pyqtSignal(QtCore.QString,name='valueSet')

    def __init__(self, pvname, label):
        super(PvPushButton, self).__init__(label)
        self.setMaximumWidth(25) # Revisit
        self.pv = Pv.Pv(pvname)
        self.clicked.connect(self.buttonClicked)

    def buttonClicked(self):
        # The written value is immaterial; the write itself is the trigger.
        self.pv.put(1)
class CheckBox(QtGui.QCheckBox):
    """Check box whose checked state is driven by its ``valueSet`` signal."""
    valueSet = QtCore.pyqtSignal(int, name='valueSet')

    def __init__(self, label):
        super(CheckBox, self).__init__(label)

    def connect_signal(self):
        self.valueSet.connect(self.boxClicked)

    def boxClicked(self, state):
        # Slot: mirror the emitted state into the widget.
        self.setChecked(state)
class PvCheckBox(CheckBox):
    """Check box bidirectionally bound to a PV: clicks write the PV, and
    monitor updates reflect the PV state back into the box."""
    def __init__(self, pvname, label):
        super(PvCheckBox, self).__init__(label)
        self.connect_signal()
        self.clicked.connect(self.pvClicked)
        self.pv = Pv.Pv(pvname)
        self.pv.monitor_start()
        self.pv.add_monitor_callback(self.update)
    def pvClicked(self):
        # User toggled the box: write the new state to the PV.
        q = self.isChecked()
        self.pv.put(q)
        #print "PvCheckBox.clicked: pv %s q %x" % (self.pv.name, q)
    def update(self, err):
        #print "PvCheckBox.update: pv %s, i %s, v %x, err %s" % (self.pv.name, self.text(), self.pv.value, err)
        # Monitor callback: mirror the PV state into the box, but only
        # when it actually changed (avoids signal feedback loops).
        q = self.pv.value != 0
        if err is None:
            if q != self.isChecked(): self.valueSet.emit(q)
        else:
            print err
class PvTextDisplay(QtGui.QLineEdit):
    """Line edit whose text tracks its ``valueSet`` signal."""
    valueSet = QtCore.pyqtSignal(QtCore.QString,name='valueSet')

    def __init__(self, label):
        super(PvTextDisplay, self).__init__("-")

    def connect_signal(self):
        self.valueSet.connect(self.setValue)

    def setValue(self, value):
        # Slot: mirror the emitted string into the edit box.
        self.setText(value)
class PvComboDisplay(QtGui.QComboBox):
    """Combo box whose current index tracks its ``valueSet`` signal."""
    valueSet = QtCore.pyqtSignal(QtCore.QString,name='valueSet')

    def __init__(self, choices):
        super(PvComboDisplay, self).__init__()
        self.addItems(choices)

    def connect_signal(self):
        self.valueSet.connect(self.setValue)

    def setValue(self, value):
        # Slot: select the emitted index.
        self.setCurrentIndex(value)
class PvTxt(PvTextDisplay):
def __init__(self, pv, label):
super(PvTxt, self).__init__(label)
self.connect_signal()
self.pv = Pv.Pv(pv)
self.pv.monitor_start()
print 'Monitor started '+pv
self.pv.add_monitor_callback(self.update)
def update(self, err):
print 'Update '+pv
q = self.pv.value
if err is None:
s = QtCore.QString(q)
self.valueSet.emit(s)
else:
print err
def setPv(self):
pass
class PvEditTxt(PvTextDisplay):
def __init__(self, pv, label):
super(PvEditTxt, self).__init__(label)
self.connect_signal()
self.editingFinished.connect(self.setPv)
self.pv = Pv.Pv(pv)
self.pv.monitor_start()
print 'Monitor started '+pv
self.pv.add_monitor_callback(self.update)
def update(self, err):
print 'Update '+pv
q = self.pv.value
if err is None:
s = QtCore.QString(q)
self.valueSet.emit(s)
else:
print err
def setPv(self):
pass
class PvEditInt(PvEditTxt):
def __init__(self, pv, label):
super(PvEditInt, self).__init__(pv, label)
def setPv(self):
value = self.text().toInt()
self.pv.put(value)
def update(self, err):
print 'Update '+pv
q = self.pv.value
if err is None:
s = QtCore.QString('fail')
try:
s = QtCore.QString("%1").arg(QtCore.QString.number(long(q),10))
except:
v = ''
for i in range(len(q)):
v = v + ' %f'%q[i]
s = QtCore.QString(v)
self.valueSet.emit(s)
else:
print err
class PvInt(PvEditInt):
    """Read-only integer display (disabled PvEditInt)."""
    def __init__(self, pv, label=''):
        # BUG FIX: PvEditInt.__init__ requires (pv, label); the old call
        # passed only pv and raised TypeError on construction. The new
        # optional parameter keeps one-argument callers working.
        super(PvInt, self).__init__(pv, label)
        self.setEnabled(False)
class PvEditHML(PvEditTxt):
    """Editable display mapping an H/M/L character string to packed
    2-bit fields of an integer PV.

    NOTE(review): `frLMH` and `toLMH` (the char<->bits tables) are not
    defined anywhere in this file — confirm they are provided elsewhere,
    otherwise setPv()/update() raise NameError.
    """
    def __init__(self, pv, label):
        super(PvEditHML, self).__init__(pv, label)
    def setPv(self):
        # Pack each character into 2 bits, most-significant field first.
        value = self.text()
        try:
            q = 0
            for i in range(len(value)):
                q |= frLMH[str(value[i])] << (2 * (len(value) - 1 - i))
            self.pv.put(q)
        except KeyError:
            print "Invalid character in string:", value
    def update(self, err):
        # Unpack the 2-bit fields back into an H/M/L string.
        q = self.pv.value
        if err is None:
            v = toLMH[q & 0x3]
            q >>= 2
            while q:
                v = toLMH[q & 0x3] + v
                q >>= 2
            s = QtCore.QString(v)
            self.valueSet.emit(s)
        else:
            print err
class PvHML(PvEditHML):
    """Read-only H/M/L display (disabled PvEditHML)."""
    def __init__(self, pv, label):
        super(PvHML, self).__init__(pv, label)
        self.setEnabled(False)
class PvEditDbl(PvEditTxt):
    """Editable floating-point display bound to a PV."""
    def __init__(self, pv, label):
        super(PvEditDbl, self).__init__(pv, label)
    def setPv(self):
        # NOTE(review): PyQt4 QString.toDouble() returns a (value, ok)
        # tuple — confirm Pv.put handles it as intended.
        value = self.text().toDouble()
        self.pv.put(value)
    def update(self, err):
        q = self.pv.value
        if err is None:
            s = QtCore.QString('fail')
            try:
                s = QtCore.QString.number(q)
            except:
                # Array-like value: render one entry per element.
                v = ''
                for i in range(len(q)):
                    v = v + ' %f'%q[i]
                s = QtCore.QString(v)
            self.valueSet.emit(s)
        else:
            print err
class PvDbl(PvEditDbl):
    """Read-only floating-point display (disabled PvEditDbl)."""
    def __init__(self, pv, label=''):
        # BUG FIX: PvEditDbl.__init__ requires (pv, label); the old call
        # passed only pv and raised TypeError on construction. The new
        # optional parameter keeps one-argument callers working.
        super(PvDbl, self).__init__(pv, label)
        self.setEnabled(False)
class PvDblArrayW(QtGui.QLabel):
    """Label cell showing one element of a double-array PV."""
    valueSet = QtCore.pyqtSignal(QtCore.QString,name='valueSet')

    def __init__(self):
        super(PvDblArrayW, self).__init__('-')
        self.connect_signal()

    def connect_signal(self):
        self.valueSet.connect(self.setValue)

    def setValue(self, value):
        # Slot: mirror the emitted string into the label text.
        self.setText(value)
class PvDblArray:
    """Fans a double-array PV out to a list of PvDblArrayW widgets."""
    def __init__(self, pv, widgets):
        self.widgets = widgets
        self.pv = Pv.Pv(pv)
        self.pv.monitor_start()
        print 'Monitor started '+pv
        self.pv.add_monitor_callback(self.update)
    def update(self, err):
        # Monitor callback: push each element, formatted to 4 decimal
        # places, into its widget.
        q = self.pv.value
        if err is None:
            for i in range(len(q)):
                self.widgets[i].valueSet.emit(QtCore.QString.number(q[i],'f',4))
        else:
            print err
class PvEditCmb(PvComboDisplay):
    """Combo box bidirectionally bound to an enum-like PV."""
    def __init__(self, pvname, choices):
        super(PvEditCmb, self).__init__(choices)
        self.connect_signal()
        self.currentIndexChanged.connect(self.setValue)
        self.pv = Pv.Pv(pvname)
        self.pv.monitor_start()
        self.pv.add_monitor_callback(self.update)
    def setValue(self):
        # User picked an entry: write its index to the PV.
        value = self.currentIndex()
        self.pv.put(value)
    def update(self, err):
        # Monitor callback: reflect the PV value into the combo box.
        # NOTE(review): valueSet is declared as a QString signal but is
        # emitted with the raw (likely int) PV value here — confirm.
        q = self.pv.value
        if err is None:
            self.setCurrentIndex(q)
            self.valueSet.emit(q)
        else:
            print err
class PvCmb(PvEditCmb):
    """Read-only variant of PvEditCmb (widget disabled)."""
    def __init__(self, pvname, choices):
        super(PvCmb, self).__init__(pvname, choices)
        self.setEnabled(False)
class PvEvtTab(QtGui.QStackedWidget):
    """Stacked option pages for the three event-select modes; the visible
    page follows the selection of the companion evtcmb combo box."""
    def __init__(self, pvname, evtcmb):
        super(PvEvtTab,self).__init__()
        # Page 0: fixed-rate selection.
        self.addWidget(PvEditCmb(pvname+'_FixedRate', fixedRates))
        # Page 1: AC rate plus timeslot.
        ac_page = QtGui.QWidget()
        ac_layout = QtGui.QVBoxLayout()
        ac_layout.addWidget(PvEditCmb(pvname+'_ACRate', acRates))
        ac_layout.addWidget(PvEditCmb(pvname+'_ACTimeslot', acTS))
        ac_page.setLayout(ac_layout)
        self.addWidget(ac_page)
        # Page 2: sequence index plus sequence bit.
        seq_page = QtGui.QWidget()
        seq_layout = QtGui.QVBoxLayout()
        seq_layout.addWidget(PvEditCmb(pvname+'_Sequence', seqIdxs))
        seq_layout.addWidget(PvEditCmb(pvname+'_SeqBit', seqBits))
        seq_page.setLayout(seq_layout)
        self.addWidget(seq_page)
        evtcmb.currentIndexChanged.connect(self.setCurrentIndex)
class PvEditEvt(QtGui.QWidget):
    """Event-select editor: a mode combo box stacked above the per-mode
    option pages (PvEvtTab)."""
    def __init__(self, pvname, idx):
        super(PvEditEvt, self).__init__()
        mode_cmb = PvEditCmb(pvname, evtsel)
        column = QtGui.QVBoxLayout()
        column.addWidget(mode_cmb)
        column.addWidget(PvEvtTab(pvname, mode_cmb))
        self.setLayout(column)
class PvDstTab(QtGui.QWidget):
    """Grid of NBeamSeq destination check boxes writing a bit-mask PV."""
    def __init__(self, pvname):
        super(PvDstTab,self).__init__()
        self.pv = Pv.Pv(pvname)
        self.chkBox = []
        grid = QtGui.QGridLayout()
        # Four label/box pairs per row.
        for bit in range(NBeamSeq):
            row = bit // 4
            col = 2 * (bit % 4)
            grid.addWidget(QtGui.QLabel('D%d'%bit), row, col)
            box = QtGui.QCheckBox()
            grid.addWidget(box, row, col + 1)
            box.clicked.connect(self.update)
            self.chkBox.append(box)
        self.setLayout(grid)
    def update(self):
        # Rebuild the mask from the current check-box states and write it.
        mask = 0
        for bit in range(NBeamSeq):
            if self.chkBox[bit].isChecked():
                mask |= (1 << bit)
        self.pv.put(mask)
class PvEditDst(QtGui.QWidget):
    """Destination editor: an Include/DontCare combo box above the
    destination-mask check-box grid."""
    def __init__(self, pvname, idx):
        super(PvEditDst, self).__init__()
        mode_cmb = PvEditCmb(pvname, dstsel)
        column = QtGui.QVBoxLayout()
        column.addWidget(mode_cmb)
        column.addWidget(PvDstTab(pvname+'_Mask'))
        self.setLayout(column)
class PvEditTS(PvEditCmb):
    """Combo box over timeslot indices 0-15."""
    def __init__(self, pvname, idx):
        super(PvEditTS, self).__init__(pvname, ['%u'%i for i in range(16)])
class PvInput:
    """A labelled row of `count` Pv widgets added to a parent layout."""
    def __init__(self, widget, parent, pvbase, name, count=1):
        pvname = pvbase+name
        print pvname
        layout = QtGui.QHBoxLayout()
        label = QtGui.QLabel(name)
        label.setMinimumWidth(100)
        layout.addWidget(label)
        #layout.addStretch
        if count == 1:
            layout.addWidget(widget(pvname, ''))
        else:
            # Numbered PVs: one widget per suffix 0..count-1.
            for i in range(count):
                layout.addWidget(widget(pvname+'%d'%i, QtCore.QString.number(i)))
        #layout.addStretch
        parent.addLayout(layout)
# Thin factory helpers: each builds a labelled PvInput row using the
# matching Pv widget class.
def LblPushButton(parent, pvbase, name, count=1):
    return PvInput(PvPushButton, parent, pvbase, name, count)
def LblCheckBox(parent, pvbase, name, count=1):
    return PvInput(PvCheckBox, parent, pvbase, name, count)
def LblEditInt(parent, pvbase, name, count=1):
    return PvInput(PvEditInt, parent, pvbase, name, count)
def LblEditHML(parent, pvbase, name, count=1):
    return PvInput(PvEditHML, parent, pvbase, name, count)
def LblEditTS(parent, pvbase, name, count=1):
    return PvInput(PvEditTS, parent, pvbase, name, count)
def LblEditEvt(parent, pvbase, name, count=1):
    return PvInput(PvEditEvt, parent, pvbase, name, count)
def LblEditDst(parent, pvbase, name, count=1):
    return PvInput(PvEditDst, parent, pvbase, name, count)
class Ui_MainWindow(object):
    """Builds the XPM monitor window: link-label rows plus the dead-time
    fraction cells fed by the partition's DeadFLnk array PV."""
    def setupUi(self, MainWindow, base, partn, shelf):
        MainWindow.setObjectName(QtCore.QString.fromUtf8("MainWindow"))
        self.centralWidget = QtGui.QWidget(MainWindow)
        self.centralWidget.setObjectName("centralWidget")
        pvbase = base+':XPM:'+shelf+':'
        #ppvbase = pvbase+partn+':'
        ppvbase = base+':PART:'+partn+':'
        print 'pvbase : '+pvbase
        print 'ppvbase: '+ppvbase
        grid = QtGui.QGridLayout()
        # One display cell per DeadFLnk element (32 total).
        textWidgets = []
        for i in range(32):
            textWidgets.append( PvDblArrayW() )
        # Need to wait for pv.get()
        time.sleep(2)
        # Left column pair: links 0-13.
        for i in range(14):
            pv = Pv.Pv(pvbase+'LinkLabel%d'%i)
            grid.addWidget( QtGui.QLabel(pv.get()), i, 0 )
            grid.addWidget( textWidgets[i], i, 1 )
        # Right column pair: links 16-20.
        for j in range(16,21):
            i = j-16
            pv = Pv.Pv(pvbase+'LinkLabel%d'%j)
            grid.addWidget( QtGui.QLabel(pv.get()), i, 2 )
            grid.addWidget( textWidgets[j], i, 3 )
        # Inhibit counters (elements 28-31) below them.
        for j in range(28,32):
            i = j-22
            grid.addWidget( QtGui.QLabel('INH-%d'%(j-28)), i, 2 )
            grid.addWidget( textWidgets[j], i, 3 )
        self.deadflnk = PvDblArray( ppvbase+'DeadFLnk', textWidgets )
        self.centralWidget.setLayout(grid)
        self.centralWidget.resize(240,340)
        title = 'XPM:'+shelf+'\tPART:'+partn
        MainWindow.setWindowTitle(title)
        MainWindow.resize(240,340)
        MainWindow.setCentralWidget(self.centralWidget)
if __name__ == '__main__':
    # Entry point: parse the PV base / partition / shelf arguments and
    # show the monitor window.
    print QtCore.PYQT_VERSION_STR
    parser = argparse.ArgumentParser(description='simple pv monitor gui')
    parser.add_argument("base", help="pv base to monitor", default="DAQ:LAB2")
    parser.add_argument("partition", help="partition to monitor")
    parser.add_argument("shelf", help="shelf to monitor")
    args = parser.parse_args()
    app = QtGui.QApplication([])
    MainWindow = QtGui.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow,args.base,args.partition,args.shelf)
    MainWindow.updateGeometry()
    MainWindow.show()
    sys.exit(app.exec_())
|
import nltk
import random
from nltk import word_tokenize
import pickle
from nltk.classify.scikitlearn import SklearnClassifier
from sklearn.naive_bayes import MultinomialNB, GaussianNB, BernoulliNB
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.svm import SVC, LinearSVC, NuSVC
from nltk.classify import ClassifierI
from statistics import mode
# Raw short-review corpora, one review per line. `with` guarantees the
# handles are closed (the old bare open(...).read() leaked them).
with open("Dataset/short_reviews/positive.txt", "r", encoding='Windows-1252') as f:
    short_pos = f.read()
with open("Dataset/short_reviews/negative.txt", "r", encoding='Windows-1252') as f:
    short_neg = f.read()
all_words = []
documents = []
# j is adject, r is adverb, and v is verb
# allowed_word_types = ["J", "R", "V"]
allowed_word_types = ["J"]
# Collect allowed-POS words from the positive reviews.
for p in short_pos.split('\n'):
    documents.append((p, "pos"))
    words = word_tokenize(p)
    pos = nltk.pos_tag(words)
    for w in pos:
        if w[1][0] in allowed_word_types:
            all_words.append(w[0].lower())
# Same for the negative reviews.
for p in short_neg.split('\n'):
    documents.append((p, "neg"))
    words = word_tokenize(p)
    pos = nltk.pos_tag(words)
    for w in pos:
        if w[1][0] in allowed_word_types:
            all_words.append(w[0].lower())
# save_documents = open("pickled_algos/documents.pickle","wb")
# pickle.dump(documents, save_documents)
# save_documents.close()
# NOTE(review): this load overwrites the `documents` list built above —
# confirm whether the tokenization loops are still needed.
with open("pickled_algos/documents.pickle", "rb") as p_f:
    documents = pickle.load(p_f)
all_words = nltk.FreqDist(all_words)
word_features = list(all_words.keys())[:5000]
# save_word_features = open("pickled_algos/word_features5k.pickle","wb")
# pickle.dump(word_features, save_word_features)
# save_word_features.close()
with open("pickled_algos/word_features5k.pickle", "rb") as p_f:
    word_features = pickle.load(p_f)
def find_features(document):
    """Return a {feature_word: present_bool} dict for *document*.

    Membership is tested against the module-level `word_features`
    vocabulary (top-5000 words).
    """
    # Set lookup is O(1) per feature; the old list membership made this
    # O(len(words)) per feature word.
    words = set(word_tokenize(document))
    features = {}
    for w in word_features:
        features[w] = (w in words)
    return features
#print((find_features((movie_reviews.words('neg/cv000_29416.txt')))))
# Build (features, label) pairs for every document, then shuffle so the
# train/test split below is not ordered pos-before-neg.
featuresets = [(find_features(rev), category) for (rev, category) in documents]
print(len(featuresets)) #10644
random.shuffle(featuresets)
"""
Naive Bayes
"""
training_set = featuresets[:10000]
testing_set = featuresets[10000:]
# classifier = nltk.NaiveBayesClassifier.train(training_set)
#
# print("Naive Bayes Algo accuracy", (nltk.classify.accuracy(classifier, testing_set))*100)
# classifier.show_most_informative_features(15)
#
# """
# Pickle save classifier
# """
# save_classifier = open("pickled_algos/Twitter_naivebayes.pickle", "wb")
# pickle.dump(classifier, save_classifier)
# save_classifier.close()
# Load the pre-trained NLTK Naive Bayes classifier; `with` guarantees
# the handle is closed even if the load fails (the old open/close pair
# leaked it on error).
with open("pickled_algos/Twitter_naivebayes.pickle", "rb") as classifier_f:
    Naivebayes_classifier = pickle.load(classifier_f)
# print("Naive Bayes Algo accuracy", (nltk.classify.accuracy(Naivebayes_classifier, testing_set))*100)
# classifier.show_most_informative_features(15)
"""
Scikit-Learn incorporation
"""
# MNB_classifier = SklearnClassifier(MultinomialNB())
# MNB_classifier.train(training_set)
# print("MNB_classifier accuracy", (nltk.classify.accuracy(MNB_classifier, testing_set))*100)
#
# save_classifier = open("pickled_algos/MNB_classifier5k.pickle","wb")
# pickle.dump(MNB_classifier, save_classifier)
# save_classifier.close()
classifier_f = open("pickled_algos/MNB_classifier5k.pickle","rb")
MNB_classifier = pickle.load(classifier_f)
classifier_f.close()
# BernoulliNB_classifier = SklearnClassifier(BernoulliNB())
# BernoulliNB_classifier.train(training_set)
# print("BernoulliNB_classifier accuracy", (nltk.classify.accuracy(BernoulliNB_classifier, testing_set))*100)
#
#
# save_classifier = open("pickled_algos/BernoulliNB_classifier5k.pickle","wb")
# pickle.dump(BernoulliNB_classifier, save_classifier)
# save_classifier.close()
classifier_f = open("pickled_algos/BernoulliNB_classifier5k.pickle","rb")
BernoulliNB_classifier = pickle.load(classifier_f)
classifier_f.close()
# SGDClassifier_classifier = SklearnClassifier(SGDClassifier())
# SGDClassifier_classifier.train(training_set)
# print("SGDClassifier_classifier accuracy", (nltk.classify.accuracy(SGDClassifier_classifier, testing_set))*100)
#
# save_classifier = open("pickled_algos/SGDClassifier_classifier5k.pickle","wb")
# pickle.dump(SGDClassifier_classifier, save_classifier)
# save_classifier.close()
classifier_f = open("pickled_algos/SGDClassifier_classifier5k.pickle","rb")
SGDClassifier_classifier = pickle.load(classifier_f)
classifier_f.close()
# LinearSVC_classifier = SklearnClassifier(LinearSVC())
# LinearSVC_classifier.train(training_set)
# print("LinearSVC_classifier accuracy", (nltk.classify.accuracy(LinearSVC_classifier, testing_set))*100)
#
# save_classifier = open("pickled_algos/LinearSVC_classifier5k.pickle","wb")
# pickle.dump(LinearSVC_classifier, save_classifier)
# save_classifier.close()
classifier_f = open("pickled_algos/LinearSVC_classifier5k.pickle","rb")
LinearSVC_classifier = pickle.load(classifier_f)
classifier_f.close()
"""
Combining Algos with a Vote
"""
class VoteClassifier(ClassifierI):
    """Majority-vote ensemble over several NLTK-style classifiers."""
    def __init__(self, *classifiers):
        self.classifiers = classifiers

    def _votes(self, features):
        # One label vote per underlying classifier.
        return [clf.classify(features) for clf in self.classifiers]

    def classify(self, features):
        return mode(self._votes(features))

    def confidence(self, features):
        # Fraction of classifiers that agree with the winning label.
        votes = self._votes(features)
        winning_count = votes.count(mode(votes))
        return winning_count / len(votes)
# Majority-vote ensemble over the five pre-trained classifiers loaded above.
voted_classifiers = VoteClassifier(Naivebayes_classifier,MNB_classifier,BernoulliNB_classifier,
SGDClassifier_classifier,LinearSVC_classifier)
# print("voted_classifiers accuracy", (nltk.classify.accuracy(voted_classifiers, testing_set))*100)
# print("Classification:", voted_classifiers.classify(testing_set[0][0]), "Confidence {:f} %".format(voted_classifiers.confidence(testing_set[0][0])*100))
def sentiment(text):
    """Return (label, confidence) for *text* via the voting ensemble."""
    feats = find_features(text)
    return (voted_classifiers.classify(feats),voted_classifiers.confidence(feats))
#!/usr/bin/env python
import ROOT
import os
import argparse
import shutil
from StopsDilepton.tools.user import analysis_results, plot_directory
# Command-line options for the impact-study driver.
argParser = argparse.ArgumentParser(description = "Argument parser")
argParser.add_argument('--logLevel', action='store', default='INFO', nargs='?', choices=['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG', 'TRACE', 'NOTSET'], help="Log level for logging")
argParser.add_argument("--signal", action='store', default='T2tt', nargs='?', choices=["T2tt","TTbarDM","ttHinv"], help="Which signal?")
argParser.add_argument("--removeDir", action='store_true', help="Remove the directory in the combine release after study is done?")
argParser.add_argument("--expected", action='store_true', help="Use expected results?")
# NOTE(review): the help strings for --observed and --combined look
# copy-pasted from --expected — confirm the intended wording.
argParser.add_argument("--observed", action='store_true', help="Use expected results?")
argParser.add_argument("--combined", action='store_true', help="Use expected results?")
argParser.add_argument("--signalInjection",action='store_true', help="Inject some signal?")
argParser.add_argument("--useTxt", action='store_true', help="Use txt based card files?")
argParser.add_argument("--cores", action='store', default=8, nargs='?', help="Run on n cores in parallel")
argParser.add_argument("--year", action='store', default=2017, nargs='?', help="Which year?")
argParser.add_argument("--only", action='store', default=None, nargs='?', help="pick only one masspoint?")
argParser.add_argument("--bkgOnly", action='store_true', help="Allow no signal?")
args = argParser.parse_args()
# Logging
# NOTE: the module aliases `logger` / `logger_rt` are immediately rebound
# to the logger instances they produce.
import StopsDilepton.tools.logger as logger
logger = logger.get_logger(args.logLevel, logFile = None )
import RootTools.core.logger as logger_rt
logger_rt = logger_rt.get_logger(args.logLevel, logFile = None )
year = int(args.year)
def wrapper(s):
logger.info("Processing mass point %s"%s.name)
cardFile = "%s_combination_shapeCard.txt"%s.name if not args.useTxt else "%s.txt"%s.name
#analysis_results = '/afs/hephy.at/work/p/phussain/StopsDileptonLegacy/results/v2/'
sSubDir = 'expected' if args.expected else 'observed'
if args.signalInjection: sSubDir += '_signalInjected'
cardFilePath = "%s/%s/fitAll/cardFiles/%s/%s/%s"%(analysis_results, args.year if not args.combined else 'COMBINED', args.signal, sSubDir, cardFile)
#cardFilePath = "%s/%s/controlAll/cardFiles/%s/%s/%s"%(analysis_results, args.year if not args.combined else 'COMBINED', args.signal, 'expected' if args.expected else 'observed', cardFile)
#cardFilePath = "%s/%s/signalOnly/cardFiles/%s/%s/%s"%(analysis_results, args.year if not args.combined else 'COMBINED', args.signal, 'expected' if args.expected else 'observed', cardFile)
#cardFilePath = "%s/%s/controlDYVV/cardFiles/%s/%s/%s"%(analysis_results, args.year if not args.combined else 'COMBINED', args.signal, 'expected' if args.expected else 'observed', cardFile)
combineDirname = os.path.join(os.path.abspath('.'), s.name)
print cardFilePath
logger.info("Creating %s"%combineDirname)
if not os.path.isdir(combineDirname): os.makedirs(combineDirname)
shutil.copyfile(cardFilePath,combineDirname+'/'+cardFile)
if not args.combined: shutil.copyfile(cardFilePath.replace('shapeCard.txt','shape.root'),combineDirname+'/'+cardFile.replace('shapeCard.txt','shape.root'))
shutil.copyfile(cardFilePath.replace('shapeCard.txt', 'shapeCard.root'),combineDirname+'/'+cardFile.replace('shapeCard.txt', 'shapeCard.root'))
prepWorkspace = "text2workspace.py %s -m 125"%cardFile
if args.bkgOnly:
robustFit = "combineTool.py -M Impacts -d %s_shapeCard.root -m 125 -t -1 --expectSignal 0 --doInitialFit --robustFit 1 --rMin -10 --rMax 10"%s.name
impactFits = "combineTool.py -M Impacts -d %s_shapeCard.root -m 125 -t -1 --expectSignal 0 --robustFit 1 --doFits --parallel %s --rMin -10 --rMax 10"%(s.name,str(args.cores))
elif args.observed:
robustFit = "combineTool.py -M Impacts -d %s_shapeCard.root -m 125 --doInitialFit --robustFit 1 --rMin -10 --rMax 10"%s.name
impactFits = "combineTool.py -M Impacts -d %s_shapeCard.root -m 125 --robustFit 1 --doFits --parallel %s --rMin -10 --rMax 10"%(s.name,str(args.cores))
else:
robustFit = "combineTool.py -M Impacts -d %s_shapeCard.root -m 125 -t -1 --expectSignal 1 --doInitialFit --robustFit 1 --rMin -10 --rMax 10"%s.name
impactFits = "combineTool.py -M Impacts -d %s_shapeCard.root -m 125 -t -1 --expectSignal 1 --robustFit 1 --doFits --parallel %s --rMin -10 --rMax 10"%(s.name,str(args.cores))
extractImpact = "combineTool.py -M Impacts -d %s_shapeCard.root -m 125 -o impacts.json"%s.name
plotImpacts = "plotImpacts.py -i impacts.json -o impacts"
combineCommand = "cd %s;%s;%s;%s;%s;%s"%(combineDirname,prepWorkspace,robustFit,impactFits,extractImpact,plotImpacts)
logger.info("Will run the following command, might take a few hours:\n%s"%combineCommand)
os.system(combineCommand)
plotDir = plot_directory + "/impacts_combination/"
if args.expected:
s.name += '_expected'
if args.bkgOnly:
s.name += '_bkgOnly'
if args.observed:
s.name += '_observed'
if not os.path.isdir(plotDir): os.makedirs(plotDir)
elif args.combined:
shutil.copyfile(combineDirname+'/impacts.pdf', "%s/%s_combined%s.pdf"%(plotDir,s.name,'_signalInjected' if args.signalInjection else ''))
elif args.year:
shutil.copyfile(combineDirname+'/impacts.pdf', "%s/%s_%s%s.pdf"%(plotDir,s.name,args.year,'_signalInjected' if args.signalInjection else ''))
else:
shutil.copyfile(combineDirname+'/impacts.pdf', "%s/%s.pdf"%(plotDir,s.name))
logger.info("Copied result to %s"%plotDir)
if args.removeDir:
logger.info("Removing directory in release location")
rmtree(combineDirname)
# Load the signal sample list for the chosen model.
if args.signal == "T2tt":
    data_directory = '/afs/hephy.at/data/cms07/nanoTuples/'
    postProcessing_directory = 'stops_2016_nano_v0p22/dilep/'
    from StopsDilepton.samples.nanoTuples_FastSim_Fall17_postProcessed import signals_T2tt as jobs
allJobs = [j.name for j in jobs]
# --only runs a single mass point: a numeric argument is an index into the
# job list, anything else is looked up by sample name.
if args.only is not None:
    if args.only.isdigit():
        wrapper(jobs[int(args.only)])
    else:
        jobNames = [ x.name for x in jobs ]
        wrapper(jobs[jobNames.index(args.only)])
    exit(0)
# Run every mass point. NOTE: under Python 3 map() is lazy; the list
# comprehension below is what actually drives the calls.
results = map(wrapper, jobs)
results = [r for r in results if r]
|
from django.contrib import admin

# Register your models here.
from .models import *

# Expose every game model in the Django admin (same registration order as
# listing them one call at a time).
for _model in (Card_Type, Card, Player, Turn, Game, Registred, Board):
    admin.site.register(_model)
|
#!/usr/bin/python
import argparse
import sys
import logging
import settings
from modules.SqliScanner import SqliScanner
# Log to stdout at the level configured in settings.
logging.basicConfig(stream=sys.stdout, level=settings.DEBUG_LEVEL)
logger = logging.getLogger(__name__)
# This program checks if a website is vulnerable to SQL injection (wiki: https://en.wikipedia.org/wiki/SQL_injection)
# It will crawl the targeted website and check if form parameters are vulnerable using SQLmap server API.
# Then, the result will be written in JSON file
# The project sqlmap is available here : https://github.com/sqlmapproject/sqlmap.
# This function will parse arguments to run the main class.
# Parameters that are not required will be loaded from 'settings.py' as defaults.
# Here is the list of each parameter :
# --url is a required parameter. The program will crawl this URL to check if form parameters are vulnerable to SQLi.
# --sqlmap-server is optional. Provide its own sqlmapapi server. (run sqlmap server : sqlmapapi.py -s)
# --debug is optional. Get debug output.
def main():
    """Parse command-line arguments and launch the SQLi scan.

    Options:
      -u/--url    required: target URL to crawl (e.g. http://localhost:8000/)
      -q/--quiet  suppress stdout debug output
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-u", "--url", help="target to scan. (ex: http://localhost:8000/)", required=True)
    parser.add_argument("-q", "--quiet", action='store_true', help="Do not use stdout.")
    # Print help if no argument is provided. BUG FIX: this check used to come
    # AFTER parse_args(); since --url is required, parse_args() exited with a
    # terse usage error first and the help branch was unreachable.
    if len(sys.argv) == 1:
        parser.print_help(sys.stderr)
        sys.exit(1)
    args = parser.parse_args()
    # Ensure the provided url ends with a slash (the original compared only
    # the last character; endswith states the intent directly).
    if not args.url.endswith('/'):
        args.url += '/'
    # Quiet mode disables debug output entirely.
    if args.quiet:
        debug = False
    else:
        debug = settings.DEBUG
    # Create the SqliScanner object (constructing it starts the scan).
    SqliScanner(debug=debug, target=args.url)


# main function of the program
if __name__ == "__main__":
    main()
|
from pprint import pprint
import yaml
import datetime
import uuid
import sys,os,getpass
import subprocess as sp
import numpy as np
def generate_working_dirname(run_directory):
    """Return *run_directory* with a unique '<timestamp>_<uuid4>' suffix."""
    stamp = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
    return "{}{}_{}".format(run_directory, stamp, uuid.uuid4())
def get_executable_name(conf):
    """Pick the executable path from *conf* and whether it needs MPI.

    Shallow models run the single-process shallow executable (no MPI);
    everything else runs the main executable under MPI.
    Returns (executable_name, use_mpi).
    """
    if conf['model']['shallow']:
        return conf['paths']['shallow_executable'], False
    return conf['paths']['executable'], True
def start_slurm_job(subdir, num_nodes, i, conf, shallow):
    """Copy the executable into *subdir*, write a batch script, submit it."""
    exe, mpi = get_executable_name(conf)
    # stage the executable next to the generated script (preserve mode/times)
    os.system("cp -p {} {}".format(exe, subdir))
    script = create_slurm_script(subdir, num_nodes, i, exe, mpi)
    # fire-and-forget submission via the shell
    sp.Popen("sbatch " + script, shell=True)
def create_slurm_script(subdir, num_nodes, idx, executable_name, use_mpi):
    """Write the sbatch submission script into *subdir* and return its path."""
    script_path = subdir + "run_{}_nodes.cmd".format(num_nodes)
    user = getpass.getuser()  # kept for parity; only the commented cleanup line used it
    lines = list(create_sbatch_header(num_nodes, use_mpi, idx))
    lines += [
        'module load anaconda\n',
        'source activate frnn\n',
        'module load cudatoolkit/8.0 cudnn/cuda-8.0/6.0 openmpi/cuda-8.0/intel-17.0/2.1.0/64 intel/17.0/64/17.0.4.196 intel-mkl/2017.3/4/64\n',
        # 'rm -f /tigress/{}/model_checkpoints/*.h5\n'.format(user),
        'cd {}\n'.format(subdir),
        'export OMPI_MCA_btl="tcp,self,sm"\n',
        'srun python {}\n'.format(executable_name),
        'echo "done."',
    ]
    with open(script_path, "w") as handle:
        handle.writelines(lines)
    return script_path
def create_sbatch_header(num_nodes, use_mpi, idx):
    """Return the list of #SBATCH preamble lines for a *num_nodes* job.

    MPI jobs run 4 tasks per node (2 per socket); non-MPI jobs are restricted
    to a single node with one task.
    """
    if not use_mpi:
        assert num_nodes == 1
    if use_mpi:
        task_lines = ['#SBATCH --ntasks-per-node=4\n',
                      '#SBATCH --ntasks-per-socket=2\n']
    else:
        task_lines = ['#SBATCH --ntasks-per-node=1\n',
                      '#SBATCH --ntasks-per-socket=1\n']
    header = ['#!/bin/bash\n',
              '#SBATCH -t 06:00:00\n',
              '#SBATCH -N {}\n'.format(num_nodes)]
    header += task_lines
    header += ['#SBATCH --gres=gpu:4\n',
               '#SBATCH -c 4\n',
               '#SBATCH --mem-per-cpu=0\n',
               '#SBATCH -o {}.out\n'.format(idx),
               '\n\n']
    return header
def copy_files_to_environment(subdir):
    # Copy the normalization data directory referenced by the project config
    # into *subdir* so the staged job can run self-contained.
    from plasma.conf import conf
    normalization_dir = os.path.dirname(conf['paths']['normalizer_path'])
    if os.path.isdir(normalization_dir):
        # NOTE(review): message looks truncated — destination path is missing.
        print("Copying normalization to")
        os.system(" ".join(["cp -rp",normalization_dir,os.path.join(subdir,os.path.basename(normalization_dir))]))
|
import cv2
import numpy as np
import os

# Overlay gripper width / force / grasp-state predictions onto the recorded
# frames of trial 6 and write the result out as an AVI video.
preds = np.load('Data/preds6.npy')                # grasp-state prediction per event
num_v = np.load('Data/time_list_visual6.npy')     # frame index of each visual event
num_t = np.load('Data/time_list_tactile6.npy')    # tactile timestamps (unused below)
widths = np.load('Data/widths6.npy')
forces = np.load('Data/forces6.npy')
fps = 30
path = 'Data/visual_6_recording/'
size = (1920, 1080)
video = cv2.VideoWriter("VideoTest1.avi", cv2.VideoWriter_fourcc('I', '4', '2', '0'), fps, size)
font = cv2.FONT_HERSHEY_SIMPLEX
img_list = []
# Frames before the first event: annotate the last 40 with width/force and
# the last 10 with the first prediction.
for i in range(num_v[0]):
    img = cv2.imread(path + str(i) + '.jpg')
    if abs(i - num_v[0]) < 40:
        width = 'Width:' + str(widths[0])
        force = 'Force:' + str(forces[0])
        img = cv2.putText(img, width, (50, 50), font, 1.2, (255, 0, 0), 2)
        img = cv2.putText(img, force, (50, 100), font, 1.2, (0, 155, 0), 2)
    if abs(i - num_v[0]) < 10:
        text = 'Grasping state label:' + str(preds[0])
        img = cv2.putText(img, text, (50, 150), font, 1.2, (0, 0, 0), 2)
    img_list.append(img)
# Frames between consecutive events: show the previous event's width/force,
# and the prediction label in the 10 frames around either event boundary.
for i in range(1, len(preds)):
    for k in range(num_v[i - 1], num_v[i]):
        width = 'Width:' + str(widths[i - 1])
        force = 'Force:' + str(forces[i - 1])
        img = cv2.imread(path + str(k) + '.jpg')
        img = cv2.putText(img, width, (50, 50), font, 1.2, (255, 0, 0), 2)
        img = cv2.putText(img, force, (50, 100), font, 1.2, (0, 155, 0), 2)
        if abs(k - num_v[i - 1]) < 10:
            text = 'Grasping state label:' + str(preds[i - 1])
            img = cv2.putText(img, text, (50, 150), font, 1.2, (0, 0, 0), 2)
        # BUG FIX: was `abs(k - num_v[i] < 10)` — abs() of a boolean, which is
        # 1 for every k < num_v[i], so the label was drawn on every frame
        # instead of only the 10 frames before the next event.
        if abs(k - num_v[i]) < 10:
            text = 'Grasping state label:' + str(preds[i])
            img = cv2.putText(img, text, (50, 150), font, 1.2, (0, 0, 0), 2)
        img_list.append(img)
for img in img_list:
    video.write(img)
video.release()
cv2.destroyAllWindows()
import pickle
import requests
import json

# Fetch today's top headlines for India from NewsAPI and pickle them as
# numbered "<n> <title>" strings, one pickle record per headline.
response_text = requests.get("https://newsapi.org/v2/top-headlines?country=in&apiKey=88ddf65370f54fab8dc8d09503f7339e").text
payload = json.loads(response_text)   # parse the JSON response body
articles = payload['articles']        # list of article dicts
with open ("Topheadlines.pkl","wb")as w:
    for number, article in enumerate(articles, start=1):
        entry = f"{number} {article['title']}"   # numbered headline string
        pickle.dump(entry, w)
# with open ("Topheadlines.pkl","rb") as r:
#     for line in "Topheadlines.txt":
# #    for i in range(1,21):
#         print(pickle.load(r))
|
from django.db import models
import uuid
# Create your models here.
# Minimal account model keyed by a random UUID.
class User(models.Model):
    # Auto-generated UUID primary key; hidden from forms/admin (editable=False).
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    # Login name; uniqueness enforced at the database level.
    username = models.CharField(unique=True,max_length=50)
    # NOTE(review): stored as a plain CharField — appears to be plaintext
    # password storage; consider Django's auth/password hashing. Confirm intent.
    password = models.CharField(max_length=50)
    def __str__(self):
        # Show users by username in the admin and shell.
        return self.username
|
from imageIO import *
from FFT import *
from cmath import *
import os
def compress(filename="lena.mn",output_filename="lena",compression_factor=.5):
    '''Writes a compressed mnc file.
    The number of values kept is the (original number)*(compression_factor).
    This version of the compression function acts on square images only.'''
    # NOTE: Python 2 code — `col_length/2` below relies on integer division.
    data = read_mn(filename)
    transformed_data = FFT(data)
    col_length=len(transformed_data) #also equals the number of rows
    row_length=len(transformed_data[0]) #also equals the number of columns
    #flatten the array:
    transformed_data=transformed_data.reshape((1,np.multiply(*transformed_data.shape)))[0]
    #given a compression factor, compression threshold is the value below which the...
    #function throws away frequency in the FFT data
    compression_threshold={'real':0,'imag':0}
    compression_threshold['real']=find_threshold(transformed_data.real,compression_factor)
    compression_threshold['imag']=find_threshold(transformed_data.imag,compression_factor)
    #by symmetry, the lower half of the data can be reproduced by the top half, excluding the first row
    upper_half=np.array(transformed_data[:(col_length/2+1)*row_length])
    #split FFT data into two pieces, real and imag
    uh_real=upper_half.real
    uh_imag=upper_half.imag
    #throw away small values using compression threshold
    uh_real=np.array([i if abs(i)>compression_threshold['real'] else 0 for i in uh_real])
    uh_imag=np.array([i if abs(i)>compression_threshold['imag'] else 0 for i in uh_imag])
    #writes rounded integer real/imag parts plus image-size metadata to the mnc file
    write_mnc(output_filename+".mnc",np.around(uh_real).astype('int'),np.around(uh_imag).astype('int'), (len(data),len(data)),(1,0,1,0))
def find_threshold(data, compression_factor):
    '''Given a one-dimensional array and a compression_factor, finds
    the threshold magnitude below which small values are thrown away.'''
    # work on a sorted copy of the magnitudes (abs() returns a new array,
    # so the caller's data is untouched)
    magnitudes = abs(data)
    magnitudes.sort()
    cut_index = int(float(len(data)) * (1 - compression_factor))
    # factor 0 would index one past the end; return the largest magnitude
    # so that everything gets discarded
    if compression_factor == 0:
        return magnitudes[len(data) - 1]
    return magnitudes[cut_index]
def decompress(filename="lena.mnc"):
    '''reads a file, and rebuilds the ppm image and mn
    representation based on the FFT data provided in the .mnc'''
    data, original_size = read_mnc(filename)
    width, height = data.shape  # NOTE(review): unused — documents the 2-D shape only
    # inverse FFT; keep only the real part for the reconstructed image
    iFFT_data = iFFT(data).real
    # write both output formats, dropping the ".mnc" extension from the name
    write_mn(iFFT_data,filename[:-4])
    write_ppm(iFFT_data,filename[:-4])
def generate_images():
    '''generates image in a range of compression factors for demo and testing'''
    # NOTE: Python 2 code (print statements below).
    #factors=np.linspace(0,1,41)
    factors=[.4];
    factor_data='';
    filesize_data=''
    # tab-separated (factor, compressed file size) records
    outfile=open("output_files/data_file.txt","w+")
    for f in factors:
        print "processing factor: ",f
        # two-digit factor string for the filename, e.g. 0.4 -> "40" (so .04 becomes "04")
        factor_string="{}{}".format(int(f*10),int(f*100)-10*int(f*10))
        output_filename="output_files/lena"+factor_string
        compress(filename="lena.mn",output_filename=output_filename,compression_factor=f)
        filesize=str(os.path.getsize("output_files/lena"+factor_string+".mnc"))+' '
        # round-trip: rebuild the image from the compressed file
        decompress(output_filename+".mnc")
        outfile.write(str(f)+'\t')
        outfile.write(filesize)
        outfile.write('\n')
    outfile.close()
# run the demo on import/execution
generate_images()
import csv
import os
import pandas as pd
import sys
sys.path.append("..") # Adds higher directory to python modules path.
from classes import Images
# append files together this is used because run1 and run2 are processed through afni at the same time
# supply a list of files and an out file name
def append(filelist, outfile):
    """Concatenate the files in *filelist* (in order) into *outfile*.

    Any existing *outfile* is removed first, so the result contains exactly
    the contents of the listed files. Used because run1 and run2 are
    processed through afni at the same time.
    """
    if os.path.exists(outfile):
        os.remove(outfile)
    # context managers close the handles even on error (the original
    # opened/closed each file by hand)
    with open(outfile, "a") as out:
        for path in filelist:
            with open(path, "r") as src:
                out.write(src.read())
# format the motion regressors so that we can run afni, basically append the 2 runs of movment regressors and FDs and DVARs together
def format_motion_regressors(path, images: Images.preprocessed_image):
    """Append the two runs' movement regressors, FDs and FD masks so afni can
    consume them, and write a 6-column version of the movement regressors."""
    # NOTE(review): first placeholder prints images[0] itself while the second
    # prints images[1].encoding — looks like an inconsistency; confirm intent.
    print(f"Formatting Motion regressors: {images[0]} and {images[1].encoding}")
    # all inputs/outputs live under <path>/<subject>/INPUT_DATA/<task>/<session>
    fullpath = os.path.join(path, images[0].subject, 'INPUT_DATA', images[0].task, images[0].session)
    # List of motion regressors
    move_regs = [os.path.join(fullpath, f"Movement_Regressors_{images[0].root_name}.txt"),
                 os.path.join(fullpath, f"Movement_Regressors_{images[1].root_name}.txt")]
    # outfilename for the motion regressors
    mov_regs_out = os.path.join(fullpath, "movregs12.txt")
    # append the two motion regressors together and store them in the outfile
    append(move_regs, mov_regs_out)
    # List of framewise Displacement files
    mov_regs_fd = [os.path.join(fullpath, f"{images[0].subject}_tfMRI_{images[0].root_name}_FD.txt"),
                   os.path.join(fullpath, f"{images[1].subject}_tfMRI_{images[1].root_name}_FD.txt")]
    # the Framewise Displacement outfile
    mov_regs_fd_out = os.path.join(fullpath, 'movregs_FD.txt')
    # append the outfiles together
    append(mov_regs_fd, mov_regs_fd_out)
    # A list of the Framewise Displacement Mask
    mov_regs_fd_masks = [os.path.join(fullpath, f"{images[0].subject}_tfMRI_{images[0].root_name}_FD_mask.txt"),
                         os.path.join(fullpath, f"{images[1].subject}_tfMRI_{images[1].root_name}_FD_mask.txt")]
    # The outfile for the Framewise displacement mask
    mov_regs_fd_masks_out = os.path.join(fullpath, "movregs_FD_mask.txt")
    # append the files together
    append(mov_regs_fd_masks, mov_regs_fd_masks_out)
    # We just want the first 6 movement regressors not their derivatives
    mov_regs_six_out = os.path.join(fullpath, "movregs6.txt")
    # TODO maybe write in a seperate func
    # read in the 12 column movement regressors csv
    motion = pd.read_csv(mov_regs_out, header=None, delimiter='\t', encoding='utf-8')
    # Just get the first 6 movement regressors
    data = motion.iloc[:, 0:6]
    # put them into a dataframe
    df = pd.DataFrame(data=data)
    # print them out to a csv (tab separated, fixed 8-decimal floats)
    df.to_csv(mov_regs_six_out, sep='\t', index=False, header=None, quoting=csv.QUOTE_NONE, float_format='%.8f')
|
import unittest
import read_locations
import sqlite3
class TestMediaType(unittest.TestCase):
    """Tests for LocationReader.parse_title / find_year_token.

    Expected media-type codes (from the assertions below):
    1 = movie, 2 = made-for-TV, 3 = made-for-video, 4 = TV series,
    5 = TV episode. Requires an 'imdb.db' sqlite file next to the tests.
    """
    def setUp(self):
        # One sample IMDb-style title per media type, plus edge cases.
        self.movie = 'Lilith asdf (1964)'
        self.for_video = 'Lights Out (2008) (V)'
        self.for_tv = "Life's Other Side (2007) (TV)"
        self.tv_series = '"100 Greatest Discoveries" (2004)'
        self.tv_episode = '"A Haunting (a subtitle)" (2005) {The Dark Side (#3.10)}'
        self.malformed = "asdf adsf (????a) (TV)"
        self.paren_movie = "Flight Level Three Twenty Four (FL324) (2008) (TV)" # movie with parentheses in title
        self.conn = sqlite3.connect("imdb.db")
        self.lr = read_locations.LocationReader(self.conn)
    def test_movie(self):
        media_type, year = self.lr.parse_title(self.movie)
        self.assertEqual(year, 1964)
        self.assertEqual(media_type, 1)
    def test_for_video(self):
        media_type, year = self.lr.parse_title(self.for_video)
        self.assertEqual(year, 2008)
        self.assertEqual(media_type, 3)
    def test_for_tv(self):
        media_type, year = self.lr.parse_title(self.for_tv)
        self.assertEqual(year, 2007)
        self.assertEqual(media_type, 2)
    def test_for_tv_series(self):
        media_type, year = self.lr.parse_title(self.tv_series)
        self.assertEqual(year, 2004)
        self.assertEqual(media_type, 4)
    def test_for_tv_episode(self):
        media_type, year = self.lr.parse_title(self.tv_episode)
        self.assertEqual(year, 2005)
        self.assertEqual(media_type, 5)
    def test_malformed(self):
        # An unparseable year yields None, but the (TV) marker still classifies it.
        media_type, year = self.lr.parse_title(self.malformed)
        self.assertIsNone(year)
        self.assertEqual(media_type, 2)
    def test_parse_year(self):
        # find_year_token must pick the year parenthetical, not other parens.
        openp, closep, year = self.lr.find_year_token(self.tv_episode)
        self.assertEqual(year, "2005")
        openp, closep, year = self.lr.find_year_token(self.paren_movie)
        self.assertEqual(year, "2008")
|
import os
import ui_modules
BASE_DIR = os.path.dirname(__file__)

# Server start-up options (not part of the Tornado Application settings).
options = {
    'port': 8888,
}

# Tornado Application settings.
settings = {
    'template_path': os.path.join(BASE_DIR, "templates"),
    # reuse BASE_DIR instead of recomputing os.path.dirname(__file__)
    "static_path": os.path.join(BASE_DIR, "static"),
    # BUG FIX: 'xsrf_cookies' appeared twice in this dict literal; one entry kept.
    'xsrf_cookies': True,
    'debug': True,
    #'ui_modules':ui_modules,
    # "cookie_secret":'xxxxx'
}
|
import json
import requests
from PIL import Image
from time import sleep
from .CJYDemo import use_cjy
from selenium import webdriver
from selenium.webdriver import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1"
" Safari/537.1"
}
class TrainUser:
    """Automates 12306.cn: logs in through Selenium (click-captcha solved by
    the ChaoJiYing service via use_cjy) and queries left-ticket availability
    through the public query API."""
    def __init__(self, username, password):
        # 12306 account credentials
        self.username = username
        self.password = password
        self.cookie = ""        # unused below; placeholder for a session cookie
        self.station_data = ""  # filled by show_ticket(): list of '|'-delimited station records
    # Log in to 12306
    def login(self):
        browser = webdriver.Chrome()
        browser.get("https://kyfw.12306.cn/otn/login/init")
        browser.find_element_by_xpath('//*[@id="username"]').send_keys(self.username)
        sleep(2)
        browser.find_element_by_xpath('//*[@id="password"]').send_keys(self.password)
        sleep(2)
        captcha_img = browser.find_element_by_xpath('//*[@id="loginForm"]/div/ul[2]/li[4]/div/div/div[3]/img')
        location = captcha_img.location
        size = captcha_img.size
        # Coordinates of the region to crop out of the full-page screenshot
        # (factor 2 presumably compensates for display scaling — confirm on target machine)
        coordinates = (int(location['x']), int(location['y']),
                       int(location['x'] + 2 * size['width']), int(location['y'] + 2 * size['height']))
        browser.save_screenshot('screen.png')
        i = Image.open('screen.png')
        # Use Image.crop to cut the captcha region out of the screenshot
        verify_code_image = i.crop(coordinates)
        verify_code_image.save('captcha.png')
        # Ask the ChaoJiYing captcha service to solve the click-captcha
        capture_result = use_cjy('captcha.png')
        print(capture_result)
        # Parse the returned click points, formatted as "x1,y1|x2,y2|..."
        groups = capture_result.get("pic_str").split('|')
        points = [[int(number) for number in group.split(',')] for group in groups]
        for point in points:
            # Locate the captcha image element first
            element = WebDriverWait(browser, 20).until(
                EC.presence_of_element_located((By.CLASS_NAME, "touclick-image")))
            # Simulate a click at the solved offset within the captcha image
            ActionChains(browser).move_to_element_with_offset(element, point[0] - 110, point[1] - 90).click().perform()
        sleep(3)
        browser.find_element_by_xpath('//*[@id="loginSub"]').click()
        sleep(5)
        # Still being on the login page means the login failed
        if browser.current_url not in ["https://kyfw.12306.cn/otn/login/init", "https://kyfw.12306.cn/otn/login/init#"]:
            print("登录成功!")
        else:
            print("登录失败,请重试!")
    # Show purchasable ticket information
    def show_ticket(self):
        # Fetch the JS resource that maps station names to their codes
        res1 = requests.get("https://kyfw.12306.cn/otn/resources/js/framework/station_name.js")
        # Store the split station records in station_data
        # NOTE(review): lstrip strips a *character set*, not a prefix — works
        # here only because of the characters involved; removeprefix would be safer.
        self.station_data = res1.text.lstrip("var station_names ='").rstrip("'").split('@')
        # The date must be entered as 2018-01-01, otherwise the query fails
        d = input("请输入日期(如:2018-01-01):")
        f = self.get_station(input("请输入您的出发站:"))
        t = self.get_station(input("请输入您的目的站:"))
        url = "https://kyfw.12306.cn/otn/leftTicket/query?leftTicketDTO.train_date={}&leftTicketDTO.from_station={}" \
              "&leftTicketDTO.to_station={}&purpose_codes=ADULT".format(d, f, t)
        res2 = requests.get(url)
        result = json.loads(res2.text)['data']['result']
        # (field index, seat-class label) pairs within each '|'-split record
        seat_data = [(32, "商务座"), (31, "一等座"), (30, "二等座"), (26, "无座"), (23, "软卧"), (28, "硬卧"), (29, "硬座")]
        for i in result:
            i = i.split('|')
            info = {
                "车次": i[3], "出发日期": i[13], "始发站": self.get_city(i[4]), "终点站": self.get_city(i[7]),
                "出发站": self.get_city(i[6]), "目的站": self.get_city(i[5]), "出发时间": i[8], "到达时间": i[9],
                "总耗时": str(int(i[10][:i[10].index(":")])) + "小时" + str(int(i[10][i[10].index(":") + 1:])) + "分钟",
                "商务座": '', "一等座": '', "二等座": '', "无座": '', "软卧": '', "硬卧": '', "硬座": ''
            }
            # Keep a seat class only if it is available ("有") or has a numeric count
            for j in range(7):
                if i[seat_data[j][0]] == "有" or i[seat_data[j][0]].isdigit():
                    info[seat_data[j][1]] = i[seat_data[j][0]]
                else:
                    del info[seat_data[j][1]]
            print(info)
    # Return the station's letter code (field 2 of the record)
    def get_station(self, city):
        for i in self.station_data:
            if city in i:
                return i.split('|')[2]
    # Return the station's Chinese name (field 1 of the record)
    def get_city(self, station):
        for i in self.station_data:
            if station in i:
                return i.split('|')[1]
# Script entry point: prompt for credentials, log in, then query tickets.
if __name__ == '__main__':
    u = TrainUser(input("请输入您的用户名:"), input("请输入您的密码:"))
    u.login()
    u.show_ticket()
|
import random
class XGen:
    """Base class for random edge-list generators.

    Each edge is a list [src, dst, attr, attr, attr] collected in self.data.
    """
    # Class-level defaults kept for backward compatibility; instances now get
    # their own list copies in __init__ (see BUG FIX note below).
    rows = 0
    connected_comp = 1
    data = []
    temp = []
    linked_nodes = []

    def __init__(self, rows, linked_nodes, connected_comp=1):
        self.connected_comp = connected_comp
        self.rows = rows
        self.linked_nodes = linked_nodes
        # BUG FIX: data/temp used to exist only as class attributes, so every
        # instance (and every subclass instance) shared and accumulated into
        # the SAME lists. Give each instance its own state.
        self.data = []
        self.temp = []

    def add_nodes(self, s, e):
        # Append between 1 and 3 parallel edges s -> e with random attributes.
        for i in range(1, self.r(2, 4)):
            self.data.append([s, e, self.attr(), self.attr(), self.attr()])

    def append_to_temp(self, val):
        # Record val once (temp acts as a seen-set).
        if val not in self.temp:
            self.temp.append(val)

    def attr(self):
        # Random attribute label 'A-1' .. 'A-<rows>'.
        return 'A-' + str(random.randint(1, self.rows))

    def r(self, s=1, e=10):
        # Inclusive random integer in [s, e].
        return random.randint(s, e)
class GSimpleChainGen(XGen):
    """Generate simple chain graphs: edges i -> i+1 within each component."""
    __count = 0

    def gen(self, offset):
        # Each connected component occupies its own contiguous node range.
        base = offset * self.rows
        for node in range(self.rows):
            self.add_nodes(base + node, base + node + 1)

    def run(self):
        # Build every connected component and return the accumulated edges.
        self.__count += 1
        for component in range(self.connected_comp):
            self.gen(component)
        return self.data
class GTreeGen(XGen):
    """Generate a random tree-like edge list: each new node attaches to a
    randomly chosen earlier node, then any requested linked_nodes that were
    never reached are attached to node 0."""
    # running node counter, shared across gen() calls within one run()
    __count = 0
    def gen(self, offset):
        for i in range(self.rows):
            # Pick a random parent among previously created nodes.
            # NOTE(review): the offset*i terms shift the candidate range per
            # step — with offset 0 this is simply randint(0, __count-1);
            # the intent for offset > 0 is unclear, confirm before reuse.
            parent = self.r(0 + offset * i, (self.__count - 1) + offset * i)
            self.add_nodes(parent, self.__count)
            # remember both endpoints as seen
            self.append_to_temp(parent)
            self.append_to_temp(self.__count)
            self.__count = self.__count + 1
        self.add_linked_nodes()
    def add_linked_nodes(self):
        # Attach any requested nodes that the random process never produced
        # directly to the root (node 0).
        for child in self.linked_nodes:
            if child not in self.temp:
                self.data.append([0, child, self.attr(), self.attr(), self.attr()])
                self.temp.append(child)
    def run(self):
        # Seed the counter at 1 (node 0 is the implicit root), then build
        # every connected component and return the accumulated edges.
        self.__count = self.__count + 1
        for cc in range(0, self.connected_comp):
            self.gen(cc)
        return self.data
# g = GSimpleChainGen(2000, [], 1)
# d = g.run()
# Build one random tree with 100 nodes and a single connected component.
g = GTreeGen(100, [], 1)
d = g.run()
# Echo the edge list (src<TAB>dst) to stdout...
for i in d:
    print(i[0], '\t', i[1])
# ...and append it tab-separated to somefile.txt.
with open('somefile.txt', 'a') as the_file:
    for i in d:
        the_file.write(str(i[0]) + '\t' + str(i[1]) + '\n')
# import networkx as nx
# G=nx.Graph()
#
# for i in d:
#     G.add_node(i[0])
#
#
# for i in d:
#     G.add_edge(i[0], i[1])
#
# nx.write_graphml(G, 'test2.graphml')
|
'''Starting in the top left corner of a 2×2 grid, and only being able to move to the right and down, there are exactly 6 routes to the bottom right corner.
How many such routes are there through a 20×20 grid?'''
import functools

gridSize = [20,20]

def recPath(gridSize):
    """Count the monotone lattice paths through a gridSize[0] x gridSize[1] grid.

    Same recursion as before (move left/up toward [0,0]), but memoized:
    the naive version recomputes subproblems and needs C(40,20) ~ 1.4e11
    calls for the 20x20 grid, which never finishes. With caching it is
    O(width*height).
    """
    @functools.lru_cache(maxsize=None)
    def count(w, h):
        if w == 0 and h == 0:
            return 1
        paths = 0
        if w > 0:
            paths += count(w - 1, h)
        if h > 0:
            paths += count(w, h - 1)
        return paths
    return count(gridSize[0], gridSize[1])

result = recPath(gridSize)
print (result)
|
#Capital Gains Tax (CGT) - Individuals
#CGT is payable by individuals on their taxable gains.
#If there is an increase in value on disposal, there is a chargeable gain (a fall in value results in an allowable loss).
#Chargeable disposals include sale, gift or loss/destruction of an asset or part of an asset.
#Exempt disposals include gifts to charities.
#There is no CGT on death (but those who receive the asset get it at its market rate value at the time of death, which can result in a tax free uplift).
#Chargeable assets include land, furniture, art, goodwill, shares and leases.
#Exempt assets include cash, cars, wasting chattels, chattels (where acquisition cost and gross disposal consideration less than or equal to £6000),
#and gilt-edged securities (e.g. Treasury stock), National Savings Certificates, investments held in ISAs.
#Calculating a gain or loss
disposalConsideration = 0 #Sales proceeds (if sold) or market value (if gifted)
incidentalCostsOfDisposal = 0 #e.g. legal fees, estate agent's fees, advertising costs
netDisposalConsideration = disposalConsideration - incidentalCostsOfDisposal
allowableCosts = 0 #Acquisition costs, incidental costs of acquisitions (e.g. stamp duty), enhancement capital expenditure (e.g. additions and improvements)
chargeableGainOrLoss = netDisposalConsideration - allowableCosts
print(f"Chargeable gain or allowable loss on disposal of chargeable asset is £{chargeableGainOrLoss}")
#Chattels - special rules apply to chattels
#Chattels are tangible moveable property.
#Wasting chattels are chattels with a predictable life of 50 years or less (e.g. computers, plant and machinery)
#Non-wasting chattels are chattels with predictable life of more than 50 years (e.g. antiques)
chattelAcquisitionCost = 15000
chattelProceeds = 40000
chattelCostsOfSale = 2000
chargeableGainOnChattel = 0
wastingChattel = False
nonwastingChattel = False
lifeAtDisposal = 51 #years
#Classify the chattel by its predictable life at disposal (50-year boundary).
if lifeAtDisposal <= 50:
    wastingChattel = True
else:
    nonwastingChattel = True
if wastingChattel == True:
    #Wasting chattels are exempt: no chargeable gain.
    chargeableGainOnChattel = 0
elif nonwastingChattel == True:
    if chattelAcquisitionCost <= 6000 and chattelProceeds <= 6000:
        #Both cost and proceeds within the £6000 limit: exempt.
        chargeableGainOnChattel = 0
    elif chattelAcquisitionCost <= 6000 and chattelProceeds > 6000:
        #Marginal relief: gain is capped at 5/3 of the proceeds above £6000.
        cannotExceed = 5/3 * (chattelProceeds - 6000)
        chargeableGainOnChattel = chattelProceeds - chattelCostsOfSale - chattelAcquisitionCost
        if chargeableGainOnChattel > cannotExceed:
            chargeableGainOnChattel = cannotExceed
        else:
            chargeableGainOnChattel = chargeableGainOnChattel
    elif chattelProceeds < 6000 and chattelAcquisitionCost > chattelProceeds: #Would result in a loss, so assume proceeds were 6000
        chattelProceeds = 6000
        chargeableGainOnChattel = chattelProceeds - chattelCostsOfSale - chattelAcquisitionCost
        #Deemed £6000 proceeds can only reduce a loss, never create a gain.
        if chargeableGainOnChattel > 0:
            chargeableGainOnChattel = 0
    else:
        #Ordinary case: proceeds net of sale costs less acquisition cost.
        chargeableGainOnChattel = chattelProceeds - chattelCostsOfSale - chattelAcquisitionCost
print(f"Chargeable gain or allowable loss on disposal of chattel is £{round(chargeableGainOnChattel)}")
#Calculating CGT payable
annualExemptAmount = 11700 #tax year 2018/19
taxableGains = chargeableGainOrLoss + chargeableGainOnChattel - annualExemptAmount
taxableIncome = 0
basicRateCeiling = 34500 #tax year 2018/19
basicCGTRate = 0.1
higherCGTRate = 0.2
#Calculate CGT liable
liableCGT = 0
#Rate depends on where income + gains fall relative to the basic-rate ceiling:
#entirely below -> basic rate; income already above -> higher rate; straddling
#the ceiling -> split between the two rates.
if taxableIncome + taxableGains <= basicRateCeiling:
    liableCGT = taxableGains * basicCGTRate
elif taxableIncome > basicRateCeiling:
    liableCGT = taxableGains * higherCGTRate
elif taxableIncome <= basicRateCeiling and (taxableIncome + taxableGains) > basicRateCeiling:
    liableCGT = (basicRateCeiling - taxableIncome) * basicCGTRate + (taxableGains - (basicRateCeiling - taxableIncome)) * higherCGTRate
print(f"Capital gains tax liability is £{liableCGT}")
|
import pygame

# Minimal pygame window that stays open until the user closes it.
pygame.init()
screen = pygame.display.set_mode((500,500))
pygame.display.set_caption("My first game")
# BUG FIX: the clock was created twice; one instance is enough.
clock = pygame.time.Clock()
done = False
while not done:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            done = True
    # Cap the loop at 60 FPS so the empty loop doesn't spin a full CPU core.
    clock.tick(60)
# Shut pygame down cleanly once the window is closed.
pygame.quit()
# Copyright (c) 2009 ActiveState Software Inc.
# See http://www.activestate.com/activepython/license/ for licensing
# information.
import os
import sys
import logging
from pypm.client.base import PyPMFeature, ImagePythonEnvironment
from pypm.client.fs import Extractor
LOG = logging.getLogger(__name__)
class Fixer(PyPMFeature):
    """Registry of post-install fixers applied to a just-installed package."""
    class ScriptShebangFixer(object):
        """Fix #! hardcoding in scripts
        While running ``easy_install $pkg.name`` is `one solution`_, it will not
        work with packages that use only distutils (not setuptools).
        Note: On Windows, foo.exe uses the shebang line from foo-script.py; so
        we should run this fixer on Windows too.
        .. _`one solution`:
            http://mail.python.org/pipermail/distutils-sig/2008-April/009283.html
        """
        def __init__(self, pypmenv, ipkg):
            # pypmenv: the Python environment being installed into;
            # ipkg: the installed package whose scripts get fixed.
            self.pypmenv = pypmenv
            self.ipkg = ipkg
            assert os.path.exists(self.pypmenv.pyenv.python_exe)
        @staticmethod
        def applicable():
            # This fixer applies to every package (see Note in the class docstring).
            return True
        def fix(self):
            # Rewrite the shebang of every installed script to point at the
            # environment's Python interpreter.
            pyenv = self.pypmenv.pyenv
            if isinstance(pyenv, ImagePythonEnvironment):
                # image environments must point scripts at the *target* interpreter
                python_exe = pyenv.target_python_exe
                LOG.info('using ImagePythonEnvironment w/ target=%s', python_exe)
            else:
                python_exe = pyenv.python_exe
            for path in self.ipkg.files_list:
                # scripts live in bin/ (unix) or Scripts/ (windows, minus .exe stubs)
                if path.startswith('bin/') or (path.startswith('Scripts/') and
                                               not path.endswith('.exe')):
                    self._fix_script(pyenv.get_abspath(path), python_exe)
        @staticmethod
        def _fix_script(script_path, python_exe):
            # replace old #! path with ``python_exe``
            with open(script_path) as file:
                first_line = file.readline()
                if first_line.startswith('#!'):
                    # slurp the rest, then close before reopening for write
                    contents = file.read()
                    file.close()
                    first_line = '#!' + python_exe
                    LOG.info('Fixing script %s', script_path)
                    LOG.debug('New shebang %s', first_line)
                    with open(script_path, 'w') as wfile:
                        wfile.write(first_line + '\n')
                        wfile.write(contents)
                        wfile.flush()
        def __str__(self):
            return "<ScriptShebangFixer:%s,%s>" % (self.ipkg.name,
                                                   self.pypmenv.pyenv.root_dir)
    # All fixer classes known to this feature.
    available_fixers = [ScriptShebangFixer]
    def get_applicable_fixers(self, ipkg):
        # Instantiate every fixer whose applicable() check passes for ipkg.
        return [Fixer(self.pypmenv, ipkg)
                for Fixer in self.available_fixers if Fixer.applicable()]
|
# Jeffrey Martinez CSC110 - 01 Airline Flight Schedule Program
# Dipippo
# 29 April 2015
#-------------------------------------------------------------------------------
# Description of the program:
# This program reads through a large data file with flight information
# for all direct flights from Providence to Orlando. The program
# will offer the user various options for finding the right flight.
#-------------------------------------------------------------------------------
# General solution:
# This program reads through a given text file and then sorts the information
# in that text file and stores them into lists. The program will then ask the user
# to choose an option. Based on that option, the program will execute the option
# Once the program has executed said option then it will print out the result.
#-------------------------------------------------------------------------------
# Pseudocode:
# Make empty lists where the information will be stored
# Opens the text file that the user inputs
# Takes the data and then stores it into the empty lists
# The program then will read through and sort the lists
# The program will print out options that the user can input and then
# the program will run the option depending on what the user inputs
# Prints the results of the functions
#-------------------------------------------------------------------------------
# Design:
#importing sys to be able to quit the program
import sys
from datetime import datetime
# @return the airlines, flightNumbers, departureTimes, arrivalTimes and prices in
# lists
def getInfo(file):
    """Read the flight data file and return its columns as five parallel lists.

    Each data line is expected to look like
    ``airline,flightNumber,departureTime,arrivalTime,$price``.
    Reading stops at the first blank line, matching the original behavior.

    :param file: path of the comma-separated flight data file
    :return: (airlines, flightNumbers, departureTimes, arrivalTimes, prices)
             where prices are ints with the leading '$' removed
    """
    airlines = []
    flightNumbers = []
    departureTimes = []
    arrivalTimes = []
    prices = []
    # 'with' guarantees the file handle is closed (the original leaked it).
    with open(file, 'r') as infile:
        for line in infile:
            line = line.strip()
            if line == "":
                # Original stopped at the first empty line; keep that behavior.
                break
            airline, flightNum, departureTime, arrivalTime, price = line.split(',')
            airlines.append(airline)
            flightNumbers.append(flightNum)
            departureTimes.append(departureTime)
            arrivalTimes.append(arrivalTime)
            # Drop only the leading '$'.  The original's [1:4] slice silently
            # truncated any price with four or more digits (e.g. "$1050" -> 105).
            prices.append(int(price[1:]))
    return airlines, flightNumbers, departureTimes, arrivalTimes, prices
def choice1(airlines, flightNumbers, departureTimes, arrivalTimes, prices):
    """Prompt for an airline name and print every flight it operates.

    Keeps prompting until the entered airline appears in ``airlines``.
    """
    while True:
        airline = input("Enter the airline to find the flights for: ")
        # Membership test replaces the original's manual sentinel scan.
        if airline in airlines:
            print("The flights that meet your criteria are:\n")
            print("Airline\t\tFlight Number\tDeparture Time\tArrival Time\tPrice")
            print("---------------------------------------------------------------------------------------------------")
            rows = zip(airlines, flightNumbers, departureTimes, arrivalTimes, prices)
            for carrier, number, dep, arr, cost in rows:
                if carrier == airline:
                    print(carrier, " \t", number, "\t\t", dep,
                          "\t\t", arr, "\t\t$", cost)
            return
        print("That airline does not exist...")
def choice2(airlines, flightNumbers, departureTimes, arrivalTimes, prices):
    """Print the airline, flight number and price of the cheapest flight.

    Ties resolve in favor of the flight appearing LAST in the lists,
    matching the original implementation's '==' branch.
    """
    lowest = prices[0]
    cheapestA = []
    cheapestF = []
    for i, price in enumerate(prices):
        # '<=' keeps updating on equal prices, exactly like the original's
        # separate '<' and '==' branches combined.
        if price <= lowest:
            lowest = price
            cheapestA = airlines[i]
            cheapestF = flightNumbers[i]
    print("The cheapest airline is,", str(cheapestA) + ", with the flight number,", cheapestF, "at $" + str(lowest))
def choice3(airlines, flightNumbers, departureTimes, arrivalTimes, prices):
    """Prompt for a maximum price and print every flight strictly cheaper.

    Keeps prompting until at least one flight is below the threshold.
    """
    while True:
        threshold = int(input("Enter a max price: $"))
        # any() replaces the original's manual found-flag scan.
        if any(price < threshold for price in prices):
            print("The flights that meet your criteria are:\n")
            print("Airline\t\tFlight Number\tDeparture Time\tArrival Time\tPrice")
            print("---------------------------------------------------------------------------------------------------")
            for i, price in enumerate(prices):
                if price < threshold:
                    print(airlines[i], " \t", flightNumbers[i], "\t\t", departureTimes[i],
                          "\t\t", arrivalTimes[i], "\t\t$", prices[i])
            return
        print("There's nothing cheaper...")
def choice4(airlines, flightNumbers, departureTimes, arrivalTimes, prices):
    """Find and print the flight with the shortest duration in minutes.

    Durations are computed from the HH:MM departure/arrival strings.
    NOTE(review): like the original, this assumes arrival is on the same
    day as departure; overnight flights would need extra handling.
    """
    form = '%H:%M'

    def _minutes(dep, arr):
        # Whole-minute difference between two HH:MM time strings.
        diff = datetime.strptime(arr, form) - datetime.strptime(dep, form)
        return int(diff.total_seconds() // 60)

    # Seed with flight 0 so 'index' is always defined.  The original left
    # it unbound and raised NameError whenever the first flight was already
    # the (strictly) shortest one.
    shortest = _minutes(departureTimes[0], arrivalTimes[0])
    index = 0
    for i in range(1, len(arrivalTimes)):
        mins = _minutes(departureTimes[i], arrivalTimes[i])
        # Plain 'if': the original used a 'while' loop that could only ever
        # execute once (its body falsified the condition immediately).
        if mins < shortest:
            shortest = mins
            index = i
    print('The shortest flight is', airlines[index], flightNumbers[index], 'at', shortest, 'minutes')
def choice5(airlines, flightNumbers, departureTimes, arrivalTimes, prices):
    """Prompt for a departure-time range and print flights inside it.

    Bounds are inclusive.  Zero-padded HH:MM strings compare correctly
    with plain string comparison.  Keeps prompting until at least one
    flight falls inside the entered range.
    """
    while True:
        earliestTime = input("Enter the earliest time: ")
        latestTime = input("Enter the latest time: ")
        # Use inclusive bounds for BOTH the search and the printout.  The
        # original searched with strict '<'/'>' but printed with '<='/'>=',
        # so a range whose only matches sat exactly on a boundary was
        # wrongly reported as "No flights exist in that range...".
        matches = [i for i in range(len(departureTimes))
                   if earliestTime <= departureTimes[i] <= latestTime]
        if matches:
            print('Here are flights in that time range: ')
            print("Airline \tFlight Number\tDeparture Time\tArrival Time\tPrice")
            print("---------------------------------------------------------------------------------------------------")
            for i in matches:
                print(airlines[i], " \t", flightNumbers[i], "\t\t", departureTimes[i], "\t\t",
                      arrivalTimes[i], "\t\t$", prices[i])
            return
        print('No flights exist in that range...')
def choice6(airlines, flightNumbers, departureTimes, arrivalTimes, prices):
    """Prompt for an airline and print its average ticket price.

    Keeps prompting until the entered airline appears in ``airlines``.
    """
    while True:
        airline = input("Enter an airline: ")
        # Collect this airline's prices; an empty list means it is unknown.
        matching = [prices[i] for i in range(len(airlines)) if airlines[i] == airline]
        if matching:
            averagePrice = round(sum(matching) / len(matching), 2)
            print("The average price for", airline, "is $" + str(averagePrice))
            return
        print("That airline does not exist in this list...")
def quit():
    """Terminate the program with exit status 1 (equivalent to sys.exit(1))."""
    raise SystemExit(1)
def main():
    """Entry point: load the flight data, then loop dispatching menu choices."""
    file = input("Enter name of data file: ")
    airlines, flightNumbers, departureTimes, arrivalTimes, prices = getInfo(file)
    print("\nPlease choose one of the following options:\n1 -- Find all flights on a particular airline\n"+
          "2 -- Find the cheapest flight\n3 -- Find all flights less than a specified price\n4 -- Find the shortest flight"+
          "\n5 -- find all flights that depart within a specified range\n6 -- Find the average price for a specified airline"+
          "\n7 -- Quit")
    # Dispatch table mapping menu numbers to their handler functions.
    handlers = {'1': choice1, '2': choice2, '3': choice3,
                '4': choice4, '5': choice5, '6': choice6}
    # Loop until the user picks option 7; unknown input just reprompts.
    while True:
        optionNum = input("Choice ==> ")
        if optionNum == '7':
            print("Adios!")
            quit()
        elif optionNum in handlers:
            handlers[optionNum](airlines, flightNumbers, departureTimes, arrivalTimes, prices)
        else:
            print("No")
# Run main() only when executed as a script (not on import into IDLE/tests).
if __name__ == "__main__":
    main()
|
#!/usr/bin/python3
# Query the Freshmaker API for the latest completed event and replay its
# errata advisory against the builds endpoint as a dry run.
import subprocess
import shlex
import requests
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--dev', default=False, action='store_true', help="Run the script on the dev")
args = parser.parse_args()

# Select the dev or production API root.
if args.dev:
    url = "https://freshmaker.dev.engineering.redhat.com/api/1/"
else:
    url = "https://freshmaker.engineering.redhat.com/api/1/"

# state=2 filters for completed events; fail loudly on HTTP errors.
r = requests.get(url + "events/?state=2")
r.raise_for_status()
data = r.json()

# Get errata_id from last successful event.
# NOTE(review): assumes the API lists items newest-first — confirm.
errata_id = data['items'][0]['search_key']

# Check that the deployment was successful.
# curl is shelled out (rather than using requests) for its --negotiate
# Kerberos authentication support.
url_build = url + "builds/"
command = (
    "curl --negotiate -u : -k -X POST -d '{\"errata_id\": %s, \"dry_run\": true}' %s -l -v"
    % (errata_id, url_build))
subprocess_cmd = shlex.split(command)
stdout = subprocess.run(subprocess_cmd, stdout=subprocess.PIPE).stdout.decode('utf-8')
print(stdout)
|
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 15 19:50:08 2021
@author: sethn
"""
# Import modules
import gdal
import pyproj
import numpy as np
import scipy.interpolate as interpolate
def geotiff_read(infile):
    """Read a GeoTIFF file into a numpy array.

    Single-band rasters come back as a 2-D array; multi-band rasters as a
    (rows, cols, bands) float array.

    :param infile: path of the GeoTIFF to read
    :return: (array, geotransform, projection, nodata) where nodata is the
             no-data sentinel of band 1 (may be None)
    :raises ValueError: if the file contains no raster bands (the original
             left ``array`` unbound and crashed with NameError here)
    """
    # Allow GDAL to throw Python exceptions instead of returning error codes.
    gdal.UseExceptions()
    tiff = gdal.Open(infile)
    if tiff.RasterCount == 1:
        array = tiff.ReadAsArray()
    elif tiff.RasterCount > 1:
        array = np.zeros((tiff.RasterYSize, tiff.RasterXSize, tiff.RasterCount))
        for i in range(tiff.RasterCount):
            # GDAL band numbers are 1-based; numpy planes are 0-based.
            band = tiff.GetRasterBand(i + 1)
            array[:, :, i] = band.ReadAsArray()
    else:
        raise ValueError("%s contains no raster bands" % infile)
    # Georeferencing parameters and the no-data sentinel of band 1.
    geotransform = tiff.GetGeoTransform()
    projection = tiff.GetProjection()
    nodata = tiff.GetRasterBand(1).GetNoDataValue()
    return array, geotransform, projection, nodata
def geotiff_write(outfile, geotransform, projection, data, nodata=None):
    """Write a numpy array to ``outfile`` as a GeoTIFF in byte format.

    2-D input becomes a single-band file; 3-D input of shape
    (rows, cols, bands) becomes a multi-band file.

    :param outfile: output file path
    :param geotransform: GDAL geotransform tuple to attach
    :param projection: projection WKT string to attach
    :param data: 2-D or 3-D numpy array to write
    :param nodata: optional no-data value, set on band 1 when truthy
    :return: 1 on success
    """
    print('Writing %s' % outfile)
    driver = gdal.GetDriverByName('GTiff')
    if data.ndim == 2:
        (x, y) = data.shape
        tiff = driver.Create(outfile, y, x, 1, gdal.GDT_Byte)
        tiff.GetRasterBand(1).WriteArray(data)
    if data.ndim > 2:
        (x, y, bands) = data.shape
        tiff = driver.Create(outfile, y, x, bands, gdal.GDT_Byte)
        for band in range(bands):
            # GDAL band numbers are 1-based while numpy planes are 0-based.
            # The original wrote data[:, :, band + 1] to GetRasterBand(band),
            # which skipped plane 0, raised IndexError on the last plane, and
            # asked GDAL for the invalid band number 0.
            tiff.GetRasterBand(band + 1).WriteArray(data[:, :, band])
    if nodata:
        tiff.GetRasterBand(1).SetNoDataValue(nodata)
    tiff.SetGeoTransform(geotransform)
    tiff.SetProjection(projection)
    tiff = None  # dereferencing the dataset flushes and closes the file
    return 1
# Import DEM.
# NOTE(review): the path mixes '/' and '\' separators; it only works because
# '\S' is not a recognized escape sequence — a raw string or forward slashes
# would be safer.
dem, gt, proj, nodata = geotiff_read('C:/Users/sgoldst3/Inglefield/PyTrx\Seth_Examples/cam_env/201907-Minturn-Elv-5cm-octree_dem_clipped.tif')
# Normalized [0, 1] coordinate grids spanning the DEM extent.
x = np.linspace(0, 1, dem.shape[0])
y = np.linspace(0, 1, dem.shape[1])
yy, xx = np.meshgrid(y, x)
# Identify NaNs (-9999 is the DEM's no-data sentinel).
dem[dem == -9999] = np.nan
vals = ~np.isnan(dem)
# Sample DEM: a 200x200 pixel window of interest.
xx_sub = xx[1950:2150,3650:3850]
yy_sub = yy[1950:2150,3650:3850]
dem_sub = dem[1950:2150,3650:3850]
vals_sub = ~np.isnan(dem_sub)
# Fill the NaN holes in the window with cubic gridded interpolation over
# the valid samples.
z_dense_smooth_griddata = interpolate.griddata(np.array([xx_sub[vals_sub].ravel(),
                                                         yy_sub[vals_sub].ravel()]).T,
                                               dem_sub[vals_sub].ravel(), (xx_sub,yy_sub),
                                               method='cubic')
|
from disjoint_set import DisjointSet
def get_correlated_columns(corr_mat, thresh=0.8):
    """Partition columns of a correlation matrix into keep/remove sets.

    Columns whose absolute pairwise correlation is >= ``thresh`` are grouped
    transitively; for each group the first (lowest-index) column is kept and
    the rest are marked for removal — replicating drop_duplicates(keep="first").
    Uncorrelated columns appear in NEITHER returned set, matching the
    original behavior.

    This version replaces the third-party ``disjoint_set`` dependency with a
    small inline union-find, which also makes the output order deterministic.

    :param corr_mat: square pandas DataFrame of correlations
    :param thresh: absolute-correlation threshold (inclusive)
    :return: (keep_cols, remove_cols) as pandas Index selections
    """
    corr_vals = corr_mat.abs().values
    col_names = corr_mat.columns
    n = len(corr_vals)

    # Union-find over column indices that take part in a correlated pair.
    parent = {}

    def _find(x):
        # Path-halving find.
        while parent[x] != x:
            parent[x] = parent[parent[x]]
            x = parent[x]
        return x

    def _union(a, b):
        parent.setdefault(a, a)
        parent.setdefault(b, b)
        ra, rb = _find(a), _find(b)
        if ra != rb:
            parent[rb] = ra

    for i in range(n - 1):
        for j in range(i + 1, n):
            if corr_vals[i][j] >= thresh:
                _union(i, j)

    # Group members by their root representative.
    groups = {}
    for x in parent:
        groups.setdefault(_find(x), []).append(x)

    keep_cols_idx = []
    remove_cols_idx = []
    for members in groups.values():
        members.sort()
        # Lowest index is the kept representative ("first" column).
        keep_cols_idx.append(members[0])
        remove_cols_idx.extend(members[1:])
    return col_names[keep_cols_idx], col_names[remove_cols_idx]
|
from django.shortcuts import render
def about(request):
    """Render the static 'about' page."""
    context = {}
    return render(request, 'frontend/about.html', context)
|
class Solution(object):
    def myAtoi(self, str):
        """Convert a string to a 32-bit signed integer (LeetCode 8 semantics).

        Leading/trailing whitespace is stripped, an optional single '+'/'-'
        sign is honored, digits are consumed until the first non-digit, and
        the result is clamped to [-2**31, 2**31 - 1].

        :type str: str
        :rtype: int
        """
        # Rebind to a local name: the parameter shadows the builtin ``str``
        # (kept in the signature for interface compatibility).
        s = str.strip()
        sign = 1
        if s.startswith(('+', '-')):
            if s[0] == '-':
                sign = -1
            s = s[1:]
        value = 0
        for ch in s:
            # Explicit ASCII-digit test; ch.isdigit() would accept characters
            # like superscripts that int() rejects.
            if not ('0' <= ch <= '9'):
                break
            value = value * 10 + int(ch)
        value *= sign
        # Clamp to the signed 32-bit range.
        return max(min(value, 2**31 - 1), -2**31)
|
# -*- coding: utf-8 -*-
from django.conf.urls.defaults import *
from django.conf import settings
from dictionary.models import Term
from dictionary.forms import TermForm
from wiki import views
from dictionary import views as dictviews
# Allow the project to override the slug pattern via settings.WIKI_URL_RE;
# fall back to word characters and dashes.
try:
    WIKI_URL_RE = settings.WIKI_URL_RE
except AttributeError:
    WIKI_URL_RE = r'[-\w]+'

# URL routes for the dictionary app.  Generic wiki views are reused with
# Term as the article model and templates under the 'dictionary' directory.
# NOTE(review): legacy Django URLconf style (patterns()/urls.defaults),
# removed in Django 1.10+.
urlpatterns = patterns('',
    url(r'^$', dictviews.home,
        name='dictionary_home'),
    url(r'^edit/(?P<slug>'+ WIKI_URL_RE +r')/$', views.edit_article,
        {'template_dir': 'dictionary',
         'ArticleClass': Term,
         'article_qs': Term.objects.all(),
         'ArticleFormClass': TermForm},
        name='dictionary_edit'),
    url(r'^search/$', dictviews.search,
        name='dictionary_search'),
    url(r'^new/$', dictviews.new_article,
        name='dictionary_new'),
    url(r'^history/(?P<slug>'+ WIKI_URL_RE +r')/$', views.article_history,
        {'template_dir': 'dictionary'},
        name='dictionary_article_history'),
    url(r'^history/(?P<slug>'+ WIKI_URL_RE +r')/changeset/(?P<revision>\d+)/$', views.view_changeset,
        {'template_dir': 'dictionary'},
        name='dictionary_changeset',),
    url(r'^history/(?P<slug>'+ WIKI_URL_RE +r')/revert/$', views.revert_to_revision,
        {'template_dir': 'dictionary'},
        name='dictionary_revert_to_revision'),
    url(r'^view/(?P<slug>'+ WIKI_URL_RE +r')/$', views.view_article,
        {'ArticleClass': Term,
         'article_qs': Term.objects.all(),
         'template_dir': 'dictionary'},
        name='dictionary_view'),
    # Same view as above but rendered with the AJAX partial template.
    url(r'^ajax/(?P<slug>'+ WIKI_URL_RE +r')/$', views.view_article,
        {'ArticleClass': Term,
         'article_qs': Term.objects.all(),
         'template_dir': 'dictionary',
         'template_name': 'ajax.html'},
        name='dictionary_ajax'),
    url(r'^w/view/(?P<slug>'+ WIKI_URL_RE +r')/$', dictviews.view_wiki_article,
        name='wiki_article'),
    url(r'^w/history/(?P<slug>'+ WIKI_URL_RE +r')/$', dictviews.article_history,
        name='wiki_article_history'),
)
|
#!/usr/bin/env python
# Python 2 script (uses print statements): builds an Ansible inventory file
# and a user list from the Luna Node VM API.
from lndynamic import LNDynamic
import natsort

# commands.txt holds the API key on line 1 and the API secret on line 2.
with open(r"/home/hme/commands.txt") as hpass:
    lines = hpass.readlines()
api = LNDynamic(lines[0].rstrip('\n'), lines[1].rstrip('\n'))
results = api.request('vm', 'list')
f= open(r"/home/hme/inventory_lunanode" ,"w+")
hfile= open(r"/home/hme/user_list" ,"w+")
val = results.get("vms")
user_dic={}
print len(val)
for i in range(0,len(val)):
    flag = 0
    for key, value in val[i].items():
        # Only VMs whose name contains "stjo" are inventoried; anything
        # else skips the rest of this VM's keys.
        if key == 'name':
            if "stjo" not in value:
                break
            print('name=',value)
            user= value
        if key =='primaryip':
            ip = value
            print('ip=',value)
        if key == 'plan_id':
            print('plan_id=',value)
        if key == 'vm_id' :
            print('vm_id=', value)
            # Fetch per-VM details to extract the SSH login credentials.
            vm_info = api.request('vm', 'info', {'vm_id': value})
            st = vm_info.get('info')
            #print(st)
            #print type(st)
            #print len(st)
            try:
                print(st['login_details'])
                user_login= st['login_details']
                a=user_login.split()
                print (str(ip),str(a[1]),str(a[3]))
                # a[1] carries a trailing separator character, trimmed here.
                gt=str(a[1])[:-1]
                line = "{} ansible_ssh_user={} ansible_ssh_pass={}\n".format(str(ip),str(gt),str(a[3]))
                #user_line="{} \t {} \t centos \t lawn-vex\n".format(user,str(ip))
                f.write(line)
                user_dic[str(user)]=str(ip)
                #hfile.write(user_line)
            except KeyError as error:
                # VM has no login_details yet; skip it.
                pass
            #for v in range (0,len(st)):
            #    for z,y in st[v].items():
            #        if z == 'login_details':
            #            print( "user = ",y)
f.close()
#print (user_dic)
# Write the user list in natural (human) sort order.
list_user =user_dic.keys();
natural= natsort.natsorted(list_user)
#print natural
for vts in range(0,len(natural)):
    myip=user_dic[natural[vts]]
    user_line="{} \t {} \t centos \t lawn-vex\n".format(natural[vts],myip)
    hfile.write(user_line)
hfile.close()
|
import logging
import numpy as np
import torch
from csrank import FETAObjectRanker
from iorank.training.object_ranker_trainer import ObjectRankerTrainer
from iorank.util.util import get_device
class FETARanker:
    """Adapter wrapping csrank's FETAObjectRanker behind this project's
    object-ranker interface (numpy in, torch tensors out)."""

    def __init__(self, n_objects, n_object_features, add_zeroth_order_model=False, n_hidden=2, n_units=8,
                 reg_strength=1e-4, learning_rate=1e-3, batch_size=128, **kwargs):
        """
        Creates an instance of the FETA (First Evaluate Then Aggregate) object ranker. This class uses the FETA
        ranker from the csrank library.
        :param n_objects: Number of objects to be ranked (can be seen as upper bound as padding is used)
        :param n_object_features: Size of the feature vectors
        :param add_zeroth_order_model: If True, the (context-independent) 0-order model is taken into account.
        Default: False
        :param n_hidden: Number of hidden layers. Default: 2
        :param n_units: Number of hidden units. Default: 8
        :param reg_strength: Regularization strength of the regularize function. Default: 1e-4
        :param learning_rate: Learning rate used for training. Default: 1e-3
        :param batch_size: Batch size used for training. Default: 128
        :param kwargs: Keyword arguments
        """
        self.model = None
        self.n_objects = n_objects
        self.n_object_features = n_object_features
        # Underlying model is Keras-based, not a torch module.
        self.torch_model = False
        self.trainable = True
        self.device = get_device()
        self.logger = logging.getLogger(FETARanker.__name__)
        # Tunable parameters
        self.add_zeroth_order_model = add_zeroth_order_model
        self.n_hidden = n_hidden
        self.n_units = n_units
        self.reg_strength = reg_strength
        self.learning_rate = learning_rate
        self.batch_size = batch_size
        self._construct_model()

    def _construct_model(self):
        """
        Constructs the FETA model from the current attribute values.
        """
        self.logger.info("Construct model..")
        self.model = FETAObjectRanker(self.n_objects, self.n_object_features,
                                      add_zeroth_order_model=self.add_zeroth_order_model)
        self.model.set_tunable_parameters(n_hidden=self.n_hidden, n_units=self.n_units, reg_strength=self.reg_strength,
                                          learning_rate=self.learning_rate, batch_size=self.batch_size)
        self.logger.info("Finished constructing model")

    def fit(self, X, Y, **kwargs):
        """
        Fits the model on the given data.
        :param X: Examples
        :param Y: Ground truth data
        :param kwargs: Keyword arguments forwarded to the csrank fit
        """
        # Turn PyTorch tensors into numpy arrays first; csrank expects numpy.
        X = np.array(X)
        Y = np.array(Y)
        self.model.fit(X, Y, **kwargs)

    def predict_scores(self, object_feature_vectors, **kwargs):
        """
        Predict utility scores for object ranking for the given feature vectors.
        :param object_feature_vectors: Object feature vectors
        :param kwargs: Keyword arguments (unused)
        :return: Utility scores as a torch tensor on this ranker's device
        """
        # Turn PyTorch tensors into a numpy array first
        fv = np.array(object_feature_vectors)
        return torch.tensor(self.model.predict_scores(fv), device=self.device)

    def set_tunable_parameters(self, add_zeroth_order_model=False, n_hidden=2, n_units=8,
                               reg_strength=1e-4, learning_rate=1e-3, batch_size=128):
        """
        Set the tunable parameters for this model and rebuild it.
        :param add_zeroth_order_model: If True, the (context-independent) 0-order model is taken into account.
        Default: False
        :param n_hidden: Number of hidden layers. Default: 2
        :param n_units: Number of hidden units. Default: 8
        :param reg_strength: Regularization strength of the regularize function. Default: 1e-4
        :param learning_rate: Learning rate used for training. Default: 1e-3
        :param batch_size: Batch size used for training. Default: 128
        :return:
        """
        self.add_zeroth_order_model = add_zeroth_order_model
        self.n_hidden = n_hidden
        self.n_units = n_units
        self.reg_strength = reg_strength
        self.learning_rate = learning_rate
        self.batch_size = batch_size
        self._construct_model()

    def get_trainer(self):
        """
        Returns a trainer type for this model.
        :return: Trainer type for this model
        """
        return ObjectRankerTrainer

    def set_n_object_features(self, n_object_features):
        """Update the feature-vector size and rebuild the model."""
        self.n_object_features = n_object_features
        # Reconstruct model as number of features might have changed
        self._construct_model()
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @File    : rest01.py
# @Author  : CHIN
# @Time    : 2021-01-24
# Minimal Flask demo API with HTTP basic auth and JSON error responses.
from flask import Flask,make_response,jsonify,abort,request
from flask_restful import Api,Resource
from flask_httpauth import HTTPBasicAuth

# Application, REST extension and basic-auth singletons.
app = Flask(__name__)
api = Api(app=app)
auth = HTTPBasicAuth()
@auth.get_password
def get_password(name):
    """Return the password for ``name``; only user 'chin' is known.

    Returning None makes flask-httpauth reject the credentials.
    """
    known_users = {'chin': 'admin'}
    return known_users.get(name)
@auth.error_handler
def authorized():
    """401 handler: ask the client to authenticate (message in Chinese)."""
    body = jsonify({'message':'请认证'})
    return make_response(body, 401)
# NOTE(review): all three handlers below share the name ``not_found``; each
# def shadows the previous one at module level.  Flask keeps the registered
# functions alive through the decorators, so routing still works, but the
# functions should be given distinct names.
@app.errorhandler(404)
def not_found(error):
    # Requested page does not exist (message in Chinese).
    return make_response(jsonify({'status':404,'error':'访问页面不存在'}),404)

@app.errorhandler(405)
def not_found(error):
    # Method not allowed; the endpoint only supports GET (message in Chinese).
    return make_response(jsonify({'status':405,'error':'该方法只支持GET请求'}),405)

@app.errorhandler(500)
def not_found(error):
    # Internal server error (message in Chinese: "please wait, being fixed").
    return make_response(jsonify({'status':500,'error':'请耐心等待,服务正在修复中'}),500)
# In-memory "database" of books served by the endpoints below.
books = [
    {'id':1,'author':'tester','name':'Python接口自动化测试实战','done':True},
    {'id':2,'author':'测试菌','name':'Python接口自动化测试入门','done':False},
    {'id':3,'author':'chin','name':'Selenium3自动化测试实战','done':True},
    {'id':4,'author':'陈','name':'Selenium3自动化测试入门','done':False}
]
@app.route('/v1/api/books',methods=['GET'])
def get_books():
    """Return the full in-memory book list as a JSON array."""
    return jsonify(books)
@app.route('/v1/api/books',methods=['POST'])
def create_books():
    """Append a new book built from the JSON request body.

    Responds 400 when the body is not JSON; on success returns the
    confirmation message with HTTP status 201.
    """
    if not request.json:
        abort(400)
    book = {
        'id': books[-1]['id'] + 1,
        'author': request.json.get('author'),
        'name': request.json.get('name'),
        'done': True
    }
    books.append(book)
    # Return a (body, status) tuple.  The original passed 201 INTO jsonify,
    # which embedded the number in the JSON payload and still answered 200.
    return jsonify({'message':'添加成功'}), 201
# Start the development server on port 8000 when run as a script.
if __name__ == '__main__':
    app.run(debug=True,port=8000)
import asyncio
import time
import os
import requests
def fetch(url):
    """Perform a blocking GET of ``url``; return its status code and timing."""
    started = time.monotonic()
    response = requests.get(url)
    elapsed = time.monotonic() - started
    return {"status_code": response.status_code, "request_time": elapsed}
async def worker(name, queue, results):
    """Consume URLs from ``queue`` forever, fetching each in a thread.

    Each blocking ``fetch`` call is pushed onto the default executor so the
    event loop stays responsive; results are appended to ``results``.
    Runs until cancelled by the caller.

    :param name: label used for DEBUG logging
    :param queue: asyncio.Queue of URLs to request
    :param results: shared list receiving one result dict per request
    """
    # get_running_loop() is the supported call inside a coroutine; the
    # original's get_event_loop() here is deprecated since Python 3.10.
    cur_loop = asyncio.get_running_loop()
    while True:
        # Wait for a new request to become available.
        url = await queue.get()
        if os.getenv("DEBUG"):
            print(f"{name} fetching {url}")
        # Run the blocking fetch in the default thread-pool executor.
        result = await cur_loop.run_in_executor(None, fetch, url)
        results.append(result)
        # Lets queue.join() in the producer know this item is finished.
        queue.task_done()
async def distribute_work(url, req_numb, concurrency, results):
    """
    Create the queue with the requests to be done, create tasks and call
    workers to implement the tasks.

    Returns the wall-clock seconds taken to drain the whole queue; fetch
    results are appended to ``results`` by the workers.
    """
    # Queue holding one entry per request still to be made.
    queue = asyncio.Queue()
    # Add an item to the queue for every request that we need to make.
    for _ in range(req_numb):
        queue.put_nowait(url)
    # Spawn 'concurrency' workers; each pulls URLs until cancelled.
    tasks = []
    for i in range(concurrency):
        task = asyncio.create_task(worker(f"worker-{i+1}", queue, results))
        tasks.append(task)
    start_time = time.monotonic()
    # queue.join() returns once every queued item has been task_done()'d.
    await queue.join()
    stop_time = time.monotonic()
    total_time = stop_time - start_time
    # Workers loop forever, so cancel them explicitly once the queue drains.
    for task in tasks:
        task.cancel()
    return total_time
def assault(url, requests, concurrency):
    """Entrypoint for the CLI: fire ``requests`` GETs at ``url``.

    Returns (total_time, results).  NOTE: the ``requests`` parameter shadows
    the requests library inside this function (kept for interface
    compatibility).
    """
    collected = []
    # Drive the async pipeline from synchronous code.
    elapsed = asyncio.run(distribute_work(url, requests, concurrency, collected))
    return elapsed, collected
|
# coding: utf-8
from dext.common.meta_relations import objects as meta_relations_objects
class MetaType(meta_relations_objects.MetaType):
    """Project-level meta-relation type stub; subclasses fill in details."""
    __slots__ = ()

    # Human-readable caption of the type; subclasses must override.
    TYPE_CAPTION = NotImplemented

    def __init__(self, **kwargs):
        super(MetaType, self).__init__(**kwargs)

    # NOTE(review): placement reconstructed from an indentation-stripped
    # source — these look like class-level placeholders that concrete
    # subclasses override; confirm they are not meant to be instance
    # attributes assigned in __init__.
    caption = NotImplemented
    url = NotImplemented
class MetaRelation(meta_relations_objects.MetaRelation):
    """Concrete relation class; inherits all behavior from the dext base."""
    pass
|
##################################################################################
# Written/Modified by Karl Tomecek 05/31/2019 #
# Program Name: server.py #
# Week 4 Assignment #
# Comments: Modeled from code retrieved from #
# https://stackoverflow.com/questions/7749341/basic-python-client-socket-example #
# #
##################################################################################
#Import Socket library
import socket
def clearScreen():
    """'Clear' the console by emitting 30 newlines (15 double-spaced lines)."""
    # One print call producing the same byte stream as fifteen print('\n')
    # calls: 29 newlines in the argument plus print's own trailing newline.
    print('\n' * 29)
def main():
    """Run a tiny TCP greeting server on localhost:9500.

    Answers 'Hi' to a literal 'Hello' and 'Goodbye' to anything else,
    handling one connection at a time, forever.
    """
    # 'Clear' the screen
    clearScreen()
    # Start listening for an input stream
    serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    serversocket.bind(('localhost', 9500))
    serversocket.listen(5)  # become a server socket, maximum 5 queued connections
    print("SERVER STARTED...LISTENING ON PORT 9500")
    while True:
        # Accept a connection on port 9500
        connection, address = serversocket.accept()
        try:
            # Wait for inbound data (up to 64 bytes)
            buf = connection.recv(64).decode("utf-8")
            if len(buf) > 0:
                if buf == "Hello":
                    print("Hi sent to client")
                    connection.send(bytes('Hi', 'UTF-8'))
                else:
                    # Anything other than Hello gets Goodbye
                    print("Goodbye sent to client")
                    connection.send(bytes('Goodbye', 'UTF-8'))
        finally:
            # The original never closed accepted sockets, leaking one file
            # descriptor per client connection.
            connection.close()
# Start the server (runs unconditionally, even on import; never returns).
main()
import re
from django.template import RequestContext
from django.http import HttpResponse, HttpResponseRedirect, Http404, HttpResponseNotFound
from django.shortcuts import render_to_response
from django.core.urlresolvers import reverse
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.core.mail import mail_admins, send_mail
from settings_devel import EMAIL_HOST_USER
# from django.contrib.sessions.models import Session
# from django.contrib.auth.backends import ModelBackend
@login_required
def login_test(request):
    """Render the home page; @login_required redirects anonymous users."""
    ctx = RequestContext(request)
    return render_to_response("home.html", context_instance=ctx)
def mylogout(request):
    """Log the current user out and confirm with a plain-text response."""
    logout(request)
    # Fixed typos in the user-facing message ("Loged"/"successfuly").
    return HttpResponse('Logged out successfully.')
def login_page(request):
    """Remember the post-login redirect target, then show the SSO page."""
    request.session["next"] = request.GET.get('next')
    ctx = RequestContext(request)
    return render_to_response("account_sso.html", context_instance=ctx)
# def openid_login(request, domain):
# ''' Leave for Not PopUp Window openid login'''
# if domain == 'company':
# url = 'https://www.google.com/accounts/o8/ud?hd=smalltreemedia.com&openid.ns=http://specs.openid.net/auth/2.0&openid.ns.pape=http://specs.openid.net/extensions/pape/1.0&openid.claimed_id=http://specs.openid.net/auth/2.0/identifier_select&openid.identity=http://specs.openid.net/auth/2.0/identifier_select&openid.return_to=http://localhost:8000/accounts/profile&openid.realm=http://localhost:8000/accounts/&openid.assoc_handle=ABSmpf6DNMw&openid.mode=checkid_setup&openid.ui.ns=http://specs.openid.net/extensions/ui/1.0&openid.ui.mode=popup&openid.ui.icon=true&openid.ns.ax=http://openid.net/srv/ax/1.0&openid.ax.mode=fetch_request&openid.ax.type.email=http://axschema.org/contact/email&openid.ax.type.firstname=http://axschema.org/namePerson/first&openid.ax.type.lastname=http://axschema.org/namePerson/last&openid.ax.required=email,firstname,lastname'
# else:
# url = 'https://www.google.com/accounts/o8/ud?openid.ns=http://specs.openid.net/auth/2.0&openid.ns.pape=http://specs.openid.net/extensions/pape/1.0&openid.claimed_id=http://specs.openid.net/auth/2.0/identifier_select&openid.identity=http://specs.openid.net/auth/2.0/identifier_select&openid.return_to=http://localhost:8000/accounts/profile&openid.realm=http://localhost:8000/accounts/&openid.assoc_handle=ABSmpf6DNMw&openid.mode=checkid_setup&openid.ui.ns=http://specs.openid.net/extensions/ui/1.0&openid.ui.mode=popup&openid.ui.icon=true&openid.ns.ax=http://openid.net/srv/ax/1.0&openid.ax.mode=fetch_request&openid.ax.type.email=http://axschema.org/contact/email&openid.ax.type.firstname=http://axschema.org/namePerson/first&openid.ax.type.lastname=http://axschema.org/namePerson/last&openid.ax.required=email,firstname,lastname'
# return HttpResponseRedirect(url)
def popup_login_return(request):
    """Render the page that closes the OpenID login popup window."""
    ctx = RequestContext(request)
    return render_to_response("popup_return.html", context_instance=ctx)
def show_profile(request):
    """OpenID return endpoint: log in (or provision) the OpenID user.

    Reads the openid.ext1.* attribute-exchange values from the querystring.
    Existing active users are logged in and greeted; unknown users get an
    inactive account created and are redirected to the profile-message page.
    """
    if request.GET.get('openid.mode') == 'cancel':
        # Provider reported a cancelled/failed authentication.
        return HttpResponse('Some Error Happened!')
    elif request.GET.get('openid.ext1.value.email') and request.GET.get('openid.ext1.mode')=='fetch_response':
        firstname = request.GET.get('openid.ext1.value.firstname')
        lastname = request.GET.get('openid.ext1.value.lastname')
        email = request.GET.get('openid.ext1.value.email')
        next = request.session['next']
        # Derive a username from the email local part: strip dots and drop
        # any '+tag' suffix.
        username = re.split('@', email)[0]
        username = re.sub('\.', '', username)  # NOTE(review): prefer r'\.'
        username = username.partition('+')[0]
        request.session["username"] = username
        if User.objects.filter(username=username).exists():
            # Keep the stored email in sync with the OpenID response.
            if User.objects.filter(username=username, email=email).exists():
                pass
            else:
                User.objects.filter(username=username).update(email=email)
            user = User.objects.get(username=username)
            # Bypass authenticate(): the OpenID provider already vouched
            # for this identity.
            user.backend = 'django.contrib.auth.backends.ModelBackend'
            if user.is_active:
                login(request, user)
                results = {
                    "jump_url": next,
                    "username": username,
                }
                return render_to_response("welcome.html", results,
                                          context_instance=RequestContext(request))
            else:
                results = {
                    "username": username,
                    "msg": "Your account is NOT active, please contact System Admin for support "
                }
                return render_to_response("welcome.html", results,
                                          context_instance=RequestContext(request))
        else:
            # First visit: create an inactive account awaiting admin approval.
            UserObj = User.objects.create(first_name=firstname, last_name=lastname, email=email, is_active=False, username=username, password='')
            return HttpResponseRedirect(reverse("profile_msg"))
    else:
        # Not a recognizable OpenID response.
        return HttpResponseNotFound()
@csrf_exempt
def leave_msg(request):
    """GET: show the leave-a-message page.  POST: email admins the message.

    The POST branch emails the site admins a permission-assignment link for
    the newly created (inactive) user.
    """
    if request.method == 'GET':
        var = {
            'msg': 'You have logged in with Openid.'
        }
        return render_to_response("leave_msg.html", var,
                                  context_instance=RequestContext(request))
    else:
        # Username was stashed in the session by show_profile().
        username = request.session['username']
        user = User.objects.get(username=username)
        content = request.POST['content']
        # NOTE(review): the "assgin" typos below are in runtime strings, so
        # they are preserved here; fixing them would change the email text.
        msg = '<p>System Email. Please approve this guy and assgin proper Permissions for new user: %s.</p>' % username
        content = '<p>Leave a message with you: %s</p>' % content
        active_link = '<p>Permission Assgin Link Address: <a href="http://127.0.0.1:8000/admin/auth/user/%s">Assgin Permission</a></p>' % user.id
        mail_admins('A New guy is Coming', 'why message here not be sent', html_message = msg+content+active_link)
        return HttpResponse('email & comment success')
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 22/07/2014
@author: HP
'''
from Postresql import Database
from Fichero import Fichero
def crear_tabla_noticias(db):
    """Create the "Noticias" (news) table: id, title, body, date, author."""
    columnas = ["ID", "Titulo", "Noticia", "Fecha", "Autor"]
    tipos = ["INT PRIMARY KEY", "CHAR(64)", "TEXT", "CHAR(32)", "CHAR(32)"]
    # 1 = NOT NULL, 0 = nullable (date and author are optional).
    obligatorios = [1, 1, 1, 0, 0]
    db.crear_tabla("Noticias", columnas, tipos, obligatorios)
def agregar_noticia(Id, Titulo, Noticia, Fecha, Autor, db):
    """Insert one news row into the "Noticias" table."""
    db.insertar_datos(
        "Noticias",
        ["ID", "Titulo", "Noticia", "Fecha", "Autor"],
        [Id, Titulo, Noticia, Fecha, Autor],
    )
def crear_tabla_jugadores(db):
    """Create the "Jugadores" (players) table keyed by username."""
    columnas = ["ID", "Username", "Password", "Nombre", "Email", "Nivel",
                "Experiencia", "Gold", "Silver", "Deck", "Coleccion", "Rank"]
    tipos = ["CHAR(128) ", "CHAR(32) PRIMARY KEY", "CHAR(32)", "CHAR(32)",
             "CHAR(32)", "INT", "INT", "INT", "INT", "TEXT", "TEXT", "INT"]
    # 1 = NOT NULL, 0 = nullable (deck and collection start empty).
    obligatorios = [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1]
    db.crear_tabla("Jugadores", columnas, tipos, obligatorios)
def agregar_jugador(Nombre, Username, Email, Password, db):
    """Insert a new player row with starting stats (level 1, 100 gold, 500 silver)."""
    def cifrado(texto):
        # Derive a numeric id by concatenating each character's code point.
        return ''.join(str(ord(caracter)) for caracter in texto)

    columnas = ["ID", "Username", "Password", "Nombre", "Email",
                "Nivel", "Experiencia", "Gold", "Silver", "Rank"]
    valores = [cifrado(Username), Username, Password, Nombre, Email,
               1, 0, 100, 500, 0]
    db.insertar_datos("Jugadores", columnas, valores)
if __name__ == '__main__':
    # Bootstrap script: read DB settings from configuracion.txt, create the
    # tables, and seed demo news/player data.
    print "Esto ha iniciado"
    # NOTE(review): rebinding the class name `Fichero` to an instance shadows
    # the imported class for the rest of this block.
    Fichero = Fichero()
    puerto = Fichero.buscar_valor("configuracion.txt", "DBPORT")
    nombredb = Fichero.buscar_valor("configuracion.txt", "DBNAME")
    usuario = Fichero.buscar_valor("configuracion.txt", "DBUSER")
    password = Fichero.buscar_valor("configuracion.txt", "DBPASS")
    db = Database()
    db.crear_conexion(puerto, nombredb, usuario, password)
    crear_tabla_noticias(db)
    # Seed one news row; the values carry their own SQL quoting.
    Id = "1"
    Titulo = "'Arranque del desarrollo de Loss Of Heart'"
    Noticia = "'Es un gusto informar que a la fecha el avance en el proyecto del MMOTCG es un exito, ya contamos con la colaboracion de varios amigos interesados en el desarrollo del mismo y dia a dia avanzamos enormemente en la produccion del mismo'"
    Fecha = "'28/08/2014'"
    Autor = "'Erichris'"
    agregar_noticia(Id, Titulo, Noticia, Fecha, Autor, db)
    crear_tabla_jugadores(db)
    agregar_jugador("'Eric Christian Bernstorff Corona'", "'erichris'", "'Erichris@live.com.mx'", "'annieteamo1'", db)
    agregar_jugador("'Andrea Sanchez Martinez'", "'annieZa'", "'AnnieZa@live.com.mx'", "'tkieromuxho1'", db)
    print "Exito"
from setup_django import *
import os
import shutil
def check(ds):
    # Placeholder hook for per-dataset validation; intentionally a no-op
    # (not called anywhere in this file).
    pass
if __name__ == "__main__":
    # IDs queued for republication, and IDs already known to fail timeseries checks.
    with open('datasets_in_psql_to_republish') as r:
        datasets = [line.strip() for line in r]
    with open('timeseries_error_dataset_ids') as r:
        ts_errors = [line.strip() for line in r]
    for dsid in datasets:
        # Fixed-frequency ("fx") datasets are out of scope.
        if "fx" in dsid:
            continue
        # Match on the middle facets of the id (drop activity prefix and version suffix).
        ds = Dataset.objects.filter(dataset_id__icontains='.'.join(dsid.split('.')[1:-1])).exclude(version='v20181201').first()
        if not ds:
            continue
        # NOTE(review): `ds` is a Dataset object while ts_errors holds id strings,
        # so this membership test can never be true — probably meant `dsid in ts_errors`.
        if ds in ts_errors:
            continue
        for df in ds.datafile_set.all():
            # "ERROR (4)" entries are treated as benign and excluded.
            errors = df.qcerror_set.exclude(error_msg__icontains='ERROR (4)')
            if not len(errors) == 0:
                print "datafile errors",
                for e in errors:
                    print e.error_msg
                with open('datasets_with_datafile_errors', 'a+') as w:
                    w.writelines(["{}\n".format(dsid)])
                continue
    # ds_errors = ds.qcerror_set.all()
    #
    # if not len(ds_errors) == 0:
    #     print "dataset errors", ds, ds_errors, len(ds_errors)
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.shortcuts import render
from accounting.models import *
from panel.forms import *
@login_required
def panel(request):
    """Render the panel page for the authenticated user.

    GET returns panel.html with the user's Profile (or None if absent).
    POST handling is not implemented yet; it implicitly returns None,
    which Django reports as an error.
    """
    if request.method == 'GET':
        # Fix: the original fetched the same user/profile pair twice in a
        # row, doubling the database query. One lookup is enough.
        user = request.user
        profile = Profile.objects.filter(user=user).first()
        return render(request, 'panel.html', {"profile": profile})
    else:
        # post request
        pass
    # reload the inputs to profile
    # todo read inputs and add image inputs to template
def get_profile(request):
    """GET: show the profile form (prefilled when a Profile exists).

    POST: validate and save the submitted form, then re-render it.
    """
    user = request.user
    profile = Profile.objects.filter(user=user).first()
    if request.method == 'GET':
        if profile:
            print("before from creation ***")
            form = ProfileForm(instance=profile)
            print("after from creation ***")
            return render(request, 'profile.html', {'form': form, 'profile': profile})
        else:
            # No profile yet: present a blank form bound to a fresh instance.
            form = ProfileForm(instance=Profile())
            return render(request, 'profile.html', {'form': form})
    else:
        form = ProfileForm(request.POST, instance=profile)
        print(form.is_valid(), "**In post **")
        if form.is_valid():
            # save image
            form.save()
        # NOTE(review): invalid forms fall through and re-render with their
        # error messages — confirm this was the intended indentation.
        return render(request, 'profile.html', {'form': form, 'profile': profile})
# def sign_up(request):
# user = request.user
# if not user.is_authenticated:
# profile=Profile.objects.(user=user).first()
# if profile:
# form=ProfileForm(instance=profile)
# print(form,"****")
#
#
# return render(request, 'panel.html')
|
# Demonstrates `break`/`continue` inside a loop, plus the rounding and
# division operators and the math ceil/floor helpers.
import math

cars = ["ok", "ok", "ok", "faulty", "ok", "ok", "ok"]
for car_state in cars:
    if car_state != "faulty":
        print(f"This car is {car_state}")
        continue
    # Skip faulty cars instead of halting the whole production line.
    print("Found Faulty Car, Skipping...")
    # print("Stopping the Production Line!!!")
    # break
print("Shipping new car to the customer.")

"""_x,_y,_n = map(int,input().split())
for number in range(1,_n+1):
    if number%_x == 0 and number%_y == 0:
        print("FizzBuzz")
    elif number%_x == 0:
        print("Fizz")
    elif number%_y == 0:
        print("Buzz")
    else:
        print(number)"""

input_1 = 4.8
input_2 = 8
cal = input_2 / input_1
cal2 = input_2 // input_1
print(f"After rounding: {round(cal)}")
print(f"Cal is as is it: {cal}")
print(f"As integer division: {cal2}")
print(f"After using ceil function:{math.ceil(cal)}")
print(f"After using floor function: {math.floor(cal)}")
print(f"Integer Division: {12//3}")
print(f"Floating Division: {12/3}")
|
# Beginner classification demo on the Graduate Admission dataset: plot the
# data, train a classifier, report test accuracy, then score one
# user-entered (TOEFL score, chance of admission) point.
import plotly.express as px
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
import pandas as pd

df = pd.read_csv('Admission_Predict.csv')
toefl_score = df['TOEFL Score'].tolist()
result = df['GRE Score'].tolist()
fig = px.scatter(x = toefl_score, y = result)
fig.show()

import plotly.graph_objects as go
toefl_scores = df['TOEFL Score'].tolist()
chance_of_admission = df['Chance of admit'].tolist()
result = df['GRE Score'].tolist()
# Colour points by the target value (assumes a binary 0/1 target — TODO confirm
# 'GRE Score' really holds class labels and not raw scores).
colors = []
for data in result:
    if data == 1:
        colors.append('green')
    else:
        colors.append('red')
fig = go.Figure(data = go.Scatter(
    x = toefl_scores,
    y = chance_of_admission,
    mode = 'markers',
    marker = dict(color = colors)
))
fig.show()

factors = df[['TOEFL Score', 'Chance of admit']]
results = df['GRE Score']
toefl_train, toefl_test, results_train, results_test = train_test_split(factors, results, test_size = 0.25, random_state = 0)
print(toefl_train[0:10])

sc_x = StandardScaler()
toefl_train = sc_x.fit_transform(toefl_train)
# Fix: scale the test set with the *training* statistics (transform), not
# freshly fitted ones (fit_transform) — the latter leaks test information.
toefl_test = sc_x.transform(toefl_test)
print(toefl_train[0:10])

# Fix: the original used `classifier` without ever defining it; train one here.
classifier = LogisticRegression(random_state = 0)
classifier.fit(toefl_train, results_train)

results_pred = classifier.predict(toefl_test)
print ("Accuracy : ", accuracy_score(results_test, results_pred))

user_score = int(input('Enter the score of the user:'))
user_chance_of_admission = int(input('Enter the chances of admission of the user:'))
user_test = sc_x.transform([[user_score, user_chance_of_admission]])
user_results_pred = classifier.predict(user_test)
# Fix: was `user_purchase_pred`, an undefined name copied from another script.
if user_results_pred[0] == 1:
    print('The user may get admission.')
else:
    print('The user may not get admission.')
# -*- coding: UTF-8 -*-
from django.conf.urls.defaults import *
from ebook import models
from ebook import views
# URL routes for the ebook/product app (old-style Django patterns()).
urlpatterns = patterns('',
    url(r'^$', views.index, name='product_idx'), #timeline hot list
    url(r'^hot/$', views.hot, name='product_hot'),
    #timeline recommend list
    url(r'^recommend/$', views.recommend, name='product_recommend'),
    #timeline last list
    url(r'^last/$', views.last, name='product_last'),
    #timeline random list
    url(r'^random/$', views.random, name='product_random'),
    #timeline all tags list
    #url(r'^tags/$', views.tags, name='bookmark_tags'),
    url(r'^pd/new/$', views.new, name='product_new'),
    url(r'^tag/(?P<tag_name>[^/]+)/$', views.tag, name='product_tag'),
    url(r'^pd/(?P<pk>\d+)/delete/$', views.delete, name='product_delete'),
    #url(r'^bk/(?P<pk>\d+)/postcomment_/$', views.postcomment_, name='product_postcomment_'),
    url(r'^pd/(?P<pk>\d+)/$', views.detail, name='product_detail'),
    url(r'^pd/(?P<pk>\d+)/edit/$', views.edit, name='product_edit'),
    url(r'^tags/$', views.tags, name='product_tags'),
    # NOTE(review): duplicate of the 'product_tag' route above — the first
    # match wins, so this entry is dead.
    url(r'^tag/(?P<tag_name>[^/]+)/$', views.tag, name='product_tag'),
)
|
import tensorflow as tf
import functools
import numpy as np
def lazy_property(function):
    """Decorator turning a method into a property computed once per instance.

    The first access calls *function* and stores the result under
    ``_<name>`` on the instance; later accesses return the cached value.
    """
    cache_name = '_' + function.__name__

    @property
    @functools.wraps(function)
    def wrapper(self):
        if not hasattr(self, cache_name):
            setattr(self, cache_name, function(self))
        return getattr(self, cache_name)

    return wrapper
class RNN_Model(object):
@lazy_property
def length(self):
length = tf.reduce_sum(self.mask_x, reduction_indices=1)
length = tf.cast(length, tf.int32)
return length
@staticmethod
def _last_relevant(output, length):
batch_size = tf.shape(output)[0]
max_length = int(output.get_shape()[1])
output_size = int(output.get_shape()[2])
##get last time steps output , the last time steps is not padding token
index = tf.range(0, batch_size) * max_length + (length - 1)
flat = tf.reshape(output, [-1, output_size])
relevant = tf.gather(flat, index)
return relevant
def __init__(self, config, embedding_mat, is_training=True):
self.keep_prob = config.keep_prob
self.batch_size = tf.Variable(0, dtype=tf.int32, trainable=False)
num_step = config.num_step
self.input_data = tf.placeholder(tf.int32, [None, num_step])
self.target = tf.placeholder(tf.int64, [None])
self.mask_x = tf.placeholder(tf.int32, [None, num_step])
#self.keep_prob = tf.placeholder(tf.float32)
self.regs = None
class_num = config.class_num
hidden_neural_size = config.hidden_neural_size
vocabulary_size = config.vocabulary_size
embed_dim = config.embed_dim
hidden_layer_num = config.hidden_layer_num
self.new_batch_size = tf.placeholder(tf.int32,shape=[],name="new_batch_size")
self._batch_size_update = tf.assign(self.batch_size,self.new_batch_size)
#build LSTM network
#lstm_cell = tf.contrib.rnn.BasicLSTMCell(hidden_neural_size, forget_bias=0.0, state_is_tuple=True)
#lstm_cell = tf.contrib.rnn.BasicLSTMCell(hidden_neural_size)
lstm_cell = tf.contrib.rnn.GRUCell(hidden_neural_size)
if self.keep_prob<1:
lstm_cell = tf.contrib.rnn.DropoutWrapper(
lstm_cell, output_keep_prob=self.keep_prob
#lstm_cell, input_keep_prob=self.keep_prob, output_keep_prob=self.keep_prob
)
#cell = tf.contrib.rnn.MultiRNNCell([lstm_cell]*hidden_layer_num, state_is_tuple=True)
cell = tf.contrib.rnn.MultiRNNCell([lstm_cell]*hidden_layer_num)
self._initial_state = cell.zero_state(self.batch_size, dtype=tf.float32)
#embedding layer
with tf.device("/cpu:0"),tf.name_scope("embedding_layer"):
#embedding = tf.get_variable("embedding", [vocabulary_size, embed_dim], dtype=tf.float32)
embedding_mat = embedding_mat.astype(np.float32)
embedding = tf.get_variable("embedding",initializer=embedding_mat)
inputs = tf.nn.embedding_lookup(embedding, self.input_data)
self.regs = tf.nn.l2_loss(embedding)*0.001
print inputs.get_shape()
if self.keep_prob<1:
inputs = tf.nn.dropout(inputs,self.keep_prob)
# Recurrent network.
# output.shape = [batch_size, max_len, hidden_size]
output, _ = tf.nn.dynamic_rnn(
cell,
inputs,
dtype=tf.float32,
sequence_length=self.length
)
last = self._last_relevant(output, self.length)
with tf.name_scope("Softmax_layer_and_output"):
softmax_w = tf.get_variable("softmax_w", [hidden_neural_size, class_num], dtype=tf.float32)
softmax_b = tf.get_variable("softmax_b", [class_num], dtype=tf.float32)
self.logits = tf.matmul(last, softmax_w) + softmax_b
with tf.name_scope("loss"):
self.loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.target, logits=self.logits+1e-10)
self.loss = self.loss + self.regs
self.cost = tf.reduce_mean(self.loss)
with tf.name_scope("accuracy"):
self.prediction = tf.argmax(self.logits, 1)
correct_prediction = tf.equal(self.prediction, self.target)
self.correct_num = tf.reduce_sum(tf.cast(correct_prediction, tf.float32))
self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name="accuracy")
self.probs = tf.nn.softmax(self.logits)
if not is_training:
return
self.globle_step = tf.Variable(0,name="globle_step",trainable=False)
self.lr = tf.Variable(0.0,trainable=False)
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(self.cost, tvars),
config.max_grad_norm)
#optimizer = tf.train.GradientDescentOptimizer(self.lr)
optimizer = tf.train.AdamOptimizer(self.lr)
optimizer.apply_gradients(zip(grads, tvars))
self.train_op = optimizer.apply_gradients(zip(grads, tvars))
self.new_lr = tf.placeholder(tf.float32,shape=[],name="new_learning_rate")
self._lr_update = tf.assign(self.lr,self.new_lr)
def assign_new_lr(self, session, lr_value):
session.run(self._lr_update, feed_dict={self.new_lr:lr_value})
def assign_new_batch_size(self, session, batch_size_value):
session.run(self._batch_size_update, feed_dict={self.new_batch_size:batch_size_value})
|
#!/usr/bin/env python2
from ddnet import *
# (IP, port, display hostname) tuples for each DDRace server to poll.
serverAddresses = [
    ("62.173.150.210", 8303, "ddrace.tk")
    , ("62.173.150.210", 8304, "ddrace.tk")
    , ("62.173.150.210", 8305, "ddrace.tk")
    , ("62.173.150.210", 8306, "ddrace.tk")
]
# Report status for player "KOnATbl4" across the servers (printStatus comes
# from the ddnet helper module).
printStatus("KOnATbl4", [], serverAddresses, True)
|
import numpy as np
import struct
from ckc.utils import ckc_params
from prospect.sources import StarBasis
def write_binary(z, logg, logt, sps, outroot='test', zsolar=0.0134, **extras):
    """Convert a *flat* hdf5 spectral data file to the binary format
    appropriate for FSPS, interpolating the hdf spectra to target points.

    One 4-byte little-endian-native float per flux value is written, looping
    over logg (outer) then logt (inner). Grid points that cannot be built
    are written as zeros.

    Fix: the output file is now opened with a context manager so the handle
    is closed even if spectrum generation or packing raises.
    """
    params = {'feh': np.log10(z/zsolar)}
    # Write the spectral file
    name = '{0}_z{1:6.4f}.spectra.bin'.format(outroot, z)
    with open(name, 'wb') as outfile:
        for g in logg:
            for t in logt:
                params['logg'] = g
                params['logt'] = t
                try:
                    w, spec, _ = sps.get_star_spectrum(**params)
                except(ValueError):
                    # Off-grid point: log it and emit a zero spectrum.
                    print('Could not build spectrum for {}'.format(params))
                    spec = np.zeros(len(sps.wavelengths))
                for flux in spec:
                    outfile.write(struct.pack('f', flux))
    return None
def write_all_binaries(zlist=(), outroot='test', sps=None, **kwargs):
    """Write one FSPS spectra binary per metallicity in *zlist*, plus the
    shared .lambda wavelength file and a zlegend file listing the Z values.

    Fixes: files are opened via context managers so handles are closed even
    when a write fails, and the mutable default ``zlist=[]`` is replaced by
    an (equivalently iterable) immutable tuple.
    """
    _, logg, logt = ckc_params()
    # Write the wavelength file
    with open('{}.lambda'.format(outroot), 'w') as wfile:
        for wave in sps.wavelengths:
            wfile.write('{}\n'.format(wave))
    # Loop over Z, writing a binary for each Z and writing zlegend
    with open('{}_zlegend.dat'.format(outroot), 'w') as zfile:
        for z in zlist:
            write_binary(z, logg, logt, sps, outroot=outroot, **kwargs)
            zfile.write('{:6.4f}\n'.format(z))
    return None
if __name__ == "__main__":
    # Build a StarBasis interpolator over the MaNGA-resolution C3K library
    # and dump FSPS-format binaries for each requested metallicity.
    runparams = {'verbose': True,
                 # Interpolator
                 'libname':'lores/manga/c3k+dMall_manga-sigma50kms.h5',
                 'in_memory': False,
                 'use_params': ['logt', 'logg', 'feh'],
                 'logify_Z': False,
                 # Z definition
                 'zsolar': 0.0134,
                 'zlist': np.array([-2.0, -1.5, -1.0, -0.5, 0.0, 0.5]),
                 # Output
                 'outroot': 'lores/manga/c3kdM_manga'
                 }
    # Convert the [Z/Zsun] exponents above into absolute metallicities.
    runparams['zlist'] = runparams['zsolar'] * 10**runparams['zlist']
    sps = StarBasis(**runparams)
    write_all_binaries(sps=sps, **runparams)
|
import json
from typing import List, Tuple
import boto3
from botocore.client import BaseClient
from logger.decorator import lambda_auto_logging
from logger.my_logger import MyLogger
from utils.lambda_tool import get_environ_values
from utils.s3_tool import (
create_key_of_eorzea_database_merged_item,
create_key_of_irregular_data,
create_key_of_match_data,
create_key_of_xivapi_merged_item,
)
environ_names = ["TMP_DATA_BUCKET_NAME"]
logger = MyLogger(__name__)
@lambda_auto_logging(*environ_names)
def handler(event, context):
    """Lambda entry point; delegates to main() (context is unused)."""
    main(event)
def main(event: dict, s3_client: BaseClient = boto3.client("s3")):
    """Join the merged Eorzea-database and XIVAPI item dumps for one run.

    Reads both JSON blobs from the tmp bucket, matches them by English name,
    writes the matches back, and stores + logs any irregular (zero or
    multiple match) records separately.

    NOTE(review): the default boto3 client is created at import time and
    shared across invocations — confirm this is intended.
    """
    (tmp_data_bucket_name,) = get_environ_values(environ_names)
    process_id = get_process_id_from_event(event)
    key_of_eorzea_database = create_key_of_eorzea_database_merged_item(process_id)
    key_of_xivapi = create_key_of_xivapi_merged_item(process_id)
    eorzea_database = get_s3_data(
        tmp_data_bucket_name, key_of_eorzea_database, s3_client
    )
    xivapi = get_s3_data(tmp_data_bucket_name, key_of_xivapi, s3_client)
    match, irregular = exec_matching(eorzea_database, xivapi)
    key_of_match = create_key_of_match_data(process_id)
    put_s3_data(tmp_data_bucket_name, key_of_match, match, s3_client)
    if len(irregular) > 0:
        key_of_irregular = create_key_of_irregular_data(process_id)
        put_s3_data(tmp_data_bucket_name, key_of_irregular, irregular, s3_client)
        # Error level so irregular items surface in monitoring.
        logger.error("has irregular data", count=len(irregular))
def get_process_id_from_event(event: dict) -> str:
    """Extract the pipeline process id carried in the Lambda event payload."""
    process_id = event["id"]
    return process_id
def get_s3_data(bucket_name: str, key: str, s3_client: BaseClient) -> List[dict]:
    """Fetch an S3 object and parse its body as JSON (expected: list of dicts)."""
    option = {"Bucket": bucket_name, "Key": key}
    resp = s3_client.get_object(**option)
    return json.load(resp["Body"])
def exec_matching(
    eorzea_database: List[dict], xivapi: List[dict]
) -> Tuple[List[dict], List[dict]]:
    """Match Eorzea-database items to XIVAPI items by English name.

    Returns ``(match, irregular)``: *match* holds, for each name with exactly
    one XIVAPI hit, the XIVAPI record merged with its EorzeaDatabaseId;
    *irregular* records names with zero or multiple hits.

    Perf: a one-pass name index replaces the original per-item scan of
    *xivapi* (O(n*m) -> O(n+m)); output contents and order are unchanged.
    """
    # Name -> list of XIVAPI records carrying that English name.
    xivapi_by_name = {}
    for api_item in xivapi:
        xivapi_by_name.setdefault(api_item["Name_en"], []).append(api_item)
    match = []
    irregular = []
    for item in eorzea_database:
        parsed = xivapi_by_name.get(item["name"], [])
        if len(parsed) == 1:
            api_item = parsed[0]
            match.append({**api_item, **{"EorzeaDatabaseId": item["id"]}})
        else:
            irregular.append({"eorzea_database": item, "xivapi": parsed})
    return match, irregular
def put_s3_data(bucket_name: str, key: str, data: List[dict], s3_client: BaseClient):
    """Serialize *data* as UTF-8 JSON (non-ASCII preserved) and upload it to S3."""
    option = {
        "Bucket": bucket_name,
        "Key": key,
        "Body": json.dumps(data, ensure_ascii=False).encode(),
        "ContentType": "application/json",
    }
    s3_client.put_object(**option)
|
import matlab.engine
import os
import argparse
import sys
if __name__ == "__main__":
    # Parse CLI arguments, then run the Matlab geometric reconstruction.
    usage_text = (
        ""
        "python demo.py --rgb_img <path/to/rgb/image> --depth_img <path/to/depth/image> --correspondence_img <path/to/correspondence/image> --output_name output"
    )
    parser = argparse.ArgumentParser(description=usage_text)
    parser.add_argument('--rgb_img', dest='rgb_img', help='RGB source image', type=str)
    parser.add_argument('--depth_img', dest='depth_img', help='Depth source image', type=str)
    parser.add_argument('--correspondence_img', dest='correspondence_img', help='Correspondence source image', type=str)
    parser.add_argument('--output_name', dest='output_name', help='output_name', type=str)
    args = parser.parse_args()
    # Fix: all four arguments are required, so abort when ANY one is missing.
    # The original combined the checks with `and`, which only aborted when
    # every argument was absent (and its earlier `if not args` guard was dead
    # code — argparse Namespace objects are always truthy).
    if (not args.rgb_img or
            not args.depth_img or
            not args.correspondence_img or
            not args.output_name):
        print("Error: argument not given, aborting.")
        parser.print_help()
        sys.exit()
    for arg in vars(args):
        print('[%s] = ' % arg, getattr(args, arg))
    print("Starting Matlab engine.....")
    eng = matlab.engine.start_matlab("-nodisplay")
    print("Matlab engine started.....")
    print("Starting geometric reconstruction.....")
    eng.geometric_recons(args.rgb_img, args.correspondence_img, args.depth_img, args.output_name, nargout=0)
    print("Stopping Matlab engine.....")
    eng.quit()
    print("Matlab engine stopped.....")
|
import cherrypy
import device_db
class DeviceDataWebService(object):
    """CherryPy web service for registering and querying device records."""

    @cherrypy.tools.accept(media='application/json')
    @cherrypy.expose
    def index(self, brand=None, model=None, os=None, osVersion=None):
        """Register a device; returns an empty body when no brand is given.

        NOTE(review): an unrecognised `os` raises KeyError from the enum
        lookup — confirm whether a 400 response would be preferable.
        """
        if not brand:
            return ""
        return device_db.new_device_data(brand, model, device_db.OsType[os], osVersion)

    @cherrypy.expose(['devices'])
    # @cherrypy.tools.json_out() # not working with Enum for now
    def devices(self, brand=None, model=None, os=None, osVersion=None, page=None):
        """Query devices; every filter is optional, paging via `page`."""
        return device_db.query(brand=brand, model=model, os=device_db.OsType[os] if os else None, osVersion=osVersion, page=page)
if __name__ == '__main__':
    # Serve the API on CherryPy's default host/port (127.0.0.1:8080).
    cherrypy.quickstart(DeviceDataWebService())
|
from service_charge import ServiceCharge
class AddServiceCharge(object):
    """Transaction that posts a service charge against a union member's affiliation."""

    def __init__(self, memberId, date, amount, db):
        self.memberId = memberId
        self.date = date
        self.amount = amount
        self.db = db

    def execute(self):
        """Look up the member and record the charge on their affiliation."""
        charge = ServiceCharge(self.date, self.amount)
        member = self.db.get_union_member(self.memberId)
        member.affiliation.add_service_charge(self.date, charge)
|
import commands
from pymongo import MongoClient
import toml
import logging
import random
from random import randint
import re
import os
import time
import commands
import json
import pdb
import requests, json
import sys
import urllib
# NOTE(review): `global` at module level is a no-op; these names are already
# module globals.
global reg, enabled_value, apii, E_value,impr_1, impr_n
# Development config supplies the Mongo connection for the dsp database.
with open("/var/chandni-chowk/configs/app.development.toml") as conffile:
    config = toml.loads(conffile.read())
mongo_client = MongoClient(config["app"]["mongo_conn_str"])
db = mongo_client.dsp
# Test config selects the region mode (All_regions / One_by_One / multiple_reg).
with open("/var/chandni-chowk/configs/app.test.toml") as conffile1:
    config1 = toml.loads(conffile1.read())
reg = config1["app"]["regions"]
# Fire 500 randomized requests at the compute-media-package API and validate
# the reach figures in each response against the Mongo reference data.
for i in range(0, 500):
    def mul_random_regs():
        # Random non-empty subset of regions, joined as a comma-separated string.
        regions = ['Delhi NCR', 'Hyderabad', 'Bangalore', 'TN/Pondicherry', 'Kerala', 'Pun/Har/Cha/HP/J%26K',
                   'Uttar Pradesh', 'West Bengal', 'North East', 'Orissa', 'Jharkhand', 'Bihar', 'Maharashtra/Goa',
                   'Chhattisgarh', 'Rajasthan', 'Madhya Pradesh']
        reg_iter = random.sample(range(0, len(regions) - 1), random.randint(1, len(regions) - 1))
        current_regions = []
        for j in reg_iter:
            current_regions.append(regions[j])
        mul_ran_regs = ','.join(current_regions)
        return mul_ran_regs
    # Resolve the configured region mode into a concrete region string.
    if reg == "All_regions":
        reg = 'Delhi NCR,Hyderabad,Bangalore,Pun/Har/Cha/HP/J%26K,Maharashtra/Goa,West Bengal,North East,Orissa,Chhattisgarh,Rajasthan,Madhya Pradesh,Jharkhand,Gujarat,Bihar,Uttar Pradesh,TN/Pondicherry,Kerala'
    elif reg == "One_by_One":
        regions = ["National", "DTH", "Delhi NCR", "Hyderabad", "Bangalore", "TN/Pondicherry", "Kerala",
                   "Pun/Har/Cha/HP/J%26K", "Uttar Pradesh", "West Bengal", "North East", "Orissa", "Jharkhand", "Bihar",
                   "Maharashtra/Goa", "Chhattisgarh", "Rajasthan", "Madhya Pradesh"]
        reg = regions[random.randrange(len(regions))]
    elif reg == "multiple_reg":
        reg = mul_random_regs()
    ###Inputs needed: random gender / budget / duration / spot duration / profile.
    gen_a = ["Male,Female", "Male", "Female"]
    gen = gen_a[random.randrange(len(gen_a))]
    bud = randint(0, 500000)
    dur = randint(5, 99)
    spot_d = ["10", "15", "20", "25", "30"]
    spot_dur = spot_d[random.randrange(len(spot_d))]
    cat_prof_a = random.choice(list(open('cat_prof_map_CMPTEST.txt'))).rstrip()
    url = "http://localhost:2770/compute-media-package"
    api = str(
        url) + '?regions=' + mul_random_regs() + '&date=21-10-16&gender=' + gen + '&sub_category=' + cat_prof_a + '&duration=' + str(
        dur) + '&spot_duration=' + str(spot_dur) + '&budgets=' + str(bud) + '&user_id=radhika@amagi.com'
    # NOTE(review): `re` here shadows the imported `re` regex module.
    re = requests.get(api)
    if (re.status_code != 200):
        # print re.status_code, ',',
        continue
    else:
        re_t = re.text
        data = json.loads(re_t)
        #### Given gender (recovered from the request URL)
        start = 'gender='
        end = '&sub_category'
        gender_in = api[api.find(start) + len(start):api.rfind(end)]
        # print gender_in
        #### Given spot_duration
        start = 'spot_duration='
        end = '&budgets'
        spot_duration_in = api[api.find(start) + len(start):api.rfind(end)]
        # print spot_duration_in
        #### Given budget
        start = 'budgets='
        end = '&user_id'
        budget_in = api[api.find(start) + len(start):api.rfind(end)]
        # print budget_in
        #### profile
        start = 'profile='
        end = '&duration'
        profile_in = api[api.find(start) + len(start):api.rfind(end)]
        # print profile_in
        #### Response gender
        gender_out = data['package']['gender']
        # print gender_out
        #### Response spot_duration
        spot_duration_out = data['package']['spot_duration']
        # print spot_duration_out
        #### Response discounted_package_cost
        budget_out = data['package']['discounted_package_cost']
        # print budget_out
        #### Response tax
        tax_out = data['package']['tax_amount']
        # print tax_out
        def profile_to_tg_mongo(audience_type):
            # Map an audience type to its NCCS class letters (first "/" segment of tg).
            pf = db.profile_to_tg.find({"audience_type":audience_type})
            for document in pf:
                result = document.get("tg")
                r = result.split('/')
                r_1 = r[0]
                nccs = list(r_1)
                return nccs
            # print result
            # print r
            # print r[0]
        def channel_reach(r, c, a, g, nccs):
            # Reference channel reach from Mongo for a region/channel/audience combo.
            em = db.channel_reach.find({"channel":c,"region":r,"gender":g,"age":a,"nccs":nccs})
            for document in em:
                result = document.get("channel_reach")
                print result
                return result
        def val_reach_spli_mongo(region_name):
            # Cable share (percent) of reach for a non-National region.
            vl = db.reach_split.find({"region":region_name})
            for doc in vl:
                if region_name != "National":
                    return doc.get("cable_percent")
        def val_reach_spli_mongo_dth(region_name):
            # DTH share (percent) of reach for a non-National region.
            vl = db.reach_split.find({"region":region_name})
            for doc in vl:
                if region_name != "National":
                    return doc.get("dth_percent")
        def channel_mappings_mongo(region_name,type_out,channel_name):
            # Coverage kind ("Cable", "DTH" or "Cable+DTH") for a channel mapping.
            cov = db.channel_mappings.find({"region":region_name,"type":type_out,"channel_name":channel_name})
            for doc in cov:
                result = doc.get("coverage")
                # print result
                return result
        def chan_reach():
            # Validate each channel's reach in the API response against Mongo.
            for i in range(0,len(data['package']['region'])):
                if data['package']['region'][i]['channel_order'] == []:
                    continue
                #If channel_order is not empty, then compute the actual cost
                else:
                    region_name = data['package']['region'][i]['region_name']
                    for j in range(0,len(data['package']['region'][i]['channels'])):
                        channel_name = data['package']['region'][i]['channels'][j]['channel_name']
                        age = data['package']['age']
                        gender = data['package']['gender']
                        if gender == 'Male,Female':
                            g = ["Male","Female"]
                        elif gender == 'Male':
                            g = ["Male"]
                        elif gender == 'Female':
                            g = ["Female"]
                        if gender == 'Male,Female':
                            gender_for_tg = ' MF'
                        elif gender == 'Male':
                            gender_for_tg = ' M'
                        elif gender == 'Female':
                            gender_for_tg = ' F'
                        type_out = data['package']['region'][i]['channels'][j]['type']
                        audience_type = profile_in + gender_for_tg
                        nccs = profile_to_tg_mongo(audience_type)
                        channel_reach_r = data['package']['region'][i]['channels'][j]['channel_reach']
                        channel_reach_mongo = channel_reach(region_name,channel_name,age,g,nccs)
                        # Mongo stores reach in thousands.
                        chan_reach = channel_reach_mongo * 1000
                        if (region_name != 'National' and type_out == 'Spliced'):
                            coverage = channel_mappings_mongo(region_name,type_out,channel_name)
                            # print 'coverage',coverage,str(coverage)
                            if str(coverage) == 'Cable':
                                value = val_reach_spli_mongo(region_name)
                                # print 'value',value
                            elif str(coverage) == 'DTH':
                                value = val_reach_spli_mongo_dth(region_name)
                                # print 'Value',value
                            elif str(coverage) == 'Cable+DTH':
                                value = 100
                                # print 'Value',value
                            reac = chan_reach * (value / 100)
                            print '!N and Spliced:region_name,channel_name,channel_reach_mongo,value,chan_reach*1000,channel_reach_r,calculated_reach',region_name,channel_name,channel_reach_mongo,value,chan_reach,channel_reach_r,reac
                            # Allow a 1-unit rounding tolerance.
                            if abs(round(reac)-round(channel_reach_r)) == 1 or abs(round(reac)-round(channel_reach_r)) == 0:
                                res = 'Match'
                                print res
                            else:
                                res = 'Mismatch'
                                print res
                        elif (region_name != 'National' and type_out == 'Regional'):
                            coverage = channel_mappings_mongo(region_name,type_out,channel_name)
                            if str(coverage) == 'Cable':
                                value = val_reach_spli_mongo(region_name)
                            elif str(coverage) == 'DTH':
                                value = val_reach_spli_mongo_dth(region_name)
                            elif str(coverage) == 'Cable+DTH':
                                value = 100
                            reac = chan_reach * (value / 100)
                            print '!N and Regional:region_name,channel_name,channel_reach_mongo,value,chan_reach*1000,channel_reach_r,calculated_reach',region_name,channel_name,channel_reach_mongo,value,chan_reach,channel_reach_r,reac
                            if abs(round(reac)-round(channel_reach_r)) == 1 or abs(round(reac)-round(channel_reach_r)) == 0:
                                res = 'Match'
                                print res
                            else:
                                res = 'Mismatch'
                                print res
                        elif (region_name == 'National'):
                            # National reach = sum over per-region impressions.
                            reac_2 = 0
                            reac_2_n = 0
                            # National and Spliced -- then -- DTH
                            if type_out == 'Spliced':
                                for key,value in data['package']['region'][i]['detailed_region_impressions'].items():
                                    region = key
                                    channel_reach_mongo = channel_reach(region,channel_name,age,g,nccs)
                                    chan_reach = channel_reach_mongo * 1000
                                    coverage = channel_mappings_mongo('DTH',type_out,channel_name)
                                    if str(coverage) == 'Cable':
                                        value = val_reach_spli_mongo(region)
                                    elif str(coverage) == 'DTH':
                                        value = val_reach_spli_mongo_dth(region)
                                    elif str(coverage) == 'Cable+DTH':
                                        value = 100
                                    reac_1 = chan_reach * (value/100)
                                    reac_2 += reac_1
                                reac_2_r = data['package']['region'][i]['channels'][j]['channel_reach']
                                if abs(round(reac_2)-round(reac_2_r)) == 1 or abs(round(reac_2)-round(reac_2_r)) == 0:
                                    res = 'Match'
                                    print res
                                else:
                                    res = 'Mismatch'
                                    print res
                                print 'N and Spliced:region_name,channel_name,channel_reach_mongo,value,chan_reach*1000,channel_reach_r,calculated_reach',region_name,channel_name,channel_reach_mongo,value,chan_reach,reac_2_r,reac_2
                            elif type_out == 'National':
                                for key,value in data['package']['region'][i]['detailed_region_impressions'].items():
                                    region = key
                                    channel_reach_mongo = channel_reach(region,channel_name,age,g,nccs)
                                    chan_reach = channel_reach_mongo * 1000
                                    coverage = channel_mappings_mongo('National',type_out,channel_name)
                                    if str(coverage) == 'Cable':
                                        value = val_reach_spli_mongo(region)
                                    elif str(coverage) == 'DTH':
                                        value = val_reach_spli_mongo_dth(region)
                                    elif str(coverage) == 'Cable+DTH':
                                        value = 100
                                    reac_1_n = chan_reach * (value/100)
                                    reac_2_n += reac_1_n
                                reac_2_nr = data['package']['region'][i]['channels'][j]['channel_reach']
                                print 'N and N:region_name,channel_name,channel_reach_mongo,value,chan_reach*1000,channel_reach_r,calculated_reach',region_name,channel_name,channel_reach_mongo,value,chan_reach,reac_2_nr,reac_2_n
                                if abs(round(reac_2_n)-round(reac_2_nr)) == 1 or abs(round(reac_2_n)-round(reac_2_nr)) == 0:
                                    res = 'Match'
                                    print res
                                else:
                                    res = 'Mismatch'
                                    print res
            # Summarise: NOTE(review): only the last `res` is ever appended,
            # so `r` always has one element and the all() check is trivial.
            r = []
            r.append(res)
            print r
            if all(r[0] == item for item in r):
                if r[0] == 'Match':
                    print 'Reach_Match',',',
                else:
                    print 'Reach_Mismatch',',',
            else:
                print 'Reach_Partial_Pass',',',
        rea = chan_reach()
        print api
|
from odoo import fields,models,api,_
class BookingConfigSettings(models.Model):
    """Configuration record holding pre/post booking buffer times."""
    _name = "booking.order.settings"

    # Buffer before a booking starts.
    pre_booking = fields.Integer(string="Pre Booking time", required=True)
    # Buffer after a booking ends.
    # Fix: was `String=` (capital S) — Odoo silently ignores the unknown
    # kwarg, so the field label never appeared; the keyword is `string`.
    post_booking = fields.Integer(string="Post Booking time", required=True)
# coding=utf-8
import os, sys, cx_Oracle, smtplib, datetime, csv, zipfile
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.application import MIMEApplication
def input_verify(fmdate, todate, output_type):  # User-input validation
    """Validate both date strings via Oracle to_date() and the output format.

    Returns 'TRUE' when everything checks out, 'FALSE' for a bad date, and
    None (fall-through) for a bad format — the caller must re-prompt.
    """
    db = cx_Oracle.connect('name/psw@*****')
    sqlfm = "select to_date('" + fmdate + "','yyyy-mm-dd hh24:mi:ss') from dual"
    cursor = db.cursor()
    try:
        cursor.execute(sqlfm)
        print '开始时间验证OK'
    except Exception, e:
        print Exception, ":", e
        print '开始时间有误,请重新输入'
        return 'FALSE'
    sqlto = "select to_date('" + todate + "','yyyy-mm-dd hh24:mi:ss') from dual"
    # NOTE(review): `cusror` is a typo and this handle is unused; the check
    # below reuses the first cursor.
    cusror = db.cursor()
    try:
        cursor.execute(sqlto)
        print '结束时间验证OK'
    except Exception, e:
        print Exception, ":", e
        print '结束时间有误,请重新输入'
        return 'FALSE'
    if output_type.lower() == 'csv' or output_type.lower() == 'txt':
        print '格式验证OK,取数中,请稍等......'
        return 'TRUE'
    else:
        print '格式输入有误,请重新输入(CSV/TXT不区分大小写):'
def data_query(fmdate, todate, output_type):  # Data-query module
    """Fetch out-ident rows in [fmdate, todate] and hand them to trans_result.

    Exits the process when the query returns nothing. The SQL is built by
    string concatenation — acceptable for this trusted internal tool, but it
    would be injectable with untrusted input.
    """
    db = cx_Oracle.connect('sss/sss_123456@10.202.4.97:1521/sss')
    sql = "select transit_zno, TO_CHAR(insert_tm,'YYYY-MM-DD HH24:MI:SS'), waybill_no, des_zno from tt_as_out_ident"
    where = " where TO_CHAR(insert_tm, 'yyyy-mm-dd hh24:mi:ss') between '" + fmdate + "' and '" + todate + "'"
    cursor = db.cursor()
    cursor.execute(sql + where)
    result = cursor.fetchall()
    # NOTE(review): the csv branch never falls into the else below because
    # trans_result ultimately calls sys.exit() inside mail_to().
    if result and output_type.lower() == 'csv':
        trans_result(result, 'csv')
    if result and output_type.lower() == 'txt':
        trans_result(result, 'txt')
    else:
        print '条件输入查询数据为空,程序退出,谢谢'
        sys.exit()
def get_current_dir(output_type):  # Build the path for a new export file
    """Return <cwd>\\<timestamp>.<output_type> for a fresh data file."""
    timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
    return str(os.getcwd()) + "\\" + timestamp + "." + output_type
def create_txtdatafile(output_type):  # Create the txt file with its header
    """Create a fresh data file holding only the column-header line and
    return its full path."""
    datafile_dir = get_current_dir(output_type)
    header = "场地代码-时间段-运单号-目的地\n"
    out = open(datafile_dir, 'wb')
    out.write(header)
    out.close()
    return datafile_dir
def add_data(datafile_dir, data):  # Append text data to the export file
    """Append *data* verbatim to the file at *datafile_dir* (created if absent)."""
    with open(datafile_dir, 'a') as out:
        out.write(data)
def add_csvdata(datafile_dir, data):  # Append rows to the csv export file
    """Append every row of *data* to the CSV file at *datafile_dir*.

    Opened in binary append mode — Python 2 csv semantics.
    """
    out = open(datafile_dir, 'ab+')
    csv.writer(out).writerows(data)
    out.close()
def mail_to(filename): #邮件发送模块
user = "mail"
pwd = "psw"
to = "mail"
msg = MIMEMultipart()
msg["Subject"] = "取数"
msg["From"] = user
msg["To"] = to
part = MIMEText("取数结果详见附件")
msg.attach(part)
part = MIMEApplication(open(filename, "rb").read())
part.add_header('Content-Disposition', 'attachment', filename=filename)
msg.attach(part)
s = smtplib.SMTP_SSL("smtp.qq.com", 465)
s.login(user, pwd)
s.sendmail(user, to, msg.as_string())
print "取数邮件已经发送,请注意查收,谢谢"
s.close()
sys.exit()
def trans_result(result, output_format): #取数格式转换
dir = str(os.getcwd())
now_time = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
if output_format.lower() == 'txt':
datafile_dir = create_txtdatafile('txt')
for each_item in result:
line_result = each_item[0] + '-' + each_item[1] + '-' + each_item[2] + '-' + each_item[3] + '\n'
add_data(datafile_dir, line_result)
zip_file_name = file_zip(datafile_dir)
print zip_file_name
mail_to(zip_file_name)
else:
datafile_dir = get_current_dir('csv')
csv_header = "场地代码,时间段,运单号,目的地\n"
add_data(datafile_dir, csv_header)
add_csvdata(datafile_dir, result)
zip_file_name = file_zip(datafile_dir)
mail_to(zip_file_name)
def file_zip(filename):  # File compression module
    """Store *filename* inside <filename>.zip and return the archive path."""
    zip_file_name = filename + '.zip'
    # Entries are stored uncompressed; pass zipfile.ZIP_DEFLATED to compress.
    archive = zipfile.ZipFile(zip_file_name, 'w')
    archive.write(filename)
    archive.close()
    return zip_file_name
if __name__ == "__main__":  # Entry point: gather the query conditions
    while True:
        # Interactive prompts are commented out; inputs are hard-coded,
        # presumably for testing — restore raw_input for production use.
        # r_fmdate=str(raw_input('请输入开始时间 例:2016-12-12 23:59:59:'))
        _fmdate = '2015-12-11 00:00:00'
        # r_todate=str(raw_input('请输入结束时间 例:2016-12-12 23:59:59:'))
        _todate = '2016-12-12 00:00:00'
        # r_outpt_type=str(raw_input('请输入导出格式 CSV/TXT:'))
        _output_type = 'txt'
        # Loop until validation passes; on success data_query() -> mail_to()
        # exits the whole process, so the loop only repeats on bad input.
        if input_verify(_fmdate, _todate, _output_type) == 'TRUE':
            data_query(_fmdate, _todate, _output_type)
        else:
            continue
|
#!/bin/python
#https://www.hackerrank.com/challenges/mars-exploration
import sys
# HackerRank "Mars Exploration": the message is repeated "SOS" groups; count
# how many characters were corrupted (Python 2).
s = raw_input().strip()
count=0
i=0
while i<len(s):
    # Compare each 3-character group against the expected 'S','O','S'.
    # Assumes len(s) is a multiple of 3 (guaranteed by the problem).
    S,O,S1=s[i],s[i+1],s[i+2]
    count = count + (1 if S!='S' else 0)
    count = count + (1 if O!='O' else 0)
    count = count + (1 if S1!='S' else 0)
    i=i+3
# Python 2 print statement.
print count
|
# A tuple may be written with or without enclosing parentheses.
t = (12345, 54321, 'hello!')
print(t)
# Tuples nest: `u` packs `t` together with another 5-tuple.
u = (t, (1, 2, 3, 4, 5))
print(u)
# Tuples are always printed with parentheses so nesting is unambiguous; on
# input the parentheses may be omitted, though they are usually required when
# the tuple is part of a larger expression.
from django.urls import path
from . import views
urlpatterns = [
    # Approval dashboard (reverse name 'approvalView').
    path('',views.index,name = 'approvalView'),
    # Per-object views keyed by the object's primary key.
    path('<int:object_id>/', views.pending, name='pending'),
    path('<int:object_id>/approved', views.approved, name='approve'),
    path('<int:object_id>/reval', views.reval, name='reval'),
]
import pandas as pd
# Positional Series: the integer index 0..2 is generated automatically.
grades = pd.Series([87, 100, 94])
# Broadcasting one scalar value across an explicit range index.
myarray = pd.Series(98.6, range(3))
# (Earlier debug prints of myarray, grades[0] and grades.describe() removed
# from the commented-out block; behavior is unchanged.)
# Re-create the Series with string labels instead of positions.
grades = pd.Series([87, 100, 94], index=['Wally', 'Eva', 'Sam'])
print(grades)
# A dict gives the same labelled Series in one step.
grades = pd.Series({'Wally': 87, 'Eva': 100, 'Sam': 94})
print(grades)
# Vectorized string operations via the .str accessor.
hardware = pd.Series(['Hammer', 'Saw', 'Wrench'])
a = hardware.str.contains('a')
print(a)
b = hardware.str.upper()
print(b)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
通过pywin32库,通过调用本地windows API的方式实现抓取功能
屏幕抓取器利用windows图形设备接口(GDI)获取抓取屏幕时必须的参数,如屏幕大小分辨率等信息。
"""
import selenium
import win32gui
import win32ui
import win32con
import win32api
|
import os
from cs50 import SQL
from flask import Flask, flash, jsonify, redirect, render_template, request, session
from flask_session import Session
from tempfile import mkdtemp
from werkzeug.exceptions import default_exceptions, HTTPException, InternalServerError
from werkzeug.security import check_password_hash, generate_password_hash
from helpers import apology, login_required, lookup, usd
# Configure application
app = Flask(__name__)
# Ensure templates are auto-reloaded (development convenience)
app.config["TEMPLATES_AUTO_RELOAD"] = True
# Ensure responses aren't cached
@app.after_request
def after_request(response):
    """Disable client/proxy caching on every outgoing response."""
    no_cache_headers = {
        "Cache-Control": "no-cache, no-store, must-revalidate",
        "Expires": 0,
        "Pragma": "no-cache",
    }
    for header_name, header_value in no_cache_headers.items():
        response.headers[header_name] = header_value
    return response
# Custom filter
app.jinja_env.filters["usd"] = usd
# Configure session to use filesystem (instead of signed cookies)
app.config["SESSION_FILE_DIR"] = mkdtemp()  # fresh temp dir per process
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
Session(app)
# Configure CS50 Library to use SQLite database
db = SQL("sqlite:///finance.db")
# Make sure API key is set (required by lookup() for stock quotes)
if not os.environ.get("API_KEY"):
    raise RuntimeError("API_KEY not set")
# Define function that checks for password strength
def PassStrength(password):
properties = {"lower": 0, "upper": 0, "numbers": 0, "special": 0}
alphabet = "abcdefghijklmnopqrstuvwxyz"
numbers = "1234567890"
special = "!§$%&'\"<>\\:;/()=?,.-_+#*~^"
if len(password) < 8:
return False
for letter in password:
if letter in alphabet:
properties["lower"] += 1
elif letter in alphabet.upper():
properties["upper"] += 1
elif letter in numbers:
properties["numbers"] += 1
elif letter in special:
properties["special"] += 1
for prop in properties:
if properties[prop] < 1:
return False
return True
def SpecialCharacters(username):
    """Return True if *username* contains any character from the special set."""
    special = "!§$%&/()\\=\"?,.-_:;+#'*~<>^"
    return any(letter in special for letter in username)
@app.route("/")
@login_required
def index():
"""Show portfolio of stocks"""
# Get all pervious transations by ID
transactions = db.execute("SELECT * FROM transactions WHERE user_id = :id", id = session["user_id"])
holdings = {}
for transaction in transactions:
if transaction["type"] == "BUY":
if transaction["symbol"] not in holdings.keys():
holdings[transaction["symbol"]] = int(transaction["shares"])
else:
holdings[transaction["symbol"]] = int(holdings[transaction["symbol"]]) + int(transaction["shares"])
if int(holdings[transaction["symbol"]]) == 0:
holdings.pop("symbol")
if transaction["type"] == "SELL":
if transaction["symbol"] not in holdings.keys():
holdings[transaction["symbol"]] = - int(transaction["shares"])
else:
holdings[transaction["symbol"]] = int(holdings[transaction["symbol"]]) - int(transaction["shares"])
if int(holdings[transaction["symbol"]]) == 0:
holdings.pop(transaction["symbol"])
prices = {}
names = {}
values = {}
total = 0
for index, holding in enumerate(holdings):
data = lookup(holding)
prices[holding] = usd(data["price"])
names[holding] = data["name"]
values[holding] = float(data["price"]) * int(holdings[data["symbol"]])
total = total + (float(data["price"]) * int(holdings[data["symbol"]]))
for value in values:
values[value] = usd(values[value])
current_user = db.execute("SELECT * FROM users WHERE id = :id", id = session["user_id"])
if len(current_user) != 1:
return apology("Your user id was not found. Please try loggin in again.")
cash = current_user[0]["cash"]
net_worth = total + cash
return render_template("dashboard.html", holdings=holdings, prices=prices, names=names, values=values, total=usd(total), cash=usd(cash), net_worth=usd(net_worth))
@app.route("/buy", methods=["GET", "POST"])
@login_required
def buy():
"""Buy shares of stock"""
if request.method == "POST":
# Ensure symbol was submitted
if not request.form.get("symbol"):
return apology("must provide a stock symbol to buy", 403)
# Ensure shares were submitted
if not request.form.get("shares") or int(request.form.get("shares")) <= 0:
return apology("must provide a valid number of shares to buy", 403)
# Ensure shares are not negative
response = lookup(request.form.get("symbol"))
if response == None:
return apology("that stock symbol doesnt exist", 403)
name = response["name"]
price = response["price"]
symbol = response["symbol"]
# Calculate amount of money required to buy the selected shares
required_funds = float(price) * int(request.form.get("shares"))
current_user = db.execute("SELECT * FROM users WHERE id = :id", id = session["user_id"])
if len(current_user) != 1:
return apology("there seems to be no ID found. Critical Error!", 403)
# Check if funds required are present in the account
if float(required_funds) > float(current_user[0]["cash"]):
return apology("your existing funds are too small for this purchse", 403)
# Get the last transaction ID in order to insert the next one in order
last_id = db.execute("SELECT id FROM transactions ORDER BY id DESC LIMIT 1")
print(last_id)
if len(last_id) == 0:
last_id = [{"id":0}]
new_id = last_id[0]["id"] + 1
db.execute("INSERT INTO transactions VALUES(:newid, :currentuser, datetime('now', 'localtime'), :symbol, :shares, :type, :company, :shareprice)", newid = new_id, currentuser = current_user[0]["id"], symbol = request.form.get("symbol"), shares = int(request.form.get("shares")), type = "BUY", company = name, shareprice = price)
db.execute("UPDATE users SET cash = :cashvalue WHERE id = :currentuser ", cashvalue = (float(current_user[0]["cash"]) - float(required_funds)), currentuser = current_user[0]["id"])
flash(f'Your purchase of {request.form.get("shares")} shares of {name} ({symbol}) was successful.')
return redirect("/")
else:
return render_template("buy.html")
@app.route("/history")
@login_required
def history():
"""Show history of transactions"""
# Get all pervious transations by ID
transactions = db.execute("SELECT * FROM transactions WHERE user_id = :currentuser ORDER BY datetime DESC", currentuser = session["user_id"])
values = {}
total = 0
for index, transaction in enumerate(transactions):
values[transaction["symbol"]] = float(transaction["price"]) * int(transaction["shares"])
total = float(total) + (float(transaction["price"]) * int(transaction["shares"]))
for value in values:
values[value] = usd(values[value])
flash("You have successfully loaded your transaction history.")
return render_template("history.html", transactions=transactions, values=values, total=usd(total))
@app.route("/login", methods=["GET", "POST"])
def login():
"""Log user in"""
# Forget any user_id
session.clear()
# User reached route via POST (as by submitting a form via POST)
if request.method == "POST":
# Ensure username was submitted
if not request.form.get("username"):
return apology("must provide username", 403)
# Ensure password was submitted
elif not request.form.get("password"):
return apology("must provide password", 403)
# Query database for username
rows = db.execute("SELECT * FROM users WHERE username = :username",
username=request.form.get("username"))
# Ensure username exists and password is correct
if len(rows) != 1 or not check_password_hash(rows[0]["hash"], request.form.get("password")):
return apology("invalid username and/or password", 403)
# Remember which user has logged in
session["user_id"] = rows[0]["id"]
flash("You have successfully logged in.")
# Redirect user to home page
return redirect("/")
# User reached route via GET (as by clicking a link or via redirect)
else:
return render_template("login.html")
@app.route("/logout")
def logout():
"""Log user out"""
# Forget any user_id
session.clear()
flash("You have successfully logged out.")
# Redirect user to login form
return redirect("/")
@app.route("/quote", methods=["GET", "POST"])
@login_required
def quote():
"""Get stock quote."""
# User reached route via POST (as by submitting a form via POST)
if request.method == "POST":
# Ensure stock symbol was submitted
if not request.form.get("symbol") or SpecialCharacters(request.form.get("symbol")) == True:
return apology("must provide a valid stock symbol to get a quote.", 403)
response = lookup(request.form.get("symbol"))
name = response["name"]
price = response["price"]
symbol = response["symbol"]
return render_template("quoted.html", name=name, price=usd(price), symbol=symbol)
else:
return render_template("quote.html")
@app.route("/register", methods=["GET", "POST"])
def register():
"""Register user"""
if request.method == "POST":
# Ensure username was submitted
if not request.form.get("username"):
return apology("must provide username", 403)
# Ensure password was submitted
elif not request.form.get("password"):
return apology("must provide password", 403)
# Password must match the confirmation
elif request.form.get("password") != request.form.get("confirmation"):
return apology("the entered passwords do not match", 403)
# Ensure that password is strong enough
elif PassStrength(request.form.get("password")) != True:
flash("Password was not strong enough. It must be at least 8 characters long and contain 1 upper case letter, 1 number, and 1 special character. Try again.")
return render_template("register.html")
elif SpecialCharacters(request.form.get("username")) == True:
flash("Username cannot contain special characters.")
return render_template("register.html")
# Query database for username
rows = db.execute("SELECT * FROM users WHERE username = :username",
username=request.form.get("username"))
if len(rows) != 0:
return apology("Username already exists. Please choose a different one.", 403)
lastUser = db.execute("SELECT * FROM users ORDER BY id DESC LIMIT 1")
if len(lastUser) == 0:
lastID = 0
else:
lastID = lastUser[0]["id"]
db.execute("INSERT INTO users (id, username, hash, cash) values (:newid, :currentuser, :password, :cash)", newid = (lastID + 1), currentuser = request.form.get("username"), password = generate_password_hash(request.form.get("password")), cash = 10000)
flash("You have successfully registered your account.")
# Redirect user to home page
return redirect("/")
else:
return render_template("register.html")
@app.route("/sell", methods=["GET", "POST"])
@login_required
def sell():
"""Sell shares of stock"""
if request.method == "POST":
# Ensure symbol was submitted
if not request.form.get("symbol"):
return apology("must provide a stock symbol", 403)
# Ensure shares were submitted
elif not request.form.get("shares") or int(request.form.get("shares")) <= 0:
return apology("must provide a number of shares you want to sell", 403)
# Ensure user actually owns shares
shares_bought = db.execute("Select SUM(shares) FROM transactions WHERE symbol = :symbol AND user_id = :currentuser AND type = 'BUY'", symbol = request.form.get("symbol"), currentuser = session["user_id"])
shares_sold = db.execute("SELECT SUM(shares) FROM transactions WHERE symbol = :symbol AND user_id = :currentuser AND type = 'SELL'", symbol = request.form.get("symbol"), currentuser = session["user_id"])
print(shares_bought)
print(shares_sold)
# Check if the SQLite queries returned null because no records were found
if shares_bought[0]["SUM(shares)"] == None:
shares_bought[0]["SUM(shares)"] = 0
if shares_sold[0]["SUM(shares)"] == None:
shares_sold[0]["SUM(shares)"] = 0
current_shares = int(shares_bought[0]["SUM(shares)"]) - int(shares_sold[0]["SUM(shares)"])
if current_shares <= 0:
return apology("you do not own any shares in that stock anymore.")
if ((int(current_shares) - int(request.form.get("shares"))) < 0 ):
return apology("you are trying to sell more shares than you own.")
# Get data on prices and company
data = lookup(request.form.get("symbol"))
price = data["price"]
name = data["name"]
symbol = data["symbol"]
# The funds won by selling
won_funds = float(price) * int(request.form.get("shares"))
# Get the current balance of the users
current_user = db.execute("SELECT * FROM users WHERE id = :currentuser", currentuser = session["user_id"])
if len(current_user) != 1:
return apology("there seems to be no ID found. Critical Error! Try loggin in again.", 403)
# Check if funds required are present in the account
if int(request.form.get("shares")) > int(current_user[0]["cash"]):
return apology("you are trying to sell more shares than you own", 403)
last_id = db.execute("SELECT id FROM transactions ORDER BY id DESC LIMIT 1")
print(last_id)
if len(last_id) == 0:
last_id = [{"id":0}]
new_id = last_id[0]["id"] + 1
db.execute("INSERT INTO transactions VALUES(:newid, :currentuser, datetime('now', 'localtime'), :symbol, :shares, :type, :company, :stockprice)", newid = new_id, currentuser = current_user[0]["id"], symbol = request.form.get("symbol"), shares = int(request.form.get("shares")), type = "SELL", company = name, stockprice = price)
db.execute("UPDATE users SET cash = :cash WHERE id = :currentuser ", cash = (float(current_user[0]["cash"]) + float(won_funds)), currentuser = current_user[0]["id"])
flash(f'Your sell order of {request.form.get("shares")} shares of {name} ({symbol}) was successful.')
return redirect("/")
else:
stocks = []
symbols = db.execute("SELECT symbol FROM transactions WHERE user_id = :currentuser", currentuser = session["user_id"])
for row in symbols:
if row["symbol"] in stocks:
continue
stocks.append(row["symbol"])
return render_template("sell.html", stocks=stocks)
def errorhandler(e):
    """Render an apology for any error (non-HTTP exceptions become a 500)."""
    err = e if isinstance(e, HTTPException) else InternalServerError()
    return apology(err.name, err.code)
# Listen for errors
# Register errorhandler() for every standard HTTP error code so all errors
# render the same apology page.
for code in default_exceptions:
    app.errorhandler(code)(errorhandler)
|
啊实打实asdfsadfasdfsfasdfsa苏打
sadfsd
asd
fasd
fasdasdf
asdf
sadfsdasd
啊实打xiao asdasd asd asd as苏打
heelo woorlld asd as
|
# -*- coding: utf-8 -*-
'''
Created on May 29, 2012
@author: feralvam
'''
def position(argcand):
    """
    Indicates whether the word occurs before(0) or after(1) the target verb
    """
    before_verb = argcand["info"]["address"] < argcand["verb"]["address"]
    return 0 if before_verb else 1
def voice(argcand):
    """
    Indicates whether the verb clause is in active(0) or passive voice(1)
    """
    # Passive requires a participle verb whose head is a form of ser/estar.
    if argcand["verb"]["tag"].lower() != "v-pcp":
        return 0
    head_address = argcand["verb"]["head"]
    head_lemma = argcand["depgraph"].get_by_address(head_address)["lemma"]
    return 1 if head_lemma.lower() in ["ser", "estar"] else 0
def path_deprel(depgraph, address_from, address_to):
    """
    Gets the path of dependency relations between two nodes in the dependency graph
    @param depgraph: dependency graph of the sentence
    @param address_from: the address of the starting point of the path
    @param address_to: the address of the finishing point of the path
    """
    direct_path = _get_path_deprel(depgraph, depgraph.get_by_address(address_from), address_to)
    # BUG FIX: path_from must exist on every branch — it was previously
    # undefined when a direct path was found, so the path-forming loop below
    # crashed with NameError.
    path_from = []
    if len(direct_path) > 0:
        path_to = direct_path
    else:
        # No direct path: join both nodes through the root.
        path_from = _get_path_deprel(depgraph, depgraph.root, address_from)
        path_to = _get_path_deprel(depgraph, depgraph.root, address_to)
        # Find the intersection of the paths (preserving the order)
        intersec_path = [x for x in path_from if x in path_to]
        # Strip the shared prefix up to the last common node.
        if len(intersec_path) > 0:
            intersec = intersec_path[-1]
            path_from = path_from[path_from.index(intersec)+1:]
            path_to = path_to[path_to.index(intersec)+1:]
        path_from.reverse()
    # Forming the path: upward hops are marked with '¡', downward hops keep
    # their direction marker from _get_path_deprel.
    path = []
    for path_node in path_from:
        path.append("{}¡".format(path_node[1]))
    for path_node in path_to:
        path.append("{}{}".format(path_node[1], path_node[2]))
    return path
def path_postag(depgraph, address_from, address_to):
    """
    Gets the path of Part-of-Speech tags between two nodes in the dependency graph
    @param depgraph: dependency graph of the sentence
    @param address_from: the address of the starting point of the path
    @param address_to: the address of the finishing point of the path
    """
    direct_path = _get_path_postag(depgraph, depgraph.get_by_address(address_from), address_to)
    # BUG FIX: path_from must exist on every branch — it was previously
    # undefined when a direct (or inverse direct) path was found, so the
    # path-forming loop below crashed with NameError.
    path_from = []
    if len(direct_path) > 0:
        path_to = direct_path
    else:
        # Look for the inverse direct path
        inv_direct_path = _get_path_postag(depgraph, depgraph.get_by_address(address_to), address_from)
        if len(inv_direct_path) > 0:
            inv_direct_path.reverse()
            path_to = inv_direct_path
        else:
            # No direct path either way: join both nodes through the root.
            path_from = _get_path_postag(depgraph, depgraph.root, address_from)
            path_to = _get_path_postag(depgraph, depgraph.root, address_to)
            # Find the intersection of the paths (preserving the order)
            intersec_path = [x for x in path_from if x in path_to]
            # Strip the shared prefix up to the last common node.
            if len(intersec_path) > 0:
                intersec = intersec_path[-1]
                path_from = path_from[path_from.index(intersec)+1:]
                path_to = path_to[path_to.index(intersec)+1:]
            path_from.reverse()
    # Forming the path as a flat list of POS tags.
    path = []
    for path_node in path_from:
        path.append("{}".format(path_node[1]))
    for path_node in path_to:
        path.append("{}".format(path_node[1]))
    return path
def _get_path_deprel(depgraph, node_from, address_to):
for dep in node_from["deps"]:
if dep == address_to:
return [(node_from["address"],depgraph.get_by_address(address_to)["rel"],"!", dep)]
for dep in node_from["deps"]:
path = _get_path_deprel(depgraph,depgraph.get_by_address(dep), address_to)
if len(path)>0:
path.insert(0, (node_from["address"],depgraph.get_by_address(dep)["rel"],"!", dep))
return path
return []
def _get_path_postag(depgraph, node_from, address_to):
path = depgraph.get_cycle_path(node_from, address_to)
if len(path)>0:
# There is a direct path. Give it form
postag_path = []
for node_address in path:
postag_path.append((node_address,(depgraph.get_by_address(node_address)["tag"])))
postag_path.append((address_to,(depgraph.get_by_address(address_to)["tag"])))
return postag_path
else:
return path
def head_dep(const_span, depgraph):
    """Return the info-dict of the head word of a constituent span.

    *const_span* is a (start, end) pair of token addresses; the head is the
    first token inside the span whose governor lies outside it.
    Raises ValueError when every token's head stays inside the span.
    """
    const_start, const_end = const_span
    const_start += 1  # spans exclude their first address
    for i in range(const_start, const_end + 1):
        node = depgraph.get_by_address(i)
        if node["head"] < const_start or node["head"] > const_end:
            return {"word": node["word"], "lemma": node["lemma"], "rel": node["rel"],
                    "tag": node["tag"], "address": node["address"], "deps": node["deps"]}
    # Removed an unreachable bare `return` that followed this raise.
    raise ValueError("No head found.")
def feature_extractor_dep(argcand, feature_list):
    """Extract the dependency-based features named in *feature_list* for the
    argument candidate, using its span head."""
    head_info = head_dep(argcand["info"]["span"], argcand["depgraph"])
    extracted = {}
    if "head_dep" in feature_list:
        extracted["head_dep"] = head_info["word"]
    if "dep_rel" in feature_list:
        extracted["dep_rel"] = head_info["rel"]
    return extracted
|
# --------------
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
# Code starts here
# NOTE(review): `path` is injected by the hosting platform — not defined here.
df = pd.read_csv(path)
print(df.head())
print(df.info())
# Currency-formatted columns: strip '$' and thousands separators so they can
# be cast to float in the next section.
cols = ['INCOME','HOME_VAL','BLUEBOOK','OLDCLAIM','CLM_AMT']
for col in cols:
    df[col] = df[col].str.replace('$','')
    df[col] = df[col].str.replace(',','')
# Feature/target split, then a 70/30 train/test split with a fixed seed.
X = df.drop('CLAIM_FLAG',axis=1)
y = df['CLAIM_FLAG']
count = y.value_counts()
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=6,test_size=0.3)
# Code ends here
# --------------
# Code starts here
# Cast the cleaned currency columns to numeric and inspect missing values.
X_train[cols] = X_train[cols].astype('float64')
X_test[cols] = X_test[cols].astype('float64')
print(X_train.isnull().sum())
print(X_test.isnull().sum())
# Code ends here
# --------------
# Code starts here
# Drop rows missing YOJ/OCCUPATION and realign the targets by index.
X_train.dropna(axis=0,subset=['YOJ','OCCUPATION'],inplace=True)
X_test.dropna(axis=0,subset=['YOJ','OCCUPATION'],inplace=True)
y_train = y_train[X_train.index]
y_test = y_test[X_test.index]
# Mean-impute the remaining numeric gaps.
cols1 = ['AGE','CAR_AGE','INCOME','HOME_VAL']
X_train[cols1] = X_train[cols1].fillna(X_train[cols1].mean())
X_test[cols1] = X_test[cols1].fillna(X_test[cols1].mean())
# Code ends here
# --------------
from sklearn.preprocessing import LabelEncoder
columns = ["PARENT1","MSTATUS","GENDER","EDUCATION","OCCUPATION","CAR_USE","CAR_TYPE","RED_CAR","REVOKED"]
# Code starts here
# Label-encode the categorical columns.
# NOTE(review): the single encoder is re-fit per column, so transform() on the
# test column relies on the fit_transform() just done for that same column;
# unseen test categories would raise.
le = LabelEncoder()
for col in columns:
    X_train[col] = le.fit_transform(X_train[col].astype('str'))
    X_test[col] = le.transform(X_test[col].astype('str'))
# Code ends here
# --------------
from sklearn.metrics import precision_score
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
# code starts here
# Baseline logistic regression on the (imbalanced) training data.
model = LogisticRegression(random_state=6)
model.fit(X_train,y_train)
y_pred = model.predict(X_test)
score = accuracy_score(y_test,y_pred)
precision = precision_score(y_test,y_pred)
# Code ends here
# --------------
from sklearn.preprocessing import StandardScaler
from imblearn.over_sampling import SMOTE
# code starts here
# Oversample the minority class, then standardize features.
# NOTE(review): fit_sample() was renamed fit_resample() in newer imblearn.
smote = SMOTE(random_state=6)
X_train, y_train = smote.fit_sample(X_train,y_train)
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
# Code ends here
# --------------
# Code Starts here
# Re-train on the balanced, scaled data and re-score.
model = LogisticRegression()
model.fit(X_train,y_train)
y_pred = model.predict(X_test)
score = accuracy_score(y_test,y_pred)
# Code ends here
|
from django.contrib import admin
from .models import (Promotion)
# Expose the Promotion model in the Django admin site.
admin.site.register(Promotion)
# Register your models here.
|
#!/usr/bin/env python
import multiprocessing
import time
def func(name):
    """Worker task: announce start, sleep 2s, return *name* upper-cased."""
    print 'start process'
    time.sleep(2)
    return name.upper()
if __name__ == '__main__':
    results = []
    # Pool of 5 workers for 7 tasks: two tasks must wait for a free worker.
    p = multiprocessing.Pool(5)
    for i in range(7):
        res = p.apply_async(func,args=('kel',))
        results.append(res)
    for i in results:
        # get() with a 2.1s timeout: each worker sleeps 2s, so the tasks that
        # had to queue behind the first batch raise multiprocessing.TimeoutError.
        print i.get(2.1)
    #print p.map(func,['kel','smile'])
    #print '------------------'
    #for i in p.imap(func,['kel','smile']):
    #    print i
|
import socket
#get ips - dynamically - from ensemble
#assign ranges
# Simple sharded key/value client (Python 2): keys are routed to one of three
# servers by their first letter — a-h -> s[0], i-q -> s[1], r-z -> s[2].
server = ['127.0.0.1', '127.0.0.1', '127.0.0.1']
port = [6066, 6067, 6068]
s = ["" , "", ""]
for i in range(0,3) :
    s[i] = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s[i].connect((server[i],port[i]))
while True:
    command = raw_input('Enter your command: ')
    if(command == ''):
        print "Enter Something !"
        continue
    tokens = command.split()
    # Only get/put/quit are understood.
    if(tokens[0] != 'get' and tokens[0] != 'put' and tokens[0] != 'quit') :
        print "Erroneous command, type the right command please !"
        continue
    if(tokens[0] == "quit") :
        # Broadcast quit to every shard before leaving.
        s[0].send(command)
        s[1].send(command)
        s[2].send(command)
        print 'Exiting...'
        break
    else :
        # Route by the key's first letter: ord('i') == 105, ord('r') == 114.
        key_letter = ord((tokens[1][0]).lower())
        if(key_letter < 105) :
            sv = s[0]
        elif(key_letter < 114) :
            sv = s[1]
        else :
            sv = s[2]
        sv.send(command)
        reply = sv.recv(1024)
        print reply
|
from collections import defaultdict
slownik = defaultdict(int)  # if defaultdict() is underlined in red, press
# Alt+Enter and the IDE offers "from collections import defaultdict" at the top
print(slownik)
print(slownik['ala'])  # missing key: created on the fly with the default 0
print(slownik)
slownik['kot'] += 100  # works even though the key did not exist yet
print(slownik)
slownik_znakow = defaultdict(str)  # default value is the empty string
print(fr'#{slownik_znakow["ala"]}#')  # this prints an empty string, because "ala" is a key
# with no stored value; the ## wrapping makes the emptiness visible
# a default value other than 0:
slownik = defaultdict(int)
def tralala():
    return 5
zmienna = tralala  # `tralala` without parentheses is the function object; tralala() calls it
print('obiekt:', zmienna)
print('wywołanie:', zmienna())  # with () — this runs the body and returns 5
slownik = defaultdict(tralala)  # any zero-argument callable can supply the default
print(slownik)
print(slownik['ola'])
# lambda expression:
# a compact way to write a function whose body is a single line,
# i.e. just a return statement and nothing else
zmienna = lambda x,y,z: x+y+z
print(zmienna(2,3,4))
"""
teraz robimy przez lambda wyrażenie coś takiego jak wyżej:
def tralala():
    return 5
"""
# a lambda with 0 parameters that returns 5
tralala = lambda : 5
print(tralala())  # returns 5 even though no arguments are passed
# dictionary with default value 5
slownik = defaultdict(lambda : 5)  # default is 5, but no keys exist yet
print(slownik)
print(slownik['ala'])  # now the key has been added
print(slownik)
|
from sqlalchemy import (
Column,
Index,
Integer,
Text,
String,
ForeignKey,
)
from .meta import (
Base,
DBSession,
)
from sqlalchemy.orm import relationship, backref
class Cliente(Base):
    """ORM model for a client (customer) record."""
    __tablename__ = 'clientes'
    id = Column(Integer, primary_key=True)
    # Tax identifier (RFC — Registro Federal de Contribuyentes).
    RFC = Column(String(20))
    # Legal/business name.
    RazonSocial = Column(String(100))
    # Street address.
    Direccion = Column(String(100))
    company_id = Column(Integer,ForeignKey('companies.id'))
    id_Sucursal = Column(Integer,ForeignKey('sucursales.id'))
    #relationships
    # NOTE(review): described as one-to-one, but backref('clientes') reads as
    # many clients per company — confirm the intended cardinality.
    company = relationship("Company", backref=backref('clientes', order_by=id))
    sucursal = relationship("Sucursal", foreign_keys=[id_Sucursal], backref=backref('sucursales', order_by=id))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from settings import RPI as geometry
from src import RunManager, start_gui, particles
#remove distracting particles
# particles.TABLE maps display name (German) -> particle definition; every
# entry removed here disappears from the simulation's particle choices.
particles.TABLE.pop("Kohlenstoff")  # carbon
particles.TABLE.pop("Elektron")     # electron
particles.TABLE.pop("Alpha")
particles.TABLE.pop("Gamma")
particles.TABLE.pop("Muon")
particles.TABLE.pop("Proton")
particles.TABLE.pop('Neutron')
# Launch the GUI for the RPI detector geometry.
run_manager = RunManager(geometry)
start_gui(run_manager)
|
from enum import Enum
class Platform(Enum):
    """Client platform identifiers (values are the wire/display strings)."""
    IOS = "iOS"
    ANDROID = "Android"
    H5 = "H5"
    MP = "Mp"
class SdkType(Enum):
    """SDK variant identifiers (mirrors Platform's member values)."""
    IOS = "iOS"
    ANDROID = "Android"
    H5 = "H5"
    MP = "Mp"
class Network(Enum):
    """Network connection types (N_ prefix because names can't start with a digit)."""
    N_3G = "3G"
    N_4G = "4G"
    N_5G = "5G"
    N_WIFI = "wifi"
class Os(Enum):
    """Mobile operating systems."""
    IOS = "iOS"
    ANDROID = "Android"
    WINDOWS_PHONE = "Windows phone"
    YUN_OS = "YunOS"
    SYMBIAN = "Symbian"
class Carrier(Enum):
    """Chinese mobile carriers (values are the Chinese display names)."""
    # NOTE(review): member name has a typo (CHINE_ vs CHINA_); renaming would
    # break callers, so it is only flagged here.
    CHINE_TELECOM = "电信"
    CHINA_MOBILE = "移动"
    CHINA_UNICOM = "联通"
class DebugMode(Enum):
    """Event debug modes: off, debug with import, or debug without import."""
    NO_DEBUG_MODE = "no_debug"
    DEBUG_AND_IMPORT = "debug_and_import"
    DEBUG_AND_NOT_IMPORT = "debug_and_not_import"
if __name__ == "__main__":
pass
|
from django.db import models
from django.utils import timezone
class Encuesta(models.Model):
    """A survey: author, name, publication timestamp and target universe size."""
    autor = models.ForeignKey('auth.User')  # pre-Django-2.0 style: no on_delete
    nombre = models.CharField(max_length=200)
    # Set by publish(), not at creation time.
    fecha_creacion = models.DateTimeField(
        blank=True, null=True)
    # Number of people the survey targets.
    universo = models.IntegerField(default=0)
    def publish(self):
        """Stamp the creation date with the current time and persist."""
        self.fecha_creacion = timezone.now()
        self.save()
    def __str__(self):
        return self.nombre
class Pregunta(models.Model):
    """A question belonging to a survey (Encuesta)."""
    idE = models.ForeignKey(Encuesta)  # parent survey; pre-2.0 style, no on_delete
    pregunta = models.CharField(max_length=200)
    def __str__(self):
        return self.pregunta
class Respuesta(models.Model):
    """An answer option belonging to a question (Pregunta)."""
    idP = models.ForeignKey(Pregunta)  # parent question; pre-2.0 style, no on_delete
    respuesta = models.CharField(max_length=200)
    def __str__(self):
        return self.respuesta
|
import pandas as pd
def readDataFromExcel(path, option, request, columnList):
    """Read the first sheet of an Excel workbook and return the rows whose
    *option* column equals *request*, restricted to *columnList* columns.
    """
    # Header row is taken from the sheet's first row (pandas default).
    frame = pd.read_excel(path, sheet_name=0)
    matches = frame[option] == request
    return frame.loc[matches, columnList]
|
from ROOT import TH1D, TFile, TCanvas, gStyle, gPad, TLegend
gStyle.SetOptStat(0)
cann = TCanvas("cann","cann")
cann1 = TCanvas("cann1","cann1")
canp = TCanvas("canp","canp")
canp1 = TCanvas("canp1","canp1")
cann.cd()
gPad.SetLogy(1)
CanvasTitle = "Monday"
FilenameTitle = "Monday"
#Monday
file_WINE = TFile("wine_0319Mon_LA_s_tree_cut_hist.root","READ")
hist_WINE_N = file_WINE.Get("wine_0319Mon_LA_s_tree_cut_wine_0319Mon_LA_s_f_NEGP").Clone();hist_WINE_N.SetLineColor(1); scale = hist_WINE_N.GetEntries(); scale = 1/scale; hist_WINE_N.Scale(scale); hist_WINE_N.SetBins(10,0.0,0.5)
hist_WINE_P = file_WINE.Get("wine_0319Mon_LA_s_tree_cut_wine_0319Mon_LA_s_f_POSP").Clone();hist_WINE_P.SetLineColor(1); scale = hist_WINE_P.GetEntries(); scale = 1/scale; hist_WINE_P.Scale(scale); hist_WINE_P.SetBins(10,0.0,0.5)
file_WATER = TFile("water_0319Mon_LA_s_tree_cut_hist.root","READ")
hist_WATER_N = file_WATER.Get("water_0319Mon_LA_s_tree_cut_water_0319Mon_LA_s_f_NEGP").Clone();hist_WATER_N.SetLineColor(2); scale = hist_WATER_N.GetEntries(); scale = 1/scale; hist_WATER_N.Scale(scale); hist_WATER_N.SetBins(10,0.0,0.5)
hist_WATER_P = file_WATER.Get("water_0319Mon_LA_s_tree_cut_water_0319Mon_LA_s_f_POSP").Clone();hist_WATER_P.SetLineColor(2); scale = hist_WATER_P.GetEntries(); scale = 1/scale; hist_WATER_P.Scale(scale); hist_WATER_P.SetBins(10,0.0,0.5)
file_TEA = TFile("tea_0319Mon_LA_s_tree_cut_hist.root","READ")
hist_TEA_N = file_TEA.Get("tea_0319Mon_LA_s_tree_cut_tea_0319Mon_LA_s_f_NEGP").Clone();hist_TEA_N.SetLineColor(3); scale = hist_TEA_N.GetEntries(); scale = 1/scale; hist_TEA_N.Scale(scale); hist_TEA_N.SetBins(10,0.0,0.5)
hist_TEA_P = file_TEA.Get("tea_0319Mon_LA_s_tree_cut_tea_0319Mon_LA_s_f_POSP").Clone();hist_TEA_P.SetLineColor(3); scale = hist_TEA_P.GetEntries(); scale = 1/scale; hist_TEA_P.Scale(scale); hist_TEA_P.SetBins(10,0.0,0.5)
file_JUICE = TFile("juice_0319Mon_LA_s_tree_cut_hist.root","READ")
hist_JUICE_N = file_JUICE.Get("juice_0319Mon_LA_s_tree_cut_juice_0319Mon_LA_s_f_NEGP").Clone();hist_JUICE_N.SetLineColor(4); scale = hist_JUICE_N.GetEntries(); scale = 1/scale; hist_JUICE_N.Scale(scale); hist_JUICE_N.SetBins(10,0.0,0.5)
hist_JUICE_P = file_JUICE.Get("juice_0319Mon_LA_s_tree_cut_juice_0319Mon_LA_s_f_POSP").Clone();hist_JUICE_P.SetLineColor(4); scale = hist_JUICE_P.GetEntries(); scale = 1/scale; hist_JUICE_P.Scale(scale); hist_JUICE_P.SetBins(10,0.0,0.5)
file_COFFEE = TFile("coffee_0319Mon_LA_s_tree_cut_hist.root","READ")
hist_COFFEE_N = file_COFFEE.Get("coffee_0319Mon_LA_s_tree_cut_coffee_0319Mon_LA_s_f_NEGP").Clone();hist_COFFEE_N.SetLineColor(5); scale = hist_COFFEE_N.GetEntries(); scale = 1/scale; hist_COFFEE_N.Scale(scale); hist_COFFEE_N.SetBins(10,0.0,0.5)
hist_COFFEE_P = file_COFFEE.Get("coffee_0319Mon_LA_s_tree_cut_coffee_0319Mon_LA_s_f_POSP").Clone();hist_COFFEE_P.SetLineColor(5); scale = hist_COFFEE_P.GetEntries(); scale = 1/scale; hist_COFFEE_P.Scale(scale); hist_COFFEE_P.SetBins(10,0.0,0.5)
file_BEER = TFile("beer_0319Mon_LA_s_tree_cut_hist.root","READ")
hist_BEER_N = file_BEER.Get("beer_0319Mon_LA_s_tree_cut_beer_0319Mon_LA_s_f_NEGP").Clone();hist_BEER_N.SetLineColor(6); scale = hist_BEER_N.GetEntries(); scale = 1/scale; hist_BEER_N.Scale(scale); hist_BEER_N.SetBins(10,0.0,0.5)
hist_BEER_P = file_BEER.Get("beer_0319Mon_LA_s_tree_cut_beer_0319Mon_LA_s_f_POSP").Clone();hist_BEER_P.SetLineColor(6); scale = hist_BEER_P.GetEntries(); scale = 1/scale; hist_BEER_P.Scale(scale); hist_BEER_P.SetBins(10,0.0,0.5)
file_COLA = TFile("COLA_COKE_0319Mon_LA_s_tree_cut_hist.root","READ")
hist_COLA_N = file_COLA.Get("COLA_COKE_0319Mon_LA_s_tree_cut_COLA_COKE_0319Mon_LA_s_f_NEGP").Clone(); hist_COLA_N.SetLineColor(7); scale = hist_COLA_N.GetEntries(); scale = 1/scale; hist_COLA_N.Scale(scale); hist_COLA_N.SetBins(10,0.0,0.5)
hist_COLA_P = file_COLA.Get("COLA_COKE_0319Mon_LA_s_tree_cut_COLA_COKE_0319Mon_LA_s_f_POSP").Clone(); hist_COLA_P.SetLineColor(7); scale = hist_COLA_P.GetEntries(); scale = 1/scale; hist_COLA_P.Scale(scale); hist_COLA_P.SetBins(10,0.0,0.5)
'''
#Tuesday
file_WINE = TFile("wine_0320Tue_LA_s_tree_cut_hist.root","READ")
hist_WINE_N = file_WINE.Get("wine_0320Tue_LA_s_tree_cut_wine_0320Tue_LA_s_f_NEGP").Clone(); hist_WINE_N.SetLineColor(1); scale = hist_WINE_N.GetEntries(); scale = 1/scale; hist_WINE_N.Scale(scale); hist_WINE_N.SetBins(10,0.0,0.5)
hist_WINE_P = file_WINE.Get("wine_0320Tue_LA_s_tree_cut_wine_0320Tue_LA_s_f_POSP").Clone(); hist_WINE_P.SetLineColor(1); scale = hist_WINE_P.GetEntries(); scale = 1/scale; hist_WINE_P.Scale(scale); hist_WINE_P.SetBins(10,0.0,0.5)
file_WATER = TFile("water_0320Tue_LA_s_tree_cut_hist.root","READ")
hist_WATER_N = file_WATER.Get("water_0320Tue_LA_s_tree_cut_water_0320Tue_LA_s_f_NEGP").Clone(); hist_WATER_N.SetLineColor(2); scale = hist_WATER_N.GetEntries(); scale = 1/scale; hist_WATER_N.Scale(scale); hist_WATER_N.SetBins(10,0.0,0.5)
hist_WATER_P = file_WATER.Get("water_0320Tue_LA_s_tree_cut_water_0320Tue_LA_s_f_POSP").Clone(); hist_WATER_P.SetLineColor(2); scale = hist_WATER_P.GetEntries(); scale = 1/scale; hist_WATER_P.Scale(scale); hist_WATER_P.SetBins(10,0.0,0.5)
file_TEA = TFile("tea_0320Tue_LA_s_tree_cut_hist.root","READ")
hist_TEA_N = file_TEA.Get("tea_0320Tue_LA_s_tree_cut_tea_0320Tue_LA_s_f_NEGP").Clone(); hist_TEA_N.SetLineColor(3); scale = hist_TEA_N.GetEntries(); scale = 1/scale; hist_TEA_N.Scale(scale); hist_TEA_N.SetBins(10,0.0,0.5)
hist_TEA_P = file_TEA.Get("tea_0320Tue_LA_s_tree_cut_tea_0320Tue_LA_s_f_POSP").Clone(); hist_TEA_P.SetLineColor(3); scale = hist_TEA_P.GetEntries(); scale = 1/scale; hist_TEA_P.Scale(scale); hist_TEA_P.SetBins(10,0.0,0.5)
file_JUICE = TFile("juice_0320Tue_LA_s_tree_cut_hist.root","READ")
hist_JUICE_N = file_JUICE.Get("juice_0320Tue_LA_s_tree_cut_juice_0320Tue_LA_s_f_NEGP").Clone(); hist_JUICE_N.SetLineColor(4); scale = hist_JUICE_N.GetEntries(); scale = 1/scale; hist_JUICE_N.Scale(scale); hist_JUICE_N.SetBins(10,0.0,0.5)
hist_JUICE_P = file_JUICE.Get("juice_0320Tue_LA_s_tree_cut_juice_0320Tue_LA_s_f_POSP").Clone(); hist_JUICE_P.SetLineColor(4); scale = hist_JUICE_P.GetEntries(); scale = 1/scale; hist_JUICE_P.Scale(scale); hist_JUICE_P.SetBins(10,0.0,0.5)
file_COFFEE = TFile("coffee_0320Tue_LA_s_tree_cut_hist.root","READ")
hist_COFFEE_N = file_COFFEE.Get("coffee_0320Tue_LA_s_tree_cut_coffee_0320Tue_LA_s_f_NEGP").Clone(); hist_COFFEE_N.SetLineColor(5); scale = hist_COFFEE_N.GetEntries(); scale = 1/scale; hist_COFFEE_N.Scale(scale); hist_COFFEE_N.SetBins(10,0.0,0.5)
hist_COFFEE_P = file_COFFEE.Get("coffee_0320Tue_LA_s_tree_cut_coffee_0320Tue_LA_s_f_POSP").Clone(); hist_COFFEE_P.SetLineColor(5); scale = hist_COFFEE_P.GetEntries(); scale = 1/scale; hist_COFFEE_P.Scale(scale); hist_COFFEE_P.SetBins(10,0.0,0.5)
file_BEER = TFile("beer_0320Tue_LA_s_tree_cut_hist.root","READ")
hist_BEER_N = file_BEER.Get("beer_0320Tue_LA_s_tree_cut_beer_0320Tue_LA_s_f_NEGP").Clone(); hist_BEER_N.SetLineColor(6); scale = hist_BEER_N.GetEntries(); scale = 1/scale; hist_BEER_N.Scale(scale); hist_BEER_N.SetBins(10,0.0,0.5)
hist_BEER_P = file_BEER.Get("beer_0320Tue_LA_s_tree_cut_beer_0320Tue_LA_s_f_POSP").Clone(); hist_BEER_P.SetLineColor(6); scale = hist_BEER_P.GetEntries(); scale = 1/scale; hist_BEER_P.Scale(scale); hist_BEER_P.SetBins(10,0.0,0.5)
file_COLA = TFile("COLA_COKE_0320Tue_LA_s_tree_cut_hist.root","READ")
hist_COLA_N = file_COLA.Get("COLA_COKE_0320Tue_LA_s_tree_cut_COLA_COKE_0320Tue_LA_s_f_NEGP").Clone(); hist_COLA_N.SetLineColor(7); scale = hist_COLA_N.GetEntries(); scale = 1/scale; hist_COLA_N.Scale(scale); hist_COLA_N.SetBins(10,0.0,0.5)
hist_COLA_P = file_COLA.Get("COLA_COKE_0320Tue_LA_s_tree_cut_COLA_COKE_0320Tue_LA_s_f_POSP").Clone(); hist_COLA_P.SetLineColor(7); scale = hist_COLA_P.GetEntries(); scale = 1/scale; hist_COLA_P.Scale(scale); hist_COLA_P.SetBins(10,0.0,0.5)
'''
'''
#Wednesday
file_WINE = TFile("wine_0321Wed_LA_s_tree_cut_hist.root","READ")
hist_WINE_N = file_WINE.Get("wine_0321Wed_LA_s_tree_cut_wine_0321Wed_LA_s_f_NEGP").Clone(); hist_WINE_N.SetLineColor(1); scale = hist_WINE_N.GetEntries(); scale = 1/scale; hist_WINE_N.Scale(scale); hist_WINE_N.SetBins(10,0.0,0.5)
hist_WINE_P = file_WINE.Get("wine_0321Wed_LA_s_tree_cut_wine_0321Wed_LA_s_f_POSP").Clone(); hist_WINE_P.SetLineColor(1); scale = hist_WINE_P.GetEntries(); scale = 1/scale; hist_WINE_P.Scale(scale); hist_WINE_P.SetBins(10,0.0,0.5)
file_WATER = TFile("water_0321Wed_LA_s_tree_cut_hist.root","READ")
hist_WATER_N = file_WATER.Get("water_0321Wed_LA_s_tree_cut_water_0321Wed_LA_s_f_NEGP").Clone(); hist_WATER_N.SetLineColor(2); scale = hist_WATER_N.GetEntries(); scale = 1/scale; hist_WATER_N.Scale(scale); hist_WATER_N.SetBins(10,0.0,0.5)
hist_WATER_P = file_WATER.Get("water_0321Wed_LA_s_tree_cut_water_0321Wed_LA_s_f_POSP").Clone(); hist_WATER_P.SetLineColor(2); scale = hist_WATER_P.GetEntries(); scale = 1/scale; hist_WATER_P.Scale(scale); hist_WATER_P.SetBins(10,0.0,0.5)
file_TEA = TFile("tea_0321Wed_LA_s_tree_cut_hist.root","READ")
hist_TEA_N = file_TEA.Get("tea_0321Wed_LA_s_tree_cut_tea_0321Wed_LA_s_f_NEGP").Clone(); hist_TEA_N.SetLineColor(3); scale = hist_TEA_N.GetEntries(); scale = 1/scale; hist_TEA_N.Scale(scale); hist_TEA_N.SetBins(10,0.0,0.5)
hist_TEA_P = file_TEA.Get("tea_0321Wed_LA_s_tree_cut_tea_0321Wed_LA_s_f_POSP").Clone(); hist_TEA_P.SetLineColor(3); scale = hist_TEA_P.GetEntries(); scale = 1/scale; hist_TEA_P.Scale(scale); hist_TEA_P.SetBins(10,0.0,0.5)
file_JUICE = TFile("juice_0321Wed_LA_s_tree_cut_hist.root","READ")
hist_JUICE_N = file_JUICE.Get("juice_0321Wed_LA_s_tree_cut_juice_0321Wed_LA_s_f_NEGP").Clone(); hist_JUICE_N.SetLineColor(4); scale = hist_JUICE_N.GetEntries(); scale = 1/scale; hist_JUICE_N.Scale(scale); hist_JUICE_N.SetBins(10,0.0,0.5)
hist_JUICE_P = file_JUICE.Get("juice_0321Wed_LA_s_tree_cut_juice_0321Wed_LA_s_f_POSP").Clone(); hist_JUICE_P.SetLineColor(4); scale = hist_JUICE_P.GetEntries(); scale = 1/scale; hist_JUICE_P.Scale(scale); hist_JUICE_P.SetBins(10,0.0,0.5)
file_COFFEE = TFile("coffee_0321Wed_LA_s_tree_cut_hist.root","READ")
hist_COFFEE_N = file_COFFEE.Get("coffee_0321Wed_LA_s_tree_cut_coffee_0321Wed_LA_s_f_NEGP").Clone(); hist_COFFEE_N.SetLineColor(5); scale = hist_COFFEE_N.GetEntries(); scale = 1/scale; hist_COFFEE_N.Scale(scale); hist_COFFEE_N.SetBins(10,0.0,0.5)
hist_COFFEE_P = file_COFFEE.Get("coffee_0321Wed_LA_s_tree_cut_coffee_0321Wed_LA_s_f_POSP").Clone(); hist_COFFEE_P.SetLineColor(5); scale = hist_COFFEE_P.GetEntries(); scale = 1/scale; hist_COFFEE_P.Scale(scale); hist_COFFEE_P.SetBins(10,0.0,0.5)
file_BEER = TFile("beer_0321Wed_LA_s_tree_cut_hist.root","READ")
hist_BEER_N = file_BEER.Get("beer_0321Wed_LA_s_tree_cut_beer_0321Wed_LA_s_f_NEGP").Clone(); hist_BEER_N.SetLineColor(6); scale = hist_BEER_N.GetEntries(); scale = 1/scale; hist_BEER_N.Scale(scale); hist_BEER_N.SetBins(10,0.0,0.5)
hist_BEER_P = file_BEER.Get("beer_0321Wed_LA_s_tree_cut_beer_0321Wed_LA_s_f_POSP").Clone(); hist_BEER_P.SetLineColor(6); scale = hist_BEER_P.GetEntries(); scale = 1/scale; hist_BEER_P.Scale(scale); hist_BEER_P.SetBins(10,0.0,0.5)
file_COLA = TFile("COLA_COKE_0321Wed_LA_s_tree_cut_hist.root","READ")
hist_COLA_N = file_COLA.Get("COLA_COKE_0321Wed_LA_s_tree_cut_COLA_COKE_0321Wed_LA_s_f_NEGP").Clone(); hist_COLA_N.SetLineColor(7); scale = hist_COLA_N.GetEntries(); scale = 1/scale; hist_COLA_N.Scale(scale); hist_COLA_N.SetBins(10,0.0,0.5)
hist_COLA_P = file_COLA.Get("COLA_COKE_0321Wed_LA_s_tree_cut_COLA_COKE_0321Wed_LA_s_f_POSP").Clone(); hist_COLA_P.SetLineColor(7); scale = hist_COLA_P.GetEntries(); scale = 1/scale; hist_COLA_P.Scale(scale); hist_COLA_P.SetBins(10,0.0,0.5)
'''
'''
#Thursday
file_WINE = TFile("wine_0322Thu_LA_s_tree_cut_hist.root","READ")
hist_WINE_N = file_WINE.Get("wine_0322Thu_LA_s_tree_cut_wine_0322Thu_LA_s_f_NEGP").Clone(); hist_WINE_N.SetLineColor(1); scale = hist_WINE_N.GetEntries(); scale = 1/scale; hist_WINE_N.Scale(scale); hist_WINE_N.SetBins(10,0.0,0.5)
hist_WINE_P = file_WINE.Get("wine_0322Thu_LA_s_tree_cut_wine_0322Thu_LA_s_f_POSP").Clone(); hist_WINE_P.SetLineColor(1); scale = hist_WINE_P.GetEntries(); scale = 1/scale; hist_WINE_P.Scale(scale); hist_WINE_P.SetBins(10,0.0,0.5)
file_WATER = TFile("water_0322Thu_LA_s_tree_cut_hist.root","READ")
hist_WATER_N = file_WATER.Get("water_0322Thu_LA_s_tree_cut_water_0322Thu_LA_s_f_NEGP").Clone(); hist_WATER_N.SetLineColor(2); scale = hist_WATER_N.GetEntries(); scale = 1/scale; hist_WATER_N.Scale(scale); hist_WATER_N.SetBins(10,0.0,0.5)
hist_WATER_P = file_WATER.Get("water_0322Thu_LA_s_tree_cut_water_0322Thu_LA_s_f_POSP").Clone(); hist_WATER_P.SetLineColor(2); scale = hist_WATER_P.GetEntries(); scale = 1/scale; hist_WATER_P.Scale(scale); hist_WATER_P.SetBins(10,0.0,0.5)
file_TEA = TFile("tea_0322Thu_LA_s_tree_cut_hist.root","READ")
hist_TEA_N = file_TEA.Get("tea_0322Thu_LA_s_tree_cut_tea_0322Thu_LA_s_f_NEGP").Clone(); hist_TEA_N.SetLineColor(3); scale = hist_TEA_N.GetEntries(); scale = 1/scale; hist_TEA_N.Scale(scale); hist_TEA_N.SetBins(10,0.0,0.5)
hist_TEA_P = file_TEA.Get("tea_0322Thu_LA_s_tree_cut_tea_0322Thu_LA_s_f_POSP").Clone(); hist_TEA_P.SetLineColor(3); scale = hist_TEA_P.GetEntries(); scale = 1/scale; hist_TEA_P.Scale(scale); hist_TEA_P.SetBins(10,0.0,0.5)
file_JUICE = TFile("juice_0322Thu_LA_s_tree_cut_hist.root","READ")
hist_JUICE_N = file_JUICE.Get("juice_0322Thu_LA_s_tree_cut_juice_0322Thu_LA_s_f_NEGP").Clone(); hist_JUICE_N.SetLineColor(4); scale = hist_JUICE_N.GetEntries(); scale = 1/scale; hist_JUICE_N.Scale(scale); hist_JUICE_N.SetBins(10,0.0,0.5)
hist_JUICE_P = file_JUICE.Get("juice_0322Thu_LA_s_tree_cut_juice_0322Thu_LA_s_f_POSP").Clone(); hist_JUICE_P.SetLineColor(4); scale = hist_JUICE_P.GetEntries(); scale = 1/scale; hist_JUICE_P.Scale(scale); hist_JUICE_P.SetBins(10,0.0,0.5)
file_COFFEE = TFile("coffee_0322Thu_LA_s_tree_cut_hist.root","READ")
hist_COFFEE_N = file_COFFEE.Get("coffee_0322Thu_LA_s_tree_cut_coffee_0322Thu_LA_s_f_NEGP").Clone(); hist_COFFEE_N.SetLineColor(5); scale = hist_COFFEE_N.GetEntries(); scale = 1/scale; hist_COFFEE_N.Scale(scale); hist_COFFEE_N.SetBins(10,0.0,0.5)
hist_COFFEE_P = file_COFFEE.Get("coffee_0322Thu_LA_s_tree_cut_coffee_0322Thu_LA_s_f_POSP").Clone(); hist_COFFEE_P.SetLineColor(5); scale = hist_COFFEE_P.GetEntries(); scale = 1/scale; hist_COFFEE_P.Scale(scale); hist_COFFEE_P.SetBins(10,0.0,0.5)
file_BEER = TFile("beer_0322Thu_LA_s_tree_cut_hist.root","READ")
hist_BEER_N = file_BEER.Get("beer_0322Thu_LA_s_tree_cut_beer_0322Thu_LA_s_f_NEGP").Clone(); hist_BEER_N.SetLineColor(6); scale = hist_BEER_N.GetEntries(); scale = 1/scale; hist_BEER_N.Scale(scale); hist_BEER_N.SetBins(10,0.0,0.5)
hist_BEER_P = file_BEER.Get("beer_0322Thu_LA_s_tree_cut_beer_0322Thu_LA_s_f_POSP").Clone(); hist_BEER_P.SetLineColor(6); scale = hist_BEER_P.GetEntries(); scale = 1/scale; hist_BEER_P.Scale(scale); hist_BEER_P.SetBins(10,0.0,0.5)
file_COLA = TFile("COLA_COKE_0322Thu_LA_s_tree_cut_hist.root","READ")
hist_COLA_N = file_COLA.Get("COLA_COKE_0322Thu_LA_s_tree_cut_COLA_COKE_0322Thu_LA_s_f_NEGP").Clone(); hist_COLA_N.SetLineColor(7); scale = hist_COLA_N.GetEntries(); scale = 1/scale; hist_COLA_N.Scale(scale); hist_COLA_N.SetBins(10,0.0,0.5)
hist_COLA_P = file_COLA.Get("COLA_COKE_0322Thu_LA_s_tree_cut_COLA_COKE_0322Thu_LA_s_f_POSP").Clone(); hist_COLA_P.SetLineColor(7); scale = hist_COLA_P.GetEntries(); scale = 1/scale; hist_COLA_P.Scale(scale); hist_COLA_P.SetBins(10,0.0,0.5)
'''
'''
#Friday
file_WINE = TFile("wine_0323Fri_LA_s_tree_cut_hist.root","READ")
hist_WINE_N = file_WINE.Get("wine_0323Fri_LA_s_tree_cut_wine_0323Fri_LA_s_f_NEGP").Clone(); hist_WINE_N.SetLineColor(1); scale = hist_WINE_N.GetEntries(); scale = 1/scale; hist_WINE_N.Scale(scale); hist_WINE_N.SetBins(10,0.0,0.5)
hist_WINE_P = file_WINE.Get("wine_0323Fri_LA_s_tree_cut_wine_0323Fri_LA_s_f_POSP").Clone(); hist_WINE_P.SetLineColor(1); scale = hist_WINE_P.GetEntries(); scale = 1/scale; hist_WINE_P.Scale(scale); hist_WINE_P.SetBins(10,0.0,0.5)
file_WATER = TFile("water_0323Fri_LA_s_tree_cut_hist.root","READ")
hist_WATER_N = file_WATER.Get("water_0323Fri_LA_s_tree_cut_water_0323Fri_LA_s_f_NEGP").Clone(); hist_WATER_N.SetLineColor(2); scale = hist_WATER_N.GetEntries(); scale = 1/scale; hist_WATER_N.Scale(scale); hist_WATER_N.SetBins(10,0.0,0.5)
hist_WATER_P = file_WATER.Get("water_0323Fri_LA_s_tree_cut_water_0323Fri_LA_s_f_POSP").Clone(); hist_WATER_P.SetLineColor(2); scale = hist_WATER_P.GetEntries(); scale = 1/scale; hist_WATER_P.Scale(scale); hist_WATER_P.SetBins(10,0.0,0.5)
file_TEA = TFile("tea_0323Fri_LA_s_tree_cut_hist.root","READ")
hist_TEA_N = file_TEA.Get("tea_0323Fri_LA_s_tree_cut_tea_0323Fri_LA_s_f_NEGP").Clone(); hist_TEA_N.SetLineColor(3); scale = hist_TEA_N.GetEntries(); scale = 1/scale; hist_TEA_N.Scale(scale); hist_TEA_N.SetBins(10,0.0,0.5)
hist_TEA_P = file_TEA.Get("tea_0323Fri_LA_s_tree_cut_tea_0323Fri_LA_s_f_POSP").Clone(); hist_TEA_P.SetLineColor(3); scale = hist_TEA_P.GetEntries(); scale = 1/scale; hist_TEA_P.Scale(scale); hist_TEA_P.SetBins(10,0.0,0.5)
file_JUICE = TFile("juice_0323Fri_LA_s_tree_cut_hist.root","READ")
hist_JUICE_N = file_JUICE.Get("juice_0323Fri_LA_s_tree_cut_juice_0323Fri_LA_s_f_NEGP").Clone(); hist_JUICE_N.SetLineColor(4); scale = hist_JUICE_N.GetEntries(); scale = 1/scale; hist_JUICE_N.Scale(scale); hist_JUICE_N.SetBins(10,0.0,0.5)
hist_JUICE_P = file_JUICE.Get("juice_0323Fri_LA_s_tree_cut_juice_0323Fri_LA_s_f_POSP").Clone(); hist_JUICE_P.SetLineColor(4); scale = hist_JUICE_P.GetEntries(); scale = 1/scale; hist_JUICE_P.Scale(scale); hist_JUICE_P.SetBins(10,0.0,0.5)
file_COFFEE = TFile("coffee_0323Fri_LA_s_tree_cut_hist.root","READ")
hist_COFFEE_N = file_COFFEE.Get("coffee_0323Fri_LA_s_tree_cut_coffee_0323Fri_LA_s_f_NEGP").Clone(); hist_COFFEE_N.SetLineColor(5); scale = hist_COFFEE_N.GetEntries(); scale = 1/scale; hist_COFFEE_N.Scale(scale); hist_COFFEE_N.SetBins(10,0.0,0.5)
hist_COFFEE_P = file_COFFEE.Get("coffee_0323Fri_LA_s_tree_cut_coffee_0323Fri_LA_s_f_POSP").Clone(); hist_COFFEE_P.SetLineColor(5); scale = hist_COFFEE_P.GetEntries(); scale = 1/scale; hist_COFFEE_P.Scale(scale); hist_COFFEE_P.SetBins(10,0.0,0.5)
file_BEER = TFile("beer_0323Fri_LA_s_tree_cut_hist.root","READ")
hist_BEER_N = file_BEER.Get("beer_0323Fri_LA_s_tree_cut_beer_0323Fri_LA_s_f_NEGP").Clone(); hist_BEER_N.SetLineColor(6); scale = hist_BEER_N.GetEntries(); scale = 1/scale; hist_BEER_N.Scale(scale); hist_BEER_N.SetBins(10,0.0,0.5)
hist_BEER_P = file_BEER.Get("beer_0323Fri_LA_s_tree_cut_beer_0323Fri_LA_s_f_POSP").Clone(); hist_BEER_P.SetLineColor(6); scale = hist_BEER_P.GetEntries(); scale = 1/scale; hist_BEER_P.Scale(scale); hist_BEER_P.SetBins(10,0.0,0.5)
file_COLA = TFile("COLA_COKE_0323Fri_LA_s_tree_cut_hist.root","READ")
hist_COLA_N = file_COLA.Get("COLA_COKE_0323Fri_LA_s_tree_cut_COLA_COKE_0323Fri_LA_s_f_NEGP").Clone(); hist_COLA_N.SetLineColor(7); scale = hist_COLA_N.GetEntries(); scale = 1/scale; hist_COLA_N.Scale(scale); hist_COLA_N.SetBins(10,0.0,0.5)
hist_COLA_P = file_COLA.Get("COLA_COKE_0323Fri_LA_s_tree_cut_COLA_COKE_0323Fri_LA_s_f_POSP").Clone(); hist_COLA_P.SetLineColor(7); scale = hist_COLA_P.GetEntries(); scale = 1/scale; hist_COLA_P.Scale(scale); hist_COLA_P.SetBins(10,0.0,0.5)
'''
'''
#Saturday
file_WINE = TFile("wine_0324Sat_LA_s_tree_cut_hist.root","READ")
hist_WINE_N = file_WINE.Get("wine_0324Sat_LA_s_tree_cut_wine_0324Sat_LA_s_f_NEGP").Clone(); hist_WINE_N.SetLineColor(1); scale = hist_WINE_N.GetEntries(); scale = 1/scale; hist_WINE_N.Scale(scale); hist_WINE_N.SetBins(10,0.0,0.5)
hist_WINE_P = file_WINE.Get("wine_0324Sat_LA_s_tree_cut_wine_0324Sat_LA_s_f_POSP").Clone(); hist_WINE_P.SetLineColor(1); scale = hist_WINE_P.GetEntries(); scale = 1/scale; hist_WINE_P.Scale(scale); hist_WINE_P.SetBins(10,0.0,0.5)
file_WATER = TFile("water_0324Sat_LA_s_tree_cut_hist.root","READ")
hist_WATER_N = file_WATER.Get("water_0324Sat_LA_s_tree_cut_water_0324Sat_LA_s_f_NEGP").Clone(); hist_WATER_N.SetLineColor(2); scale = hist_WATER_N.GetEntries(); scale = 1/scale; hist_WATER_N.Scale(scale); hist_WATER_N.SetBins(10,0.0,0.5)
hist_WATER_P = file_WATER.Get("water_0324Sat_LA_s_tree_cut_water_0324Sat_LA_s_f_POSP").Clone(); hist_WATER_P.SetLineColor(2); scale = hist_WATER_P.GetEntries(); scale = 1/scale; hist_WATER_P.Scale(scale); hist_WATER_P.SetBins(10,0.0,0.5)
file_TEA = TFile("tea_0324Sat_LA_s_tree_cut_hist.root","READ")
hist_TEA_N = file_TEA.Get("tea_0324Sat_LA_s_tree_cut_tea_0324Sat_LA_s_f_NEGP").Clone(); hist_TEA_N.SetLineColor(3); scale = hist_TEA_N.GetEntries(); scale = 1/scale; hist_TEA_N.Scale(scale); hist_TEA_N.SetBins(10,0.0,0.5)
hist_TEA_P = file_TEA.Get("tea_0324Sat_LA_s_tree_cut_tea_0324Sat_LA_s_f_POSP").Clone(); hist_TEA_P.SetLineColor(3); scale = hist_TEA_P.GetEntries(); scale = 1/scale; hist_TEA_P.Scale(scale); hist_TEA_P.SetBins(10,0.0,0.5)
file_JUICE = TFile("juice_0324Sat_LA_s_tree_cut_hist.root","READ")
hist_JUICE_N = file_JUICE.Get("juice_0324Sat_LA_s_tree_cut_juice_0324Sat_LA_s_f_NEGP").Clone(); hist_JUICE_N.SetLineColor(4); scale = hist_JUICE_N.GetEntries(); scale = 1/scale; hist_JUICE_N.Scale(scale); hist_JUICE_N.SetBins(10,0.0,0.5)
hist_JUICE_P = file_JUICE.Get("juice_0324Sat_LA_s_tree_cut_juice_0324Sat_LA_s_f_POSP").Clone(); hist_JUICE_P.SetLineColor(4); scale = hist_JUICE_P.GetEntries(); scale = 1/scale; hist_JUICE_P.Scale(scale); hist_JUICE_P.SetBins(10,0.0,0.5)
file_COFFEE = TFile("coffee_0324Sat_LA_s_tree_cut_hist.root","READ")
hist_COFFEE_N = file_COFFEE.Get("coffee_0324Sat_LA_s_tree_cut_coffee_0324Sat_LA_s_f_NEGP").Clone(); hist_COFFEE_N.SetLineColor(5); scale = hist_COFFEE_N.GetEntries(); scale = 1/scale; hist_COFFEE_N.Scale(scale); hist_COFFEE_N.SetBins(10,0.0,0.5)
hist_COFFEE_P = file_COFFEE.Get("coffee_0324Sat_LA_s_tree_cut_coffee_0324Sat_LA_s_f_POSP").Clone(); hist_COFFEE_P.SetLineColor(5); scale = hist_COFFEE_P.GetEntries(); scale = 1/scale; hist_COFFEE_P.Scale(scale); hist_COFFEE_P.SetBins(10,0.0,0.5)
file_BEER = TFile("beer_0324Sat_LA_s_tree_cut_hist.root","READ")
hist_BEER_N = file_BEER.Get("beer_0324Sat_LA_s_tree_cut_beer_0324Sat_LA_s_f_NEGP").Clone(); hist_BEER_N.SetLineColor(6); scale = hist_BEER_N.GetEntries(); scale = 1/scale; hist_BEER_N.Scale(scale); hist_BEER_N.SetBins(10,0.0,0.5)
hist_BEER_P = file_BEER.Get("beer_0324Sat_LA_s_tree_cut_beer_0324Sat_LA_s_f_POSP").Clone(); hist_BEER_P.SetLineColor(6); scale = hist_BEER_P.GetEntries(); scale = 1/scale; hist_BEER_P.Scale(scale); hist_BEER_P.SetBins(10,0.0,0.5)
file_COLA = TFile("COLA_COKE_0324Sat_LA_s_tree_cut_hist.root","READ")
hist_COLA_N = file_COLA.Get("COLA_COKE_0324Sat_LA_s_tree_cut_COLA_COKE_0324Sat_LA_s_f_NEGP").Clone(); hist_COLA_N.SetLineColor(7); scale = hist_COLA_N.GetEntries(); scale = 1/scale; hist_COLA_N.Scale(scale); hist_COLA_N.SetBins(10,0.0,0.5)
hist_COLA_P = file_COLA.Get("COLA_COKE_0324Sat_LA_s_tree_cut_COLA_COKE_0324Sat_LA_s_f_POSP").Clone(); hist_COLA_P.SetLineColor(7); scale = hist_COLA_P.GetEntries(); scale = 1/scale; hist_COLA_P.Scale(scale); hist_COLA_P.SetBins(10,0.0,0.5)
'''
'''
#Sunday
file_WINE = TFile("wine_0325Sun_LA_s_tree_cut_hist.root","READ")
hist_WINE_N = file_WINE.Get("wine_0325Sun_LA_s_tree_cut_wine_0325Sun_LA_s_f_NEGP").Clone(); hist_WINE_N.SetLineColor(1); scale = hist_WINE_N.GetEntries(); scale = 1/scale; hist_WINE_N.Scale(scale); hist_WINE_N.SetBins(10,0.0,0.5)
hist_WINE_P = file_WINE.Get("wine_0325Sun_LA_s_tree_cut_wine_0325Sun_LA_s_f_POSP").Clone(); hist_WINE_P.SetLineColor(1); scale = hist_WINE_P.GetEntries(); scale = 1/scale; hist_WINE_P.Scale(scale); hist_WINE_P.SetBins(10,0.0,0.5)
file_WATER = TFile("water_0325Sun_LA_s_tree_cut_hist.root","READ")
hist_WATER_N = file_WATER.Get("water_0325Sun_LA_s_tree_cut_water_0325Sun_LA_s_f_NEGP").Clone(); hist_WATER_N.SetLineColor(2); scale = hist_WATER_N.GetEntries(); scale = 1/scale; hist_WATER_N.Scale(scale); hist_WATER_N.SetBins(10,0.0,0.5)
hist_WATER_P = file_WATER.Get("water_0325Sun_LA_s_tree_cut_water_0325Sun_LA_s_f_POSP").Clone(); hist_WATER_P.SetLineColor(2); scale = hist_WATER_P.GetEntries(); scale = 1/scale; hist_WATER_P.Scale(scale); hist_WATER_P.SetBins(10,0.0,0.5)
file_TEA = TFile("tea_0325Sun_LA_s_tree_cut_hist.root","READ")
hist_TEA_N = file_TEA.Get("tea_0325Sun_LA_s_tree_cut_tea_0325Sun_LA_s_f_NEGP").Clone(); hist_TEA_N.SetLineColor(3); scale = hist_TEA_N.GetEntries(); scale = 1/scale; hist_TEA_N.Scale(scale); hist_TEA_N.SetBins(10,0.0,0.5)
hist_TEA_P = file_TEA.Get("tea_0325Sun_LA_s_tree_cut_tea_0325Sun_LA_s_f_POSP").Clone(); hist_TEA_P.SetLineColor(3); scale = hist_TEA_P.GetEntries(); scale = 1/scale; hist_TEA_P.Scale(scale); hist_TEA_P.SetBins(10,0.0,0.5)
file_JUICE = TFile("juice_0325Sun_LA_s_tree_cut_hist.root","READ")
hist_JUICE_N = file_JUICE.Get("juice_0325Sun_LA_s_tree_cut_juice_0325Sun_LA_s_f_NEGP").Clone(); hist_JUICE_N.SetLineColor(4); scale = hist_JUICE_N.GetEntries(); scale = 1/scale; hist_JUICE_N.Scale(scale); hist_JUICE_N.SetBins(10,0.0,0.5)
hist_JUICE_P = file_JUICE.Get("juice_0325Sun_LA_s_tree_cut_juice_0325Sun_LA_s_f_POSP").Clone(); hist_JUICE_P.SetLineColor(4); scale = hist_JUICE_P.GetEntries(); scale = 1/scale; hist_JUICE_P.Scale(scale); hist_JUICE_P.SetBins(10,0.0,0.5)
file_COFFEE = TFile("coffee_0325Sun_LA_s_tree_cut_hist.root","READ")
hist_COFFEE_N = file_COFFEE.Get("coffee_0325Sun_LA_s_tree_cut_coffee_0325Sun_LA_s_f_NEGP").Clone(); hist_COFFEE_N.SetLineColor(5); scale = hist_COFFEE_N.GetEntries(); scale = 1/scale; hist_COFFEE_N.Scale(scale); hist_COFFEE_N.SetBins(10,0.0,0.5)
hist_COFFEE_P = file_COFFEE.Get("coffee_0325Sun_LA_s_tree_cut_coffee_0325Sun_LA_s_f_POSP").Clone(); hist_COFFEE_P.SetLineColor(5); scale = hist_COFFEE_P.GetEntries(); scale = 1/scale; hist_COFFEE_P.Scale(scale); hist_COFFEE_P.SetBins(10,0.0,0.5)
file_BEER = TFile("beer_0325Sun_LA_s_tree_cut_hist.root","READ")
hist_BEER_N = file_BEER.Get("beer_0325Sun_LA_s_tree_cut_beer_0325Sun_LA_s_f_NEGP").Clone(); hist_BEER_N.SetLineColor(6); scale = hist_BEER_N.GetEntries(); scale = 1/scale; hist_BEER_N.Scale(scale); hist_BEER_N.SetBins(10,0.0,0.5)
hist_BEER_P = file_BEER.Get("beer_0325Sun_LA_s_tree_cut_beer_0325Sun_LA_s_f_POSP").Clone(); hist_BEER_P.SetLineColor(6); scale = hist_BEER_P.GetEntries(); scale = 1/scale; hist_BEER_P.Scale(scale); hist_BEER_P.SetBins(10,0.0,0.5)
file_COLA = TFile("COLA_COKE_0325Sun_LA_s_tree_cut_hist.root","READ")
hist_COLA_N = file_COLA.Get("COLA_COKE_0325Sun_LA_s_tree_cut_COLA_COKE_0325Sun_LA_s_f_NEGP").Clone(); hist_COLA_N.SetLineColor(7); scale = hist_COLA_N.GetEntries(); scale = 1/scale; hist_COLA_N.Scale(scale); hist_COLA_N.SetBins(10,0.0,0.5)
hist_COLA_P = file_COLA.Get("COLA_COKE_0325Sun_LA_s_tree_cut_COLA_COKE_0325Sun_LA_s_f_POSP").Clone(); hist_COLA_P.SetLineColor(7); scale = hist_COLA_P.GetEntries(); scale = 1/scale; hist_COLA_P.Scale(scale); hist_COLA_P.SetBins(10,0.0,0.5)
'''
# Overlay the normalized NEGATIVE-emotion histograms of every beverage on the
# current (log-scale) canvas, attach a legend, and save the canvas as a PDF.
hist_COLA_N.SetTitle(CanvasTitle+", with NEGATIVE on each beverage at LA (Normalized)")
hist_COLA_N.SetXTitle("Negative words propotion of Total words on a Tweet")
hist_COLA_N.SetYTitle("Emotion propotion (Log scale)")
# COLA is drawn first and owns the axes; the rest are stacked with "same".
hist_COLA_N.Draw("hist")
for overlay_hist in (hist_BEER_N, hist_COFFEE_N, hist_JUICE_N,
                     hist_TEA_N, hist_WATER_N, hist_WINE_N):
    overlay_hist.Draw("hist same")
leg = TLegend(0.65, 0.65, 0.85, 0.85)
leg.SetBorderSize(0)
leg.SetFillColor(10)
for entry_hist, entry_label in ((hist_COLA_N, "COLA"), (hist_BEER_N, "BEER"),
                                (hist_COFFEE_N, "COFFEE"), (hist_JUICE_N, "JUICE"),
                                (hist_TEA_N, "TEA"), (hist_WATER_N, "WATER"),
                                (hist_WINE_N, "WINE")):
    leg_entry = leg.AddEntry(entry_hist, entry_label, "f")
leg.Draw()
cann.Modified()
cann.Update()
cann.Print(FilenameTitle+"_Drinks_Negative_log.pdf")
# Same NEGATIVE-emotion overlay, redrawn on the second canvas with a linear
# (natural) y-axis, then printed to its own PDF.
cann1.cd()
gPad.SetLogy(0)
hist_COLA_N.SetYTitle("Emotion propotion (Natural scale)")
hist_COLA_N.Draw("hist")
for overlay_hist in (hist_BEER_N, hist_COFFEE_N, hist_JUICE_N,
                     hist_TEA_N, hist_WATER_N, hist_WINE_N):
    overlay_hist.Draw("hist same")
leg = TLegend(0.65, 0.65, 0.85, 0.85)
leg.SetBorderSize(0)
leg.SetFillColor(10)
for entry_hist, entry_label in ((hist_COLA_N, "COLA"), (hist_BEER_N, "BEER"),
                                (hist_COFFEE_N, "COFFEE"), (hist_JUICE_N, "JUICE"),
                                (hist_TEA_N, "TEA"), (hist_WATER_N, "WATER"),
                                (hist_WINE_N, "WINE")):
    leg_entry = leg.AddEntry(entry_hist, entry_label, "f")
leg.Draw()
cann1.Modified()
cann1.Update()
cann1.Print(FilenameTitle+"_Drinks_Negative.pdf")
# Overlay the normalized POSITIVE-emotion histograms on the log-scale canvas.
canp.cd()
gPad.SetLogy(1)
# Bug fix: this canvas plots the POSITIVE-word histograms, but the title was
# copy-pasted from the negative canvas and said "NEGATIVE".
hist_COLA_P.SetTitle(CanvasTitle+", with POSITIVE on each beverage at LA (Normalized)")
# NOTE(review): "propotion" is a typo but is kept so all canvases stay
# consistent; fix it everywhere in one pass if desired.
hist_COLA_P.SetXTitle("Positive words propotion of Total words on a Tweet")
hist_COLA_P.SetYTitle("Emotion propotion (Log scale)")
# COLA owns the axes; the other beverages are drawn on top.
hist_COLA_P.Draw("hist")
for overlay_hist in (hist_BEER_P, hist_COFFEE_P, hist_JUICE_P,
                     hist_TEA_P, hist_WATER_P, hist_WINE_P):
    overlay_hist.Draw("hist same")
leg = TLegend(0.65, 0.65, 0.85, 0.85)
leg.SetBorderSize(0)
leg.SetFillColor(10)
for entry_hist, entry_label in ((hist_COLA_P, "COLA"), (hist_BEER_P, "BEER"),
                                (hist_COFFEE_P, "COFFEE"), (hist_JUICE_P, "JUICE"),
                                (hist_TEA_P, "TEA"), (hist_WATER_P, "WATER"),
                                (hist_WINE_P, "WINE")):
    leg_entry = leg.AddEntry(entry_hist, entry_label, "f")
leg.Draw()
canp.Modified()
canp.Update()
canp.Print(FilenameTitle+"_Drinks_Positive_log.pdf")
# Same POSITIVE-emotion overlay on a linear (natural) y-axis canvas.
canp1.cd()
gPad.SetLogy(0)
# Bug fix: the title said "NEGATIVE" although this is the positive plot.
hist_COLA_P.SetTitle(CanvasTitle+", with POSITIVE on each beverage at LA (Normalized)")
hist_COLA_P.SetXTitle("Positive words propotion of Total words on a Tweet")
hist_COLA_P.SetYTitle("Emotion propotion (Natural scale)")
hist_COLA_P.Draw("hist")
for overlay_hist in (hist_BEER_P, hist_COFFEE_P, hist_JUICE_P,
                     hist_TEA_P, hist_WATER_P, hist_WINE_P):
    overlay_hist.Draw("hist same")
leg = TLegend(0.65, 0.65, 0.85, 0.85)
leg.SetBorderSize(0)
leg.SetFillColor(10)
for entry_hist, entry_label in ((hist_COLA_P, "COLA"), (hist_BEER_P, "BEER"),
                                (hist_COFFEE_P, "COFFEE"), (hist_JUICE_P, "JUICE"),
                                (hist_TEA_P, "TEA"), (hist_WATER_P, "WATER"),
                                (hist_WINE_P, "WINE")):
    leg_entry = leg.AddEntry(entry_hist, entry_label, "f")
leg.Draw()
canp1.Modified()
canp1.Update()
canp1.Print(FilenameTitle+"_Drinks_Positive.pdf")
|
import os
from datetime import datetime, timedelta
from functools import wraps
def list_files(dirname, filter=('.json',)):
    """Recursively list files under *dirname* whose extension is in *filter*.

    Parameters:
        dirname: root directory to walk.
        filter: collection of extensions (with the leading dot) to keep.
            The default was a mutable list (anti-pattern); it is now an
            immutable tuple, which is backward compatible for the
            membership test performed here.

    Returns:
        list of full paths of every matching file, in os.walk order.
    """
    # NOTE: the parameter name `filter` shadows the builtin; kept for
    # backward compatibility with callers passing it as a keyword.
    return [
        os.path.join(maindir, filename)
        for maindir, _subdirs, file_name_list in os.walk(dirname)
        for filename in file_name_list
        if os.path.splitext(filename)[1] in filter
    ]
def throttle(seconds=0, minutes=0, hours=0):
    """Decorator factory that rate-limits calls to the wrapped function.

    At most one call is executed per throttle period (the sum of the given
    seconds/minutes/hours); throttled calls are silently dropped and return
    None. The first call always executes. Each decorated function keeps its
    own last-call timestamp.

    Fix: removed a stray trailing '|' artifact on the final return line
    that made the function a syntax error.
    """
    throttle_period = timedelta(seconds=seconds, minutes=minutes, hours=hours)

    def throttle_decorator(fn):
        # datetime.min guarantees the first invocation is never throttled.
        time_of_last_call = datetime.min

        @wraps(fn)
        def wrapper(*args, **kwargs):
            nonlocal time_of_last_call
            now = datetime.now()
            if now - time_of_last_call > throttle_period:
                time_of_last_call = now
                return fn(*args, **kwargs)
            # Throttled: the call is dropped and None is returned.
        return wrapper
    return throttle_decorator
import imaplib
import socket
import re
class ImapGmailClient:
    """Minimal IMAP client for Gmail: log in, log out, count unseen mail.

    The constructor opens the SSL connection immediately; call login()
    before logout() or get_new_mail_count().
    """
    IMAP_HOST = 'imap.gmail.com'

    def __init__(self, login, password):
        # A 10s default socket timeout keeps IMAP calls from hanging forever.
        socket.setdefaulttimeout(10)
        self.username = login
        self.password = password
        self.authorized = False
        self.imap = imaplib.IMAP4_SSL(self.IMAP_HOST)

    def __del__(self):
        # Best-effort cleanup: logout() can fail if the connection is
        # already dead, and exceptions escaping __del__ are undesirable.
        try:
            self.imap.logout()
        except Exception:
            pass

    def login(self):
        """Authenticate with the stored credentials.

        Raises:
            AuthErrorException: when the server reports bad credentials.
            imaplib.IMAP4.error: re-raised for any other login failure.
        """
        try:
            self.imap.login(self.username, self.password)
        except imaplib.IMAP4.error as e:
            self.authorized = False
            # Fixes: exceptions have no .message attribute on Python 3
            # (use str(e)); the message used a %s placeholder with
            # str.format (it was never substituted); and self.login is the
            # bound method, not the username.
            if 'AUTHENTICATIONFAILED' in str(e):
                raise AuthErrorException('Wrong credentials for {}'.format(self.username))
            else:
                raise
        self.authorized = True

    def logout(self):
        """Log out; requires a prior successful login()."""
        if not self.authorized:
            raise NotAuthorizedException('You should be authorized to perform logout')
        self.imap.logout()
        self.authorized = False

    def get_new_mail_count(self):
        """Return the number of UNSEEN messages in INBOX, or False on error.

        Any failure also marks the session as no longer authorized.
        """
        try:
            x, y = self.imap.status('INBOX','(MESSAGES UNSEEN)')
        except Exception:
            self.authorized = False
            return False
        data = y[0]
        # imaplib returns bytes payloads on Python 3; decode before matching.
        if isinstance(data, bytes):
            data = data.decode('ascii', 'replace')
        return int(re.search(r'UNSEEN\s+(\d+)', data).group(1))
class AuthErrorException(Exception):
    """Raised by ImapGmailClient.login() when IMAP authentication fails."""
    pass
class NotAuthorizedException(Exception):
    """Raised when an operation requires a logged-in IMAP session.

    Fix: removed a stray trailing '|' artifact after `pass` that broke
    parsing of this class.
    """
    pass
import cv2
import rects
import utils
class Face(object):
    """Data on facial features: face, eyes, nose, mouth.

    Attributes are filled in by FaceTracker.update(); each holds an
    (x, y, w, h) rectangle for the detected feature, or None when the
    feature was not detected.
    """
    def __init__(self):
        self.face_rect = None       # whole-face bounding rectangle
        self.left_eye_rect = None   # eye searched in the image-right part of the face
        self.right_eye_rect = None  # eye searched in the image-left part of the face
        self.nose_rect = None       # nose searched in the central face region
        self.mouth_rect = None      # mouth searched in the lower face third
class FaceTracker(object):
    """A tracker for facial features: face,eyes,nose,mouth.

    Runs Haar-cascade detection on each frame passed to update() and
    exposes the results as a list of Face objects via the `faces` property.

    NOTE(review): `cv2.cv.CV_*` constants and the positional
    detectMultiScale signature are the legacy OpenCV 2.x API, and the
    cascade XML files are loaded from the working directory — confirm both
    before porting.
    """
    def __init__(self, scale_factor=1.2, min_neighbors=2, flags=cv2.cv.CV_HAAR_SCALE_IMAGE):
        # Detection tuning knobs, forwarded to every detectMultiScale call.
        self.scale_factor = scale_factor
        self.min_neighbors = min_neighbors
        self.flags = flags
        self._faces = []  # results of the most recent update()
        # Haar cascades for the whole face and each sub-feature.
        self._face_classifier = cv2.CascadeClassifier('haarcascade_frontalface_alt.xml')
        self._eye_classifier = cv2.CascadeClassifier('haarcascade_eye.xml')
        self._nose_classifier = cv2.CascadeClassifier('haarcascade_mcs_nose.xml')
        self._mouth_classifier = cv2.CascadeClassifier('haarcascade_mcs_mouth.xml')
    @property
    def faces(self):
        """Tracked facial features found by the last update() call."""
        return self._faces
    def update(self, image):
        """Re-detect all facial features in `image`, replacing prior results."""
        self._faces = []
        # Detect on an equalized grayscale version of the frame.
        if utils.is_gray(image):
            image = cv2.equalizeHist(image)
        else:
            image = cv2.cvtColor(image, cv2.cv.CV_BGR2GRAY)
            cv2.equalizeHist(image, image)  # in-place equalization
        # Minimum face size is derived from the image size (helper divides by 8).
        min_size = utils.width_height_divided_by(image, 8)
        face_rects = self._face_classifier.detectMultiScale(image,
                                                            self.scale_factor,
                                                            self.min_neighbors,
                                                            self.flags,
                                                            min_size)
        if face_rects is not None:
            for face_rect in face_rects:
                face = Face()
                face.face_rect = face_rect
                x, y, w, h = face_rect
                # Sub-feature search regions are fixed fractions of the face
                # rect. NOTE(review): expressions like w*2/7 look like they
                # rely on Python 2 integer division; under Python 3 they
                # yield floats (which would break the slicing below) — confirm
                # the target interpreter.
                # seek a left eye
                search_rect = (x+w*4/7, y, w*2/7, h/2)
                face.left_eye_rect = self._detect_one_object(self._eye_classifier, image, search_rect, 64)
                # seek a right eye
                search_rect = (x+w/7, y, w*2/7, h/2)
                face.right_eye_rect = self._detect_one_object(self._eye_classifier, image, search_rect, 64)
                # seek a nose
                search_rect = (x+w/4, y+h/4, w/2, h/2)
                face.nose_rect = self._detect_one_object(self._nose_classifier, image, search_rect, 32)
                # seek a mouth
                search_rect = (x+w/6, y+h*2/3, w*2/3, h/3)
                face.mouth_rect = self._detect_one_object(self._mouth_classifier, image, search_rect, 16)
                self._faces.append(face)
    def _detect_one_object(self, classifier, image, rect, image_size_to_min_size_ratio):
        """Detect a single object inside the (x, y, w, h) sub-rect of `image`.

        Returns the first detection translated back into whole-image
        coordinates, or None when nothing is found. The minimum detection
        size is the image size divided by `image_size_to_min_size_ratio`.
        """
        x, y, w, h = rect
        min_size = utils.width_height_divided_by(image, image_size_to_min_size_ratio)
        sub_image = image[y:y+h, x:x+w]
        sub_rects = classifier.detectMultiScale(sub_image, self.scale_factor, self.min_neighbors, self.flags, min_size)
        if len(sub_rects) == 0:
            return None
        # Offset by the search rect origin to convert to image coordinates.
        sub_x, sub_y, sub_w, sub_h = sub_rects[0]
        return x + sub_x, y + sub_y, sub_w, sub_h
    def draw_debug_rects(self, image):
        """Draw rectangles around the tracked facial features"""
        # Single intensity for gray frames, distinct BGR colours otherwise.
        if utils.is_gray(image):
            face_color = 255
            left_eye_color = 255
            right_eye_color = 255
            nose_color = 255
            mouth_color = 255
        else:
            face_color = (255, 255, 255) # white
            left_eye_color = (0, 0, 255) # red
            right_eye_color = (0, 255, 255) # yellow
            nose_color = (0, 255, 0) # green
            mouth_color = (255, 0, 0) # blue
        for face in self.faces:
            rects.outline_rect(image, face.face_rect, face_color)
            rects.outline_rect(image, face.left_eye_rect, left_eye_color)
            rects.outline_rect(image, face.right_eye_rect, right_eye_color)
            rects.outline_rect(image, face.nose_rect, nose_color)
            rects.outline_rect(image, face.mouth_rect, mouth_color)
|
from django.db import models
class TextStatementModel(models.Model):
    """A text statement with an optional author and an optional source.

    Fix: removed a stray trailing '|' artifact on the __str__ return line
    that made the method a syntax error. Field definitions are unchanged
    (no schema/migration impact).
    """
    # The statement body; required both in forms and in the database.
    statement_text = models.CharField(max_length=500, blank=False, null=False)
    # NOTE(review): blank=False with null=True is an unusual combination for
    # a CharField (required in forms, nullable in the DB) — confirm intent.
    statement_author = models.CharField(max_length=500, blank=False, null=True)
    # NOTE(review): 'TextSatementSourceModel' looks misspelled ("Satement"),
    # but the string must match the related model's actual declared name —
    # verify against that model before "correcting" it.
    statement_source = models.ForeignKey('TextSatementSourceModel', on_delete=models.CASCADE, null=True)

    @staticmethod
    def get_manager():
        """Return the model's default manager (kept for API compatibility)."""
        return TextStatementModel.objects

    def __str__(self):
        return str(self.statement_source) + " : " + self.statement_text
#!/usr/bin/env python
import unittest
import detect_pointer_in_rect
class TestPointerDetection(unittest.TestCase):
    """Test case scaffold for the pointer-in-rect detector.

    No test methods are defined yet; add test_* methods here.
    """

    def setUp(self):
        # Fix: the original setUp contained an `if __name__=="__main__":`
        # guard that called an undefined `test_pointer_detection()` helper.
        # The CLI entry point belongs at module level (below); setUp is now
        # a no-op fixture hook.
        pass


if __name__ == "__main__":
    # Run the unittest command-line runner when executed directly.
    unittest.main()
|
# Starter code for Homework 4
# %%
# Import the modules we will use
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %%
# ** MODIFY **
# Set the file name and path to where you have stored the data
filename = 'streamflow_week4.txt'
# NOTE: the path is relative to the current working directory printed below.
filepath = os.path.join('../data', filename)
print(os.getcwd())
print(filepath)
# %%
# DON'T change this part -- this creates the lists you
# should use for the rest of the assignment
# no need to worry about how this is being done now we will cover
# this in later sections.
# Read the data into a pandas dataframe (tab separated, 30 header lines skipped)
data=pd.read_table(filepath, sep = '\t', skiprows=30,
                   names=['agency_cd', 'site_no', 'datetime', 'flow', 'code']
                   )
# Expand the dates to year month day (datetime is 'YYYY-MM-DD')
data[["year", "month", "day"]] =data["datetime"].str.split("-", expand=True)
data['year'] = data['year'].astype(int)
data['month'] = data['month'].astype(int)
data['day'] = data['day'].astype(int)
# Make a numpy array of this data; columns are [year, month, day, flow]
flow_data = data[['year', 'month','day', 'flow']].to_numpy()
# Getting rid of the pandas dataframe since we won't be using it this week
del(data)
# %%
# Starter Code
# Count the number of values with flow > 600 and month == 9 (September)
flow_count = np.sum((flow_data[:,3] > 600) & (flow_data[:,1]==9))
# this gives a list of T/F where the criteria are met
# (the expression result is discarded -- shown for illustration only)
(flow_data[:,3] > 600) & (flow_data[:,1]==9)
# this give the flow values where that criteria is met
flow_pick = flow_data[(flow_data[:,3] > 600) & (flow_data[:,1]==9), 3]
# this give the year values where that criteria is met
year_pic = flow_data[(flow_data[:,3] > 600) & (flow_data[:,1]==9), 0]
# this give the all rows where that criteria is met
all_pic = flow_data[(flow_data[:,3] > 600) & (flow_data[:,1]==9), ]
# Calculate the average flow for these same criteria
flow_mean = np.mean(flow_data[(flow_data[:,3] > 600) & (flow_data[:,1]==9),3])
print("Flow meets this critera", flow_count, " times")
print('And has an average value of', flow_mean, "when this is true")
# Make a histogram of data
# Use the linspace function to create a set of evenly spaced bins
mybins = np.linspace(0, 1000, num=15)
# another example using the max flow to set the upper limit for the bins
#mybins = np.linspace(0, np.max(flow_data[:,3]), num=15)
#Plotting the histogram
plt.hist(flow_data[:,3], bins = mybins)
plt.title('Streamflow')
plt.xlabel('Flow [cfs]')
plt.ylabel('Count')
# Get the quantiles of flow
# Two different approaches --- you should get the same answer
# just using the flow column
flow_quants1 = np.quantile(flow_data[:,3], q=[0,0.1, 0.5, 0.9])
print('Method one flow quantiles:', flow_quants1)
# Or computing on a column by column basis
flow_quants2 = np.quantile(flow_data, q=[0,0.1, 0.5, 0.9], axis=0)
# and then just printing out the values for the flow column
print('Method two flow quantiles:', flow_quants2[:,3])
# %% Q2. Data type
# print(type(flow_data[:,1]))
print(flow_data.ndim) # the number of dimensions
print(flow_data.shape[0]) # the number of rows (size of the first dimension)
print(flow_data.size) # the total size of the array
print("dtype:", flow_data[:,0].dtype)
# %% Weekly Forecast
# Select the flow values for the forecast week and a comparison window.
# NOTE(review): av_week1/av_pweek1/m_sp_wk1 are initialized as lists, but
# av_week1 and av_pweek1 are later rebound to scalars and m_sp_wk1 is unused.
av_week1 = []
av_pweek1 = []
m_sp_wk1 = []
# Forecast window: month 11 (November), days 22-28.
star_d = 22
end_d = 28
mon = 11
# 2019 flows inside the forecast window.
flow_week1 = flow_data[(flow_data[:,0] == 2019) & (flow_data[:,1]==mon) & (flow_data[:,2]>=star_d) & (flow_data[:,2]<=end_d), 3]
print(flow_week1)
print(np.mean(flow_data[(flow_data[:,0] == 2019) & (flow_data[:,1]==mon) & (flow_data[:,2]>=star_d) & (flow_data[:,2]<=end_d), 3]))
# Mean 2019 flow for the forecast window.
av_week1=(np.mean(flow_data[(flow_data[:,0] == 2019) & (flow_data[:,1]==mon) & (flow_data[:,2]>=star_d) & (flow_data[:,2]<=end_d), 3]))
# Flows for the week *before* the window (days 15-21), across all years.
flow_week1_sp = flow_data[(flow_data[:,1]==mon) & (flow_data[:,2]>=(star_d-7)) & (flow_data[:,2]<=(star_d-1)), 3]
print(flow_week1_sp)
# flow_pweek1 = flow_data[(flow_data[:,0] == 2020) & (flow_data[:,1]==9) & (flow_data[:,2]<=19) & (flow_data[:,2]>=13), 3]
#av_pweek1 = flow_data[(flow_data[:,0] == 2019) & (flow_data[:,1]==9) & (flow_data[:,2]>=27) & (flow_data[:,2]<=30), 3]
#print(av_pweek1)
# print(np.mean(flow_data[(flow_data[:,0] == 2019) & (flow_data[:,1]==9) & (flow_data[:,2]<=19) & (flow_data[:,2]>=13), 3]))
# NOTE(review): this recomputes the same 2019 window mean already stored in
# av_week1 (only the day conditions are written in the opposite order).
av_pweek1 = (np.mean(flow_data[(flow_data[:,0] == 2019) & (flow_data[:,1]==mon) & (flow_data[:,2]<=end_d) & (flow_data[:,2]>=star_d), 3]))
print("mean",av_pweek1)
# % Weekly Forecast
# this give the flow values week 1
# Count the days in the prior-week sample whose flow exceeds the 2019
# forecast-window mean (av_pweek1).
fll_ct_sp = np.sum(flow_week1_sp > av_pweek1)
# this gives a list of T/F where the criteria are met
# (flow_data[:,3] > 600) & (flow_data[:,1]==9)
# this give the flow values where that criteria is met
fl_pick_so = flow_data[(flow_data[:,3] > av_pweek1) & (flow_data[:,1]==mon), 3]
# this give the year values where that criteria is met
year_pic = flow_data[(flow_data[:,3] > av_pweek1) & (flow_data[:,1]==mon), 0]
# this give the all rows where that criteria is met
all_pic = flow_data[(flow_data[:,3] > av_pweek1) & (flow_data[:,1]==mon), ]
# Calculate the average flow for these same criteria
flow_mean_sp = np.mean(flow_data[(flow_data[:,3] > av_pweek1) & (flow_data[:,1]==mon),3])
# Spread of above-mean flows in month `mon`, restricted to years 2005-2019.
std_sp = np.std(flow_data[(flow_data[:,3] >= av_pweek1) & (flow_data[:,1]==mon) & (flow_data[:,0]<=2019) & (flow_data[:,0]>=2005),3])
# % Make a histogram of data
# %
print("Flow meets this critera", fll_ct_sp, " times")
print('And has an average value of', flow_mean_sp, "when this is true")
print('And has a standard deviation value of', std_sp, "when this is true")
# Use the linspace function to create a set of evenly spaced bins
mybins_sp = np.linspace(0, 900, num=10)
# another example using the max flow to set the upper limit for the bins
# mybins_sp = np.linspace(0, np.max(flow_week1_sp), num=15)
#Plotting the histogram
plt.hist(flow_week1_sp, bins = mybins_sp)
# NOTE(review): the title looks stale -- flow_week1_sp is month 11,
# days 15-21 (see the forecast cell above), not September 27-30. Confirm.
plt.title('Streamflow September 27-30 since 1989')
plt.xlabel('Flow [cfs]')
plt.ylabel('Count')
plt.savefig('week_test.png')
# Get the quantiles of flow
# Two different approaches --- you should get the same answer
# just using the flow column
flow_quants1 = np.quantile(flow_week1_sp, q=[0,0.1, 0.5, 0.9])
print('Method one flow quantiles:', flow_quants1)
# Or computing on a column by column basis (1-D input, so axis=0 is the same)
flow_quants2 = np.quantile(flow_week1_sp, q=[0,0.1, 0.5, 0.9], axis=0)
# and then just printing out the values for the flow column
print('Method two flow quantiles:', flow_quants2)
# %% Q3.
# Fraction of September daily flows (2010 onward) above each forecast value.
av_1 = 68
av_2 = 80
sp_flow_all = flow_data[(flow_data[:,1]==9) & (flow_data[:,0]>=2010), 3]
gr_fl_sp_wk1 = np.sum((sp_flow_all > av_1) )
per_flow_1 = 100*(gr_fl_sp_wk1/len(sp_flow_all))
gr_fl_sp_wk2 = np.sum((sp_flow_all > av_2) )
per_flow_2 = 100*(gr_fl_sp_wk2/len(sp_flow_all))
print("Times daily value is greater than my mean:",gr_fl_sp_wk1,"from a total of: ",len(sp_flow_all),"times.")
print("Percentage",per_flow_1)
print("Times daily value is greater than my mean:",gr_fl_sp_wk2,"from a total of: ",len(sp_flow_all),"times.")
print("Percentage",per_flow_2)
# %% Q5
# Compare early vs. late September flow distributions (all years).
fl_1_sp = flow_data[(flow_data[:,1]==9) & (flow_data[:,2]>=1) & (flow_data[:,2]<=15), 3]
fl_2_sp = flow_data[(flow_data[:,1]==9) & (flow_data[:,2]>=16) & (flow_data[:,2]<=30), 3]
# Use the linspace function to create a set of evenly spaced bins
mybins_sp = np.linspace(0, 1000, num=10)
# another example using the max flow to set the upper limit for the bins
# mybins_sp = np.linspace(0, np.max(flow_week1_sp), num=15)
#Plotting the histogram
plt.hist(fl_1_sp, bins = mybins_sp)
plt.title('Streamflow September 1-15 since 1989')
plt.xlabel('Flow [cfs]')
plt.ylabel('Count')
plt.savefig('sepu1.png')
# %%
plt.hist(fl_2_sp, bins = mybins_sp)
plt.title('Streamflow September 16-30 since 1989')
plt.xlabel('Flow [cfs]')
plt.ylabel('Count')
plt.savefig('sepu2.png')
# %%
|
from django.contrib.gis.db import models
from django.core.cache import cache
# Create your models here.
class Trip(models.Model):
    """One taxi trip record with GIS pickup/dropoff points.

    NOTE(review): ForeignKeys without on_delete and models.GeoManager are
    pre-Django-2.0 style; keep consistent when upgrading.
    """
    # class Meta:
    #     db_table = 'taxi_trip_timescale'
    # Provider and timing.
    vendorID = models.SmallIntegerField(null=True,blank=True)
    pickupTime = models.DateTimeField(null=True,blank=True)
    dropoffTime = models.DateTimeField(null=True,blank=True)
    storeAndFwdFlag = models.BooleanField() #Y(TRUE)= store and forward trip N(FALSE)= not a store and forward trip
    rateCodeID = models.SmallIntegerField(null=True,blank=True)
    # Geographic endpoints of the trip.
    pickupPoint = models.PointField(null=True,blank=True)
    dropoffPoint = models.PointField(null=True,blank=True)
    passengerCount = models.SmallIntegerField(null=True,blank=True)
    tripDistance = models.DecimalField(max_digits=5, decimal_places=2,null=True,blank=True)# in miles
    # Fare breakdown (all monetary, 2 decimal places).
    fareAmount = models.DecimalField(max_digits=6, decimal_places=2,null=True,blank=True)
    extra = models.DecimalField(max_digits=4, decimal_places=2,null=True,blank=True)
    MTATax = models.DecimalField(max_digits=3, decimal_places=2,null=True,blank=True)
    tipAmount = models.DecimalField(max_digits=5, decimal_places=2,null=True,blank=True)
    tolls_amount = models.DecimalField(max_digits=5, decimal_places=2,null=True,blank=True)
    # Ehail_fee : All NULL
    improvementSurcharge = models.DecimalField(max_digits=3, decimal_places=2,null=True,blank=True)
    totalAmount = models.DecimalField(max_digits=6, decimal_places=2,null=True,blank=True)
    paymentType = models.SmallIntegerField(null=True,blank=True)
    tripType = models.SmallIntegerField(null=True,blank=True)
    # TLC taxi-zone location ids.
    PULocationID = models.SmallIntegerField(null=True,blank=True)
    DOLocationID = models.SmallIntegerField(null=True,blank=True)
    # Districts resolved from the pickup/dropoff points.
    pickupDistrict = models.ForeignKey('District', related_name='trips_by_pickup', null=True)
    dropoffDistrict = models.ForeignKey('District', related_name='trips_by_dropoff',null=True)
    objects = models.GeoManager()
    # Returns the string representation of the model.
    def __str__(self): # __unicode__ on Python 2
        return str(self.pickupTime)
    # @property
    # def pickup(self):
    #     if self.pickupPoint is None:
    #         return ''
    #     precision = "{:.5f}"
    #     return '('+precision.format(self.pickupPoint.x)+','+precision.format(self.pickupPoint.y)+')'
    # @property
    # def dropoff(self):
    #     if self.dropoffPoint is None:
    #         return ''
    #     precision = "{:.5f}"
    #     return '('+precision.format(self.dropoffPoint.x)+','+precision.format(self.dropoffPoint.y)+')'
class Tripset(models.Model):
    """A named collection of Trip rows plus its derived district graph."""

    # Primary key of the sentinel set that stands for the whole Trip table.
    ALL_TRIPS_PK = 29

    name = models.CharField(max_length=200)
    trips = models.ManyToManyField(Trip, blank=True, related_name='set')  # , through='TripInSet'

    def __str__(self):
        return self.name

    def create_graph(self):
        """Rebuild this set's Edge graph: one weighted edge per
        (pickupDistrict, dropoffDistrict) pair, weight = trip count."""
        from django.db.models import Count
        edges = self.get_trips().filter(pickupDistrict__isnull=False, dropoffDistrict__isnull=False).values('pickupDistrict','dropoffDistrict').order_by().annotate(weight=Count('pk'))
        # Replace any previously computed graph for this set.
        Edge.objects.all().filter(tripset = self).delete()
        Edge.objects.bulk_create([Edge(
            tail_id = e['pickupDistrict'],
            head_id = e['dropoffDistrict'],
            weight = e['weight'],
            tripset = self) for e in edges])
        return 0

    def n_trips(self):
        """Number of trips in this set, cached for three days."""
        key = "set_"+str(self.pk)+"_count_trips"
        count = cache.get(key)
        if count is None:
            count = self.get_trips().count()
            cache.set(key,count,72*3600) #timeout 3 days
        return count

    def get_trips(self):
        """Queryset of this set's trips; the sentinel set maps to all trips."""
        # BUG FIX: was `if self.pk is 29` -- identity comparison with an int
        # literal only works by accident of CPython's small-int caching and
        # raises SyntaxWarning on Python 3.8+; use value equality instead.
        if self.pk == self.ALL_TRIPS_PK:
            return Trip.objects.all()
        return self.trips.all()
# class TripInSet(models.Model):
# trip = models.ForeignKey(Trip, related_name='set')
# tripset = models.ForeignKey(Tripset, related_name='info')
# pickupDistrict = models.ForeignKey(District,null=True)
# dropoffDistrict = models.ForeignKey(District,null=True)
class Edge(models.Model):
    """Directed, weighted edge of a Tripset's district graph
    (tail = pickup district, head = dropoff district)."""
    tail = models.ForeignKey('District',related_name='edges_as_tail')
    head = models.ForeignKey('District',related_name='edges_as_head')
    # Number of trips between the two districts (set by Tripset.create_graph).
    weight = models.FloatField(null=True)
    tripset = models.ForeignKey(Tripset, null=True, related_name='graph', on_delete=models.CASCADE)
class District(models.Model):
    """A named geographic district with a center point and polygon shape."""
    name = models.CharField(max_length=256)
    center = models.PointField(null=True)
    shape = models.MultiPolygonField()
    objects = models.GeoManager()
    #area_square_local_units = object.polygon.transform(srid, clone=False).area
    def __str__(self):
        return self.name
class BoroughDistrict(District):
    """Borough-level district (multi-table inheritance from District)."""
    # FIPS code of the borough.
    fip = models.CharField(max_length=5)
class CityDistrict(District):
    """Neighborhood-level district belonging to a borough."""
    borough = models.ForeignKey('BoroughDistrict')
    # NTA (Neighborhood Tabulation Area) code.
    ntacode = models.CharField(max_length=5)
    area = models.FloatField()
import argparse
import os
from cira.labs.leat.structs.Bunch import Bunch
def parse_args():
    """Parse the le-at command line and return the arguments as a Bunch.

    NOTE(review): args_to_bunch() is currently a stub that returns None,
    so this function returns None until it is implemented.
    """
    argument_parser = argparse.ArgumentParser(prog="le-at")
    create_arguments(argument_parser)
    args = argument_parser.parse_args()
    print(args)  # debug: raw argparse.Namespace
    args = args_to_bunch(args)
    print(args)  # debug: converted form
    return args
def create_arguments(parser):
    """Attach all le-at command-line arguments to *parser*."""
    # Positional arguments (previously wrapped in a one-shot nested helper).
    parser.add_argument('domain')
def args_to_bunch(args):
    """Convert an argparse.Namespace into a Bunch.

    BUG FIX: this was a bare stub returning None, so parse_args() handed
    None back to its caller.  NOTE(review): assumes Bunch accepts keyword
    attributes like the classic Bunch idiom -- confirm against
    cira.labs.leat.structs.Bunch.
    """
    return Bunch(**vars(args))
from WMCore.Configuration import Configuration
# Production step and part; both feed the request name and dataset tag below.
step = 'lhe'
part = 'p3'
config = Configuration()
config.section_('General')
config.General.requestName = '_'.join(['ttjets_dl', step, part])
config.section_('JobType')
# Private MC generation driven by the LHE pset.
config.JobType.pluginName = 'PrivateMC'
config.JobType.psetName = 'configs/ttjets_dl_lhe.py'
config.JobType.eventsPerLumi = 1000
config.section_('Data')
config.Data.outputPrimaryDataset = 'TTTo2L2Nu_TuneCUETP8M2_ttHtranche3_13TeV-powheg-pythia8'
# 50k events per job, 100M events total.
config.Data.splitting = 'EventBased'
config.Data.unitsPerJob = 50 * 1000
config.Data.totalUnits = 100 * 1000 * 1000
config.Data.publication = True
config.Data.outputDatasetTag = '{}_v1{}'.format(step, part)  # e.g. 'lhe_v1p3'
config.section_('Site')
config.Site.storageSite = 'T2_EE_Estonia'
# config.Site.whitelist = ['T2_EE_Estonia']
|
import mdtraj.io as io
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import rcParams

# Global plot styling.
rcParams.update({'figure.autolayout': True})
rcParams['axes.linewidth'] = 3
rcParams.update({'font.size': 20})

# Load the serialized tICA solution (eigenvectors + covariance matrix).
ti = io.loadh('tica_l20.h5')
vecs = ti['components']
cov = ti['covariance']

# Project the covariance onto the tICA eigenvectors.
dott = np.dot(cov, vecs.T)

# Total "dynamics": sum of squared column norms.
# NOTE(review): the column index runs over shape[0]; this is only correct
# when dott is square -- confirm the shapes of components vs. covariance.
trr = 0
for i in range(dott.shape[0]):
    trr += np.linalg.norm(dott[:, i]) ** 2

# Accumulated fractional contribution of each tICA eigenvector.
c3 = 0
cont = []
for i in range(dott.shape[0]):
    c3 += np.linalg.norm(dott[:, i]) ** 2 / float(trr)
    cont.append(c3)

# Plot accumulated tICA eigenvector contributions.
plt.plot(cont, 'o-', lw=2)
plt.xlim([-1, 22])  # zooming only on the first 22 tICs
plt.ylim([0, 1])
plt.grid(True, lw=1)
plt.xlabel('tICA eigenvector')
plt.ylabel('Contribution')
# BUG FIX: the keyword was misspelled "transparet", so the transparent
# background was silently never applied.
plt.savefig('tic_cont_zoom.png', dpi=100, transparent=True)
|
def plot_defaced(bids_dir, subject_label, session=None, t2w=None):
    """
    Plot brainmask created from original non-defaced image on defaced image
    to evaluate defacing performance.

    Parameters
    ----------
    bids_dir : str
        Path to BIDS root directory.
    subject_label : str
        Label of subject to be plotted (without 'sub-').
    session : str, optional
        If multiple sessions exist, create one plot per session.
    t2w : bool, optional
        If a T2w image exists, create a plot for the defaced T2w.
    """
    from bids import BIDSLayout
    from glob import glob
    from os.path import join as opj
    from matplotlib.pyplot import figure
    import matplotlib.pyplot as plt
    from nilearn.plotting import find_cut_slices, plot_stat_map

    layout = BIDSLayout(bids_dir)
    bidsonym_path = opj(bids_dir, 'sourcedata/bidsonym/sub-%s' % subject_label)
    if session is not None:
        defaced_t1w = layout.get(subject=subject_label, extension='nii.gz', suffix='T1w',
                                 return_type='filename', session=session)
    else:
        defaced_t1w = layout.get(subject=subject_label, extension='nii.gz', suffix='T1w',
                                 return_type='filename')
    for t1w in defaced_t1w:
        # Matching non-defaced brainmask written earlier by bidsonym.
        brainmask_t1w = glob(opj(bidsonym_path,
                                 t1w[t1w.rfind('/') + 1:t1w.rfind('.nii')] +
                                 '_brainmask_desc-nondeid.nii.gz'))[0]
        fig = figure(figsize=(15, 5))
        plt.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=-0.2, hspace=0)
        for i, e in enumerate(['x', 'y', 'z']):
            ax = fig.add_subplot(3, 1, i + 1)
            cuts = find_cut_slices(t1w, direction=e, n_cuts=12)
            plot_stat_map(brainmask_t1w, bg_img=t1w, display_mode=e,
                          cut_coords=cuts, annotate=False, dim=-1, axes=ax, colorbar=False)
        plt.savefig(opj(bidsonym_path,
                        t1w[t1w.rfind('/') + 1:t1w.rfind('.nii')] + '_desc-brainmaskdeid.png'))
    if t2w is not None:
        if session is not None:
            defaced_t2w = layout.get(subject=subject_label, extension='nii.gz', suffix='T2w',
                                     return_type='filename', session=session)
        else:
            defaced_t2w = layout.get(subject=subject_label, extension='nii.gz', suffix='T2w',
                                     return_type='filename')
        for t2w in defaced_t2w:
            brainmask_t2w = glob(opj(bidsonym_path,
                                     t2w[t2w.rfind('/') + 1:t2w.rfind('.nii')] +
                                     '_brainmask_desc-nondeid.nii.gz'))[0]
            # BUG FIX: a fresh figure is needed per T2w image; previously the
            # last T1w figure was reused, so T2w panels were drawn on top of
            # the already-saved T1w plot.
            fig = figure(figsize=(15, 5))
            plt.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=-0.2, hspace=0)
            for i, e in enumerate(['x', 'y', 'z']):
                ax = fig.add_subplot(3, 1, i + 1)
                cuts = find_cut_slices(t2w, direction=e, n_cuts=12)
                plot_stat_map(brainmask_t2w, bg_img=t2w, display_mode=e,
                              cut_coords=cuts, annotate=False, dim=-1, axes=ax, colorbar=False)
            plt.savefig(opj(bidsonym_path,
                            t2w[t2w.rfind('/') + 1:t2w.rfind('.nii')] + '_desc-brainmaskdeid.png'))
    # NOTE(review): t1w/t2w here are loop variables; if no image matched,
    # this raises NameError -- confirm callers always have at least one T1w.
    return (t1w, t2w)
def gif_defaced(bids_dir, subject_label, session=None, t2w=None):
    """
    Create a gif that loops through slices of the defaced image in
    x,y,z direction.

    Parameters
    ----------
    bids_dir : str
        Path to BIDS root directory.
    subject_label : str
        Label of subject to be plotted (without 'sub-').
    session : str, optional
        If multiple sessions exist, create one gif per session.
    t2w : bool, optional
        If a T2w image exists, create a gif for the defaced T2w.
    """
    from bids import BIDSLayout
    from glob import glob
    from os.path import join as opj
    from shutil import move
    import gif_your_nifti.core as gif2nif

    layout = BIDSLayout(bids_dir)
    bidsonym_path = opj(bids_dir, 'sourcedata/bidsonym/sub-%s' % subject_label)
    if session is not None:
        defaced_t1w = layout.get(subject=subject_label, extension='nii.gz', suffix='T1w',
                                 return_type='filename', session=session)
        if t2w is not None:
            defaced_t2w = layout.get(subject=subject_label, extension='nii.gz', suffix='T2w',
                                     return_type='filename', session=session)
    else:
        defaced_t1w = layout.get(subject=subject_label, extension='nii.gz', suffix='T1w',
                                 return_type='filename')
        if t2w is not None:
            # BUG FIX: this is the no-session branch, yet the query still
            # passed session=session (i.e. None), inconsistent with the T1w
            # query above; drop the session filter here.
            defaced_t2w = layout.get(subject=subject_label, extension='nii.gz', suffix='T2w',
                                     return_type='filename')
    # gif_your_nifti writes each .gif next to its source image.
    for t1_image in defaced_t1w:
        gif2nif.write_gif_normal(t1_image)
    if t2w is not None:
        for t2_image in defaced_t2w:
            gif2nif.write_gif_normal(t2_image)
    if session is not None:
        list_gifs = glob(opj(bids_dir, 'sub-%s/ses-%s/anat' % (subject_label, session),
                             'sub-%s*.gif' % subject_label))
    else:
        list_gifs = glob(opj(bids_dir, 'sub-%s/anat' % subject_label,
                             'sub-%s*.gif' % subject_label))
    for gif_nii in list_gifs:
        # BUG FIX: bidsonym_path already starts with bids_dir; joining the
        # two again produced a broken destination when bids_dir is relative.
        move(gif_nii, bidsonym_path)
def create_graphics(bids_dir, subject_label, session=None, t2w=None):
    """
    Setup and run the graphics workflow which creates the static
    plot(s) of defaced images with a brainmask overlaid and a
    gif looping through slices of the defaced images.

    Parameters
    ----------
    bids_dir : str
        Path to BIDS root directory.
    subject_label : str
        Label of subject to be plotted (without 'sub-').
    session : str, optional
        If multiple sessions exist, include them in the workflow.
    t2w : bool, optional
        If a T2w image exists, include it in the workflow.
    """
    import nipype.pipeline.engine as pe
    from nipype import Function
    from nipype.interfaces import utility as niu
    report_wf = pe.Workflow('report_wf')
    inputnode = pe.Node(niu.IdentityInterface(fields=['bids_dir', 'subject_label', 'session', 't2w']),
                        name='inputnode')
    # Wrap the two plotting helpers as nipype Function nodes.
    plt_defaced = pe.Node(Function(input_names=['bids_dir', 'subject_label', 'session', 't2w'],
                                   function=plot_defaced),
                          name='plt_defaced')
    gf_defaced = pe.Node(Function(input_names=['bids_dir', 'subject_label', 'session', 't2w'],
                                  function=gif_defaced),
                         name='gf_defaced')
    report_wf.connect([(inputnode, plt_defaced, [('bids_dir', 'bids_dir'),
                                                 ('subject_label', 'subject_label')]),
                       (inputnode, gf_defaced, [('bids_dir', 'bids_dir'),
                                                ('subject_label', 'subject_label')]),
                       ])
    # session/t2w inputs are only wired in when provided.
    if session:
        inputnode.inputs.session = session
        report_wf.connect([(inputnode, plt_defaced, [('session', 'session')]),
                           (inputnode, gf_defaced, [('session', 'session')]),
                           ])
    if t2w:
        inputnode.inputs.t2w = t2w
        report_wf.connect([(inputnode, plt_defaced, [('t2w', 't2w')]),
                           (inputnode, gf_defaced, [('t2w', 't2w')]),
                           ])
    inputnode.inputs.bids_dir = bids_dir
    inputnode.inputs.subject_label = subject_label
    report_wf.run()
|
from flask import Flask,redirect,url_for,render_template,request
import jwt
# import request as request
# from flask_jwt import JWT, jwt_required, current_identity
import flask_sijax as simpleajax
import sqlite3 as sql
# Flask application instance; the route handlers below register against it.
app = Flask(__name__)
# app.config['SECRET_KEY'] = 'super-secret'
@app.route('/createtable/<tablename>')
def createtable(tablename):
    """Create a 4-column table named *tablename* in database.db.

    SECURITY: *tablename* comes straight from the URL and is interpolated
    into SQL text (identifiers cannot be bound as parameters), so it must
    be validated first to prevent SQL injection.
    """
    if not tablename.isidentifier():
        return "Invalid table name", 400
    conn = sql.connect('database.db')
    print("Opened database successfully")
    try:
        conn.execute('CREATE TABLE ' + tablename +
                     ' (name TEXT, addr TEXT, city TEXT, pin TEXT)')
        print("Table created successfully")
    finally:
        # BUG FIX: the connection leaked when execute() raised.
        conn.close()
    return "Table created successfully"
@app.route('/list')
def list1():
    """Return every row of the `students` table as a plain string."""
    con = sql.connect("database.db")
    try:
        con.row_factory = sql.Row
        cur = con.cursor()
        cur.execute("select * from students")
        rows = cur.fetchall()
        print(str(rows))
        return str(rows)
        # return render_template("list.html", rows=rows)
    finally:
        # BUG FIX: the connection was never closed.
        con.close()
# BUG FIX: the rule was '/jwt' but the view requires two arguments, so every
# request failed with a TypeError; URL converters now supply them (matching
# the commented-out url_for('myjwt', username=..., password=...) in show()).
@app.route('/jwt/<username>/<password>')
def myjwt(username, password):
    """Encode username/password into a JWT, decode it back, render both."""
    print("myother page username" + username)
    print("myother page password" + password)
    a = {
        "username": username,
        "password": password
    }
    encoded_jwt = jwt.encode(a, 'srirampassword')
    print(encoded_jwt)
    try:
        decoded_jwt = jwt.decode(encoded_jwt, 'srirampassword')
        print(str(decoded_jwt))
    except jwt.InvalidTokenError:
        # BUG FIX: the old `finally: return` swallowed this error path and
        # always returned the success page.
        return "Error on jwt password matching"
    return '<html><body><div style="align:rigth; margin-left:10%"><p><br><span style="font-size:14pt;color:red"><b>Your JWT DECODED</b></span></b> :'+str(decoded_jwt)+'<br><span style="font-size:14pt;color:#0078d7 !important"><b>Your JWT ECODECD</b></span> :'+str(encoded_jwt)+'</p></div></body></html>'
@app.route('/')
def firstroute():
    """Serve the landing page."""
    return render_template('index.html')
@simpleajax.route(app, '/myajax')
def myajax():
    """Sijax endpoint stub.

    NOTE(review): the nested handler below is defined but never registered
    with sijax, and the view returns None (which Flask rejects) -- TODO wire
    this up via the flask_sijax request-processing pattern.
    """
    def myajax(obj_response):
        # Would push a browser alert once registered as a sijax callback.
        obj_response.alert("hi there")
@app.route('/show', methods=['get', 'post'])
def show():
    """Read email/pwd from the submitted form, round-trip them through a
    JWT and render both the encoded and decoded forms."""
    email = request.form['email']
    pwd = request.form['pwd']
    a = {
        "username": email,
        "password": pwd
    }
    encoded_jwt = jwt.encode(a, 'srirampassword')
    try:
        # BUG FIX: decoding used the key 'srirampassword312', which never
        # matches the encoding key, so this always failed; worse, the old
        # `finally: return` then referenced the unset decoded_jwt and raised
        # NameError. Decode with the same secret and return on success only.
        decoded_jwt = jwt.decode(encoded_jwt, 'srirampassword')
        print(str(decoded_jwt))
    except jwt.InvalidTokenError:
        return "Error on jwt password matching"
    return '<html><body><div style="align:rigth; margin-left:10%"><p><br><span style="font-size:14pt;color:red"><b>Your JWT DECODED</b></span></b> :'+str(decoded_jwt)+'<br><span style="font-size:14pt;color:#0078d7 !important"><b>Your JWT ECODECD</b></span> :'+str(encoded_jwt)+'</p></div></body></html>'
# @app.route("/",)
# def index():
# return("<html>....Ha`i...</html>")
@app.route("/hello/")
def hello():
    """Static greeting page."""
    return "<html>....Hai...</html>"


@app.route("/onearg/<firstname>")
def arg(firstname):
    """Greet the name captured from the URL."""
    return f'hello hey you {firstname} hai'


@app.route('/secarg/<int:num>')
def secarg(num):
    """Echo an integer URL segment."""
    return f"your int value is {num}"


@app.route('/thirdarg/<float:dec>')
def thirdarg(dec):
    """Echo a float URL segment (fixed-point formatting)."""
    return f"your float value is {dec:f}"


@app.route('/admin/<username>')
def admin(username):
    """Admin landing page."""
    return f"Hello_admin {username}"


@app.route('/guest')
def guest():
    """Guest landing page."""
    return "Hello_guest"


@app.route('/user/<username>')
def selectuser(username):
    """Send 'admin' to the admin page and everyone else to the guest page."""
    if username != "admin":
        return redirect(url_for('guest'))
    return redirect(url_for('admin', username=username))
if __name__ == '__main__':
    # Run the Flask development server (debug mode; not for production).
    app.run(debug=True)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.