index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
17,800 | b58f44708f96f19ff86337cd65538d84a811d44a | from django.apps import AppConfig
class PoloniexConfig(AppConfig):
    """Django application configuration for the `poloniex` app."""

    # Dotted module path Django uses to identify this app.
    name = 'poloniex'
|
17,801 | 386f935573c8d7f9a5df1de1f99aa4c0d144a7de | # This file is necessary for pytest to find the modules we're trying
# to test.
|
17,802 | 3c19de8e22bd952c179726ffd754a05d19246fa6 | def fixed_maintenance(flat_sqft):
'''(number) -> number
Calculate the fixed_maintenance cost for a flat by inputting flat_sqft against a fixed maintenance charge per sqft of 50
Examples:
>>>(1200)
60000
>>>(1000)
50000
'''
if (flat_sqft <= 0):
print("Area of a flat cannot be less than or equal to zero")
return
fixed_maintenance = flat_sqft * 300
return(fixed_maintenance)
def maintenance_amount(flat_sqft, charge_per_sqft):
    '''(number, number) -> number

    Return the maintenance amount for a flat: area times the per-sqft
    charge. Prints an error and returns None for a non-positive area.

    >>> maintenance_amount(1200, 50)
    60000
    >>> maintenance_amount(1000, 40)
    40000
    '''
    if flat_sqft <= 0:
        print("Area of a flat cannot be less than or equal to zero")
        return
    return flat_sqft * charge_per_sqft
def variable_maintenance(flat_sqft, charge_per_sqft):
    '''(number, number) -> number

    Calculate the variable maintenance for a flat: area * rate plus a
    size-based surcharge. Flats over 1500 sqft pay a surcharge equal to
    their full area; smaller flats pay 3/4 of their area. Prints an error
    and returns None for a non-positive area.

    >>> variable_maintenance(1600, 20)
    33600
    >>> variable_maintenance(1000, 40)
    40750.0
    '''
    if flat_sqft <= 0:
        print("Area of a flat cannot be less than or equal to zero")
        return
    if flat_sqft > 1500:
        sqft_surcharge = flat_sqft
    else:
        sqft_surcharge = flat_sqft * 3 / 4
    # Doc fix: the old example claimed (1600, 20) -> 33200.0, which matches
    # neither branch of this logic; the actual result is 32000 + 1600 = 33600.
    return (flat_sqft * charge_per_sqft) + sqft_surcharge
|
17,803 | e88da479a5884177f883239cf625899436ecc41f | import json
import requests
from .fetch_github_info import AUTHENTICATED_USER_ENDPOINT, fetch_github_info
def test_fetch_github_info(monkeypatch):
    """fetch_github_info must GET the authenticated-user endpoint with a
    token Authorization header and return the decoded JSON payload."""

    class StubResponse:
        """Minimal stand-in for requests.Response."""

        def __init__(self, payload) -> None:
            assert isinstance(payload, (bytes, str))
            self.content = payload

        def json(self):
            return json.loads(self.content)

    def fake_get(*args, **kwargs):
        headers = kwargs["headers"]
        # The request must target the right URL with token authentication.
        assert args[0] == AUTHENTICATED_USER_ENDPOINT
        assert "Authorization" in headers
        assert headers["Authorization"].startswith("token ")
        assert "Accept" in headers
        return StubResponse(b'{"login":"test","id":1}')

    monkeypatch.setattr(requests, "get", fake_get)

    info = fetch_github_info("token")
    assert info["login"] == "test"
    assert info["id"] == 1
|
17,804 | b684f4bc49e6beaf26a32a7391c1f4c04731179e | import math
def getDistanceBetween2Point(point1, point2):
    """Return the Euclidean distance between two 2-D points (x, y) pairs."""
    # math.hypot is the stdlib idiom and is numerically safer than
    # sqrt(dx*dx + dy*dy) for very large/small components.
    return math.hypot(point1[0] - point2[0], point1[1] - point2[1])
|
17,805 | b4e557b0b8c42b678f90f85ec08d65a1d6efa012 | from .result_row_scrapper import ResultRowScrapper
from .race_day_scrapper import RaceDayScrapper |
17,806 | 592c78e5f706b57b0c19b0c5f00384a0a469417a | a=1
# Endless prompt loop (a is never changed): read a radius and print the
# circle's area. The prompt text is Turkish: "Enter the numeric R value to
# find the circle's area".
while a==1:
    sayı = int(input("Daire Alanını Bulmak İçin Lütfen Sayısal R Değerini Giriniz\n R="))
    pi = 3.14
    # NOTE(review): int(sayı) is redundant — sayı is already an int.
    print(int(sayı)**2*pi)
|
17,807 | 0b8eeeeac30132f5efe2420405ef71a9270b9504 | from django.contrib import admin
# Register your models here.
from .models import Goods, GoodsImage, GoodsCategory, GoodsCategoryBrand

# Expose the goods catalogue models in the Django admin with the default
# ModelAdmin options.
admin.site.register(GoodsCategory)
admin.site.register(Goods)
admin.site.register(GoodsImage)
admin.site.register(GoodsCategoryBrand)
|
17,808 | faf0681cfafe575c7a292338c765346cc3280a00 | #pound character is used for comments
#this is used to read your comments
# NOTE: Python 2 print statements — this snippet is not valid Python 3.
print "This is your comment"#new comments
#print "This wont run"
print "This will run"
|
17,809 | fd11aa2a7da4914069f9307cab5be538c39be734 | from collections import namedtuple
# Basic colour / geometry value types.
RGB = namedtuple('RGB', ['r', 'g', 'b'])
YCbCr = namedtuple('YCbCr', ['y', 'cb', 'cr'])
Size = namedtuple('Size', ['cx', 'cy', 'unit'])
Offset = namedtuple('Offset', ['x', 'y'])
Area = namedtuple('Area', ['x', 'y', 'cx', 'cy', 'unit'])

# Shared field lists for the block/plane record types below.
_block_properties = ['size', 'rows']
_plane_ycbcr_properties = ['size', 'y', 'cb', 'cr']

BlockRGB = namedtuple('BlockRGB', _block_properties)
BlockInt = namedtuple('BlockInt', _block_properties)
PlaneYCbCr = namedtuple('PlaneYCbCr', _plane_ycbcr_properties)
PlaneYCbCr420 = namedtuple('PlaneYCbCr420', _plane_ycbcr_properties)
# NOTE(review): the three plural container types reuse _block_properties
# ('size', 'rows') rather than the plane field list — confirm intentional.
BlocksRGB = namedtuple('BlocksRGB', _block_properties)
PlanesYCbCr = namedtuple('PlanesYCbCr', _block_properties)
PlanesYCbCr420 = namedtuple('PlanesYCbCr420', _block_properties)
|
17,810 | 93e13b13f9736a9420640768e552e3f41543c0cc | #Написать программу, моделюрующую популяцию кролей. Данные кол-ва сезоно
# (cont.) Rabbit population simulator. Inputs: seasons to maturity, pairs of
# offspring per litter, lifespan in seasons, probability of being eaten.
print("взросление в сезонах")
A = int(input())  # seasons needed to reach maturity
print("жизнь в сезонах")
C = int(input())  # lifespan in seasons
print("кол-во пар в потомках")
B = int(input())  # pairs of offspring per litter
print("вероятность быть съедeным")
P = int(input())  # probability of being eaten — NOTE(review): read but never used
print("")
D = 10**11  # population cap ("died of hunger" limit)
print("начальная популяция")
S = int(input())  # initial population
print("длительность жизни мира")
E = int(input())  # number of seasons to simulate
Q = []  # one entry per simulated season (age-cohort bookkeeping)
import random  # NOTE(review): imported but never used in the visible code
if A > C:
    # maturity takes longer than the lifespan -> "population dies"
    print("Популяция умирает")
elif S > D:
    # over the cap -> "rabbits starved"
    print("Кролики умерли от голода")
else:
    while E > 0:
        if len(Q) <= A:
            # not yet mature: just age the population
            Q.append(1)
        elif C > len(Q) > A:
            # mature and alive: add offspring of the adult cohorts
            S += (len(Q)-A)*B
            Q = Q + [(len(Q)-A)*B]
        else:
            # NOTE(review): '=' (not '+=') discards the accumulated S here,
            # and Q[0] is subtracted as the dying cohort — confirm intent.
            S = (len(Q)-A)*B - Q[0]
            Q = Q[1:] + [(len(Q)-A)*B]
        E -= 1
        print("популяция=", S)
# NOTE(review): E is 0 when the loop finishes, so this subtraction is a
# no-op on that path — presumably leftover from an earlier version.
S = S - E
print("популяцияEnd=", S)
|
17,811 | 448c3d6f8af4658d566a1659028b6f293af63271 | #
# module np_helper.py
# Convenience functions for the python HPC demo programs
#
import sys
import numpy
import contextlib
def loadmatrix1(filename):
    """Loads a 2-D numpy array from a text file in the simple "MATRIX" format.

    The first line must be the header ``MATRIX <numrows> <numcols>``; the
    remaining lines are the matrix body, parsed with numpy.loadtxt.
    (Doc fix: the old docstring said "Saves" — copy/paste from savematrix1.)
    """
    with open(filename, "r") as F:
        header = next(F).split()
        # The header should contain 3 elements:
        #   "MATRIX", numrows, numcols
        assert header[0].upper() == "MATRIX"
        numrows, numcols = int(header[1]), int(header[2])
        # ndmin=2 keeps a single-row file 2-D; without it loadtxt returns a
        # 1-D array and the shape assertion below fails.
        M = numpy.loadtxt(F, ndmin=2)
        assert M.shape == (numrows, numcols)
        return M
def savematrix1(M, filename, fmt="%.18e"):
"""Saves a 2-D numpy array to a text file.
The output is written in the simple "MATRIX" format.
By default, a full double precision format is used.
If you know exactly the precision of your values, you can avoid
printing so much by overriding the `fmt` field, e.g. to"%10.3f".
"""
assert len(M.shape) == 2
Rows, Cols = M.shape
with open(filename, "w") as F:
numpy.savetxt(F, M,
fmt=fmt,
header="MATRIX %d %d" % (Rows, Cols),
comments="")
def printmatrix0(M, outF=sys.stdout):
    """Print a matrix using numpy's default str() rendering.

    Output is generally ugly unless `numpy.set_printoptions` was tuned
    beforehand. (In python 2 this was: print >>outF, str(M).)
    """
    outF.write("%s\n" % (M,))
    outF.flush()
@contextlib.contextmanager
def printoptions(*args, **kwargs):
    """Temporarily set numpy's print options inside a `with` block.

    http://stackoverflow.com/questions/2891790/how-to-pretty-printing-a-numpy-array-without-scientific-notation-and-with-given
    See also: numpy.set_printoptions help page.
    """
    original = numpy.get_printoptions()
    numpy.set_printoptions(*args, **kwargs)
    try:
        yield
    finally:
        # Restore even if the with-body raised; the old version left the
        # altered options in place on an exception.
        numpy.set_printoptions(**original)
def printmatrix1(M, outF=sys.stdout,
                 float_fmt=' %7.3f',
                 linewidth=99999,
                 **printopts):
    """Advanced matrix printing — allows locally setting the precision,
    formatting, etc.

    float_fmt may be a %-format string or a callable float -> str; pass
    None to keep numpy's default float formatting. Extra keyword arguments
    are forwarded to numpy.set_printoptions. Note that some defaults are
    deliberately altered here (huge linewidth, 3-decimal floats).
    """
    # Bug fix: pop() (not get()) so 'formatter' is not also left inside
    # **printopts, which made the printoptions() call below raise
    # TypeError (duplicate keyword) whenever a caller passed formatter=...
    formatter = printopts.pop("formatter", None)
    if float_fmt is not None:
        if formatter is None:
            formatter = {}
        if callable(float_fmt):
            formatter['float'] = float_fmt
        else:
            formatter['float'] = lambda x: float_fmt % x
    # printoptions() yields no value, so there is nothing to bind with
    # `as` (the old `as FF` target was always None).
    with printoptions(formatter=formatter,
                      linewidth=linewidth,
                      **printopts):
        outF.write(str(M))
        outF.write("\n")
    outF.flush()
|
17,812 | 5fcea9ebfabe153f22efc3065ef32d25b6d5d282 | import os.path
import math
from tqdm import tqdm
from collections import defaultdict
import copy
def prune(input_list):
    """Strip whitespace from every entry and drop the blank ones."""
    stripped = (entry.strip() for entry in input_list)
    return [s for s in stripped if s != '' and s != ' ']
# Assumption :
# Sram to SysArray and vice versa is very fast
# Compute cycles and memory cycles have same frequency (this was already assumed in original implementation)
def dram_traces_with_delay(
    filter_sram_size = 64, ifmap_sram_size= 64, ofmap_sram_size = 64,
    filt_base = 1000000, ifmap_base=0, ofmap_base = 2000000,
    word_size_bytes = 1,
    default_read_bw = 10,
    default_write_bw = 10,
    buffer_swap_factor=0.7,
    sram_read_trace_file = "sram_read.csv",
    sram_write_trace_file = "sram_write.csv",
    dram_filter_trace_file = "dram_filter_read.csv",
    dram_ifmap_trace_file = "dram_ifmap_read.csv",
    dram_ofmap_trace_file = "dram_ofmap_write.csv"
):
    """Replay SRAM read/write traces through a double-buffered SRAM model
    and emit the corresponding DRAM traces under limited read/write
    bandwidth, returning the total number of simulated (stalled) cycles.

    Each input CSV row is "<cycle>, <addr>, <addr>, ...". Read requests
    are split into ifmap vs filter streams by address (filter addresses
    start at filt_base). NOTE(review): ifmap_base and ofmap_base are
    accepted but never used in this body — confirm they are intentional
    placeholders.
    """
    sram_read_requests=open(sram_read_trace_file,'r')
    sram_write_requests=open(sram_write_trace_file,'r')

    # Per-cycle pending request lists, keyed by the SRAM trace cycle.
    dict_sram_ifmap_requests={}
    dict_sram_ofmap_requests={}
    dict_sram_filter_requests={}
    dict_sram_read_requests_max_key=0
    dict_sram_write_requests_max_key=0

    # Parse the read trace, splitting ifmap vs filter by address range.
    for entry in sram_read_requests:
        elems = entry.strip().split(',')
        elems = prune(elems)
        elems = [float(x) for x in elems]
        dict_sram_ifmap_requests[int(elems[0])]=[e for e in elems[1:] if e<filt_base]
        dict_sram_filter_requests[int(elems[0])]=[e for e in elems[1:] if e>=filt_base]
        dict_sram_read_requests_max_key=int(elems[0])
    sram_read_requests.close()

    # Parse the write (ofmap) trace.
    for entry in sram_write_requests:
        elems = entry.strip().split(',')
        elems = prune(elems)
        elems = [float(x) for x in elems]
        dict_sram_ofmap_requests[int(elems[0])]=[e for e in elems[1:]]
        dict_sram_write_requests_max_key=int(elems[0])
    sram_write_requests.close()

    max_compute_cycle = max(dict_sram_write_requests_max_key,dict_sram_read_requests_max_key)

    dram_filter_trace_requests = open(dram_filter_trace_file,'w')
    dram_ifmap_trace_requests = open(dram_ifmap_trace_file,'w')
    dram_ofmap_trace_requests = open(dram_ofmap_trace_file,'w')

    cycle=0           # wall-clock cycle, including DRAM stalls
    compute_cycle=0   # pointer into the original, stall-free trace

    # The *2 copies drive the compute side; the originals drive DRAM fills.
    dict_sram_ifmap_requests2=copy.deepcopy(dict_sram_ifmap_requests)
    dict_sram_ofmap_requests2=copy.deepcopy(dict_sram_ofmap_requests)
    dict_sram_filter_requests2=copy.deepcopy(dict_sram_filter_requests)

    # cycle pointers in original SRAM files
    sram_ofmap_buffer2_cycle=0
    sram_ifmap_buffer2_cycle=0
    sram_filter_buffer2_cycle=0

    #Simulating Double Buffer
    # buffer1 is the compute-facing half; buffer2 faces DRAM. For ofmap
    # only sizes are tracked (addresses come from the request dicts).
    sram_ofmap_buffer1_size=0
    sram_ifmap_buffer1=set()
    sram_filter_buffer1=set()
    sram_ofmap_buffer2_size=0
    sram_ifmap_buffer2=set()
    sram_filter_buffer2=set()

    # Iterating cycles
    while(True):
        # Work is finished
        if(compute_cycle>max_compute_cycle and sram_ofmap_buffer1_size==0 and sram_ofmap_buffer2_size==0):
            break
        # fill ifmap in one cycle
        if(len(sram_ifmap_buffer2)<ifmap_sram_size and sram_ifmap_buffer2_cycle<=max_compute_cycle):
            #Bandwidth
            count = math.floor(default_read_bw/word_size_bytes)
            trace= str(cycle)+", "
            while(len(sram_ifmap_buffer2)<ifmap_sram_size
                  and count>0
                  and sram_ifmap_buffer2_cycle<=max_compute_cycle):
                # Skip trace cycles that have no pending ifmap requests.
                while(((sram_ifmap_buffer2_cycle not in dict_sram_ifmap_requests) or len(dict_sram_ifmap_requests[sram_ifmap_buffer2_cycle])==0)
                      and sram_ifmap_buffer2_cycle<=max_compute_cycle):
                    sram_ifmap_buffer2_cycle+=1
                # Only if element not already present in buffer
                if(sram_ifmap_buffer2_cycle<=max_compute_cycle):
                    if(dict_sram_ifmap_requests[sram_ifmap_buffer2_cycle][0] not in sram_ifmap_buffer2):
                        sram_ifmap_buffer2.add(dict_sram_ifmap_requests[sram_ifmap_buffer2_cycle][0])
                        trace+=str(dict_sram_ifmap_requests[sram_ifmap_buffer2_cycle][0])+", "
                        count-=1
                    # Consume the request even if it was a duplicate.
                    dict_sram_ifmap_requests[sram_ifmap_buffer2_cycle].pop(0)
            trace+="\n"
            dram_ifmap_trace_requests.write(trace)
        # fill filter in one cycle
        if(len(sram_filter_buffer2)<filter_sram_size and sram_filter_buffer2_cycle<=max_compute_cycle):
            count = math.floor(default_read_bw/word_size_bytes)
            trace= str(cycle)+", "
            while(len(sram_filter_buffer2)<filter_sram_size and count>0 and sram_filter_buffer2_cycle<=max_compute_cycle):
                while(((sram_filter_buffer2_cycle not in dict_sram_filter_requests) or len(dict_sram_filter_requests[sram_filter_buffer2_cycle])==0) and sram_filter_buffer2_cycle<=max_compute_cycle):
                    sram_filter_buffer2_cycle+=1
                if(sram_filter_buffer2_cycle<=max_compute_cycle):
                    if(dict_sram_filter_requests[sram_filter_buffer2_cycle][0] not in sram_filter_buffer2):
                        sram_filter_buffer2.add(dict_sram_filter_requests[sram_filter_buffer2_cycle][0])
                        trace+=str(dict_sram_filter_requests[sram_filter_buffer2_cycle][0])+", "
                        count-=1
                    dict_sram_filter_requests[sram_filter_buffer2_cycle].pop(0)
            trace+="\n"
            dram_filter_trace_requests.write(trace)
        # Move data from sram_ofmap_buffer2 to DRAM in one cycle
        if(sram_ofmap_buffer2_size>0):
            count = math.floor(default_write_bw/word_size_bytes)
            trace= str(cycle)+", "
            while(sram_ofmap_buffer2_size>0 and count>0):
                while((sram_ofmap_buffer2_cycle not in dict_sram_ofmap_requests) or len(dict_sram_ofmap_requests[sram_ofmap_buffer2_cycle])==0):
                    sram_ofmap_buffer2_cycle+=1
                sram_ofmap_buffer2_size-=1
                trace+=str(dict_sram_ofmap_requests[sram_ofmap_buffer2_cycle][0])+", "
                dict_sram_ofmap_requests[sram_ofmap_buffer2_cycle].pop(0)
                count-=1
            trace+="\n"
            dram_ofmap_trace_requests.write(trace)
        #After Draining write buffer check if it requires swap
        if(sram_ofmap_buffer2_size==0):
            sram_ofmap_buffer2_size=sram_ofmap_buffer1_size
            sram_ofmap_buffer1_size=0
        #check for all ifmap data that can be taken in from sram into array
        if((compute_cycle in dict_sram_ifmap_requests2) and len(dict_sram_ifmap_requests2[compute_cycle])>0):
            if((dict_sram_ifmap_requests2[compute_cycle][0] not in sram_ifmap_buffer1)
               # Buffer swaping policy
               and (sram_ifmap_buffer2_cycle>max_compute_cycle or len(sram_ifmap_buffer2)>buffer_swap_factor*ifmap_sram_size)):
                sram_ifmap_buffer1=sram_ifmap_buffer2
                sram_ifmap_buffer2=set()
            # Satisfy every pending request resident in the compute buffer.
            while(len(dict_sram_ifmap_requests2[compute_cycle])>0 and (dict_sram_ifmap_requests2[compute_cycle][0] in sram_ifmap_buffer1)):
                dict_sram_ifmap_requests2[compute_cycle].pop(0)
        #check for all filter data that can be taken in from sram into array
        if((compute_cycle in dict_sram_filter_requests2) and len(dict_sram_filter_requests2[compute_cycle])>0):
            if(dict_sram_filter_requests2[compute_cycle][0] not in sram_filter_buffer1
               and (sram_filter_buffer2_cycle>max_compute_cycle or len(sram_filter_buffer2)>buffer_swap_factor*filter_sram_size)):
                sram_filter_buffer1=sram_filter_buffer2
                sram_filter_buffer2=set()
            while(len(dict_sram_filter_requests2[compute_cycle])>0 and (dict_sram_filter_requests2[compute_cycle][0] in sram_filter_buffer1)):
                dict_sram_filter_requests2[compute_cycle].pop(0)
        #write ofmap data from array to sram
        if((compute_cycle in dict_sram_ofmap_requests2) and (len(dict_sram_ofmap_requests2[compute_cycle])>0)):
            # print(compute_cycle,len(dict_sram_ofmap_requests2[compute_cycle]))
            if(sram_ofmap_buffer1_size<ofmap_sram_size ):
                while(len(dict_sram_ofmap_requests2[compute_cycle])>0 and sram_ofmap_buffer1_size<ofmap_sram_size):
                    sram_ofmap_buffer1_size+=1
                    dict_sram_ofmap_requests2[compute_cycle].pop(0)
        cycle+=1
        # If the whole calculation required for original compute cycle is done
        if((compute_cycle not in dict_sram_ifmap_requests2 or len(dict_sram_ifmap_requests2[compute_cycle])==0)
           and (compute_cycle not in dict_sram_ofmap_requests2 or len(dict_sram_ofmap_requests2[compute_cycle])==0)
           and (compute_cycle not in dict_sram_filter_requests2 or len(dict_sram_filter_requests2[compute_cycle])==0)
           ):
            compute_cycle+=1
    dram_filter_trace_requests.close()
    dram_ifmap_trace_requests.close()
    dram_ofmap_trace_requests.close()
    return cycle
def dram_trace_read_v2(
    sram_sz = 512 * 1024,
    word_sz_bytes = 1,
    min_addr = 0, max_addr=1000000,
    default_read_bw = 10, # this is arbitrary
    sram_trace_file = "sram_log.csv",
    dram_trace_file = "dram_log.csv"
):
    """Derive a DRAM read trace from an SRAM read trace.

    The SRAM (capacity sram_sz) is modelled as a set of unique addresses in
    [min_addr, max_addr). When it would overflow, the accumulated working
    set is retroactively written out as DRAM reads spread over the cycles
    between the previous drain point and the current one.
    """
    t_fill_start = -1
    t_drain_start = 0
    init_bw = default_read_bw # Taking an arbitrary initial bw of 4 bytes per cycle
    sram = set()

    sram_requests = open(sram_trace_file, 'r')
    dram = open(dram_trace_file, 'w')

    #for entry in tqdm(sram_requests):
    for entry in sram_requests:
        elems = entry.strip().split(',')
        elems = prune(elems)
        elems = [float(x) for x in elems]
        clk = elems[0]

        for e in range(1, len(elems)):
            # Only track addresses in this trace's range that are not
            # already resident in the modelled SRAM.
            if (elems[e] not in sram) and (elems[e] >= min_addr) and (elems[e] < max_addr):
                # Used up all the unique data in the SRAM?
                if len(sram) + word_sz_bytes > sram_sz:
                    if t_fill_start == -1:
                        t_fill_start = t_drain_start - math.ceil(len(sram) / (init_bw * word_sz_bytes))
                    # Generate the filling trace from time t_fill_start to t_drain_start
                    cycles_needed = t_drain_start - t_fill_start
                    words_per_cycle = math.ceil(len(sram) / (cycles_needed * word_sz_bytes))
                    c = t_fill_start
                    # Emit the whole working set, words_per_cycle per cycle.
                    while len(sram) > 0:
                        trace = str(c) + ", "
                        for _ in range(words_per_cycle):
                            if len(sram) > 0:
                                p = sram.pop()
                                trace += str(p) + ", "
                        trace += "\n"
                        dram.write(trace)
                        c += 1
                    t_fill_start = t_drain_start
                    t_drain_start = clk
                # Add the new element to sram
                sram.add(elems[e])

    # Flush whatever is still resident as one final fill burst.
    # NOTE(review): cycles_needed can be 0 here when t_drain_start equals
    # t_fill_start — confirm upstream traces rule out a division by zero.
    if len(sram) > 0:
        if t_fill_start == -1:
            t_fill_start = t_drain_start - math.ceil(len(sram) / (init_bw * word_sz_bytes))
        # Generate the filling trace from time t_fill_start to t_drain_start
        cycles_needed = t_drain_start - t_fill_start
        words_per_cycle = math.ceil(len(sram) / (cycles_needed * word_sz_bytes))
        c = t_fill_start
        while len(sram) > 0:
            trace = str(c) + ", "
            for _ in range(words_per_cycle):
                if len(sram) > 0:
                    p = sram.pop()
                    trace += str(p) + ", "
            trace += "\n"
            dram.write(trace)
            c += 1

    sram_requests.close()
    dram.close()
def dram_trace_write(ofmap_sram_size = 64,
                     data_width_bytes = 1,
                     default_write_bw = 10, # this is arbitrary
                     sram_write_trace_file = "sram_write.csv",
                     dram_write_trace_file = "dram_write.csv"):
    """Derive a DRAM write trace from an SRAM (ofmap) write trace.

    Models a two-half double buffer: one half fills from the compute array
    while the other drains to DRAM; the halves swap roles whenever the
    filling half can no longer absorb an incoming row.
    """
    traffic = open(sram_write_trace_file, 'r')
    trace_file = open(dram_write_trace_file, 'w')

    last_clk = 0
    clk = 0

    sram_buffer = [set(), set()]
    filling_buf = 0
    draining_buf = 1

    for row in traffic:
        elems = row.strip().split(',')
        elems = prune(elems)
        elems = [float(x) for x in elems]
        clk = elems[0]

        # If enough space is in the filling buffer
        # Keep filling the buffer
        if (len(sram_buffer[filling_buf]) + (len(elems) - 1) * data_width_bytes ) < ofmap_sram_size:
            for i in range(1,len(elems)):
                sram_buffer[filling_buf].add(elems[i])
        # Filling buffer is full, spill the data to the other buffer
        else:
            # If there is data in the draining buffer
            # drain it
            #print("Draining data. CLK = " + str(clk))
            if len(sram_buffer[draining_buf]) > 0:
                # Spread the drain evenly over the cycles since last swap.
                delta_clks = clk - last_clk
                data_per_clk = math.ceil(len(sram_buffer[draining_buf]) / delta_clks)
                #print("Data per clk = " + str(data_per_clk))
                # Drain the data
                c = last_clk + 1
                while len(sram_buffer[draining_buf]) > 0:
                    trace = str(c) + ", "
                    c += 1
                    for _ in range(int(data_per_clk)):
                        if len(sram_buffer[draining_buf]) > 0:
                            addr = sram_buffer[draining_buf].pop()
                            trace += str(addr) + ", "
                    trace_file.write(trace + "\n")
            #Swap the ids for drain buffer and fill buffer
            tmp = draining_buf
            draining_buf = filling_buf
            filling_buf = tmp
            #Set the last clk value
            last_clk = clk
            #Fill the new data now
            for i in range(1,len(elems)):
                sram_buffer[filling_buf].add(elems[i])

    #Drain the last fill buffer
    # End of trace: flush both halves at the default write bandwidth.
    reasonable_clk = clk
    if len(sram_buffer[draining_buf]) > 0:
        #delta_clks = clk - last_clk
        #data_per_clk = math.ceil(len(sram_buffer[draining_buf]) / delta_clks)
        data_per_clk = default_write_bw
        #print("Data per clk = " + str(data_per_clk))
        # Drain the data
        c = last_clk + 1
        while len(sram_buffer[draining_buf]) > 0:
            trace = str(c) + ", "
            c += 1
            for _ in range(int(data_per_clk)):
                if len(sram_buffer[draining_buf]) > 0:
                    addr = sram_buffer[draining_buf].pop()
                    trace += str(addr) + ", "
            trace_file.write(trace + "\n")
        reasonable_clk = max(c, clk)

    if len(sram_buffer[filling_buf]) > 0:
        data_per_clk = default_write_bw
        # Drain the data
        c = reasonable_clk + 1
        while len(sram_buffer[filling_buf]) > 0:
            trace = str(c)+ ", "
            c += 1
            for _ in range(int(data_per_clk)):
                if len(sram_buffer[filling_buf]) > 0:
                    addr = sram_buffer[filling_buf].pop()
                    trace += str(addr) + ", "
            trace_file.write(trace + "\n")

    #All traces done
    traffic.close()
    trace_file.close()
if __name__ == "__main__":
dram_trace_read_v2(min_addr=0, max_addr=1000000, dram_trace_file="ifmaps_dram_read.csv")
dram_trace_read_v2(min_addr=1000000, max_addr=100000000, dram_trace_file="filter_dram_read.csv")
#dram_trace_read(filter_sram_sz=1024, ifmap_sram_sz=1024, sram_trace_file="sram_read.csv")
#dram_trace_write(ofmap_sram_size=1024,sram_write_trace_file="yolo_tiny_layer1_write.csv", dram_write_trace_file="yolo_tiny_layer1_dram_write.csv")
|
17,813 | 4c22b9753af8a2d6430aefee7a16d076661e05a6 | from django.contrib.auth import get_user_model
from rest_framework import serializers
from .models import (
FriendsProfile,
Photo,
Nationality,
AcquaintanceRequest,
DatesProfile,
Match,
SexualOrientation,
Interest
)
from django.contrib.contenttypes.models import ContentType
class PhotoObjectRelatedSerializer(serializers.RelatedField):
    """
    A custom field to use for the `content_object` generic relationship.
    """

    def to_representation(self, value):
        """Serialize the generic relation with the serializer matching its
        concrete profile type."""
        # Renamed local from `serializers`, which shadowed the imported
        # rest_framework.serializers module.
        if isinstance(value, FriendsProfile):
            serializer = FriendsProfilesListSerializer(value)
        elif isinstance(value, DatesProfile):
            serializer = DatesProfilesListSerializer(value)
        else:
            # Bug fix: the exception was returned, not raised, so callers
            # got an Exception instance as "data" instead of an error.
            raise Exception("Unexpected type of tagged object")
        # Debug print removed (leaked serialized payloads to stdout).
        return serializer.data
class UserSerializer(serializers.ModelSerializer):
    """Minimal user representation: id and first name only."""

    class Meta:
        model = get_user_model()
        fields = ('id', 'first_name')
class PhotoListSerializer(serializers.ModelSerializer):
    """Compact photo representation (id and image) for nesting in lists."""

    class Meta:
        model = Photo
        fields = ('id', 'photo')
class FriendsProfilesListSerializer(serializers.ModelSerializer):
    """Compact friends-profile representation for list endpoints."""

    # Derived field: computed from birth_date by get_age_name below.
    age = serializers.SerializerMethodField("get_age_name")
    photos = PhotoListSerializer(many=True, read_only=True)
    user = UserSerializer()

    class Meta:
        model = FriendsProfile
        fields = ['id', 'user', 'photos', 'age', ]

    def get_age_name(self, obj):
        # Delegates to the model's age helper, passing its own birth_date.
        return obj.get_age(obj.birth_date)
class PhotoDetailSerializer(serializers.ModelSerializer):
    """Full photo representation, including its generic owner profile."""

    content_object = PhotoObjectRelatedSerializer(read_only=True, )

    class Meta:
        model = Photo
        fields = '__all__'
class NationalityDetailSerializer(serializers.ModelSerializer):
    """Full nationality representation."""

    class Meta:
        model = Nationality
        fields = '__all__'
class NationalitiesListSerializer(serializers.ModelSerializer):
    """Name-only nationality representation for list endpoints."""

    class Meta:
        model = Nationality
        fields = ('name',)
class AcquaintanceRequestsListSerializer(serializers.ModelSerializer):
    """Acquaintance-request list representation (request_date omitted)."""

    class Meta:
        model = AcquaintanceRequest
        exclude = ('request_date',)
class AcquaintanceRequestDetailSerializer(serializers.ModelSerializer):
    """Full acquaintance-request representation."""

    class Meta:
        model = AcquaintanceRequest
        fields = '__all__'
class MatchesListSerializer(serializers.ModelSerializer):
    """Match list representation (match_date omitted)."""

    class Meta:
        model = Match
        exclude = ('match_date',)
class MatchDetailSerializer(serializers.ModelSerializer):
    """Full match representation; creation requires a prior request."""

    class Meta:
        model = Match
        fields = '__all__'

    def create(self, validated_data):
        """Create a Match only if a corresponding AcquaintanceRequest
        exists (same content type, sender == initiator, receiver ==
        confirmer); otherwise raise a validation error."""
        # Renamed from `dict`, which shadowed the builtin.
        lookup = {
            'content_type': validated_data.get('content_type'),
            'sender_object_id': validated_data.get('initiator_object_id'),
            'receiver_object_id': validated_data.get('confirmer_object_id'),
        }
        # exists() avoids counting every matching row just to test presence.
        if not AcquaintanceRequest.objects.filter(**lookup).exists():
            raise serializers.ValidationError("There is no corresponding acquaintance request")
        return Match.objects.create(**validated_data)
class SexualOrientationSerializer(serializers.ModelSerializer):
    """Full sexual-orientation representation."""

    class Meta:
        model = SexualOrientation
        fields = '__all__'
class DatesProfilesListSerializer(serializers.ModelSerializer):
    """Compact dates-profile representation for list endpoints."""

    # Derived field: computed from birth_date by get_age_name below.
    age = serializers.SerializerMethodField("get_age_name")
    photos = PhotoListSerializer(many=True, read_only=True)
    user = UserSerializer()

    class Meta:
        model = DatesProfile
        fields = ['id', 'user', 'photos', 'age', ]

    def get_age_name(self, obj):
        # Delegates to the model's age helper, passing its own birth_date.
        return obj.get_age(obj.birth_date)
class InterestClassField(serializers.StringRelatedField):
    """Serialize interests by name; deserialize a name back to its pk."""

    def to_internal_value(self, data):
        """Resolve an interest name to its primary key, or raise a
        validation error when the name is missing or ambiguous."""
        try:
            # Single get() instead of filter() + len() + get(), which
            # issued up to three queries for one lookup.
            return Interest.objects.get(name=data).pk
        except (Interest.DoesNotExist, Interest.MultipleObjectsReturned):
            # The original also rejected duplicate-name interests.
            raise serializers.ValidationError(f"Interest with name {data} not found")
class DatesProfileDetailSerializer(serializers.ModelSerializer):
    """Full dates-profile representation with related photos, matches,
    requests and interests."""

    photos = PhotoListSerializer(many=True, read_only=True)
    initiated_matches = MatchesListSerializer(many=True, read_only=True)
    confirmed_matches = MatchesListSerializer(many=True, read_only=True)
    sent_requests = AcquaintanceRequestsListSerializer(many=True, read_only=True)
    received_requests = AcquaintanceRequestsListSerializer(many=True, read_only=True)
    interests = InterestClassField(many=True, required=False)

    def is_valid(self, raise_exception=False):
        # Force the profile's user to the authenticated request user so a
        # client cannot create/edit a profile for someone else.
        self.initial_data['user'] = self.context['request'].user.pk
        # Bug fix: raise_exception was silently dropped before, so callers
        # using is_valid(raise_exception=True) never got the 400 response.
        return super().is_valid(raise_exception=raise_exception)

    class Meta:
        model = DatesProfile
        fields = '__all__'
class FriendsProfileDetailSerializer(serializers.ModelSerializer):
    """Full friends-profile representation with related photos, matches,
    requests and interests."""

    photos = PhotoListSerializer(many=True, read_only=True)
    initiated_matches = MatchesListSerializer(many=True, read_only=True)
    confirmed_matches = MatchesListSerializer(many=True, read_only=True)
    sent_requests = AcquaintanceRequestsListSerializer(many=True, read_only=True)
    received_requests = AcquaintanceRequestsListSerializer(many=True, read_only=True)
    interests = InterestClassField(many=True, required=False)

    def is_valid(self, raise_exception=False):
        # Force the profile's user to the authenticated request user so a
        # client cannot create/edit a profile for someone else.
        self.initial_data['user'] = self.context['request'].user.pk
        # Bug fix: raise_exception was silently dropped before, so callers
        # using is_valid(raise_exception=True) never got the 400 response.
        return super().is_valid(raise_exception=raise_exception)

    class Meta:
        model = FriendsProfile
        fields = '__all__'
class FriendsProfileInfoSerializer(serializers.ModelSerializer):
    """Read-oriented friends-profile representation with interest names."""

    interests = InterestClassField(many=True, read_only=True)

    class Meta:
        model = FriendsProfile
        fields = '__all__'
class DatesProfileInfoSerializer(serializers.ModelSerializer):
    """Read-oriented dates-profile representation with interest names."""

    interests = InterestClassField(many=True, read_only=True)

    class Meta:
        model = DatesProfile
        fields = '__all__'
|
17,814 | 87c5831042cbca5bd848c121b8a3f56ddaad40a5 | from flask import Blueprint, render_template, redirect, url_for, request, flash, send_file
from flask_login import login_required, current_user
from werkzeug.utils import secure_filename
from . import db, ALLOWED_EXTENSIONS, UPLOAD_FOLDER
media = Blueprint('media', __name__)
from PIL import Image
import os, subprocess, math
ROW_WIDTH = 6 # Number of images per row
def get_mimetype(filename):
    """Map an image filename's extension to its MIME type.

    Returns None for unrecognized extensions. Bug fix: the old
    `filename[-4:] == ".jpeg"` comparison could never match, because
    ".jpeg" is five characters, so .jpeg files were served without a
    MIME type.
    """
    suffix_to_mime = {
        ".gif": 'image/gif',
        ".jpg": 'image/jpeg',
        ".jpeg": 'image/jpeg',
        ".png": 'image/png',
    }
    for suffix, mime in suffix_to_mime.items():
        if filename.endswith(suffix):
            return mime
    return None
class Picture(object):
    """A picture file on disk plus its pixel dimensions."""

    def __init__(self, filename, user_dir):
        self.filename = filename
        self.filepath = os.path.join(user_dir, filename)
        # Context manager closes the underlying file handle; the old code
        # leaked one open handle per Picture instance.
        with Image.open(self.filepath) as im:
            self.width, self.height = im.size

    @staticmethod
    def isPicture(filename, user_dir):
        """Return True if PIL can open the file as an image."""
        try:
            with Image.open(os.path.join(user_dir, filename)):
                return True
        except Exception:
            # Narrowed from a bare `except:`, which also swallowed
            # KeyboardInterrupt and SystemExit.
            return False
# obtained from https://flask.palletsprojects.com/en/1.1.x/patterns/fileuploads/
def allowed_file(filename):
    """Return True if *filename* has an extension in ALLOWED_EXTENSIONS.

    Based on https://flask.palletsprojects.com/en/1.1.x/patterns/fileuploads/
    """
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
def get_user_dir():
    # Per-user upload directory, keyed by the account's email address.
    return os.path.join(UPLOAD_FOLDER, current_user.email)
@media.route('/pictures')
@login_required
def pictures():
    """Render the gallery page: the current user's pictures in rows of
    ROW_WIDTH for the template's grid layout."""
    user_dir = get_user_dir()
    picture_rows = []
    # Leftover debug prints ("test"/"test2", user_dir) removed.
    if os.path.exists(user_dir):
        pictures = [Picture(f, user_dir) for f in os.listdir(user_dir)
                    if Picture.isPicture(f, user_dir)]
        # Chunk the flat list into rows; slicing already clamps at the end,
        # so the old min() bound was redundant.
        for index in range(0, len(pictures), ROW_WIDTH):
            picture_rows.append(pictures[index:index + ROW_WIDTH])
    return render_template('pictures.html', name=current_user.name, picture_rows=picture_rows)
@media.route('/picture/<string:filename>')
@login_required
def picture(filename):
    """Serve a raw picture file from the current user's directory."""
    mimetype = get_mimetype(filename)
    user_dir = get_user_dir()
    # NOTE(review): filename comes straight from the URL; Flask's <string:>
    # converter rejects slashes, but confirm ".." sequences cannot escape
    # user_dir before trusting this join.
    return send_file(os.path.join(user_dir, filename), mimetype=mimetype)
@media.route('/view/pictures/<string:filename>')
@login_required
def view_picture(filename):
    """Render the single-picture page, or bounce back to the gallery if
    the name does not resolve to a real image."""
    user_dir = get_user_dir()
    if not Picture.isPicture(filename, user_dir):
        return redirect(url_for('media.pictures'))
    picture = Picture(filename, user_dir)
    return render_template('picture.html', name=current_user.name, picture=picture)
@media.route('/delete/picture/<string:filename>')
@login_required
def delete_picture(filename):
    """Delete one of the current user's pictures, then show the gallery.

    NOTE(review): deletion via GET is vulnerable to CSRF / link prefetch;
    consider requiring POST with a CSRF token.
    """
    user_dir = get_user_dir()
    # The isPicture check doubles as an existence check.
    if Picture.isPicture(filename, user_dir):
        os.remove(os.path.join(user_dir, filename))
    return redirect(url_for('media.pictures'))
@media.route('/upload')
@login_required
def upload():
    """Render the upload form (the POST handler is upload_post below)."""
    return render_template('upload.html', name=current_user.name)
@media.route('/upload', methods=['POST'])
@login_required
def upload_post():
    """Handle an image upload: validate the submission, sanitize the name,
    and save the file into the user's directory.

    Based on https://flask.palletsprojects.com/en/1.1.x/patterns/fileuploads/
    """
    # check if the post request has the file part
    if 'file' not in request.files:
        flash('No file part')
        return redirect(request.url)
    file = request.files['file']
    # if user does not select file, browser also
    # submit an empty part without filename
    if file.filename == '':
        flash('No selected file')
        return redirect(request.url)
    if file and allowed_file(file.filename):
        user_dir = get_user_dir()
        filename = secure_filename(file.filename)
        # makedirs(exist_ok=True) removes the exists()/mkdir() race and
        # also creates missing parent directories.
        os.makedirs(user_dir, exist_ok=True)
        file.save(os.path.join(user_dir, filename))
        return redirect(url_for('media.pictures'))
    return redirect(request.url)
|
17,815 | 5fa3c8bd9b53d6164e32432243f82d42e669ca18 | import numpy as np
k_text = 3   # number of text (bag-of-words) neighbours to average
k_image = 3  # number of image (VGG feature) neighbours to average

labels = np.load('label.npy')  # loaded but unused in the visible code
vgg_features = np.load('feature.npy')
# Squash VGG activations into (-1, 1) after normalising by their global std.
vgg_features = np.tanh((vgg_features)/np.std(vgg_features))
bow = np.load('bow.npy')

# Random 10k train / 5k test split over the dataset indices.
data_list = np.arange(len(bow))
np.random.shuffle(data_list)
train_data_list = data_list[0:10000]
test_data_list = data_list[10000:15000]
np.save('train_data_list.npy', train_data_list)
np.save('test_data_list.npy', test_data_list)

train_vgg = vgg_features[train_data_list]
train_bow = bow[train_data_list]

# Pairwise negative cosine similarity between training images: more similar
# pairs get more negative values, so argpartition(..., k) selects them.
norm = np.linalg.norm(train_vgg, axis=1)
dist_new = np.zeros([len(train_data_list),len(train_data_list)], np.float32)
for i in range(len(train_data_list)):
    dist_new[i] = -np.sum(train_vgg[i] * train_vgg, 1) / norm / \
        norm[i]
# Smoothed image target: mean of each sample's k_image most similar
# training samples (the sample itself is typically among them).
vgg_features_target = np.zeros_like(vgg_features, np.float32)
for i in range(len(train_data_list)):
    neb = np.argpartition(dist_new[i], k_image)[0:k_image]
    vgg_features_target[train_data_list[i]] = np.mean(vgg_features[train_data_list[neb]], 0)
np.save('vgg_features_target.npy', vgg_features_target)

# Same neighbour smoothing for the bag-of-words text features.
# NOTE(review): an all-zero bow row makes norm 0 and divides by zero here.
norm = np.linalg.norm(train_bow, axis=1)
dist_new = np.zeros([len(train_data_list),len(train_data_list)], np.float32)
for i in range(len(train_data_list)):
    dist_new[i] = -np.sum(train_bow[i] * train_bow, 1) / norm / \
        norm[i]
bow_target = np.zeros_like(bow, np.float32)
for i in range(len(train_data_list)):
    neb = np.argpartition(dist_new[i], k_text)[0:k_text]
    bow_target[train_data_list[i]] = np.mean(bow[train_data_list[neb]], 0)
np.save('bow_target.npy', bow_target)
17,816 | 362c24615028806c55d70f593c4efcf3302ab85b | #!/usr/bin/env python
'''
Ansible module for application
'''
# vim: expandtab:tabstop=4:shiftwidth=4
#
# Zabbix application ansible module
#
#
# Copyright 2015 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This is in place because each module looks similar to each other.
# These need duplicate code as their behavior is very similar
# but different for each zabbix class.
# pylint: disable=duplicate-code
# pylint: disable=import-error
from openshift_tools.zbxapi import ZabbixAPI, ZabbixConnection
def exists(content, key='result'):
    ''' Check if key exists in content or the size of content[key] > 0

    Returns True only when `key` is present and maps to a truthy value —
    i.e. the Zabbix API response actually returned data.
    '''
    # dict.has_key() was removed in Python 3; `in` works in both 2 and 3.
    if key not in content:
        return False
    if not content[key]:
        return False
    return True
def get_template_ids(zapi, template_name):
    '''Return the ids of Zabbix templates whose host matches template_name.

    Only the first search hit is used, so the list holds at most one id;
    it is empty when the API returns no match.
    '''
    template_ids = []
    # Fetch templates by name
    content = zapi.get_content('template',
                               'get',
                               {'search': {'host': template_name}})
    # get() replaces the Python-2-only has_key(), and its truthiness check
    # also guards the empty-result case, where the old code raised
    # IndexError on content['result'][0].
    if content.get('result'):
        template_ids.append(content['result'][0]['templateid'])
    return template_ids
def main():
    '''Ansible module entry point for managing Zabbix applications.

    Reads the module parameters, connects to the Zabbix API, and ensures
    the named application is in the requested state ('list', 'absent' or
    'present').  Every branch terminates via module.exit_json().
    '''
    module = AnsibleModule(
        argument_spec=dict(
            # Zabbix endpoint and credentials; user/password fall back to
            # environment variables when not supplied explicitly.
            zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
            zbx_user=dict(default=os.environ.get('ZABBIX_USER', None), type='str'),
            zbx_password=dict(default=os.environ.get('ZABBIX_PASSWORD', None), type='str'),
            zbx_debug=dict(default=False, type='bool'),
            name=dict(default=None, type='str', required=True),
            template_name=dict(default=None, type='str'),
            state=dict(default='present', type='str'),
        ),
        #supports_check_mode=True
    )
    zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'],
                                      module.params['zbx_user'],
                                      module.params['zbx_password'],
                                      module.params['zbx_debug']))
    #Set the instance and the application for the rest of the calls
    zbx_class_name = 'application'
    idname = 'applicationid'
    aname = module.params['name']
    state = module.params['state']
    # get a applicationid, see if it exists
    tids = get_template_ids(zapi, module.params['template_name'])
    # NOTE(review): tids[0] raises IndexError if no template matched --
    # this assumes template_name always resolves; confirm with callers.
    content = zapi.get_content(zbx_class_name,
                               'get',
                               {'search': {'name': aname},
                                'templateids': tids[0],
                               })
    if state == 'list':
        module.exit_json(changed=False, results=content['result'], state="list")
    if state == 'absent':
        if not exists(content):
            # Nothing to delete; report no change.
            module.exit_json(changed=False, state="absent")
        content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]])
        module.exit_json(changed=True, results=content['result'], state="absent")
    if state == 'present':
        params = {'hostid': tids[0],
                  'name': aname,
                 }
        if not exists(content):
            # if we didn't find it, create it
            content = zapi.get_content(zbx_class_name, 'create', params)
            module.exit_json(changed=True, results=content['result'], state='present')
        # already exists, we need to update it
        # let's compare properties
        differences = {}
        zab_results = content['result'][0]
        for key, value in params.items():
            if key == 'templates' and zab_results.has_key('parentTemplates'):
                if zab_results['parentTemplates'] != value:
                    differences[key] = value
            # Zabbix returns values as strings, so compare both the
            # stringified and the raw value before flagging a difference.
            elif zab_results[key] != str(value) and zab_results[key] != value:
                differences[key] = value
        if not differences:
            module.exit_json(changed=False, results=content['result'], state="present")
        # We have differences and need to update
        differences[idname] = zab_results[idname]
        content = zapi.get_content(zbx_class_name, 'update', differences)
        if content.has_key('error'):
            module.exit_json(failed=True, changed=False, results=content['error'], state="present")
        module.exit_json(changed=True, results=content['result'], state="present")
    module.exit_json(failed=True,
                     changed=False,
                     results='Unknown state passed. %s' % state,
                     state="unknown")
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# import module snippets. This are required
from ansible.module_utils.basic import *
main()
|
17,817 | 7c3a26d0b20da816cc9235ad90d1becd1268efbb | # a=["this" "isa" "page"]
# for i ,j in enumerate(a):
# tlist=["is","a"]
# del a[i]
# for ix in range(len(tlist)):
# a.insert(i+ix,tlist[ix])
# print(a)
from docx import *
# Collect the text of every bold run and every italic run in the
# document, preserving document order.
document = Document('sample.docx')

all_runs = [run for para in document.paragraphs for run in para.runs]
italics = [run.text for run in all_runs if run.italic]
bolds = [run.text for run in all_runs if run.bold]

boltalic_Dict = {'bold_phrases': bolds,
                 'italic_phrases': italics}
17,818 | 4a967dec3c60286c58f6308a667517359a64f993 |
from __future__ import print_function
from collections import deque
import os
import queue
import socket
import sys
import threading
from time import sleep
from urllib.error import URLError
import urllib.parse as urlparse
from apiclient.discovery import build
from apiclient import errors
import googleapiclient
from httplib2 import Http
import json
from oauth2client import file, client, tools
from options import Options
class Gmail:
"""Object for accessing gmail via http API."""
options = Options(email=None,
scopes=['https://www.googleapis.com/auth/gmail.readonly'],
client_secret_file='client-secret.json',
batch_size=100,
credentials_path=None,
query='-in:chats',
num_workers=0,
poll_interval=300)
service = None
class GenericException(Exception):
pass
class UserRateException(Exception):
pass
class BatchException(Exception):
pass
class NoHistoryException(Exception):
pass
@staticmethod
def batch_executor(creds, cmds):
"""Execute a batch command and check for errors.
Batch Gmail commands require a callback. This function wraps the call
plus callback into a single synchronous function. Rather than relying
on callbacks, use threads for parallelism.
:param cmds list: A list (or other iterable) with a collections of
commands. Each command consists of a tuple (google_id,
command), where command is added to the batch() Gmail client
api.
:return: A list of response objects. Each entry of the list
corresponds to a callback value.
:raises: Exceptions on error.
"""
def handler(rid, resp, ex, responses):
"Callback invoked by Google API to handled message data."
def ex_is_error(ex, code):
"Check if exception is error code 'code'."
return (isinstance(ex, googleapiclient.errors.HttpError) and
ex.resp.status == code)
if ex is not None:
if ex_is_error(ex, 404):
# message could not be found this is probably a
# deleted message, spam or draft message since these
# are not included in the messages.get() query by
# default.
print("remote: could not find remote message: %s!" % rid)
return
elif ex_is_error(ex, 400):
# message id invalid, probably caused by stray files
# in the mail repo
print("remote: message id: %s is invalid! " % rid)
return
elif ex_is_error(ex, 403) or ex_is_error(ex, 429):
#import pdb; pdb.set_trace()
raise Gmail.UserRateException(ex)
elif ex_is_error(ex, 500):
raise Gmail.GenericException(ex)
else:
raise Gmail.BatchException(ex)
responses.append(resp)
http = creds.authorize(Http(timeout=30))
service = build('gmail', 'v1', http=http)
batch = service.new_batch_http_request()
responses = []
for gid, cmd in cmds:
batch.add(cmd, callback=lambda a, b, c: handler(a, b, c,
responses),
request_id=gid)
batch.execute(http=http)
return responses
@staticmethod
def worker(my_idx, inq, outq):
"""Entry point for new executor threads.
Downloading (or importing) metadata is limited by the round-trip time to
Gmail if we only use one thread. This wrapper function makes it
possible to start multiple threads (currently limited to two because
that is how many concurrent requests from the same user Gmail alows) to
reduce the import time.
Commands come in via a thread-safe queue (inq) and response data is
written to another thread-safe queue (outq). This function does not
interpret the data in either queue. It merly acts as a dumb pipeline
between the two endpoints.
:param inq queue.Queue: Inress queue. Commands received on this
queue are sent to a batch_executor.
:param outq queue.Queue: Egress queue. Data returned by the batch
executor is written to the queue for consumption by the
initiator.
"""
print("worker %d: starting" % my_idx)
backoff = .001
while True:
cmd = inq.get()
if cmd is None:
break
ridx, creds, cmds = cmd
backoff = max(backoff / 2, 0.001)
while True:
try:
responses = Gmail.batch_executor(creds, cmds)
except Gmail.UserRateException:
print(f'worker {my_idx}: backoff {backoff} sec')
sleep(backoff)
backoff = min(backoff * 2, 1.0)
except Exception as ex:
outq.put([ridx, ex])
break
else:
outq.put([ridx, responses])
break
inq.task_done()
print("worker %d stoping" % my_idx)
def __init__(self, **kwargs):
"""Initialize a new object using the options passed in."""
self.opts = self.options.push(kwargs)
data_dir = os.path.normpath(os.path.join(os.path.dirname(__file__),
'../data'))
if self.opts.credentials_path is None:
self.opts.set(credentials_path='%s-creds.json' % self.opts.email)
if os.path.relpath(self.opts.client_secret_file):
self.opts.set(client_secret_file=os.path.join(data_dir,
self.opts.client_secret_file))
if os.path.relpath(self.opts.credentials_path):
self.opts.set(credentials_path=os.path.join(data_dir,
self.opts.credentials_path))
self.creds = None
self.service = None
self.threads = []
if self.opts.num_workers >= 1:
self.outq = queue.Queue(maxsize=self.opts.num_workers + 1)
self.inq = queue.Queue(maxsize=self.opts.num_workers + 1)
for idx in range(self.opts.num_workers):
werker = lambda: self.worker(idx, self.outq, self.inq)
# It's OK for these threads to not free up resources on exit
# since they don't store permanent state.
# FIXME: should I even keep a pointer to the tread?
self.threads.append(threading.Thread(daemon=True,
target=werker))
self.threads[idx].start()
@property
def poll_interval(self):
"""How often to poll for new messages / updates."""
return self.opts.poll_interval
@property
def scopes(self):
"""Scopes used for authorization."""
return [scope.rsplit('/', 1)[1] for scope in self.opts.scopes]
@property
def writable(self):
"""Whether the account was authorized as read-only or not."""
return 'gmail.modify' in self.scopes
@property
def can_send(self):
"""Whether the scopes list includes the ability to send mail."""
return ('gmail.compose' in self.scopes or
'gmail.send' in self.scopes)
def get_credentials(self):
"Read, or create one if it does not exist, the credentials file."
store = file.Storage(self.opts.credentials_path)
creds = store.get()
if not creds or creds.invalid:
# Clear out argv so argparse in run_flow() is happy.
argv = sys.argv
sys.argv = []
flow = client.flow_from_clientsecrets(self.opts.client_secret_file,
self.opts.scopes)
creds = tools.run_flow(flow, store)
sys.argv = argv
return creds
def reachable(self):
"""Whether the Gmail endpoint is reachable."""
service = build('gmail', 'v1', http=Http(timeout=1.0))
url = urlparse.urlparse(service._baseUrl)
host = url.hostname
port = url.port
try:
socket.getaddrinfo(host, port, proto=socket.IPPROTO_TCP)
except (socket.herror, socket.gaierror, URLError, OSError):
return False
return True
def authorize(self):
"Authorize the service to access the user's mailbox."
if not self.service:
self.creds = self.get_credentials()
http = self.creds.authorize(Http(timeout=10.0))
self.service = build('gmail', 'v1', http=http)
assert self.service is not None
def authorized(func):
"Ensure service is authorized to access the user's mailbox."
def func_wrap (self, *args, **kwargs):
if self.service is None:
self.authorize()
return func(self, *args, **kwargs)
return func_wrap
@authorized
def get_profile(self):
"Return the user's profile."
# Call the Gmail API
results = self.service.users().getProfile(userId='me').execute()
return results
@authorized
def get_labels(self):
"Return a list of labels."
# Call the Gmail API
results = self.service.users().labels().list(userId='me').execute()
return results.get('labels', [])
@authorized
def get_history_id(self, start=1):
"Get the current history id of the mailbox."
try:
hist = self.service.users().history()
results = hist.list(userId='me', startHistoryId=start).execute()
if 'historyId' in results:
return int(results['historyId'])
else:
raise Gmail.GenericException("no historyId field returned")
except googleapiclient.errors.HttpError:
# this happens if the original historyId is too old,
# try to get last message and the historyId from it.
for mset in self.list_messages(1):
(_, mset) = mset
msg = self.get_message(mset[0]['id'])
return int(msg['historyId'])
@authorized
def get_history_since(self, start=0):
"""Get a list of changes since the given start point (a history id)."""
hist = self.service.users().history()
try:
results = hist.list(userId='me', startHistoryId=start).execute()
if 'history' in results:
yield results['history']
while 'nextPageToken' in results:
results = hist.list(userId='me',
pageToken=results['nextPageToken'],
startHistoryId=start).execute()
if 'history' in results:
yield results['history']
except googleapiclient.errors.HttpError as ex:
if ex.resp.status == 404:
raise Gmail.NoHistoryException
elif ex.resp.status == 403:
raise Gmail.UserRateException(ex)
else:
raise Gmail.GenericException(ex)
@authorized
def list_messages(self, limit=1, query=None):
"Returns a list of messages (max = limit)."
total = 0
token = None
results = []
if query is None:
query = self.opts.query
while True:
results = self.service.users().messages().list(userId='me',
pageToken=token,
q=query,
maxResults=limit,
includeSpamTrash=True).\
execute()
if 'messages' in results:
total += results['resultSizeEstimate']
yield results['resultSizeEstimate'], results['messages']
if 'nextPageToken' in results:
token = results['nextPageToken']
else:
break
if limit is not None and total >= limit:
break
@authorized
def get_message(self, id, format='minimal'):
"""Get the message in the given format."""
try:
return self.service.users().messages().get(userId='me',
id=id,
format=format).\
execute()
except googleapiclient.errors.HttpError as ex:
if ex.resp.status == 403 or ex.resp.status == 500:
return self.get_message(id, format)
else:
raise ex
@authorized
def get_thread(self, id, format='metadata'):
"""Get information abot a thread."""
try:
return self.service.users().threads().get(userId='me',
id=id,
format=format).\
execute()
except googleapiclient.errors.HttpError as ex:
if ex.resp.status == 403 or ex.resp.status == 500:
return self.get_thread(id, format)
else:
raise ex
@authorized
def get_messages(self, ids, format):
"Get a collection of messages."
# FIXME: support adaptive batch sizes
def chunks(l, n):
"Yield successive n-sized chunks from l."
for i in range(0, len(l), n):
yield l[i:i + n]
if '__getitem__' not in dir(ids):
ids = (ids, )
if self.opts.num_workers < 1:
what = self.service.users().messages()
for chunk in chunks(ids, self.opts.batch_size):
try:
cmds = [(gid, what.get(userId='me', id=gid,
format=format)) for gid in chunk]
responses = Gmail.batch_executor(self.creds, cmds)
except Gmail.UserRateException as ex:
print("remote: user rate error: ", ex)
except Gmail.BatchException as ex:
print("remote: batch request error: ", ex)
except ConnectionError as ex:
print("remote: connection error: ", ex)
else:
yield responses
return
idx = 0
ridx = 0
pending = {}
chunks = deque(chunks(ids, self.opts.batch_size))
what = self.service.users().messages()
while not (len(chunks) == 0 and idx == ridx):
if not self.inq.empty():
try:
xx, resp = self.inq.get()
pending[xx] = resp
while ridx in pending:
resp = pending[ridx]
del pending[ridx]
ridx += 1
if isinstance(resp, Exception):
raise resp
yield resp
except Gmail.UserRateException as ex:
assert False, "UserRateException propagated to caller"
print("remote: user rate error: ", ex)
except Gmail.BatchException as ex:
print("remote: batch request error: ", ex)
except ConnectionError as ex:
print("remote: connection error: ", ex)
finally:
self.inq.task_done()
if len(chunks) > 0:
chunk = chunks.popleft()
cmds = [(gid, what.get(userId='me', id=gid,
format=format)) for gid in chunk]
self.outq.put([idx, self.creds, cmds])
idx += 1
for ridx in sorted(pending.keys()):
resp = pending[ridx]
del pending[ridx]
ridx += 1
if isinstance(resp, Exception):
raise resp
yield resp
@authorized
def update_message(self, id, labels):
"""Update a message at the remote endpoint."""
try:
message = self.service.users().messages().\
modify(userId='me', id=id, body=labels).execute()
except errors.HttpError as ex:
msg = json.loads(ex.content)['error']['message']
return (ex.resp.status, msg)
return
@authorized
def update_messages(self, ids, add_labels, rm_labels):
"""Update acollection of messages at the remote endpoint."""
try:
messages = self.service.users().messages().\
batchModify(userId='me',
body={'ids': ids, 'addLabelIds': add_labels,
'removeLabelIds': rm_labels}).execute()
except errors.HttpError as ex:
msg = json.loads(ex.content)['error']['message']
return (ex.resp.status, msg)
return
@authorized
def trash(self, id):
"""Move a message to the trash at the remote endpoint."""
try:
message = self.service.users().messages().\
trash(userId='me', id=id).execute()
except errors.HttpError as ex:
msg = json.loads(ex.content)['error']['message']
return (ex.resp.status, msg)
return
@authorized
def untrash(self, id):
"""Move a message from the trash at the remote endpoint."""
try:
message = self.service.users().messages().\
untrash(userId='me', id=id).execute()
except errors.HttpError as ex:
msg = json.loads(ex.content)['error']['message']
return (ex.resp.status, msg)
return
@authorized
def search(self, query, labels=[]):
"""Search for messages matching query string.
Query string (query) is further limited to messages with matching labels
(if any).
:param query str: A query string in Gmail format.
:param labels list: A list of label names to further limit the
search. Only match messages with one or more of the labels in
the list.
"""
qstring = query + ' ' + self.opts.query
if labels:
query += ' (' + ' OR '.join(['label:' + l for l in labels]) + ')'
print(query)
cmd = self.service.users().messages()
try:
results = cmd.list(userId='me', q=query,
includeSpamTrash=True).execute()
if 'messages' not in results:
return []
gids = [m['id'] for m in results['messages']]
while 'nextPageToken' in results:
page_token = results['nextPageToken']
results = cmd.list(userId='me', q=query,
pageToken=page_token,
includeSpamTrash=True).execute()
gids.extend([m['id'] for m in results['messages']])
return gids
except errors.HttpError as ex:
print('An error occurred: %s' % ex)
return []
def reset_http(self):
"""Reset the http object.
If we don't reset the http object, the bad connection hangs around for a
while and causes subsequent connection attempts to fail--even if the
server is now reachable.
"""
self.service = None
|
17,819 | d1db98c0bd22a72830e5204b6456532a68e7251d | from odoo import models, api, fields
class Users(models.Model):
_inherit = 'res.users'
notification_type = fields.Selection(
selection=[
('email', 'Handle by Emails'),
('inbox', 'Handle in Discuss'),
]
)
odoobot_state = fields.Selection(string='Yantra Status')
|
17,820 | 923c3b94be6afea54ac92c5af8295262279d9011 | import requests
import json
headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
auth = {"auth": {"tenantName": "admin", "passwordCredentials": {"username": "admin", "password": "admin"}}}
r = requests.post("http://10.1.0.208:5000/v2.0/tokens", headers=headers, data = json.dumps(auth))
print r.text
print r.headers
print 'token:', json.loads(r.text)['access']['token']['id']
headers['X-Auth-Token'] = json.loads(r.text)['access']['token']['id']
headers['User-Agent'] = 'python-keystoneclient'
r = requests.get("http://10.1.0.208:35357/v2.0/tenants", headers=headers)
print r.text
print headers
for tenant in json.loads(r.text)['tenants']:
print tenant['name'] == 'admin'
if tenant['name'] == 'admin':
print tenant
r = requests.get("http://10.1.0.208:8774/v2/%s/servers/detail" %(tenant['id']), headers=headers, params={'all_tenants': '1'})
print "http://10.1.0.208:8774/v2/%s/servers/detail" %(tenant['id'])
print r.text
break
|
17,821 | d6aac43d3a297e90c1e9f78b4db906989badce88 | import taso
import onnx
import torch
import torch.nn as nn
import torchvision.models as models
class SampleModel(nn.Module):
    """Toy two-branch CNN used to exercise the TASO graph optimizer.

    Each branch applies its own 3x3 conv twice followed by a ReLU; the
    branches are summed and then shifted by two scalar additions.
    """

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 3, 3, padding=1)
        self.conv2 = nn.Conv2d(3, 3, 3, padding=1)
        self.relu = nn.ReLU()

    def forward(self, X):
        # Branch A: conv1 applied twice, then ReLU.
        branch_a = self.relu(self.conv1(self.conv1(X)))
        # Branch B: conv2 applied twice, then ReLU.
        branch_b = self.relu(self.conv2(self.conv2(X)))
        merged = branch_a + branch_b
        merged = merged + 1
        return merged + 3
model = SampleModel()
x = torch.randn(1, 3, 24, 24, device='cpu')
torch.onnx.export(model,
x,
"model.onnx",
verbose=False,)
graph = taso.load_onnx("./model.onnx")
print("\n cost = {}".format(graph.cost()))
new_graph = taso.optimize(graph, alpha = 1.0, budget = 1000, print_subst=True)
print("\n optimized_cost = {}".format(new_graph.cost()))
new_model = taso.export_onnx(new_graph)
onnx.save(new_model, "./model_taso.onnx") |
17,822 | 6f5a99d6c3e21cbde1352cfbee4689b4ef0f44a0 | s = int(input())
# Find the largest k such that 1 + 2 + ... + k <= s, where s was read
# from stdin above.  The loop subtracts 1, 2, 3, ... from s until it
# goes negative; at that point i has overshot by one, hence i - 1.
i = 0
while s >= 0:
    i += 1
    s -= i
print(i-1)
|
17,823 | dd17f90256de7b8936445880360b48be7b2f22c8 |
from django.urls import path
from . import views
urlpatterns = [
path('', views.home, name = 'portfolio-home'),
path('blog/', views.blog, name = 'blog-home'),
]
|
17,824 | b939e5c14a5265ca4c0547dcf925fbef0996d080 | # coding: utf8
import sys
from pandas import json
from bottle import route, run, static_file, get, post, request
from event_simulator.lib.data_feeder import DataFeeder
from event_simulator.lib.ptb_model import SmallConfig
from event_simulator.web import config
from event_simulator.web.web_simulator import WebSimulator
PUBLIC_DIR = "%s/public" % config.WEB_DIR
def web_application(web_simulators, default_web_simulator):
@route('/')
def index():
return static_file('index.html', root=PUBLIC_DIR)
@route('/<filename:re:.*\.html>')
def index(filename):
return static_file(filename, root=PUBLIC_DIR)
@route('/public/<filename:path>')
def send_public(filename):
return static_file(filename, root=PUBLIC_DIR)
@get('/api/events')
def get_event_list():
return {"events": default_web_simulator.get_event_list()}
@post('/api/simulate')
def simulate():
json_body = request.json
init_sequence = json_body['init_sequence']
target_event_list = [json_body['target_event']]
num_sample = int(json_body.get('num_sample', default_web_simulator.num_sample))
web_simulator = web_simulators.get(num_sample)
if not web_simulator:
web_simulator = create_simulator(default_web_simulator.data_feeder,
default_web_simulator.model_path,
num_sample)
web_simulators[num_sample] = web_simulator
for event_name in target_event_list + init_sequence:
if event_name not in web_simulator.data_feeder.mapping:
return {"error": "Event %s is not found" % event_name}
cnt_hash = web_simulator.simulate_sequence(init_sequence, target_event_list)
return cnt_hash
run(host='localhost', port=8080, debug=True, reloader=True)
def create_simulator(data_feeder, model_path, num_sample):
simulator_config = SmallConfig()
simulator_config.batch_size = num_sample
with tf.Graph().as_default():
web_simulator = WebSimulator()
web_simulator.setup(data_feeder, tf.Session(), simulator_config, model_path)
return web_simulator
######
import tensorflow as tf
import numpy as np
flags = tf.flags
flags.DEFINE_string("data", None, "path to data")
flags.DEFINE_string("model", None, "path to model file")
flags.DEFINE_integer("num_sample", 10000, "number of samples to generate")
FLAGS = flags.FLAGS
def main(unused_args):
web_simulators = {}
model_path = FLAGS.model
data_path = FLAGS.data
num_sample = FLAGS.num_sample
if not model_path or not data_path:
print('--model and --data are required')
sys.exit(1)
np.random.seed()
# data_loader
data_feeder = DataFeeder.load_from_base_path(data_path, config=None) # only load mapping
w = web_simulators[num_sample] = create_simulator(data_feeder, model_path, num_sample)
web_application(web_simulators, w)
if __name__ == '__main__':
tf.app.run()
|
17,825 | c22e8620b6a77672441b1f3fd9f90ac4a8a15abe | import numpy as np
from scipy.sparse import csr_matrix
import sys
from sklearn.datasets import load_svmlight_file
import random
from datetime import datetime
import math
# def accuracy(Xts,Yts,w_final, ntest):
# Yp1 = np.matmul(Xts,w_final);
# Yp1 = np.multiply(Yp1,Yts);
# Yp1 = np.where(Yp1>0,1,0);
# acy_nor = np.sum(Yp1);
# print("accuracy nor :"),
# print(100*(acy_nor/float(ntest)))
def main():
traindatafile = sys.argv[1];
# For how many iterations do we wish to execute SCD?
n_iter = int(sys.argv[2]);
# After how many iterations do we want to timestamp?
spacing = int(sys.argv[3]);
# The training file is in libSVM format
tr_data = load_svmlight_file(traindatafile);
##
Xtr = tr_data[0]; # Training features in sparse format
Ytr = tr_data[1]; # Training labels
# We have n data points each in d-dimensions
n, d = Xtr.get_shape();
# The labels are named 1 and 2 in the data set. Convert them to our standard -1 and 1 labels
#Xts = Xtr[int(0.85*n):]
#Xtr = Xtr[:int(0.85*n)]
Ytr = 2*(Ytr - 1.5);
Ytr = Ytr.astype(int);
#Yts = Ytr[int(0.85*n):]
#Ytr = Ytr[:int(0.85*n)]
# Optional: densify the features matrix.
# Warning: will slow down computation
#n, d = Xtr.get_shape();
#ntest, dtest = Xts.get_shape();
Xtr = Xtr.toarray();
#Xts = Xts.toarray();
# Initialize model
# For dual SCD, you will need to maintain d_alpha and w
# Note: if you have densified the Xt matrix then you can initialize w as a NumPy array
# w = csr_matrix((1, d));
w = np.zeros(d)
# wbar = np.ones(d)
d_alpha = np.zeros((n,));
# We will take timestamp after every "spacing" iterations
time_elapsed = np.zeros(int(math.ceil(n_iter/spacing)));
tick_vals = np.zeros(int(math.ceil(n_iter/spacing)));
obj_val = np.zeros(int(math.ceil(n_iter/spacing)));
f_alpha = np.zeros(int(math.ceil(n_iter/spacing)));
theory_time = np.zeros(int(math.ceil(n_iter/spacing)));
tick = 0;
ttot = 0.0;
t_start = datetime.now();
# print type(Xtr)
# print(Xtr[1].shape)
# print(Ytr[1].shape)
# exit()
# temp = (Xtr.T*Ytr).T;
# Q = temp.dot(temp.T)
Xtr_y = (Xtr.T*Ytr).T;
# print(Xtr.shape)
# print(temp.shape)
# exit()
for t in range(n_iter):
i_rand = random.randint(0,n-1);
# Store the old and compute the new value of alpha along that coordinate
d_alpha_old = d_alpha[i_rand];
qii = Xtr_y[i_rand].dot(Xtr_y[i_rand]);
ret = (w).dot(Xtr_y[i_rand]);
#print(ret.shape)
#print(qii.shape)
#exit()
# xixj = Xtr[i_rand].dot(Xtr.T);
# axy = np.matrix(np.multiply(Ytr,xixj.T));
# alphaaxy = np.multiply(d_alpha,axy.T);
# summation = np.ones((1,n)).dot(alphaaxy);
# ret = Ytr[i_rand]*summation;
d_alpha[i_rand] = min(max(d_alpha_old - 0.1*(ret-1)/float(qii),0),1);
#Projection step
# if(d_alpha[i_rand]<0):
# d_alpha[i_rand]=0
# elif(d_alpha[i_rand]>1):
# d_alpha[i_rand]=1
# Update the model - takes only O(d) time!
w = w + (d_alpha[i_rand] - d_alpha_old)*Ytr[i_rand]*Xtr[i_rand];
"""if t%spacing == 0:
# Stop the timer - we want to take a snapshot
t_now = datetime.now();
delta = t_now - t_start;
time_elapsed[tick] = ttot + delta.total_seconds();
ttot = time_elapsed[tick];
tick_vals[tick] = tick;
# print((d_alpha.T).shape)
# print(Q.shape)
# temp1 = np.matmul(d_alpha.T,Q);
# print(temp1.shape)
# temp2 = np.matmul(temp1,d_alpha);
# temp2 = temp2/2.0;
# print(temp2.shape)
# print(temp2)
# s= np.sum(d_alpha);
# print(s)
# exit()
temp4 = np.matmul(Xtr_y,w);
temp5 = np.where(temp4<1,1-temp4,0);
temp6 = d_alpha.dot(Xtr_y);
temp7 = temp6.dot(temp6)
f_alpha[tick] = temp7 - np.sum(d_alpha);
obj_val[tick] = (w.dot(w))*0.5 + np.sum(temp5);
theory_time[tick] = tick_vals[tick]*spacing*d;
# Calculate the objective value f(w) for the current model w^t
#print(f_alpha[tick]),
print(obj_val[tick]),
print(time_elapsed[tick])
#print(theory_time[tick]),
tick = tick+1;
# Start the timer again - training time!
t_start = datetime.now();"""
w_final = w;
#np.savetxt("obj_val_SCD.dat", obj_val);
#np.savetxt("f_alpha_SCD.dat", f_alpha);
#np.savetxt("time_elapsed_SCD.dat", time_elapsed);
#np.savetxt("theory_time_SCD.dat", theory_time);
np.save("model_SCD.npy", w_final);
#accuracy(Xts,Yts,w_final,ntest)
if __name__ == '__main__':
main()
|
17,826 | c97842f4ac1df493c79198740ce1dc2caa584f02 | n = int(input())
# Longest "divisible" subsequence DP over the n (read from stdin above)
# integers in a: d[i] is the length of the longest subsequence ending at
# a[i] in which each element divides the next one chosen.
d = [1]*n
a = list(map(int, input().split()))
for i in range(1, n):
    acceptable = []
    for j in range(i):
        # a[j] may precede a[i] only if it divides a[i] and would extend
        # the best chain known for position i so far.
        if a[i] % a[j] == 0 and d[j] + 1 > d[i]:
            acceptable.append(d[j])
    # `acceptable or [0]` keeps d[i] at 1 when no predecessor qualifies.
    d[i] = 1 + max(acceptable or [0])
print(max(d))
17,827 | 29da211e5f6926fe05df453f4c5cdb53266e5801 | #!/usr/bin/python
try:
import sys, os
sys.path.append(os.getcwd()+'/cgi-bin')
def getTraceback():
import sys, traceback, string
type, value, tb = sys.exc_info()
body = "Traceback (innermost last):\n"
list = traceback.format_tb(tb, None) \
+ traceback.format_exception_only(type, value)
body = body + "%-20s %s" % (string.join(list[:-1], ""), list[-1])
return body
import webnotes.handler
except Exception, e:
print "Content-Type: text/html"
try:
out = {'message':'', 'exc':getTraceback().replace('\n','<br>')}
except:
out = {'exc': e}
print
print str(out) |
17,828 | 3a67aac768846e7d7db0dac66225a6067b9d6dcc | from rasa_core.channels import HttpInputChannel
from rasa_core.agent import Agent
from rasa_core.interpreter import RasaNLUInterpreter
from rasa_slack_connector import SlackInput
import ruamel.yaml as yaml
import warnings
warnings.simplefilter('ignore', yaml.error.UnsafeLoaderWarning)
nlu_interpreter = RasaNLUInterpreter('./models/nlu/default/restaurantnlu')
agent = Agent.load('./models/dialogue', interpreter = nlu_interpreter)
input_channel = SlackInput('xoxp-Verf.Key', #app verification token
'xoxb-bot.key', # bot verification token
'slack.key', # slack verification token
True)
agent.handle_channel(HttpInputChannel(5004, '/', input_channel)) |
17,829 | ec1eb17da573450afb905087ac0ee76b118a79ea | # class Interval(object):
# def __init__(self, s=0, e=0):
# self.start = s
# self.end = e
class Solution(object):
    def merge(self, intervals):
        """Merge all overlapping intervals and return the merged list.

        :type intervals: List[Interval]
        :rtype: List[Interval]
        """
        if not intervals:
            return []
        merged = []
        # Sort by (start, end) so overlapping intervals become adjacent.
        for interval in sorted(intervals, key=self.getKey):
            if merged and merged[-1].end >= interval.start:
                # Overlap with the last merged interval: extend it.
                # Building a fresh list avoids the O(n^2) cost of the
                # original's del-from-list-while-scanning loop, and the
                # stray Python-2 `print sort_list` debug statement (a
                # syntax error under Python 3) is removed.
                merged[-1] = Interval(merged[-1].start,
                                      max(merged[-1].end, interval.end))
            else:
                merged.append(interval)
        return merged

    def getKey(self, item):
        """Sort key for an interval: start first, then end."""
        return [item.start, item.end]
17,830 | 694a3c1717898eccc485b98330e28f207d8aa193 | import requests
response = requests.get('https://linkedin.com')
print(response)
print(response.url) |
17,831 | 65f5f3fa42b8cd3f0e59ffa88803be0fa8ef81cc | #!/usr/bin/env python
# coding: utf-8
# In[150]:
import pandas as pd
import numpy as np
import datetime
get_ipython().run_line_magic('matplotlib', 'inline')
# In[177]:
employee = pd.read_csv('data/employee.csv',
parse_dates=['JOB_DATE', 'HIRE_DATE'],
index_col='HIRE_DATE')
'groupby' in dir(employee.resample('10AS'))
# In[ ]:
|
17,832 | 0f0a0bc2b314220aa7d7f6e639372c6cfd0e5a2d | #!/usr/bin/env python
import os
import io
import sys
import numpy as np
import argparse
from PIL import Image
def handle_args():
    """Parse command-line arguments.

    -i: input directory containing frame images (required).
    -n: number of frames to process; 0 (the default) means all frames.

    Returns:
        argparse.Namespace: parsed arguments; note each value is a
        one-element list because of nargs=1.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', nargs=1, type=str, metavar='input-dir', required=True, help='Input video path')
    parser.add_argument('-n', nargs=1, type=int, metavar='number', default=[0], help='number of frames to process')
    return parser.parse_args()
def get_files(path):
    """Recursively yield the full path of every file below *path*."""
    for dirpath, _, names in os.walk(path):
        yield from (os.path.join(dirpath, name) for name in names)
def main():
    """Stream up to -n frames (0 = all) from directory -i to stdout as JPEG bytes."""
    args = handle_args()
    input_path = args.i[0]
    n_of_frames = args.n[0]
    counter = 0
    list_files = get_files(input_path)
    # Re-open stdout in binary mode so raw JPEG bytes can be written.
    with os.fdopen(sys.stdout.fileno(), 'wb') as output_file:
        for file_path in list_files:
            if (counter < n_of_frames or n_of_frames == 0):
                frame = Image.open(file_path, mode='r')
                if frame is not None:
                    # Re-encode the frame as JPEG into an in-memory buffer
                    # before writing, regardless of the source format.
                    frame_data = io.BytesIO()
                    frame.save(frame_data, format='JPEG')
                    output_file.write(frame_data.getvalue())
                    counter += 1
            else:
                break
if __name__ == '__main__':
main() |
17,833 | 7cfc359aa5f5fb78a0d8d32bdf9899d9604d753a | # Copyright 2021 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for mlperf_inference_benchmark."""
import os
import unittest
from perfkitbenchmarker import test_util
from perfkitbenchmarker.linux_benchmarks import mlperf_inference_benchmark
from perfkitbenchmarker.sample import Sample
from tests import pkb_common_test_case
class MlperfInferenceBenchmarkTestCase(pkb_common_test_case.PkbCommonTestCase,
                                       test_util.SamplesTestMixin):
  """Checks that MLPerf inference output files parse into the expected Samples.

  Fixture files under tests/data hold captured BERT and DLRM performance and
  accuracy logs; the golden metadata dicts below mirror their contents.
  """

  def setUp(self):
    """Load the four captured MLPerf output fixtures from tests/data."""
    super(MlperfInferenceBenchmarkTestCase, self).setUp()
    path = os.path.join(
        os.path.dirname(__file__), '..', 'data',
        'bert_inference_performance_output.txt')
    with open(path) as fp:
      self.bert_performance_contents = fp.read()
    path = os.path.join(
        os.path.dirname(__file__), '..', 'data',
        'bert_inference_accuracy_output.txt')
    with open(path) as fp:
      self.bert_accuracy_contents = fp.read()
    path = os.path.join(
        os.path.dirname(__file__), '..', 'data',
        'dlrm_inference_performance_output.txt')
    with open(path) as fp:
      self.dlrm_performance_contents = fp.read()
    path = os.path.join(
        os.path.dirname(__file__), '..', 'data',
        'dlrm_inference_accuracy_output.txt')
    with open(path) as fp:
      self.dlrm_accuracy_contents = fp.read()

  def testTrainResults(self):
    """Parses BERT/DLRM performance and accuracy logs and checks the Samples.

    NOTE(review): this single test covers four independent scenarios; it could
    be split into four tests so a failure pinpoints the broken parser path.
    """
    # --- BERT performance ---
    samples = mlperf_inference_benchmark.MakePerformanceSamplesFromOutput(
        {'version': 'v1.1'}, self.bert_performance_contents)
    metadata = {
        'mlperf 50.00 percentile latency (ns)': '40533329',
        'mlperf 90.00 percentile latency (ns)': '51387550',
        'mlperf 95.00 percentile latency (ns)': '54956149',
        'mlperf 97.00 percentile latency (ns)': '57792422',
        'mlperf 99.00 percentile latency (ns)': '82056764',
        'mlperf 99.90 percentile latency (ns)': '543294654940',
        'mlperf Completed samples per second': '3102.49',
        'mlperf Max latency (ns)': '605456500256',
        'mlperf Mean latency (ns)': '3037717062',
        'mlperf Min duration satisfied': 'Yes',
        'mlperf Min latency (ns)': '4126840',
        'mlperf Min queries satisfied': 'Yes',
        'mlperf Mode': 'PerformanceOnly',
        'mlperf Performance constraints satisfied': 'Yes',
        'mlperf Result is': 'VALID',
        'mlperf SUT name': 'BERT SERVER',
        'mlperf Scenario': 'Server',
        'mlperf Scheduled samples per second': '3102.76',
        'mlperf accuracy_level': '99%',
        'mlperf accuracy_log_probability': '0',
        'mlperf accuracy_log_rng_seed': '0',
        'mlperf accuracy_log_sampling_target': '0',
        'mlperf benchmark': 'Benchmark.BERT',
        'mlperf coalesced_tensor': 'True',
        'mlperf config_name': 'A100-SXM4-40GBx1_bert_Server',
        'mlperf config_ver': 'custom_k_99_MaxP',
        'mlperf cpu_freq': 'None',
        'mlperf gpu_batch_size': '64',
        'mlperf gpu_copy_streams': '1',
        'mlperf gpu_inference_streams': '2',
        'mlperf gpu_num_bundles': '2',
        'mlperf inference_server': 'custom',
        'mlperf input_dtype': 'int32',
        'mlperf input_format': 'linear',
        'mlperf log_dir': '/work/build/logs/2021.10.27-20.51.11',
        'mlperf max_async_queries': '0',
        'mlperf max_duration (ms)': '0',
        'mlperf max_query_count': '0',
        'mlperf min_duration (ms)': '600000',
        'mlperf min_query_count': '270336',
        'mlperf optimization_level': 'plugin-enabled',
        'mlperf performance_issue_same': '0',
        'mlperf performance_issue_same_index': '0',
        'mlperf performance_issue_unique': '0',
        'mlperf performance_sample_count': '10833',
        'mlperf power_limit': 'None',
        'mlperf precision': 'int8',
        'mlperf print_timestamps': '0',
        'mlperf qsl_rng_seed': '1624344308455410291',
        'mlperf sample_index_rng_seed': '517984244576520566',
        'mlperf samples_per_query': '1',
        'mlperf scenario': 'Scenario.Server',
        'mlperf schedule_rng_seed': '10051496985653635065',
        'mlperf server_target_qps': '3100',
        'mlperf system': 'A100-SXM4-40GBx1',
        'mlperf system_id': 'A100-SXM4-40GBx1',
        'mlperf target_latency (ns)': '130000000',
        'mlperf tensor_path':
            '${PREPROCESSED_DATA_DIR}/squad_tokenized/input_ids.npy,'
            '${PREPROCESSED_DATA_DIR}/squad_tokenized/segment_ids.npy,'
            '${PREPROCESSED_DATA_DIR}/squad_tokenized/input_mask.npy',
        'mlperf use_cpu': 'False',
        'mlperf use_graphs': 'True',
        'version': 'v1.1'
    }
    golden = Sample(
        metric='throughput', value=3102.76, unit='samples/s', metadata=metadata)
    self.assertSamplesEqualUpToTimestamp(golden, samples[0])
    # --- BERT accuracy ---
    samples = mlperf_inference_benchmark.MakeAccuracySamplesFromOutput(
        {'version': 'v1.1'}, self.bert_accuracy_contents)
    metadata = {
        'mlperf benchmark': 'Benchmark.BERT',
        'mlperf coalesced_tensor': 'True',
        'mlperf gpu_batch_size': '64',
        'mlperf gpu_copy_streams': '1',
        'mlperf gpu_inference_streams': '2',
        'mlperf input_dtype': 'int32',
        'mlperf input_format': 'linear',
        'mlperf precision': 'int8',
        'mlperf scenario': 'Scenario.Server',
        'mlperf server_target_qps': '3100',
        'mlperf system': 'A100-SXM4-40GBx1',
        'mlperf tensor_path':
            '${PREPROCESSED_DATA_DIR}/squad_tokenized/input_ids.npy,'
            '${PREPROCESSED_DATA_DIR}/squad_tokenized/segment_ids.npy,'
            '${PREPROCESSED_DATA_DIR}/squad_tokenized/input_mask.npy',
        'mlperf use_graphs': 'True',
        'mlperf config_name': 'A100-SXM4-40GBx1_bert_Server',
        'mlperf config_ver': 'custom_k_99_MaxP',
        'mlperf accuracy_level': '99%',
        'mlperf optimization_level': 'plugin-enabled',
        'mlperf inference_server': 'custom',
        'mlperf system_id': 'A100-SXM4-40GBx1',
        'mlperf use_cpu': 'False',
        'mlperf power_limit': 'None',
        'mlperf cpu_freq': 'None',
        'mlperf test_mode': 'AccuracyOnly',
        'mlperf fast': 'True',
        'mlperf gpu_num_bundles': '2',
        'mlperf log_dir': '/work/build/logs/2021.11.09-05.18.28',
        'Threshold': 89.965,
        'version': 'v1.1'
    }
    golden = Sample(
        metric='accuracy', value=90.376, unit='%', metadata=metadata)
    self.assertSamplesEqualUpToTimestamp(golden, samples[0])
    # --- DLRM performance ---
    samples = mlperf_inference_benchmark.MakePerformanceSamplesFromOutput(
        {'version': 'v1.1'}, self.dlrm_performance_contents)
    metadata = {
        'mlperf benchmark': 'Benchmark.DLRM',
        'mlperf coalesced_tensor': 'True',
        'mlperf gpu_batch_size': '262100',
        'mlperf gpu_copy_streams': '1',
        'mlperf gpu_inference_streams': '1',
        'mlperf input_dtype': 'int8',
        'mlperf input_format': 'chw4',
        'mlperf precision': 'int8',
        'mlperf scenario': 'Scenario.Server',
        'mlperf server_target_qps': '2100000',
        'mlperf system': 'A100-SXM4-40GBx8',
        'mlperf tensor_path': '${PREPROCESSED_DATA_DIR}/criteo/full_recalib/'
                              'numeric_int8_chw4.npy,'
                              '${PREPROCESSED_DATA_DIR}/criteo/full_recalib/'
                              'categorical_int32.npy',
        'mlperf use_graphs': 'False',
        'mlperf config_name': 'A100-SXM4-40GBx8_dlrm_Server',
        'mlperf config_ver': 'custom_k_99_MaxP',
        'mlperf accuracy_level': '99%',
        'mlperf optimization_level': 'plugin-enabled',
        'mlperf inference_server': 'custom',
        'mlperf system_id': 'A100-SXM4-40GBx8',
        'mlperf use_cpu': 'False',
        'mlperf power_limit': 'None',
        'mlperf cpu_freq': 'None',
        'mlperf gpu_num_bundles': '2',
        'mlperf log_dir': '/work/build/logs/2021.11.13-04.12.53',
        'mlperf SUT name': 'DLRM SERVER',
        'mlperf Scenario': 'Server',
        'mlperf Mode': 'PerformanceOnly',
        'mlperf Scheduled samples per second': '2102380.29',
        'mlperf Result is': 'VALID',
        'mlperf Performance constraints satisfied': 'Yes',
        'mlperf Min duration satisfied': 'Yes',
        'mlperf Min queries satisfied': 'Yes',
        'mlperf Completed samples per second': '2102359.14',
        'mlperf Min latency (ns)': '159697',
        'mlperf Max latency (ns)': '12452412',
        'mlperf Mean latency (ns)': '1375416',
        'mlperf 50.00 percentile latency (ns)': '1285505',
        'mlperf 90.00 percentile latency (ns)': '1984044',
        'mlperf 95.00 percentile latency (ns)': '2319343',
        'mlperf 97.00 percentile latency (ns)': '2568660',
        'mlperf 99.00 percentile latency (ns)': '3507998',
        'mlperf 99.90 percentile latency (ns)': '5628323',
        'mlperf samples_per_query': '1',
        'mlperf target_latency (ns)': '30000000',
        'mlperf max_async_queries': '0',
        'mlperf min_duration (ms)': '60000',
        'mlperf max_duration (ms)': '0',
        'mlperf min_query_count': '1',
        'mlperf max_query_count': '0',
        'mlperf qsl_rng_seed': '1624344308455410291',
        'mlperf sample_index_rng_seed': '517984244576520566',
        'mlperf schedule_rng_seed': '10051496985653635065',
        'mlperf accuracy_log_rng_seed': '0',
        'mlperf accuracy_log_probability': '0',
        'mlperf accuracy_log_sampling_target': '0',
        'mlperf print_timestamps': '0',
        'mlperf performance_issue_unique': '0',
        'mlperf performance_issue_same': '0',
        'mlperf performance_issue_same_index': '0',
        'mlperf performance_sample_count': '204800',
        'version': 'v1.1'
    }
    golden = Sample(
        metric='throughput',
        value=2102380.0,
        unit='samples/s',
        metadata=metadata)
    self.assertSamplesEqualUpToTimestamp(golden, samples[0])
    # --- DLRM accuracy ---
    samples = mlperf_inference_benchmark.MakeAccuracySamplesFromOutput(
        {'version': 'v1.1'}, self.dlrm_accuracy_contents)
    metadata = {
        'Threshold': 79.448,
        'mlperf accuracy_level': '99%',
        'mlperf benchmark': 'Benchmark.DLRM',
        'mlperf coalesced_tensor': 'True',
        'mlperf config_name': 'A100-SXM4-40GBx8_dlrm_Server',
        'mlperf config_ver': 'custom_k_99_MaxP',
        'mlperf cpu_freq': 'None',
        'mlperf fast': 'True',
        'mlperf gpu_batch_size': '262100',
        'mlperf gpu_copy_streams': '1',
        'mlperf gpu_inference_streams': '1',
        'mlperf gpu_num_bundles': '2',
        'mlperf inference_server': 'custom',
        'mlperf input_dtype': 'int8',
        'mlperf input_format': 'chw4',
        'mlperf log_dir': '/work/build/logs/2021.11.13-06.24.26',
        'mlperf optimization_level': 'plugin-enabled',
        'mlperf power_limit': 'None',
        'mlperf precision': 'int8',
        'mlperf scenario': 'Scenario.Server',
        'mlperf server_target_qps': '2100000',
        'mlperf system': 'A100-SXM4-40GBx8',
        'mlperf system_id': 'A100-SXM4-40GBx8',
        'mlperf tensor_path': '${PREPROCESSED_DATA_DIR}/criteo/full_recalib/'
                              'numeric_int8_chw4.npy,'
                              '${PREPROCESSED_DATA_DIR}/criteo/full_recalib/'
                              'categorical_int32.npy',
        'mlperf test_mode': 'AccuracyOnly',
        'mlperf use_cpu': 'False',
        'mlperf use_graphs': 'False',
        'version': 'v1.1'
    }
    golden = Sample(
        metric='accuracy', value=80.185, unit='%', metadata=metadata)
    self.assertSamplesEqualUpToTimestamp(golden, samples[0])
    # NOTE(review): stray debug print; remove or replace with logging.
    print(samples[0])
# Standard unittest entry point.
if __name__ == '__main__':
  unittest.main()
|
17,834 | 2925925d7f5c420908c4642ad9c9abf4c388ae62 | {'_data': [['Common',
[['Metabolism', u'Minskad aptit till aptitf\xf6rlust'],
['Psychiatric', u'S\xf6mnl\xf6shet'],
['Nervous system', u'Yrsel, huvudv\xe4rk, somnolens'],
['Ear', u'Vertigo'],
['Vascular', u'Blodvallning'],
['GI',
u'Buksm\xe4rtor, f\xf6rstoppning, diarr\xe9, muntorrhet, dyspepsi, kr\xe4kning, illam\xe5ende, flatulens'],
['Skin', u'Pruritus, hudreaktioner, hyperhidros'],
['General', u'Asteniska besv\xe4r'],
['Psychiatric',
u'Hum\xf6r- och personlighetsf\xf6r\xe4ndringar, minskad aktivitet, psykomotorisk hyperaktivitet'],
['GI', u'Hicka'],
['Skin', u'Dysuri']]],
['Uncommon',
[['Immune system', u'\xd6verk\xe4nslighet'],
['Psychiatric',
u'Rastl\xf6shet, onormalt t\xe4nkande, oro, f\xf6rvirring, depression, nervositet'],
['Nervous system',
u'Krampanfall (s\xe4rskilt hos personer med epilepsisjukdom eller predisposition f\xf6r krampanfall), uppm\xe4rksamhetsst\xf6rning, talst\xf6rningar, synkope, tremor'],
['Eye', u'Synskada'],
['Cardiac',
u'Angina pectoris (s\xe4rskilt hos patienter som tidigare har lidit av kransk\xe4rlssjukdom), palpitationer'],
['Vascular', u'Blodtrycksfall, blodtrycks\xf6kning'],
['Respiratory', u'Dyspn\xe9, rinorr\xe9, hosta'],
['GI', u'Abdominell distension'],
['Hepato', u'F\xf6rh\xf6jda leverenzymer, gallkolik'],
['Musculoskeletal',
u'Muskelspasmer, muskelryckningar, myalgi Njur- och urinv\xe4gsst\xf6rningar'],
['Musculoskeletal', u'Urintr\xe4ngningar'],
['General',
u'Abstinenssyndrom, br\xf6stsm\xe4rta, frossa, olustk\xe4nsla, sm\xe4rta, perifert \xf6dem, viktminskning'],
['Injury',
u'Olycksrelaterade skador F\xf6r den aktiva substansen oxikodonhydroklorid \xe4r f\xf6ljande tillkommande biverkningar k\xe4nda: P\xe5 grund av dess farmakologiska egenskaper kan oxikodonhydroklorid orsaka andningsdepression, mios, bronkial spasm och spasmer i den glatta muskulaturen liksom undertrycka hostreflexen.'],
['Metabolism', u'Dehydrering'],
['Psychiatric',
u'Uppr\xf6rdhet, perceptionsst\xf6rningar (t.ex. overklighetsk\xe4nsla), minskad libido, l\xe4kemedelsberoende'],
['Nervous system',
u'Nedsatt koncentrationsf\xf6rm\xe5ga, migr\xe4n, dysgeusi, hypertoni, ofrivilliga muskelsammandragningar, hypestesi, onormal koordination'],
['Ear', u'H\xf6rselneds\xe4ttning'],
['Vascular', u'Vasodilatation'],
['Respiratory', u'Dysfoni'],
['GI', u'Dysfagi, ileus, muns\xe5r, stomatit'],
['Skin', u'Torr hud'],
['General', u'\xd6dem, t\xf6rst, l\xe4kemedelstolerans']]],
['Rare',
[['Cardiac', u'Takykardi'],
['Respiratory', u'G\xe4spningar'],
['GI', u'Tandproblem'],
['General', u'Vikt\xf6kning'],
['Infections', u'Herpes simplex'],
['Metabolism', u'\xd6kad aptit'],
['GI', u'Melena, bl\xf6dande tandk\xf6tt'],
['Skin', u'Urtikaria Njur- och urinv\xe4gsst\xf6rningar']]],
['Unknown',
[['Psychiatric', u'Eufori, hallucination, mardr\xf6mmar'],
['Nervous system', u'Parestesier, sl\xf6het'],
['Respiratory', u'Andningsdepression'],
['GI', u'Eruktation'],
['Reproductive system', u'Erektionsst\xf6rningar'],
['Musculoskeletal', u'Urinretention'],
['Immune system', u'Anafylaktisk reaktion'],
['Hepato', u'Kolestas'],
['Reproductive system', u'Amenorr\xe9']]]],
'_pages': [6, 8],
u'_rank': 50,
u'_type': u'LSFU'} |
17,835 | c23fe8d50924746ea0fc7991bc69470f31b6ca50 | from django import forms
from .models import Register,Imageupload,Addpost
class Signupfrom(forms.ModelForm):
    """Model-backed form exposing every field of the Register model."""

    class Meta:
        model = Register
        fields = '__all__'
class Uploadimage(forms.ModelForm):
    """Model-backed form exposing every field of the Imageupload model."""

    class Meta:
        model = Imageupload
        fields = '__all__'
class Postadd(forms.ModelForm):
    """Model-backed form exposing every field of the Addpost model."""

    class Meta:
        model = Addpost
        fields = '__all__'
|
17,836 | e1a18e74bfce448f1071a36a3da50ec7a8ecf151 | import command
import random
import math
import time
class SnapCommand(command.Command):
    """Chat command that times out a random half of the current viewers.

    Registered as "snap" and restricted to the broadcaster
    (command.Permissions.broadcaster).
    """

    def __init__(self):
        # "snap", 0 (meaning defined by command.Command — presumably arg count
        # or cooldown; confirm against the base class), broadcaster-only.
        super().__init__("snap", 0, command.Permissions.broadcaster)

    def run(self, client, user, msg):
        """Time out ceil(len(viewers)/2) distinct viewers for 1 second each."""
        viewers = client.get_names()["chatters"]["viewers"]
        half = math.ceil(len(viewers) / 2)
        # random.sample draws `half` distinct viewers in one shot, replacing the
        # original rejection loop that re-drew (and printed) on every duplicate.
        snapped = random.sample(viewers, half)
        client.send_message("You should have gone for the head!")
        # `victim` — the original reused `user`, shadowing the command invoker.
        for victim in snapped:
            client.timeout(victim, 1)
            time.sleep(1 / 30)  # throttle to ~30 timeouts per second
|
17,837 | 17919d2661a4d32e0eff64feeba3184a74eeed1c | import tkinter as tk
from tkinter import messagebox
import database
import widgets
import os
from config import path_to_documents
from config import db_credentials
from config import path_to_index
class WinControlPanel:
    """Tk control window for classifying seal regions in document images.

    Shows the current selection rectangle, a list of known seal types, and
    OK/New buttons. `img_window` is an external (pygame-based) image viewer
    that is told to advance when a document is classified.
    """

    def __init__(self, img_window, coords, paths, index=0):
        self.root = tk.Tk()
        self.root.geometry("500x570+30+30")
        # DB connection and seal catalogue loaded up-front.
        self.db = database.Database(db_credentials['db_name'], db_credentials['user_name'],
                                    db_credentials['pwd'], db_credentials['tables'])
        self.db.load_seals()
        self.coords = coords          # (x1, y1, x2, y2) of the current selection
        self.paths = paths            # document directories still to classify
        self.path_index = index       # position within `paths` (resumable)
        # POINT 1 DISPLAY
        self.pt1_label = tk.Label(self.root, text="Point 1:")
        self.pt1_label.place(x=20, y=10)
        self.pt1x_value = tk.IntVar()
        self.pt1x_info = tk.Entry(self.root, state=tk.DISABLED, textvariable=self.pt1x_value)
        self.pt1x_info.place(x=70, y=10, width=50)
        self.pt1x_value.set(str(coords[0]))
        self.pt1y_value = tk.IntVar()
        self.pt1y_info = tk.Entry(self.root, state=tk.DISABLED, textvariable=self.pt1y_value)
        self.pt1y_info.place(x=130, y=10, width=50)
        self.pt1y_value.set(str(coords[1]))
        # POINT 2 DISPLAY
        self.pt2_label = tk.Label(self.root, text="Point 2:")
        self.pt2_label.place(x=20, y=30)
        self.pt2x_value = tk.IntVar()
        self.pt2x_info = tk.Entry(self.root, state=tk.DISABLED, textvariable=self.pt2x_value)
        self.pt2x_info.place(x=70, y=30, width=50)
        self.pt2x_value.set(str(coords[2]))
        self.pt2y_value = tk.IntVar()
        self.pt2y_info = tk.Entry(self.root, state=tk.DISABLED, textvariable=self.pt2y_value)
        self.pt2y_info.place(x=130, y=30, width=50)
        self.pt2y_value.set(str(coords[3]))
        # NEW SEAL WINDOW
        def on_new_seal():
            # Opens a secondary dialog for registering a previously unseen seal.
            self.new_seal_win = WinNewSeal(self.db, img_window)
        self.new_seal_butt = tk.Button(self.root, text='New', command=on_new_seal)
        self.new_seal_butt.place(x=200, y=20)
        # SEAL SELECTION ITEMS
        self.seal_type_list = widgets.SealsList(self.root, self.db)
        self.seal_type_list.x = 20
        self.seal_type_list.y = 70
        self.seal_type_list.place_items()
        # OK BUTTON
        def on_ok_button():
            # Persist the classification for the current document, then advance.
            self.db.insert_document(self.paths[self.path_index],
                                    self.seal_type_list.curr_seal_type.get(),
                                    (self.pt1x_value.get(), self.pt1y_value.get(),
                                     self.pt2x_value.get(), self.pt2y_value.get()))
            # NOTE(review): the bound is checked *before* incrementing, so when
            # path_index is the last valid index this increments past the end
            # and the next access raises IndexError — confirm and fix.
            if self.path_index < len(self.paths):
                self.path_index += 1
                onlyimages = [f for f in os.listdir(self.paths[self.path_index])
                              if os.path.isfile(os.path.join(self.paths[self.path_index], f)) and f.endswith('.png')]
                # Ask the pygame image window to switch to the new image.
                if __name__ != "__main__":
                    img_window.update_img(self.paths[self.path_index] + '/' + onlyimages[0])
            else:
                messagebox.showinfo("End of classification", "There are no more documents to classify")
        self.ok_button = tk.Button(self.root, text='OK', command=on_ok_button)
        self.ok_button.place(x=250, y=20)
        def on_closing():
            # Persist the resume index so a later session continues where we left off.
            if messagebox.askokcancel("Quit", "Do you want to quit?"):
                index_file = open(path_to_index + '/index.txt', 'w')
                index_file.write(str(self.path_index))
                index_file.close()
                self.root.destroy()
        self.root.protocol("WM_DELETE_WINDOW", on_closing)

    def update_labels(self, new_coords):
        """Refresh the four coordinate entries from (x1, y1, x2, y2)."""
        self.pt1x_value.set(str(new_coords[0]))
        self.pt1y_value.set(str(new_coords[1]))
        self.pt2x_value.set(str(new_coords[2]))
        self.pt2y_value.set(str(new_coords[3]))
class WinNewSeal:
    """Small dialog for registering a new seal type (name + author).

    On OK the seal is inserted into the database and the image window is asked
    to save the currently selected region under that name.
    """

    def __init__(self, db, img_win):
        self.db = db
        self.img_win = img_win
        self.root = tk.Tk()
        self.width = 300
        self.height = 100
        geom_str = "%ix%i+30+30" % (self.width, self.height)
        self.root.geometry(geom_str)
        # SEAL INFO
        self.name_label = tk.Label(self.root, text='Nombre')
        self.name_label.place(x=20, y=10)
        # self.seal_name = tk.StringVar()
        self.name_info = tk.Entry(self.root)  # , textvariable=self.seal_name)
        self.name_info.place(x=70, y=10)
        self.author_label = tk.Label(self.root, text='Autor')
        self.author_label.place(x=20, y=40)
        # self.seal_author = tk.StringVar()
        self.author_info = tk.Entry(self.root)  # , textvariable=self.seal_author)
        self.author_info.place(x=70, y=40)
        # SEAL PREVIEW (disabled draft kept for reference)
        # self.canvas = tk.Canvas(self.root, width=600, height=400)  # <--CANVAS
        # self.img_route = self.db.seal_list[0].img_route
        # self.img_route = self.img_route.replace("\\", "/")
        # photo = Image.open(self.img_route)
        # cropped = photo.crop((coords[0], coords[1], coords[2], coords[3]))
        # tk_cropped = ImageTk.PhotoImage(cropped)
        # # photo = tk.PhotoImage(file=self.img_route)
        # self.seal_img = self.canvas.create_image(0, 0, anchor=tk.NW, image=tk_cropped)
        # self.canvas.image = tk_cropped
        # OK BUTTON
        self.ok_button = tk.Button(self.root, text='OK', command=self.on_ok_button)
        self.ok_button.place(x=200, y=20)

    def on_ok_button(self):
        """Persist the new seal, save its image crop, and close the dialog."""
        self.db.insert_seal(self.name_info.get(), self.author_info.get())
        self.img_win.save_seal(self.name_info.get())
        self.root.destroy()
self.root.destroy()
if __name__ == "__main__":
    # Standalone mode: collect every directory under `path_to_documents` that
    # contains at least one PNG; each such directory is one document to classify.
    path = path_to_documents
    walk = os.walk(path)
    doc_paths = []
    for root, dirs, files in walk:
        there_is_any_img = False
        for curr_file in files:
            if curr_file.endswith(".png"):
                there_is_any_img = True
        if there_is_any_img:
            root = root.replace("\\", "/")
            doc_paths.append(root)
    # No pygame image window here; dummy coordinates are used for layout testing.
    win = WinControlPanel(None, (10, 20, 30, 40), doc_paths)
    # NOTE(review): manual event loop instead of root.mainloop() — this
    # busy-waits a full CPU core.
    while 1:
        win.root.update_idletasks()
        win.root.update()
|
17,838 | 6fec79592c4a348cfebb3b23d9e5fd1972cd7a47 | from django import forms
from django.forms import ModelForm
# Create your forms here
from .models import Customer, Product, Order
class CustomerForm(ModelForm):
    """Customer form with Bootstrap-styled widgets and Chinese user-facing labels."""

    class Meta:
        model = Customer
        fields = '__all__'
        # time_created is server-assigned, so it is not user-editable.
        exclude = ['time_created']
        labels = {
            'name': '姓名',
            'phone': '电话',
            'email': '邮箱',
        }
        widgets = {
            'name': forms.TextInput(attrs={
                'class': 'form-control',
                'placeholder': '请输入姓名',
            }),
            'phone': forms.NumberInput(attrs={
                'class': 'form-control',
                'placeholder': '请输入电话',
            }),
            'email': forms.EmailInput(attrs={
                'class': 'form-control',
                'placeholder': '请输入邮箱'
            })
        }
class OrderForm(ModelForm):
    """Order form: customer/product/status selects with Bootstrap styling."""

    class Meta:
        model = Order
        fields = '__all__'
        # time_created is server-assigned, so it is not user-editable.
        exclude = ['time_created']
        labels = {
            'customer': '顾客',
            'product': '商品',
            'status': '状态',
        }
        widgets = {
            'customer': forms.Select(attrs={
                'class': 'form-control',
            }),
            'product': forms.Select(attrs={
                'class': 'form-control',
            }),
            'status': forms.Select(attrs={
                'class': 'form-control',
            })
        }
|
17,839 | 074975c9df25065865b41afc7f2ab8a57328edbe | from flask import Flask, request
import asyncio
import math
app=Flask(__name__)
# Flat rate used by all endpoints. NOTE(review): the compounding period
# (annual? per unit of `time`?) is not stated anywhere in this file — confirm.
INTERESTS_RATE=0.1
@app.route('/api/interests-rate', methods=['GET'])
def get_interests_rate():
    """Return the current interests rate as plain text."""
    return str(INTERESTS_RATE)
async def async_get_interests_rate():
    """Coroutine twin of get_interests_rate, awaited by calc_interests_rate."""
    return str(INTERESTS_RATE)
@app.route('/api/calc-interests-rate', methods=['POST'])
def calc_interests_rate():
    """POST endpoint: compound interest, final = initial * (1 + rate) ** time.

    Reads form fields `initial_value` and `time`; returns the final value
    formatted to two decimals.
    """
    # A fresh event loop per request so the coroutine can be awaited
    # synchronously inside Flask's worker thread.
    asyncio.set_event_loop(asyncio.new_event_loop())
    loop=asyncio.get_event_loop()
    result=loop.run_until_complete(async_get_interests_rate())
    try:
        interests_rate=float(result)
        initial_value=float(request.form.get('initial_value'))
        time=float(request.form.get('time'))
        calc=initial_value*(math.pow(1+interests_rate,time))
        final_value= "{:.2f}".format(calc)
        return final_value
    except Exception as ex:
        # NOTE(review): a missing/invalid form field is returned as the
        # exception text with HTTP 200 — consider responding 400 instead.
        return str(ex)
    #return 'Initial: ' + initial_value + '; Time: '+ time + '; interests-rate: ' + str(interests_rate)
@app.route('/api/show-me-your-code', methods=['GET'])
def show_github_link():
    """Return an HTML link to this project's source repository."""
    return '<a href="https://github.com/fabricioizumi/flaskapi">https://github.com/fabricioizumi/flaskapi</a>'
|
17,840 | 170f43f39a97aeac53a585910ad66ae10bfaf7ce | import matplotlib.pyplot as plt
from roundAlwaysUp import roundAlwaysUp
from matplotlib.widgets import RadioButtons
class SimpleRainPlots:
    """Interactive matplotlib view of monthly rainfall per Mexican state.

    A radio-button control figure selects the state; a second figure shows one
    line chart per year (rows x cols grid) with a shared y-limit.

    rainDictionaries layout (inferred from the slicing below — confirm against
    the data loader): {year: {state: {month: mm, ..., <annual total>}}} where
    the last entry of each inner dict is an aggregate, not a month.
    """

    def __init__(self, rainDictionaries):
        self.rainDictionaries = rainDictionaries
        self.figure = plt.figure(constrained_layout=True)
        self.gridSpaceBarChart = self.figure.add_gridspec(1, 1)
        self.inputArea = self.figure.add_subplot(self.gridSpaceBarChart[0, 0])
        firstYear = list(self.rainDictionaries.keys())[0]
        # [0:-1] drops the trailing non-state summary entry.
        self.AllStates = list(self.rainDictionaries[firstYear].keys())[0:-1]
        self.names = list(self.AllStates)
        self.radio_buttons = RadioButtons(self.inputArea, tuple(self.names))
        self.buildInputArea()
        self.inputArea.set_facecolor('lightgoldenrodyellow')
        self.figure.suptitle("Choose the state to visualize.")
        self.figure.canvas.set_window_title("Control")
        self.LineChartfigure, self.LineChartLocation = plt.subplots(2, 5, constrained_layout=True)
        self.lineAllYearsPerMonth(self.names[0], 2, 5)

    def on_clicked(self, label):
        """Radio-button callback: redraw the per-year charts for the chosen state."""
        # (The original computed an unused `year` local here; removed.)
        self.lineAllYearsPerMonth(self.AllStates[self.names.index(label)], 2, 5)
        plt.draw()

    # A line graph of the indicated state, one subplot per year, laid out in
    # the given rows x cols grid. Returns (figure, axes array).
    def lineAllYearsPerMonth(self, state, rows, cols):
        X_INCHES_LAPTOP = 12
        Y_INCHES_LAPTOP = 7
        years = {}
        ROTATION_ANGLE = 90  # degrees, for the month tick labels
        for key, value in self.rainDictionaries.items():
            years[key] = value[state]
        # Shared y-limit: largest monthly value across all years, rounded up.
        absoluteMax = 0
        for value in years.values():
            maxPerState = max(list(value.values())[0:-1])
            if maxPerState > absoluteMax:
                absoluteMax = maxPerState
        absoluteMax = roundAlwaysUp(int(absoluteMax))
        r, c = 0, 0
        for key, value in years.items():
            months = list(value.keys())
            mm = list(value.values())
            if c >= cols:
                c = 0
                r = r + 1
            axis = self.LineChartLocation[r, c]
            axis.clear()
            # [0:-1] skips the trailing "ANUAL" aggregate entry.
            axis.plot(months[0:-1], mm[0:-1])
            axis.set_title(str(key))
            axis.grid(True)
            axis.set_ylim(0, absoluteMax)
            axis.set_yticks(axis.get_yticks())
            for label in axis.get_xticklabels():
                label.set_rotation(ROTATION_ANGLE)
            c = c + 1
        self.LineChartfigure.suptitle("Precipitación mensual en %s (%d años)" % (state, len(self.rainDictionaries.items())))
        self.LineChartfigure.canvas.set_window_title("Precipitación mensual por estado de la república mexicana")
        # NOTE(review): Y_INCHES_LAPTOP is unused — the original passed X+1 for
        # both dimensions; possibly (X + 1, Y_INCHES_LAPTOP + 1) was intended.
        self.LineChartfigure.set_size_inches(X_INCHES_LAPTOP + 1, X_INCHES_LAPTOP + 1)
        return self.LineChartfigure, self.LineChartLocation

    def setRainDictionaries(self, dictionaries):
        """Replace the rainfall data.

        Bug fix: the original signature was `setRainDictionaries(dictionaries)`
        — missing `self` — so any call raised NameError on the body's `self`.
        """
        self.rainDictionaries = dictionaries

    def buildInputArea(self):
        """Wire the radio buttons to the redraw callback."""
        self.radio_buttons.on_clicked(self.on_clicked)
|
17,841 | e440f062f4198b6999fe55b48d45ce639975cd0e | #!/usr/bin/env python
# Efficient permutation of non-redundant binary numbers:
# https://stackoverflow.com/a/37584849
# Date located in: -
from __future__ import print_function
import sys, itertools, math
def getSquare(x):
    """Return sqrt(x) rounded to the nearest integer."""
    return int(math.sqrt(x) + 0.5)
def isSquare(x):
    """Return True iff x is a perfect square (exact integer comparison)."""
    candidate = int(math.sqrt(x) + 0.5)
    return candidate ** 2 == x
def binaryShuffle(length=30, ones=15):
    """Search every `length`-digit 0/1 pattern with `ones` ones for a perfect square.

    Each pattern is read as a *decimal* number (leading zeros just shrink it).
    On the first hit, prints the root and terminates the process via sys.exit;
    otherwise counts candidates in the module-global `globalCount`, reporting
    progress to stderr every 10 million.
    """
    global globalCount
    # NOTE(review): `result` is never used — candidate for removal.
    result = []
    rr = ['0'] * length ## initialize empty list with
                        ## ZEROS of given length
    for c in itertools.combinations(range(length), ones):
        r = rr[:] ## create a copy of initialized list
        for x in c:
            r[x] = '1' ## Change ZERO to ONE based on different
                       ## combinations of positions
        number = "".join(r)
        if isSquare(int(number)):
            print("SUCCESS: The square root of " + number, end="")
            print(" is " + str(getSquare(int(number))))
            sys.exit(0)
        globalCount += 1
        if globalCount % 10000000 == 0:
            print(str(globalCount) + " possible solutions evaluated.",
                  file=sys.stderr)
# Exhaustive search: C(30, 15) ~ 155 million candidates, so expect a long run.
globalCount = 0
binaryShuffle()
print("No perfect square found of length 30 with 15 zeros and 15 ones")
print("Total " + str(globalCount) + " combinations evaluated.")
|
17,842 | 8fa1a985417b8e731b43d94d207de3dd809d40fc | from django.conf.urls.defaults import *
from app.views import CustomRegistrationView
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.contrib import admin
# Register every app's ModelAdmins with the admin site.
admin.autodiscover()
# NOTE(review): `patterns('')` with string view paths is Django <= 1.7 style;
# these APIs were removed in Django 1.10.
urlpatterns = patterns('',
    url(r'^admin/', include(admin.site.urls)),
    url(r'^$', 'app.views.main'),
    url(r'^signup$', 'app.account.signup'),
    url(r'^start$', 'app.views.start', name="start"),
    url(r'^preferences$', 'app.views.preferences'),
    url(r'^choose$', 'app.views.choose'),
    url(r'^results$', 'app.views.results'),
    url(r'^import$', 'app.import.main', name='import'),
    url(r'^versions$', 'app.views.versions', name='versions'),
    url(r'^v(?P<version_id>[0-9]+)$', 'app.views.set_version'),
    url(r'^my_foodie$', 'app.views.my_foodie'), # reinier
    url(r'^random_meal$', 'app.views.random_meal'), #reinier
    url(r'^about$', 'app.views.about'), #reinier
    url(r'^intro2$', 'app.views.intro2'),
    url(r'^intro3$', 'app.views.intro3'),
    url(r'^intro4$', 'app.views.intro4'),
    url(r'^intro5$', 'app.views.intro5'),
    url(r'^intro6$', 'app.views.intro6'),
    url(r'^intro7$', 'app.views.intro7'),
    url(r'^intro8$', 'app.views.intro8'),
    url(r'^intro9$', 'app.views.intro9'),
    url(r'^VIP$', 'app.views.VIP'), #Chums
    url(r'^accounts/register/$',
        CustomRegistrationView.as_view(),
        name='registration_register'),
    url(r'^login/$', 'django.contrib.auth.views.login', name='login'),
    url(r'^logout/$', 'django.contrib.auth.views.logout', {'next_page': '/'}, name='logout'),
    # (r'^accounts/', include('registration.backends.simple.urls')),
)
# Serve static files in development.
urlpatterns += staticfiles_urlpatterns()
|
17,843 | 194dbec8fff1a84ebb2e4cab25b5d7602065b0ab | import pickle
# Progress marker printed before any pickling work starts.
print("starting...")
def pickleIt(name, o):
    """Serialize *o* to "<name>.pickle" in the working directory."""
    # `with` guarantees the handle is closed; the original leaked it.
    with open(name + ".pickle", "wb") as handle:
        pickle.dump(o, handle)
def unpickleIt(name):
    """Load and return the object stored in "<name>.pickle"."""
    # `with` guarantees the handle is closed; the original leaked it.
    with open(name + ".pickle", "rb") as handle:
        return pickle.load(handle)
def showPerson(p):
    """Print a one-line summary of *p* (its DisplayName)."""
    label = "Display name: " + p.DisplayName
    print(label)
# Demo: round-trip a plain string through pickle.
# (Fixed: both file handles were opened without ever being closed.)
a = "mike was here"
with open("first.pickle", "wb") as out_file:
    pickle.dump(a, out_file)
with open("first.pickle", "rb") as in_file:
    returnedString = pickle.load(in_file)
print("returned: " + returnedString)
class Person:
    """Minimal record type used by the pickling demo below."""
    # Class-level defaults; instances overwrite them after construction.
    Age=-1
    DisplayName=""
# Build a Person, pickle it, reload it, and display both copies.
mike = Person()
mike.Age =40
mike.DisplayName = "Rapa, Mike"
pickleIt("mike", mike)
newPerson = unpickleIt("mike")
showPerson(newPerson)
print("mike: " + mike.DisplayName)
print("Ending")
|
17,844 | 080afdefa03adbc25e716792f2114019d8d50467 | import json
import time
from airflow import DAG
from airflow.operators.python import PythonOperator
from datetime import datetime, timedelta
from airflow.providers.mongo.hooks.mongo import MongoHook
from airflow.models import Variable
from scripts.location_scripts.tools import get_flat_geocoding, get_flat_distance_to_center, parse_address_info
# Airflow defaults for every task in this DAG.
default_args = {
    'owner': 'oleg',
    'start_date': datetime(2021, 6, 22),
    'email': ['gysevov@yandex.ru'],
    'email_on_failure': True,
    'email_on_retry': False,
    'retries': 1,
    'retry_delay': timedelta(minutes=5),
    'schedule_interval': '@daily',
}
# NOTE(review): schedule_interval=None here overrides the '@daily' entry in
# default_args — the DAG only runs when triggered manually; confirm intended.
dag = DAG('geo_dag',
          schedule_interval=None,
          default_args=default_args)
hook = MongoHook('parser')
flats_collection_name = Variable.get('mongo_db_flats_collection_name')
flats_database_name = Variable.get('mongo_db_flats_database_name')
# flats = list(hook.find(collection_name, {"location": {"$or": [{"$exists": False}, {"$eq": None}]}}, mongo_db='parser'))
API_KEY = Variable.get('MAP_API_KEY')
RUSSIAN_CITIES = json.loads(Variable.get('RUSSIAN_CITIES'))[0]
# LocationIQ endpoints: forward geocoding and driving-route distance.
API_QUERY_URL_GEOCODING = f'https://eu1.locationiq.com/v1/search.php?key={API_KEY}&format=json&q='
API_QUERY_URL_ROUTING = 'https://eu1.locationiq.com/v1/directions/driving/{lng},{lat};' \
                        '{city_center_lng},{city_center_lat}' \
                        f'?key={API_KEY}&steps=false&alternatives=false&overview=full'
def get_flats_info(query, api_query_url, function, **sub_args):
    """Fetch per-flat data from the LocationIQ API and persist it into Mongo.

    query         -- Mongo filter selecting flats still missing the data
    api_query_url -- endpoint/template handed through to `function`
    function      -- callable(flat, api_query_url, **sub_args) -> (dict, wait)
    sub_args      -- extra keyword arguments forwarded to `function`
    """
    k = 0
    flats = list(hook.find(flats_collection_name, query, mongo_db=flats_database_name))
    flats_len = len(flats)
    for i, flat in enumerate(flats):
        res, wait = function(flat, api_query_url, **sub_args)
        k += 1
        # Re-check the flat still exists before writing the update.
        if hook.find(flats_collection_name, {'id': flat['id']}, find_one=True, mongo_db=flats_database_name):
            hook.update_one(flats_collection_name, {'id': flat['id']}, {"$set": res}, mongo_db=flats_database_name)
        if i % 10 == 0:
            print(f'Taken info for {i}/{flats_len}')
        # NOTE(review): 4700 looks like a daily API quota — confirm, and
        # consider promoting it to a named constant or Airflow Variable.
        if k == 4700:
            break
        if wait:
            time.sleep(1)  # presumably the API rate limit — verify
def parse_address():
    """Parse address strings of flats lacking `location_parsed` and store the result."""
    query = {"location_parsed": {"$exists": False}}
    flats = list(hook.find(flats_collection_name, query, mongo_db=flats_database_name))
    flats_len = len(flats)
    # Lower-cased known city names so matching is case-insensitive.
    cities = [city.lower() for city in RUSSIAN_CITIES.keys()]
    for i, flat in enumerate(flats):
        parsed_info = parse_address_info(flat, cities=cities)
        if hook.find(flats_collection_name, {'id': flat['id']}, find_one=True, mongo_db=flats_database_name):
            hook.update_one(flats_collection_name, {'id': flat['id']}, {"$set": parsed_info}, mongo_db=flats_database_name)
        if i % 100 == 0:
            print(f'Parsed {i}/{flats_len}')
# Task 1: normalize raw address strings.
parse_address_info_operator = PythonOperator(
    task_id='parse_address_info',
    python_callable=parse_address,
    dag=dag)
# Task 2: geocode flats with no location yet.
get_flats_location_operator = PythonOperator(
    task_id='flats_location',
    python_callable=get_flats_info,
    op_kwargs={'query': {"$or": [{"location": {"$exists": False}}, {"location": {"$eq": None}}]},
               'api_query_url': API_QUERY_URL_GEOCODING,
               'function': get_flat_geocoding},
    dag=dag)
# Flats that are geocoded but still lack a distance-to-center value.
query = {
    "$and" : [
        {"$and": [
            { "location": {"$exists": True}},
            { "location": {"$ne": None}},
        ]},
        {"$or": [
            { "distance_to_center": {"$exists": False}},
            { "distance_to_center": {"$eq": None}},
        ]}
    ]
}
# Task 3: compute driving distance to the city center.
# NOTE(review): op_kwargs nests 'sub_args' as a single dict, so the function
# receives sub_args={'cities': ...} rather than cities=... — confirm that
# get_flat_distance_to_center expects it this way.
get_flats_distance_to_center_operator = PythonOperator(
    task_id='flats_distance_to_center',
    python_callable=get_flats_info,
    op_kwargs={'query': query,
               'api_query_url': API_QUERY_URL_ROUTING,
               'function': get_flat_distance_to_center,
               'sub_args': {'cities': RUSSIAN_CITIES}},
    dag=dag)
# Pipeline: parse addresses -> geocode -> distance to center.
parse_address_info_operator >> get_flats_location_operator >> get_flats_distance_to_center_operator
# flat_tasks
#['москва', 'санкт-петербург', 'новосибирск', 'екатеринбург', 'казань', 'нижний новгород', 'челябинск', 'самара', 'омск', 'ростов-на-дону', 'уфа', 'красноярск', 'воронеж', 'пермь', 'волгоград', 'краснодар', 'саратов', 'тюмень', 'тольятти', 'ижевск', 'барнаул', 'ульяновск', 'иркутск', 'хабаровск', 'махачкала', 'ярославль', 'владивосток', 'оренбург', 'томск', 'кемерово', 'новокузнецк', 'рязань', 'набережные челны', 'астрахань', 'киров', 'пенза', 'балашиха', 'липецк', 'чебоксары', 'калининград', 'тула', 'севастополь', 'ставрополь', 'курск', 'улан-удэ', 'сочи', 'тверь', 'магнитогорск', 'иваново', 'брянск', 'белгород', 'сургут', 'владимир', 'чита', 'архангельск', 'нижний тагил', 'симферопoль', 'калуга', 'якутск', 'грозный', 'волжский', 'смоленск', 'саранск', 'череповец', 'курган', 'подольск', 'вологда', 'орёл', 'владикавказ', 'тамбов', 'мурманск', 'петрозаводск', 'нижневартовск', 'кострома', 'йошкар-ола', 'новороссийск', 'стерлитамак', 'химки', 'таганрог', 'мытищи', 'сыктывкар', 'комсомольск-на-амуре', 'нижнекамск', 'нальчик', 'шахты', 'дзержинск', 'энгельс', 'благовещенск', 'королёв', 'братск', 'великий новгород', 'орск', 'старый оскол', 'ангарск', 'псков', 'люберцы', 'южно-сахалинск', 'бийск', 'прокопьевск', 'абакан', 'армавир', 'балаково', 'норильск', 'рыбинск', 'северодвинск', 'петропавловск-камчатский', 'красногорск', 'уссурийск', 'волгодонск', 'новочеркасск', 'сызрань', 'каменск-уральский', 'златоуст', 'альметьевск', 'электросталь', 'керчь', 'миасс', 'салават', 'хасавюрт', 'пятигорск', 'копейск', 'находка', 'рубцовск', 'майкоп', 'коломна', 'березники', 'одинцово', 'домодедово', 'ковров', 'нефтекамск', 'каспийск', 'нефтеюганск', 'кисловодск', 'новочебоксарск', 'батайск', 'щёлково', 'дербент', 'серпухов', 'назрань', 'раменское', 'черкесск', 'новомосковск', 'кызыл', 'первоуральск', 'новый уренгой', 'орехово-зуево', 'долгопрудный', 'обнинск', 'невинномысск', 'ессентуки', 'октябрьский', 'димитровград', 'пушкино', 'камышин', 'ноябрьск', 'евпатория', 'реутов', 
'жуковский', 'северск', 'муром', 'новошахтинск', 'артём', 'ачинск', 'бердск', 'элиста', 'арзамас', 'ханты-мансийск', 'ногинск', 'елец', 'железногорск', 'зеленодольск', 'новокуйбышевск', 'сергиев посад', 'тобольск', 'воткинск', 'саров', 'междуреченск', 'михайловск', 'серов', 'сарапул', 'анапа', 'ленинск-кузнецкий', 'ухта', 'воскресенск', 'соликамск', 'глазов', 'магадан', 'великие луки', 'мичуринск', 'лобня', 'гатчина', 'канск', 'каменск-шахтинский', 'губкин', 'бузулук', 'киселёвск', 'ейск', 'ивантеевка', 'новотроицк', 'чайковский', 'бугульма', 'железногорск', 'юрга', 'кинешма', 'азов', 'кузнецк', 'усть-илимск', 'новоуральск', 'клин', 'видное', 'мурино', 'ялта', 'озёрск', 'кропоткин', 'бор', 'всеволожск', 'геленджик', 'черногорск', 'усолье-сибирское', 'балашов', 'новоалтайск', 'дубна', 'шадринск', 'верхняя пышма', 'выборг', 'елабуга', 'минеральные воды', 'егорьевск', 'троицк', 'чехов', 'чапаевск', 'белово', 'биробиджан', 'когалым', 'кирово-чепецк', 'дмитров', 'туймазы', 'славянск-на-кубани', 'феодосия', 'минусинск', 'сосновый бор', 'наро-фоминск', 'анжеро-судженск', 'кстово', 'сунжа', 'буйнакск', 'ступино', 'георгиевск', 'заречный', 'горно-алтайск', 'белогорск', 'белорецк', 'кунгур', 'ишим', 'урус-мартан', 'ишимбай', 'павловский посад', 'клинцы', 'гуково', 'россошь', 'асбест', 'котлас', 'зеленогорск', 'донской', 'лениногорск', 'избербаш', 'туапсе', 'вольск', 'ревда', 'будённовск', 'берёзовский', 'сибай', 'полевской', 'лыткарино', 'лысьва', 'кумертау', 'белебей', 'нерюнгри', 'лесосибирск', 'фрязино', 'сертолово', 'чистополь', 'прохладный', 'борисоглебск', 'нягань', 'лабинск', 'крымск', 'тихвин', 'гудермес', 'алексин', 'александров', 'михайловка', 'ржев', 'щёкино', 'тихорецк', 'сальск', 'шали', 'павлово', 'шуя', 'мелеуз', 'краснотурьинск', 'искитим', 'североморск', 'апатиты', 'свободный', 'выкса', 'лиски', 'дзержинский', 'волжск', 'вязьма', 'воркута', 'гусь-хрустальный', 'снежинск', 'краснокамск', 'арсеньев', 'краснокаменск', 'белореченск', 'салехард', 'жигулёвск', 
'котельники', 'тимашёвск', 'кириши'] |
17,845 | 1689b0dffc6f70ac7a7e8c6c12ec4cacd23dbae8 | import math
class Vector3d:
    """A simple 3D vector with components x, y and z."""
    def __init__(self, xyz):
        # Bug fix: the original unpacked from the misspelled name `xyx`,
        # which raised NameError on every construction.
        self.x, self.y, self.z = xyz
class Point3d:
    """An immutable 3D point; every operation returns a new Point3d."""

    def __init__(self, xyz):
        self.x, self.y, self.z = xyz

    def add_vect(self, vect):
        """Return this point displaced by a vector-like object with x/y/z."""
        return Point3d((self.x + vect.x, self.y + vect.y, self.z + vect.z))

    def translate(self, xyz):
        """Return this point shifted by the (dx, dy, dz) sequence."""
        return Point3d((self.x + xyz[0], self.y + xyz[1], self.z + xyz[2]))

    def rotate_x(self, theta):
        """Rotate by theta radians about the x axis."""
        c, s = math.cos(theta), math.sin(theta)
        return Point3d((self.x, self.y * c - self.z * s, self.y * s + self.z * c))

    def rotate_z(self, theta):
        """Rotate by theta radians about the z axis."""
        c, s = math.cos(theta), math.sin(theta)
        return Point3d((self.x * c - self.y * s, self.y * c + self.x * s, self.z))

    def rotate_y(self, theta):
        """Rotate by theta radians about the y axis."""
        c, s = math.cos(theta), math.sin(theta)
        return Point3d((self.x * c + self.z * s, self.y, self.z * c - self.x * s))

    def scale(self, xyz):
        """Return this point scaled component-wise by (sx, sy, sz)."""
        return Point3d((self.x * xyz[0], self.y * xyz[1], self.z * xyz[2]))

    def __str__(self):
        return "({0}, {1}, {2})".format(self.x, self.y, self.z)
def calc_distance(triangle,
# Demo script exercising Point3d (Python 2 `print` statements).
point = Point3d((1, 2, 1))
# One 45-degree rotation about the y axis.
print point.rotate_y(math.radians(45))
# Two chained 60-degree rotations, i.e. 120 degrees total.
print point.rotate_y(math.radians(60)).rotate_y(math.radians(60))
# Three chained 60-degree rotations, i.e. 180 degrees total.
print point.rotate_y(math.radians(60)).rotate_y(math.radians(60)).rotate_y(math.radians(60))
# Non-uniform component-wise scaling.
print point.scale((3, 0.5, 1))
|
17,846 | 29e3855e81812423ffc43eb1d065d63f169f59aa | import requests
import re
from bs4 import BeautifulSoup
# 제출 버튼을 눌러야만 채점 및 점수 부여가 됩니다.
headers = headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36'}
url = "https://www.worldometers.info/coronavirus/"
# request 요청
def get_request():
    """Fetch the page at the module-level `url` and return it parsed as a
    BeautifulSoup document, or None when the request fails."""
    soup = None
    try:
        response = requests.get(url, headers=headers)
        if response.status_code == 200:
            # Parse the fetched HTML into a soup object.
            soup = BeautifulSoup(response.text, "html.parser")
        else:
            print("http request에 실패하였습니다.")
    except:
        # Best-effort: any other failure (network, parsing) yields None.
        print("Unhandled Error")
    return soup
# crawling 함수
def crawling(soup):
    """Extract per-country COVID statistics from the worldometers table.

    Returns a dict mapping country name to a dict with the keys
    '확진자' (total cases), '사망자' (total deaths) and '완치' (recovered);
    each value is an int, or the string 'N/A' when the cell is empty.
    """
    corona_info = {}
    tbody = soup.find("tbody")
    # Continent aggregate rows carry the "row_continent" class; keep only
    # the real per-country rows.
    all_rows = tbody.find_all("tr")
    continent_rows = tbody.find_all("tr", "row_continent")
    country_rows = [row for row in all_rows if row not in continent_rows]
    for row in country_rows:
        cells = row.find_all("td")
        # Column layout: 1 = country name, 2 = total cases,
        # 4 = total deaths, 6 = total recovered.
        wanted = [cells[i].text.replace("\n", "").replace(",", "").replace(
            " ", "") for i in range(len(cells)) if i in (1, 2, 4, 6)]
        country = wanted[0]
        # Values are ints; empty or unavailable cells become the string "N/A".
        stats = [int(v) if v != '' and v != 'N/A' else 'N/A'
                 for v in wanted[1:]]
        t_case, t_death, t_recover = stats
        corona_info[country] = {'확진자': t_case, '사망자': t_death, '완치': t_recover}
    return corona_info
# Script entry point: fetch the page, crawl it, and print one line per country.
if __name__ == "__main__":
    soup = get_request()
    # NOTE(review): `soup is not None` would be the idiomatic comparison.
    if soup != None:
        res = crawling(soup)
        # 3. Print the crawled results: one "country stats-dict" line each.
        for key, value in res.items():
            print(key, value)
    else:
        print("failed: get_request()")
|
17,847 | 4b8fe1d70080a8852546bd14b483571fb24808b1 | import os
from datetime import datetime
from skimage.io import imread, imsave
import numpy as np
import pandas as pd
OPEN_FACE_BINARY_PATH = '/data/cvg/luca/tools/OpenFace/build/bin'
ALLOWED_ACTION_UNITS = [1, 2, 4, 5, 6, 7, 9, 10, 12, 14, 15, 17, 20, 23, 25, 26, 45]
def extract_action_units(np_img, au_list=None):
    """
    Params:
    - np_img: numpy array of shape (height, width, num_channels)
        containing the image; assumed float-valued in [0, 1] — TODO confirm.
    - au_list: list of action units to extract. If not specified,
        all action units listed above will be extracted.
    Returns:
    - (au_presence, au_intensity): numpy arrays with the extracted
      action-unit presence flags and intensities.
    """
    # Make sure only extractable action units are passed as parameter.
    if not au_list is None:
        assert all(au in ALLOWED_ACTION_UNITS for au in au_list), 'Invalid action unit list provided.'
    # Prepare extraction: OpenFace must be invoked from its own directory.
    current_dir = os.getcwd()
    os.chdir(OPEN_FACE_BINARY_PATH)
    timestamp = datetime.now().timestamp()
    # Extract action units using OpenFace.
    # Bug fix: scale out-of-place — the original `np_img *= 255` silently
    # modified the caller's array.
    np_img = np_img * 255
    np_img = np_img.astype(np.uint8)
    imsave('{}.jpg'.format(timestamp), np_img)
    exit_code = os.system('./FaceLandmarkImg -f {}.jpg >/dev/null'.format(timestamp))
    csv_data = pd.read_csv('processed/{}.csv'.format(timestamp))
    # OpenFace CSV layout: the last 18 columns are AU presence flags, the
    # 17 columns before those are AU intensities.
    au_presence = csv_data.iloc[0, -18:].tolist()
    del au_presence[-2]  # remove AU_28c because it has no intensity equivalent
    au_intensity = csv_data.iloc[0, -35:-18].tolist()
    # Convert action unit lists to numpy arrays.
    au_presence = np.array(au_presence)
    au_intensity = np.array(au_intensity)
    # Filter only wanted action units.
    if not au_list is None:
        wanted_au_indices = [ALLOWED_ACTION_UNITS.index(au) for au in au_list]
        au_presence = au_presence[wanted_au_indices]
        au_intensity = au_intensity[wanted_au_indices]
    # Delete temporary files created during the extraction.
    exit_code = os.system('rm -r {}.jpg >/dev/null'.format(timestamp))
    exit_code = os.system('rm -r processed/{}* >/dev/null'.format(timestamp))
    os.chdir(current_dir)
    return au_presence, au_intensity
def stack_action_units(image):
    """
    Adds one channel to the image containing the 17 action unit intensities
    that OpenFace detects. The 17 intensity values are spread evenly spaced
    throughout the channel.
    """
    # NOTE(review): assumes a square image (height == width); the helper
    # asserts shape[0] == shape[1].
    size = image.shape[0]
    _, au_intensities = extract_action_units(image)
    au_channel = np.zeros((size, size, 1))
    indices = _get_au_embedding_indices(image.shape)
    # Scatter each AU intensity at its pre-computed (row, col) position.
    for au, rowcol in enumerate(indices):
        row, col = rowcol
        au_channel[row, col] = au_intensities[au]
    # Append the AU channel as the last channel of the image.
    image = np.concatenate([image, au_channel], axis=-1)
    return image
def unstack_action_units(image):
    """
    Unstacks action units from the fourth channel embedding and returns
    the original image with three channels as well as a vector containing
    the 17 action unit intensities.
    """
    indices = _get_au_embedding_indices(image.shape)
    au_channel = image[:, :, -1]
    # Bug fix: keep only the three original color channels. The original
    # sliced `[:, :, :4]`, which also kept the embedded AU channel and
    # contradicted the documented three-channel return value.
    image = image[:, :, :3]
    au_intensities = np.zeros(17)
    # Gather each AU intensity back from its embedding position.
    for au, rowcol in enumerate(indices):
        row, col = rowcol
        au_intensities[au] += au_channel[row, col]
    return image, au_intensities
def _get_au_embedding_indices(shape):
assert shape[0] == shape[1]
size = shape[0]
spacing = int(size ** 2 / 17)
offset = int((size ** 2 - spacing * 17) / 2)
indices = []
for i in range(1, 17):
location = i * spacing + offset
col = int(location / size)
row = np.mod(location, size)
indices.append((row, col))
return indices
def extract_pose(np_img):
    """Extracts pitch, yaw and roll from a face image.
    Args:
        np_img -- numpy array of shape (height, width, num_channels) containing the image.
    Returns:
        pose_params -- numpy array containing the extracted pose parameters: [Rx, Ry, Rz]
    """
    # Prepare extraction: OpenFace must be invoked from its own directory.
    current_dir = os.getcwd()
    os.chdir(OPEN_FACE_BINARY_PATH)
    timestamp = datetime.now().timestamp()
    # Run OpenFace on a temporary JPEG of the image.
    # NOTE(review): this scales the caller's array *in place* and assumes
    # float input in [0, 1] — consider `np_img = np_img * 255` instead.
    np_img *= 255
    np_img = np_img.astype(np.uint8)
    imsave('{}.jpg'.format(timestamp), np_img)
    exit_code = os.system('./FaceLandmarkImg -f {}.jpg >/dev/null'.format(timestamp))
    csv_data = pd.read_csv('processed/{}.csv'.format(timestamp), sep=',\s', engine='python')
    pose_params = csv_data[['pose_Rx', 'pose_Ry', 'pose_Rz']].iloc[0].tolist()
    # Convert pose parameters to a numpy array.
    pose_params = np.array(pose_params)
    # Delete temporary files created during the extraction.
    exit_code = os.system('rm -r {}.jpg >/dev/null'.format(timestamp))
    exit_code = os.system('rm -r processed/{}* >/dev/null'.format(timestamp))
    os.chdir(current_dir)
    return pose_params
def align_face(np_img, mask=True):
    """Uses OpenFace to align the face.
    Args:
        np_img -- numpy array of shape (height, width, num_channels) containing the image.
        mask -- if True, a mask is added to the face resulting in a black background.
    """
    # Prepare extraction: OpenFace must be invoked from its own directory.
    current_dir = os.getcwd()
    os.chdir(OPEN_FACE_BINARY_PATH)
    timestamp = datetime.now().timestamp()
    # Masking appears to be OpenFace's default; '-nomask' disables it.
    mask_param = '-nomask' if mask is False else ''
    # Align and mask face using OpenFace.
    # NOTE(review): scales the caller's array in place; assumes float
    # input in [0, 1] — consider an out-of-place multiply.
    np_img *= 255
    np_img = np_img.astype(np.uint8)
    imsave('{}.jpg'.format(timestamp), np_img)
    exit_code = os.system(
        './FaceLandmarkImg -f {}.jpg -wild -simalign -simsize 192 {} -format_aligned jpg >/dev/null'.format(timestamp,
                                                                                                            mask_param))
    aligned_img = imread('processed/{}_aligned/face_det_000000.jpg'.format(timestamp))
    # Scale back to the [0, 1] float range.
    aligned_img = aligned_img / 255.
    # Delete temporary files created during face alignment.
    exit_code = os.system('rm -r {}.jpg >/dev/null'.format(timestamp))
    exit_code = os.system('rm -r processed/{}* >/dev/null'.format(timestamp))
    os.chdir(current_dir)
    return aligned_img
|
17,848 | 3cd1a44ef7f0d205e334056dbf85d9ec0583c43c | import pandas as p
# Four single-subject grade tables for the same three students.
dt1 = {'Student': ['Ice Bear', 'Panda', 'Grizzly'], 'Math': [80, 95, 79]}
dfdt1 = p.DataFrame(dt1, columns=['Student', 'Math'])

dt2 = {'Student': ['Ice Bear', 'Panda', 'Grizzly'], 'Electronics': [85, 81, 83]}
dfdt2 = p.DataFrame(dt2, columns=['Student', 'Electronics'])

dt3 = {'Student': ['Ice Bear', 'Panda', 'Grizzly'], 'GEAS': [90, 79, 93]}
dfdt3 = p.DataFrame(dt3, columns=['Student', 'GEAS'])

dt4 = {'Student': ['Ice Bear', 'Panda', 'Grizzly'], 'ESAT': [93, 89, 99]}
dfdt4 = p.DataFrame(dt4, columns=['Student', 'ESAT'])

# Outer-join the tables pairwise on the shared 'Student' column, then join
# the two halves into one table holding all four subjects.
Dt5 = p.merge(dfdt1, dfdt2, how='outer')
Dt6 = p.merge(dfdt3, dfdt4, how='outer')
MergedData = p.merge(Dt5, Dt6, how='outer')
17,849 | 3d00c5e62b0c8e3ed1acb4dd82ecf2ceced00ea6 | from app.database import *
import datetime
import ujson
import io
import tarfile
import os
import re # regex
defaultTables = [Statuses, Items, Buildings, Users, Records, Revisions]
partition = 1000
backup_dir = os.path.join("backup") # need check
# mail [].sort(key = lambda s: s[2])
# archiveName -> *.tar.gz
# fileName -> *.json
# tables -> [Users]
# tablenames -> ["users"]
# Matches backup member names such as "users.json" or "users_1-1000.json"
# and captures the leading table name.
pattern = re.compile(r"^(?P<tableName>[a-z]+)(_\d+-\d+)?.json$")


def convertTableName(fileName: str):
    """Strip the partition suffix and extension from a backup file name.

    'buildings.json' -> 'buildings'
    'users_1-1000.json' -> 'users'

    Returns None when the name does not look like a backup member.
    """
    match = pattern.match(fileName)
    return match.group("tableName") if match else None
def dbReprTest():
    """Sanity-check that eval(repr(row)) round-trips the first row of each
    default table: prints a per-table verdict and returns the conjunction.

    NOTE(review): relies on each model's __repr__ emitting evaluable
    constructor calls, and eval() of those strings — test/dev use only.
    """
    import sqlalchemy
    import flask_sqlalchemy
    def valid(value):
        # Skip ORM bookkeeping attributes and list-valued relationships
        # when comparing instance __dict__ contents.
        return type(value) not in [list, sqlalchemy.orm.state.InstanceState, flask_sqlalchemy.model.DefaultMeta]
    b = True
    for t in defaultTables:
        obj1 = t.query.first()
        obj2 = eval(repr(obj1))
        d1 = {key: value for key, value in obj1.__dict__.items() if valid(value)}
        d2 = {key: value for key, value in obj2.__dict__.items() if valid(value)}
        print(t.__tablename__, d1 == d2)
        b = b and (d1 == d2)
    return b
def writeArchive(archive: tarfile.TarFile, fileName: str, data: dict) -> None:
    """Serialize `data` as UTF-8 JSON and append it to the open tar archive
    under the member name `fileName`.

    Fix: the `archive` parameter was annotated with the `tarfile` *module*
    itself; the intended type is tarfile.TarFile.
    """
    s = ujson.dumps(data, ensure_ascii=False)
    encoded = s.encode("utf-8")
    stream = io.BytesIO(encoded)
    tarinfo = tarfile.TarInfo(name=fileName)
    # addfile() reads exactly `size` bytes from the stream.
    tarinfo.size = len(encoded)
    archive.addfile(tarinfo, stream)
def readArchive(archive: tarfile.TarFile, fileName: str) -> dict:
    """Extract member `fileName` from the archive and parse it as UTF-8 JSON."""
    raw = archive.extractfile(fileName).read()
    return ujson.loads(raw.decode("utf-8"))
def getBackups() -> list:
    """Return the list of available backup archives.

    TODO: not implemented yet — currently returns None despite the `list`
    annotation (likely meant to scan `backup_dir`).
    """
    pass
def getFileNames(archiveName: str) -> list:
    """Return the member names stored in an xz-compressed tar archive."""
    archive = tarfile.open(archiveName, "r:xz")
    try:
        return archive.getnames()
    finally:
        archive.close()
def getTableNames(archiveName: str) -> list:
    """Return the distinct table names referenced by the archive's members
    (None appears for members that are not backup files)."""
    uniqueNames = {convertTableName(name) for name in getFileNames(archiveName)}
    return list(uniqueNames)
def backup(tables: list[db.Model] = None): # path not fix
    """Dump the given tables (default: all defaultTables) as JSON members
    of a new xz-compressed tar archive named Backup_<time>.tar.xz.

    Tables with more than `partition` rows are split into id-range files
    like users_1-1000.json; rows are serialized via repr().
    """
    # NOTE(review): `timeformat` is not defined in this file — presumably
    # provided by `from app.database import *`; verify.
    archiveName = "Backup_{time}.tar.xz".format(
        time=datetime.datetime.now().strftime(timeformat))
    if tables is None:
        tables = defaultTables
    else:
        # Ignore anything that is not one of the known tables.
        tables = filter(defaultTables.__contains__, tables)
    with tarfile.open(archiveName, "w:xz") as archive:
        for t in tables:
            # NOTE(review): `max` shadows the builtin, and this raises
            # AttributeError when the table is empty (first() is None).
            max = t.query.order_by(t.id.desc()).first().id
            if max <= partition:
                # Small table: one JSON file with every row.
                fileName = "{tablename}.json".format(tablename=t.__tablename__)
                final = dict()
                final[t.__tablename__] = [repr(col) for col in t.query.all()]
                writeArchive(archive, fileName, final)
            else:
                # Large table: one JSON file per id-range partition.
                for i in range(1, max+1, partition):
                    fileName = "{tablename}_{start}-{end}.json".format(
                        tablename=t.__tablename__,
                        start=i, end=i-1+partition)
                    final = dict()
                    final[t.__tablename__] = [repr(column) for column in t.query.filter(
                        t.id.between(i, i-1+partition)).all()]
                    writeArchive(archive, fileName, final)
    print("Backup finished, file: {}".format(archiveName))
# not finished
def restore(archiveName: str, tables: list[db.Model] = None):
    """Load rows from a backup archive back into the database, merging each
    row by primary key and committing once at the end.

    SECURITY(review): eval() executes arbitrary code from the archive
    contents — only restore archives produced by backup() that you trust.
    """
    if tables is None:
        tables = defaultTables
    tablenames = [t.__tablename__ for t in tables]
    with tarfile.open(archiveName, "r:xz") as archive:
        for fileName in archive.getnames():
            # Only restore members that belong to a requested table.
            if convertTableName(fileName) in tablenames:
                d = readArchive(archive, fileName)
                # Each member holds {tablename: [repr(row), ...]}.
                l = list(d.values())[0]
                for o in l:
                    # Rows were written via repr(); eval reconstructs them.
                    db.session.merge(eval(o))
    db.session.commit()
|
17,850 | cadfb97c2b280e55e448ed2c7af0c0af7a0e7637 | #! -*- coding: utf-8 -*-
import json
import os
class GeneratorCustomInitCode(object):
    """Builds form-initialization code by substituting filter and rule data
    (as JSON) into template files stored next to this module.

    NOTE(review): Python 2 only — relies on `unicode` and `str.decode`.
    """
    def __init__(self):
        # constructor
        super(GeneratorCustomInitCode, self).__init__()
    def getInitCodeWithFilter(self, tableFilter, rules):
        # Render the "with filter" template, replacing the {optfilter},
        # {filter} and {rules} placeholders with JSON payloads.
        optFilter = self.formatOptionFilter(tableFilter)
        tableFilterFormated = self.formatTableFilter(tableFilter)
        initCode = self.getTemplateInitCodeWithFilter()
        initCode = initCode.replace(
            '{optfilter}',
            json.dumps(optFilter, ensure_ascii=False)
        )
        initCode = initCode.replace(
            '{filter}',
            json.dumps(tableFilterFormated, ensure_ascii=False)
        )
        initCode = initCode.replace(
            '{rules}',
            json.dumps(rules, ensure_ascii=False)
        )
        return initCode
    def getInitCodeNotFilter(self, rules):
        # Render the "no filter" template with only the {rules} payload.
        initCode = self.getTemplateInitCodeNotFilter()
        initCode = initCode.replace(
            '{rules}',
            json.dumps(rules, ensure_ascii=False)
        )
        return initCode
    def formatTableFilter(self, tableFilter):
        # Map each row's second column to its first: {line[1]: line[0]}.
        tableFilterFormated = {}
        for line in tableFilter:
            tableFilterFormated[line[1]] = line[0]
        return tableFilterFormated
    def formatOptionFilter(self, tableFilter):
        # Map the third column to the "hundreds" portion of the first
        # column, i.e. line[0] // 100.
        optFilter = {}
        for line in tableFilter:
            optFilter[unicode(line[2])] = ((line[0]-(line[0]%100))/100)
        return optFilter
    def getTemplateInitCodeNotFilter(self):
        # Read the "no filter" template file as UTF-8 text (py2-style decode).
        initCodeTemplate = u""
        pathCode = os.path.join(
            os.path.dirname(__file__),
            'formInitCodeNotFilterTemplate'
        )
        codeFile = open(pathCode, "r")
        for line in codeFile.readlines():
            initCodeTemplate += line.decode("utf-8")
        codeFile.close()
        return initCodeTemplate
    def getTemplateInitCodeWithFilter(self):
        # Same as above, but for the "with filter" template.
        initCodeTemplate = u""
        pathCode = os.path.join(
            os.path.dirname(__file__),
            'formInitCodeWithFilterTemplate'
        )
        codeFile = open(pathCode, "r")
        for line in codeFile.readlines():
            initCodeTemplate += line.decode("utf-8")
        codeFile.close()
        return initCodeTemplate
17,851 | 2e03a60d27bce45b5872b2547a0923412af896ec | def solver(arr, n, k):
out = 999999 # INT_MAX
arr.sort()
# prev_sum maintains sum of all values before the current index, also
# exluding the current value
prev_sum = 0
for i in range(n):
sum_before = prev_sum
for j in range(n - 1, i, -1):
if arr[j] - arr[i] > k:
excess_val = arr[j] - arr[i] - k
sum_before += excess_val
else:
# as array is sorted so if above condition is satisfied then
# we don't need to go back more, as the value of arr[j] will
# be less
break
out = min(out, sum_before)
prev_sum += arr[i]
return out
# Competitive-programming driver: first line is the test-case count T; each
# case supplies "n k" on one line and the n array values on the next.
T = int(input())
for _ in range(T):
    n, k = map(int, input().split())
    arr = list(map(int, input().split()))
    print(solver(arr, n, k))
|
17,852 | 74321446ed669799abfe9d15c2e1c5adda6a9cca | # -*- coding: utf-8 -*-
# date: 2020/11/11 22:13
from django.urls import path, re_path, include
from . import views
from rest_framework.routers import SimpleRouter
# DRF SimpleRouter: generates the standard list/detail routes for the
# banner viewset under the "banner/" URL prefix.
router = SimpleRouter()
router.register(prefix='banner', viewset=views.BannerViewSet, basename='banner')
# Expose all router-generated routes at the app root.
urlpatterns = [
    path('', include(router.urls))
]
17,853 | 7fdabaa89dd60d942887a31d1cdb85f3be730455 | #! /usr/bin/env python
import os, sys, random, re, math
from utils import *
from os.path import *
# Python 2 script: for each genre given on the command line, load cached
# text features, then train and evaluate a regression model per fold
# (LIBLINEAR's L2 regression or LIBSVM's epsilon-SVR), writing per-fold
# predictions to a results directory.
sys.path.append('../thirdparty/liblinear/python')
sys.path.append('../thirdparty/libsvm/python')
LIBLINEAR = False
if LIBLINEAR:
    from liblinearutil import *
else:
    from svmutil import *
feature_list = ['ugr', 'str', 'inquirer', 'liwc']
feature_name = '_'.join(feature_list)
data_path = '../dataset_v2/t0/{0}'
cache_path = '../dataset_v2/t0/cache/{0}'
genres = sys.argv[1:]
# Dimensionality of each feature block; used to offset feature indices so
# the blocks can be concatenated into one sparse vector.
feature_dims = {'ugr':30000, 'str':10, 'inquirer':100, 'liwc':100}
for genre in genres:
    data_path_genre = data_path.format( genre )
    cache_path_genre = cache_path.format( genre )
    model_path = cache_path_genre + '/models/'
    result_path = cache_path_genre + '/results/'
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    if not os.path.exists(result_path):
        os.makedirs(result_path)
    print 'processing {0}'.format(genre)
    fname_fold = join(data_path_genre, 'folds.txt')
    fname_score = join(data_path_genre, 'labels.txt')
    folds = [ int(f) for f in get_content(fname_fold) ]
    #scores = read_scores_from_file_2(fname_score)
    scores = read_scores_from_file(fname_score)
    num = len(folds)
    features = [ {} for i in range(num) ]
    feat_idx_base = 0
    # Merge each feature block into the per-item sparse dicts, shifting
    # indices by the running base so blocks do not collide.
    for feature in feature_list:
        print 'read feature: ', feature
        fname_feature = join(cache_path_genre, 'features/{0}.feat'.format(feature))
        iter_features = read_features_from_file(fname_feature)
        for i in xrange(num):
            X = iter_features[i]
            feat_idxs = [ idx + feat_idx_base for idx in X.keys() ]
            feat_values = X.values()
            X = dict(zip(feat_idxs, feat_values))
            features[i] = dict( features[i].items() + X.items() )
        feat_idx_base += feature_dims[feature]
    # Filter out invalid items
    folds, features, scores = filter_out_invalid_items(folds, features, scores)
    # NOTE(review): `fold_ids` is not defined in this file — presumably
    # provided by `from utils import *`; verify.
    for fold_id in fold_ids:
        ###################################################################
        # partition
        y_train, x_train = get_train_data(scores, features, folds, fold_id)
        y_test, x_test = get_test_data(scores, features, folds, fold_id)
        y_dev, x_dev = get_dev_data(scores, features, folds, fold_id)
        x_train.extend(x_dev)
        y_train.extend(y_dev)
        print "Processing folder: ", fold_id
        print "Before Sampling: ", len(y_train), len(y_test)
        ###################################################################
        # classification: training and testing
        # Down sampling for genres has too much examples
        x_train, y_train = sample_dataset(x_train, y_train, 20000)
        print "After Sampling: ", len(y_train), len(y_test)
        # Disabled grid search over C (5-fold cross-validation); C = 1.0
        # is used instead.
        if False:
            mse = [0]*4 # mseuracy
            for cc in range(4):
                tmpC = math.pow(10, cc-2)
                print 'testing C = ', tmpC
                if LIBLINEAR:
                    mse[cc] = train(y_train, x_train, '-s 11 -v 5 -q -c {0}'.format(tmpC) )
                else:
                    mse[cc] = svm_train(y_train, x_train, '-s 3 -t 2 -v 5 -q -c {0}'.format(tmpC) )
            mse_i = mse.index(min(mse))
            best_c = math.pow(10, mse_i-2)
        else:
            best_c = 1.0
        print 'The best C is: ', best_c
        if LIBLINEAR:
            m = train(y_train, x_train, '-s 11 -q -c {0}'.format(best_c) )
            y_pred, mse_test, y_pred_2 = predict(y_test, x_test, m)
            model_name = '{0}/fold_{1}.{2}.liblinear.regression.model'.format(model_path, fold_id, feature_name);
            # NOTE(review): `model` is undefined here (NameError when
            # LIBLINEAR is True); liblinear's save_model(model_file_name,
            # model) suggests this should be save_model(model_name, m).
            save_model( model, m )
            out_file = '{0}/fold_{1}.{2}.liblinear.regression.pred'.format(result_path, fold_id, feature_name)
        else:
            m = svm_train(y_train, x_train, '-s 3 -t 2 -q -c {0}'.format(best_c) )
            y_pred, mse_test, y_pred_2 = svm_predict(y_test, x_test, m)
            model_name = '{0}/fold_{1}.{2}.libsvm.regression.model'.format(model_path, fold_id, feature_name);
            svm_save_model( model_name, m)
            out_file = '{0}/fold_{1}.{2}.libsvm.regression.pred'.format(result_path, fold_id, feature_name)
        # One line per test item: gold score, prediction, decision value.
        fout = open(out_file, 'w+')
        for i in range(len(y_test)):
            ostr = "{0} {1} {2}\n".format(y_test[i], y_pred[i], y_pred_2[i][0])
            fout.write(ostr)
        fout.close()
|
17,854 | fa360a9c67902d894c8107cc5abfc4fe38cf024c | from Domain.librarie import creeazaVanzare
from Logic.CRUD import stergeVanzare, modificaVanzare, adaugaVanzare
from UserInterface.console import showAll
def comenzi(lista):
    """Interactive command loop over the sales list `lista`.

    Commands (several may be chained with ';', arguments comma-separated):
    add,<id>,<title>,<genre>,<price>,<discount> / delete,<id> /
    update,<id>,<title>,<genre>,<price>,<discount> / showall / help / stop.
    """
    while True:
        try:
            print("help")
            optiune = input("Dati comanda ")
            if optiune == "help":
                print("Adaugare obiect nou->add")
                print("Pentru a modifica->update")
                print("Sterge obiect->delete")
                print("Pentru a afisa obiectele-> showall")
                print("Stop pentru iesire")
            elif optiune == "stop":
                break
            else:
                # Split chained commands, then each command's arguments.
                action = optiune.split(";")
                for i in range(len(action)):
                    comanda = action[i].split(",")
                    if comanda[0] == "add":
                        if len(comanda) != 6:
                            raise ValueError("Datele nu sunt introduse corect! ")
                        id = int(comanda[1])
                        titlucarte = comanda[2]
                        gencarte = comanda[3]
                        pret = float(comanda[4])
                        tipreducere= comanda[5]
                        lista = adaugaVanzare(id, titlucarte, gencarte, pret, tipreducere, lista)
                    elif comanda[0] == "delete":
                        id = int(comanda[1])
                        lista = stergeVanzare(id, lista)
                        print("Obiectul a fost sters")
                    elif comanda[0] == "update":
                        if len(comanda) != 6:
                            raise ValueError("Datele nu sunt introduse corect! ")
                        id = int(comanda[1])
                        titlucarte = comanda[2]
                        gencarte = comanda[3]
                        pret = float(comanda[4])
                        tipreducere = comanda[5]
                        lista = modificaVanzare(id, titlucarte, gencarte, pret, tipreducere, lista)
                        print("Datele au fost modificate")
                    elif comanda[0] == "showall":
                        showAll(lista)
                    else:
                        print("Incorect!")
        except ValueError as ve:
            # Bad numeric input or an explicit validation error lands here.
            print("Eroare: {}".format(ve))
17,855 | 83a5ecbb5c0a08050eef1bfb4c5249e0439b9d18 | #!/usr/bin/env python
#
# Copyright (C) 2015 The Regents of the University of California.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
#
# Submit this script to spark like so:
# spark-submit --master=local[4] spark-recordcount.py --start-time=1451606400 --end-time=1451779200 -t updates -c route-views.sg
#
import argparse
import csv
from datetime import datetime
import json
import math
from pyspark import SparkConf, SparkContext
import pybgpstream
import sys
try:
import urllib.request as urllib_request
except:
import urllib2 as urllib_request
# Output one data point per day
RESULT_GRANULARITY = 3600*24
# When processing RIBs, split days into 4hr chunks for RV, 8hrs for RIS
RV_RIB_PROCESSING_GRANULARITY = 3600*4
RIS_RIB_PROCESSING_GRANULARITY = 3600*8
# When processing updates, split days into 2hr chunks
UPD_PROCESSING_GRANULARITY = 3600*2
# The BGPStream broker service URL to query to get collector list from
COLLECTORS_URL = "http://bgpstream.caida.org/broker/meta/collectors"
# We only care about the two major projects
PROJECTS = ('routeviews', 'ris')
# Query the BGPStream broker and identify the collectors that are available
def get_collectors():
    """Query the BGPStream broker and return the names of all collectors
    that belong to the RouteViews / RIS projects."""
    response = urllib_request.urlopen(COLLECTORS_URL)
    collectors = json.load(response)['data']['collectors']
    return [name for name in collectors
            if collectors[name]['project'] in PROJECTS]
# takes a record and an elem and builds a peer signature string that is globally
# unique.
def peer_signature(record, elem):
    """Build a globally unique identifier for a BGP peer: the
    (project, collector, peer ASN, peer address) tuple."""
    source = (record.project, record.collector)
    peer = (elem.peer_asn, elem.peer_address)
    return source + peer
def run_bgpstream(args):
    """Process one (collector, start_time, end_time, data_type) config with
    BGPStream, counting elems and records per peer.

    Returns rows of the form
    ((slice_start, collector, peer_sig),
     [elem_cnt, peer_record_cnt, coll_record_cnt]).
    """
    (collector, start_time, end_time, data_type) = args
    # initialize and configure BGPStream
    stream = pybgpstream.BGPStream(
        collector=collector,
        from_time=start_time,
        until_time=end_time-1,
        record_type=data_type
    )
    # per-peer data
    peers_data = {}
    # loop over all records in the stream
    for rec in stream.records():
        # to track the peers that have elems in this record
        peer_signatures = set()
        # loop over all elems in the record
        for elem in rec:
            # create a peer signature for this elem
            sig = peer_signature(rec, elem)
            peer_signatures.add(sig)
            # if this is the first time we have ever seen this peer, create
            # an empty result: (elem_cnt, peer_record_cnt, coll_record_cnt)
            if sig not in peers_data:
                peers_data[sig] = [0, 0, 0]
            peers_data[sig][0] += 1 # increment elem cnt for this peer
        # done with elems, increment the 'coll_record_cnt' field for just
        # one peer that was present in this record (allows a true, per-collector
        # count of records since each record can contain elems for many peers)
        if len(peer_signatures):
            first = True
            for sig in peer_signatures: # increment peer_record_cnt for all
                if first:
                    peers_data[sig][2] += 1 # increment the coll_record_cnt
                    first = False
                peers_data[sig][1] += 1
    # the time in the output row is truncated down to a multiple of
    # RESULT_GRANULARITY so that slices can be merged correctly
    start_time = \
        int(math.floor(start_time/RESULT_GRANULARITY) * RESULT_GRANULARITY)
    # for each peer that we processed data for, create an output row
    return [((start_time, collector, p), (peers_data[p])) for p in peers_data]
# takes a start time, an end time, and a partition length and splits the time
# range up into slices, each of len seconds. the interval is assumed to be a
# multiple of the len
def partition_time(start_time, end_time, slice_len):
    """Split [start_time, end_time) into consecutive slices of slice_len
    seconds each; the interval is assumed to be a multiple of slice_len.

    Fix: the slice-length parameter was named `len`, shadowing the builtin
    inside the function. All call sites in this file pass it positionally.
    """
    slices = []
    while start_time < end_time:
        slices.append((start_time, start_time + slice_len))
        start_time += slice_len
    return slices
# takes two result tuples, each of the format:
# (elem_cnt, peer_record_cnt, coll_record_cnt)
# and returns a single result tuple which is the sum of the two inputs.
# len(result_x) is assumed to be the same length as len(result_y)
def merge_results(result_x, result_y):
    """Element-wise sum of two result sequences; both inputs are assumed
    to have the same length."""
    return [x + y for x, y in zip(result_x, result_y)]
# takes a result row:
# ((time, collector, peer), (elem_cnt, peer_record_cnt, coll_record_cnt))
# and returns
# ((time, collector), (elem_cnt, peer_record_cnt, coll_record_cnt))
def map_per_collector(row):
    """Drop the peer component of the key:
    ((time, collector, peer), counts) -> ((time, collector), counts)."""
    (ts, coll, _peer), counts = row
    return (ts, coll), counts
# takes a result row:
# ((time, collector), (elem_cnt, peer_record_cnt, coll_record_cnt))
# and returns
# ((time), (elem_cnt, peer_record_cnt, coll_record_cnt))
def map_per_time(row):
    """Keep only the timestamp from the key:
    ((time, collector), counts) -> (time, counts)."""
    key, counts = row
    return key[0], counts
def analyze(start_time, end_time, data_type, outdir,
            collector=None, num_cores=None, memory=None):
    """Use Spark + BGPStream to count BGP elems/records between the two
    unix timestamps and write per-peer, per-collector and global CSV rows
    into `outdir`.

    data_type is 'ribs' or 'updates'; `collector` restricts the run to one
    collector; num_cores / memory (GB per executor) tune the Spark job.
    """
    # round start time down to nearest day
    start_time = \
        int(math.floor(start_time/RESULT_GRANULARITY) * RESULT_GRANULARITY)
    # round end time up to nearest day
    rounded = int(math.floor(end_time/RESULT_GRANULARITY) * RESULT_GRANULARITY)
    if rounded != end_time:
        end_time = rounded + RESULT_GRANULARITY
    # generate a list of time slices to process
    time_slices = partition_time(start_time, end_time, RESULT_GRANULARITY)
    start_str = datetime.utcfromtimestamp(start_time).strftime('%Y-%m-%d')
    end_str = datetime.utcfromtimestamp(end_time).strftime('%Y-%m-%d')
    # establish the spark context
    conf = SparkConf()\
        .setAppName("ElemCounter.%s.%s-%s" % (data_type, start_str, end_str))\
        .set("spark.files.overwrite", "true")
    if memory:
        conf.set("spark.executor.memory", str(memory)+"g")
    sc = SparkContext(conf=conf)
    # either use the collector argument, or default to using all collectors
    # that the BGPStream broker knows about
    collectors = [collector]
    if not collector:
        collectors = get_collectors()
    # build our input for spark -- a set of BGPStream configurations to process
    # in parallel
    bs_configs = []
    for time_slice in time_slices:
        for collector in collectors:
            (start, end) = time_slice
            while start < end:
                duration = UPD_PROCESSING_GRANULARITY
                # Bug fix: this previously compared the *builtin* `type`
                # against 'ribs' (always false), so RIB runs were split at
                # the finer updates granularity instead of the RIB one.
                if data_type == 'ribs':
                    if 'rrc' in collector:
                        duration = RIS_RIB_PROCESSING_GRANULARITY
                    else:
                        duration = RV_RIB_PROCESSING_GRANULARITY
                slice_end = min(start+duration, end)
                bs_configs.append((collector, start, slice_end, data_type))
                start += duration
    # debugging
    sys.stderr.write(str(bs_configs) + "\n")
    # we need to instruct spark to slice up our input more aggressively than
    # it normally would since we know that each row will take some time to
    # process. to do this we either use 4x the number of cores available,
    # or we split once per row. Once per row will be most efficient, but we
    # have seem problems with the JVM exploding when numSlices is huge (it
    # tries to create thousands of threads...)
    slice_cnt = len(bs_configs)
    if num_cores:
        slice_cnt = num_cores*4
    # instruct spark to create an RDD from our BGPStream config list
    bs_rdd = sc.parallelize(bs_configs, numSlices=slice_cnt)
    # step 1: use BGPStream to process BGP data
    # output will be a list:
    # ((time, collector, peer), (elem_cnt, peer_record_cnt, coll_record_cnt))
    # the peer and collector record counts are separate as a single record
    # may have data for multiple peers, thus naively summing the per-peer
    # record counts would yield incorrect results
    raw_results = bs_rdd.flatMap(run_bgpstream)
    # since we split the processing by time, there will be several rows for
    # each peer.
    reduced_time_collector_peer = raw_results.reduceByKey(merge_results)
    # we will use this result multiple times, so persist it
    reduced_time_collector_peer.persist()
    # collect the reduced time-collector-peer results back to the driver
    # we take results that are in the form:
    # ((time, collector, peer), (elem_cnt, peer_record_cnt, coll_record_cnt))
    # and map them into:
    # (time, collector, peer) => (elem_cnt, peer_record_cnt)
    final_time_collector_peer = reduced_time_collector_peer\
        .mapValues(lambda x: [x[0], x[1]]).collectAsMap()
    # take the time-collector-peer result and map it into a new RDD which
    # is time-collector. after the 'map' stage there will be duplicate
    # time-collector keys, so perform a reduction as we did before
    reduced_time_collector = reduced_time_collector_peer\
        .map(map_per_collector).reduceByKey(merge_results)
    reduced_time_collector.persist()
    # collect the reduced time-collector results back to the driver
    # we take results that are in the form:
    # ((time, collector), (elem_cnt, peer_record_cnt, coll_record_cnt))
    # and map them into:
    # (time, collector) => (elem_cnt, coll_record_cnt)
    final_time_collector = reduced_time_collector\
        .mapValues(lambda x: [x[0], x[2]]).collectAsMap()
    # take the time-collector result and map it into a new RDD which is keyed
    # by time only (i.e. a global view). again we need to reduce after the map
    # stage.
    reduced_time = reduced_time_collector.map(map_per_time)\
        .reduceByKey(merge_results)
    # collect the reduced time-only results back to the driver
    # we take results that are in the form:
    # (time, (elem_cnt, peer_record_cnt, coll_record_cnt))
    # and map them into:
    # time => (elem_cnt, coll_record_cnt)
    final_time = reduced_time.mapValues(lambda x: [x[0], x[2]]).collectAsMap()
    # build the output file name
    outfile = "%s/bgpstream-recordcounter.%s.%s-%s.csv" %\
        (outdir, data_type, start_str, end_str)
    # NOTE(review): 'wb' suits the csv module on Python 2 only; on Python 3
    # this raises TypeError (use open(outfile, 'w', newline='') there).
    with open(outfile, 'wb') as csvfile:
        w = csv.writer(csvfile)
        w.writerow(["Time", "Collector", "Peer", "#Elems", "#Records"])
        # write out the per-peer statistics
        for key in final_time_collector_peer:
            (ts, coll, peer) = key
            (elems, records) = final_time_collector_peer[key]
            w.writerow([ts, coll, "AS"+str(peer[2])+"-"+peer[3],
                        elems, records])
        # write out the per-collector statistics
        for key in final_time_collector:
            (ts, coll) = key
            (elems, records) = final_time_collector[key]
            w.writerow([ts, coll, "ALL-PEERS", elems, records])
        # write out the global statistics
        for key in final_time:
            (ts) = key
            (elems, records) = final_time[key]
            w.writerow([ts, "ALL-COLLECTORS", "ALL-PEERS", elems, records])
    reduced_time_collector_peer.unpersist()
    reduced_time_collector.unpersist()
    sc.stop()
    return
def main():
    """Parse the command line and hand the options to analyze().

    All options are forwarded as keyword arguments, so the option names
    (start_time, end_time, collector, num_cores, memory, outdir, data_type)
    must match analyze()'s parameters.
    """
    cli = argparse.ArgumentParser(description="""
    Script that uses PyBGPStream and Spark to analyze historical BGP data and
    extract high-level statistics.
    """)
    cli.add_argument('-s', '--start-time', nargs='?', required=True,
                     type=int,
                     help='Start time. (Rounded down to the nearest day.)')
    cli.add_argument('-e', '--end-time', nargs='?', required=True,
                     type=int,
                     help='End time. (Rounded up to the nearest day.)')
    cli.add_argument('-c', '--collector', nargs='?', required=False,
                     help='Analyze only a single collector')
    cli.add_argument('-n', '--num-cores', nargs='?', required=False,
                     type=int,
                     help="Number of CPUs in the cluster (used to determine"
                          " how to partition the processing).")
    cli.add_argument('-m', '--memory', nargs='?', required=False,
                     type=int,
                     help="Amount of RAM available to each worker.")
    cli.add_argument('-o', '--outdir', nargs='?', required=False,
                     default='./',
                     help="Output directory.")
    cli.add_argument('-t', '--data-type', nargs='?', required=True,
                     help="One of 'ribs' or 'updates'.",
                     choices=['ribs', 'updates'])
    analyze(**vars(cli.parse_args()))


if __name__ == "__main__":
    main()
|
17,856 | 37df3ac9db75628b1b02745fe3ac88434ba02eac | from math import sqrt
mx = 10
# All (a, b, hypotenuse) candidates with 0 <= a <= b < mx.
candidates = ((a, b, sqrt(a ** 2 + b ** 2)) for a in range(mx) for b in range(a, mx))
# Keep only the candidates whose hypotenuse is an integer, coercing it to int.
triples = [(a, b, int(c)) for a, b, c in candidates if c.is_integer()]
print(triples)
# Greedy: pick dishes in order of combined swing 2*a + b (taking a dish both
# denies Aoki its `a` and gains Takahashi `a + b`); count moves until
# Takahashi's total exceeds what remains for Aoki.
N = int(input())
dishes = [list(map(int, input().split())) for _ in range(N)]

aoki = sum(a for a, _ in dishes)
dishes.sort(key=lambda d: 2 * d[0] + d[1], reverse=True)

taka = 0
moves = 0
for a, b in dishes:
    aoki -= a
    taka += a + b
    moves += 1
    if taka > aoki:
        break
print(moves)
17,858 | 59f7b26fb528113cfd980dc9d7ebc5cbfb2a86b0 | #!/usr/bin/python
import hashlib
import os
import time

# Paths inside the user's Dropbox used as a primitive file-based command
# channel: commands are written to input.txt, results to output.txt.
dropbox_path = os.path.expanduser("~/Dropbox")
input_path = dropbox_path + "/jarvis/input.txt"
output_path = dropbox_path + "/jarvis/output.txt"

def get_hash():
    # Return the SHA-512 hex digest of the current input-file contents.
    # NOTE: Python 2 code (print statements below); read() returns str here.
    sha512 = hashlib.sha512()
    input_file = open(input_path)
    sha512.update(input_file.read())
    input_file.close()
    return sha512.hexdigest()

def monitor():
    # Poll the input file every 10 seconds; when its hash changes, re-read it
    # and treat the first line as the operation to perform.
    new_hash = get_hash()
    while 1:
        time.sleep(10)
        old_hash = new_hash
        new_hash = get_hash()
        if old_hash != new_hash:
            print "File changed!"
            input_file = open(input_path)
            data = input_file.read().splitlines()
            input_file.close()
            # First line selects the operation; the rest is its payload.
            operation = data.pop(0)
            # do stuff based on operation

if __name__ == "__main__":
    print "Monitoring for changes..."
    monitor()
|
17,859 | 273760d1684551a64781520f1a80190dc664351c | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the Android WebViewCache plugin."""
import unittest
from plaso.parsers.sqlite_plugins import android_webviewcache
from tests.parsers.sqlite_plugins import test_lib
class AndroidWebViewCache(test_lib.SQLitePluginTestCase):
  """Tests for the Android WebViewCache database plugin."""

  def testProcess(self):
    """Test the Process function on a WebViewCache file."""
    plugin = android_webviewcache.AndroidWebViewCachePlugin()
    storage_writer = self._ParseDatabaseFileWithPlugin(
        ['webviewCache.db'], plugin)

    # The test database is expected to yield exactly 10 event data
    # containers and no extraction or recovery warnings.
    number_of_event_data = storage_writer.GetNumberOfAttributeContainers(
        'event_data')
    self.assertEqual(number_of_event_data, 10)

    number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
        'extraction_warning')
    self.assertEqual(number_of_warnings, 0)

    number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
        'recovery_warning')
    self.assertEqual(number_of_warnings, 0)

    # Spot-check the first event data container against known values from
    # the test fixture.
    expected_event_values = {
        'content_length': 1821,
        'data_type': 'android:webviewcache',
        'expiration_time': '2013-03-28T09:48:18.000+00:00',
        'last_modified_time': None,
        'url': (
            'https://apps.skypeassets.com/static/skype.skypeloginstatic/css/'
            'print.css?_version=1.15')}

    event_data = storage_writer.GetAttributeContainerByIndex('event_data', 0)
    self.CheckEventData(event_data, expected_event_values)
if __name__ == '__main__':
unittest.main()
|
17,860 | d0cf907960dc1246554f248ab7e299fe086333ae | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# create by zhangsp 2017-07-26
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
def icon(req):
    """Render the icons page for a logged-in session.

    Sessions without a ``sess_username`` key are redirected to the login view.
    """
    if req.session.get("sess_username", False):
        return render_to_response('icons.html')
    return HttpResponseRedirect("/login/")
17,861 | d46d8e7832f3d1bfd961ce85e41b0100f815f45f | from socket import *
# Quiz client: connects to the quiz server, plays 10 question/answer rounds,
# then prints the final score the server reports.
serverName = '127.0.0.1'
serverPort = 6000
b = 1024  # receive buffer size in bytes

clientSocket = socket(AF_INET, SOCK_STREAM)
clientSocket.connect((serverName, serverPort))

# The original repeated the receive/print/answer/send block ten times
# verbatim; a single loop produces byte-identical console output.
NUM_QUESTIONS = 10
for qnum in range(1, NUM_QUESTIONS + 1):
    question = clientSocket.recv(b)
    print('Question {}: '.format(qnum))
    print(question.decode())
    answer = input('Answer: ')
    clientSocket.send(answer.encode())

# After the last answer the server sends the score.
count = clientSocket.recv(b)
print('\nSCORE: ' + count.decode() + '\n')
clientSocket.close()
17,862 | b27e1335c90e01e33091768b317366a6fa85f9d6 | __author__ = 'kinkazma'
import socket
import select
import time
import tkinter
from threading import Thread
class Serveur(Thread):
    # Chat relay server thread: accepts TCP clients on port 8090 and
    # re-broadcasts every payload it receives to all *other* clients.

    def __init__(self):
        Thread.__init__(self)
        self.boucle = 1   # loop flag: set to 0 to make run() exit
        self.data=0       # last payload received (bytes once a client has sent)
        self.envoi=0      # most recently accepted client socket

    def run(self):
        PORT = 8090
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        print("[-]Creation du socket")
        # SO_REUSEADDR lets the server restart without waiting for TIME_WAIT.
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        s.bind(("", PORT))
        print("[-]Bindage du socket")
        s.listen(5)
        print("[-]Mise a l'écoute")
        client_conecter=[]
        while self.boucle:
            time.sleep(0.01)
            # Non-blocking accept: poll the listening socket for new clients.
            client_demande, wlist, xlist = select.select([s],[],[],0.1)
            for client in client_demande:
                client_conect,info = client.accept()
                print("[+] Client reçus :{0}".format(info))
                client_conecter.append(client_conect)
                self.envoi=client_conect
            # Poll connected clients for incoming data.
            client_a_lire, wilist, xelist= select.select(client_conecter, [], [], 0.1)
            for client in client_a_lire:
                self.data=client.recv(1024)
                # b'fin' is the disconnect sentinel.
                if self.data == b'fin':
                    client.close()
                    client_conecter.remove(client)
                # NOTE(review): recv() returns bytes, so these comparisons
                # against str ("\n", "") are always True on Python 3; the
                # broadcast therefore runs for every payload, including
                # b'fin' after its sender was removed above.
                if self.data != "\n" and self.data!= "":
                    for pigeon in client_conecter:
                        if pigeon != client:
                            pigeon.send(self.data)
                print("[+] Message : {0}".format(self.data.decode()))
        s.close()
def ChargeOther(box, message, who="Client"):
    """Append *message* to the (normally read-only) chat box, attributed to *who*.

    Empty messages, bare newlines and the initial 0 placeholder are ignored.
    """
    if message in ("", "\n", 0):
        return
    box.config(state=tkinter.NORMAL)
    box.insert(tkinter.END, "{0} :> {1}".format(who, message))
    box.config(state=tkinter.DISABLED)
    box.yview('end')
class ReloadEntry(Thread):
    """Background thread that polls the server for new data and displays it.

    Watches ``serv.data`` twice a second while ``serv.boucle`` is truthy and
    appends any new payload to *box* via ChargeOther().
    """

    def __init__(self, box, serv):
        Thread.__init__(self)
        self.message = ""   # last payload already displayed
        self.box = box
        self.server = serv

    def run(self):
        while self.server.boucle:
            time.sleep(0.5)
            # BUG FIX: the original read ``self.server.con``, an attribute
            # Serveur never defines (it stores received payloads in ``data``),
            # so the first comparison raised AttributeError.
            if self.message != self.server.data:
                self.message = self.server.data
                # recv() payloads are bytes; decode before displaying.
                payload = self.message.decode() if isinstance(self.message, bytes) else self.message
                ChargeOther(self.box, payload)
|
17,863 | 3c2f82cf7279564543f5cdc5b721fd73ca327b10 | #!/usr/local/bin/python3
## Written by Atul Kakrana - Kakrana@udel.edu
## Script takes a (transcript) summary file with start,end,chr and strand info and finds overlap with GTF file
## Returns file with overlapping genes, type of overlap and orientation of overlap
## Type of overlap - 5' Annotated genes overlaps at 5' of transcript 3' overlaps at 3' of transcript 8' completly enclaves transcript
## 0' transcript overlaps annotated gene
## Orientation - F: Overlapping gene is at same strand as transcript R: both are on different strands
## USER SETTINGS #######################################
gtf = "Zea_mays.AGPv3.27.gtf"   ## Ensembl GTF annotation file to compare against
summary = "Summary.txt" ## Summary file with chr,start,stop and strand
delim = "\t" ## Delimiter for summary file
head = 'Y' ## Header in summary file: 'Y' else: 'N'
## 1-based column indexes into the summary file:
name = 2        ## transcript name column
chromo = 6      ## chromosome column
start = 3       ## transcript start column
end = 4         ## transcript end column
strand = 5      ## strand column ('+'/'-')
geneType = 20 ## Column for coding or non-coding, if info not available then mention 99
makeDB = 0 ## If DB for GTF file is not present in present directory then make : 1 else: 0
## IMPORTS ############################################
import os,sys
import sqlite3
## FUNCTIONS ##########################################
def overlapCheck(summary,conn,annotable):
    """For each transcript in *summary*, find annotated genes overlapping it.

    Queries the sqlite table *annotable* via *conn* and writes one row per
    transcript to <summary>.overlap.txt with the overlapping gene names, an
    overlap flag per gene (5/3: gene overlaps one end, 0: gene enclosed by
    the transcript, 8: gene encloses the transcript) and an orientation per
    gene (F: same strand, R: opposite strand). Returns the output filename.
    Uses the module-level settings head, delim, name, chromo, start, end
    and strand for parsing the summary file.
    """
    print("Function: overlapCheck")
    ## Test DB
    cur = conn.cursor()
    ## Test
    # cur.execute("PRAGMA table_info(%s)" % (annotable))
    # desc = cur.fetchall()
    # print(desc)
    # cur.execute("SELECT geneName FROM %s LIMIT 10" % (annotable))
    # test = cur.fetchall()
    # print(test)
    outFile = "%s.overlap.txt" % summary.rpartition(".")[0]
    fh_out = open(outFile,'w')
    fh_out.write("Trans\toverlapGenes\toverlapFLags\toverlapConf\n")
    fh_in = open(summary,'r')
    if head == "Y":
        fh_in.readline()
    sumRead = fh_in.readlines()
    for i in sumRead:
        geneList = [] ## Store overlap genes
        flagList = [] ## Store diffrent overlap flags - 5',3', 0' [if gene is enclaved within our transcript] and 8' [if gene extends our transcript at both ends]
        confList = [] ## Store overlap configuration
        resList = [] ## Store a single merged list fo results
        ent = i.split(delim)
        trans = ent[name-1]
        achr = ent[chromo-1]
        astart = ent[start-1]
        aend = ent[end-1]
        astrand = ent[strand-1]
        # print("\n***Entry:",trans,achr,astart,aend,astrand)
        ## Gene end overlaps (gene's end falls inside the transcript span)
        cur.execute("SELECT * FROM %s WHERE chr = '%s' AND end between %s and %s ORDER BY start asc" % (annotable,achr,astart,aend))
        prime5 = cur.fetchall()
        # print("5Prime\n%s" % prime5)
        tempP5 = [] ## Temp store gene names for checking later
        if prime5:
            # NOTE(review): the loop variable ``i`` shadows the outer summary
            # line here (and in the loops below); safe only because the outer
            # value was already unpacked above.
            for i in prime5:
                tempP5.append(i[4])
        ## Gene start overlaps (gene's start falls inside the transcript span)
        cur.execute("SELECT * FROM %s WHERE chr = '%s' AND start between %s and %s ORDER BY start asc" % (annotable,achr,astart,aend))
        prime3 = cur.fetchall()
        # print("3Prime\n%s" % (prime3))
        tempP3 = []
        if prime3:
            for i in prime3:
                tempP3.append(i[4])
        ## Gene that completly enclaves our transcript <------ ----trans---- -------->
        cur.execute("SELECT * FROM %s WHERE chr = '%s' AND start < %s AND end > %s ORDER BY start asc" % (annotable,achr,astart,aend))
        prime8 = cur.fetchall()
        # print("8Prime\n%s\n" % prime8)
        if prime5:
            for i in prime5:
                # A gene present in both result sets starts AND ends inside
                # the transcript, i.e. the transcript encloses it (flag 0).
                if i[4] in tempP3:
                    # print("Transcript enclaves the annotated gene'")
                    flag = 0
                    geneList.append(i[4])
                    flagList.append(flag)
                    if i[3] == astrand:
                        confList.append("F")
                    else:
                        confList.append("R")
                    # print(i)
                    # sys.exit()
                else:
                    # print("Gene overlaps only at one end")
                    # Flag depends on transcript strand: the gene's end lies
                    # at the transcript's 5' side on '+', 3' side on '-'.
                    if astrand == "+":
                        flag = 5
                    elif astrand == "-":
                        flag = 3
                    # print("Appending prime5:%s" % (i[4]))
                    geneList.append(i[4])
                    flagList.append(flag)
                    if i[3] == astrand:
                        confList.append("F")
                    else:
                        confList.append("R")
                    # print(i)
        if prime3:
            for i in prime3:
                # Genes already recorded via prime5 (enclosed genes) are
                # skipped so they are not double-counted.
                if i[4] not in tempP5:
                    # print("Gene Overlaps only at one end")
                    if astrand == "+":
                        flag = 3
                    elif astrand == "-":
                        flag = 5
                    # print("Appending prime3:%s" % (i[4]))
                    geneList.append(i[4])
                    flagList.append(flag)
                    if i[3] == astrand:
                        confList.append("F")
                    else:
                        confList.append("R")
                    # print(i)
        if prime8:
            for i in prime8:
                # print("Annotated gene enclaves our transcript")
                # print(i)
                flag = 8
                geneList.append(i[4])
                flagList.append(flag)
                if i[3] == astrand:
                    confList.append("F")
                else:
                    confList.append("R")
        # print("geneList",geneList,"flagList",flagList,"confList",confList)
        # NOTE(review): resList is built but never used afterwards.
        resList = list(zip(geneList,flagList,confList))
        # print("Final Results",resList)
        # print("FinalRes:%s\t%s\t%s\t%s\n" % (trans,','.join( str(x) for x in geneList), ','.join(str(x) for x in flagList), ','.join(str(x) for x in confList) ))
        if geneList:
            fh_out.write("%s\t%s\t%s\t%s\n" % (trans,','.join( str(x) for x in geneList), ','.join(str(x) for x in flagList), ','.join(str(x) for x in confList) ))
        else:
            ## There are no overlaps
            fh_out.write("%s\tNA\tNA\tNA\n" % (trans))
    fh_out.close()
    fh_in.close()
    print("Exiting function - overlapCheck\n")
    return outFile
def gtfParser(gtf):
    """Parse an Ensembl-style GTF file and return gene records as tuples.

    Each returned tuple is
    (chr, start, end, strand, gene_id, gene_version, gene_source, gene_type).
    Only lines whose first character is a digit (numbered chromosomes) and
    whose feature column is 'gene' are kept; entries without a gene_version
    attribute (e.g. miRNA lines) default the version to "1".
    """
    print("Function: gtfParser")
    records = []
    with open(gtf, 'r') as handle:
        for line in handle:
            if not line[0].isdigit():
                continue
            cols = line.split("\t")
            if cols[2] != "gene":
                continue
            chrom = cols[0]
            gstart = cols[3]
            gend = cols[4]
            gstrand = cols[6]
            attrs = cols[8].strip("\n").split(";")
            if len(attrs) == 5:
                ## Protein-coding style entry carrying a gene_version field
                gid = attrs[0].split()[1].replace('"', '')
                gver = attrs[1].split()[1].replace('"', '')
                gsource = attrs[2].split()[1].replace('"', '')
                gtype = attrs[3].split()[1].replace('"', '')
            elif len(attrs) == 4:
                ## e.g. miRNA entries lack gene_version; default it to "1"
                gid = attrs[0].split()[1].replace('"', '')
                gver = "1"
                gsource = attrs[1].split()[1].replace('"', '')
                gtype = attrs[2].split()[1].replace('"', '')
            else:
                continue
            records.append((chrom, gstart, gend, gstrand, gid, gver, gsource, gtype))
    print("First 10 entries of parsedGTF list: %s" % (records[:10]))
    print("Exiting function\n")
    return records
def summParser(summary):
    '''Create a a list of summary file'''
    # NOTE(review): this function appears unfinished/broken and is never
    # called by main(): ``gene`` (a column index) and ``summDict`` are not
    # defined anywhere in this module, so executing it raises NameError, and
    # nothing is ever appended to ``parsedSumm``. Confirm intent before use.
    print("\nFunction: summParser")
    fh_in = open(summary,'r')
    if head == 'Y':
        fh_in.readline()
    summRead = fh_in.readlines()
    geneSet = set()
    # for i in summRead:
    #     ent = i.split("\t")
    #     agene = ent[gene-1]
    #     geneSet.add(agene)
    parsedSumm = []
    acount = 0 ## To count the entries
    for i in summRead:
        # print(i)
        ent = i.split("\t") ## It has to be tab seprated file always
        agene = ent[gene-1]
        if agene not in geneSet:
            ## New entry add
            # NOTE(review): returning inside the loop means only the first
            # unseen gene is ever processed; the summary prints also
            # reference the undefined ``summDict``.
            print("Total entries scanned: %s | Length of GTF Dictionary %s" % (acount,len(summDict)))
            print("Exiting function - summaryDict\n")
            return parsedSumm
def tableMaker(parsedGTF, parsedSumm=None):
    '''
    Make a track specific sqlite DB for probe ID and coords that will be used
    to query probes on 20MB interval. Each probe entry has following info:
    probe_id,FC,pval,chr,start,end

    Returns (annoDB, annotable, conn): the DB filename, the table name and an
    open sqlite3 connection. Uses the module-level ``gtf`` setting to derive
    the DB filename.
    '''
    # ``parsedSumm`` is optional (default: no extra entries) so callers that
    # only have the parsed GTF — e.g. main()'s tableMaker(parsedGTF) call —
    # work; the original required it positionally, making that call a
    # TypeError.
    mergedInfo = parsedGTF + (parsedSumm if parsedSumm is not None else [])
    print("Function: tableMaker")
    annoDB = '%s.db' % (gtf.rpartition('.')[0])
    annotable = "geneMaster"
    conn = sqlite3.connect(annoDB)
    cur = conn.cursor()
    cur.execute('''DROP TABLE IF EXISTS %s''' % (annotable)) ### Drop Old table - while testing
    conn.commit()
    try:
        cur.execute('''CREATE TABLE %s (chr integer, start integer, end integer, strand varchar(10), geneName varchar(255), geneVersion varchar(255), geneSource varchar(255), geneType varchar(255) )''' % (annotable))
        conn.commit()
        ### Fill the table
        acount = 0 ## Count of number of gene entries
        for ent in mergedInfo:
            gChr,gStart,gEnd,gStrand,gid,gVer,gSource,gType = ent
            cur.execute("INSERT INTO %s VALUES (%d,%d,%d,'%s','%s',%d,'%s','%s')" % (annotable, int(gChr), int(gStart), int(gEnd), str(gStrand), str(gid), int(gVer), str(gSource), str(gType) ))
            acount +=1
    except sqlite3.Error as err:
        # BUG FIX: the original printed the undefined name ``Error``, which
        # itself raised NameError whenever a DB error occurred; report the
        # caught exception instead.
        print('ERROR:', err)
        sys.exit()
    cur.execute("SELECT * FROM %s LIMIT 10" % (annotable))
    test = cur.fetchall()
    print("First 10 entries:\n%s" % (test))
    cur.execute("SELECT COUNT(*) FROM %s" % (annotable))
    totalEnt = cur.fetchall()
    print("\nTotal entries in table:%s | Total entries in file: %s" % (totalEnt[0][0],acount) )
    conn.commit() ## Imporatnt to save table
    print("Exiting function\n")
    return annoDB,annotable,conn
def main():
    # Build (or reuse) the GTF annotation DB, then run the overlap scan over
    # the summary file and report the output filename.
    if makeDB == 1:
        parsedGTF = gtfParser(gtf)
        # NOTE(review): tableMaker is declared with two positional
        # parameters (parsedGTF, parsedSumm); confirm this single-argument
        # call matches its signature.
        annoDB,annotable,conn= tableMaker(parsedGTF)
    elif makeDB == 0:
        print("Existing annotation DB will be used, to make a new DB please turn on makeDB from settings")
        annoDB = '%s.db' % (gtf.rpartition('.')[0])
        annotable = "geneMaster"
        conn = sqlite3.connect(annoDB)
    else:
        # NOTE(review): this branch leaves ``conn`` undefined, so the
        # overlapCheck call below would raise NameError for bad settings.
        print("'makeDB' variable takes boolean values")
    resFile = overlapCheck(summary,conn,annotable)
    print("Overlap Check complete - see '%s' for results" % (resFile))

if __name__ == '__main__':
    main()
## v01 <-13th July -15
|
17,864 | 36dcd561aca78b4856abd81a30c2cb091e30286d | from time import sleep
from celery import shared_task, app
from FruitGP2.utils import send_active_email

@shared_task
def send_activate_email_async(username,to_email):
    """Celery task: send the account-activation e-mail in the background.

    Thin asynchronous wrapper around utils.send_active_email so web requests
    do not block on SMTP.
    """
    send_active_email(username,to_email)
|
17,865 | 132896419ea32755dec44806d3583d22e3e62cf9 | import os
def loadFiles(username):
for root,dirs,files in os.walk("/root/training/"+"chinmay.pawar"):
for name in files:
print "Dirs:::::::"+str(dirs)
if name.endswith('playbook.yml'):
print("/root/training/"+username+"/myplatform/"+name)
loadFiles("chinmay.pawar")
|
17,866 | 2b85de3c7e7b8b0d5cffed03c892dee9fdfc3c57 | class SnakesLadders:
def __init__(self):
self.board = [0,
0, 38, 0, 0, 0, 0, 14, 31, 0, 0,
0, 0, 0, 0, 26, 6, 0, 0, 0, 0,
42, 0, 0, 0, 0, 0, 0, 84, 0, 0,
0, 0, 0, 0, 0, 44, 0, 0, 0, 0,
0, 0, 0, 0, 0, 25, 0, 0, 11, 0,
67, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 19, 0, 60, 0, 0, 0, 0, 0, 0,
91, 0, 0, 53, 0, 0, 0, 98, 0, 0,
0, 0, 0, 0, 0, 0, 94, 0, 68, 0,
0, 88, 0, 0, 75, 0, 0, 0, 80, 0]
self.player = 0
self.square = [0, 0]
self.game_over = False
def play(self, die1, die2):
if self.game_over:
return 'Game over!'
next_square = self.square[self.player] + die1 + die2
if next_square <= 100:
self.square[self.player] = self.square[self.player] + die1 + die2 if not self.board[next_square] else self.board[next_square]
else:
next_square = 200 - self.square[self.player] - die1 - die2
self.square[self.player] = next_square if not self.board[next_square] else self.board[next_square]
if self.square[self.player] == 100:
self.game_over = True
return 'Player {} Wins!'.format(int(self.player) + 1)
if die1 != die2:
self.player ^= 1
return 'Player {} is on square {}'.format(int((self.player ^ 1) + 1), self.square[self.player ^ 1])
return 'Player {} is on square {}'.format(int(self.player + 1), self.square[self.player])
|
17,867 | 73a0cd3cae0813798c8642f7fca6bbccb7a70b33 | # -*- coding: utf-8 -*-
from odoo import models, fields, api,_
from datetime import datetime
from dateutil.relativedelta import relativedelta
from odoo.exceptions import except_orm
from odoo.exceptions import UserError
from odoo.tools import email_split, float_is_zero
import time
from odoo.exceptions import UserError, AccessError, ValidationError
class HrLoan(models.Model):
    """Employee loan request with an EMI schedule and accounting disbursal."""
    _name = 'hr.loan'
    _inherit = ['mail.thread']
    _description = "Loan Request"

    @api.one
    def _compute_loan_emi(self):
        # Number of installment (EMI) lines attached to this loan.
        self.emi_count = self.env['hr.loan.line'].search_count([('loan_id', '=', self.id)])

    emi_count = fields.Integer(string="EMI Count", compute='_compute_loan_emi')

    @api.one
    def _compute_loan_amount(self):
        # Derive total / paid / outstanding amounts from the EMI lines.
        total_paid = 0.0
        for loan in self:
            for line in loan.loan_lines:
                if line.state == 'Paid':
                    total_paid += line.amount
            # Modifications needed........
            balance_amount = loan.loan_amount - total_paid
            self.total_amount = loan.loan_amount
            self.balance_amount = balance_amount
            self.total_paid_amount = total_paid

    def _default_company(self):
        return self.env['res.company']._company_default_get('res.partner')

    def _default_journal_id(self):
        # Prefer a bank/cash journal named like "HR loan" in the loan's company.
        company = self.env['res.company']._company_default_get('res.partner')
        if self.company_id:
            company = self.company_id
        journals = self.env['account.journal'].search([('company_id', '=', company.id), ('name', 'ilike', 'HR loan'),
                                                       ('type', 'in', ('bank', 'cash'))], limit=1)
        return journals

    def _default_currency(self):
        return self.env['res.company']._company_default_get('res.partner').currency_id

    @api.onchange('company_id')
    def onchange_company_id(self):
        # Keep currency, account and journal consistent with the selected
        # company, reset the employee, and narrow the selectable domains.
        self.currency_id = self.company_id.currency_id
        if self.account_id and self.account_id.company_id != self.company_id:
            self.account_id = False
        if self.journal_id.company_id != self.company_id:
            journal_id = self._default_journal_id()
            self.journal_id = journal_id
            self.account_id = journal_id.default_credit_account_id.id
        self.employee_id = False
        account_id_domain = self._get_account_id()
        journal_id_domain = self._get_journal_id()
        return {
            'domain': {'account_id': account_id_domain, 'journal_id':journal_id_domain}
        }

    def _get_journal_id(self):
        # Domain: bank/cash journals named like "HR loan" in this company.
        domain = [('company_id', '=', self.company_id.id), ('name', 'ilike', 'HR loan'), ('type', 'in', ('bank', 'cash'))]
        return domain

    def _get_account_id(self):
        # Domain: liquidity accounts named like "HR loan" in this company.
        bank_cash = self.env.ref('account.data_account_type_liquidity').id
        domain = [('company_id', '=', self.company_id.id), ('name', 'ilike', 'HR loan'),
                  ('user_type_id', '=', bank_cash)]
        return domain

    name = fields.Char(string="Loan Name", default="/", readonly=True)
    date = fields.Date(string="Issue Date", default=fields.Date.today(), readonly=True)
    employee_id = fields.Many2one('hr.employee', string="Employee", required=True, readonly=True,
                                  states={'draft': [('readonly', False)]}, domain="[('company_id', '=', company_id)]")
    department_id = fields.Many2one('hr.department', related="employee_id.department_id", readonly=True,
                                    string="Department")
    job_position = fields.Many2one('hr.job', related="employee_id.job_id", readonly=True, string="Job Position")
    company_id = fields.Many2one('res.company', 'Company', readonly=True,
                                 default=_default_company,
                                 states={'draft': [('readonly', False)]})
    currency_id = fields.Many2one('res.currency', string='Currency', required=True,
                                  default=_default_currency, domain="[('company_id', '=', company_id)]")
    payment_start_date = fields.Date(string="Payment Start Date", required=True, default=fields.Date.today(),
                                     readonly=True, states={'draft': [('readonly', False)]})
    payment_end_date = fields.Date(string="Payment End Date", required=True, readonly=True, states={'draft': [('readonly', False)]})
    installment = fields.Integer(string="No of Installments", default=1, readonly=True, states={'draft': [('readonly', False)]})
    loan_amount = fields.Float(string="Loan Amount", required=True, readonly=True,
                               states={'draft': [('readonly', False)]})
    emi = fields.Float(string="EMI", readonly=True, states={'draft': [('readonly', False)]})
    total_amount = fields.Float(string="Total Amount", readonly=True, compute='_compute_loan_amount')
    total_paid_amount = fields.Float(string="Amount Paid", compute='_compute_loan_amount')
    balance_amount = fields.Float(string="Balance Amount", compute='_compute_loan_amount')
    loan_lines = fields.One2many('hr.loan.line', 'loan_id', string="Installments", index=True, readonly=True, states={'draft': [('readonly', False)]})

    @api.onchange('loan_amount', 'installment')
    def onchange_loan_amount(self):
        # Derive the EMI from amount and installment count.
        if self.installment and self.loan_amount:
            self.emi = self.loan_amount / self.installment

    @api.onchange('loan_amount', 'emi')
    def onchange_loan_amount_emi(self):
        # Derive the installment count from amount and EMI.
        if self.emi and self.loan_amount:
            self.installment = self.loan_amount / self.emi

    @api.constrains('emi')
    def _check_wage_emi(self):
        # An EMI larger than the contractual wage cannot be deducted.
        for loan in self:
            if loan.employee_id.contract_id:
                if loan.employee_id.contract_id.wage < loan.emi:
                    raise ValidationError(_('EMI should be less than Basic salary'))

    @api.onchange('payment_start_date', 'installment')
    def onchange_payment_start_date(self):
        # End date = start date + (installments - 1) months.
        if self.payment_start_date and self.installment >=1:
            self.payment_end_date = datetime.strptime(self.payment_start_date, '%Y-%m-%d') +\
                                    relativedelta(months=self.installment-1)

    state = fields.Selection([
        ('draft', 'Draft'),
        ('waiting_approval_1', 'Submitted'),
        ('approve', 'Approved'),
        ('disbursed', 'Disbursed'),
        ('refuse', 'Refused'),
        ('cancel', 'Canceled'),
    ], string="State", default='draft', track_visibility='onchange', copy=False)
    color_tree = fields.Char(compute='_compute_color', string='Color Index')

    def _compute_color(self):
        # Tree-view colour index: 1 = not yet disbursed, 3 = repayment window
        # in progress, 2 = window passed or all EMIs paid.
        today = datetime.strptime(time.strftime("%Y-%m-%d"), '%Y-%m-%d').date()
        for loan in self:
            if loan.state != 'disbursed':
                loan.color_tree = 1
            if loan.state == 'disbursed':
                payment_end_date = datetime.strptime(loan.payment_end_date, '%Y-%m-%d').date()
                payment_start_date = datetime.strptime(loan.payment_start_date, '%Y-%m-%d').date()
                if payment_start_date <= today <= payment_end_date:
                    loan.color_tree = 3
                else:
                    loan.color_tree = 2
                all_emi_paid = all(emi_lines.state == 'Paid' for emi_lines in loan.loan_lines)
                if all_emi_paid:
                    loan.color_tree = 2

    journal_id = fields.Many2one('account.journal', string='Loan Journal',
                                 default=_default_journal_id,
                                 help="The journal used when the loan is done.",
                                 readonly=True, states={'draft': [('readonly', False)],
                                                        'waiting_approval_1': [('readonly', False)],
                                                        'approve': [('readonly', False)]},
                                 domain=_get_journal_id)

    @api.onchange('journal_id')
    def onchange_journal_id(self):
        # Default the loan account from the journal's credit account.
        if self.journal_id:
            self.account_id = self.journal_id.default_credit_account_id.id

    account_id = fields.Many2one('account.account', string='Account', help="An loan account is expected",
                                 readonly=True, domain=_get_account_id,
                                 states={'draft': [('readonly', False)],
                                         'waiting_approval_1': [('readonly', False)],
                                         'approve': [('readonly', False)]})
    account_move_id = fields.Many2one('account.move', string='Journal Entry', ondelete='restrict', copy=False, readonly=True)

    @api.constrains('payment_start_date', 'payment_end_date', 'employee_id')
    def _check_date_employee_id(self):
        # Disallow two active loans for the same employee with overlapping
        # repayment windows.
        for loan in self:
            domain_same_time = [
                ('payment_start_date', '<=', loan.payment_end_date),
                ('payment_end_date', '>=', loan.payment_start_date),
                ('employee_id', '=', loan.employee_id.id),
                ('id', '!=', loan.id),
                ('state', 'not in', ['cancel', 'refuse']),
            ]
            nloans = self.search_count(domain_same_time)
            if nloans:
                raise ValidationError(_('You cannot have 2 loans that overlaps on same day for %s !')
                                      % (loan.employee_id.name))

    @api.constrains('balance_amount', 'employee_id')
    def _check_balance_amount_employee_id(self):
        # A new loan is blocked while any other active loan of the employee
        # still has an outstanding balance.
        for loan in self:
            domain_pending_ins = [
                ('employee_id', '=', loan.employee_id.id),
                ('id', '!=', loan.id),
                ('balance_amount', '!=', 0),
                ('state', 'not in', ['cancel', 'refuse']),
            ]
            nloans_pend = self.search_count(domain_pending_ins)
            if nloans_pend:
                raise ValidationError(_('%s has a pending installment') % (loan.employee_id.name))

    @api.constrains('company_id', 'doctor', 'journal_id', 'employee_id')
    def _check_same_company_appt(self):
        # NOTE(review): 'doctor' is not a field on hr.loan and the error
        # messages say "Appointment" — this looks copied from another module;
        # confirm the constrains list and wording.
        if self.company_id:
            if self.account_id.company_id:
                if self.company_id.id != self.account_id.company_id.id:
                    raise ValidationError(_('Error ! Account and Appointment should be of same company'))
            if self.journal_id.company_id:
                if self.company_id.id != self.journal_id.company_id.id:
                    raise ValidationError(_('Error ! Journal and Appointment should be of same company'))
            if self.employee_id.company_id:
                if self.company_id.id != self.employee_id.company_id.id:
                    raise ValidationError(_('Error ! Employee and Appointment should be of same company'))

    @api.model
    def create(self, values):
        # Assign the next loan reference from the dedicated sequence.
        # loan_count = self.env['hr.loan'].search_count([('employee_id', '=', values['employee_id']),
        #                                                # ('state', '=', 'disbursed'),
        #                                                # ('state', '=', 'approve'),
        #                                                ('state', 'not in', ('refuse', 'cancel')),
        #                                                ('balance_amount', '!=', 0)])
        # if loan_count:
        #     raise except_orm('Error!', 'The employee has a pending installment')
        # else:
        values['name'] = self.env['ir.sequence'].get('hr.loan.seq') or ' '
        res = super(HrLoan, self).create(values)
        return res

    @api.multi
    def action_refuse(self):
        return self.write({'state': 'refuse'})

    @api.multi
    def action_submit(self):
        # Generate the EMI schedule, then move to the submitted state.
        self.compute_installment()
        self.write({'state': 'waiting_approval_1'})

    @api.multi
    def action_cancel(self):
        self.write({'state': 'cancel'})

    def get_current_login_user_mail(self):
        # E-mail of the current user, falling back to the company address.
        if self.env.user.partner_id.email:
            return self.env.user.partner_id.email
        return self.company_id.email

    @api.multi
    def action_approve(self):
        # Notify via the module's mail template, then approve.
        template_id = self.env.ref('hr_loan_and_advance.email_template_edi_loan')
        template_id.with_context(lang=self.env.user.lang).send_mail(self.id, force_send=True, raise_exception=False)
        self.write({'state': 'approve'})
        return True

    @api.multi
    def _compute_loan_totals(self, company_currency, account_move_lines, move_date):
        # Convert move-line prices to the company currency and accumulate
        # totals; returns (total, total_currency, account_move_lines).
        self.ensure_one()
        total = 0.0
        total_currency = 0.0
        for line in account_move_lines:
            line['currency_id'] = False
            line['amount_currency'] = False
            if self.currency_id != company_currency:
                line['currency_id'] = self.currency_id.id
                line['amount_currency'] = line['price']
                line['price'] = self.currency_id.with_context(
                    date=move_date or fields.Date.context_today(self)).compute(line['price'], company_currency)
            total -= line['price']
            total_currency -= line['amount_currency'] or line['price']
        return total, total_currency, account_move_lines

    def _prepare_move_line(self, line):
        # Map an internal line dict to account.move.line create() values.
        partner_id = self.employee_id.address_home_id.commercial_partner_id.id
        return {
            'date_maturity': line.get('date_maturity'),
            'partner_id': partner_id,
            'name': line['name'][:64],
            'debit': line['price'] > 0 and line['price'],
            'credit': line['price'] < 0 and - line['price'],
            'account_id': line['account_id'],
            'analytic_line_ids': line.get('analytic_line_ids'),
            'amount_currency': line['price'] > 0 and abs(line.get('amount_currency')) or - abs(line.get('amount_currency')),
            'currency_id': line.get('currency_id'),
            'tax_line_id': line.get('tax_line_id'),
            'tax_ids': line.get('tax_ids'),
            'quantity': line.get('quantity', 1.00),
            'product_id': line.get('product_id'),
            'product_uom_id': line.get('uom_id'),
            'analytic_account_id': line.get('analytic_account_id'),
            'payment_id': line.get('payment_id'),
            'expense_id': line.get('expense_id'),
        }

    @api.multi
    def disburse_loan(self):
        # Create and post the disbursal journal entry (credit the loan
        # account, debit the employee's payable account), then mark the
        # loan as disbursed.
        for loan in self:
            if loan.state != 'approve':
                raise UserError(_("You can only generate accounting entry for approved loan request."))
            if not loan.journal_id:
                raise UserError(_("Loan request must have a journal specified to generate accounting entries."))
            journal = loan.journal_id
            acc_date = loan.date
            move = self.env['account.move'].create({
                'journal_id': journal.id,
                'company_id': loan.company_id.id,
                'date': acc_date,
                'ref': loan.name,
                'name': '/',
                'narration': 'HR Loan Disburse',
            })
            account_move = []
            account = ""
            if loan.account_id:
                account = loan.account_id
            if not account:
                raise UserError(_('Please configure Default loan account'))
            # Credit side: the configured loan (liquidity) account.
            move_line = {
                'type': 'src',
                'name': loan.employee_id.name + ': ' + loan.name,
                'price': -loan.loan_amount,
                'account_id': account.id,
            }
            account_move.append(move_line)
            company_currency = loan.company_id.currency_id
            diff_currency_p = loan.currency_id != company_currency
            total, total_currency, move_lines = loan._compute_loan_totals(company_currency, account_move, acc_date)
            if not loan.employee_id.address_home_id:
                raise UserError(_("No Home Address found for the employee %s, please configure one.") % (
                    loan.employee_id.name))
            if loan.employee_id.address_home_id:
                # Debit side: the employee's payable account.
                emp_account = loan.employee_id.address_home_id.property_account_payable_id.id
                aml_name = loan.employee_id.name + ': ' + loan.name
                move_lines.append({
                    'type': 'dest',
                    'name': aml_name,
                    'price': total,
                    'account_id': emp_account,
                    'date_maturity': acc_date,
                    'amount_currency': diff_currency_p and total_currency or False,
                    'currency_id': diff_currency_p and loan.currency_id.id or False,
                })
            lines = [(0, 0, loan._prepare_move_line(x)) for x in move_lines]
            move.with_context(dont_create_taxes=True).write({'line_ids': lines})
            loan.write({'account_move_id': move.id})
            move.post()
            loan.write({'state': 'disbursed'})
        return True

    @api.multi
    def compute_installment(self):
        # (Re)generate the EMI schedule: equal monthly lines starting at the
        # payment start date.
        for loan in self:
            date_start = datetime.strptime(loan.payment_start_date, '%Y-%m-%d')
            amount = loan.loan_amount / loan.installment
            for i in range(1, loan.installment + 1):
                self.env['hr.loan.line'].create({
                    'date': date_start,
                    'amount': amount,
                    'employee_id': loan.employee_id.id,
                    'loan_id': loan.id})
                date_start = date_start + relativedelta(months=1)
        return True
class InstallmentLine(models.Model):
    """One monthly repayment installment of an hr.loan."""
    _name = "hr.loan.line"
    _inherit = ['mail.thread']
    _description = "Installment Line"

    date = fields.Date(string="Payment Date", required=True, track_visibility='onchange')
    amount = fields.Float(string="Amount", required=True, track_visibility='onchange')
    loan_id = fields.Many2one('hr.loan', string="Loan Ref.", track_visibility='onchange')
    employee_id = fields.Many2one('hr.employee', string="Employee", related='loan_id.employee_id', track_visibility='onchange')
    paid_manually = fields.Boolean(string="Paid Manually", track_visibility='onchange')
    # Mirror of the parent loan's state, used for filtering lines.
    loan_state = fields.Selection([
        ('draft', 'Draft'),
        ('waiting_approval_1', 'Submitted'),
        ('approve', 'Approved'),
        ('disbursed', 'Disbursed'),
        ('refuse', 'Refused'),
        ('cancel', 'Canceled'),
    ], string="Loan State", related='loan_id.state', track_visibility='onchange', copy=False)
    # NOTE(review): keys mix lower-case and capitalised values ('Postponed',
    # 'Paid'); kept as-is because stored data and domains depend on them.
    state = fields.Selection([
        ('payment_pending', 'Payment pending'),
        ('waiting_for_postponed', 'Waiting for postponement'),
        ('Postponed', 'Postponed'),
        ('Paid', 'Paid'),
    ], string="State", default='payment_pending', track_visibility='onchange', copy=False)
    account_move_id = fields.Many2one('account.move', string='Journal Entry', ondelete='restrict', copy=False,
                                      readonly=True)
    emi_postponed_date = fields.Date(string="EMI Postpone Date", track_visibility='onchange')
    emi_postponed_reason = fields.Text(string="EMI Postpone Reason", track_visibility='onchange')

    def accept_postpone_request(self):
        """Approve a postponement: move the due date to the requested one."""
        self.write({'state': 'Postponed', 'date':self.emi_postponed_date})

    def reject_postpone_request(self):
        """Reject a postponement and clear the request fields."""
        self.write({'state': 'payment_pending', 'emi_postponed_date': False, 'emi_postponed_reason': False})
class HrEmployee(models.Model):
    """Extend hr.employee with loan counters and payroll deduction lookup."""
    _inherit = "hr.employee"

    @api.one
    def _compute_employee_loans(self):
        """Count this employee's loan requests (e.g. for a smart button)."""
        self.loan_count = self.env['hr.loan'].search_count([('employee_id', '=', self.id)])

    # Number of hr.loan records linked to this employee.
    loan_count = fields.Integer(string="Loan Count", compute='_compute_employee_loans')

    def get_loan_ins(self, employee, date_from, date_to):
        """Total the unpaid installments of disbursed loans due in the period.

        :param employee: hr.employee database id
        :param date_from: period start (inclusive)
        :param date_to: period end (inclusive)
        :return: summed installment amount as float

        NOTE: side effect — every matched line is written to state 'Paid',
        so calling this twice for the same period returns 0 the second time.
        """
        employee_id = self.env['hr.employee'].browse(employee)
        dom = [('employee_id', '=', employee_id.id), ('loan_state', '=', 'disbursed'), ('state', '!=', 'Paid'),
               ('date', '>=', date_from), ('date', '<=', date_to)]
        loan_line_amount = 0.0
        for loan_line in self.env['hr.loan.line'].search(dom):
            loan_line.write({'state': 'Paid'})
            loan_line_amount += loan_line.amount
        return loan_line_amount
|
17,868 | ea97815692b37286676cbfbfd7e113ad18d0536a | #!/usr/bin/python3
# -*- coding: <utf-8> -*-
__author__ = "Adam Jarzebak"
__copyright__ = "Copyright 2018, Middlesex University"
__license__ = "MIT License"
__maintainer__ = "Adam Jarzebak"
__email__ = "adam@jarzebak.eu"
__status__ = "Production"
from mirto_asip_manager.asip_manager import AsipManager
from time import sleep
from mirto_asip_manager.settings import logging as log
import time
class MirtoRobot:
    """Convenience facade over AsipManager for driving a Mirto robot.

    Exposes encoder/IR reads, motor control, tone playback and a few
    self-test helpers on top of the services started by AsipManager.
    """

    def __init__(self, debug: bool, services_on: dict) -> None:
        """Connect to the robot, start the services listed in `services_on`
        and log the reported system version."""
        self.robot = AsipManager(debug)
        self.robot.initialize_main(services_on)
        self.get_version_info()

    def terminate(self) -> None:
        """Terminate all service threads and close the ports."""
        self.robot.terminate_all()

    def get_version_info(self) -> None:
        """Log the system version from the 'sys_info' service, if enabled."""
        sys_info_service = self.robot.all_services.get("sys_info")
        if sys_info_service is not None:
            log.info("System version info: %s" % sys_info_service.system_version)
        else:
            log.warning("Service get_version_info is not enabled!")

    def get_left_encoder_values(self, delta: bool = False):
        """Left wheel encoder reading.

        :param delta: True returns the full service value list; False returns
            only element [1] (the non-delta count, per the encoder service).
        :return: list or single value, or None when the service is disabled.
        """
        encoders = self.robot.all_services.get('encoders')
        if encoders is None:
            log.warning("Service encoders is not enabled!")
            return None
        left_values_all = encoders.left_values
        return left_values_all if delta else left_values_all[1]

    def get_right_encoder_values(self, delta: bool = False):
        """Right wheel encoder reading; see get_left_encoder_values."""
        encoders = self.robot.all_services.get('encoders')
        if encoders is None:
            log.warning("Service encoders is not enabled!")
            return None
        right_values_all = encoders.right_values
        return right_values_all if delta else right_values_all[1]

    def get_encoders_values(self, delta: bool = False):
        """Both encoders at once.

        :param delta: True returns the raw per-wheel value pairs; False
            returns [left_count, right_count] (element [1] of each pair).
        """
        encoders = self.robot.all_services.get('encoders')
        if encoders is None:
            log.warning("Service encoders is not enabled!")
            return None
        values_all = encoders.all_values
        return values_all if delta else [values_all[0][1], values_all[1][1]]

    def set_motors(self, speed0: int, speed1: int) -> None:
        """Set both wheel speeds (range -100..100).

        Bug fix: the previous guard `motor_1 or motor_1 is not None` never
        checked motor_2, so a missing second motor raised AttributeError.
        """
        motor_1 = self.robot.all_services.get('motor_1')
        motor_2 = self.robot.all_services.get('motor_2')
        if motor_1 is not None and motor_2 is not None:
            motor_1.set_motor(speed0)
            motor_2.set_motor(speed1)
            log.info("Setting motor: '{}': {} motor:'{}': {}".format(motor_1.name, speed0, motor_2.name, speed1))
        else:
            log.warning("One of the motors is not enabled!")

    def stop_motors(self) -> None:
        """Stop both motors.

        Bug fix: the previous guard tested motor_1 twice and ignored motor_2.
        """
        motor_1 = self.robot.all_services.get('motor_1')
        motor_2 = self.robot.all_services.get('motor_2')
        if motor_1 is not None and motor_2 is not None:
            motor_1.stop_motor()
            motor_2.stop_motor()
            log.info("Motors stopped")
        else:
            log.warning("One of the motors is not enabled!")

    def get_ir_sensors_values(self) -> list:
        """Return the IR sensor readings reordered by `ir_sensors_order`.

        Amend the order list if the sensors are wired in a different order.
        """
        ir_sensors = self.robot.all_services.get('ir_sensors')
        if ir_sensors is None:
            log.warning("Service IR sensors is not enabled!")
            return None
        ir_sensors_order = [0, 1, 2]
        return [ir_sensors.ir_all_values[num] for num in ir_sensors_order]

    def play_tone(self, frequency, duration) -> None:
        """Play a tone of `frequency` for `duration` via the 'tone' service."""
        tone = self.robot.all_services.get("tone")
        if tone is not None:
            tone.play_tone(frequency, duration)
            log.info("Finish playing tone")
        else:
            log.warning("Service tone is not enabled!")

    def test_play_tone(self) -> None:
        """Smoke test: play a 440 Hz tone."""
        self.play_tone(440, 2000)
        sleep(1)
        log.info("Finished test for tone service")

    def test_encoders(self, interval: float = 0.1, time_to_finish: int = 10) -> None:
        """Print delta encoder readings every `interval` seconds for
        `time_to_finish` seconds."""
        end_time = time.time() + time_to_finish
        while time.time() < end_time:
            print(self.get_left_encoder_values(True), self.get_right_encoder_values(True))
            sleep(interval)
        log.info("Finish encoder test")

    def test_motor(self, with_encoders: bool = False, period: float = 5) -> None:
        """Drive both motors at speed 30 for `period` seconds, optionally
        streaming encoder values while driving.

        Annotation fixed: `period` is a duration in seconds, not the `time`
        module as the old `period: time=5` annotation claimed.
        """
        self.set_motors(30, 30)
        if with_encoders:
            self.test_encoders(0.1, period)
        else:
            sleep(period)
        self.stop_motors()

    def test_ir_sensors(self, time_to_finish: int = 10, interval: float = 0.1) -> None:
        """Print IR sensor readings every `interval` seconds for
        `time_to_finish` seconds."""
        end_time = time.time() + time_to_finish
        while time.time() < end_time:
            print(self.get_ir_sensors_values())
            sleep(interval)
if __name__ == '__main__':
    # Service name -> pair of boolean flags consumed by AsipManager.
    # NOTE(review): flag semantics inferred from shape only — confirm
    # against AsipManager.initialize_main.
    services_to_run = {"encoders": [True, False], "motors": [True, False], "ir_sensors": [True, False]}
    # Run services test
    mirto = MirtoRobot(debug=False, services_on=services_to_run)
    mirto.test_encoders(0.1, 2)
    mirto.test_motor(True, 2)
    mirto.test_ir_sensors(2, 0.2)
    # This will stop all threads and close ports
    mirto.terminate()
|
17,869 | 3beaa5c06b49647f247d7bc04760de24c8dd5180 |
import pytest
from rest_framework import status
from rest_framework.reverse import reverse
pytestmark = pytest.mark.django_db
def test_all_good(client):
    """The ping endpoint answers 200 with an XML OK body and text/xml type."""
    reply = client.get(reverse('ping'))
    assert reply.status_code == status.HTTP_200_OK
    assert '<status>OK</status>' in str(reply.content)
    assert reply._headers['content-type'] == ('Content-Type', 'text/xml')
|
17,870 | 6fa32e8b556c5777402c61b1b47492c5c1d94170 | """ Rotation matrices for rotations around x, y, z axes
See: https://en.wikipedia.org/wiki/Rotation_matrix#Basic_rotations
"""
import numpy as np
import math
def x_rotmat(theta):
    """Rotation matrix for a rotation of `theta` radians about the x axis.

    Parameters
    ----------
    theta : scalar
        Rotation angle in radians.

    Returns
    -------
    M : (3, 3) ndarray
        The rotation matrix.
    """
    c, s = np.cos(theta), np.sin(theta)
    return np.array([[1, 0, 0],
                     [0, c, -s],
                     [0, s, c]])
def y_rotmat(theta):
    """Rotation matrix for a rotation of `theta` radians about the y axis.

    Parameters
    ----------
    theta : scalar
        Rotation angle in radians.

    Returns
    -------
    M : (3, 3) ndarray
        The rotation matrix.
    """
    c, s = np.cos(theta), np.sin(theta)
    return np.array([[c, 0, s],
                     [0, 1, 0],
                     [-s, 0, c]])
def z_rotmat(theta):
    """Rotation matrix for a rotation of `theta` radians about the z axis.

    Parameters
    ----------
    theta : scalar
        Rotation angle in radians.

    Returns
    -------
    M : (3, 3) ndarray
        The rotation matrix.
    """
    c, s = np.cos(theta), np.sin(theta)
    return np.array([[c, -s, 0],
                     [s, c, 0],
                     [0, 0, 1]])
def shear(angle, x, y):
    """Rotate the integer pixel (x, y) by `angle` radians via three shears.

    Decomposes the rotation into shear matrices::

        |1 -tan(t/2)| |1      0| |1 -tan(t/2)|
        |0     1    | |sin(t) 1| |0     1    |

    rounding after each shear so coordinates stay integral.
    Returns the rotated point as (new_y, new_x).
    """
    half_tan = math.tan(angle / 2)
    # First (horizontal) shear.
    sx = round(x - y * half_tan)
    sy = y
    # Second (vertical) shear; sx is unchanged by this matrix.
    sy = round(sx * math.sin(angle) + sy)
    # Third (horizontal) shear; sy is unchanged by this matrix.
    sx = round(sx - sy * half_tan)
    return sy, sx
|
17,871 | 85a163daf0c66ca392f0cf04fe03ce8c169d1319 | from foods.fruits import orange, apple, watermelon
# Exercise each fruit module's eat() behaviour.
orange.eat()
apple.eat()
watermelon.eat()
17,872 | 41eb373807dfa6e8bfaa95327c86c5a7463e1bf5 | # -*- coding: utf-8 -*-
"""
File Name: Main.py
Author: GSS
Mail: gao.hillhill@gmail.com
Description:
Created Time: 12/22/2017 11:11:01 AM
Last modified: 4/11/2018 4:09:58 PM
"""
#IO1717
import os
import numpy as np
import time
import sys
from adc_meas import LF_MEAS
# Measurement controller plus instrument addresses (site-specific VISA ids).
lfm = LF_MEAS()
lfm.gen.ADDR = u'USB0::0x0957::0x5707::MY53802435::0::INSTR' #need change
lfm.smu.ADDR = u'USB0::0x0957::0x4118::MY57340004::0::INSTR' #need change
#set shift and phase for AD7274
chn0sft = 1 # normally don't need change
lfm.wib.adc_sft_np = [chn0sft, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
chn0pha = 3 # normally don't need change
lfm.wib.adc_phase_np = [chn0pha, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
def one_lf_cycle(savepath, t_hr= 1, chn=0, Vstress = 5.5):
    """One lifetime-test cycle: characterize the ADC on the nominal LDO
    supply, then stress it from the SMU at `Vstress`, re-measuring hourly.

    savepath -- directory for the raw data files
    t_hr     -- stress duration of this cycle, in hours
    chn      -- ADC channel under test
                NOTE(review): `chn` is unused; adc_meas below hard-codes
                chn=0 -- confirm whether that is intended.
    Vstress  -- stress voltage; SMU channel 1 is capped at 5.0 V below
    """
    t_sec = t_hr * 3600
    print "A new lifetime cycle starts... "
    #####################################################################
    print "Characterize ADC with power supply from LDO"
    # SMU channel settings: [channel, voltage, ...]; the meaning of the
    # remaining fields is defined by lfm.smu_config -- confirm there.
    smu_chn1 = [1, 2.5, 50, 20, 120, 120, 25]
    smu_chn2 = [2, 1.8, 50, 20, 10, 10, 25]
    smu_chn3 = [3, 2.5, 50, 20, 10, 10, 25]
    smu_chns = [smu_chn1, smu_chn2, smu_chn3]
    lfm.smu_config(smu_chns)
    lfm.adc_meas(savepath,chn=0, vref=1.8, mode=lfm.mode_ldo)
    lfm.cur_meas(savepath, t = 300, mode=lfm.mode_smu_nor, Vref=1.8)
    #####################################################################
    print "Characterize ADC with power supply from MSU"
    # Channel 1 supply is clamped to 5.0 V even for higher Vstress.
    if (Vstress > 5.0):
        smu_chn1 = [1, 5.0, 50, 20, 120, 120, 25]
    else:
        smu_chn1 = [1, Vstress, 50, 20, 120, 120, 25]
    smu_chn2 = [2, Vstress, 50, 20, 10, 10, 25]
    smu_chn3 = [3, Vstress, 50, 20, 10, 10, 25]
    smu_chns = [smu_chn1, smu_chn2, smu_chn3]
    lfm.smu_config(smu_chns)
    # One measurement pass per hour of stress.
    for stress_1hr in range (1, int(t_hr) + 1, 1):
        one_hr = 3600
        lfm.adc_meas(savepath,chn=0, vref=Vstress, mode=lfm.mode_smu_str)
        lfm.cur_meas(savepath, t = one_hr, mode=lfm.mode_smu_str,Vref = Vstress)
    print "Present lifetime cycle done... "
#lf_hours = [1]
# Per-cycle stress durations in hours (schedule ramps from 1 h to 16 h cycles).
lf_hours = [1,1,2,2,2,2, 4,4,4,4, 4,4,4,4,8,8,8,8, 8,8,8,8, 8,8,8,8, 8,8,8,8,\
            16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,\
            16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,\
            16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,\
            16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,\
            16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16]
# Command line: <part number> <chip no> <channel> <total hours> <Vstress>
chippn = sys.argv[1] #AD7274
chipno = sys.argv[2] #001
chipchn = int(sys.argv[3])
stress_hours = int(sys.argv[4]) # duration request for stress test
Vstress = float(sys.argv[5])
if Vstress > 6 :
    print "Error: Stress Voltage should be no more than 6V. Exit anyway!"
    sys.exit()
savepath = "D:/COTS_ADC_LF/Rawdata/" + chippn +"_" + chipno + "/"
if os.path.exists(savepath):
    print "Folder exist, please check!"
    ow_flg = raw_input("Overwrite ? (y/n) : ")
    if (ow_flg == "y"):
        pass
    else:
        # Bare raise with no active exception aborts the script.
        raise
else:
    try:
        os.makedirs(savepath)
    except OSError:
        print "Cannot make the folder!"
        raise
lfm.meas_init()
# Run cycles until the requested total stress time is reached.
Tpassed = 0
for lf_hr in lf_hours:
    one_lf_cycle(savepath, t_hr= lf_hr, chn=chipchn, Vstress = Vstress)
    Tpassed = Tpassed + lf_hr
    if (Tpassed >= stress_hours):
        print "Entire stress has done"
        break
    else:
        # NOTE(review): prints the *requested* total, not Tpassed -- intended?
        print "Have been stressed %d hours"%stress_hours
lfm.meas_close()
|
17,873 | fc7b0efe13ae40719db65343c4ab66e14e0dca7b |
import numpy as np
import tensorflow as tf
from tensorflow.contrib import learn
from tensorflow.contrib.learn.python.learn.preprocessing import text
from tensorflow.contrib.learn.python.learn.preprocessing import CategoricalVocabulary
# Read one user review sentence from stdin (prompt is in Korean).
x_raw = []
input_text = input('사용자 평가를 문장으로 입력하세요: ')
x_raw.append(input_text)
#########
# get vocaburary dic
######
# Map the sentence to word ids using the vocabulary saved at training time.
vocab_processor = learn.preprocessing.VocabularyProcessor.restore("model/vocab")
input_data = np.array(list(vocab_processor.transform(x_raw)))
print(input_data)
#########
# let's load meta graph and restore weights
######
sess=tf.Session()
sess.run(tf.global_variables_initializer())
saver = tf.train.import_meta_graph('model/output.ckpt-1000.meta')
saver.restore(sess,tf.train.latest_checkpoint('./model'))
graph = tf.get_default_graph()
# Recover the input placeholder and trained parameters by tensor name.
X = graph.get_tensor_by_name("X:0")
W = graph.get_tensor_by_name("weight:0")
b = graph.get_tensor_by_name("bias:0")
# Softmax classifier: predicted class is the argmax over class probabilities.
logits = tf.matmul(X, W) + b
hypothesis = tf.nn.softmax(logits)
prediction = tf.argmax(hypothesis, 1)
prob = sess.run(hypothesis, feed_dict={X: input_data})
pred = sess.run(prediction, feed_dict={X: input_data})
print('probability', prob)
print("Prediction: {}".format(pred))
|
17,874 | 27b503680fee1b9f197acddd935ac87b41e6ce62 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from configargparse import ArgumentParser
# from SMACB.MercadoPage import MercadoPageContent
from SMACB.TemporadaACB import TemporadaACB
# from Utils.Misc import ReadFile
if __name__ == '__main__':
    # CLI: load a season file, register "code:name" player translations,
    # and save the season back out if anything changed.
    parser = ArgumentParser()
    # -i/-o may also come from the SM_TEMPIN / SM_TEMPOUT environment vars.
    parser.add('-i', dest='tempin', type=str, env_var='SM_TEMPIN', required=False)
    parser.add('-o', dest='tempout', type=str, env_var='SM_TEMPOUT', required=False)
    parser.add_argument(dest='trads', type=str, nargs='*')
    args = parser.parse_args()
    temporada = None
    if 'tempin' in args and args.tempin:
        temporada = TemporadaACB()
        temporada.cargaTemporada(args.tempin)
    for trad in args.trads:
        # Each positional argument must look like "code:name".
        try:
            newCod, newNombre = trad.split(':', maxsplit=1)
        except ValueError:
            print("AddTraducJugadores: Traducción '%s' incorrecta. Formato debe ser codigo:nombre. Ignorando" % trad)
            continue
        print("AddTraducJugadores: añadiendo '%s' -> '%s'" % (newNombre, newCod))
        temporada.nuevaTraduccionJugador(newCod, newNombre)
    # Only persist when the season object reports modifications.
    if temporada.changed and ('tempout' in args) and args.tempout:
        print("Temporada: There were changes!")
        temporada.grabaTemporada(args.tempout)
|
17,875 | e78223e1504c99a93c1fbd7903f978a080e9ede9 | from django.contrib import admin
from django.urls import path
from . import views
# URL configuration for the animeval app (namespaced as 'animeval').
app_name = 'animeval'

urlpatterns = [
    # Profiles
    path('create_profile', views.CreateProfile.as_view(), name = 'create_profile'),
    path('update_profile/<int:pk>', views.UpdateProfile.as_view(), name = 'update_profile'),
    path('home', views.Home.as_view(), name = 'home'),
    path('analysis',views.Analysis.as_view(), name = 'analysis'),
    path('mypage/<int:pk>', views.mypage, name = 'mypage'),
    # Anime browsing
    path('anime_list/<str:char>',views.AnimeList.as_view(), name = 'anime_list'),
    path('anime_detail/<int:pk>',views.AnimeDetail.as_view(), name = 'anime_detail'),
    # Reviews and discussion
    path('create_review', views.create_review, name = 'create_review'),
    path('review_detail/<int:pk>', views.review_detail, name = 'review_detail'),
    # Server-rendered SVG charts
    path('image/<int:pk>',views.get_svg2, name = 'image'),
    path('anime_image/<int:pk>',views.get_svg3, name = 'anime_image'),
    path('trend_image/<int:pk>',views.get_svg, name = 'trend_image'),
    path('create_comment/<int:pk>', views.create_comment, name = 'create_comment'),
    path('create_reply/<int:pk>', views.create_reply, name = 'create_reply'),
    path('delete_review/<int:pk>', views.DeleteReview.as_view(), name = 'delete_review'),
    path('update_review/<int:pk>', views.update_review, name = 'update_review'),
    path('like/<int:review_id>/<user_id>',views.like, name = 'like'),
]
17,876 | 668f63bd443cbd19ba800415c6a1aaa0c14a1c9c | #!/bin/python3
import os
import sys
# Complete the solve function below.
def solve(steps):
    """Return the product of the minima of the two columns of `steps`.

    `steps` is a list of [a, b] pairs; the answer is min(a) * min(b).
    """
    columns = list(zip(*steps))
    return min(columns[0]) * min(columns[1])
if __name__ == '__main__':
    # HackerRank harness: results go to the file named by OUTPUT_PATH.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    n = int(input())
    # Read n lines of space-separated integer pairs.
    steps = []
    for _ in range(n):
        steps.append(list(map(int, input().rstrip().split())))
    result = solve(steps)
    fptr.write(str(result) + '\n')
    fptr.close()
|
17,877 | 96d836e24461a73851b6490bd828f1d6b2665d47 | from django import forms
class HelloForm(forms.Form):
    """Simple demo form collecting a name, mail address and age."""
    name = forms.CharField(label='name')
    # NOTE(review): attribute is misspelled 'mial' (label says 'mail');
    # renaming would change the rendered field name / POST key that
    # templates and views rely on, so it is left as-is.
    mial = forms.CharField(label='mail')
    age = forms.IntegerField(label='age')
|
17,878 | bfac336733da5de7026a36672baa894208b2ebea | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2020/8/17 18:26
# @Author : weiyu
# @File : 146_lru_cache.py
import collections
class LRUCache:
    """Least-recently-used cache backed by an OrderedDict.

    Insertion order doubles as recency order: the first entry is always
    the least recently used and is evicted when capacity is exhausted.
    """

    def __init__(self, capacity):
        self.dic = collections.OrderedDict()
        # Number of free slots remaining before eviction kicks in.
        self.remain = capacity

    def get(self, key):
        """Return the value for `key` (marking it most recent), or -1."""
        if key not in self.dic:
            return -1
        self.dic.move_to_end(key)
        return self.dic[key]

    def put(self, key, value):
        """Insert or update `key`; evict the least-recent entry when full."""
        if key in self.dic:
            # Updating an existing key: remove so reinsertion refreshes recency.
            del self.dic[key]
        elif self.remain > 0:
            self.remain -= 1
        else:
            # Cache full: drop the oldest (least recently used) entry.
            self.dic.popitem(last=False)
        self.dic[key] = value
17,879 | 7a20551c8efdea5c212a725148db04ce46a0cf65 | from gda.jython.commands.GeneralCommands import alias
from Diamond.Utility.BeamlineFunctions import BeamlineFunctionClass
from gda.configuration.properties import LocalProperties
print("-"*100)
print("create 'beamlinefunction' object and commands 'lastscan', 'gettitle', 'settitle', 'getvisit', 'setvisit', 'setdir'")

# Shared helper bound to the current beamline; the thin wrappers below exist
# only so GDA's `alias` can expose them as terminal commands.
beamline = LocalProperties.get(LocalProperties.GDA_BEAMLINE_NAME)
beamlinefunction = BeamlineFunctionClass(beamline);
beamlinefunction.setTerminalLogger();

def lastscan():
    """Return the path of the most recent scan file."""
    return beamlinefunction.getLastScanFile();

def setTitle(title):
    beamlinefunction.setTitle(title);

def settitle(title):
    beamlinefunction.setTitle(title);

def getTitle():
    return beamlinefunction.getTitle();

def gettitle():
    return beamlinefunction.getTitle();

def setVisit(visit):
    # Changing visit also resets the data sub-directory.
    beamlinefunction.setVisit(visit);
    beamlinefunction.setSubDir("");

def setvisit(visit):
    beamlinefunction.setVisit(visit);
    beamlinefunction.setSubDir("");

def getVisit():
    return beamlinefunction.getVisit();

def getvisit():
    return beamlinefunction.getVisit();

def setDir(newSubDir):
    beamlinefunction.setSubDir(newSubDir);

def setdir(newSubDir):
    beamlinefunction.setSubDir(newSubDir);

# Register both CamelCase and lowercase spellings as terminal commands.
alias("lastscan")
alias("getTitle"); alias("gettitle")
alias("setTitle"); alias("settitle")
alias("getVisit"); alias("getvisit")
alias("setVisit"); alias("setvisit")
alias("setDir"); alias("setdir")
class Solution:
    def rotate(self, nums: List[int], k: int) -> None:
        """
        Rotate `nums` to the right by `k` steps, in place.

        Do not return anything, modify nums in-place instead.

        Uses the reversal trick: reverse the whole list, then reverse the
        first k elements and the remaining ones.  Reducing k modulo
        len(nums) first also fixes the IndexError the previous version
        raised whenever k > 2 * len(nums).
        """
        if not nums:
            return
        k %= len(nums)
        nums.reverse()
        nums[:k] = reversed(nums[:k])
        nums[k:] = reversed(nums[k:])
def solveMeFirst(a, b):
    """Return the sum of `a` and `b`."""
    total = a + b
    return total
if __name__ == "__main__":
    # Read two integers from stdin and print their sum.
    num1 = int(input())
    num2 = int(input())
    res = solveMeFirst(num1,num2)
    print(res)
|
17,882 | 4249f45ecbe9afad94e9107c805e6a7276350084 | # prog imeet spisoki vyvodit spisok iz 3 elementov;1go ,3go i vtorogo s konca
# Program holds a list and prints three of its elements:
# the 1st, the 3rd, and the 2nd from the end.
list1 = ['1', '2', '3', '4', '5']
print(list1[0])
print(list1[2])
print(list1[-2])
|
17,883 | 0aa1619e34ffa5afc266bb148141ece944be3103 |
from mixer.backend.django import mixer
from django.core.management import BaseCommand
from mycourse.models import Course, Teacher, Lesson
def create_all():
    """Seed the dev database: 20 mixer-generated rows per model."""
    courses = mixer.cycle(20).blend(Course)
    teachers = mixer.cycle(20).blend(Teacher)
    lessions = mixer.cycle(20).blend(Lesson)


class Command(BaseCommand):
    """manage.py command that populates random fixture data."""
    def handle(self, *args, **options):
        create_all()
17,884 | 6c90d18f0ff212ff9b84af41f1e835b9549cefbc | # Eduardo Nunes
from turtle import *

# 900x850 window; moderate drawing speed and a thick pen.
screen = Screen()
screen.setup(900, 850)
speed(5)
pensize(5)
penup()

def desenhar(raio):
    """Draw the five Olympic rings, each circle with radius `raio`."""
    # Official ring colours: blue, black, red, yellow, green.
    colors = ["#0085C7", "#000000", "#DF0024",
              "#F4C300", "#009F3D"]
    goto(-raio-raio*1.20,0)
    for x in range(5):
        pendown()
        showturtle()
        color(colors[x])
        circle(raio)
        penup()
        hideturtle()
        forward(raio*2.20)
        # After the third (top-row) ring, drop down to the bottom row.
        if x==2:
            goto(-raio * 1.10, -raio)

pendown()
showturtle()
desenhar(50)
hideturtle()
# NOTE(review): turtle.Terminator is an exception type; calling it here only
# constructs and discards an instance -- was done() alone intended? Confirm.
Terminator()
done()
17,885 | 9ff4a71ac5d83161a5cdd90d2c00564f4e8b990b | __version__ = "1.0"
import numpy as np
from numpy import array as arr
from matplotlib.cm import get_cmap
from pandas import DataFrame
def prefix(num):
    """
    Render `num` against the nearest SI prefixes.

    :param num: the number to convert
    :return: a string such as '1.5 Kilo (K)\nor\n0.0015 Mega (M)'; the
        second rendering is omitted at the top of the prefix range.
    """
    # divisors[i] is the scale of prefixes[i]: 1e-24 (yocto) .. 1e24 (Yotta).
    divisors = [1e-24 * pow(10, 3 * x) for x in range(17)]
    prefixes = list(reversed(['Yotta (Y)', 'Zetta (Z)', 'Exa (E)', 'Peta (P)', 'Tera (T)', 'Giga (G)', 'Mega (M)',
                              'Kilo (K)', '', 'Milli (m)', 'Micro ($\mu$)', 'Nano (n)', 'Pico (p)', 'Femto (f)',
                              'Atto (a)', 'Zepto (z)', 'Yocto (y)']))
    exp = np.floor(np.log10(np.abs(num)))
    if exp < 0:
        # Bias negative exponents so int() truncation rounds toward -inf.
        exp -= 3
    expIndex = int(exp / 3) + 8
    # Clamp to the available prefix range.
    expIndex = 0 if expIndex < 0 else expIndex
    expIndex = len(prefixes) - 1 if expIndex >= len(prefixes) else expIndex
    r1 = prefixes[expIndex]
    num1 = num / divisors[expIndex]
    # Bug fix: the old guard `expIndex != len(prefixes)` was always true, so
    # numbers at the top of the range raised IndexError on prefixes[expIndex+1].
    if expIndex + 1 < len(prefixes):
        r2 = prefixes[expIndex + 1]
        num2 = num / divisors[expIndex + 1]
    else:
        num2 = None
    retStr = str(num1) + ' ' + r1
    if num2 is not None:
        retStr += '\nor\n' + str(num2) + ' ' + r2
    return retStr
def what(obj, callingLocals=locals()):
    """
    quick function to print name of input and value.

    If not for the default-Valued callingLocals, the function would always
    get the name as "obj", which is not what I want.

    NOTE(review): the default `locals()` is evaluated once at definition
    time, capturing this module's namespace -- callers in other scopes must
    pass their own locals(), as the docstring instructs.

    :param obj: the object to print info for
    :param callingLocals: don't use, always should be locals().
    """
    name = "name not found"
    # Identity search: find the caller's variable bound to this exact object.
    for k, v in list(callingLocals.items()):
        if v is obj:
            name = k
    if type(obj) == float:
        # Thousands separators for floats.
        print(name, "=", "{:,}".format(obj))
    else:
        print(name, "=", obj)
def transpose(l):
    """
    Transpose a rectangular list of lists.

    :param l: the list to be transposed
    :return: the transposed list (rows become columns)
    """
    return [list(column) for column in zip(*l)]
def getStats(data, printStats=False):
    """
    Compute basic statistics for `data` as a one-row pandas DataFrame.

    :param data: iterable of numbers to analyze
    :param printStats: when True, also print the resulting frame
    :return: DataFrame indexed by 'Stats' with columns Avg/len/min/max/std
    """
    values = list(data)
    frame = DataFrame()
    frame['Avg'] = [np.mean(values)]
    frame['len'] = [len(values)]
    frame['min'] = [min(values)]
    frame['max'] = [max(values)]
    frame['std'] = [np.std(values)]
    # Double transpose relabels the single row as 'Stats' (kept from the
    # original to preserve the resulting column dtypes exactly).
    frame = frame.transpose()
    frame.columns = ['Stats']
    frame = frame.transpose()
    if printStats:
        print(frame)
    return frame
def getColors(num, rgb=False):
    """
    Build an array of plotting colors from the nipy_spectral colormap.

    :param num: number of colormap samples (the first is dropped)
    :param rgb: when True return RGB tuples instead of hex strings
    :return: RGB tuples, or a pair (hex colors, hex color negatives)
    """
    cmap = get_cmap('nipy_spectral', num)
    # Drop the alpha channel and the first (near-black) sample.
    base = [cmap(i)[:-1] for i in range(num)][1:]
    if rgb:
        return base
    # Per-channel complement of each base color.
    inverted = [tuple(arr((1, 1, 1)) - arr(color)) for color in base]

    def _to_hex(color):
        return '#%02x%02x%02x' % tuple(int(255 * channel) for channel in color)

    return [_to_hex(c) for c in base], [_to_hex(c) for c in inverted]
def round_sig(x, sig=3):
    """
    Round a float to `sig` significant digits.

    :param x: the number to round (NaN is treated as 0)
    :param sig: the number of significant digits to keep
    :return: the rounded number as a float (None on a ValueError)
    """
    if np.isnan(x):
        x = 0
    try:
        # Tiny epsilon keeps log10 finite when x == 0.
        digits = sig - int(np.floor(np.log10(abs(x) + 2 * np.finfo(float).eps))) - 1
        return round(x, digits)
    except ValueError:
        print(abs(x))
def getExp(val):
    """Return the base-10 exponent (order of magnitude) of `val` as a float."""
    return np.floor(np.log10(np.abs(val)))
def round_sig_str(x, sig=3):
    """
    Round a float to `sig` significant digits and format it as a string.

    :param x: the number to round (NaN is treated as 0)
    :param sig: significant digits; sig <= 0 yields "0"
    :return: the rounded number as a string (None on a ValueError)
    """
    if sig <= 0:
        return "0"
    if np.isnan(x):
        x = 0
    try:
        rounded = round(x, sig - int(np.floor(np.log10(abs(x) + 2 * np.finfo(float).eps))) - 1)
        # Decimal places needed so the string shows exactly `sig` significant
        # digits; never negative.
        decimals = max(sig - getExp(rounded) - 1, 0)
        return ("{0:." + str(int(decimals)) + "f}").format(rounded)
    except ValueError:
        print(abs(x))
def errString(val, err, precision=3):
    """
    Format `val` with `err` in compact parenthesised-error notation.

    E.g. (1.423, 0.086, 3) -> '1.42(9)'.

    :param val: central value
    :param err: uncertainty on the value
    :param precision: significant digits of the central value
    :return: formatted string
    """
    # How many error digits to show, from the magnitude gap between val and err.
    shown = int(getExp(err) - getExp(val) + precision)
    if shown < 0:
        shown = 0
    # Scale that turns the error into an integer with `shown` digits.
    scale = -getExp(err) + shown - 1
    if scale <= 0:
        scale = 0
    err_digits = int(round(err * 10 ** scale))
    return round_sig_str(val, precision) + '(' + round_sig_str(err_digits, shown) + ')'
|
class Persona:
    """Person demo showing classic getter/setter/deleter via property()."""
    def __init__(self):
        # Name-mangled private attributes.
        self.__nombre = ''
        self.__direccion = ''
        self.__fono = ''
    def setname(self,name):
        print('el setname() ha sido llamado')
        self.__nombre = name
    def getname(self):
        print('el getname() ha sido llamado')
        return self.__nombre
    def delname(self):
        print('el delname () ha sido llamado')
        del self.__nombre
    # property() wires the get, set and delete functions to the `name` attribute.
    name = property(getname,setname, delname)
persona1 = Persona()
# persona1.__name
persona1.name = "Eduardo"    # invokes setname via the property
nombre = persona1.name       # invokes getname
del persona1.name            # invokes delname
# print(nombre)
|
def download_playlist(username, playlist_id, credential_file_location):
    """Return the (artist, title) pairs of a public Spotify playlist.

    :param username: Spotify user owning the playlist
    :param playlist_id: playlist identifier
    :param credential_file_location: JSON file with client_id,
        client_secret and redirect_url keys
    :return: list of (artist_name, track_name) tuples
    """
    from spotipy import Spotify, util
    from json import load
    # import the credentials
    credentials = load(open(credential_file_location))
    # Create an authenticated connection
    scope = 'playlist-modify-public'
    token = util.prompt_for_user_token(username, scope=scope, client_id=credentials['client_id'],
                                       client_secret=credentials['client_secret'],
                                       redirect_uri=credentials['redirect_url'])
    connection = Spotify(auth=token)
    # Mine the songs: first artist name plus track title per playlist entry.
    songs = []
    for track in connection.user_playlist(username, playlist_id, fields="tracks,next")['tracks']['items']:
        songs.append((track['track']['artists'][0]['name'], track['track']['name']))
    return songs
def find_youtube_url(artist, song_title):
    """Search YouTube for '<artist> - <title> album' and return the first
    11-character video id scraped from the results page."""
    import urllib.request
    import urllib.parse
    import re
    query = urllib.parse.urlencode({"search_query" : artist + ' - ' + song_title + ' album'})
    results_page = urllib.request.urlopen("http://www.youtube.com/results?" + query)
    video_ids = re.findall(r'href=\"\/watch\?v=(.{11})" class="yt-uix-sessionlink yt-uix-tile-link', results_page.read().decode())
    return video_ids[0]
def save_youtube_video(url, output_location):
    """Download the audio track of a YouTube video as an MP3 via youtube-dl.

    :param url: YouTube video URL or id accepted by youtube-dl
    :param output_location: output filename template passed to -o
    """
    from subprocess import check_output
    # Security fix: the previous string-built command with shell=True was
    # injectable through `url`/`output_location`; an argument list with the
    # default shell=False passes both values verbatim to youtube-dl.
    check_output(['youtube-dl.exe', '--extract-audio', '--audio-format', 'mp3',
                  url, '-o', output_location])
17,888 | 5c492fe7b883089b4ee083c0bd5abab4d4c94c69 | #-*-coding:GBK -*-
import time,json
from lib.test_api import TestAPI
from tools.read_yaml import ReadYaml
from tools.write_data_txt import Write_Data_txt
class Url_data:
    """Fetch typhoon JSONP feeds and dump the parsed data to local txt files."""

    def __init__(self,sercice):
        # NOTE(review): 'sercice' looks like a typo for 'service'; kept
        # because it is part of the constructor's keyword interface.
        self.txt=Write_Data_txt
        self.sercice=sercice
        self.read_yaml = ReadYaml.read_yaml(self.sercice)[self.sercice]

    def get_data(self,data):
        """Download track data for typhoon id `data`, drop the leading run
        of 'TD' points, and write the result to aqi_data/<typhoon name>."""
        a=0
        list_data=[]
        get_url = TestAPI.get_location(self.read_yaml["atmosphere_track"] % data).text
        # Response is JSONP: typhoon_jsons_view_<id>( ... ); peel the wrapper.
        # NOTE(review): str.strip removes a character *set*, not an exact
        # prefix/suffix -- works only while the payload starts with '{'.
        url_data = get_url.strip("typhoon_jsons_view_%s("%data)
        url_data1 = url_data.strip(");")
        result = json.loads(url_data1)["typhoon"]
        # Skip points while field [3] is 'TD'; keep everything from the
        # first non-'TD' point onwards.
        for i in range(len(result[8])):
            if a ==0:
                if result[8][i][3]=="TD":
                    pass
                elif result[8][i][3] != "TD":
                    a=1
                    list_data.append(result[8][i])
            elif a==1:
                list_data.append(result[8][i])
        result[8]=list_data
        self.txt.write_data('aqi_data/%s'%result[1],'w+',str(result))

    def get_number_list(self):
        """Merge the 2020 and 2021 typhoon lists, sort ascending by field
        [3] (typhoon number), drop the first entry, and write the result
        to aqi_data/typhoon_list."""
        get_url_2020 = TestAPI.get_location(self.read_yaml["atmosphere_list"]%2020).text
        url_data = get_url_2020.strip('typhoon_jsons_list_%s('%2020)
        get_data = url_data.strip(");")
        get_url_2021 = TestAPI.get_location(self.read_yaml["atmosphere_list"]%2021).text
        url_data1 = get_url_2021.strip('typhoon_jsons_list_%s('%2021)
        get_data1 = url_data1.strip(");")
        list_data=json.loads(get_data)["typhoonList"]
        for i in range(len(json.loads(get_data1)["typhoonList"])):
            list_data.append(json.loads(get_data1)["typhoonList"][i])
        # Bubble sort on field [3]; entries whose number is None stay in place.
        for i in range(len(list_data)):
            for j in range(len(list_data)-i-1):
                if list_data[j][3]==None:
                    pass
                elif int(list_data[j][3])>int(list_data[j+1][3]):
                    list_data[j],list_data[j+1]=list_data[j+1],list_data[j]
        list_data.pop(0)
        self.txt.write_data('aqi_data/typhoon_list','w+',str(list_data))
if __name__ == '__main__':
    # Regenerate the merged typhoon number list for the guangzhou service.
    Url_data('guangzhou').get_number_list()
|
17,889 | 802034235c4afb2c2aaade7a4737d98e8f29c45f | """This is my first github project"""
print("Hello World")

# Source and destination lists for sort_teams below.
teams = ['united', 'city', 'forrest']
new_teams = []
def sort_teams(teams, new_teams):
    """Move every element of `teams` onto `new_teams` in reverse order.

    Empties `teams` in place; elements are popped from its tail, so
    `new_teams` receives them back-to-front.  This produces the same result
    as the previous recursive version, but without the bug-prone pattern of
    iterating a list while popping from it.
    """
    while teams:
        new_teams.append(teams.pop())
# Note: sort_teams is never actually called at module level, so both
# lists print in their initial state.
print(f"First list of teams, {teams}")
print(f"Second list of teams, {new_teams}")
17,890 | 479e8861467189a9bd549b81c9b28f7cc71605aa |
from django.shortcuts import *
from django.template import RequestContext
from django.contrib.auth import *
from django.contrib.auth.models import Group, User
from django.core import serializers
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, HttpResponseRedirect
from django.core import serializers
from django.core.urlresolvers import reverse
from django.db.models import Max,Count
from django.core.mail import send_mail
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from cotizar.models import *
from django.core.mail import send_mail
from django.shortcuts import render
from django.views.generic import View
from jwt_auth.compat import json
from jwt_auth.mixins import JSONWebTokenAuthMixin
from django.db import transaction
from django.contrib.auth.hashers import *
from django.core.mail import send_mail
from django.db import connection
from django.utils.six.moves import range
from django.http import StreamingHttpResponse
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import permission_required
from django.views.decorators.csrf import csrf_exempt
from jwt_auth.compat import json
from jwt_auth.mixins import JSONWebTokenAuthMixin
from django.db.models import Count, Min, Sum, Avg
import collections
from datetime import *
from decimal import *
import xlrd
import json
import csv
import simplejson
import xlwt
import requests
import os
import pdfkit
import datetime
import pdfcrowd
from django.shortcuts import render
from django.views.generic import View
from django.http import HttpResponse
from django.contrib.auth.models import Group, User
from jwt_auth.compat import json
from jwt_auth.mixins import JSONWebTokenAuthMixin
import simplejson
from django.views.decorators.csrf import csrf_exempt
import xlrd
from django.views.decorators.csrf import csrf_exempt
from reportlab.pdfgen import canvas
from reportlab.lib.enums import TA_JUSTIFY
from reportlab.lib.pagesizes import letter, landscape
from reportlab.lib.enums import TA_JUSTIFY
from reportlab.lib.pagesizes import letter
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Image
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib.units import inch
from django.http import HttpResponse
def tipousosubir(request):
    """Load tipo/uso id pairs from /home/tipousos.xls into Tipouso rows.

    Column 3 holds the tipo id and column 4 the uso id; both arrive as
    xlrd numeric-cell reprs ("number:N.0"), so the integer part is cut out
    with split().  One Tipouso row is saved per spreadsheet row.
    """
    workbook = xlrd.open_workbook('/home/tipousos.xls')
    sheet = workbook.sheet_by_index(0)
    for row_idx in range(sheet.nrows):
        for col_idx in range(sheet.ncols):
            cell_text = str(sheet.row(row_idx)[col_idx])
            if col_idx == 3:
                # "number:N.0" -> "N"
                tipo_code = cell_text.split(':')[1].split('.')[0]
            if col_idx == 4:
                uso_code = cell_text.split(':')[1].split('.')[0]
                Tipouso(tipo_id=tipo_code, uso_id=uso_code).save()
    return HttpResponse('nologeado', content_type="application/json")
def marcaschinas(request):
    """Bulk-update Marca rows (origin and display order) from /home/chinas.xls.

    Sheet layout (xlrd cell reprs are parsed with split()):
      col 0 -> brand name ("text:u'NAME'"),
      col 1 -> origin text; 'Chino' flags the brand as Chinese,
      col 2 -> numeric display order ("number:N.0").
    Row 0 is skipped (presumably a header — confirm against the workbook).
    The Marca fetched at col 0 is carried across the row's later columns.
    """
    xls_name = '/home/chinas.xls'
    book = xlrd.open_workbook(xls_name)
    sh = book.sheet_by_index(0)
    for rx in range(sh.nrows):
        for col in range(sh.ncols):
            if rx>0:
                if col==0:
                    # Take the quoted part of "text:u'NAME'".
                    marca= str(sh.row(rx)[col]).split("'")[1]
                    marc= Marca.objects.get(name_marca=marca)
                if col==1:
                    origen= str(sh.row(rx)[col]).split("'")[1]
                    if origen=='Chino':
                        marc.origen='Chino'
                if col ==2:
                    # "number:N.0" -> "N"
                    orden = str(sh.row(rx)[col]).split(':')[1].split('.')[0]
                    marc.orden = orden
                    # Single save per row, after all columns were applied.
                    marc.save()
    return HttpResponse('nologeado', content_type="application/json")
def excluidospositiva(request):
    """Mark AutoValor rows listed in /home/excluidos_positiva.xls as 'No Permitido'.

    Column 0 carries the AutoValor primary key as a numeric cell and column 5
    carries the text 'No permitido' for rows that must be flagged.  Row 0 is
    skipped (presumably a header).
    """
    sheet = xlrd.open_workbook('/home/excluidos_positiva.xls').sheet_by_index(0)
    for row_idx in range(1, sheet.nrows):
        record = None
        for col_idx in range(sheet.ncols):
            cell_text = str(sheet.row(row_idx)[col_idx])
            if col_idx == 0:
                # "number:N.0" -> "N"
                record_id = cell_text.split(':')[1].split('.')[0]
                record = AutoValor.objects.get(id=record_id)
            elif col_idx == 5:
                if cell_text.split("'")[1] == 'No permitido':
                    record.permitido = 'No Permitido'
                    record.save()
    return HttpResponse('nologeado', content_type="application/json")
def excluidoshdi(request):
    """Flag AutoValor rows listed in /home/excluidoshdi.xls as excluded for HDI.

    Column 0 carries the AutoValor primary key (numeric cell) and column 5
    an 'x' marker for rows to exclude.  Row 0 is skipped (presumably a header).
    """
    sheet = xlrd.open_workbook('/home/excluidoshdi.xls').sheet_by_index(0)
    for row_idx in range(1, sheet.nrows):
        record = None
        for col_idx in range(sheet.ncols):
            cell_text = str(sheet.row(row_idx)[col_idx])
            if col_idx == 0:
                # "number:N.0" -> "N"
                record_id = cell_text.split(':')[1].split('.')[0]
                record = AutoValor.objects.get(id=record_id)
            elif col_idx == 5:
                if cell_text.split("'")[1] == 'x':
                    record.excluidohdi = 'Si'
                    record.save()
    return HttpResponse('nologeado', content_type="application/json")
def gpsrimacsubir(request):
    """Rebuild Gps rows for insurer id 2 from /home/gpspacificosubir.xls.

    NOTE(review): despite the function name mentioning "rimac", the file read
    and the rows written (id_aseg_id=2) appear to belong to another insurer —
    confirm against the Aseguradora table.  Existing Gps rows for insurer 2
    are deleted first, then column 8 marked with 'x' re-creates them.
    Columns 4-7 for the other insurers were disabled (commented out below).
    """
    xls_name = '/home/gpspacificosubir.xls'
    book = xlrd.open_workbook(xls_name)
    sh = book.sheet_by_index(0)
    # Full refresh: drop insurer 2's rows before re-importing.
    Gps.objects.filter(id_aseg_id=2).delete()
    for rx in range(sh.nrows):
        for col in range(sh.ncols):
            if rx>0:
                if col==0:
                    # "number:N.0" -> "N"; AutoValor fetched here is used at col 8.
                    id= str(sh.row(rx)[col]).split(':')[1].split('.')[0]
                    a = AutoValor.objects.get(id=id)
                # if col==4:
                #     if str(sh.row(rx)[col]).split("'")[1] == 'x':
                #         Gps(id_auto_id=a.id,value='Si',id_aseg_id=4).save()
                # if col==5:
                #     if str(sh.row(rx)[col]).split("'")[1] == 'x':
                #         Gps(id_auto_id=a.id,value='Si',id_aseg_id=5).save()
                # if col==6:
                #     if str(sh.row(rx)[col]).split("'")[1] == 'x':
                #         Gps(id_auto_id=a.id,value='Si',id_aseg_id=3).save()
                # if col==7:
                #     if str(sh.row(rx)[col]).split("'")[1] == 'x':
                #         Gps(id_auto_id=a.id,value='Si',id_aseg_id=1).save()
                if col==8:
                    if str(sh.row(rx)[col]).split("'")[1] == 'x':
                        Gps(id_auto_id=a.id,value='Si',id_aseg_id=2).save()
    return HttpResponse('nologeado', content_type="application/json")
def excluidosrimac(request):
    """Flag AutoValor rows listed in /home/excluidorimac.xls as excluded for Rimac.

    Column 0 holds the AutoValor primary key (numeric cell); column 5 holds an
    'x' marker for rows to exclude.  Row 0 is skipped (presumably a header).
    NOTE: Python 2 print statement used for progress tracing.
    """
    xls_name = '/home/excluidorimac.xls'
    book = xlrd.open_workbook(xls_name)
    sh = book.sheet_by_index(0)
    for rx in range(sh.nrows):
        for col in range(sh.ncols):
            print rx,col
            if rx>0:
                if col==0:
                    # "number:N.0" -> "N"
                    id= str(sh.row(rx)[col]).split(':')[1].split('.')[0]
                    a = AutoValor.objects.get(id=id)
                if col==5:
                    if str(sh.row(rx)[col]).split("'")[1] == 'x':
                        a.excluidorimac = 'Si'
                        a.save()
    return HttpResponse('nologeado', content_type="application/json")
def riesgohdi(request,aseguradora):
    """Rebuild RiesgAseg risk rows for HDI (aseguradora_id=3) from an Excel file.

    Reads /home/riesgosubirhdi.xls.  Per data row: col 0 insurer name (only
    'HDI' rows are processed), col 1 model name (numeric or text cell),
    col 2 brand name, col 3 risk name.  A RiesgAseg row is created when a
    matching AutoValor exists for the brand/model pair.

    NOTE(review): the ``aseguradora`` parameter is ignored — the insurer id
    is hard-coded to 3 in both the delete and the insert; confirm intent.
    Python 2 print statement used for progress tracing.
    """
    xls_name = '/home/riesgosubirhdi.xls'
    book = xlrd.open_workbook(xls_name)
    sh = book.sheet_by_index(0)
    # Full refresh of HDI's risk assignments.
    RiesgAseg.objects.filter(aseguradora_id=3).delete()
    for rx in range(sh.nrows):
        for col in range(sh.ncols):
            print rx,col
            if rx>0:
                if col==0:
                    asegu = str(sh.row(rx)[col]).split("'")[1]
                if asegu =='HDI':
                    if col==1:
                        # Model cells may be numeric ("number:N.0") or text.
                        if str(sh.row(rx)[col]).split(':')[0]=='number':
                            modelo = str(sh.row(rx)[col]).split(':')[1].split('.')[0]
                        else:
                            modelo = str(sh.row(rx)[col]).split("'")[1]
                    if col==2:
                        marca = str(sh.row(rx)[col]).split("'")[1]
                        if AutoValor.objects.filter(id_marca__name_marca=marca,id_modelo__name_model=modelo).count()>0:
                            id_auto_valor = AutoValor.objects.filter(id_marca__name_marca=marca,id_modelo__name_model=modelo).values('id')[0]['id']
                    if col==3:
                        riesgo = str(sh.row(rx)[col]).split("'")[1]
                        if AutoValor.objects.filter(id_marca__name_marca=marca,id_modelo__name_model=modelo).count()>0:
                            id_riesgo = Riesgo.objects.get(tipo_riesgo=riesgo).id_riesgo
                            RiesgAseg(id_model_id=id_auto_valor,aseguradora_id=3,id_riesg_id=id_riesgo).save()
    return HttpResponse('nologeado', content_type="application/json")
def riesgosubir(request,aseguradora):
    """Rebuild RiesgAseg risk rows for Pacifico (aseguradora_id=2) from an Excel file.

    Reads /home/riesgosubir.xls.  Per data row: col 0 insurer name (only
    'Pacifico' rows are processed), col 1 the AutoValor primary key
    (numeric cell), col 6 the risk name ('No Aplica' rows are skipped).

    NOTE(review): the ``aseguradora`` parameter is ignored — the insurer id
    is hard-coded to 2 in both the delete and the insert; confirm intent.
    An older brand/model-lookup variant is kept commented out below.
    Python 2 print statements used for progress tracing.
    """
    xls_name = '/home/riesgosubir.xls'
    print xls_name
    book = xlrd.open_workbook(xls_name)
    sh = book.sheet_by_index(0)
    # Full refresh of Pacifico's risk assignments.
    RiesgAseg.objects.filter(aseguradora_id=2).delete()
    for rx in range(sh.nrows):
        for col in range(sh.ncols):
            print rx,col
            if rx>0:
                if col==0:
                    asegu = str(sh.row(rx)[col]).split("'")[1]
                if asegu =='Pacifico':
                    if col ==1:
                        # "number:N.0" -> "N"
                        id_auto_valor= str(sh.row(rx)[col]).split(':')[1].split('.')[0]
                    if col==6:
                        n_riesgo = str(sh.row(rx)[col]).split("'")[1]
                        if n_riesgo!='No Aplica':
                            id_riesgo = Riesgo.objects.get(tipo_riesgo=n_riesgo).id_riesgo
                            RiesgAseg(aseguradora_id=2,id_model_id=id_auto_valor,id_riesg_id=id_riesgo).save()
    #if col==1:
    # if str(sh.row(rx)[col]).split(':')[0]=='number':
    #     modelo = str(sh.row(rx)[col]).split(':')[1].split('.')[0]
    # else:
    #     modelo = str(sh.row(rx)[col]).split("'")[1]
    # if col==2:
    #     marca = str(sh.row(rx)[col]).split("'")[1]
    #     print marca,modelo
    #     if AutoValor.objects.filter(id_marca__name_marca=marca,id_modelo__name_model=modelo).count()>0:
    #         id_auto_valor = AutoValor.objects.filter(id_marca__name_marca=marca,id_modelo__name_model=modelo).values('id')[0]['id']
    #         print 'id_auto_valor',id_auto_valor
    # if col==3:
    #     riesgo = str(sh.row(rx)[col]).split("'")[1]
    #     if AutoValor.objects.filter(id_marca__name_marca=marca,id_modelo__name_model=modelo).count()>0:
    #         id_riesgo = Riesgo.objects.get(tipo_riesgo=riesgo).id_riesgo
    #         print id_riesgo
    #         RiesgAseg(id_model_id=id_auto_valor,aseguradora_id=aseguradora,id_riesg_id=id_riesgo).save()
    return HttpResponse('nologeado', content_type="application/json")
def uploadfile(request):
    """Ingest an uploaded rates workbook and rebuild TasaAsegur rate rows.

    On POST the uploaded file is stored as a new Lote; the most recently
    stored Lote is then re-opened from /var/www/html with xlrd (so a GET
    re-processes the last upload).  Only the La Positiva (sheet index 3,
    id_aseg_id=1) and Pacifico (sheet index 4, id_aseg_id=2) loaders are
    currently active; each deletes the insurer's existing TasaAsegur rows
    and re-creates them cell by cell, where the spreadsheet column number
    encodes the tariff program / risk / use / vehicle-type combination and
    column 0 carries the vehicle-age band ("number:N.0" cell).
    Cells reading "No aplica" are skipped; rate cells are parsed from the
    xlrd repr "number:X".

    NOTE: Python 2 print statements are used for progress tracing.
    """
    if request.method == 'POST':
        process_file = request.FILES['file']
        Lote(file=process_file).save()
    # Always reload from the newest Lote on record, even when not POSTing.
    id_lote = Lote.objects.all().values('id').order_by('-id')[0]['id']
    process_file = Lote.objects.get(id=id_lote).file
    xls_name = '/var/www/html/'+str(process_file)
    book = xlrd.open_workbook(xls_name)
    # ------------------------------------------------------------------
    # Disabled loaders (previously here, commented out; recover the exact
    # code from version-control history if they need to be re-enabled):
    #   * sheet 0 -> Rimac  (id_aseg_id=5): corporate / 4x4 / pick-up /
    #     Chinese-Indian / taxi / transport / heavy programs, columns 1-21
    #     over rows 5-23, plus two extra row bands (rows 24-42 and 43+)
    #     feeding programs 26 and 25.
    #   * sheet 1 -> Mapfre (id_aseg_id=4): programs 1, 5, 14-22 and 27,
    #     keyed by risk id, Chinese origin, use and vehicle type,
    #     columns 1-37.
    #   * sheet 2 -> HDI    (id_aseg_id=3): program 3 keyed by category
    #     ids 5-19, columns 1-15.
    # ------------------------------------------------------------------
    TasaAsegur.objects.filter(id_aseg_id=1).delete()
    print 'Positiva'
    sh = book.sheet_by_index(3)
    for rx in range(sh.nrows):
        for col in range(sh.ncols):
            if rx>4:
                if col==0:
                    # Age band for this row: "number:N.0" -> "N".
                    ant= str(sh.row(rx)[col]).split(':')[1].split('.')[0]
                if col > 0:
                    print rx,col,str(sh.row(rx)[col])
                    if str(sh.row(rx)[col])!="text:u'No aplica'":
                        valor= str(sh.row(rx)[col]).split('number:')[1]
                        # Personal use (programa 4), keyed by column.
                        if col==1:
                            TasaAsegur(id_aseg_id=1,value=valor,riesgo_id=3,anio=ant,programa_id=4).save()
                        if col==2:
                            TasaAsegur(id_aseg_id=1,value=valor,riesgo_id=2,anio=ant,programa_id=4).save()
                        if col==3:
                            TasaAsegur(id_aseg_id=1,value=valor,riesgo_id=1,anio=ant,programa_id=4).save()
                        if col==4:
                            TasaAsegur(id_aseg_id=1,value=valor,origen='Chino',anio=ant,programa_id=4).save()
                        if col==5:
                            TasaAsegur(id_aseg_id=1,value=valor,timon='Cambiado',anio=ant,programa_id=4).save()
                        if col==6:
                            TasaAsegur(id_aseg_id=1,value=valor,tipo_id=6,anio=ant,programa_id=4).save()
                        if col==7:
                            TasaAsegur(id_aseg_id=1,value=valor,tipo_id=6,origen='Chino',anio=ant,programa_id=4).save()
                        ###Uso Comercial
                        if col == 8:
                            TasaAsegur(id_aseg_id=1,value=valor,id_uso_id=2,riesgo_id=3,programa_id=28,anio=ant).save()
                        if col==9:
                            TasaAsegur(id_aseg_id=1,value=valor,id_uso_id=2,riesgo_id=2,programa_id=28,anio=ant).save()
                        if col==10:
                            TasaAsegur(id_aseg_id=1,value=valor,id_uso_id=2,riesgo_id=1,programa_id=28,anio=ant).save()
                        # NOTE(review): col 11 duplicates col 10's row
                        # (same riesgo_id=1) and col 12 is never handled —
                        # confirm against the workbook layout.
                        if col==11:
                            TasaAsegur(id_aseg_id=1,value=valor,id_uso_id=2,riesgo_id=1,programa_id=28,anio=ant).save()
                        if col==13:
                            TasaAsegur(id_aseg_id=1,value=valor,tipo_id=6,id_uso_id=2,programa_id=28,anio=ant).save()
                        if col==14:
                            TasaAsegur(id_aseg_id=1,value=valor,origen='Chino',id_uso_id=2,programa_id=28,anio=ant).save()
                        #Uso Urbano Taxi
                        if col==15:
                            TasaAsegur(id_aseg_id=1,value=valor,id_uso_id=20,programa_id=29,riesgo_id=3,anio=ant).save()
                            TasaAsegur(id_aseg_id=1,value=valor,id_uso_id=20,programa_id=29,riesgo_id=2,anio=ant).save()
                            TasaAsegur(id_aseg_id=1,value=valor,id_uso_id=20,programa_id=29,riesgo_id=1,anio=ant).save()
                        if col==16:
                            TasaAsegur(id_aseg_id=1,value=valor,id_uso_id=20,origen='Chino',programa_id=29,anio=ant).save()
                        #Uso ubano Publico
                        if col==17:
                            TasaAsegur(id_aseg_id=1,value=valor,id_uso_id=20,tipo_id=4,programa_id=30,anio=ant).save()
                            TasaAsegur(id_aseg_id=1,value=valor,id_uso_id=20,tipo_id=5,programa_id=30,anio=ant).save()
                        #Uso Carga
                        if col==18:
                            TasaAsegur(id_aseg_id=1,value=valor,id_uso_id=17,tipo_id=2,programa_id=31,anio=ant).save()
                            TasaAsegur(id_aseg_id=1,value=valor,id_uso_id=17,tipo_id=21,programa_id=31,anio=ant).save()
                        if col==19:
                            TasaAsegur(id_aseg_id=1,value=valor,id_uso_id=17,tipo_id=20,programa_id=32,anio=ant).save()
                            TasaAsegur(id_aseg_id=1,value=valor,id_uso_id=17,tipo_id=18,programa_id=32,anio=ant).save()
                        if col==20:
                            TasaAsegur(id_aseg_id=1,value=valor,id_uso_id=17,tipo_id=8,programa_id=32,anio=ant).save()
                            TasaAsegur(id_aseg_id=1,value=valor,id_uso_id=17,tipo_id=19,programa_id=32,anio=ant).save()
    TasaAsegur.objects.filter(id_aseg_id=2).delete()
    ### Pacifico
    sh = book.sheet_by_index(4)
    for rx in range(sh.nrows):
        for col in range(sh.ncols):
            if rx>3:
                if int(col)==0:
                    # Age band for this row: "number:N.0" -> "N".
                    ant= str(sh.row(rx)[col]).split(':')[1].split('.')[0]
                if int(col) > 0:
                    if str(sh.row(rx)[col])!="text:u'No aplica'":
                        valor= str(sh.row(rx)[col]).split('number:')[1]
                        print rx,col
                        if col==1:
                            TasaAsegur(id_aseg_id=2,value=valor,riesgo_id=6,anio=ant,programa_id=4).save()
                        if col==2:
                            TasaAsegur(id_aseg_id=2,value=valor,riesgo_id=7,anio=ant,programa_id=4).save()
                        if col==3:
                            TasaAsegur(id_aseg_id=2,value=valor,riesgo_id=2,anio=ant,programa_id=4).save()
                        if col==4:
                            TasaAsegur(id_aseg_id=2,value=valor,riesgo_id=1,anio=ant,programa_id=4).save()
                        if col==5:
                            TasaAsegur(id_aseg_id=2,value=valor,origen='Chino',anio=ant,programa_id=4).save()
                        if col==6:
                            TasaAsegur(id_aseg_id=2,value=valor,tipo_id=6,anio=ant,programa_id=4).save()
    data_json = simplejson.dumps('nn')
    return HttpResponse(data_json, content_type="application/json")
def subirtasas(request):
    """Render the rate-upload page."""
    context = {}
    return render(request, 'subirtasas.html', context)
class Perfil(JSONWebTokenAuthMixin, View):
def get(self, request):
id =request.user.id
print 'ID',id
return HttpResponse(id, content_type="application/json")
def hello(c):
    """Set the default font (Helvetica, 6 pt) on the given ReportLab canvas.

    The previous local ``inch`` import was unused and has been removed.
    A small font is chosen so the quote tables fit on the page.
    """
    c.setFont("Helvetica", 6)
def pdfout(request):
    """Render the current quote comparison as a downloadable PDF.

    Pulls the quote data (rates, coverages, deductibles, services, GPS,
    client, financing) as JSON from the cotizador.hermes.pe web server and
    draws it onto a single ReportLab canvas page: client header, vehicle
    data, GPS row, financing, insurer totals, then three bordered tables
    (coverages, deductibles, services).  Text fields are forced to ASCII
    via encode('ascii','ignore') before drawing.

    NOTE(review): positions are hand-tuned absolute coordinates; the `es`
    variable below is unused; Pacifico coverage/deductible columns are
    intentionally commented out.
    """
    response = HttpResponse(content_type='application/pdf')
    response['Content-Disposition'] = 'attachment; filename="somefilename.pdf"'
    p = canvas.Canvas(response)
    p.setFillColorRGB(0,0,0)
    logo = "/var/www/cotizacion/frontend/img/logo-hermes.png"
    p.drawImage(logo, 20, 800,width=80,height=22,mask='auto');
    hello(p)
    j=50
    # Fetch the JSON snapshots the frontend previously posted (see the
    # recibe* views below, which write these files).
    r = requests.get('http://cotizador.hermes.pe:800/html/cotiza.json')
    c = requests.get('http://cotizador.hermes.pe:800/html/coberturas.json')
    d = requests.get('http://cotizador.hermes.pe:800/html/deducibles.json')
    s = requests.get('http://cotizador.hermes.pe:800/html/servicios.json')
    g = requests.get('http://cotizador.hermes.pe:800/html/gps.json')
    cl = requests.get('http://cotizador.hermes.pe:800/html/cliente.json')
    f = requests.get('http://cotizador.hermes.pe:800/html/financiamiento.json')
    a = json.loads(r.text)
    c= json.loads(c.text)
    d= json.loads(d.text)
    s= json.loads(s.text)
    g= json.loads(g.text)
    cl= json.loads(cl.text)
    cl= cl[0]
    fi= json.loads(f.text)
    #'id_cliente','fullname','email','celular','chose_marca__name_marca','chose_modelo__name_model','chose_tipo__clase','chose_timon__name_tipo','chose_modalid__name_modalidad','chose_uso__uso','chose_anio__anio_antig','chose_ubicl','chose_ubicp','chose_informat','value');
    # Draw things on the PDF canvas. Here's where the PDF generation happens.
    # See the ReportLab documentation for the full list of functionality.
    columna = 780
    es=0
    # --- Client header ---
    p.drawString(20, columna, "Datos Cliente")
    p.drawString(70+j, columna,str(cl['id_cliente'] ))
    p.drawString(120+j*2, columna,'Nombre: '+str(cl['fullname'] ))
    p.drawString(170+j*3, columna,'Email: '+str(cl['email'] ))
    et=2
    # --- Vehicle data band (dark bar with light labels) ---
    p.setFillColorRGB(0,0,0)
    p.rect(20,columna-13*et,7.5*inch,.15*inch, fill=1)
    p.setFillColorRGB(13,10,101)
    p.drawString(22, columna-13*et+3,'Marca: ' )
    p.drawString(70+j, columna-13*et+3,'Modelo: ')
    p.drawString(120+j*2, columna-13*et+3,'Clase: ')
    p.drawString(170+j*3, columna-13*et+3,'Anio: ')
    p.drawString(220+j*4, columna-13*et+3,'Precio: ')
    et=3
    p.setFillColorRGB(0,0,0)
    p.drawString(20, columna-12*et,str(cl['chose_marca__name_marca']))
    p.drawString(70+j, columna-12*et,str(cl['chose_modelo__name_model'] ))
    p.drawString(120+j*2, columna-12*et,str(cl['chose_tipo__clase'] ))
    p.drawString(170+j*3, columna-12*et,str(cl['chose_anio__anio_antig'] ))
    p.drawString(220+j*4, columna-12*et,str(cl['value'] ))
    #et=4
    #p.drawString(70+j, columna-13*et+3,'MAPFRE ')
    #p.drawString(120+j*2, columna-13*et+3,'LA POSITIVA ')
    #p.drawString(170+j*3, columna-13*et+3,'RIMAC: ')
    #p.drawString(220+j*4, columna-13*et+3,'PACIFICO: ')
    #p.drawString(250+j*5, columna-13*et+3,'Marca: ' )
    et = 4
    # --- GPS availability per insurer ---
    #p.drawString(20, columna-15*et, "GPS")
    p.drawString(70+j, columna-13*et,'GPS: '+g['gpsmapfre'] )
    p.drawString(120+j*2, columna-13*et,'GPS: '+g['gpspositiva'] )
    p.drawString(170+j*3, columna-13*et,'GPS: '+g['gpsrimac'] )
    p.drawString(220+j*4, columna-13*et,'GPS: '+g['gpspacifico'])
    p.drawString(250+j*5, columna-13*et,'GPS: '+g['gpshdi'] )
    et=5
    # --- Financing section ---
    p.setFillColorRGB(0,0,0)
    p.rect(20,columna-15*et,7.5*inch,.15*inch, fill=1)
    p.setFillColorRGB(13,10,101)
    p.drawString(250, columna-15*et+3, "Financiamiento")
    # Force the three financing rows to ASCII before drawing.
    fi[0]['financiamiento'] = fi[0]['financiamiento'].encode('ascii','ignore').encode('ascii','replace')
    fi[0]['mapfre'] = fi[0]['mapfre'].encode('ascii','ignore').encode('ascii','replace')
    fi[0]['positiva']= fi[0]['positiva'].encode('ascii','ignore').encode('ascii','replace')
    fi[0]['rimac']= fi[0]['rimac'].encode('ascii','ignore').encode('ascii','replace')
    fi[0]['hdi']= fi[0]['hdi'].encode('ascii','ignore').encode('ascii','replace')
    fi[1]['financiamiento'] = fi[1]['financiamiento'].encode('ascii','ignore').encode('ascii','replace')
    fi[1]['mapfre'] = fi[1]['mapfre'].encode('ascii','ignore').encode('ascii','replace')
    fi[1]['positiva']= fi[1]['positiva'].encode('ascii','ignore').encode('ascii','replace')
    fi[1]['rimac']= fi[1]['rimac'].encode('ascii','ignore').encode('ascii','replace')
    fi[1]['hdi']= fi[1]['hdi'].encode('ascii','ignore').encode('ascii','replace')
    fi[2]['financiamiento'] = fi[2]['financiamiento'].encode('ascii','ignore').encode('ascii','replace')
    fi[2]['mapfre'] = fi[2]['mapfre'].encode('ascii','ignore').encode('ascii','replace')
    fi[2]['positiva']= fi[2]['positiva'].encode('ascii','ignore').encode('ascii','replace')
    fi[2]['rimac']= fi[2]['rimac'].encode('ascii','ignore').encode('ascii','replace')
    fi[2]['hdi']= fi[2]['hdi'].encode('ascii','ignore').encode('ascii','replace')
    et =6
    p.drawString(20, columna-15*et, str(fi[0]['financiamiento']))
    p.drawString(70+j, columna-15*et, str(fi[0]['mapfre']))
    p.drawString(120+j*2, columna-15*et, str(fi[0]['positiva']))
    p.drawString(170+j*3, columna-15*et, str(fi[0]['rimac']))
    p.drawString(220+j*4, columna-15*et, str(fi[0]['pacifico']))
    p.drawString(250+j*5, columna-15*et, str(fi[0]['hdi']))
    et=7
    p.drawString(20, columna-15*et, str(fi[1]['financiamiento']))
    p.drawString(70+j, columna-15*et, str(fi[1]['mapfre']))
    p.drawString(120+j*2, columna-15*et, str(fi[1]['positiva']))
    p.drawString(170+j*3, columna-15*et, str(fi[1]['rimac']))
    p.drawString(220+j*4, columna-15*et, str(fi[1]['pacifico']))
    p.drawString(250+j*5, columna-15*et, str(fi[1]['hdi']))
    et=8
    p.drawString(20, columna-15*et, str(fi[2]['financiamiento'])[0:30])
    p.drawString(70+j, columna-15*et, str(fi[2]['mapfre']))
    p.drawString(120+j*2, columna-15*et, str(fi[2]['positiva']))
    p.drawString(170+j*3, columna-15*et, str(fi[2]['rimac']))
    p.drawString(220+j*4, columna-15*et, str(fi[2]['pacifico']))
    p.drawString(250+j*5, columna-15*et, str(fi[2]['hdi']))
    et=9
    # --- Insurer totals table ---
    p.drawString(20, columna-15*et, "Aseguradoras")
    p.drawString(70+j, columna-15*et, "Mapfre")
    p.drawString(120+j*2, columna-15*et, "La Positiva")
    p.drawString(170+j*3, columna-15*et, "Rimac")
    p.drawString(220+j*4, columna-15*et, "Pacifico")
    p.drawString(250+j*5, columna-15*et, "HDI")
    et =10
    p.drawString(20, columna-15*et, "Tasa")
    p.drawString(70+j, columna-15*et, str(a['tasamapfre']))
    p.drawString(120+j*2, columna-15*et, str(a['tasapositiva']))
    p.drawString(170+j*3, columna-15*et, str(a['tasarimac']))
    p.drawString(220+j*4, columna-15*et, str(a['tasapacifico']))
    p.drawString(250+j*5, columna-15*et, str(a['tasahdi']))
    et=11
    p.drawString(20, columna-15*et, "Prima Neta")
    p.drawString(70+j, columna-15*et, str(a['mapfre']))
    p.drawString(120+j*2, columna-15*et, str(a['positiva']))
    p.drawString(170+j*3, columna-15*et, str(a['rimac']))
    p.drawString(220+j*4, columna-15*et, str(a['pacifico']))
    p.drawString(250+j*5, columna-15*et, str(a['hdi']))
    et=12
    p.drawString(20, columna-15*et, "Prima Comercial")
    p.drawString(70+j, columna-15*et, str(a['mapfresubtotal']))
    p.drawString(120+j*2, columna-15*et, str(a['positivasubtotal']))
    p.drawString(170+j*3, columna-15*et, str(a['rimacsubtotal']))
    p.drawString(220+j*4, columna-15*et, str(a['pacificosubtotal']))
    p.drawString(250+j*5, columna-15*et, str(a['phdisubtotal']))
    et=13
    # NOTE(review): the Mapfre "Total" cell reuses 'mapfresubtotal' while
    # every other insurer uses its '*total' key — confirm intent.
    p.drawString(20, columna-15*et, "Total")
    p.drawString(70+j, columna-15*et, str(a['mapfresubtotal']))
    p.drawString(120+j*2, columna-15*et, str(a['positivatotal']))
    p.drawString(170+j*3, columna-15*et, str(a['rimactotal']))
    p.drawString(220+j*4, columna-15*et, str(a['pacificototal']))
    p.drawString(250+j*5, columna-15*et, str(a['phditotal']))
    columna = columna -40
    # --- Coverages table (18 rows) ---
    p.setFillColorRGB(1,0.54902,0)
    p.rect(20,columna-140,7.5*inch,.15*inch, fill=1)
    p.setFillColorRGB(0,0,0)
    p.drawString(122, columna-140+3, "Coberturas")
    for i in range(0,18):
        c[i]['descripcion'] = c[i]['descripcion'].encode('ascii','ignore').encode('ascii','replace')
        c[i]['mapfre'] = c[i]['mapfre'].encode('ascii','ignore').encode('ascii','replace')
        c[i]['positiva']= c[i]['positiva'].encode('ascii','ignore').encode('ascii','replace')
        c[i]['rimac']= c[i]['rimac'].encode('ascii','ignore').encode('ascii','replace')
        #c[i]['pacifico']= c[i]['pacifico'].encode('ascii','ignore').encode('ascii','replace')
        c[i]['hdi']= c[i]['hdi'].encode('ascii','ignore').encode('ascii','replace')
        p.drawString(20, columna-160-15*i, str(c[i]['descripcion'])[0:30])
        p.drawString(70+50, columna-160-15*i, str(c[i]['mapfre'])[0:30])
        p.drawString(120+50*2, columna-160-15*i, str(c[i]['positiva'])[0:30])
        p.drawString(170+50*3, columna-160-15*i, str(c[i]['rimac'])[0:30])
        #p.drawString(250+50*4, 700-15*i, str(c[i]['pacifico']))
        p.drawString(250+50*5, columna-160-15*i, str(c[i]['hdi'])[0:30])
    # `i`/`k`/`sk` leak out of their loops and offset the next section.
    columna =columna-15*i
    # --- Deductibles table (14 rows) ---
    p.setFillColorRGB(1,0.54902,0)
    p.rect(20,columna-180,7.5*inch,.15*inch, fill=1)
    p.setFillColorRGB(0,0,0)
    p.drawString(22, columna-180+3, "Deducibles")
    for k in range(0,14):
        d[k]['deducible'] = d[k]['deducible'].encode('ascii','ignore').encode('ascii','replace')
        d[k]['mapfre'] = d[k]['mapfre'].encode('ascii','ignore').encode('ascii','replace')
        d[k]['positiva']= d[k]['positiva'].encode('ascii','ignore').encode('ascii','replace')
        d[k]['rimac']= d[k]['rimac'].encode('ascii','ignore').encode('ascii','replace')
        #d[i]['pacifico']= d[i]['pacifico'].encode('ascii','ignore').encode('ascii','replace')
        d[k]['hdi']= d[k]['hdi'].encode('ascii','ignore').encode('ascii','replace')
        p.drawString(20, columna-200-15*k, str(d[k]['deducible'])[0:30])
        p.drawString(70+50, columna-200-15*k, str(d[k]['mapfre'])[0:30])
        p.drawString(120+50*2, columna-200-15*k, str(d[k]['positiva'])[0:30])
        p.drawString(170+50*3, columna-200-15*k, str(d[k]['rimac'])[0:30])
        #p.drawString(250+50*4, 320-15*i, str(d[i]['pacifico']))
        p.drawString(250+50*5, columna-200-15*k, str(d[k]['hdi'])[0:30])
    columna = columna-15*k
    # --- Services table (5 rows) ---
    p.setFillColorRGB(1,0.54902,0)
    p.rect(20,columna-220,7.5*inch,.15*inch, fill=1)
    p.setFillColorRGB(0,0,0)
    p.drawString(22, columna-220+3, "Servicios")
    for sk in range(0,5):
        s[sk]['services'] = s[sk]['services'].encode('ascii','ignore').encode('ascii','replace')
        s[sk]['mapfre'] = s[sk]['mapfre'].encode('ascii','ignore').encode('ascii','replace')
        s[sk]['positiva']= s[sk]['positiva'].encode('ascii','ignore').encode('ascii','replace')
        s[sk]['rimac']= s[sk]['rimac'].encode('ascii','ignore').encode('ascii','replace')
        s[sk]['pacifico']= s[sk]['pacifico'].encode('ascii','ignore').encode('ascii','replace')
        s[sk]['hdi']= s[sk]['hdi'].encode('ascii','ignore').encode('ascii','replace')
        p.drawString(20, columna-240-15*sk, str(s[sk]['services'])[0:30])
        p.drawString(70+50, columna-240-15*sk, str(s[sk]['mapfre'])[0:30])
        p.drawString(120+50*2, columna-240-15*sk, str(s[sk]['positiva'])[0:30])
        p.drawString(170+50*3, columna-240-15*sk, str(s[sk]['rimac'])[0:30])
        p.drawString(220+50*4, columna-240-15*sk, str(s[sk]['pacifico'])[0:25])
        p.drawString(250+50*5, columna-240-15*sk, str(s[sk]['hdi'])[0:30])
    # Close the PDF object cleanly, and we're done.
    p.showPage()
    p.save()
    return response
@csrf_exempt
def recibetasas(request):
    """Persist the posted rates (tasas) JSON payload to the shared web dir.

    The body is round-tripped through json.loads/json.dumps so malformed
    JSON raises instead of being written verbatim.  Fix: the file is now
    opened in a ``with`` block so the handle is closed even if the write
    fails (the original leaked it on error).
    """
    data = json.dumps(json.loads(request.body))
    with open('/var/www/html/cotiza.json', 'w') as f:
        f.write(data)
    return HttpResponse('nologeado', content_type="application/json")
@csrf_exempt
def recibecliente(request):
    """Persist the posted client JSON payload to the shared web dir.

    Fix: file opened via ``with`` so the handle cannot leak on a failed
    write; the loads/dumps round-trip validates the JSON first.
    """
    data = json.dumps(json.loads(request.body))
    with open('/var/www/html/cliente.json', 'w') as f:
        f.write(data)
    return HttpResponse('nologeado', content_type="application/json")
@csrf_exempt
def recibeservicios(request):
    """Persist the posted services JSON payload to the shared web dir.

    Fix: file opened via ``with`` so the handle cannot leak on a failed
    write; the loads/dumps round-trip validates the JSON first.
    """
    data = json.dumps(json.loads(request.body))
    with open('/var/www/html/servicios.json', 'w') as f:
        f.write(data)
    return HttpResponse('nologeado', content_type="application/json")
@csrf_exempt
def marcacsv(request):
    """Export every brand (name + origin) as a spreadsheet attachment."""
    response = HttpResponse(content_type='text/xls')
    response['Content-Disposition'] = 'attachment; filename="Marcas.xls"'
    writer = csv.writer(response)
    writer.writerow(('Marca', 'Origen'))
    for brand in Marca.objects.all():
        writer.writerow((brand.name_marca, brand.origen))
    return response
@csrf_exempt
def gpscsv(request, aseguradora):
    """Export the GPS requirement table for insurer *aseguradora*.

    Fix: the header row was missing the leading 'Id' column even though
    every data row starts with the AutoValor id (4 headers vs 5 fields).
    The stray per-row debug print was also removed.
    """
    rows = Gps.objects.filter(id_aseg=aseguradora)
    response = HttpResponse(content_type='text/xls')
    response['Content-Disposition'] = 'attachment; filename="Gps.xls"'
    writer = csv.writer(response)
    writer.writerow(('Id', 'Marca', 'Modelo', 'Tipo', 'Value'))
    for r in rows:
        writer.writerow((r.id_auto_id,
                         r.id_auto.id_marca.name_marca,
                         r.id_auto.id_modelo.name_model,
                         r.id_auto.id_tipo.clase,
                         r.value))
    return response
@csrf_exempt
def riesgocsv(request, aseguradora):
    """Export every RiesgAseg row for insurer *aseguradora* as a spreadsheet.

    Fixes: the original header tuple listed 'Riesgo' twice (6 columns)
    while each data row has only 5 fields; the duplicate column is dropped.
    The unused ``programa`` lookup, the dead commented-out code, and the
    local rebinding that shadowed the *aseguradora* parameter are removed.
    """
    filas = RiesgAseg.objects.filter(aseguradora=aseguradora)
    response = HttpResponse(content_type='text/xls')
    response['Content-Disposition'] = 'attachment; filename="Riesgos.xls"'
    writer = csv.writer(response)
    writer.writerow(('Aseguradora', 'Id', 'Modelo', 'Marca', 'Riesgo'))
    for r in filas:
        modelo = None
        marca = None
        riesgo = None
        nombre_aseg = None
        if r.aseguradora_id:
            nombre_aseg = r.aseguradora.name_asegurad
        if r.id_model_id:
            # Guard against dangling foreign keys before dereferencing.
            if AutoValor.objects.filter(id=r.id_model_id).count() > 0:
                modelo = r.id_model.id_modelo.name_model
                if Marca.objects.filter(id_marca=r.id_model.id_marca_id).count() > 0:
                    marca = r.id_model.id_marca.name_marca
        if r.id_riesg_id:
            riesgo = r.id_riesg.tipo_riesgo
        writer.writerow((nombre_aseg, r.id_model_id, modelo, marca, riesgo))
    return response
@csrf_exempt
def coberturacsv(request, aseguradora):
    """Export every CobertAsegur row (pipe-separated fields) as a download.

    NOTE(review): the *aseguradora* argument is accepted but never used to
    filter, unlike deduciblecsv/serviciocsv — confirm whether intentional.
    """
    response = HttpResponse(content_type='text/xls')
    response['Content-Disposition'] = 'attachment; filename="Coberturas.csv"'
    writer = csv.writer(response)
    writer.writerow(('Cobertura', 'Programa'))
    for row in CobertAsegur.objects.all():
        print(row.id_cob_id)
        if row.id_cob_id:
            # Same ASCII-scrubbing pattern used throughout this module.
            row.id_cob.descripcion = row.id_cob.descripcion.encode('ascii', 'ignore')
            row.id_cob.descripcion = row.id_cob.descripcion.encode('ascii', 'replace')
            row.value = row.value.encode('ascii', 'ignore')
            row.value = row.value.encode('ascii', 'replace')
            writer.writerow((row.id_cob_id, '|', row.id_cob.descripcion, '|',
                             row.programa.program, '|', row.tipo.clase, '|',
                             row.value))
    return response
@csrf_exempt
def deduciblecsv(request, aseguradora):
    """Export the deductibles configured for insurer *aseguradora*,
    pipe-separated, as a download."""
    response = HttpResponse(content_type='text/xls')
    response['Content-Disposition'] = 'attachment; filename="Coberturas.csv"'
    writer = csv.writer(response)
    writer.writerow(('Cobertura', 'Programa'))
    for row in DeducAsegur.objects.filter(id_aseg=aseguradora):
        if row.id_deduc_id:
            # Same ASCII-scrubbing pattern used throughout this module.
            row.id_deduc.deducible = row.id_deduc.deducible.encode('ascii', 'ignore')
            row.id_deduc.deducible = row.id_deduc.deducible.encode('ascii', 'replace')
            row.value = row.value.encode('ascii', 'ignore')
            row.value = row.value.encode('ascii', 'replace')
            writer.writerow((row.id_deduc_id, '|', row.id_deduc.deducible, '|',
                             row.programa.program, '|', row.tipo.clase, '|',
                             row.riesgo.tipo_riesgo, '|', row.value))
    return response
@csrf_exempt
def serviciocsv(request, aseguradora):
    """Export the services configured for insurer *aseguradora*,
    pipe-separated, as a download."""
    response = HttpResponse(content_type='text/xls')
    response['Content-Disposition'] = 'attachment; filename="Coberturas.csv"'
    writer = csv.writer(response)
    writer.writerow(('Cobertura', 'Programa'))
    for row in ServicAsegur.objects.filter(id_aseg=aseguradora):
        if row.id_serv_id:
            # Same ASCII-scrubbing pattern used throughout this module.
            row.id_serv.services = row.id_serv.services.encode('ascii', 'ignore')
            row.id_serv.services = row.id_serv.services.encode('ascii', 'replace')
            row.value = row.value.encode('ascii', 'ignore')
            row.value = row.value.encode('ascii', 'replace')
            writer.writerow((row.id_serv_id, '|', row.id_serv.services, '|',
                             row.id_program.program, '|', row.tipo.clase, '|',
                             row.value))
    return response
@csrf_exempt
def tasascsv(request, aseguradora):
    """Export program-19 rates for insurer *aseguradora* as a spreadsheet.

    Fixes vs. the original: the insurer name was emitted twice per row
    (13 fields against a 12-column header) and the header misspelled
    'Caegoria'.  The repeated encode('ignore')/encode('replace') pairs are
    factored into a helper.
    """
    filas = TasaAsegur.objects.filter(id_aseg=aseguradora, programa_id=19)
    response = HttpResponse(content_type='text/xls')
    response['Content-Disposition'] = 'attachment; filename="Tasas.xls"'
    writer = csv.writer(response)
    writer.writerow(('Aseguradora', 'Programa', 'Riesgo', 'Uso', 'Tipo',
                     'Marca', 'Modelo', 'Categoria', 'Origen', 'Ubicacion',
                     'Anio', 'Valor'))

    def clean(text):
        # ASCII scrub used file-wide: drop non-ASCII, then '?'-replace.
        return text.encode('ascii', 'ignore').encode('ascii', 'replace')

    for t in filas:
        riesgo = uso = programa = tipo = modelo = categoria = marca = None
        if t.riesgo_id:
            riesgo = clean(t.riesgo.tipo_riesgo)
        if t.id_uso_id:
            uso = clean(t.id_uso.uso)
        if t.programa_id:
            programa = clean(t.programa.program)
        if t.tipo_id:
            tipo = clean(t.tipo.clase)
        # Only dereference the model FK when the AutoValor row still exists.
        if AutoValor.objects.filter(id=t.modelo_id).count() > 0:
            if t.modelo_id:
                marca = clean(t.modelo.id_marca.name_marca)
                modelo = clean(t.modelo.id_modelo.name_model)
        if t.categoria_id:
            categoria = clean(t.categoria.categoria)
        writer.writerow((t.id_aseg.name_asegurad, programa, riesgo, uso, tipo,
                         marca, modelo, categoria, t.origen, t.ubicacion,
                         t.anio, t.value))
    return response
@csrf_exempt
def modeloscsv(request):
    """Export every AutoValor row (id, model, brand, class, traction)
    as a spreadsheet attachment."""
    rows = AutoValor.objects.all().values(
        'id', 'id_modelo__name_model', 'id_marca__name_marca',
        'id_tipo__clase', 'traccion').order_by('id_tipo__clase')
    response = HttpResponse(content_type='text/xls')
    response['Content-Disposition'] = 'attachment; filename="Modelos.xls"'
    writer = csv.writer(response)
    writer.writerow(('id', 'modelo', 'marca', 'clase', 'traccion'))

    def clean(text):
        # ASCII scrub used file-wide: drop non-ASCII, then '?'-replace.
        return text.encode('ascii', 'ignore').encode('ascii', 'replace')

    for row in rows:
        writer.writerow((row['id'],
                         clean(row['id_modelo__name_model']),
                         clean(row['id_marca__name_marca']),
                         clean(row['id_tipo__clase']),
                         row['traccion']))
    return response
@csrf_exempt
def corrige(request):
    """One-off cleanup: re-encode every Clase.clase down to plain ASCII.

    Fix: the bare no-op expression statement ``m.clase`` from the original
    is removed; behavior is otherwise unchanged.
    """
    for m in Clase.objects.all():
        m.clase = m.clase.encode('ascii', 'ignore').encode('ascii', 'replace')
        m.save()
    return HttpResponse('nologeado', content_type="application/json")
@csrf_exempt
def recibecoberturas(request):
    """Persist the posted coverages JSON payload to the shared web dir.

    Fix: file opened via ``with`` so the handle cannot leak on a failed
    write; the loads/dumps round-trip validates the JSON first.
    """
    data = json.dumps(json.loads(request.body))
    with open('/var/www/html/coberturas.json', 'w') as f:
        f.write(data)
    return HttpResponse('nologeado', content_type="application/json")
@csrf_exempt
def recibegps(request):
    """Persist the posted GPS JSON payload to the shared web dir.

    Fix: file opened via ``with`` so the handle cannot leak on a failed
    write; the loads/dumps round-trip validates the JSON first.
    """
    data = json.dumps(json.loads(request.body))
    with open('/var/www/html/gps.json', 'w') as f:
        f.write(data)
    return HttpResponse('nologeado', content_type="application/json")
@csrf_exempt
def recibefinanciamiento(request):
    """Persist the posted financing JSON payload to the shared web dir.

    Fix: file opened via ``with`` so the handle cannot leak on a failed
    write; the loads/dumps round-trip validates the JSON first.
    """
    data = json.dumps(json.loads(request.body))
    with open('/var/www/html/financiamiento.json', 'w') as f:
        f.write(data)
    return HttpResponse('nologeado', content_type="application/json")
@csrf_exempt
def recibededucibles(request):
    """Persist the posted deductibles JSON payload to the shared web dir.

    Fix: file opened via ``with`` so the handle cannot leak on a failed
    write; the loads/dumps round-trip validates the JSON first.
    """
    data = json.dumps(json.loads(request.body))
    with open('/var/www/html/deducibles.json', 'w') as f:
        f.write(data)
    return HttpResponse('nologeado', content_type="application/json")
@csrf_exempt
def subir(request):
    """Send the 'event dispatched' notification e-mail."""
    recipients = ['joelunmsm@gmail.com']
    send_mail('Hermes', 'Evento Enviado', 'cotiza@hermes.pe',
              recipients, fail_silently=False)
    return HttpResponse('mmmmmmmmm', content_type="application/json")
def generapdf(request):
    """One-off data fix: remap TasaAsegur.anio from value v to 28 - v.

    Fix: the original looped ``filter(anio=28-i).update(anio=i)`` for
    i in 0..26, so rows rewritten in an early iteration (e.g. 15 -> 13 at
    i=13) were matched again later (i=15 filters anio=13) and corrupted.
    All pk sets are snapshotted before any update so each row is rewritten
    exactly once.  (Despite the name, this view touches no PDFs.)
    """
    buckets = {}
    for i in range(0, 27):
        buckets[i] = list(
            TasaAsegur.objects.filter(anio=28 - i).values_list('pk', flat=True))
    for i, pks in buckets.items():
        TasaAsegur.objects.filter(pk__in=pks).update(anio=0 + i)
    return HttpResponse('total', content_type="application/json")
@csrf_exempt
def estadologin(request):
    """Report session state: 'logeado' when authenticated, else 'nologeado'."""
    status = 'logeado' if request.user.is_authenticated() else 'nologeado'
    return HttpResponse(status, content_type="application/json")
@csrf_exempt
def exportarcobertura(request, data):
    """Export coverages as CSV for the programs/insurers encoded in *data*.

    *data* packs two 'x'-separated id lists joined by a literal 'a'
    (e.g. '1x2a3x4' -> programs [1, 2], insurers [3, 4]).  Each CSV row is
    written as a single cell holding the value tuple — this matches the
    original output format.
    """
    programa_ids = str(data).split('a')[0].split('x')
    aseg_ids = str(data).split('a')[1].split('x')
    filas = CobertAsegur.objects.filter(
        programa_id__in=programa_ids, id_aseg_id__in=aseg_ids).values(
        'id', 'tipo__clase', 'antigued', 'programa__program',
        'id_cob__descripcion', 'id_aseg__name_asegurad', 'id_uso__uso',
        'modalidad__name_modalidad', 'value').order_by('-id')
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="Coberturas.csv"'
    writer = csv.writer(response)
    for x in filas:
        fila = (x['id'], x['tipo__clase'], x['antigued'],
                x['programa__program'], x['id_cob__descripcion'],
                x['id_aseg__name_asegurad'], x['id_uso__uso'],
                x['modalidad__name_modalidad'], x['value'])
        writer.writerow([fila])
    return response
@csrf_exempt
def exportarriesgo(request,data):
    """Export every AutoValor with its risk class for insurer id *data* as CSV.

    Each CSV row is written as a one-element list, so the whole tuple lands
    in a single cell — NOTE(review): the other csv exports pass the tuple
    directly; confirm this single-cell format is intended.
    """
    auto = AutoValor.objects.all().values('id','id_modelo','id_modelo__name_model','id_marca__name_marca')
    # len(auto) evaluates the queryset; the indexed access below appears to
    # rely on the evaluated result cache so that the 'riesgo'/'aseguradora'
    # keys added here persist until the write loop — TODO confirm this holds
    # on the deployed Django version.
    for i in range(len(auto)):
        auto[i]['riesgo'] = ''
        auto[i]['aseguradora'] = ''
        # Risk classification of this model for the given insurer; the same
        # filter is issued twice (count + fetch) per model.
        if RiesgAseg.objects.filter(aseguradora_id=data,id_model_id=auto[i]['id_modelo']).values('id_riesg__tipo_riesgo').count()>0:
            auto[i]['riesgo'] = RiesgAseg.objects.filter(aseguradora_id=data,id_model_id=auto[i]['id_modelo']).values('id_riesg__tipo_riesgo')[0]['id_riesg__tipo_riesgo']
            auto[i]['aseguradora'] = RiesgAseg.objects.filter(aseguradora_id=data).values('aseguradora__name_asegurad')[0]['aseguradora__name_asegurad']
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="Riesgos.csv"'
    writer = csv.writer(response)
    for a in auto:
        datos = a['id'],a['riesgo'],a['id_modelo__name_model'],a['id_marca__name_marca'],a['aseguradora']
        writer.writerow([datos])
    return response
@csrf_exempt
def exportardeducible(request, data):
    """Export deductibles as CSV for the deductible/insurer ids in *data*.

    *data* packs two 'x'-separated id lists joined by a literal 'a'.
    Each CSV row is written as one cell holding the value tuple (the
    original single-cell format, with 'value' repeated, is kept).
    """
    deduc_ids = str(data).split('a')[0].split('x')
    aseg_ids = str(data).split('a')[1].split('x')
    filas = DeducAsegur.objects.filter(
        id_deduc_id__in=deduc_ids, id_aseg_id__in=aseg_ids).values(
        'riesgo__tipo_riesgo', 'programa__program', 'id_deduc__deducible',
        'id_aseg__name_asegurad', 'id_uso__uso', 'tipo__clase', 'value',
        'modalidad__name_modalidad', 'value').order_by('-id')
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="Deducible.csv"'
    writer = csv.writer(response)
    for x in filas:
        fila = (x['riesgo__tipo_riesgo'], x['programa__program'],
                x['id_deduc__deducible'], x['id_aseg__name_asegurad'],
                x['id_uso__uso'], x['tipo__clase'], x['value'],
                x['modalidad__name_modalidad'], x['value'])
        writer.writerow([fila])
    return response
@csrf_exempt
def tasaadmin(request):
    """Build per-insurer premium quotes from admin-supplied rates.

    Expected POST body: {"monto": {"precio": <insured value>},
    "data": {"tasahdi": ..., "tasarimac": ..., "tasapositiva": ...,
    "tasapacifico": ..., "tasamapfre": ...}} with rates in percent.
    For each insurer the net premium, emission-fee subtotal and
    IGV-inclusive total are computed and returned as a JSON list.
    NOTE(review): only the POST branch returns a response — a non-POST
    request falls through and returns None; confirm callers always POST.
    """
    if request.method == 'POST':
        # Emission fee (%) and IGV tax (%) from the singleton Parametros row.
        demision = Parametros.objects.get(id=1).d_emision
        igv = Parametros.objects.get(id=1).igv
        monto = json.loads(request.body)['monto']['precio']
        data = json.loads(request.body)['data']
        tasahdi = data['tasahdi']
        tasarimac = data['tasarimac']
        tasapositiva = data['tasapositiva']
        tasapacifico = data['tasapacifico']
        tasamapfre = data['tasamapfre']
        aseguradora = Aseguradora.objects.all().values('id_asegurad','name_asegurad').order_by('name_asegurad')
        # The indexed writes below appear to rely on the queryset's result
        # cache (populated by len()) so the added keys survive until
        # serialization — TODO confirm on the deployed Django version.
        for i in range(len(aseguradora)):
            # id 3: HDI (inferred from the key names — confirm).
            if aseguradora[i]['id_asegurad'] == 3:
                aseguradora[i]['tasahdi'] = round(float(tasahdi),2)
                aseguradora[i]['hdi'] = round(float(tasahdi)/100*float(monto),2)
                aseguradora[i]['phdisubtotal'] = round((100+float(demision))*float(aseguradora[i]['hdi'])/100,2)
                aseguradora[i]['phditotal'] = round((100+float(igv))*aseguradora[i]['phdisubtotal']/100,2)
            # id 1: Positiva.
            if aseguradora[i]['id_asegurad'] == 1:
                aseguradora[i]['tasapositiva'] = round(float(tasapositiva),2)
                aseguradora[i]['positiva'] = round(float(tasapositiva)/100*float(monto),2)
                aseguradora[i]['positivasubtotal'] = round((100+float(demision))*aseguradora[i]['positiva']/100,2)
                aseguradora[i]['positivatotal'] = round((100+float(igv))*aseguradora[i]['positivasubtotal']/100,2)
            # id 2: Pacifico.
            if aseguradora[i]['id_asegurad'] == 2:
                aseguradora[i]['tasapacifico'] = round(float(tasapacifico),2)
                aseguradora[i]['pacifico'] = round(float(tasapacifico)/100*float(monto),2)
                aseguradora[i]['pacificosubtotal'] = round((100+float(demision))*aseguradora[i]['pacifico']/100,2)
                # NOTE(review): int(igv) here but float(igv) everywhere else —
                # truncates a fractional IGV; confirm intended.
                aseguradora[i]['pacificototal'] = round((100+int(igv))*aseguradora[i]['pacificosubtotal']/100,2)
            else:
                # NOTE(review): this else pairs with the id==2 test, so EVERY
                # insurer other than Pacifico also gets these placeholder
                # 'pacifico' keys — presumably so the UI shows "consult the
                # URL"; verify this is intentional.
                aseguradora[i]['pacifico'] = 'Consultar en la URL:'
                aseguradora[i]['pacificosubtotal'] = 'http://pacifico.com'
                aseguradora[i]['pacificototal'] = ''
            # id 4: Mapfre.
            if aseguradora[i]['id_asegurad'] == 4:
                aseguradora[i]['tasamapfre'] = round(float(tasamapfre),2)
                aseguradora[i]['mapfre'] = round(float(tasamapfre)/100*float(monto),2)
                aseguradora[i]['mapfresubtotal'] = round((100+float(demision))*aseguradora[i]['mapfre']/100,2)
                aseguradora[i]['mapfretotal'] = round((100+float(igv))*aseguradora[i]['mapfresubtotal']/100,2)
            # id 5: Rimac.
            if aseguradora[i]['id_asegurad'] == 5:
                aseguradora[i]['tasarimac'] = round(float(tasarimac),2)
                # Bajo Riesgo
                aseguradora[i]['rimac'] = round(float(tasarimac)/100*float(monto),2)
                aseguradora[i]['rimacsubtotal'] = round((100+float(demision))*aseguradora[i]['rimac']/100,2)
                aseguradora[i]['rimactotal'] = round((100+float(igv))*aseguradora[i]['rimacsubtotal']/100,2)
        data_dict = ValuesQuerySetToDict(aseguradora)
        data = json.dumps(data_dict)
        return HttpResponse(data, content_type="application/json")
@csrf_exempt
def logearse(request):
    """Authenticate the posted username/password and open a session.

    Responses: 'logeado' (already or newly logged in), 'noautorizado'
    (valid credentials but inactive account), 'nologeado' (bad credentials
    or non-POST request).
    Fix: the request body was json.loads()-ed three times; it is now
    parsed once.
    """
    if request.user.is_authenticated():
        return HttpResponse('logeado', content_type="application/json")
    if request.method == 'POST':
        credentials = json.loads(request.body)
        user = authenticate(username=credentials['username'],
                            password=credentials['password'])
        if user is not None:
            if user.is_active:
                login(request, user)
                return HttpResponse('logeado', content_type="application/json")
            return HttpResponse('noautorizado', content_type="application/json")
    return HttpResponse('nologeado', content_type="application/json")
@csrf_exempt
def parametros(request):
    """Create a new Parametros row from the posted JSON (igv, demision).

    Bug fix: the original indexed ``request.body`` (a raw string) with
    dict keys, which raised TypeError on every call; the body is now
    decoded with json.loads first, matching addigv().
    """
    data = json.loads(request.body)
    igv = data['igv']
    demision = data['demision']
    Parametros(igv=igv, d_emision=demision).save()
    return HttpResponse('data', content_type="application/json")
# NOTE(review): orphaned model-field declarations at module level — there is
# no enclosing models.Model class here, so these assignments only create
# unused module globals.  They mirror fields of the Clientes model queried by
# customers(); presumably left over from a copy/paste — confirm and remove.
fullname = models.CharField(max_length=100, blank=True)
email = models.CharField(max_length=50, blank=True)
chose_informat = models.IntegerField()
@csrf_exempt
def customers(request):
    """List every captured customer/lead as JSON."""
    rows = Clientes.objects.all().values(
        'id_cliente', 'fullname', 'email', 'chose_informat').order_by('id_cliente')
    return HttpResponse(json.dumps(ValuesQuerySetToDict(rows)),
                        content_type="application/json")
@csrf_exempt
def listparametros(request):
    """List global parameters (IGV, emission fee) as JSON, newest first."""
    rows = Parametros.objects.all().values('igv', 'd_emision').order_by('-id')
    return HttpResponse(json.dumps(ValuesQuerySetToDict(rows)),
                        content_type="application/json")
@csrf_exempt
def listprimas(request):
    """List minimum premiums per insurer/risk/program as JSON, newest first."""
    rows = Primas.objects.all().values(
        'id', 'aseguradora__name_asegurad', 'riesgo__tipo_riesgo',
        'programa__program', 'primaminima').order_by('-id')
    return HttpResponse(json.dumps(ValuesQuerySetToDict(rows)),
                        content_type="application/json")
@csrf_exempt
def addigv(request):
    """Update the singleton Parametros row (id=1) with the posted igv/demision.

    Echoes the decoded payload back in the response body (as the original
    did, handing the dict straight to HttpResponse).
    """
    payload = json.loads(request.body)
    params = Parametros.objects.get(id=1)
    params.igv = payload['igv']
    params.d_emision = payload['demision']
    params.save()
    return HttpResponse(payload, content_type="application/json")
@csrf_exempt
def asegprogram(request,aseguradora,modelo,uso,marca,tipo,precio):
    """Return the insurance programs *aseguradora* offers for a vehicle.

    The vehicle is identified by model/brand/class ids plus usage id and
    price; insurer-specific eligibility rules (ids 5, 1 and 4 — Rimac,
    Positiva and Mapfre per the branch comments) narrow the program list.
    Output: JSON list of {id_prog, id_prog__program}.
    NOTE(review): most list.remove() calls are unguarded — if a branch
    removes an id another branch already removed (e.g. a Chinese Pick-UP
    in the id==5 path removes 2/25/26 twice), ValueError is raised; only
    the id-7 removal is membership-checked.  Confirm the intended nesting.
    """
    # Default: every program configured for this insurer; the branches below
    # may replace this with a filtered list.
    aseg = ProgAseg.objects.filter(id_aseg_id=aseguradora).values('id_prog','id_prog__program').order_by('id_prog__program')
    tiponame = AutoValor.objects.get(id_modelo_id=modelo,id_marca_id=marca,id_tipo_id=tipo).id_tipo.clase
    id_auto_valor = AutoValor.objects.get(id_modelo_id=modelo,id_marca_id=marca,id_tipo_id=tipo).id
    origenname = Marca.objects.get(id_marca=marca).origen
    usoname = Uso.objects.get(id_uso=uso).uso
    marcaname = Marca.objects.get(id_marca=marca).name_marca
    restringido =False
    if int(aseguradora)==5:
        progrimac =[]
        traccion = AutoValor.objects.filter(id_modelo_id=modelo,id_marca_id=marca,id_tipo_id=tipo,traccion=1).count()
        if traccion>0 and usoname=='Particular':
            progrimac.append(7)
        if int(uso) == 1: #Particular
            progrimac.append(2)
            progrimac.append(25)
            progrimac.append(26)
            if tiponame=='Pick-UP':
                progrimac.append(6)
                progrimac.remove(2)
                progrimac.remove(25)
                progrimac.remove(26)
            # NOTE(review): uso cannot be both 1 and 20, so this branch is
            # unreachable as nested here; it may have been meant as a
            # sibling of the uso==1 test.
            if int(uso) == 20: # Taxis
                progrimac.append(11)
                progrimac.remove(2)
                progrimac.remove(25)
                progrimac.remove(26)
            if origenname =='Chino':
                progrimac.append(10)
                progrimac.remove(2)
                progrimac.remove(25)
                progrimac.remove(26)
            if usoname=='Taxi/Publico':
                progrimac.append(11)
                progrimac.remove(2)
                progrimac.remove(25)
                progrimac.remove(26)
            if usoname=='Transporte de Personal, Turistico, Escolar':
                progrimac.append(12)
                progrimac.remove(2)
                progrimac.remove(25)
                progrimac.remove(26)
            if usoname =='Panel' and origenname=='Chino':
                progrimac.remove(12)
                progrimac.remove(2)
                progrimac.remove(25)
                progrimac.remove(26)
            # Models explicitly blacklisted for Rimac.
            if AutoValor.objects.filter(id=id_auto_valor,excluidorimac='Si').count()>0:
                restringido =True
            # NOTE(review): precio >= 75000 AND <= 4000 can never be true —
            # 'or' (too expensive or too cheap) was probably intended.
            if (int(precio) >= 75000 and int(precio)<=4000) or origenname=='Chino' or restringido==True or (tiponame=='Van' and usoname=='Particular'):
                progrimac.remove(2)
                progrimac.remove(25)
                progrimac.remove(26)
            if int(precio)>60000 and tiponame=='Pick-UP':
                progrimac.remove(6)
            # NOTE(review): same impossible 'and' price range as above.
            if int(precio)>60000 and int(precio)<=4000 or origenname=='Chino' :
                if 7 in progrimac:
                    progrimac.remove(7)
            if (tiponame =='Microbus' or tiponame=='Omnibus' or tiponame=='Camion' or usoname=='Taxi/Publico' or int(precio)>=60000) and origenname=='Chino':
                progrimac.remove(10)
        aseg = ProgAseg.objects.filter(id_aseg_id=aseguradora,id_prog_id__in=progrimac).values('id_prog','id_prog__program')
    if int(aseguradora)==1: #Positiva
        prog =[]
        prog.append(4)
        #Comercial
        if usoname=='Comercial':
            prog.append(28)
            prog.remove(4)
        if usoname =='Taxi/Publico':
            prog.append(29)
            prog.remove(4)
        if usoname=='Carga':
            prog.append(31)
            prog.remove(4)
        if usoname=='Transporte de Personal, Turistico, Escolar':
            prog.append(30)
            prog.remove(4)
        aseg = ProgAseg.objects.filter(id_aseg_id=aseguradora,id_prog_id__in=prog).values('id_prog','id_prog__program')
    if int(aseguradora)==4: #Mapfre
        prog = []
        #### Dorada
        if tiponame=='Auto' and usoname=='Particular':
            prog.append(1)
            prog.append(24)
        if tiponame=='Rural' and usoname=='Particular':
            prog.append(1)
            prog.append(24)
        if origenname=='Chino':
            prog.append(1)
            prog.append(24)
        # NOTE(review): origenname holds a brand origin (e.g. 'Chino'), so
        # comparing it to the vehicle class 'Pick-UP' can never match —
        # tiponame was probably intended (here and in the duplicate below).
        if origenname=='Pick-UP':
            prog.remove(1)
            prog.remove(24)
        ## Dorada Economica
        if marcaname == 'TOYOTA' or marcaname =='NISSAN' or marcaname=='KIA' or marcaname=='CHEVROLET' or marcaname =='GEELY' or marcaname=='LIFAN' or marcaname=='CHERY' or marcaname=='GREAT WALL' or marcaname=='JAC' or marcaname=='INCAPOWER' or marcaname=='BYD' or marcaname=='CHANGE' or marcaname=='HAFEI':
            if (tiponame=='Auto' and usoname=='Particular') or (tiponame=='Station Wagon' and usoname=='Particular') or (tiponame=='Rural' and usoname=='Particular') :
                prog.append(5)
            if origenname=='Pick-UP':
                prog.append(5)
            if origenname=='Chino':
                prog.append(5)
        ## Dorada Pickup
        if (tiponame == 'Pick-UP' and usoname =='Particular') or usoname=='Comercial' or usoname =='Transporte de Personal, Turistico, Escolar':
            prog.append(22)
        ## Taxi Individual
        if(tiponame=='Rural' and usoname=='Taxi/Publico') or (tiponame=='Auto' and usoname=='Taxi/Publico') or (tiponame=='Station Wagon' and usoname=='Taxi/Publico') or (origenname=='Chino' and usoname=='Taxi/Publico'):
            prog.append(21)
        if tiponame == 'Pick-UP' and usoname=='Taxi/Publico':
            prog.remove(21)
        ## Program Transporte Personal
        if (tiponame == 'Rural' or tiponame == 'Omnibus' or tiponame=='Microbus') and usoname=='Transporte de Personal, Turistico, Escolar':
            prog.append(19)
        aseg = ProgAseg.objects.filter(id_aseg_id=aseguradora,id_prog_id__in=prog).values('id_prog','id_prog__program')
    data_dict = ValuesQuerySetToDict(aseg)
    data = json.dumps(data_dict)
    return HttpResponse(data, content_type="application/json")
@csrf_exempt
def pdfx(request):
    """Render the URL posted in the request body to /var/www/html/output.pdf.

    SECURITY fix: the URL comes straight from the client; it is now passed
    to wkhtmltopdf as a single argv element with no shell, closing the
    command-injection hole the previous ``os.system(...)`` string left
    open.  The audit log file is also opened via ``with`` so the handle
    cannot leak.
    """
    import subprocess  # local import keeps this fix self-contained
    urlx = request.body
    with open('/var/www/pdf.txt', 'a') as f:
        f.write(str(urlx) + '\n')
    subprocess.call(['wkhtmltopdf', urlx, '/var/www/html/output.pdf'])
    return HttpResponse('data', content_type="application/json")
@csrf_exempt
def marca(request):
    """List every brand as JSON, sorted by its display order."""
    rows = Marca.objects.all().values('id_marca', 'name_marca', 'orden').order_by('orden')
    return HttpResponse(json.dumps(ValuesQuerySetToDict(rows)),
                        content_type="application/json")
@csrf_exempt
def modelo(request, id_marca):
    """List the models available for brand *id_marca* as JSON."""
    rows = AutoValor.objects.filter(id_marca_id=id_marca).values(
        'id_modelo', 'id_modelo__name_model', 'id_marca').annotate(
        model=Max('id_modelo__name_model')).order_by('id_modelo__name_model')
    return HttpResponse(json.dumps(ValuesQuerySetToDict(rows)),
                        content_type="application/json")
@csrf_exempt
def listmodelo(request):
    """List every model (id + name) as JSON, newest first."""
    rows = Modelo.objects.all().values('id_model', 'name_model').order_by('-id_model')
    return HttpResponse(json.dumps(ValuesQuerySetToDict(rows)),
                        content_type="application/json")
@csrf_exempt
def clase(request):
    """List every vehicle class as JSON."""
    rows = Clase.objects.all().values('id_clase', 'clase').order_by('id_clase')
    return HttpResponse(json.dumps(ValuesQuerySetToDict(rows)),
                        content_type="application/json")
@csrf_exempt
def usos(request, tipo):
    """List the usages allowed for vehicle class *tipo* as JSON."""
    rows = Tipouso.objects.filter(tipo_id=tipo).values('id', 'uso__uso', 'uso')
    return HttpResponse(json.dumps(ValuesQuerySetToDict(rows)),
                        content_type="application/json")
@csrf_exempt
def catemodelo(request, modelo):
    """Return the pricing category of model id *modelo*:
    2 for Toyota/Nissan, otherwise 1.

    Bug fix: the original left ``cat`` unbound (NameError) when the model
    id matched no AutoValor row; it now defaults to category 1.  As before,
    only the last matching row determines the result.
    NOTE(review): brands are compared as 'Toyota'/'Nissan' while other
    views in this module compare uppercase 'TOYOTA'/'NISSAN' — confirm the
    stored casing.
    """
    cat = 1
    modelos = AutoValor.objects.filter(id_modelo=modelo).values(
        'id_modelo', 'id_modelo__name_model', 'id_marca__name_marca')
    for row in modelos:
        if row['id_marca__name_marca'] in ('Toyota', 'Nissan'):
            cat = 2
        else:
            cat = 1
    return HttpResponse(cat, content_type="application/json")
@csrf_exempt
def claseModelo(request, id_model):
    """List the vehicle classes registered for model *id_model* as JSON."""
    rows = AutoValor.objects.filter(id_modelo=id_model).values(
        'id', 'id_tipo', 'id_tipo__clase', 'id_modelo')
    return HttpResponse(json.dumps(ValuesQuerySetToDict(rows)),
                        content_type="application/json")
@csrf_exempt
def riesgosclase(request):
    """List every insurer/model risk assignment as JSON, newest first."""
    rows = RiesgAseg.objects.all().values(
        'aseguradora__name_asegurad', 'id_model__id_marca__name_marca',
        'id_model__id_tipo__clase', 'id_riesg__tipo_riesgo',
        'id_model__id_modelo__name_model', 'id').order_by('-id')
    return HttpResponse(json.dumps(ValuesQuerySetToDict(rows)),
                        content_type="application/json")
@csrf_exempt
def datosfiltro(request, id_cliente):
    """Return the quote-filter choices stored for client *id_cliente* as JSON."""
    rows = Clientes.objects.filter(id_cliente=id_cliente).values(
        'id_cliente', 'fullname', 'email', 'celular', 'chose_marca__name_marca',
        'chose_modelo__name_model', 'chose_tipo__clase',
        'chose_modalid__name_modalidad', 'chose_uso__uso',
        'chose_anio__anio_antig', 'chose_ubicl', 'chose_ubicp',
        'chose_informat', 'value')
    return HttpResponse(json.dumps(ValuesQuerySetToDict(rows)),
                        content_type="application/json")
@csrf_exempt
def categorias(request):
    """List every vehicle category as JSON."""
    rows = Categorias.objects.all().values('id_categ', 'categoria').order_by('id_categ')
    return HttpResponse(json.dumps(ValuesQuerySetToDict(rows)),
                        content_type="application/json")
@csrf_exempt
def anio(request):
    """List the known vehicle years as JSON, newest first."""
    rows = Anio.objects.all().values('id_anio', 'anio_antig').order_by('-id_anio')
    return HttpResponse(json.dumps(ValuesQuerySetToDict(rows)),
                        content_type="application/json")
@csrf_exempt
def programas(request):
    """List every insurance program as JSON."""
    rows = Programa.objects.all().values('id_program', 'program').order_by('id_program')
    return HttpResponse(json.dumps(ValuesQuerySetToDict(rows)),
                        content_type="application/json")
@csrf_exempt
def aseguradoras(request):
    """List every insurer as JSON."""
    rows = Aseguradora.objects.all().values('id_asegurad', 'name_asegurad').order_by('id_asegurad')
    return HttpResponse(json.dumps(ValuesQuerySetToDict(rows)),
                        content_type="application/json")
@csrf_exempt
def coberturas(request):
    """List every coverage as JSON."""
    rows = Cobertura.objects.all().values('id_cobert', 'descripcion').order_by('id_cobert')
    return HttpResponse(json.dumps(ValuesQuerySetToDict(rows)),
                        content_type="application/json")
@csrf_exempt
def deducciones(request):
    """List every deductible as JSON."""
    rows = Deducibles.objects.all().values('id_deduc', 'deducible').order_by('id_deduc')
    return HttpResponse(json.dumps(ValuesQuerySetToDict(rows)),
                        content_type="application/json")
@csrf_exempt
def financiamiento(request):
    """List financing plans per insurer (installments and TEA) as JSON."""
    rows = FinanAsegu.objects.values(
        'id', 'id_finan', 'id_finan__financiamiento', 'id_aseg',
        'id_aseg__name_asegurad', 'cuota', 'tea')
    return HttpResponse(json.dumps(ValuesQuerySetToDict(rows)),
                        content_type="application/json")
def pdfmargarita(request):
    """Serve the 'pdfmargarita' page template."""
    template_name = 'pdfmargarita.html'
    return render(request, template_name)
def generate_pdf_view(request):
    """Convert the filter-results page to PDF via the pdfcrowd API.

    Fixes: the Python-2-only ``except pdfcrowd.Error, why`` syntax is
    replaced with ``as``; the output file is managed with ``with`` blocks;
    and the response now carries the generated PDF bytes instead of the
    closed (and, on early failure, unbound) file object the original
    returned.  On conversion failure the error text is returned instead.
    """
    try:
        # create an API client instance
        client = pdfcrowd.Client("username", "apikey")
        # convert an HTML file to file.pdf, then read it back for the response
        with open('file.pdf', 'wb') as output_file:
            client.convertFile('/var/www/cotizacion/frontend/resultadofiltro.html', output_file)
        with open('file.pdf', 'rb') as fh:
            pdf_bytes = fh.read()
    except pdfcrowd.Error as why:
        print('Failed: {}'.format(why))
        return HttpResponse('Failed: {}'.format(why), content_type="application/json")
    return HttpResponse(pdf_bytes, content_type="application/json")
def _prima_total(tasa, monto, igv, demision):
    """Gross up a net premium: *tasa*% of *monto*, plus IGV, plus the
    emission surcharge. Mirrors the original's step-by-step rounding so the
    results are bit-identical."""
    PN = round(float(tasa) * float(monto) / 100, 2)
    con_igv = round(float(PN) * round(float(igv) / 100 + 1, 2), 2)
    return round(round(float(demision) / 100 + 1, 2) * float(con_igv), 2)

@csrf_exempt
def fiiiii(request):
    """Annotate every Financiamiento row with each insurer's installment plan
    ("<cuotas> CUOTAS de <amount>") for the vehicle described in the POSTed
    JSON body (keys: modelo, anio, precio, uso) and return the rows as JSON.

    Fixes over the original: the RiesgAseg lookups that read the invalid
    attribute 'id_riesg__tipo_riesgo' (an AttributeError whenever a row
    matched, with the result overwritten unconditionally anyway) were
    removed, as was the exact duplicate of the Rimac block; repeated
    identical .get() calls are done once per insurer.
    """
    data = json.loads(request.body)
    financiamiento = Financiamiento.objects.all().values('id_financ', 'financiamiento').order_by('id_financ')
    modelo = data['modelo']
    anio = data['anio']
    monto = data['precio']
    uso = data['uso']
    # Vehicle class: last AutoValor row for this model wins (None if there is
    # no row -- the original raised NameError further down in that case).
    tipo = None
    for m in AutoValor.objects.filter(id_modelo_id=modelo):
        tipo = m.id_tipo.id_clase
    # Risk categories are hard-coded for every insurer in this endpoint.
    riesgohdi = 3
    riesgorimac = 3
    riesgopositiva = 3
    riesgomapfre = 3
    riesgopacifico = 3
    # Vehicle age in years.
    anio = int(datetime.datetime.now().year) - int(Anio.objects.get(id_anio=anio).anio_antig)
    demision = Parametros.objects.get(id=1).d_emision  # emission surcharge (%)
    igv = Parametros.objects.get(id=1).igv             # sales tax (%)
    for i in range(len(financiamiento)):
        plan_id = financiamiento[i]['id_financ']
        # ---- HDI (aseguradora 3) ----
        if FinanAsegu.objects.filter(id_finan=plan_id, id_aseg=3).count() == 1:
            plan = FinanAsegu.objects.get(id_finan=plan_id, id_aseg=3)
            h = TasaAsegur.objects.filter(id_aseg_id=3, riesgo_id=riesgohdi, anio=anio)
            if h.count() == 1:
                PT = _prima_total(round(float(h[0].value), 2), monto, igv, demision)
                if int(plan.cuota) == 4:
                    fact = round(float(PT) / 4, 2)
                elif int(plan.cuota) == 11:
                    fact = round(float(PT) * round(float(plan.tea) + 1, 4) / 11, 2)
                else:
                    fact = round(float(PT) * round(float(plan.tea) + 1, 4) / 12, 2)
                financiamiento[i]['hdi'] = plan.cuota + str(" CUOTAS de ") + str(fact)
        # ---- La Positiva (aseguradora 1) ----
        if FinanAsegu.objects.filter(id_finan=plan_id, id_aseg=1).count() == 1:
            plan = FinanAsegu.objects.get(id_finan=plan_id, id_aseg=1)
            h = TasaAsegur.objects.filter(id_aseg_id=1, anio=anio, riesgo_id=riesgopositiva, id_uso_id=uso, tipo_id=tipo)
            if h.count() == 1:
                PT = _prima_total(round(float(h[0].value), 2), monto, igv, demision)
                if int(plan.cuota) == 5:
                    fact = round(float(PT) / 5, 4)  # the original rounds this one to 4 decimals
                elif int(plan.cuota) == 10:
                    fact = round(float(PT) * round(float(plan.tea) + 1, 4) / 10, 2)
                else:
                    fact = round(float(PT) * round(float(plan.tea) + 1, 4) / 12, 2)
                financiamiento[i]['positiva'] = plan.cuota + str(" CUOTAS de ") + str(fact)
        # ---- Pacifico (aseguradora 2) ----
        if FinanAsegu.objects.filter(id_finan=plan_id, id_aseg=2).count() == 1:
            plan = FinanAsegu.objects.get(id_finan=plan_id, id_aseg=2)
            h = TasaAsegur.objects.filter(id_aseg_id=2, riesgo_id=riesgopacifico, anio=anio)
            if h.count() == 1:
                PT = _prima_total(round(float(h[0].value), 2), monto, igv, demision)
                if int(plan.cuota) == 4:
                    fact = round(float(PT) / 4, 2)
                else:
                    # The original also had "elif int(tea)==0.082", which can
                    # never be true and computed this very same formula.
                    fact = round(float(PT) * round(float(plan.tea) + 1, 4) / 10, 2)
                financiamiento[i]['pacifico'] = plan.cuota + str(" CUOTAS de ") + str(fact)
        # ---- Rimac (aseguradora 5) -- the original ran this block twice ----
        if FinanAsegu.objects.filter(id_finan=plan_id, id_aseg=5).count() == 1:
            plan = FinanAsegu.objects.get(id_finan=plan_id, id_aseg=5)
            h = TasaAsegur.objects.filter(id_aseg_id=5, riesgo_id=riesgorimac, anio=anio, tipo_id=tipo, ubicacion=1, id_uso_id=uso)
            if h.count() == 1:
                PT = _prima_total(round(float(h[0].value), 2), monto, igv, demision)
                if int(plan.cuota) == 5:
                    fact = round(float(PT) / 5, 2)
                elif int(plan.cuota) == 10:
                    fact = round(float(PT) * round(float(plan.tea) + 1, 4) / 10, 2)
                else:
                    fact = round(float(PT) * round(float(plan.tea) + 1, 4) / 12, 2)
                financiamiento[i]['rimac'] = plan.cuota + str(" CUOTAS de ") + str(fact)
        # ---- Mapfre (aseguradora 4) ----
        if FinanAsegu.objects.filter(id_finan=plan_id, id_aseg=4).count() == 1:
            plan = FinanAsegu.objects.get(id_finan=plan_id, id_aseg=4)
            h = TasaAsegur.objects.filter(id_aseg_id=4, anio=anio, id_uso_id=uso, riesgo_id=riesgomapfre, tipo_id=tipo)
            if h.count() == 1:
                PT = _prima_total(round(float(h[0].value), 2), monto, igv, demision)
                if int(plan.cuota) == 4:
                    fact = round(float(PT) / 4, 2)
                elif int(plan.cuota) == 10:
                    fact = round(float(PT) * round(float(plan.tea) + 1, 4) / 10, 2)
                else:
                    fact = round(float(PT) * round(float(plan.tea) + 1, 4) / 12, 2)
                financiamiento[i]['mapfre'] = plan.cuota + str(" CUOTAS de ") + str(fact)
    data_dict = ValuesQuerySetToDict(financiamiento)
    data = json.dumps(data_dict)
    return HttpResponse(data, content_type="application/json")
@csrf_exempt
def primaneta(request,descuento):
data = json.loads(request.body)
# PrimaNeta {u'orderId': u'618', u'anio': u'29', u'uso': u'1', u'precio': u'212', u'modelo': u'5506', u'programa': u'4z13z25z', u'modalidad': u'1'}
monto = data['precio']
marca=data['marca']
orderId = data['orderId']
uso = data['uso']
usoname = Uso.objects.get(id_uso=uso).uso
modelo = data['modelo']
a = AutoValor.objects.get(id_marca_id=marca,id_modelo_id=modelo,id_tipo_id=data['tipo'])
id_auto_valor = a.id
origenname = a.id_marca.origen
tiponame= a.id_tipo.clase
modelname =a.id_modelo.name_model
# for m in a:
# tipo = m.id_tipo.id_clase
modalidad = data['modalidad']
anio = data['anio']
programa = data['programa'].split('z')
programarimac= programa[1]
programamapfre = programa[0]
programapositiva = programa[2]
riesgohdi = 3
riesgorimac= 7
riesgopositiva = 3
riesgomapfre = 3
riesgopacifico = 6
nameriesgomapfre = 'Bajo Riesgo'
nameriesgorimac = 'Bajo Riesgo II'
nameriesgopositiva = 'Bajo Riesgo'
nameriesgohdi='Cat. I S/.700'
nameriesgopacifico = 'Bajo Riesgo I'
if RiesgAseg.objects.filter(id_model_id=id_auto_valor,aseguradora_id=3):
t =RiesgAseg.objects.filter(id_model_id=id_auto_valor,aseguradora_id=3).values('id_riesg__tipo_riesgo')[0]['id_riesg__tipo_riesgo'].split(' ')[1]
if t == 'I':
if int(monto) <= 40000:
nameriesgohdi = 'Cat. I S/.700'
if int(monto) > 40000:
nameriesgohdi = 'Alta Gama S/.2,500'
if t == 'II':
if int(monto) <= 40000:
nameriesgohdi = 'Cat. II S/.700'
if t=='Gama':
nameriesgohdi = 'Alta Gama S/.2,500'
if t=='Up':
nameriesgohdi = 'Pick Up S/.1,100'
if RiesgAseg.objects.filter(aseguradora_id=5,id_model_id=id_auto_valor):
riesgorimac = RiesgAseg.objects.get(aseguradora_id=5,id_model_id=id_auto_valor).id_riesg.id_riesgo
nameriesgorimac = RiesgAseg.objects.get(aseguradora_id=5,id_model_id=id_auto_valor).id_riesg.tipo_riesgo
if int(monto)>50000:
riesgorimac= 4
nameriesgorimac ='Alto Riesgo I'
if RiesgAseg.objects.filter(aseguradora_id=4,id_model_id=id_auto_valor):
riesgomapfre = RiesgAseg.objects.get(aseguradora_id=4,id_model_id=id_auto_valor).id_riesg.id_riesgo
nameriesgomapfre = RiesgAseg.objects.get(aseguradora_id=4,id_model_id=id_auto_valor).id_riesg.tipo_riesgo
print 'riesgomapfre',riesgomapfre
if RiesgAseg.objects.filter(aseguradora_id=1,id_model_id=id_auto_valor):
riesgopositiva = RiesgAseg.objects.get(aseguradora_id=1,id_model_id=id_auto_valor).id_riesg.id_riesgo
nameriesgopositiva = RiesgAseg.objects.get(aseguradora_id=1,id_model_id=id_auto_valor).id_riesg.tipo_riesgo
if RiesgAseg.objects.filter(aseguradora_id=2,id_model_id=id_auto_valor):
riesgopacifico = RiesgAseg.objects.get(aseguradora_id=2,id_model_id=id_auto_valor).id_riesg.id_riesgo
nameriesgopacifico = RiesgAseg.objects.get(aseguradora_id=2,id_model_id=id_auto_valor).id_riesg.tipo_riesgo
anio = int(Anio.objects.get(id_anio=anio).anio_antig)
anioact = int(datetime.datetime.now().year)
anio = anioact - anio
demision = Parametros.objects.get(id=1).d_emision
igv = Parametros.objects.get(id=1).igv
aseguradora = Aseguradora.objects.all().values('id_asegurad','name_asegurad').order_by('name_asegurad')
for i in range(len(aseguradora)):
if aseguradora[i]['id_asegurad'] == 1:
tasa = None
print 'riesgopositiva',riesgopositiva,anio,programapositiva
## Programa Taxi
if int(programapositiva) ==29:
if usoname=='Taxi/Publico':
tasa = TasaAsegur.objects.get(id_aseg_id=1,anio=int(anio),id_uso__uso=usoname,riesgo_id=riesgopositiva)
## Comercial
if int(programapositiva) ==28:
tasa = TasaAsegur.objects.get(id_aseg_id=1,anio=int(anio),riesgo_id=riesgopositiva,programa_id=programapositiva)
if origenname=='Chino':
tasa = TasaAsegur.objects.get(id_aseg_id=1,anio=int(anio),origen='Chino',programa_id=programapositiva)
if tiponame == 'Pick-UP':
tasa = TasaAsegur.objects.get(id_aseg_id=1,anio=int(anio),tipo__clase='Pick-UP',programa_id=programapositiva)
if origenname=='Chino':
tasa = TasaAsegur.objects.get(id_aseg_id=1,tipo__clase='Pick-UP',origen='Chino',anio=int(anio),programa_id=programapositiva)
#Uso Urbano Taxi
if int(programapositiva) ==29:
tasa = TasaAsegur.objects.get(id_aseg_id=1,anio=int(anio),riesgo_id=riesgopositiva,programa_id=programapositiva)
if origenname =='Chino':
tasa = TasaAsegur.objects.get(id_aseg_id=1,anio=int(anio),origen='Chino',programa_id=programapositiva)
#Uso Urbano
if int(programapositiva) ==30:
tasa = TasaAsegur.objects.get(id_aseg_id=1,anio=int(anio),programa_id=programapositiva)
#Uso Carga
if int(programapositiva) ==31:
tasa = TasaAsegur.objects.get(id_aseg_id=1,anio=int(anio),id_uso__uso=usoname,programa_id=programapositiva)
## Corporativo
if int(programapositiva) ==4:
tasa = TasaAsegur.objects.get(id_aseg_id=1,anio=int(anio),riesgo_id=riesgopositiva,programa_id=4)
if origenname == 'Chino' and tiponame != 'Pick-UP':
tasa = TasaAsegur.objects.get(id_aseg_id=1,anio=int(anio),origen='Chino',tipo__isnull=True,programa_id=4)
if origenname == 'Chino' and tiponame == 'Pick-UP':
tasa = TasaAsegur.objects.get(id_aseg_id=1,anio=int(anio),origen='Chino',tipo__clase='Pick-UP',programa_id=4)
if origenname != 'Chino' and tiponame == 'Pick-UP':
tasa = TasaAsegur.objects.filter(id_aseg_id=1,anio=int(anio),tipo__clase='Pick-UP',programa_id=4).exclude(origen='Chino')[0]
if tasa !=None:
aseguradora[i]['tasapositiva'] = round(float(tasa.value),2)
aseguradora[i]['positiva'] = round(aseguradora[i]['tasapositiva']*float(monto)/100,2)
aseguradora[i]['positivasubtotal'] = round((aseguradora[i]['positiva'] + 3*aseguradora[i]['positiva']/100),2)
aseguradora[i]['positivatotal'] = round((aseguradora[i]['positivasubtotal']+18*aseguradora[i]['positivasubtotal']/100),2)
aseguradora[i]['riesgopositiva'] = nameriesgopositiva
aseguradora[i]['idriesgopositiva'] = riesgopositiva
else:
aseguradora[i]['positiva']='No Aplica'
if aseguradora[i]['id_asegurad'] == 2:
tasa = None
if TasaAsegur.objects.filter(id_aseg_id=2,anio=int(anio),riesgo_id=riesgopacifico).count()>0:
tasa = TasaAsegur.objects.get(id_aseg_id=2,anio=int(anio),riesgo_id=riesgopacifico)
print '99',round(float(tasa.value),2)
print origenname
if origenname == 'Chino':
tasa = TasaAsegur.objects.get(id_aseg_id=2,anio=int(anio),origen=origenname)
print '888',round(float(tasa.value),2)
if tasa !=None:
aseguradora[i]['tasapacifico'] = round(float(tasa.value),2)
print aseguradora[i]['tasapacifico']
aseguradora[i]['pacifico'] = round(aseguradora[i]['tasapacifico']*float(monto)/100,2)
aseguradora[i]['pacificosubtotal'] = round((aseguradora[i]['pacifico'] + 3*aseguradora[i]['pacifico']/100),2)
aseguradora[i]['pacificototal'] = round((aseguradora[i]['pacificosubtotal']+18*aseguradora[i]['pacificosubtotal']/100),2)
aseguradora[i]['riesgopacifico'] = nameriesgopacifico
aseguradora[i]['idriesgopacifico'] = riesgopacifico
else:
aseguradora[i]['pacifico']='No Aplica'
if aseguradora[i]['id_asegurad'] == 3:
tasa=None
e = AutoValor.objects.get(id=id_auto_valor).excluidohdi
if e !='Si':
tasa = TasaAsegur.objects.get(id_aseg_id=3,anio=int(anio),categoria__categoria=nameriesgohdi)
if tasa !=None:
aseguradora[i]['tasahdi'] = round(float(tasa.value),2)
aseguradora[i]['hdi'] = round(aseguradora[i]['tasahdi']*float(monto)/100,2)
aseguradora[i]['hdisubtotal'] = round((aseguradora[i]['hdi'] + 3*aseguradora[i]['hdi']/100),2)
aseguradora[i]['hditotal'] = round((aseguradora[i]['hdisubtotal']+18*aseguradora[i]['hdisubtotal']/100),2)
aseguradora[i]['riesgohdi'] = nameriesgohdi
aseguradora[i]['idriesgohdi'] = riesgohdi
else:
aseguradora[i]['hdi']='No Aplica'
##### Primas mapfre ######
if aseguradora[i]['id_asegurad'] == 4:
tasa = None
if int(programamapfre)==1:
if TasaAsegur.objects.filter(id_aseg_id=4,anio=int(anio),riesgo_id=riesgomapfre,programa_id=programamapfre).count()>0:
tasa = TasaAsegur.objects.get(id_aseg_id=4,anio=int(anio),riesgo_id=riesgomapfre,programa_id=programamapfre)
if origenname == 'Chino':
tasa = TasaAsegur.objects.get(id_aseg_id=4,anio=int(anio),origen='Chino',programa_id=programamapfre)
if int(programamapfre)==22:
print 'usoname,programamapfre,origen',usoname,programamapfre
tasa = TasaAsegur.objects.filter(id_aseg_id=4,anio=int(anio),id_uso__uso=usoname,programa_id=programamapfre).exclude(origen='Chino')[0]
print tasa
if origenname == 'Chino':
tasa = TasaAsegur.objects.get(id_aseg_id=4,anio=int(anio),origen='Chino',id_uso__uso=usoname,programa_id=programamapfre)
if int(programamapfre)==5:
tasa = TasaAsegur.objects.get(id_aseg_id=4,anio=int(anio),riesgo_id=riesgomapfre,programa_id=programamapfre)
if origenname == 'Chino':
tasa = TasaAsegur.objects.get(id_aseg_id=4,anio=int(anio),origen='Chino',programa_id=programamapfre)
if tiponame =='Pick-UP':
tasa = TasaAsegur.objects.get(id_aseg_id=4,anio=int(anio),tipo__clase=tiponame,programa_id=programamapfre)
if int(programamapfre)==14:
tasa = TasaAsegur.objects.get(id_aseg_id=4,anio=int(anio),riesgo_id=riesgomapfre,programa_id=programamapfre)
if tiponame =='Pick-UP':
tasa = TasaAsegur.objects.get(id_aseg_id=4,anio=int(anio),tipo__clase=tiponame,programa_id=programamapfre)
if int(programamapfre)==15:
tasa = TasaAsegur.objects.get(id_aseg_id=4,anio=int(anio),tipo__clase=tiponame,programa_id=programamapfre)
if int(programamapfre)==16:
tasa = TasaAsegur.objects.get(id_aseg_id=4,anio=int(anio),tipo__clase=tiponame,programa_id=programamapfre)
if int(programamapfre)==17:
print 'Consultar'
if int(programamapfre)==18:
print 'Consultar'
if int(programamapfre)==19:
print 'Consultar'
if int(programamapfre)==20:
tasa = TasaAsegur.objects.get(id_aseg_id=4,anio=int(anio),riesgo_id=riesgomapfre,programa_id=programamapfre)
if int(programamapfre)==21:
if riesgomapfre == 2:
riesgomapfre=3
tasa = TasaAsegur.objects.get(id_aseg_id=4,anio=int(anio),riesgo_id=riesgomapfre,programa_id=programamapfre)
if origenname == 'Chino':
tasa = TasaAsegur.objects.get(id_aseg_id=4,anio=int(anio),origen='Chino',programa_id=programamapfre)
if tasa !=None:
aseguradora[i]['tasamapfre'] = round(float(tasa.value),2)
aseguradora[i]['mapfre'] = round(aseguradora[i]['tasamapfre']*float(monto)/100,2)
aseguradora[i]['mapfresubtotal'] = round((aseguradora[i]['mapfre'] + 3*aseguradora[i]['mapfre']/100),2)
aseguradora[i]['mapfretotal'] = round((aseguradora[i]['mapfresubtotal']+18*aseguradora[i]['mapfresubtotal']/100),2)
aseguradora[i]['riesgomapfre'] = nameriesgomapfre
aseguradora[i]['idriesgomapfre'] = riesgomapfre
else:
aseguradora[i]['mapfre']='No Aplica'
if int(aseguradora[i]['id_asegurad']) == 5:
tasa = None
if int(programarimac) == 2: # Corporativa Rimac
if tiponame != 'Pick-UP':
tasa = TasaAsegur.objects.get(id_aseg_id=5,anio=int(anio),riesgo_id=riesgorimac,programa_id=programarimac)
if tiponame == 'Pick-UP' and anio >= 3:
tasa = TasaAsegur.objects.get(id_aseg_id=5,anio=int(anio),tipo__clase=tiponame,programa_id=programarimac)
nameriesgorimac ='Pick-UP'
if tiponame == 'Pick-UP' and anio < 3:
tasa = None
if int(programarimac) == 25: # Corporativa Rimac comision 12.5
tasa = TasaAsegur.objects.get(id_aseg_id=5,anio=int(anio),riesgo_id=riesgorimac,programa_id=programarimac)
if int(programarimac) == 26: # Corporativa Rimac comision 15
tasa = TasaAsegur.objects.get(id_aseg_id=5,anio=int(anio),riesgo_id=riesgorimac,programa_id=programarimac)
if int(programarimac) == 7: # Programa 4x4
tasa = TasaAsegur.objects.get(id_aseg_id=5,anio=int(anio),id_uso__uso=usoname,programa_id=programarimac)
nameriesgorimac = 'Pick-UP '+usoname
if int(programarimac) == 6: # Rimac Vehicular Pick Up
if TasaAsegur.objects.filter(id_aseg_id=5,anio=int(anio),id_uso__uso=usoname,programa_id=programarimac).count()>0:
tasa = TasaAsegur.objects.get(id_aseg_id=5,anio=int(anio),id_uso__uso=usoname,programa_id=programarimac)
nameriesgorimac = usoname
else:
tasa =None
nameriesgopositiva = None
if int(programarimac) == 10:
tasa = TasaAsegur.objects.get(id_aseg_id=5,anio=int(anio),tipo__clase=tiponame,programa_id=programarimac)
if int(programarimac) == 11: # Programa taxi urbano
if tiponame =='Auto':
tasa = TasaAsegur.objects.get(id_aseg_id=5,anio=int(anio),tipo__clase='Auto',programa_id=programarimac)
if 'Yaris' in modelname:
tasa = TasaAsegur.objects.get(id_aseg_id=5,anio=int(anio),modelo__name_model__contains='Yaris',programa_id=programarimac)
if 'Sail' in modelname:
tasa = TasaAsegur.objects.get(id_aseg_id=5,anio=int(anio),modelo__name_model__contains='Sail',programa_id=programarimac)
if origenname == 'Chino':
tasa = TasaAsegur.objects.get(id_aseg_id=5,anio=int(anio),origen='Chino',programa_id=programarimac)
if int(programarimac) == 12:
if 'H1' in modeloname:
tasa = TasaAsegur.objects.get(id_aseg_id=5,anio=int(anio),modelo__name_model__contains='H1',programa_id=programarimac)
if tiponame == 'Panel':
tasa = TasaAsegur.objects.get(id_aseg_id=5,anio=int(anio),tipo__clase='Panel',programa_id=programarimac)
if int(programarimac) == 13:
if tiponame =='Camion':
tasa = TasaAsegur.objects.get(id_aseg_id=5,anio=int(anio),tipo__clase='Camion',programa_id=programarimac)
if tasa != None:
aseguradora[i]['tasarimac'] = round(float(tasa.value)*int(descuento)/100,2)
aseguradora[i]['rimac'] = round(aseguradora[i]['tasarimac']*float(monto)/100,2)
if int(monto) <= 350:
if nameriesgorimac =='Bajo Riesgo I' or nameriesgorimac=='Bajo Riesgo II' or nameriesgorimac =='Alto Riesgo I':
aseguradora[i]['rimac']= 350
if nameriesgorimac =='Alto Riesgo II':
aseguradora[i]['rimac']= 350
if nameriesgorimac =='Pick-UP' and monto <= 375:
aseguradora[i]['rimac']= 375
aseguradora[i]['rimacsubtotal'] = round((aseguradora[i]['rimac'] + 3*aseguradora[i]['rimac']/100),2)
aseguradora[i]['rimactotal'] = round((aseguradora[i]['rimacsubtotal']+18*aseguradora[i]['rimacsubtotal']/100),2)
aseguradora[i]['riesgo'] = nameriesgorimac
aseguradora[i]['idriesgo'] = riesgorimac
else:
aseguradora[i]['rimac'] = 'No Aplica'
if AutoValor.objects.filter(id=id_auto_valor,excluidorimac='Si').count()>0:
aseguradora[i]['rimac'] = 'Modelo Restringido'
# print programarimac
data_dict = ValuesQuerySetToDict(aseguradora)
data = json.dumps(data_dict)
return HttpResponse(data, content_type="application/json")
@csrf_exempt
def riesgomodelo(request, modelo):
    """List the RiesgAseg rows for the given AutoValor id as JSON."""
    rows = RiesgAseg.objects.filter(id_model=modelo).values(
        'aseguradora__name_asegurad', 'id_model__id_modelo__name_model',
        'id_model', 'id_riesg_id')
    payload = json.dumps(ValuesQuerySetToDict(rows))
    return HttpResponse(payload, content_type="application/json")
@csrf_exempt
def riesgos(request):
    """List every Riesgo row (id_riesgo, tipo_riesgo) as JSON, ordered by name."""
    rows = Riesgo.objects.all().values('id_riesgo', 'tipo_riesgo').order_by('tipo_riesgo')
    payload = json.dumps(ValuesQuerySetToDict(rows))
    return HttpResponse(payload, content_type="application/json")
@csrf_exempt
def listagps2(request):
    """List the 10 most recent Gps rows with value=1 (suma-minima rules) as JSON."""
    rows = Gps.objects.filter(value=1).values(
        'sumaminima', 'id', 'id_aseg__name_asegurad', 'id_prog__program',
        'value').order_by('-id')[:10]
    payload = json.dumps(ValuesQuerySetToDict(rows))
    return HttpResponse(payload, content_type="application/json")
@csrf_exempt
def listagps(request):
    """List the 10 most recent per-vehicle Gps rows (value != 1) as JSON."""
    # The duplicated 'id_auto__id_modelo__name_model' field is kept as in the
    # original values() call.
    rows = Gps.objects.all().exclude(value=1).values(
        'sumaminima', 'id', 'id_aseg__name_asegurad', 'id_prog__program',
        'id_auto__id_modelo__name_model', 'id_auto__id_marca__name_marca',
        'id_auto__id_modelo__name_model', 'id_uso__uso', 'anio_antig',
        'value', 'anio_antig__anio_antig').order_by('-id')[:10]
    payload = json.dumps(ValuesQuerySetToDict(rows))
    return HttpResponse(payload, content_type="application/json")
# NOTE(review): duplicate definition -- an identical riesgomodelo is defined
# earlier in this module and is shadowed by this one at import time.
@csrf_exempt
def riesgomodelo(request, modelo):
    """List the RiesgAseg rows for the given AutoValor id as JSON."""
    rows = RiesgAseg.objects.filter(id_model=modelo).values(
        'aseguradora__name_asegurad', 'id_model__id_modelo__name_model',
        'id_model', 'id_riesg_id')
    payload = json.dumps(ValuesQuerySetToDict(rows))
    return HttpResponse(payload, content_type="application/json")
@csrf_exempt
def getgps(request,modelo,marca,tipo,uso,monto,anio,programa):
gpspositiva = 'No'
gpsrimac = 'No'
gpspacifico = 'No'
gpsmapfre = 'No'
gpshdi = 'No'
auto =AutoValor.objects.get(id_modelo_id=modelo,id_marca_id=marca,id_tipo_id=tipo)
id_auto = auto.id
today = date.today()
anio = Anio.objects.get(id_anio=anio).anio_antig
difanio = int(today.year)-int(anio)
if Gps.objects.filter(id_auto =id_auto,id_aseg=1).count() > 0 :
gpspositiva = 'Si'
## Gps Rimac
yaris =[1369,1370,1371,1372,1454,1455,1456,1457,1493,1495,1519,1520,1521,8448]
if Gps.objects.filter(id_auto =id_auto,id_aseg=5).count() > 0 and int(difanio)<3:
gpsrimac = 'Si'
if (int(difanio)<5 and int(id_auto)==8376) or (int(difanio)<5 and str(id_auto in yaris) == 'True') :
gpsrimac = 'Si'
if int(monto) >=50000:
gpsrimac = 'Si'
## Gps Mapfre
## Dorada
progmapfre = programa.split('z')[0]
print 'progmapfre',progmapfre
if int(progmapfre)==5 or int(progmapfre)==1 or int(progmapfre)==24:
if (str(id_auto in yaris) == 'True' and int(difanio)<4) or (int(id_auto)==8376 and int(difanio)<4):
gpsmapfre = 'Si'
if Gps.objects.filter(id_auto=id_auto,id_aseg=4).count()>0 and int(difanio)<3:
gpsmapfre = 'Si'
if int(monto) >=50000:
gpsmapfre = 'Si'
## Dorada Pickup
pickma= [7669,7815,7817,7814]
if int(progmapfre)== 22:
if int(id_auto)==8376 and int(difanio)<4:
gpsmapfre = 'Si'
if str(id_auto in pickma) == 'True' and int(difanio)<3:
gpsmapfre = 'Si'
if int(progmapfre)== 19:
if int(id_auto)==8376 and int(difanio)<3:
gpsmapfre = 'Si'
if int(progmapfre)== 21:
gpsmapfre = 'Si'
# Gps Positiva
tipop=[1,3,17,6]
if str(tipo in tipop) == 'True' and int(monto)>=50000:
gpspositiva = 'Si'
if tipo==6:
gpspositiva = 'Si'
if Gps.objects.filter(id_auto=id_auto,id_aseg=1).count()>0 and int(difanio)<3:
gpspositiva = 'Si'
if int(monto)>=50000:
gpspositiva = 'Si'
## Gps Hdi
if Gps.objects.filter(id_auto=id_auto,id_aseg=3).count()>0 :
gpshdi= 'Si'
## Gps Pacifico
if Gps.objects.filter(id_auto=id_auto,id_aseg=2).count()>0 :
gpspacifico = 'Si'
data = {'gpshdi':gpshdi,'gpsmapfre':gpsmapfre,'gpsrimac':gpsrimac,'gpspacifico':gpspacifico,'gpspositiva':gpspositiva}
data = json.dumps(data)
return HttpResponse(data, content_type="application/json")
@csrf_exempt
def cobertura(request, orden_id, uso, anio, modalidad, programa, modelo):
    """Return every Cobertura row annotated with each insurer's value for it.

    'programa' is the Mapfre/Rimac/Positiva program ids joined with 'z';
    only the Mapfre and Rimac slots participate in the CobertAsegur lookups.

    Fix over the original: the RiesgAseg lookups read the invalid attribute
    'id_riesg__tipo_riesgo' (a values()-style lookup, not an attribute),
    which raised AttributeError whenever a row matched -- changed to the
    proper related-object access used elsewhere in this module.
    """
    # Vehicle class: last AutoValor row for this model wins.
    tipo = AutoValor.objects.filter(id_modelo_id=modelo)
    for t in tipo:
        tipo = t.id_tipo_id
    pro = programa.split('z')
    promapfre = pro[0]
    propositiva = pro[2]
    prorimac = pro[1]
    # Vehicle age, bucketed to 10/15/20 years.
    # NOTE(review): anioset, the riesgo* values and tipo are computed but not
    # used in the response; kept for parity with the rest of the module.
    today = date.today()
    anio = Anio.objects.get(id_anio=anio).anio_antig
    difanio = int(today.year) - int(anio)
    anioset = 10
    if difanio <= 10:
        anioset = 10
    if difanio > 10 and difanio <= 15:
        anioset = 15
    if difanio > 15 and difanio <= 20:
        anioset = 20
    if RiesgAseg.objects.filter(aseguradora_id=1, id_model_id=modelo):
        riesgopositiva = RiesgAseg.objects.get(aseguradora_id=1, id_model_id=modelo).id_riesg.tipo_riesgo
    if RiesgAseg.objects.filter(aseguradora_id=2, id_model_id=modelo):
        riesgopacifico = RiesgAseg.objects.get(aseguradora_id=2, id_model_id=modelo).id_riesg.tipo_riesgo
    if RiesgAseg.objects.filter(aseguradora_id=4, id_model_id=modelo):
        riesgomapfre = RiesgAseg.objects.get(aseguradora_id=4, id_model_id=modelo).id_riesg.tipo_riesgo
    cobertura = Cobertura.objects.all().values('id_cobert', 'descripcion').order_by('id_cobert')
    for i in range(len(cobertura)):
        # Attach each insurer's value when exactly one matching row exists.
        if CobertAsegur.objects.filter(id_cob=cobertura[i]['id_cobert'], id_aseg_id=3).count() == 1:
            cobertura[i]['hdi'] = CobertAsegur.objects.get(id_cob=cobertura[i]['id_cobert'], id_aseg_id=3).value
        if CobertAsegur.objects.filter(id_cob=cobertura[i]['id_cobert'], id_aseg_id=1).count() == 1:
            cobertura[i]['positiva'] = CobertAsegur.objects.get(id_cob=cobertura[i]['id_cobert'], id_aseg_id=1).value
        if CobertAsegur.objects.filter(id_cob=cobertura[i]['id_cobert'], id_aseg_id=2).count() == 1:
            cobertura[i]['pacifico'] = CobertAsegur.objects.get(id_cob=cobertura[i]['id_cobert'], id_aseg_id=2).value
        if CobertAsegur.objects.filter(id_cob=cobertura[i]['id_cobert'], id_aseg_id=4, programa_id=promapfre).count() == 1:
            cobertura[i]['mapfre'] = CobertAsegur.objects.get(id_cob=cobertura[i]['id_cobert'], id_aseg_id=4, programa_id=promapfre).value
        if CobertAsegur.objects.filter(id_cob=cobertura[i]['id_cobert'], id_aseg_id=5, programa_id=prorimac).count() == 1:
            cobertura[i]['rimac'] = CobertAsegur.objects.get(id_cob=cobertura[i]['id_cobert'], id_aseg_id=5, programa_id=prorimac).value
    data_dict = ValuesQuerySetToDict(cobertura)
    data = json.dumps(data_dict)
    return HttpResponse(data, content_type="application/json")
@csrf_exempt
def deducible(request, orden_id, uso, anio, modalidad, programa, modelo):
    """Return every Deducibles row annotated per insurer with its deductible
    text: fixed texts for mapfre/positiva/rimac (positional by row index),
    then overridden/augmented from DeducAsegur where a matching row exists.

    Fixes over the original: the RiesgAseg lookups read the invalid
    attribute 'id_riesg__tipo_riesgo' (AttributeError whenever a row
    matched); they now read the related id, which is what the riesgo_id
    filter below expects. The three one-iteration for/break loops around the
    static texts were replaced by an equivalent emptiness guard.
    """
    # Vehicle class: last AutoValor row for this model wins.
    tipo = AutoValor.objects.filter(id_modelo_id=modelo)
    riesgomapfre = 3  # default Mapfre risk id when no override exists
    if RiesgAseg.objects.filter(aseguradora_id=1, id_model_id=modelo):
        riesgopositiva = RiesgAseg.objects.get(aseguradora_id=1, id_model_id=modelo).id_riesg.id_riesgo
    if RiesgAseg.objects.filter(aseguradora_id=2, id_model_id=modelo):
        riesgopacifico = RiesgAseg.objects.get(aseguradora_id=2, id_model_id=modelo).id_riesg.id_riesgo
    if RiesgAseg.objects.filter(aseguradora_id=4, id_model_id=modelo):
        riesgomapfre = RiesgAseg.objects.get(aseguradora_id=4, id_model_id=modelo).id_riesg.id_riesgo
    if RiesgAseg.objects.filter(aseguradora_id=5, id_model_id=modelo):
        riesgorimac = RiesgAseg.objects.get(aseguradora_id=5, id_model_id=modelo).id_riesg.id_riesgo
    for t in tipo:
        tipo = t.id_tipo_id
    pro = programa.split('z')
    promapfre = pro[0]
    propositiva = pro[2]
    prorimac = pro[1]
    deducible = Deducibles.objects.all().values('id_deduc', 'deducible').order_by('id_deduc')
    # Validates the Anio id (raises DoesNotExist otherwise); value unused below.
    anio = int(Anio.objects.get(id_anio=anio).anio_antig)
    if deducible:
        # Static Mapfre texts.
        deducible[0]['mapfre'] = 'Taller Preferente: 15 % del Monto indemnizable, minimo US$ 15000 Afiliado concesionario: 20% del monto indemnizable minimo US$ 200 Taller no afiliado. 20% del monto indemnizable, minimo US$ 300'
        deducible[1]['mapfre'] = 'Sin deducible'
        deducible[2]['mapfre'] = 'Deducible dano propio'
        deducible[3]['mapfre'] = 'Sin deducible'
        deducible[4]['mapfre'] = 'Deducible dano propio'
        deducible[5]['mapfre'] = 'Deducible adicional del 10% del monto indemnizable'
        deducible[6]['mapfre'] = '15% del monto indem Min $150'
        deducible[7]['mapfre'] = 'Deducible dano propio'
        deducible[8]['mapfre'] = '20% del monto indem. Min $150'
        deducible[9]['mapfre'] = 'Sin cobertura'
        deducible[10]['mapfre'] = 'Sin cobertura'
        deducible[11]['mapfre'] = 'Deducible adicional del 20% del monto indemnizable'
        deducible[12]['mapfre'] = 'Deducible adicional del 20% del monto indemnizable'
        deducible[13]['mapfre'] = '10% del monto indem Min $150'
        # Static Positiva texts.
        deducible[0]['positiva'] = 'Taller Multimarca: 10 % del Monto indemnizable, minimo US$ 15000 -Taller preferente: 12% del monto indemnizable minimo US$ 150 -Afiliado concecionario 15% del monto indemnizable, minimo US$ 250 - DIVEMOTOR 20% del monto indemnizable, minimo $500 - Taller no afiliado: 20% del monto indemnizable, minimo US$ 300'
        deducible[1]['positiva'] = 'Sin deducible'
        deducible[2]['positiva'] = 'Deducible dano propio'
        deducible[3]['positiva'] = 'Con GPS: sin deducible Sin GPS sin cobertura'
        deducible[4]['positiva'] = '20% del monto indem Min $ 500'
        deducible[5]['positiva'] = 'Deducible dano propio'
        deducible[6]['positiva'] = '10% del monto indem Min $150'
        deducible[7]['positiva'] = '20% del monto indem Min $300'
        deducible[8]['positiva'] = 'Hasta 25 anos doble deducible'
        deducible[9]['positiva'] = '20% del monto indem Min $300'
        deducible[10]['positiva'] = '20% del monto indemnizable'
        deducible[11]['positiva'] = '20% del monto indem Min $300'
        deducible[12]['positiva'] = '20% del monto indem Min $300'
        deducible[13]['positiva'] = '10% del monto indem Min $150'
        # Static Rimac texts.
        deducible[0]['rimac'] = 'Taller Multimarca: 15 % del Monto indemnizable, minimo US$ 150 -Afiliado concesionario 20% del monto indemnizable, minimo US$ 200'
        deducible[1]['rimac'] = '20% del monto indemnizable'
        deducible[2]['rimac'] = '20% del monto indemnizable Min $200'
        deducible[3]['rimac'] = 'Con GPS: sin deducible Sin GPS: sin cobertura'
        deducible[4]['rimac'] = '20% del monto indem Min $ 200'
        deducible[5]['rimac'] = '10% del monto indemnizable minimo $150 (Talleres Multimarca)'
        deducible[6]['rimac'] = 'Deducible dano propio'
        deducible[7]['rimac'] = '20% del monto indem Min $500'
        deducible[8]['rimac'] = 'Varon 20% del monto indem Min $300 todo evento'
        deducible[9]['rimac'] = '20% del monto indem Min $300'
        deducible[10]['rimac'] = 'Deducible Dano Propio o deducible x Imprudencia el que sea mayor'
        deducible[11]['rimac'] = '-Talleres afiliados multimarca 15% del monto indeminizable, minimo US$ 300- Afiliado concesionario: 20% del monto indeminizable, minimo US$ 500'
        deducible[12]['rimac'] = 'Deducible Ausencia de control'
        deducible[13]['rimac'] = '10% del monto indem Min $150'
    for i in range(len(deducible)):
        # Database overrides, applied only when exactly one row matches.
        if DeducAsegur.objects.filter(id_deduc=deducible[i]['id_deduc'], id_aseg_id=3, id_uso=uso).count() == 1:
            deducible[i]['hdi'] = DeducAsegur.objects.get(id_deduc=deducible[i]['id_deduc'], id_aseg_id=3, id_uso=uso).value
        p = DeducAsegur.objects.filter(id_deduc=deducible[i]['id_deduc'], id_aseg_id=1, id_uso=uso, modalidad_id=modalidad, programa_id=propositiva, tipo_id=tipo).values('id_deduc__deducible')
        if p.count() == 1:
            deducible[i]['positiva'] = DeducAsegur.objects.get(id_deduc=deducible[i]['id_deduc'], id_aseg_id=1, id_uso=uso, modalidad_id=modalidad, programa_id=propositiva, tipo_id=tipo).value
        if DeducAsegur.objects.filter(id_deduc=deducible[i]['id_deduc'], id_aseg_id=2).count() == 1:
            deducible[i]['pacifico'] = DeducAsegur.objects.get(id_deduc=deducible[i]['id_deduc'], id_aseg_id=2).value
        p = DeducAsegur.objects.filter(id_deduc=deducible[i]['id_deduc'], id_aseg_id=4, programa_id=promapfre, id_uso_id=uso, riesgo_id=riesgomapfre, tipo_id=tipo).values('id_deduc__deducible')
        if p.count() == 1:
            deducible[i]['mapfre'] = DeducAsegur.objects.get(id_deduc=deducible[i]['id_deduc'], id_aseg_id=4, programa_id=promapfre, id_uso_id=uso, riesgo_id=riesgomapfre, tipo_id=tipo).value
        if DeducAsegur.objects.filter(id_deduc=deducible[i]['id_deduc'], id_aseg_id=5, id_uso_id=uso).count() == 1:
            deducible[i]['rimac'] = DeducAsegur.objects.get(id_deduc=deducible[i]['id_deduc'], id_aseg_id=5, id_uso_id=uso).value
    data_dict = ValuesQuerySetToDict(deducible)
    data = json.dumps(data_dict)
    return HttpResponse(data, content_type="application/json")
@csrf_exempt
def servic(request):
    """List all services, annotating each row with the configured value per insurer.

    A key (hdi/positiva/pacifico/rimac/mapfre) is added to a service row only
    when exactly one ServicAsegur record exists for that service + insurer.
    The original kept dead locals (body, lista, cober) and five copy-pasted
    branches; they are consolidated into one insurer table with identical
    output (same key insertion order per row).
    """
    servicio = Servicios.objects.all().values('id_serv','services').order_by('id_serv')
    # Insurer primary key -> JSON key, in the same order the original added them.
    insurers = [(3, 'hdi'), (1, 'positiva'), (2, 'pacifico'), (5, 'rimac'), (4, 'mapfre')]
    # len() forces evaluation up front; the original relied on the same caching
    # so the per-row mutations below persist into the serialized output.
    for i in range(len(servicio)):
        for aseg_id, key in insurers:
            if ServicAsegur.objects.filter(id_serv=servicio[i]['id_serv'], id_aseg_id=aseg_id).count() == 1:
                servicio[i][key] = ServicAsegur.objects.get(id_serv=servicio[i]['id_serv'], id_aseg_id=aseg_id).valor
    data_dict = ValuesQuerySetToDict(servicio)
    data = json.dumps(data_dict)
    return HttpResponse(data, content_type="application/json")
@csrf_exempt
def servicio(request):
    """Dump every ServicAsegur row (service name, insurer name, value) as JSON."""
    rows = ServicAsegur.objects.values('id','id_serv','id_serv__services','id_aseg__name_asegurad','valor')
    payload = json.dumps(ValuesQuerySetToDict(rows))
    return HttpResponse(payload, content_type="application/json")
# @csrf_exempt
# def postin(request):
# if request.method == 'POST':
# print json.loads(request.body)
# return HttpResponse('xxxxx', content_type="application/json")
@csrf_exempt
def precio(request,id_model,anio):
    """Return the stored value of the given vehicle model (anio is accepted but unused)."""
    current_value = AutoValor.objects.get(id_modelo=id_model).valor
    return HttpResponse(current_value, content_type="application/json")
@csrf_exempt
def preciodreprecio(request,precio):
    """Echo the given price formatted to exactly two decimal places."""
    formatted = '{0:.2f}'.format(float(precio))
    return HttpResponse(formatted, content_type="application/json")
@csrf_exempt
def listaservice(request):
    """All services ordered by id, as JSON."""
    rows = Servicios.objects.all().values('id_serv','services').order_by('id_serv')
    return HttpResponse(json.dumps(ValuesQuerySetToDict(rows)), content_type="application/json")
@csrf_exempt
def listfinanase(request):
    """Financing/insurer terms with labels, newest first, as JSON."""
    rows = FinanAsegu.objects.all().values('id','id_finan','id_finan__financiamiento','id_aseg__name_asegurad','cuota','tea').order_by('-id')
    return HttpResponse(json.dumps(ValuesQuerySetToDict(rows)), content_type="application/json")
@csrf_exempt
def listafinance(request):
    """Financing/insurer ids and terms ordered by financing id, as JSON."""
    rows = FinanAsegu.objects.all().values('id_finan','id_aseg','cuota','tea').order_by('id_finan')
    return HttpResponse(json.dumps(ValuesQuerySetToDict(rows)), content_type="application/json")
@csrf_exempt
def listafinanciamiento(request):
    """All financing types ordered by id, as JSON."""
    rows = Financiamiento.objects.all().values('id_financ','financiamiento').order_by('id_financ')
    return HttpResponse(json.dumps(ValuesQuerySetToDict(rows)), content_type="application/json")
@csrf_exempt
def uso(request):
    """All vehicle-usage options ordered by id, as JSON."""
    rows = Uso.objects.all().values('id_uso','uso').order_by('id_uso')
    return HttpResponse(json.dumps(ValuesQuerySetToDict(rows)), content_type="application/json")
@csrf_exempt
def timon(request):
    """All steering-side options ordered by id, as JSON."""
    rows = Timon.objects.all().values('id_timon','name_tipo').order_by('id_timon')
    return HttpResponse(json.dumps(ValuesQuerySetToDict(rows)), content_type="application/json")
@csrf_exempt
def modalidad(request):
    """All coverage modalities ordered by id, as JSON."""
    rows = Modalidad.objects.all().values('id_modalidad','name_modalidad').order_by('id_modalidad')
    return HttpResponse(json.dumps(ValuesQuerySetToDict(rows)), content_type="application/json")
@csrf_exempt
def date_handler(obj):
    """JSON serializer hook: ISO-format date-like objects, pass anything else through."""
    if hasattr(obj, 'isoformat'):
        return obj.isoformat()
    return obj
def ValuesQuerySetToDict(vqs):
    """Materialize a ValuesQuerySet (or any iterable) into a plain list for json.dumps."""
    return list(vqs)
@csrf_exempt
def add(request):
    """Create a CobertAsegur row for every combination of the posted dimensions."""
    if request.method == 'POST':
        data = json.loads(request.body)

        def _as_list(v):
            # Single selections arrive as a dict, multi-selections as a list.
            return [v] if type(v) == dict else v

        cobertura = _as_list(data['cobertura'])
        aseguradora = _as_list(data['aseguradora'])
        programa = _as_list(data['programa'])
        modalidad = _as_list(data['modalidad'])
        uso = _as_list(data['uso'])
        clase = _as_list(data['clase'])
        value = data['value']
        # Cartesian product of all selected dimensions; one row each.
        for c in cobertura:
            for a in aseguradora:
                for p in programa:
                    for m in modalidad:
                        for u in uso:
                            for cl in clase:
                                CobertAsegur(tipo_id=int(cl['id_clase']), programa_id=int(p['id_program']), id_cob_id=int(c['id_cobert']), id_aseg_id=int(a['id_asegurad']), id_uso_id=int(u['id_uso']), value=value, modalidad_id=int(m['id_modalidad'])).save()
        data = json.dumps('data_dict')
        return HttpResponse(data, content_type="application/json")
@csrf_exempt
def addservice(request):
    """Persist one ServicAsegur record from the posted JSON payload."""
    if request.method == 'POST':
        payload = json.loads(request.body)
        ServicAsegur(
            id_aseg_id=payload['aseguradora']['id_asegurad'],
            id_serv_id=payload['servicio']['id_serv'],
            valor=payload['valor'],
            id_uso_id=payload['uso']['id_uso'],
            id_program_id=payload['programa']['id_program'],
        ).save()
    return HttpResponse(json.dumps(request.body), content_type="application/json")
@csrf_exempt
def addfinanz(request):
    """Persist one FinanAsegu record (financing terms per insurer)."""
    if request.method == 'POST':
        payload = json.loads(request.body)
        FinanAsegu(
            id_aseg_id=payload['aseguradora']['id_asegurad'],
            id_finan_id=payload['financiamiento']['id_financ'],
            cuota=payload['cuota'],
            tea=payload['tea'],
        ).save()
    return HttpResponse(json.dumps(request.body), content_type="application/json")
@csrf_exempt
def addfinanciamiento(request):
    """Create a Financiamiento row named by the posted 'financiamiento' field."""
    if request.method == 'POST':
        payload = json.loads(request.body)
        Financiamiento(financiamiento=payload['financiamiento']).save()
    return HttpResponse(json.dumps(request.body), content_type="application/json")
@csrf_exempt
def addtasa(request):
    """Create a TasaAsegur row for every combination of the posted dimensions."""
    if request.method == 'POST':
        data = json.loads(request.body)

        def _as_list(v):
            # Single selections arrive as a dict, multi-selections as a list.
            return [v] if type(v) == dict else v

        uso = _as_list(data['uso'])
        aseguradora = _as_list(data['aseguradora'])
        modalidad = _as_list(data['modalidad'])
        anio = _as_list(data['anio'])
        value = data['value']
        riesgo = _as_list(data['riesgo'])
        programa = _as_list(data['programa'])
        ubicacion = _as_list(data['ubicacion'])
        clase = _as_list(data['clase'])
        # Cartesian product of all selected dimensions; one rate row each.
        for a in aseguradora:
            for p in programa:
                for m in modalidad:
                    for u in uso:
                        for r in riesgo:
                            for an in anio:
                                for cl in clase:
                                    for ub in ubicacion:
                                        TasaAsegur(ubicacion=ub['id'], riesgo_id=r['id_riesgo'], id_aseg_id=a['id_asegurad'], id_uso_id=u['id_uso'], tipo_id=cl['id_clase'], modalidad_id=m['id_modalidad'], value=value, anio_id=an['id_anio'], programa_id=p['id_program']).save()
    return HttpResponse(json.dumps(request.body), content_type="application/json")
@csrf_exempt
def addriesgoclase(request):
    """Bulk-assign a risk class to every vehicle model ticked in the UI."""
    if request.method == 'POST':
        data = json.loads(request.body)
        # Expected payload: {'datax': {'riesgo': {...}, 'aseguradora': {...}},
        #                    'modelos': [[{'id_modelo': ..., 'checkmodel': bool, ...}, ...]]}
        # NOTE(review): a sample payload in the original comments used the key
        # 'modelitos', but the code reads 'modelos' -- confirm against the client.
        riesgo = data['datax']['riesgo']['id_riesgo']
        aseguradora = data['datax']['aseguradora']['id_asegurad']
        for m in data['modelos'][0]:
            # Scan the dict's keys for 'checkmodel'; only ticked models are linked.
            for i in m:
                if i=='checkmodel':
                    if m['checkmodel'] == True:
                        # Resolve the AutoValor primary key for this model id.
                        id_modelo=AutoValor.objects.filter(id_modelo_id=m['id_modelo']).values('id','id_marca','id_modelo')[0]['id']
                        RiesgAseg(id_riesg_id=int(riesgo),id_model_id=id_modelo,aseguradora_id=int(aseguradora)).save()
        data = json.dumps('data_dict')
    return HttpResponse(data, content_type="application/json")
@csrf_exempt
def addprima(request):
    """Store the minimum premium for a risk/insurer/programme combination."""
    if request.method == 'POST':
        payload = json.loads(request.body)
        Primas(
            riesgo_id=payload['riesgo']['id_riesgo'],
            aseguradora_id=payload['aseguradora']['id_asegurad'],
            primaminima=payload['valor'],
            programa_id=payload['programa']['id_program'],
        ).save()
    return HttpResponse('data', content_type="application/json")
@csrf_exempt
def adddeduccion(request):
    """Create a DeducAsegur row for every combination of the posted dimensions."""
    if request.method == 'POST':
        data = json.loads(request.body)

        def _as_list(v):
            # Single selections arrive as a dict, multi-selections as a list.
            return [v] if type(v) == dict else v

        cobertura = _as_list(data['deduccion'])
        aseguradora = _as_list(data['aseguradora'])
        programa = _as_list(data['programa'])
        modalidad = _as_list(data['modalidad'])
        uso = _as_list(data['uso'])
        clase = _as_list(data['clase'])
        value = data['value']
        riesgo = _as_list(data['riesgo'])
        # Cartesian product of all selected dimensions; one deductible row each.
        for c in cobertura:
            for a in aseguradora:
                for p in programa:
                    for m in modalidad:
                        for u in uso:
                            for r in riesgo:
                                for cl in clase:
                                    DeducAsegur(riesgo_id=int(r['id_riesgo']), tipo_id=int(cl['id_clase']), programa_id=int(p['id_program']), id_deduc_id=int(c['id_deduc']), id_aseg_id=int(a['id_asegurad']), id_uso_id=int(u['id_uso']), value=value, modalidad_id=int(m['id_modalidad'])).save()
        data = json.dumps('data_dict')
    return HttpResponse(data, content_type="application/json")
@csrf_exempt
def addauto(request):
    """Register a vehicle (class/brand/model combination) as an AutoValor row.

    Fix: the original parsed request.body twice and kept the first parse in a
    dead local; the body is now parsed once.
    """
    model = json.loads(request.body)['model']
    AutoValor(
        id_tipo_id=model['clase']['id_clase'],
        id_modelo_id=model['modelo']['id_model'],
        id_marca_id=model['marca']['id_marca'],
    ).save()
    return HttpResponse(json.dumps(request.body), content_type="application/json")
@csrf_exempt
def addmarca(request):
    """Create a brand from the posted 'data' string and echo the name back."""
    marca = json.loads(request.body)['data']
    Marca(name_marca=marca).save()
    return HttpResponse(marca, content_type="application/json")
@csrf_exempt
def addservicio(request):
    """Create a service from the posted 'data' string and echo the name back.

    Bug fix: the original returned HttpResponse(marca), but no ``marca`` exists
    in this function (copy/paste from addmarca), so every call raised
    NameError.  The created service name is returned instead.
    """
    data = json.loads(request.body)
    ser = data['data']
    Servicios(services=ser).save()
    return HttpResponse(ser, content_type="application/json")
@csrf_exempt
def addfinanzas(request):
    """Create a financing type from the posted 'data' string and echo it back.

    Bug fix: the original returned HttpResponse(marca), but no ``marca`` exists
    in this function (copy/paste from addmarca), so every call raised
    NameError.  The created financing name is returned instead.
    """
    data = json.loads(request.body)
    finan = data['data']
    Financiamiento(financiamiento=finan).save()
    return HttpResponse(finan, content_type="application/json")
@csrf_exempt
def editauto(request):
    """Re-point an existing AutoValor row at a new class/brand/model."""
    payload = json.loads(request.body)
    auto = AutoValor.objects.get(id=payload['id'])
    auto.id_tipo_id = payload['clase']['id_clase']
    auto.id_marca_id = payload['name_marca']['id_marca']
    auto.id_modelo_id = payload['name_model']['id_model']
    auto.save()
    return HttpResponse('marca', content_type="application/json")
@csrf_exempt
def addmodelo(request):
    """Create a vehicle model from the posted 'data' string."""
    name = json.loads(request.body)['data']
    Modelo(name_model=name).save()
    return HttpResponse(json.dumps(request.body), content_type="application/json")
@csrf_exempt
def addriesgo(request):
    """Create a risk class from the posted 'data' string."""
    name = json.loads(request.body)['data']
    Riesgo(tipo_riesgo=name).save()
    return HttpResponse(json.dumps(request.body), content_type="application/json")
@csrf_exempt
def man_tasas(request):
    """Maintenance listing: every TasaAsegur row with related labels, newest first."""
    # NOTE(review): 'id' appears twice in the field list -- harmless but redundant.
    coberturas = TasaAsegur.objects.all().values('id_aseg','tipo','id','programa','riesgo','modalidad','id_uso','ubicacion','programa__program','riesgo__tipo_riesgo','id','id_aseg__name_asegurad','id_uso__uso','tipo__clase','modalidad__name_modalidad','value','anio','anio__anio_antig').order_by('-id')
    data_dict = ValuesQuerySetToDict(coberturas)
    data = json.dumps(data_dict)
    return HttpResponse(data, content_type="application/json")
@csrf_exempt
def man_autos(request):
    """Maintenance listing: every registered vehicle with class/model/brand labels."""
    autos = AutoValor.objects.all().values('id','id_tipo__clase','id_modelo__name_model','id_marca__name_marca').order_by('-id')
    data_dict = ValuesQuerySetToDict(autos)
    data = json.dumps(data_dict)
    return HttpResponse(data, content_type="application/json")
@csrf_exempt
def man_cob(request):
    """Maintenance listing: every coverage/insurer association, newest first."""
    coberturas = CobertAsegur.objects.all().values('id','tipo__clase','antigued','id_cob','programa','programa__program','id_aseg','modalidad','id_cob__descripcion','id_aseg__name_asegurad','id_uso','id_uso__uso','modalidad__name_modalidad','value').order_by('-id')
    data_dict = ValuesQuerySetToDict(coberturas)
    data = json.dumps(data_dict)
    return HttpResponse(data, content_type="application/json")
# NOTE(review): man_serv and man_finan lack @csrf_exempt unlike the sibling
# views -- confirm whether they are ever hit with POST.
def man_serv(request):
    """Maintenance listing: every service/insurer association, newest first."""
    servicios = ServicAsegur.objects.all().values('id','id_serv','id_aseg','id_serv__services','id_aseg__name_asegurad','valor','id_uso__uso','id_program__program').order_by('-id')
    data_dict = ValuesQuerySetToDict(servicios)
    data = json.dumps(data_dict)
    return HttpResponse(data, content_type="application/json")
def man_finan(request):
    """Maintenance listing: every financing/insurer association, newest first."""
    financiamiento = FinanAsegu.objects.all().values('id','id_finan','id_finan__financiamiento','id_aseg','id_aseg__name_asegurad','cuota','tea').order_by('-id')
    data_dict = ValuesQuerySetToDict(financiamiento)
    data = json.dumps(data_dict)
    return HttpResponse(data, content_type="application/json")
# @csrf_exempt
# def financiamiento(request):
# financiamiento = FinanAsegu.objects.all().values('id','id_finan','id_finan__financiamiento','id_aseg','id_aseg__name_asegurad','cuota','tea').order_by('-id')
# data_dict = ValuesQuerySetToDict(financiamiento)
# data = json.dumps(data_dict)
# return HttpResponse(data, content_type="application/json")
@csrf_exempt
def deduc_cob(request):
    """Maintenance listing: every deductible/insurer association, newest first."""
    # NOTE(review): 'id' and 'value' are each listed twice -- harmless but redundant.
    deducib = DeducAsegur.objects.all().values('riesgo__tipo_riesgo','id_aseg','id_uso','tipo','modalidad','id','programa','id_deduc','programa__program','id','id_deduc__deducible','id_aseg__name_asegurad','id_uso__uso','tipo__clase','value','modalidad__name_modalidad','value').order_by('-id')
    data_dict = ValuesQuerySetToDict(deducib)
    data = json.dumps(data_dict)
    return HttpResponse(data, content_type="application/json")
@csrf_exempt
def eliminarries(request):
    """Delete the RiesgAseg row identified by the posted id."""
    payload = json.loads(request.body)
    RiesgAseg.objects.get(id=payload['id']).delete()
    return HttpResponse(payload, content_type="application/json")
@csrf_exempt
def eliminarcob(request):
    """Delete the CobertAsegur row identified by the posted id."""
    payload = json.loads(request.body)
    CobertAsegur.objects.get(id=payload['id']).delete()
    return HttpResponse(payload, content_type="application/json")
@csrf_exempt
def eliminardedu(request):
    """Delete the DeducAsegur row identified by the posted id."""
    payload = json.loads(request.body)
    DeducAsegur.objects.get(id=payload['id']).delete()
    return HttpResponse(payload, content_type="application/json")
@csrf_exempt
def eliminarpolitica(request):
    """Delete the Gps policy row identified by the posted id."""
    payload = json.loads(request.body)
    Gps.objects.get(id=payload['id']).delete()
    return HttpResponse(payload, content_type="application/json")
@csrf_exempt
def eliminarserv(request):
    """Delete the ServicAsegur row identified by the posted id."""
    payload = json.loads(request.body)
    ServicAsegur.objects.get(id=payload['id']).delete()
    return HttpResponse(payload, content_type="application/json")
@csrf_exempt
def eliminarprima(request):
    """Delete the Primas row identified by the posted id."""
    payload = json.loads(request.body)
    Primas.objects.get(id=payload['id']).delete()
    return HttpResponse(payload, content_type="application/json")
@csrf_exempt
def eliminarfinan(request):
    """Delete the FinanAsegu row identified by the posted id."""
    payload = json.loads(request.body)
    FinanAsegu.objects.get(id=payload['id']).delete()
    return HttpResponse(payload, content_type="application/json")
@csrf_exempt
def eliminartasa(request):
    """Delete the TasaAsegur row identified by the posted id."""
    payload = json.loads(request.body)
    TasaAsegur.objects.get(id=payload['id']).delete()
    return HttpResponse(payload, content_type="application/json")
@csrf_exempt
def eliminarauto(request):
    """Delete the AutoValor row identified by the posted id."""
    payload = json.loads(request.body)
    AutoValor.objects.get(id=payload['id']).delete()
    return HttpResponse(payload, content_type="application/json")
@csrf_exempt
def addaseguradora(request):
    """Create an insurer named by the posted 'data' string."""
    name = json.loads(request.body)['data']
    Aseguradora(name_asegurad=name).save()
    return HttpResponse('data', content_type="application/json")
@csrf_exempt
def addmodalidad(request):
    """Create a coverage modality named by the posted 'data' string."""
    name = json.loads(request.body)['data']
    Modalidad(name_modalidad=name).save()
    return HttpResponse('data', content_type="application/json")
@csrf_exempt
def adduso(request):
    """Create a vehicle-usage option named by the posted 'data' string."""
    name = json.loads(request.body)['data']
    Uso(uso=name).save()
    return HttpResponse('data', content_type="application/json")
@csrf_exempt
def addclase(request):
    """Create a vehicle class named by the posted 'data' string."""
    name = json.loads(request.body)['data']
    Clase(clase=name).save()
    return HttpResponse('data', content_type="application/json")
@csrf_exempt
def addprograma(request):
    """Create a programme named by the posted 'data' string."""
    name = json.loads(request.body)['data']
    Programa(program=name).save()
    return HttpResponse('data', content_type="application/json")
@csrf_exempt
def addcobertura(request):
    """Create a coverage named by the posted 'data' string."""
    name = json.loads(request.body)['data']
    Cobertura(descripcion=name).save()
    return HttpResponse('data', content_type="application/json")
@csrf_exempt
def adddeducible(request):
    """Create a deductible named by the posted 'data' string."""
    name = json.loads(request.body)['data']
    Deducibles(deducible=name).save()
    return HttpResponse('data', content_type="application/json")
@csrf_exempt
def addpoliticagps(request):
    """Create Gps policy rows (value fixed to 'Si') for each ticked model per insurer/programme/year."""
    if request.method == 'POST':
        body = json.loads(request.body)
        data = body['gps']
        modelitos = body['modelitos']
        aseguradora = data['aseguradora']
        programa = data['programa']
        # Extracted as in the original even though unused below (KeyError behavior kept).
        value = data['value']
        ubicacion = data['ubicacion']['id']
        anio = data['anio']
        # Single selections arrive as a dict, multi-selections as a list.
        if type(aseguradora) == dict:
            aseguradora = [aseguradora]
        if type(modelitos) == dict:
            modelitos = [modelitos]
        if type(programa) == dict:
            programa = [programa]
        if type(anio) == dict:
            anio = [anio]
        for a in aseguradora:
            for p in programa:
                for an in anio:
                    # Only models the user ticked carry checkmodel == True.
                    for m in modelitos[0]:
                        if m.get('checkmodel') == True:
                            Gps(id_uso_id=1, anio_antig_id=an['id_anio'], id_prog_id=p['id_program'], id_auto_id=m['id_modelo'], value='Si', id_aseg_id=a['id_asegurad']).save()
    return HttpResponse(data, content_type="application/json")
@csrf_exempt
def addpoliticagps2(request):
    """Create minimum-sum Gps policy rows per insurer/programme (other FKs pinned to 1)."""
    if request.method == 'POST':
        gps = json.loads(request.body)['gps']
        aseguradora = gps['aseguradora']
        programa = gps['programa']
        sumaminima = gps['sumaminima']
        # Single selections arrive as a dict, multi-selections as a list.
        if type(aseguradora) == dict:
            aseguradora = [aseguradora]
        if type(programa) == dict:
            programa = [programa]
        for a in aseguradora:
            for p in programa:
                Gps(sumaminima=sumaminima, id_prog_id=p['id_program'], id_aseg_id=a['id_asegurad'], id_auto_id=1, id_riesg_id=1, anio_antig_id=1, id_uso_id=1, region=1, value=1).save()
    return HttpResponse('data', content_type="application/json")
@csrf_exempt
def cotiSave(request):
    """Persist a customer quotation (Clientes row) and return the new client id.

    Contact fields (name/cel/email) are optional in the payload and default
    to empty strings.
    """
    if request.method == 'POST':
        dato = json.loads(request.body)['dato']
        precio = json.loads(request.body)['precio']
        name =''
        cel = ''
        email = ''
        # Copy only the contact fields actually present in the payload.
        for i in dato:
            if i == 'name':
                name=dato['name']
            if i == 'cel':
                cel=dato['cel']
            if i == 'email':
                email=dato['email']
        #timon=dato['timon']['id_timon']
        anio=dato['anio']['id_anio']
        uso=dato['uso']
        marca=dato['marca']['id_marca']
        modelo=dato['claseModelo']['id_modelo']
        modalidad=dato['modalidad']['id_modalidad']
        tipo=dato['claseModelo']['id_tipo']
        statuscheck=dato['statuscheck']
        statusubicL=dato['statusubicL']
        statusubicP=dato['statusubicP']
        Clientes(fullname=name,email=email,celular=cel,chose_tipo_id=int(tipo),chose_marca_id=int(marca),chose_anio_id=int(anio),chose_modelo_id=int(modelo),chose_modalid_id=int(modalidad),chose_uso_id=int(uso),value=float(precio),chose_ubicl=int(statusubicL),chose_ubicp=int(statusubicP),chose_informat=int(statuscheck)).save()
        # NOTE(review): the new id is fetched with a separate "latest id" query,
        # which is racy under concurrent inserts -- confirm this is acceptable.
        id_cliente = Clientes.objects.all().values('id_cliente').order_by('-id_cliente')[0]['id_cliente']
    return HttpResponse(json.dumps(id_cliente), content_type="application/json")
@csrf_exempt
def enviaemail(request):
    """Log the quotation email address for an opted-in client.

    Looks up the client by the posted orderId, resolves the vehicle's
    brand/model via AutoValor, builds the notification message, and appends
    the client's address to a log file when the client opted in
    (chose_informat == 1).  The actual send_mail call remains disabled.

    Fixes: the log file handle is now closed via a context manager even on
    write errors; the builtin-shadowing local ``id`` and three redundant
    Clientes/body lookups are removed.
    """
    if request.method == 'POST':
        data = json.loads(request.body)
        cliente = Clientes.objects.get(id_cliente=data['orderId'])
        name = cliente.fullname
        email = cliente.email
        precio = data['precio']
        marca = modelo = None
        # The filter is expected to match one row; the loop keeps the last match
        # (same as the original).
        for auto in AutoValor.objects.filter(id_modelo_id=data['modelo']):
            marca = auto.id_marca.name_marca
            modelo = auto.id_modelo.name_model
        msj = 'Estimado cliente '+ str(name) +' , el siguiente link detalla la cotizacion del auto ' + str(marca) +' '+ str(modelo)+ ' valorizado en ' +str(precio)+'. Adjunto el link: '+ str('http://cotizador.hermes.pe:800/html/pdfout.pdf')
        if cliente.chose_informat == 1:
            with open('/var/www/html/email.txt', 'a') as f:
                f.write(str(email)+'\n')
        #send_mail('Hermes',msj,'cotiza@hermes.pe', [email], fail_silently=False)
    return HttpResponse(json.dumps('id_cliente'), content_type="application/json")
@csrf_exempt
def savecob(request):
    """Update an existing CobertAsegur row with the posted field values."""
    data = json.loads(request.body)
    record = CobertAsegur.objects.get(id=data['id'])
    record.tipo_id = data['clase']['id_clase']
    record.id_aseg_id = data['aseguradora']['id_asegurad']
    record.modalidad_id = data['modalidad']['id_modalidad']
    record.programa_id = data['programa']['id_program']
    record.id_uso_id = data['uso']['id_uso']
    record.value = data['value']
    record.save()
    return HttpResponse(data, content_type="application/json")
@csrf_exempt
def saveprimas(request):
    """Update an existing Primas row (insurer, risk, programme, minimum premium)."""
    data = json.loads(request.body)
    prima = Primas.objects.get(id=data['id'])
    prima.aseguradora_id = data['aseguradora']['id_asegurad']
    prima.riesgo_id = data['riesgo']['id_riesgo']
    prima.programa_id = data['programa']['id_program']
    prima.primaminima = data['primaminima']
    prima.save()
    return HttpResponse(data, content_type="application/json")
@csrf_exempt
def savededu(request):
    """Update an existing DeducAsegur row with the posted field values.

    Bug fix: the original assigned ``c.anio_id = anio`` although ``anio`` was
    never extracted (the line reading it was commented out), so every call
    raised NameError.  The assignment is removed to match the commented-out
    extraction.
    """
    data = json.loads(request.body)
    c = DeducAsegur.objects.get(id=data['id'])
    c.tipo_id = data['clase']['id_clase']
    c.id_aseg_id = data['aseguradora']['id_asegurad']
    c.modalidad_id = data['modalidad']['id_modalidad']
    c.programa_id = data['programa']['id_program']
    c.id_uso_id = data['uso']['id_uso']
    c.value = data['value']
    c.riesgo_id = data['riesgo']['id_riesgo']
    c.save()
    return HttpResponse(data, content_type="application/json")
@csrf_exempt
def saveservicio(request):
    """Update an existing ServicAsegur row with the posted field values."""
    if request.method == 'POST':
        data = json.loads(request.body)
        record = ServicAsegur.objects.get(id=data['id'])
        record.id_aseg_id = data['aseguradora']['id_asegurad']
        record.id_serv_id = data['servicio']['id_serv']
        record.valor = data['valor']
        record.save()
    return HttpResponse(data, content_type="application/json")
@csrf_exempt
def savefinanc(request):
    """Update an existing FinanAsegu row (financing type, insurer, cuota, tea)."""
    data = json.loads(request.body)
    record = FinanAsegu.objects.get(id=data['id'])
    record.id_finan_id = data['id_finan']
    record.id_aseg_id = data['aseguradora']['id_asegurad']
    record.cuota = data['cuota']
    record.tea = data['tea']
    record.save()
    return HttpResponse(data, content_type="application/json")
@csrf_exempt
def savetasa(request):
    """Update an existing TasaAsegur row with the posted dimension ids and value."""
    data = json.loads(request.body)
    record = TasaAsegur.objects.get(id=data['id'])
    record.tipo_id = data['clase']['id_clase']
    record.id_aseg_id = data['aseguradora']['id_asegurad']
    record.modalidad_id = data['modalidad']['id_modalidad']
    record.id_uso_id = data['uso']['id_uso']
    record.programa_id = data['programa']['id_program']
    record.anio_id = data['anio']['id_anio']
    record.riesgo_id = data['riesgo']['id_riesgo']
    record.value = data['value']
    record.save()
    return HttpResponse(data, content_type="application/json")
@csrf_exempt
def savepoliticas(request):
    """Update an existing Gps policy row (insurer, programme, year, value)."""
    data = json.loads(request.body)
    record = Gps.objects.get(id=data['id'])
    record.id_aseg_id = data['aseguradora']['id_asegurad']
    record.programa_id = data['programa']['id_program']
    record.anio_antig_id = data['anio']['id_anio']
    record.value = data['value']
    record.save()
    return HttpResponse(data, content_type="application/json")
|
17,891 | c50044d9b51bba72b0f1b3bbed79b54e1f59507c | #!/usr/bin/python3
#GPIO USAGE: §https://sourceforge.net/p/raspberry-gpio-python/wiki/BasicUsage/
#GPIO PINOUT: https://www.raspberrypi-spy.co.uk/2012/06/simple-guide-to-the-rpi-gpio-header-and-pins/
try:
import RPi.GPIO as GPIO
except RuntimeError:
print("Error importing RPi.GPIO! This is probably because you need superuser privileges. You can achieve this by using 'sudo' to run your script")
import time
def simple_gpio_usage_output():
    """Drive physical pin 12 high for one second, then low, then release it."""
    # BOARD numbering refers to the physical header pin positions.
    GPIO.setmode(GPIO.BOARD)
    #GPIO.setmode(GPIO.BCM)
    # Read back and show the numbering mode just configured.
    mode = GPIO.getmode()
    print("gpio mode: " + str(mode))
    # Configure the pin as an output and pulse it once.
    channel=12
    state=True
    GPIO.setup(channel, GPIO.OUT)
    GPIO.output(channel, state)
    time.sleep(1)
    state=False
    GPIO.output(channel, state)
    # Release only this channel (cleanup() with no args would reset all pins).
    #GPIO.cleanup()
    GPIO.cleanup(channel)
def pwm_gpio_usage_output():
    """Ramp the duty cycle on pin 12 via software PWM, then stop on keypress."""
    channel = 12
    dc = 0 # where dc is the duty cycle (0.0 <= dc <= 100.0)
    frequency = 40 # Hz
    # Configure the pin and create the PWM driver at the chosen frequency.
    GPIO.setmode(GPIO.BOARD)
    GPIO.setup(channel, GPIO.OUT)
    p = GPIO.PWM(channel, frequency)
    # Start at 0% duty cycle.
    p.start(dc)
    # Brighten demo: duty cycle ramps 1..49 in 0.1 s steps (no dimming phase).
    for i in range(1,50):
        time.sleep(0.1)
        dc = i
        p.ChangeDutyCycle(dc)
    # Block until the user confirms, then stop PWM and reset all channels.
    input('Press return to stop:') # use raw_input for Python 2
    p.stop()
    GPIO.cleanup()
#simple_gpio_usage_output()
pwm_gpio_usage_output()
|
17,892 | 97058273e422a3343a1a077114fe222cf87b0ee2 | import os
import csv
from datetime import datetime, timedelta
import requests
dt_today = datetime.now()
dt_yesterday = dt_today - timedelta(1)
# YYYYMMDD stamp used both in telegram-log rows and to name per-day CSV files.
date_today = "%04i%02i%02i" %(dt_today.year,dt_today.month,dt_today.day)
# Ping targets: local gateway, a wifi host, and an external reference.
hostname_gateway = "fritz.box"
hostname_wifi = ""  # NOTE(review): empty -- yields 'ping -c 1 ' with no host; fill in or the wifi check is meaningless
hostname_extern = "google.com"
filename_today = "network_test_%04i%02i%02i.csv" % (dt_today.year,dt_today.month,dt_today.day)
filename_yesterday = "network_test_%04i%02i%02i.csv" % (dt_yesterday.year,dt_yesterday.month,dt_yesterday.day)
filename_telegram_log = "telegram_log.csv"
# Per-target success counters, recomputed by read_csv().
gateway_up_cnt = 0
wifi_up_cnt = 0
extern_up_cnt = 0
check_cnt = 0
# Telegram bot credentials -- left blank here; must be filled in to send messages.
bot_token = ""
bot_ID = ""
def ping(hostname):
    """Fire a single ping at *hostname*; return the shell exit status (0 == reachable)."""
    command = "ping -c 1 " + hostname
    return os.system(command)
def print_summary():
    """Print per-target success counts and percentages for the loaded CSV.

    Fix: guards against ZeroDivisionError when no checks have been recorded
    (check_cnt == 0), e.g. when today's CSV only contains the header.
    """
    if check_cnt == 0:
        print("No checks recorded.")
        return
    print("Gateway: %i (%.f%%)" %(gateway_up_cnt,gateway_up_cnt/check_cnt*100))
    print("Wifi: %i (%.f%%)" %(wifi_up_cnt,wifi_up_cnt/check_cnt*100))
    print("Extern: %i (%.f%%)" %(extern_up_cnt,extern_up_cnt/check_cnt*100))
def check_network():
    """Ping all three targets once and append an OK/nOK row to today's CSV."""
    with open(filename_today,"a") as csvfile:
        csv_writer = csv.writer(csvfile)
        # First write of the day: emit the CSV header.
        if os.stat(filename_today).st_size == 0:
            csv_writer.writerow(['time','gateway','wifi','extern'])
        gateway_check = 'nOK'
        wifi_check = 'nOK'
        extern_check = 'nOK'
        # ping() returns the shell exit status; 0 means the host answered.
        if ping(hostname_gateway) == 0:
            gateway_check = 'OK'
        if ping(hostname_wifi) == 0:
            wifi_check = 'OK'
        if ping(hostname_extern) == 0:
            extern_check = 'OK'
        # NOTE(review): the timestamp is the script start time (dt_today), not
        # the moment each ping ran -- confirm that is intended.
        csv_writer.writerow(["%02i:%02i:%02i" % (dt_today.hour,dt_today.minute,dt_today.second),gateway_check,wifi_check,extern_check])
def read_csv(filename):
    """Load *filename* and recount the global per-target up-counters.

    Returns True when the file was read successfully, False when it is
    missing, empty, or malformed.  The counters are reset in either case.

    Fix: the original used a bare ``except:`` which also swallowed real bugs
    (NameError, KeyboardInterrupt, ...); it is narrowed to the failures the
    function actually expects.  StopIteration covers an empty file (the
    header skip), IndexError a short/malformed row.
    """
    global gateway_up_cnt
    global wifi_up_cnt
    global extern_up_cnt
    global check_cnt
    gateway_up_cnt = 0
    wifi_up_cnt = 0
    extern_up_cnt = 0
    check_cnt = 0
    try:
        with open(filename,"r") as csvfile:
            csv_reader = csv.reader(csvfile)
            next(csv_reader) #skip first line (header)
            for row in csv_reader:
                if row[1] == 'OK':
                    gateway_up_cnt += 1
                if row[2] == 'OK':
                    wifi_up_cnt += 1
                if row[3] == 'OK':
                    extern_up_cnt += 1
                check_cnt += 1
            return True
    except (OSError, csv.Error, StopIteration, IndexError):
        return False
def get_daily_summary():
    """Build the (German) Telegram summary text from the module counters.

    Callers run read_csv(filename_yesterday) first, so the counters refer
    to yesterday's log.
    """
    title_text = "Zusammenfassung für gestern %02i.%02i.%i:\n" %(dt_yesterday.day,dt_yesterday.month,dt_yesterday.year)
    # All counters equal to check_cnt means every single check succeeded.
    if check_cnt == gateway_up_cnt and check_cnt == wifi_up_cnt and check_cnt == extern_up_cnt:
        summary_text = "Gestern gab es keine Ausfälle!"
    else:
        summary_text = "Gateway: %i (%.f%%)\nWifi: %i (%.f%%)\nExtern: %i (%.f%%)" %(
            gateway_up_cnt,gateway_up_cnt/check_cnt*100,wifi_up_cnt,wifi_up_cnt/check_cnt*100,extern_up_cnt,extern_up_cnt/check_cnt*100)
    return title_text + summary_text
def send_message(message):
    """Send *message* via the Telegram bot API, log the send, return the JSON reply.

    NOTE(review): the message is concatenated into the URL without escaping;
    characters special in URLs would need urllib quoting.
    """
    send_text = 'https://api.telegram.org/bot' + bot_token + '/sendMessage?chat_id=' + bot_ID + '&parse_mode=Markdown&text=' + message
    response = requests.get(send_text)
    # Record that today's message went out so it is not sent twice.
    set_telegram_log(filename_telegram_log)
    return response.json()
def set_telegram_log(filename):
    """Append a (date, time) row marking that a Telegram message was sent."""
    with open(filename,"a") as csvfile:
        csv_writer = csv.writer(csvfile)
        csv_writer.writerow([date_today,"%02i:%02i:%02i" %(dt_today.hour,dt_today.minute,dt_today.second)])
def get_telegram_log(filename):
    """Return True if a Telegram message was already logged for today."""
    try:
        with open(filename,"r") as csvfile:
            csv_reader = csv.reader(csvfile)
            for row in csv_reader:
                if row[0] == date_today:
                    return True
        return False
    # Narrowed from a bare `except:`: a missing/unreadable log file or an
    # empty row simply means "not sent yet".
    except (OSError, IndexError, csv.Error):
        return False
if __name__ == "__main__":
    # One monitoring cycle: probe the hosts, log to today's CSV, print stats.
    check_network()
    read_csv(filename_today)
    print_summary()
    # Send the daily summary exactly once: only when yesterday's log exists
    # and no Telegram message has been logged for today yet.
    if True == read_csv(filename_yesterday) and False == get_telegram_log(filename_telegram_log):
send_message(get_daily_summary()) |
17,893 | a53cc3457b37c9d23f9706b069f36407f199fbe7 | #!/usr/bin/env python
from __future__ import print_function
import argparse
import getopt
import os
import re
import shutil
import sys
import time
from port import die, open_or_die, pprint, print_stderr
# CLI usage text, printed via die() when the script is invoked incorrectly.
usage = '''
{} file_command_line file_structure rounds_controls
-a Output progress to screen
'''.format(sys.argv[0])
def perform_controls(_dir, rounds, command_line, options):
    """Run `rounds` permutation controls inside a scratch directory.

    Each round writes a permuted structure file into `_dir` and executes the
    rewritten command line against it.  `_dir` is created on entry and
    removed at the end.  With -a, per-round progress goes to stderr.

    NOTE(review): relies on the module-level `file_structure` set in
    __main__ rather than taking it as a parameter.
    """
    os.mkdir(_dir)
    _round = 1
    while _round <= rounds:
        if options.get('-a') == '':  # getopt stores '' for a present flag
            print_stderr('{}\r'.format(_round))
        cmd = 'permute_structure.py {} > {}/precursors_permuted.str 2> /dev/null'.format(
            file_structure, _dir)
        # print(cmd)
        os.system(cmd)
        pprint('permutation {}\n\n'.format(_round))
        cmd = '{} 2> /dev/null'.format(command_line)
        os.system(cmd)
        # ret = os.popen(cmd).read().strip()
        # pprint(ret)
        _round += 1
    shutil.rmtree(_dir)
    if options.get('-a') == '':
        print_stderr('controls performed\n\n')
def parse_file_command_line(file_command_line, file_structure, _dir):
    """Return the first non-blank line of `file_command_line`, rewritten.

    The structure-file path is replaced by the permuted copy inside `_dir`
    and any output redirection ('> ...') is stripped.  Dies if the file
    contains no non-blank line.
    """
    # Open in text mode: the file was previously opened with 'rb', which on
    # Python 3 yields bytes and makes the str regexes below raise TypeError.
    FILE = open_or_die(file_command_line, 'r',
                       'can not open {}'.format(file_command_line))
    while True:
        line = FILE.readline()
        if not line:
            break
        if re.search(r'(\S+)', line):
            line = line.strip()
            # Point the command at the permuted structure in the scratch dir.
            line = re.sub(
                file_structure, '{}/precursors_permuted.str'.format(_dir), line, count=1)
            # Drop any '> output' redirection so we control where output goes.
            line = re.sub(r'>.+', '', line, count=1)
            return line
    die('{} is empty\n'.format(file_command_line))
if __name__ == '__main__':
    if len(sys.argv) < 4:
        die(usage)
    parser = argparse.ArgumentParser(usage=usage)
    parser.add_argument('file_command_line', help='command list file')
    parser.add_argument('file_structure', help='structure file')
    parser.add_argument('rounds', help='rounds')
    args = parser.parse_args(sys.argv[1:4])
    file_command_line = args.file_command_line
    file_structure = args.file_structure
    rounds = int(args.rounds)
    # Optional flags start at argv[4]; getopt maps a present '-a' to ''.
    opt, argss = getopt.getopt(sys.argv[4:], "a")
    options = dict(opt)
    # Fixed: long() is Python-2-only and raises NameError on Python 3;
    # int() gives the same unique epoch-second scratch-directory suffix.
    ltime = int(time.time())
    _dir = 'dir_perform_controls{}'.format(ltime)
    command_line = parse_file_command_line(
        file_command_line, file_structure, _dir)
    if options.get('-a') == '':
        print_stderr('total number of rounds controls={}\n'.format(rounds))
    perform_controls(_dir, rounds, command_line, options)
|
17,894 | 6335234b438de049790b4ecb4efb245afc9a7da9 | from flask import request, redirect, render_template, session, flash
# from flask_sqlalchemy import SQLAlchemy
# from models import User, Movie
import cgi
import pytz
from app import app, db
from models import User,Blog
#You're able to submit a new post at the /newpost route.
#After submitting a new post, your app displays the main blog page.
@app.route("/newpost", methods=['POST', 'GET'])
def postblog():
    """Show the new-post form (GET) or create a blog entry (POST)."""
    blog_title = ""
    body = ""
    # require_login() guarantees 'user' is in the session for this endpoint.
    user = User.query.filter_by(username=session['user']).first()
    if request.method == 'POST':
        blog_title = request.form['blog_title']
        body = request.form['body']
        if not blog_title:
            flash("Please enter a blog title", "error_title")
        if not body:
            flash("Please enter text into the blog", "error_body")
        if blog_title and body:
            blog_post = Blog(blog_title, body, user)
            db.session.add(blog_post)
            db.session.commit()
            # Re-query by body to get the freshly stored post (with its id).
            post = Blog.query.filter_by(body=body).first()
            return render_template('blogpage.html', post=post, blog_title=blog_title, body=body, user=user)
    # NOTE(review): id=id passes the builtin id() function to the template --
    # looks unintended; verify what newpost.html expects.
    return render_template('newpost.html', title="Add a Blog Entry", id=id, blog_title=blog_title, body=body, user=user)
'''
@app.route('/blog', methods=['GET'])
def blog_listings():
owner = User.query.filter_by(email=session['user']).first()
posts = Blog.query.filter_by(owner=owner).all()
if request.args.get('id'):
post_id = request.args.get('id')
post = Blog.query.filter_by(id=post_id).first()
return render_template('singleUser.html', post=post)
return render_template('singleUser.html', posts=posts, title="Build a Blog!")
'''
@app.route("/login", methods=['GET', 'POST'])
def login():
    """Render the login form (GET) or authenticate and start a session (POST)."""
    if request.method == 'GET':
        return render_template('login.html')
    elif request.method == 'POST':
        username = request.form['username']
        password = request.form['password']
        users = User.query.filter_by(username=username)
        if username == "":
            flash('please enter a username and password')
        elif users.count() == 1:
            user = users.first()
            # NOTE(review): plain-text password comparison -- passwords are
            # stored unhashed.
            if password == user.password:
                session['user'] = user.username
                return redirect("/newpost")
            else:
                flash('password is incorrect')
        elif users.count() == 0:
            flash('username does not exist')
            username = ""
        # Fall through on any failure: re-show the form, pre-filled.
        return render_template('login.html', username=username)
@app.route("/signup", methods=['GET', 'POST'])
def signup():
    """Register a new user.

    Validates that username and password are present, at least 4 characters
    long, the password matches its verification, and the username is unique,
    flashing an error for each failed rule.  On success the user is stored,
    logged in and redirected to /newpost.
    """
    if request.method == 'POST':
        username = request.form['username']
        password = request.form['password']
        verify = request.form['verify']
        username_db_count = User.query.filter_by(username=username).count()
        if username_db_count > 0:
            flash('yikes! "' + username + '" is already taken')
        # Fixed: the original nesting flashed the length message for an
        # empty field and the length message instead of "did not match".
        if not username:
            flash('please enter a username')
        elif len(username) < 4:
            flash('username should be longer than 4 characters')
        if not password:
            flash('please enter a password')
        elif len(password) < 4:
            flash('password should be longer than 3 characters, please try again')
        elif password != verify:
            flash('passwords did not match')
        # Fixed: the uniqueness test read `username_db_count < 0` (never
        # true, so signup could never succeed) and the length tests used
        # `> 4`, rejecting 4-character values the error paths allow.
        if (password == verify and len(username) >= 4
                and username_db_count == 0 and len(password) >= 4):
            user = User(username=username, password=password)
            db.session.add(user)
            db.session.commit()
            session['user'] = user.username
            return redirect("/newpost")
        return render_template('signup.html', username=username)
    if request.method == 'GET':
        return render_template('signup.html')
'''def is_email(string):
# for our purposes, an email string has an '@' followed by a '.'
# there is an embedded language called 'regular expression' that would crunch this implementation down
# to a one-liner, but we'll keep it simple:
atsign_index = string.find('@')
atsign_present = atsign_index >= 0
if not atsign_present:
return False
else:
domain_dot_index = string.find('.', atsign_index)
domain_dot_present = domain_dot_index >= 0
return domain_dot_present'''
@app.route("/logout", methods=['GET'])
def logout():
    """Drop the session's user and return to the public blog listing."""
    session.pop('user')
    return redirect("/blogz")
@app.route('/blogz', methods=['GET'])
def blog_listings():
    """List all posts, filter by ?username=..., or show one post via ?id=...."""
    posts = Blog.query.order_by(Blog.pub_date.desc()).all()
    if request.args.get('username'):
        post_user = request.args.get('username')
        username = User.query.filter_by(username=post_user).first()
        posts = Blog.query.filter_by(owner=username).all()
        return render_template('singleUser.html', posts=posts, title="blogz posts!", post_user=post_user, username=username)
    if request.args.get('id'):
        post_id = request.args.get('id')
        post = Blog.query.filter_by(id=post_id).first()
        '''post contains the first blog post, owner id and id connect it to User'''
        # Resolve the author so the template can show who wrote the post.
        user = User.query.filter_by(id=post.owner_id).first()
        return render_template('blogpage.html', post=post, user=user)
    return render_template('blog.html', posts=posts, title="All the Blogz Postz")
@app.route("/")
def index():
    """Home page: list every registered user."""
    users = User.query.filter_by().all()
    return render_template('index.html', users=users, title="Blogz Userz!")
def logged_in_user():
    """Return the User record for the current session (assumes a login exists)."""
    owner = User.query.filter_by(username=session['user']).first()
    return owner
# Endpoints reachable without a session; everything else bounces to /signup.
endpoints_without_login = ['blog_listings', 'signup', 'login', 'index']
@app.before_request
def require_login():
    # Redirect anonymous requests for protected endpoints to the signup page.
    if not ('user' in session or request.endpoint in endpoints_without_login):
        return redirect("/signup")
# In a real application, this should be kept secret (i.e. not on github)
# As a consequence of this secret being public, I think connection snoopers or
# rival movie sites' javascript could hijack our session and act as us,
# perhaps giving movies bad ratings - the HORROR.
app.secret_key = 'A0Zr98j/3yX R~XHH!jmN]LWX/,?RU'
if __name__ == '__main__':
app.run() |
17,895 | 07f6b4347c58dafb36f899e5fc811152fbfc75d5 | from myMath import myArithmetic,myCalcArea,myStatistics
# Read five numbers from the user.
the_first_number=float(input('input your first number:'))
the_second_number=float(input('input your second number:'))
the_third_number=float(input('input your third number:'))
the_fourth_number=float(input('input your fourth number:'))
the_fifth_number=float(input('input your fifth number:'))
# Sum all five via pairwise myAdd calls, then divide by 5 to print the mean.
the_first_plus_the_second_number=myArithmetic.myAdd(the_first_number,the_second_number)
the_third_plus_the_fourth_number=myArithmetic.myAdd(the_third_number,the_fourth_number)
the_first_plus_the_second_plus_the_third_plus_the_fourth_number=myArithmetic.myAdd(the_first_plus_the_second_number,the_third_plus_the_fourth_number)
total_five_number=myArithmetic.myAdd(the_first_plus_the_second_plus_the_third_plus_the_fourth_number,the_fifth_number)
total_five_number_divise_five=myArithmetic.myDiv(total_five_number,5)
print(total_five_number_divise_five)
|
17,896 | c8cf2812f2681bf7480402d757cefdd5a339e372 | import os
from openbiolink.graph_creation import graphCreationConfig as g
from openbiolink.graph_creation.file_reader.csvReader import CsvReader
from openbiolink.graph_creation.metadata_db_file.edge.dbMetaEdgeHpoGene import DbMetaEdgeHpoGene
from openbiolink.graph_creation.types.dbType import DbType
from openbiolink.graph_creation.types.readerType import ReaderType
class EdgeHpoGeneReader(CsvReader):
    """CSV reader for the HPO-gene edge file described by DbMetaEdgeHpoGene."""

    DB_META_CLASS = DbMetaEdgeHpoGene

    def __init__(self):
        # Wire the generic CsvReader to this edge type's file, columns and
        # reader/db identifiers; separator and dtypes are left unspecified.
        super().__init__(
            in_path=os.path.join(g.O_FILE_PATH, self.DB_META_CLASS.OFILE_NAME),
            sep=None,
            cols=self.DB_META_CLASS.COLS,
            use_cols=self.DB_META_CLASS.FILTER_COLS,
            nr_lines_header=self.DB_META_CLASS.HEADER,
            dtypes=None,
            readerType=ReaderType.READER_EDGE_HPO_GENE,
            dbType=DbType.DB_EDGE_HPO_GENE,
        )
|
17,897 | 0c57830ed5d4d9ef4001feeae5f15ed4977fa020 | portal = context.getPortalObject()
# Zope/ERP5 restricted script: issue a certificate for the logged-in user.
person = portal.portal_membership.getAuthenticatedMember().getUserValue()
request = context.REQUEST
response = request.RESPONSE
import json
# Anonymous callers get an empty 403 response.
if person is None:
    response.setStatus(403)
    return {}
try:
    return json.dumps(person.generateCertificate())
# Certificate is Created
except ValueError:
    # Certificate was already requested, please revoke existing one.
    return json.dumps(False)
|
17,898 | 40d452192c2080cea614bb2d0779f69478cd2b7c | from influxdb import InfluxDBClient
from edgeos import edgeos_webstream, edgeos_web
from time import sleep, time
from secret import edgeos_url,username,password, mac2name
# Authenticated EdgeOS web session (credentials come from secret.py).
s = edgeos_web(edgeos_url, username=username,password=password, verify=False)
s.login()
print("Sleeping 5 to make sure the session id {} is in the filesystem".format(s.session_id))
sleep(5)
default_tags = None
# InfluxDB server receiving the metrics.
client = InfluxDBClient('192.168.111.20', 8086)
# END EDIT
try:
    default_tags = {
        'hostname': '192.168.111.1',
    }
except:
    # NOTE(review): a dict literal cannot raise, so this except is dead
    # code -- presumably a leftover from a dynamic-hostname lookup.
    print("Could not generate site default tags")
client.create_database('edgeos')
client.switch_database('edgeos')
# Websocket stream pushing live stats from the router.
ews = s.create_websocket()
print(ews.status)
print(ews.subscribe(subs=['interfaces','system-stats','export', 'users']))
# Field names sampled from the router's system-stats push messages.
sys_stat_fields = [ 'cpu', 'mem', 'uptime' ]
def process_system_stats(x):
    """Convert a system-stats payload into a one-point InfluxDB batch."""
    fields = {name: int(x[name]) for name in sys_stat_fields}
    return [{'measurement': 'system-stats', 'fields': fields}]
# Per-interface counters copied from each interface's stats block.
if_fields = [
    'rx_packets', 'rx_bytes', 'rx_errors', 'rx_dropped',
    'tx_packets', 'tx_bytes', 'tx_errors', 'tx_dropped',
]
# Cache of recently written points: point id -> (timestamp, field checksum).
dups = {}
def is_dup(point):
    """Return True if *point* repeats an identical point seen under 60s ago.

    Otherwise record it (refreshing the timestamp/values) and return False.
    """
    now = time()
    key_parts = [point['measurement']]
    key_parts.extend(point['tags'].values())
    key = '-'.join(key_parts)
    value_sig = '-'.join(map(str, point['fields'].values()))
    entry = dups.get(key)
    if entry is not None:
        stamp, prev_sig = entry
        if prev_sig == value_sig and (now - stamp) < 60:
            return True
    dups[key] = (now, value_sig)
    return False
def process_interfaces(x):
    """Convert an interfaces payload into InfluxDB points, skipping recent duplicates."""
    json = []
    for interface, data in x.items():
        temp = {
            'measurement': 'interface',
            'tags': {
                'interface': interface,
            },
            'fields': dict(
                (field_name, int(data['stats'][field_name])) for field_name in if_fields
            )
        }
        if not is_dup(temp):
            json.append(temp)
    return json
# Caches populated from the router's DHCP lease table.
ip2mac = {}
#ip2client_hostname = {}
ip2name = {}
def process_dhcp():
    """Refresh ip2mac/ip2name from the current DHCP leases.

    Names come from the mac2name override map when available, else from the
    lease's client-hostname, falling back to the bare IP.
    """
    leases = s.dhcp_leases()
    for lan in leases['dhcp-server-leases'].values():
        if not lan: continue
        for ip, lease in lan.items():
            ip2mac[ip] = lease['mac']
            if lease['mac'] in mac2name:
                ip2name[ip] = mac2name[lease['mac']]
            else:
                ip2name[ip] = lease['client-hostname']
            if ip2name[ip] == "":
                ip2name[ip] = ip
def ip_to_mac(ip):
    """Resolve an IP to its MAC, refreshing the DHCP cache on a miss."""
    if ip in ip2mac:
        return ip2mac[ip]
    process_dhcp()
    if ip in ip2mac:
        return ip2mac[ip]
    return 'UNKNOWN'
def ip_to_name(ip):
    """Resolve an IP to a friendly name, refreshing the DHCP cache on a miss."""
    if ip in ip2name:
        return ip2name[ip]
    process_dhcp()
    if ip in ip2name:
        return ip2name[ip]
    return ip
def process_export(x):
    """Convert a DPI export payload into a client-count point plus per-app traffic points."""
    json = [ {
        'measurement': 'clients',
        'fields': { 'count': len(x) }
    } ]
    for ip, data in x.items():
        mac = ip_to_mac(ip)
        name = ip_to_name(ip)
        for application, stats in data.items():
            #print(application,stats)
            temp = {
                'measurement': 'dpi',
                'tags': {
                    # Label = last 8 chars of the MAC + resolved name.
                    'name': "{}-{}".format(mac[-8:],name),
                    'mac': mac,
                    'application': application,
                },
                'fields': {
                    'rx_bytes': int(stats['rx_bytes']),
                    'tx_bytes': int(stats['tx_bytes']),
                }
            }
            if not is_dup(temp):
                json.append(temp)
    return json
def process_users(x):
    """Convert a users payload into per-category user-count points.

    Side effect: records friendly names for l2tp VPN clients in ip2name so
    later points can be labelled user-interface instead of the bare IP.
    """
    json = []
    for key, value in x.items():
        temp = {
            'measurement': 'user-count',
            'tags': {
                'key': key
            },
            'fields': {
                'value': len(value)
            }
        }
        if not is_dup(temp):
            json.append(temp)
        if key == 'l2tp':
            for vpn in value:
                for user, stuff in vpn.items():
                    ip2name[stuff['remote-ip']] = "{}-{}".format(user, stuff['interface'])
    return json
# Main pump: read websocket messages forever, convert them and write the
# resulting points to InfluxDB.  On any error the session is dropped and
# rebuilt after a short pause.
while True:
    if not s:
        # A previous iteration failed -- try to re-establish the session.
        try:
            s = edgeos_web(edgeos_url, username=username,password=password, verify=False)
            s.login()
            sleep(2)
            ews = s.create_websocket()
            ews.subscribe(subs=['interfaces','system-stats','export', 'users'])
        except:
            # Best-effort reconnect; the outer except below retries again.
            pass
    try:
        x = ews.next()
        if 'system-stats' in x:
            json = process_system_stats(x['system-stats'])
            # Retry until InfluxDB accepts the batch.
            while not client.write_points(json, tags=default_tags):
                sleep(1)
            #print("S", end="", flush=True)
        elif 'interfaces' in x:
            json = process_interfaces(x['interfaces'])
            while not client.write_points(json, tags=default_tags):
                sleep(1)
            #print("I", end="", flush=True)
        elif 'export' in x:
            json = process_export(x['export'])
            while not client.write_points(json, tags=default_tags):
                sleep(1)
            #print("X", end="", flush=True)
        elif 'users' in x:
            json = process_users(x['users'])
            while not client.write_points(json, tags=default_tags):
                sleep(1)
            #print("U", end="", flush=True)
        else:
            # Unhandled message type -- ignore.
            pass
            #print(x)
    except Exception as e:
        #raise
        print(f"Exception caught {e}")
        # Force a full reconnect on the next loop iteration.
        s = None
        sleep(5)
        continue
|
17,899 | 677246bdec85a68e0deba2529115fd3b2100875e | import datetime
import os
import shutil
import subprocess
import sys
import time
import traceback
import maya.cmds as cmds
import maya.mel as mel
import maya.OpenMaya as om
from keyframe_pro.keyframe_pro_client import KeyframeProClient
class MayaToKeyframePro:
    """Maya-side bridge to the Keyframe Pro video player.

    Builds a small dockless window (Sync / Viewer / Playblast sections) and
    exposes class-level commands that talk to a running Keyframe Pro
    instance through KeyframeProClient on localhost:PORT.  All state is
    kept on the class (shared client, UI handles) and persisted across
    sessions via Maya optionVars.

    NOTE(review): the original file has no indentation in this dump; the
    block structure below was reconstructed from control-flow semantics.
    """

    WINDOW_NAME = "MayaToKeyframeProWindow"
    WINDOW_TITLE = "Keyframe Pro"
    VERSION = "1.4.1"

    # Default install location of the Keyframe Pro executable per platform.
    KEYFRAME_PRO_PATH = ""
    if sys.platform == "win32":
        KEYFRAME_PRO_PATH = "C:/Program Files/Keyframe Pro/bin/KeyframePro.exe"
    elif sys.platform == "darwin":
        KEYFRAME_PRO_PATH = "/Applications/KeyframePro.app/Contents/MacOS/KeyframePro"
    else:
        om.MGlobal.displayError("MayaToKeyframePro is not supported on the current platform ({0})".format(sys.platform))

    # TCP port the Keyframe Pro client API listens on.
    PORT = 18181

    # Names of the script node and optionVars used to persist state.
    SYNC_SCRIPT_NODE_NAME = "MayaToKeyframeProScriptNode"
    CACHED_TEMP_DIR_OPTION_VAR = "MayaToKeyframeProCachedTempDir"
    COLLAPSE_STATE_OPTION_VAR = "MayaToKeyframeProCollapseState"
    SYNC_OFFSET_OPTION_VAR = "MayaToKeyframeProSyncOffset"
    FROM_RANGE_START_OPTION_VAR = "MayaToKeyframeProFromRangeStart"

    WAIT_FOR_OPEN_DURATION = 1 # Seconds to sleep after trying to open the application

    BUTTON_COLOR_01 = (0.5, 0.5, 0.5)
    BUTTON_COLOR_02 = (0.361, 0.361, 0.361)
    SYNC_ACTIVE_COLOR = (0.0, 0.5, 0.0)

    # Shared client and UI handles, populated lazily / by display().
    kpro_client = None
    main_window = None
    sync_layout = None
    viewer_layout = None
    playblast_layout = None
    sync_from_range_start_cb = None
    sync_offset_ifg = None
    playblast_viewer_rbg = None

    @classmethod
    def open_keyframe_pro(cls, application_path=""):
        """Launch the Keyframe Pro application (default path if none given)."""
        if not application_path:
            application_path = cls.KEYFRAME_PRO_PATH
        if not application_path:
            om.MGlobal.displayError("Keyframe Pro application path not set.")
        elif not os.path.exists(application_path):
            om.MGlobal.displayError("Keyframe Pro application path does not exist: {0}".format(application_path))
        else:
            try:
                # NOTE(review): launches cls.KEYFRAME_PRO_PATH, ignoring the
                # validated application_path argument -- looks like a bug.
                subprocess.Popen(cls.KEYFRAME_PRO_PATH, shell=False, stdin=None, stdout=None, stderr=None)
            except:
                traceback.print_exc()
                om.MGlobal.displayError("Failed to open Keyframe Pro. See script editor for details.")

    @classmethod
    def is_initialized(cls, display_errors=True):
        """Lazily create the client, connect and initialize; True on success."""
        if not cls.kpro_client:
            cls.kpro_client = KeyframeProClient()
        if cls.kpro_client.connect(port=cls.PORT, display_errors=display_errors):
            if cls.kpro_client.initialize():
                return True
        else:
            if display_errors:
                om.MGlobal.displayError("Connection failed. Application may be closed or the port may be in use ({0}).".format(cls.PORT))
        if display_errors:
            om.MGlobal.displayError("Failed to connect to Keyframe Pro. See script editor for details.")
        return False

    @classmethod
    def toggle_sync(cls):
        """Toggle timeline sync by creating/deleting the time-change script node."""
        if not cls.sync_script_node_exists() and cls.is_initialized():
            cls.create_sync_script_node()
            if cls.sync_script_node_exists():
                cls.update_sync_time()
        else:
            cls.delete_sync_script_node()
            cls.kpro_client.disconnect()
        cls.update_sync_state()

    @classmethod
    def update_sync_time(cls):
        """Push Maya's current frame (plus offset) to Keyframe Pro."""
        frame = cmds.currentTime(q=True) + cls.get_sync_offset()
        from_range_start = bool(cls.get_from_range_start())
        # A non-positive return means the connection died: turn sync off.
        if cls.kpro_client.set_frame(frame, False, from_range_start) <= 0:
            cls.toggle_sync()

    @classmethod
    def set_viewer_layout(cls, layout):
        """Switch the viewer arrangement ('single', 'horizontal' or 'vertical')."""
        if cls.is_initialized():
            cls.kpro_client.set_viewer_layout(layout)

    @classmethod
    def swap_timelines(cls):
        """Exchange the sources loaded in viewers A (0) and B (1)."""
        if cls.is_initialized():
            a = cls.kpro_client.get_active_in_viewer(0)
            b = cls.kpro_client.get_active_in_viewer(1)
            if b:
                cls.kpro_client.set_active_in_viewer(b["id"], 0)
            if a:
                cls.kpro_client.set_active_in_viewer(a["id"], 1)

    @classmethod
    def playblast(cls):
        """Playblast the current scene/range and optionally load it into a viewer.

        Settings are read from Maya's standard playblast optionVars; output
        goes to Keyframe Pro's temp directory with a timestamped name.
        """
        format = cls.get_option_var("playblastFormat", "avi")
        ext = ""
        if format == "avi":
            ext = "avi"
        elif format == "qt" or format == "avfoundation":
            ext = "mov"
        elif format == "image":
            ext = ""
        else:
            om.MGlobal.displayError("Unsupported playblast format: {0}".format(format))
            return
        temp_dir = cls.get_temp_dir()
        if not temp_dir:
            om.MGlobal.displayError("Failed to get temp directory from Keyframe Pro. See script editor for details.")
            return
        if not os.path.exists(temp_dir):
            os.makedirs(temp_dir)
        name = "blast"
        timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
        if format == "image":
            file_path = "{0}/{1}_{2}".format(temp_dir, name, timestamp)
        else:
            file_path = "{0}/{1}_{2}.{3}".format(temp_dir, name, timestamp, ext)
        # Gather playblast settings from Maya's optionVars.
        clear_cache = cls.get_option_var("playblastClearCache", True)
        show_ornaments = cls.get_option_var("playblastShowOrnaments", False)
        compression = cls.get_option_var("playblastCompression", "none")
        quality = cls.get_option_var("playblastQuality", 70)
        percent = cls.get_option_var("playblastScale", 0.5) * 100
        padding = cls.get_option_var("playblastPadding", 4)
        display_source_size = cls.get_option_var("playblastDisplaySizeSource", 1)
        playblast_width = cls.get_option_var("playblastWidth", 720)
        playblast_height = cls.get_option_var("playblastHeight", 480)
        args = {"format": format,
                "clearCache": clear_cache,
                "viewer": False,
                "showOrnaments": show_ornaments,
                "fp": padding,
                "percent": percent,
                "compression": compression,
                "quality": quality,
                "filename": file_path
                }
        if display_source_size == 2:
            args["widthHeight"] = [cmds.getAttr("defaultResolution.w"), cmds.getAttr("defaultResolution.h")]
        elif display_source_size == 3:
            args["widthHeight"] = [playblast_width, playblast_height]
        # Respect a highlighted range on the time slider (and its audio).
        playback_slider = mel.eval("$tempVar = $gPlayBackSlider")
        if(cmds.timeControl(playback_slider, q=True, rv=True)):
            range = cmds.timeControl(playback_slider, q=True, ra=True)
            args["startTime"] = range[0]
            args["endTime"] = range[1]
            sound = cmds.timeControl(playback_slider, q=True, sound=True)
            if sound:
                args["sound"] = sound
        temp_path = cmds.playblast(**args)
        if temp_path:
            file_path = temp_path
        if not os.path.exists(file_path):
            om.MGlobal.displayError("Playblast file does not exist. See script editor for details.")
            return
        # Open in viewer
        viewer_index = cmds.radioButtonGrp(cls.playblast_viewer_rbg, query=True, select=True) - 1
        if viewer_index <= 1:
            if not cls.is_initialized(False):
                cls.open_keyframe_pro()
                time.sleep(cls.WAIT_FOR_OPEN_DURATION)
                if not cls.is_initialized():
                    om.MGlobal.displayError("Failed to open in viewer. See script editor for details.")
                    return
        if viewer_index >= 0 and viewer_index <= 1:
            # On import, source may be loaded into A. Restore current A if source is to be in B
            source_in_a = None
            if viewer_index > 0:
                source_in_a = cls.kpro_client.get_active_in_viewer(0)
            # Swap
            source = cls.kpro_client.import_file(file_path)
            if source:
                cls.kpro_client.set_active_in_viewer(source["id"], viewer_index)
                if source_in_a:
                    cls.kpro_client.set_active_in_viewer(source_in_a["id"], 0)
                autoplay = cls.kpro_client.is_autoplay()
                if(autoplay):
                    cls.kpro_client.set_playing(autoplay)

    @classmethod
    def get_option_var(cls, name, default):
        """Return the Maya optionVar `name`, or `default` if unset."""
        if cmds.optionVar(exists=name):
            return cmds.optionVar(q=name)
        else:
            return default

    @classmethod
    def open_temp_dir(cls):
        """Reveal the Keyframe Pro temp directory in the OS file browser."""
        temp_dir = cls.get_temp_dir()
        if temp_dir:
            if sys.platform == "win32":
                os.startfile(temp_dir, 'explore')
            elif sys.platform == "darwin":
                subprocess.Popen(["open", temp_dir])
            else:
                om.MGlobal.displayError("Open temp dir is not supported on the current platform ({0})".format(sys.platform))
        else:
            om.MGlobal.displayError("Failed to get temp directory from Keyframe Pro. See script editor for details.")

    @classmethod
    def clear_temp_dir(cls):
        """After a confirm dialog, delete everything inside the temp directory."""
        result = cmds.confirmDialog(title='Confirm',
                                    message='Clear temporary directory?',
                                    button=['Yes', 'No'],
                                    defaultButton='Yes',
                                    cancelButton='No',
                                    dismissString='No')
        if result == "Yes":
            temp_dir = cls.get_temp_dir()
            if temp_dir:
                errors_occurred = False
                for the_file in os.listdir(temp_dir):
                    file_path = os.path.join(temp_dir, the_file)
                    try:
                        if os.path.isfile(file_path):
                            os.unlink(file_path)
                        elif os.path.isdir(file_path):
                            shutil.rmtree(file_path)
                    except:
                        # Typically the file is still open in a player.
                        om.MGlobal.displayWarning("Failed to remove file: {0}".format(file_path))
                        om.MGlobal.displayWarning("File may be open in an application")
                        errors_occurred = True
                if errors_occurred:
                    om.MGlobal.displayWarning("Unable to remove all files. See script editor for details.")
                else:
                    om.MGlobal.displayInfo("Temporary directory cleared: {0}".format(temp_dir))
            else:
                om.MGlobal.displayError("Failed to get temp directory from Keyframe Pro. See script editor for details.")

    @classmethod
    def get_temp_dir(cls):
        """Return Keyframe Pro's temp dir.

        Queries the running app when possible (caching the value in an
        optionVar), otherwise uses the cached value; as a last resort it
        launches the app and retries.
        """
        if cls.is_initialized(display_errors=False):
            config = cls.kpro_client.get_config()
            if config:
                cmds.optionVar(sv=[cls.CACHED_TEMP_DIR_OPTION_VAR, config["temp_dir"]])
                return config["temp_dir"]
        temp_dir = cls.get_option_var(cls.CACHED_TEMP_DIR_OPTION_VAR, "")
        if not temp_dir:
            cls.open_keyframe_pro()
            time.sleep(cls.WAIT_FOR_OPEN_DURATION)
            if cls.is_initialized(display_errors=False):
                config = cls.kpro_client.get_config()
                if config:
                    cmds.optionVar(sv=[cls.CACHED_TEMP_DIR_OPTION_VAR, config["temp_dir"]])
                    return config["temp_dir"]
        return temp_dir

    @classmethod
    def sync_script_node_exists(cls):
        """True if the sync script node is present in the scene."""
        return cmds.objExists(cls.SYNC_SCRIPT_NODE_NAME)

    @classmethod
    def create_sync_script_node(cls):
        # scriptType=7 runs beforeScript on every time change, driving sync.
        if not cls.sync_script_node_exists():
            cmds.scriptNode(scriptType=7,
                            beforeScript="try: MayaToKeyframePro.update_sync_time()\nexcept: pass",
                            name=cls.SYNC_SCRIPT_NODE_NAME,
                            sourceType="python")

    @classmethod
    def delete_sync_script_node(cls):
        if cls.sync_script_node_exists():
            cmds.delete(cls.SYNC_SCRIPT_NODE_NAME)

    @classmethod
    def get_sync_offset(cls):
        """Frame offset added to Maya's time before sending it (default 0)."""
        if cmds.optionVar(exists=cls.SYNC_OFFSET_OPTION_VAR):
            return cmds.optionVar(q=cls.SYNC_OFFSET_OPTION_VAR)
        else:
            return 0

    @classmethod
    def set_sync_offset(cls, value):
        """Store the sync offset, update the field widget and resync if active."""
        cmds.intFieldGrp(cls.sync_offset_ifg, e=True, value1=value)
        cmds.optionVar(iv=[cls.SYNC_OFFSET_OPTION_VAR, value])
        if (cls.sync_script_node_exists()):
            cls.update_sync_time()

    @classmethod
    def sync_offset_to_current(cls):
        # Make the current Maya frame map to frame 1 in Keyframe Pro.
        cls.set_sync_offset(-cmds.currentTime(q=True) + 1)

    @classmethod
    def sync_offset_changed(cls):
        # UI callback: persist the value typed into the offset field.
        cls.set_sync_offset(cmds.intFieldGrp(cls.sync_offset_ifg, q=True, value1=True))

    @classmethod
    def get_from_range_start(cls):
        """Whether frames are interpreted relative to the range start (default on)."""
        if cmds.optionVar(exists=cls.FROM_RANGE_START_OPTION_VAR):
            return cmds.optionVar(q=cls.FROM_RANGE_START_OPTION_VAR)
        else:
            return 1

    @classmethod
    def update_from_range_start(cls):
        # UI callback: persist the checkbox state and resync if active.
        value = cmds.checkBox(cls.sync_from_range_start_cb, q=True, value=True)
        cmds.optionVar(iv=[cls.FROM_RANGE_START_OPTION_VAR, value])
        if cls.sync_script_node_exists():
            cls.update_sync_time()

    @classmethod
    def get_collapse_state(cls):
        """Return the saved [sync, viewer, playblast] collapse flags (validated)."""
        if cmds.optionVar(exists=cls.COLLAPSE_STATE_OPTION_VAR):
            collapse_state = cmds.optionVar(q=cls.COLLAPSE_STATE_OPTION_VAR)
            if len(collapse_state) == 3:
                for value in collapse_state:
                    if value < 0 or value > 1:
                        return [0, 1, 1]
                return collapse_state
        # Default: sync expanded, viewer and playblast collapsed.
        return [0, 1, 1]

    @classmethod
    def update_collapse_state(cls):
        """Persist the current collapse state of the three frame layouts."""
        cmds.optionVar(clearArray=cls.COLLAPSE_STATE_OPTION_VAR)
        layouts = [cls.sync_layout, cls.viewer_layout, cls.playblast_layout]
        for layout in layouts:
            collapse = cmds.frameLayout(layout, q=True, cl=True)
            cmds.optionVar(iva=[cls.COLLAPSE_STATE_OPTION_VAR, collapse])

    @classmethod
    def display(cls):
        """Build and show the tool window (recreating it if already open)."""
        if(KeyframeProClient.API_VERSION != MayaToKeyframePro.VERSION):
            om.MGlobal.displayInfo("The Keyframe Pro Client API ({0}) and the MayaToKeyframePro script ({1}) versions do not match.".format(KeyframeProClient.API_VERSION, MayaToKeyframePro.VERSION))
            om.MGlobal.displayInfo("Please download and update to the latest versions: http://zurbrigg.com/keyframe-pro")
            om.MGlobal.displayError("Client API Version Mismatch (see the script editor for more details)")
            return
        if cmds.window(cls.WINDOW_NAME, exists=True):
            cmds.deleteUI(cls.WINDOW_NAME, window=True)
        collapse_state = cls.get_collapse_state()
        # ---------------------------------------------------------------------
        # Main layout
        # ---------------------------------------------------------------------
        cls.main_window = cmds.window(cls.WINDOW_NAME, title=cls.WINDOW_TITLE, s=True, tlb=False, rtf=True, mnb=False, mxb=False)
        main_layout = cmds.formLayout(parent=cls.main_window)
        cls.sync_layout = cmds.frameLayout(parent=main_layout,
                                           label="Sync", collapsable=True,
                                           cl=collapse_state[0],
                                           cc='cmds.evalDeferred("MayaToKeyframePro.on_collapse_changed()")',
                                           ec='cmds.evalDeferred("MayaToKeyframePro.on_collapse_changed()")')
        sync_form_layout = cmds.formLayout(parent=cls.sync_layout)
        cls.viewer_layout = cmds.frameLayout(parent=main_layout,
                                             label="Viewer",
                                             collapsable=True,
                                             cl=collapse_state[1],
                                             cc='cmds.evalDeferred("MayaToKeyframePro.on_collapse_changed()")',
                                             ec='cmds.evalDeferred("MayaToKeyframePro.on_collapse_changed()")')
        viewer_form_layout = cmds.formLayout(parent=cls.viewer_layout)
        cls.playblast_layout = cmds.frameLayout(parent=main_layout,
                                                label="Playblast",
                                                collapsable=True,
                                                cl=collapse_state[2],
                                                cc='cmds.evalDeferred("MayaToKeyframePro.on_collapse_changed()")',
                                                ec='cmds.evalDeferred("MayaToKeyframePro.on_collapse_changed()")')
        playblast_form_layout = cmds.formLayout(parent=cls.playblast_layout)
        # Stack the three sections vertically, full width.
        cmds.formLayout(main_layout, e=True, af=(cls.sync_layout, "top", 0))
        cmds.formLayout(main_layout, e=True, af=(cls.sync_layout, "left", 0))
        cmds.formLayout(main_layout, e=True, af=(cls.sync_layout, "right", 0))
        cmds.formLayout(main_layout, e=True, ac=(cls.viewer_layout, "top", 0, cls.sync_layout))
        cmds.formLayout(main_layout, e=True, af=(cls.viewer_layout, "left", 0))
        cmds.formLayout(main_layout, e=True, af=(cls.viewer_layout, "right", 0))
        cmds.formLayout(main_layout, e=True, ac=(cls.playblast_layout, "top", 0, cls.viewer_layout))
        cmds.formLayout(main_layout, e=True, af=(cls.playblast_layout, "left", 0))
        cmds.formLayout(main_layout, e=True, af=(cls.playblast_layout, "right", 0))
        # ---------------------------------------------------------------------
        # Sync layout
        # ---------------------------------------------------------------------
        cls.sync_offset_ifg = cmds.intFieldGrp(label="Offset: ",
                                               value1=MayaToKeyframePro.get_sync_offset(),
                                               columnWidth2=(40, 48),
                                               cl2=("left", "right"),
                                               cc="MayaToKeyframePro.sync_offset_changed()",
                                               parent=sync_form_layout)
        cls.sync_from_range_start_cb = cmds.checkBox(label="From Range Start",
                                                     value=MayaToKeyframePro.get_from_range_start(),
                                                     cc="MayaToKeyframePro.update_from_range_start()",
                                                     parent=sync_form_layout)
        sync_offset_to_current_btn = cmds.button(label="Current",
                                                 bgc=cls.BUTTON_COLOR_01,
                                                 c="MayaToKeyframePro.sync_offset_to_current()",
                                                 parent=sync_form_layout)
        reset_sync_offset_btn = cmds.button(label=" Reset ",
                                            bgc=cls.BUTTON_COLOR_01,
                                            c="MayaToKeyframePro.set_sync_offset(0)",
                                            parent=sync_form_layout)
        cls.sync_btn = cmds.button(label="SYNC", c="MayaToKeyframePro.toggle_sync()", parent=sync_form_layout)
        top_offset = 1
        bottom_offset = 4
        left_position = 1
        right_position = 99
        spacing = 2
        cmds.formLayout(sync_form_layout, e=True, af=(cls.sync_offset_ifg, "top", top_offset))
        cmds.formLayout(sync_form_layout, e=True, ap=(cls.sync_offset_ifg, "left", 0, left_position))
        cmds.formLayout(sync_form_layout, e=True, af=(sync_offset_to_current_btn, "top", top_offset))
        cmds.formLayout(sync_form_layout, e=True, ac=(sync_offset_to_current_btn, "left", 0, cls.sync_offset_ifg))
        cmds.formLayout(sync_form_layout, e=True, af=(reset_sync_offset_btn, "top", top_offset))
        cmds.formLayout(sync_form_layout, e=True, ac=(reset_sync_offset_btn, "left", spacing, sync_offset_to_current_btn))
        cmds.formLayout(sync_form_layout, e=True, ac=(cls.sync_from_range_start_cb, "top", top_offset, sync_offset_to_current_btn))
        cmds.formLayout(sync_form_layout, e=True, ap=(cls.sync_from_range_start_cb, "left", 0, left_position))
        cmds.formLayout(sync_form_layout, e=True, ac=(cls.sync_btn, "top", 2 * spacing, cls.sync_from_range_start_cb))
        cmds.formLayout(sync_form_layout, e=True, af=(cls.sync_btn, "bottom", bottom_offset))
        cmds.formLayout(sync_form_layout, e=True, ap=(cls.sync_btn, "left", 0, left_position))
        cmds.formLayout(sync_form_layout, e=True, ap=(cls.sync_btn, "right", 0, right_position))
        # ---------------------------------------------------------------------
        # Viewer layout
        # ---------------------------------------------------------------------
        single_viewer_btn = cmds.button(label="Single",
                                        bgc=cls.BUTTON_COLOR_01,
                                        c="MayaToKeyframePro.set_viewer_layout('single')",
                                        parent=viewer_form_layout)
        hori_viewer_btn = cmds.button(label="Horizontal",
                                      bgc=cls.BUTTON_COLOR_01,
                                      c="MayaToKeyframePro.set_viewer_layout('horizontal')",
                                      parent=viewer_form_layout)
        vert_viewer_btn = cmds.button(label=" Vertical ",
                                      bgc=cls.BUTTON_COLOR_01,
                                      c="MayaToKeyframePro.set_viewer_layout('vertical')",
                                      parent=viewer_form_layout)
        swap_timelines_btn = cmds.button(label="Swap Timelines",
                                         bgc=cls.BUTTON_COLOR_01,
                                         c="MayaToKeyframePro.swap_timelines()",
                                         parent=viewer_form_layout)
        cmds.formLayout(viewer_form_layout, e=True, af=(single_viewer_btn, "top", top_offset))
        cmds.formLayout(viewer_form_layout, e=True, ap=(single_viewer_btn, "left", 0, left_position))
        cmds.formLayout(viewer_form_layout, e=True, ap=(single_viewer_btn, "right", 0, 38))
        cmds.formLayout(viewer_form_layout, e=True, af=(hori_viewer_btn, "top", top_offset))
        cmds.formLayout(viewer_form_layout, e=True, ac=(hori_viewer_btn, "left", spacing, single_viewer_btn))
        cmds.formLayout(viewer_form_layout, e=True, ap=(hori_viewer_btn, "right", 0, 68))
        cmds.formLayout(viewer_form_layout, e=True, af=(vert_viewer_btn, "top", top_offset))
        cmds.formLayout(viewer_form_layout, e=True, ac=(vert_viewer_btn, "left", spacing, hori_viewer_btn))
        cmds.formLayout(viewer_form_layout, e=True, ap=(vert_viewer_btn, "right", 0, right_position))
        cmds.formLayout(viewer_form_layout, e=True, ac=(swap_timelines_btn, "top", spacing, single_viewer_btn))
        cmds.formLayout(viewer_form_layout, e=True, af=(swap_timelines_btn, "bottom", bottom_offset))
        cmds.formLayout(viewer_form_layout, e=True, ap=(swap_timelines_btn, "left", 0, left_position))
        cmds.formLayout(viewer_form_layout, e=True, ap=(swap_timelines_btn, "right", 0, right_position))
        # ---------------------------------------------------------------------
        # Playblast layout
        # ---------------------------------------------------------------------
        cls.playblast_viewer_rbg = cmds.radioButtonGrp(label='Open in Viewer: ',
                                                       labelArray3=['A', 'B', 'None'],
                                                       numberOfRadioButtons=3,
                                                       select=1,
                                                       cw4=(100, 40, 40, 40),
                                                       cl4=("left", "left", "left", "left"),
                                                       parent=playblast_form_layout)
        playblast_btn = cmds.button(label="PLAYBLAST",
                                    bgc=cls.BUTTON_COLOR_01,
                                    c="MayaToKeyframePro.playblast()",
                                    parent=playblast_form_layout)
        open_temp_dir_btn = cmds.button(label="Open Temp Folder",
                                        bgc=cls.BUTTON_COLOR_01,
                                        c="MayaToKeyframePro.open_temp_dir()",
                                        parent=playblast_form_layout)
        clear_temp_dir_btn = cmds.button(label="Clear Temp Folder",
                                         bgc=cls.BUTTON_COLOR_01,
                                         c="MayaToKeyframePro.clear_temp_dir()",
                                         parent=playblast_form_layout)
        version_label = cmds.text(label="v{0}".format(cls.VERSION), align="right")
        cmds.formLayout(playblast_form_layout, e=True, af=(cls.playblast_viewer_rbg, "top", top_offset))
        cmds.formLayout(playblast_form_layout, e=True, ap=(cls.playblast_viewer_rbg, "left", 0, left_position))
        cmds.formLayout(playblast_form_layout, e=True, ac=(playblast_btn, "top", spacing, cls.playblast_viewer_rbg))
        cmds.formLayout(playblast_form_layout, e=True, ap=(playblast_btn, "left", 0, left_position))
        cmds.formLayout(playblast_form_layout, e=True, ap=(playblast_btn, "right", 0, right_position))
        cmds.formLayout(playblast_form_layout, e=True, ac=(open_temp_dir_btn, "top", spacing, playblast_btn))
        cmds.formLayout(playblast_form_layout, e=True, ap=(open_temp_dir_btn, "left", 0, left_position))
        cmds.formLayout(playblast_form_layout, e=True, ap=(open_temp_dir_btn, "right", 1, 50))
        cmds.formLayout(playblast_form_layout, e=True, ac=(clear_temp_dir_btn, "top", spacing, playblast_btn))
        cmds.formLayout(playblast_form_layout, e=True, ap=(clear_temp_dir_btn, "left", 1, 50))
        cmds.formLayout(playblast_form_layout, e=True, ap=(clear_temp_dir_btn, "right", 0, right_position))
        cmds.formLayout(playblast_form_layout, e=True, ac=(version_label, "top", spacing, open_temp_dir_btn))
        cmds.formLayout(playblast_form_layout, e=True, ap=(version_label, "right", 0, right_position))
        # ---------------------------------------------------------------------
        # Update and show
        # ---------------------------------------------------------------------
        cls.update_sync_state()
        cls.on_collapse_changed()
        cmds.setFocus(cls.sync_btn)
        cmds.showWindow(cls.main_window)

    @classmethod
    def on_collapse_changed(cls):
        """Resize the window to the summed height of the (possibly collapsed) sections."""
        total_height = 0
        layouts = [cls.sync_layout, cls.viewer_layout, cls.playblast_layout]
        for layout in layouts:
            total_height += cmds.frameLayout(layout, q=True, h=True)
        cmds.window(MayaToKeyframePro.main_window, e=True, h=total_height)
        cls.update_collapse_state()

    @classmethod
    def update_sync_state(cls):
        """Recolor/relabel the sync button to reflect whether sync is active."""
        if cls.sync_script_node_exists():
            cmds.button(cls.sync_btn, e=True, bgc=cls.SYNC_ACTIVE_COLOR, label="SYNCED")
        else:
            cmds.button(cls.sync_btn, e=True, bgc=cls.BUTTON_COLOR_01, label="SYNC")
if __name__ == "__main__":
MayaToKeyframePro.display()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.