index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
4,900 | cf9339659f49b4093c07e3723a2ede1543be41b8 | from django.test import TestCase
from django.urls import reverse
from django.utils import timezone
from recensioni_site import settings
from django.contrib.auth.models import User
from forum.models import Sezione,Post,UserDataReccomandation
class testRegistrazione(TestCase):
    """Tests for the single-Sezione view and its average-rating computation."""

    def setUp(self):
        # Credentials used with self.client.login() in the tests below.
        self.credential = {'username': 'dummy', 'password': 'dummypassword'}
        self.credentialp = {'username': 'dummyp', 'password': 'dummypasswordp'}
        # Bug fix: the original never created these users (and objects.create()
        # stores the password unhashed anyway), so client.login() always
        # failed silently. create_user() hashes the password so auth works.
        User.objects.create_user(username='dummy', password='dummypassword',
                                 is_staff=False)
        User.objects.create_user(username='dummyp', password='dummypasswordp',
                                 is_staff=True)
        # Bug fix: is_staff and the *B fields are BooleanFields — the original
        # passed the strings "True"/"False", and "False" is truthy, silently
        # flipping every flag to True.
        self.proprietario1 = User.objects.create(username="Proprietario1",
                                                 email="proprietario1@gmail.com",
                                                 password="PasswordProprietario1",
                                                 is_staff=True)
        self.proprietario2 = User.objects.create(username="Proprietario2",
                                                 email="proprietario2@gmail.com",
                                                 password="PasswordProprietario2",
                                                 is_staff=True)
        # --------------------------------------------
        self.user1 = User.objects.create(username="User1",
                                         email="user1@gmail.com",
                                         password="PasswordUser1",
                                         is_staff=False)
        self.user2 = User.objects.create(username="User2",
                                         email="user2@gmail.com",
                                         password="PasswordUser2",
                                         is_staff=False)
        # --------------------------------------------
        self.sezione1 = Sezione.objects.create(user=self.proprietario1,
                                               nome_sezione="hotel1",
                                               descrizione="descrizione",
                                               citta="città_test",
                                               provincia="provincia_test",
                                               indirizzo="indirizzo_test",
                                               logo_sezione="null",
                                               hotelB=True,
                                               ristoranteB=False,
                                               fastFoodB=False,
                                               casaVacanzaB=False,
                                               agriturismoB=False)
        # Two posts with ratings 5 and 3 so the expected average is 4.
        self.post1 = Post.objects.create(autore_post=self.user1,
                                         contenuto="post_test",
                                         rating=5,
                                         data_creazione=timezone.now(),
                                         sezione=self.sezione1)
        self.post2 = Post.objects.create(autore_post=self.user1,
                                         contenuto="post_test",
                                         rating=3,
                                         data_creazione=timezone.now(),
                                         sezione=self.sezione1)

    def tearDown(self):
        self.proprietario1.delete()
        self.proprietario2.delete()
        # --------------------------------------------
        self.user1.delete()
        self.user2.delete()
        # --------------------------------------------
        self.sezione1.delete()
        self.post1.delete()
        # Bug fix: post2 was created in setUp but never cleaned up.
        self.post2.delete()

    def test_vsualizzaSezione(self):
        """The section detail page renders with the expected template."""
        self.client.login(**self.credential)
        response = self.client.get('/forum/sezione/' + str(self.sezione1.id) + '/')
        self.assertTemplateUsed(response, 'forum/singola_sezione.html')
        self.assertEqual(response.status_code, 200)

    def test_rating(self):
        """media_rating is the mean of the section's post ratings ((5+3)/2 == 4)."""
        self.client.login(**self.credential)
        response = self.client.get('/forum/sezione/' + str(self.sezione1.id) + '/')
        self.assertEqual(response.context['sezione'], self.sezione1)
        self.assertEqual(response.context['media_rating'], 4)
|
4,901 | 266ce1aaa3283cf2aaa271a317a80c3860880a49 | from django.conf.urls.defaults import patterns, include, url
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'foo.views.home', name='home'),
# url(r'^foo/', include('foo.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
# required url to login so you can authorize token
url(r'^accounts/login/$', 'django.contrib.auth.views.login', {'template_name': 'login.html'}),
)
# piston, oauth urls
urlpatterns += patterns(
'piston.authentication',
url(r'^oauth/request_token/$','oauth_request_token'),
url(r'^oauth/authorize/$','oauth_user_auth'),
url(r'^oauth/access_token/$','oauth_access_token'),
)
|
4,902 | c853f922d1e4369df9816d150e5c0abc729b325c | # This file is used to run a program to perform Active measuremnts
import commands
import SocketServer
import sys
#Class to handle Socket request
class Handler(SocketServer.BaseRequestHandler):
    """Per-connection handler: measures latency to the client-supplied IP."""

    def handle(self):
        # Get the IP of the client (sent as the request payload)
        IP = self.request.recv(1024)
        #print 'IP=' + IP
        latency = ''
        try:
            # Use Scamper to determine the latency of the Requesting Client identified by the IP.
            # NOTE(review): IP comes straight off the socket and is interpolated
            # into a shell command — safe only on a trusted network.
            scamperCommand = "scamper -c 'ping -c 1' -i "+IP
            # Get the output of the system command
            output = commands.getoutput(scamperCommand)
            print "Output=" + output
            # Parse the RTT value out of scamper's second output line
            latency = output.split("\n")[1].split("time=")[1].split(" ")[0]
        except Exception:
            # Any parse/exec failure is reported to the client as 'Error'
            latency = 'Error'
        #print latency
        # Send latency to requester
        self.request.sendall(latency)
        return
def main(argv):
    """Start a TCP measurement server on the port given as argv[1]."""
    port = int(argv[1])
    addr = ('', port)  # bind on all interfaces
    # Start an active measurement system which listens on the given port
    server = SocketServer.TCPServer(addr, Handler);
    print 'Active Measurement Server Listening at ' + str(port) + "..."
    server.serve_forever()


if __name__ == '__main__':
    main(sys.argv)
4,903 | 121fddf022c4eed7fd00e81edcb2df6a7a3b7510 | #!/usr/bin/env python3
from collections import deque
from itertools import permutations
# Intcode opcodes (Advent of Code 2019 instruction set).
INS_ADD = 1
INS_MULTIPLY = 2
INS_INPUT = 3
INS_OUTPUT = 4
INS_JUMP_IF_TRUE = 5
INS_JUMP_IF_FALSE = 6
INS_LESS_THAN = 7
INS_EQUALS = 8
INS_ADJUST_RELATIVE_BASE = 9
INS_DONE = 99

# Parameter addressing modes encoded in the instruction's upper digits.
MODE_POSITION = 0
MODE_IMMEDIATE = 1
MODE_RELATIVE = 2
class InvalidInstructionException(Exception):
    """Raised when the VM fetches an opcode it does not recognise."""

    def __init__(self, instruction):
        # Render the offending opcode as "<n>" in the exception message.
        message = "<%d>" % instruction
        super().__init__(message)
class InvalidModeException (Exception):
    """Raised when a load/store uses an unknown parameter mode."""
    pass
class Computer:
    """Intcode virtual machine (Advent of Code 2019, day-9 feature set).

    Supports position/immediate/relative parameter modes, a relative base,
    and extra zero-initialised memory beyond the program image.

    In interactive mode, INPUT prompts on stdin and OUTPUT prints; otherwise
    inputs come from the queue passed to __init__ (or via input()) and
    outputs are collected, with run() returning early on each OUTPUT.
    """

    def __init__(self, data, inputs, memory_size=8192, interactive=True):
        # Program image is loaded at address 0; remaining memory is zeroed.
        self._memory = [0] * memory_size
        self._memory[:len(data)] = data
        self._pc = 0
        self._inputs = deque(inputs)
        self._outputs = []
        self._relative_base = 0
        self._interactive = interactive

    def input(self, value):
        """Queue *value* for consumption by a later INPUT instruction."""
        self._inputs.append(value)

    def _parse_modes(self, instruction):
        # Zero-pad to "ABCDE": DE is the opcode; C, B, A are the modes of
        # parameters 1..3 respectively.
        i = "%.5d" % instruction
        return (int(i[2]), int(i[1]), int(i[0]))

    def _fetch(self):
        """Read the next instruction word; return (opcode, (m1, m2, m3))."""
        instruction = self._memory[self._pc]
        self._pc += 1
        # Bug fix: use >= 100 so a value of exactly 100 is split into opcode
        # and mode digits instead of being treated as a bare opcode.
        if instruction >= 100:
            return instruction % 100, self._parse_modes(instruction)
        return instruction, (MODE_POSITION, MODE_POSITION, MODE_POSITION)

    def _pop(self):
        """Consume and return the next raw operand word."""
        v = self._memory[self._pc]
        self._pc += 1
        return v

    def _load(self, a, mode):
        """Read operand *a* according to its addressing *mode*."""
        if mode == MODE_IMMEDIATE:
            return a
        if mode == MODE_POSITION:
            return self._memory[a]
        if mode == MODE_RELATIVE:
            return self._memory[self._relative_base + a]
        raise InvalidModeException()

    def _store(self, a, mode, v):
        """Write *v* to destination operand *a* according to *mode*."""
        if mode == MODE_IMMEDIATE:
            # Write operands are never immediate; ignore such a store.
            # Bug fix: the original used a non-chained `if`, so immediate
            # mode fell through to the else and raised InvalidModeException.
            pass
        elif mode == MODE_POSITION:
            self._memory[a] = v
        elif mode == MODE_RELATIVE:
            self._memory[self._relative_base + a] = v
        else:
            raise InvalidModeException()

    def _add(self, modes, a, b, d):
        """d <- a + b."""
        self._store(d, modes[2], self._load(a, modes[0]) + self._load(b, modes[1]))

    def _multiply(self, modes, a, b, d):
        """d <- a * b."""
        self._store(d, modes[2], self._load(a, modes[0]) * self._load(b, modes[1]))

    def _input(self, modes, a):
        """Store one input value: stdin when interactive, else the queue."""
        if self._interactive:
            self._store(a, modes[0], int(input("=> ")))
        else:
            self._store(a, modes[0], self._inputs.popleft())

    def _output(self, modes, s):
        """Emit one value; print when interactive, else collect it."""
        v = self._load(s, modes[0])
        if self._interactive:
            print(v)
        else:
            self._outputs.append(v)
        # Bug fix: return the value so run() can hand it to the caller in
        # non-interactive mode (it previously returned None).
        return v

    def _jump_if_true(self, modes, a, d):
        if self._load(a, modes[0]) != 0:
            self._pc = self._load(d, modes[1])

    def _jump_if_false(self, modes, a, d):
        if self._load(a, modes[0]) == 0:
            self._pc = self._load(d, modes[1])

    def _less_than(self, modes, a, b, d):
        """d <- 1 if a < b else 0."""
        if self._load(a, modes[0]) < self._load(b, modes[1]):
            self._store(d, modes[2], 1)
        else:
            self._store(d, modes[2], 0)

    def _equals(self, modes, a, b, d):
        """d <- 1 if a == b else 0."""
        if self._load(a, modes[0]) == self._load(b, modes[1]):
            self._store(d, modes[2], 1)
        else:
            self._store(d, modes[2], 0)

    def _adjust_relative_base(self, modes, a):
        self._relative_base += self._load(a, modes[0])

    def run(self, debug=False):
        """Execute until DONE.

        In non-interactive mode, returns the emitted value on each OUTPUT
        (call run() again to resume). On DONE, returns the full list of
        collected outputs. Raises InvalidInstructionException for unknown
        opcodes.
        """
        while True:
            instruction, modes = self._fetch()
            if debug:
                print(instruction, modes)
            if instruction == INS_ADD:
                self._add(modes, self._pop(), self._pop(), self._pop())
            elif instruction == INS_MULTIPLY:
                self._multiply(modes, self._pop(), self._pop(), self._pop())
            elif instruction == INS_INPUT:
                self._input(modes, self._pop())
            elif instruction == INS_OUTPUT:
                v = self._output(modes, self._pop())
                if not self._interactive:
                    return v
            elif instruction == INS_JUMP_IF_TRUE:
                self._jump_if_true(modes, self._pop(), self._pop())
            elif instruction == INS_JUMP_IF_FALSE:
                self._jump_if_false(modes, self._pop(), self._pop())
            elif instruction == INS_LESS_THAN:
                self._less_than(modes, self._pop(), self._pop(), self._pop())
            elif instruction == INS_EQUALS:
                self._equals(modes, self._pop(), self._pop(), self._pop())
            elif instruction == INS_ADJUST_RELATIVE_BASE:
                self._adjust_relative_base(modes, self._pop())
            elif instruction == INS_DONE:
                return self._outputs
            else:
                raise InvalidInstructionException(instruction)
PROGRAM = [1102,34463338,34463338,63,1007,63,34463338,63,1005,63,53,1101,0,3,1000,109,988,209,12,9,1000,209,6,209,3,203,0,1008,1000,1,63,1005,63,65,1008,1000,2,63,1005,63,904,1008,1000,0,63,1005,63,58,4,25,104,0,99,4,0,104,0,99,4,17,104,0,99,0,0,1101,0,396,1029,1101,0,356,1023,1101,401,0,1028,1101,24,0,1008,1101,33,0,1019,1101,35,0,1010,1102,359,1,1022,1102,32,1,1001,1101,37,0,1004,1101,0,31,1009,1101,0,30,1003,1101,28,0,1002,1102,1,36,1014,1102,20,1,1012,1101,21,0,1000,1101,0,22,1015,1102,23,1,1013,1102,1,1,1021,1102,1,39,1007,1102,26,1,1017,1101,0,38,1016,1101,0,437,1024,1102,432,1,1025,1101,0,421,1026,1101,0,29,1005,1101,27,0,1011,1102,1,0,1020,1101,0,25,1018,1101,0,414,1027,1102,34,1,1006,109,6,2108,33,-3,63,1005,63,201,1001,64,1,64,1105,1,203,4,187,1002,64,2,64,109,14,21108,40,40,-6,1005,1014,221,4,209,1105,1,225,1001,64,1,64,1002,64,2,64,109,-21,2102,1,3,63,1008,63,28,63,1005,63,251,4,231,1001,64,1,64,1106,0,251,1002,64,2,64,109,12,2101,0,-3,63,1008,63,21,63,1005,63,275,1001,64,1,64,1105,1,277,4,257,1002,64,2,64,109,-10,1207,1,27,63,1005,63,293,1105,1,299,4,283,1001,64,1,64,1002,64,2,64,109,9,21108,41,42,3,1005,1013,315,1105,1,321,4,305,1001,64,1,64,1002,64,2,64,109,-12,1202,6,1,63,1008,63,37,63,1005,63,347,4,327,1001,64,1,64,1105,1,347,1002,64,2,64,109,29,2105,1,-4,1105,1,365,4,353,1001,64,1,64,1002,64,2,64,109,-17,2108,32,-9,63,1005,63,387,4,371,1001,64,1,64,1105,1,387,1002,64,2,64,109,17,2106,0,1,4,393,1105,1,405,1001,64,1,64,1002,64,2,64,109,1,2106,0,-1,1001,64,1,64,1106,0,423,4,411,1002,64,2,64,109,-13,2105,1,9,4,429,1106,0,441,1001,64,1,64,1002,64,2,64,109,3,21107,42,41,-1,1005,1017,461,1001,64,1,64,1106,0,463,4,447,1002,64,2,64,109,-4,21107,43,44,1,1005,1015,481,4,469,1106,0,485,1001,64,1,64,1002,64,2,64,109,-6,21101,44,0,6,1008,1014,47,63,1005,63,505,1106,0,511,4,491,1001,64,1,64,1002,64,2,64,109,-6,1208,-1,32,63,1005,63,529,4,517,1105,1,533,1001,64,1,64,1002,64,2,64,109,11,1205,7,545,1106,0,551,4,539,1001,64,1,64,1002,64,2,64,109,11,21102,45,1,-7,1008
,1017,48,63,1005,63,575,1001,64,1,64,1106,0,577,4,557,1002,64,2,64,109,-8,1206,5,593,1001,64,1,64,1105,1,595,4,583,1002,64,2,64,109,7,1206,-3,609,4,601,1106,0,613,1001,64,1,64,1002,64,2,64,109,-10,2101,0,-6,63,1008,63,39,63,1005,63,635,4,619,1106,0,639,1001,64,1,64,1002,64,2,64,109,-9,1208,0,39,63,1005,63,655,1106,0,661,4,645,1001,64,1,64,1002,64,2,64,109,4,2107,25,0,63,1005,63,681,1001,64,1,64,1105,1,683,4,667,1002,64,2,64,109,-5,2107,31,-2,63,1005,63,701,4,689,1106,0,705,1001,64,1,64,1002,64,2,64,109,19,1205,-1,719,4,711,1105,1,723,1001,64,1,64,1002,64,2,64,109,-17,1201,3,0,63,1008,63,24,63,1005,63,745,4,729,1106,0,749,1001,64,1,64,1002,64,2,64,109,13,21102,46,1,-3,1008,1015,46,63,1005,63,771,4,755,1105,1,775,1001,64,1,64,1002,64,2,64,109,-13,1207,4,32,63,1005,63,793,4,781,1106,0,797,1001,64,1,64,1002,64,2,64,109,7,2102,1,-9,63,1008,63,27,63,1005,63,821,1001,64,1,64,1105,1,823,4,803,1002,64,2,64,109,-18,1201,8,0,63,1008,63,25,63,1005,63,847,1001,64,1,64,1106,0,849,4,829,1002,64,2,64,109,23,21101,47,0,2,1008,1019,47,63,1005,63,871,4,855,1106,0,875,1001,64,1,64,1002,64,2,64,109,-22,1202,5,1,63,1008,63,19,63,1005,63,899,1001,64,1,64,1106,0,901,4,881,4,64,99,21102,27,1,1,21102,1,915,0,1105,1,922,21201,1,25165,1,204,1,99,109,3,1207,-2,3,63,1005,63,964,21201,-2,-1,1,21102,942,1,0,1105,1,922,22102,1,1,-1,21201,-2,-3,1,21101,0,957,0,1105,1,922,22201,1,-1,-2,1106,0,968,21201,-2,0,-2,109,-3,2105,1,0]
if __name__ == "__main__":
    # Run the puzzle program interactively (enter 1 or 2 at the "=> " prompt).
    c = Computer(PROGRAM, [])
    c.run()
|
4,904 | a3cbdecbbfc49e8ac045f4aabbea6b9f54ed3d5f | class Node:
def __init__(self, data):
self.data = data
self.prev = None
self.next = None
class LinkedList:
    """Doubly linked list tracked by its head node.

    Traversal/insert methods take the starting node explicitly; callers
    pass self.head (mirroring the original API).
    """

    def __init__(self):
        self.head = None

    def insertAtHead(self, newNode, curNode):
        """Make newNode the new head; curNode is the current head (may be None)."""
        newNode.next = curNode
        if curNode is not None: curNode.prev = newNode
        self.head = newNode

    def insertAtTail(self, newNode, curNode):
        """Append newNode after the last node reachable from curNode."""
        if self.head is None:
            self.head = newNode
            return
        while curNode.next is not None:
            curNode = curNode.next
        curNode.next = newNode
        newNode.prev = curNode

    def printForward(self, curNode):
        """Print each node's data from curNode through to the tail."""
        while curNode is not None:
            print(curNode.data)
            curNode = curNode.next

    def printReverse(self, curNode):
        """Print data from the tail back to the start of the list.

        Bug fix: guard against an empty list — the original dereferenced
        curNode.next on None and raised AttributeError.
        """
        if curNode is None:
            return
        while curNode.next is not None:
            curNode = curNode.next
        while curNode is not None:
            print(curNode.data)
            curNode = curNode.prev
################################################
# Demo driver: read three values, push each at the head (so the list holds
# them in reverse entry order), then print forward and backward.
linkedList = LinkedList()
for i in range(3):
    newNode = Node(input("Enter data: "))
    #linkedList.insertAtTail(newNode, linkedList.head)
    linkedList.insertAtHead(newNode, linkedList.head)
linkedList.printForward(linkedList.head)
print("######################")
linkedList.printReverse(linkedList.head)
4,905 | 67446f50d1c062eddcad282d3bf508967c5192fc | from network.utility import *
from entities.message import Message, BroadcastMessage, GroupMessage
from entities.node import Node
from entities.group import GroupBroadcast
from entities.request import Request
import threading
import time
import logging
import random
import json
import socket
from services.user import UserService
class Sender:
    """Builds protocol headers + JSON bodies and fans them out over UDP.

    reverseMap maps usernames to addresses; info is this node's own state
    dict (username, groupID, isAdmin, isMember, ...). Each send spawns a
    SenderWorker thread per destination. Header layout is defined by
    packHeader() in network.utility.
    """

    def __init__(self, reverseMap, info):
        self.reverseMap = reverseMap
        self.info = info

    def sendMessage(self, message):
        """Send a direct message to message.toUsername only."""
        data = {"timestamp": message.timestamp, "message": message.message}
        body = json.dumps(data).encode('utf-8')
        header = {
            "srcUsername": message.fromUsername,
            "srcGroup": self.info.get("groupID", ""),
            "desGroup": "",
            "admin": self.info.get("isAdmin", ""),
            "member": self.info.get("isMember", ""),
            "broadcast": False,
            "groupBroadcast": False,
            "memberRq": False,
            "ackRq": False,
            "denyRq": False,
            "leaveRq": False,
            "nodeRq": False,
            "big": False,
            "nodeRep": False,
            "contentLength": len(body),
        }
        packedHeader = packHeader(header)
        msg = packedHeader + body
        addr = self.reverseMap.get(message.toUsername)
        worker = SenderWorker(addr, msg)
        worker.start()

    def sendMessageBroadcast(self, message):
        """Send a message to every known peer (broadcast flag set)."""
        data = {"timestamp": message.timestamp, "message": message.message}
        body = json.dumps(data).encode('utf-8')
        header = {
            "srcUsername": message.fromUsername,
            "srcGroup": self.info.get("groupID", ""),
            "desGroup": "",
            "admin": self.info.get("isAdmin", ""),
            "member": self.info.get("isMember", ""),
            "broadcast": True,
            "groupBroadcast": False,
            "memberRq": False,
            "ackRq": False,
            "denyRq": False,
            "leaveRq": False,
            "nodeRq": False,
            "big": False,
            "nodeRep": False,
            "contentLength": len(body),
        }
        packedHeader = packHeader(header)
        msg = packedHeader + body
        for addr in self.reverseMap.values():
            worker = SenderWorker(addr, msg)
            worker.start()

    def sendMessageGroup(self, message):
        """Broadcast a group message to all peers.

        NOTE(review): sent to every peer with srcGroup=message.groupID;
        presumably receivers filter on the group field — confirm.
        """
        data = {"timestamp": message.timestamp, "message": message.message}
        body = json.dumps(data).encode('utf-8')
        header = {
            "srcUsername": message.fromUsername,
            "srcGroup": message.groupID,
            "desGroup": "",
            "admin": self.info.get("isAdmin", ""),
            "member": self.info.get("isMember", ""),
            "broadcast": True,
            "groupBroadcast": True,
            "memberRq": False,
            "ackRq": False,
            "denyRq": False,
            "leaveRq": False,
            "nodeRq": False,
            "big": False,
            "nodeRep": False,
            "contentLength": len(body),
        }
        packedHeader = packHeader(header)
        msg = packedHeader + body
        for addr in self.reverseMap.values():
            worker = SenderWorker(addr, msg)
            worker.start()

    def sendGroupJoinRequest(self, request):
        """Broadcast a membership request for request.groupID (memberRq set)."""
        data = {"message": request.message}
        body = json.dumps(data).encode('utf-8')
        header = {
            "srcUsername": request.fromUsername,
            "srcGroup": self.info["groupID"],
            "desGroup": request.groupID,
            "admin": self.info.get("isAdmin", ""),
            "member": self.info.get("isMember", ""),
            "broadcast": True,
            "groupBroadcast": False,
            "memberRq": True,
            "ackRq": False,
            "denyRq": False,
            "leaveRq": False,
            "nodeRq": False,
            "big": False,
            "nodeRep": False,
            "contentLength": len(body),
        }
        packedHeader = packHeader(header)
        msg = packedHeader + body
        for addr in self.reverseMap.values():
            worker = SenderWorker(addr, msg)
            worker.start()

    def sendGroupAcknowledgeRequest(self, request):
        """Accept a join request: header-only ack sent back to the requester."""
        body = b""
        header = {
            "srcUsername": self.info["username"],
            "srcGroup": self.info["groupID"],
            "desGroup": "",
            "admin": self.info.get("isAdmin", ""),
            "member": self.info.get("isMember", ""),
            "broadcast": False,
            "groupBroadcast": False,
            "memberRq": False,
            "ackRq": True,
            "denyRq": False,
            "leaveRq": False,
            "nodeRq": False,
            "big": False,
            "nodeRep": False,
            "contentLength": len(body),
        }
        packedHeader = packHeader(header)
        msg = packedHeader + body
        addr = self.reverseMap.get(request.fromUsername)
        worker = SenderWorker(addr, msg)
        worker.start()

    def sendGroupDenyRequest(self, request):
        """Reject a join request: header-only deny sent back to the requester."""
        body = b""
        header = {
            "srcUsername": self.info["username"],
            "srcGroup": self.info["groupID"],
            "desGroup": "",
            "admin": self.info.get("isAdmin", ""),
            "member": self.info.get("isMember", ""),
            "broadcast": False,
            "groupBroadcast": False,
            "memberRq": False,
            "ackRq": False,
            "denyRq": True,
            "leaveRq": False,
            "nodeRq": False,
            "big": False,
            "nodeRep": False,
            "contentLength": len(body),
        }
        packedHeader = packHeader(header)
        msg = packedHeader + body
        addr = self.reverseMap.get(request.fromUsername)
        worker = SenderWorker(addr, msg)
        worker.start()

    def sendGroupBroadcast(self):
        """Broadcast this node's full info dict to all peers (nodeRep set)."""
        data = self.info
        body = json.dumps(data).encode('utf-8')
        header = {
            "srcUsername": self.info["username"],
            "srcGroup": self.info["groupID"],
            "desGroup": "",
            "admin": self.info.get("isAdmin", ""),
            "member": self.info.get("isMember", ""),
            "broadcast": True,
            "groupBroadcast": False,
            "memberRq": False,
            "ackRq": False,
            "denyRq": False,
            "leaveRq": False,
            "nodeRq": False,
            "big": False,
            "nodeRep": True,
            "contentLength": len(body),
        }
        packedHeader = packHeader(header)
        msg = packedHeader + body
        for addr in self.reverseMap.values():
            worker = SenderWorker(addr, msg)
            worker.start()
class SenderWorker(threading.Thread):
    """Fire-and-forget UDP sender thread.

    Prefixes the payload with a random 256-bit package hash (so receivers
    can de-duplicate) and re-sends the datagram to one address for ~0.3 s.
    """

    def __init__(self, addr, msg):
        threading.Thread.__init__(self)
        self.addr = addr
        # Random 32-byte identifier prepended to the message.
        self.packageHash = bytes.fromhex(
            format(random.getrandbits(256), "x").zfill(64))
        self.msg = self.packageHash+msg
        self.sock = None

    def run(self):
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        start = time.time()
        logger.debug(
            f"On thread #{threading.get_ident()}, start connection attempt")
        while True:
            # Diagnostic dumps for unexpected payload/address types; an
            # (ip, port) tuple address is unwrapped to its host component.
            if type(self.msg) not in [str, bytearray, bytes]:
                print('Sender worker msg: ', self.msg)
            if type(self.addr) not in [str, bytearray, bytes]:
                print('SenderWorker addr: ', self.addr,
                      'type: ', type(self.addr))
                self.addr = self.addr[0]
            self.sock.sendto(self.msg, (self.addr, 8421,))
            # Bug fix: compare against the loop's start time. The original
            # reset iStart at the top of every iteration, so the 0.3 s
            # cut-off never triggered and the loop re-sent forever.
            if time.time() - start > 0.3:
                break
        logger.debug(f"Send complete using {time.time()-start} seconds")
        self.sock.close()
# Module-level logger: DEBUG level, mirrored to the console and applog.log.
logger = logging.getLogger('Sender')
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
fh = logging.FileHandler("applog.log")
fh.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
|
4,906 | ccf1710cff972eaa06e1ccb5ebedc70d946e3215 | from . import views
from django.conf.urls import url,re_path
# URL patterns for the enquiry pages. Consistency/modernisation: the original
# mixed the deprecated url() alias with re_path(); both take the same regex,
# so switching to re_path() everywhere is behaviour-identical.
enquiryUrlPattern = [
    # NOTE(review): this pattern is unanchored (no ^...$), so it matches the
    # substring anywhere in the path — kept as-is to preserve routing.
    re_path(r'daily-rate-enquiry', views.daily_rate_enquiry_form),
    re_path(r'^contact-us-landing-page/$', views.contact_us_landing_page),
]
|
4,907 | 675d564ad60870f49b88dece480d5a50a30491df | try:
import RPi.GPIO as GPIO
import time
import numpy as np
import matplotlib.pyplot as plt
from os.path import dirname, join as pjoin
from scipy.io import wavfile
import scipy.io
except ImportError:
print ("Import error!")
raise SystemExit
try:
chan_list = (26, 19, 13, 6, 5, 11, 9, 10)
GPIO.setmode (GPIO.BCM)
GPIO.setup (chan_list, GPIO.OUT)
except:
print ("GPIO Initialization error!")
raise SystemExit
def decToBinList (decNumber):
    """Return the 8 bits of decNumber (0..255) as a list of ints, MSB first.

    Raises ValueError when the value is outside the byte range. Non-integer
    inputs are truncated toward zero, as in the original.
    """
    if not 0 <= decNumber <= 255:
        raise ValueError
    # Zero-padded 8-character binary string, then one int per character.
    return [int(bit) for bit in format(int(decNumber), "08b")]
def num2dac (value):
    """Drive the 8 DAC pins with the bits of value (0..255, MSB on chan_list[0]).

    NOTE(review): the playback loop below passes a float under Python 3
    (true division); decToBinList truncates it via int() — confirm intended.
    """
    x = decToBinList (value)
    GPIO.output (chan_list, tuple (x))
# Load the WAV file and report its basic properties.
wav_fname = pjoin('SOUND.WAV')
samplerate, data = wavfile.read(wav_fname)
length = data.shape[0] / samplerate
print ("length: ", int(length), "s, number of channels: ", data.shape[1], ", Sample Rate: ", samplerate, ", data type: ", type (data[1, 0]))
try:
    # Play channel 0: shift signed 16-bit samples to 0..65535, scale to 0..255.
    # NOTE(review): there is no pacing here — samples are output as fast as
    # the loop runs, not at `samplerate`; confirm that is intended.
    for i in data[:, 0]:
        num2dac ((int(i) + 32768) / 256)
except ValueError:
    # Sample out of the DAC's 0..255 range (message in Russian: input size error)
    print ("Ошибка в в размере входных данных. Выходим из программы")
except:
    # Any other failure (message in Russian: unknown error, exiting)
    print ("Неизвестная ошибка. Выходим из программы")
finally:
    # Always drop the outputs low and release the GPIO pins.
    GPIO.output (chan_list, 0)
    GPIO.cleanup (chan_list)
4,908 | a83230e71cc1bcc843d00487746f16114d304eec | # MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from ..backend.ir_to_caffe import CaffeConverter
from ..converter_ir.ir_transform import IRTransform, TransformerRule
from ..frontend.mge_to_ir import MGE_FrontEnd
def mge_to_caffe(
    mge_fpath,
    prototxt="out.prototxt",
    caffemodel="out.caffemodel",
    outspec=None,
    use_empty_blobs=False,
):
    """Convert a serialised MegEngine model into Caffe prototxt/caffemodel files.

    The model is parsed into an IR graph, rewritten by a fixed set of
    transform rules, converted to Caffe layers, and dumped to disk.
    """
    assert isinstance(mge_fpath, str), "mge_fpath must be string"
    ir_graph = MGE_FrontEnd(mge_fpath, outspec=outspec).resolve()
    # Rewrite passes applied before emitting Caffe layers.
    rules = [
        TransformerRule.EXPAND_MUL_ADD3,
        TransformerRule.FUSE_FOR_LEAKY_RELU,
    ]
    transformed = IRTransform(rules).transform(ir_graph)
    converter = CaffeConverter(transformed, use_empty_blobs)
    converter.convert()
    assert isinstance(prototxt, str) and isinstance(
        caffemodel, str
    ), "'prototxt' and 'caffemodel' must be string"
    converter.dump(prototxt, caffemodel)
|
4,909 | 53110d6e7923cf65c514d54950a0be165582e9a0 | from basetest import simtest
import testutil
import logging, random
from nitro_parts.lib.imager import ccm as CCM
import numpy
###############################################################################
class DotProductTest(simtest):
    """Exercises the hardware 3-tap dot product in all four signedness modes.

    Coefficients are quantised to 8-bit fixed point (x32); data words are
    10-bit. The four device outputs (datao_uu/su/us/ss) are checked against
    software models for unsigned/signed coefficient x unsigned/signed data.
    """

    def _set_coeff(self, c):
        # Quantise coefficients to unsigned 8-bit fixed point (5 fractional
        # bits) and write them to the device; returns the quantised values.
        cq = (c * 32).astype(numpy.uint8)
        self.dev.set("DotProductTest","c0", cq[0])
        self.dev.set("DotProductTest","c1", cq[1])
        self.dev.set("DotProductTest","c2", cq[2])
        return cq

    def _set_data(self, d):
        # Write the three 10-bit data inputs to the device.
        self.dev.set("DotProductTest","d0", d[0])
        self.dev.set("DotProductTest","d1", d[1])
        self.dev.set("DotProductTest","d2", d[2])

    def _check(self, c, d):
        d = numpy.array(d)
        # ds: data reinterpreted as signed 10-bit (two's complement).
        ds = d.copy()
        ds[d>511] = d[d>511] - 1024
        c = numpy.array(c)
        cq = self._set_coeff(c)
        # cs: coefficients reinterpreted as signed 8-bit.
        cs = cq.astype(numpy.int8)
        cs[cq>127] = cq[cq>127] - 256
        self._set_data(d)
        # Unsigned x unsigned: saturate high at 1023.
        do = self.dev.get("DotProductTest","datao_uu")
        data_uu = min(1023, sum(cq * d)/32)
        #print "UU", do, data_uu
        self.assertTrue( do == data_uu, "UU")
        # Signed coeff x unsigned data: clamp to 0..1023.
        data_su = max(0, min(1023, sum(cs * d)/32))
        do = self.dev.get("DotProductTest", "datao_su")
        #print "SU", d, cs, do, data_su
        self.assertTrue( do == data_su, "SU " + str(d) + " " + str(cs) + " " + str(do) + " " + str(data_su))
        # Unsigned coeff x signed data: clamp to -512..511.
        data_us = max(-512, min(511, sum(cq * ds)/32))
        do = self.dev.get("DotProductTest", "datao_us")
        if(do > 511): do = do - 1024
        #print "US", do, ds, data_us
        self.assertTrue( do == data_us, "US")
        # Signed x signed: clamp to -512..511.
        data_ss = max(-512, min(511, sum(cs * ds)/32))
        do = self.dev.get("DotProductTest", "datao_ss")
        if(do > 511): do = do - 1024
        #print "SS", ds, cq, cs, do, data_ss
        self.assertTrue( do == data_ss, "SS")

    def testDotProduct(self):
        """Setup up Dot Product with various input and test output matches expectation."""
        # Directed cases: saturation extremes, single-tap selection,
        # mid-scale values and rounding boundaries around 1.5x gain.
        self._check([0.000, 1.000, 0.000], [ 1023, 1023, 1023 ])
        self._check([0.125, 0.750, 0.125], [ 1023, 1023, 1023 ])
        self._check([1/32., 1.000, 0.000], [ 1023, 1023, 1023 ])
        self._check([1.000, 1.000, 1.000], [ 1023, 1023, 1023 ])
        self._check([0, 0, 0], [ 1023, 1023, 1023 ])
        self._check([1/32., 0, 0], [ 1, 100, 100 ])
        self._check([1.0, 0, 0], [ 1, 100, 100 ])
        self._check([0, 1.0, 0], [ 1, 100, 100 ])
        self._check([0, 0, 1.0], [ 1, 100, 100 ])
        self._check([1.000, 1.000, 1.000], [ 513, 513, 513 ])
        self._check([1.000, 1.000, 1.000], [ 512, 512, 512 ])
        self._check([1.000, 1.000, 1.000], [ 0, 512, 0 ])
        self._check([0.000, 1.5, 0.000], [ 0, 680, 0 ])
        self._check([0.000, 1.5, 0.000], [ 0, 681, 0 ])
        self._check([0.000, 1.5, 0.000], [ 0, 682, 0 ])
        self._check([0.000, 1.5, 0.000], [ 0, 683, 0 ])
        self._check([0.000, 1.5, 0.000], [ 0, 339, 0 ])
        self._check([0.000, 1.5, 0.000], [ 0, 340, 0 ])
        self._check([0.000, 1.5, 0.000], [ 0, 341, 0 ])
        self._check([0.000, 1.5, 0.000], [ 0, 342, 0 ])
        self._check([0.000, 1.5, 0.000], [ 0, 1023-338, 0 ])
        self._check([0.000, 1.5, 0.000], [ 0, 1023-339, 0 ])
        self._check([0.000, 1.5, 0.000], [ 0, 1023-340, 0 ])
        self._check([0.000, 1.5, 0.000], [ 0, 1023-341, 0 ])
        self._check([0.000, -1.0, 0.000], [ 0, 500, 0 ])
        self._check([1/32., -1.0, 1/32.], [ 500, 500, 500 ])
        self._check([-1/32., -1.0, -1/32.], [ 400, 400, 400 ])
        # Randomised coverage across the full coefficient/data ranges.
        for idx in range(100):
            data = [ random.randint(0,1023) for r in range(3) ]
            coeff = [ max(-2.0, min(127/32., random.random() * 4 - 2)) for r in range(3) ]
            #print coeff, data
            self._check(coeff, data)
|
4,910 | 55d4f4bba2b72ec93cb883527d2a9c2ebe8ec337 | from __future__ import annotations
import logging
import os
import sys
from argparse import Namespace
from pathlib import Path
from uuid import uuid4
import pytest
from virtualenv.discovery.builtin import Builtin, get_interpreter
from virtualenv.discovery.py_info import PythonInfo
from virtualenv.info import fs_supports_symlink
@pytest.mark.skipif(not fs_supports_symlink(), reason="symlink not supported")
@pytest.mark.parametrize("case", ["mixed", "lower", "upper"])
def test_discovery_via_path(monkeypatch, case, tmp_path, caplog, session_app_data):
    """Discovery finds an interpreter on PATH regardless of the spec name's case."""
    caplog.set_level(logging.DEBUG)
    current = PythonInfo.current_system(session_app_data)
    # The spec embeds the running interpreter's version so discovery accepts it.
    core = f"somethingVeryCryptic{'.'.join(str(i) for i in current.version_info[0:3])}"
    name = "somethingVeryCryptic"
    if case == "lower":
        name = name.lower()
    elif case == "upper":
        name = name.upper()
    exe_name = f"{name}{current.version_info.major}{'.exe' if sys.platform == 'win32' else ''}"
    # Place a symlink to the real interpreter inside a fake scripts dir on PATH.
    target = tmp_path / current.install_path("scripts")
    target.mkdir(parents=True)
    executable = target / exe_name
    os.symlink(sys.executable, str(executable))
    # If the current interpreter is itself a venv, copy its pyvenv.cfg so the
    # symlinked executable resolves the same way.
    pyvenv_cfg = Path(sys.executable).parents[1] / "pyvenv.cfg"
    if pyvenv_cfg.exists():
        (target / pyvenv_cfg.name).write_bytes(pyvenv_cfg.read_bytes())
    new_path = os.pathsep.join([str(target), *os.environ.get("PATH", "").split(os.pathsep)])
    monkeypatch.setenv("PATH", new_path)
    interpreter = get_interpreter(core, [])
    assert interpreter is not None
def test_discovery_via_path_not_found(tmp_path, monkeypatch):
    """A random, nonexistent spec yields no interpreter from an empty PATH."""
    monkeypatch.setenv("PATH", str(tmp_path))
    interpreter = get_interpreter(uuid4().hex, [])
    assert interpreter is None
def test_relative_path(session_app_data, monkeypatch):
    """A spec given as a relative path to the executable is resolved."""
    sys_executable = Path(PythonInfo.current_system(app_data=session_app_data).system_executable)
    cwd = sys_executable.parents[1]
    monkeypatch.chdir(str(cwd))
    relative = str(sys_executable.relative_to(cwd))
    result = get_interpreter(relative, [], session_app_data)
    assert result is not None
def test_discovery_fallback_fail(session_app_data, caplog):
    """When every requested spec is bogus, discovery returns None and accepts nothing."""
    caplog.set_level(logging.DEBUG)
    builtin = Builtin(
        Namespace(app_data=session_app_data, try_first_with=[], python=["magic-one", "magic-two"], env=os.environ),
    )
    result = builtin.run()
    assert result is None
    assert "accepted" not in caplog.text
def test_discovery_fallback_ok(session_app_data, caplog):
    """Discovery falls through a bogus spec to the real interpreter and accepts it."""
    caplog.set_level(logging.DEBUG)
    builtin = Builtin(
        Namespace(app_data=session_app_data, try_first_with=[], python=["magic-one", sys.executable], env=os.environ),
    )
    result = builtin.run()
    assert result is not None, caplog.text
    assert result.executable == sys.executable, caplog.text
    assert "accepted" in caplog.text
|
4,911 | e56a7912b9940b1cab6c19d0047f1f60f0083f66 | from data_structures.datacenter import Datacenter, urllib, json,
URL = "http://www.mocky.io/v2/5e539b332e00007c002dacbe"
def get_data(url, max_retries=5, delay_between_retries=1):
    """
    Fetch the data from http://www.mocky.io/v2/5e539b332e00007c002dacbe
    and return it as a JSON object.

    Args:
        url (str): The url to be fetched.
        max_retries (int): Number of retries.
        delay_between_retries (int): Delay between retries in seconds.

    Returns:
        data (dict), or None when every attempt failed.
    """
    # Fixes vs. the original draft: missing colon after `try`, iteration over
    # the int `max_retries`, the misspelled `delay_between_tries` name,
    # sleeping before the first attempt, and never returning the parsed data.
    for attempt in range(max_retries):
        try:
            response = urllib.request.urlopen(url)
            return json.loads(response.read())
        except Exception:
            if attempt == max_retries - 1:
                return None
            # Wait only between attempts, not before the first one.
            time.sleep(delay_between_retries)
def main():
    """
    Main entry to our program.
    """
    data = get_data(URL)
    if not data:
        raise ValueError('No data to process')
    # Build one Datacenter wrapper per top-level key of the JSON payload.
    datacenters = [
        Datacenter(key, value)
        for key, value in data.items()
    ]
    pass  # the rest of your logic here


if __name__ == '__main__':
    main()
|
4,912 | eeece3bf423f85f05ef11db47909215578e64aec | from application.routes import pad_num, tracking_gen
from flask import url_for
from flask_testing import TestCase
from application import app, db
from application.models import Users, Orders
from os import getenv
class TestCase(TestCase):
    """Shared fixture: file-based SQLite test DB seeded with one user and three orders.

    NOTE(review): this class shadows the imported flask_testing.TestCase
    name; every test class below inherits from this local subclass.
    """

    def create_app(self):
        # Test configuration: throwaway SQLite file, CSRF disabled so form
        # posts in tests need no token.
        app.config.update(
            SQLALCHEMY_DATABASE_URI="sqlite:///test.db",
            SECRET_KEY="TEST_SECRET_KEY",
            DEBUG=True,
            WTF_CSRF_ENABLED=False,
        )
        return app

    def setUp(self):
        # Seed one user (id=1) and one order in each of the three statuses.
        db.create_all()
        new_user = Users(
            email="test@gmail.com",
            name="Test",
            house_number="8",
            postcode="G3 8PX",
            phone="07999999999",
        )
        db.session.add(new_user)
        new_order = Orders(customer_id=1)
        db.session.add(new_order)
        new_order = Orders(customer_id=1, order_status="out for delivery")
        db.session.add(new_order)
        new_order = Orders(customer_id=1, order_status="delivered")
        db.session.add(new_order)
        db.session.commit()

    def tearDown(self):
        # Drop everything so each test starts from a clean schema.
        db.session.remove()
        db.drop_all()
class TestPadNum(TestCase):
    """pad_num() zero-pads its argument to a 4-character string."""

    def test_pad_num(self):
        self.assertEqual(len(pad_num(3)), 4)
class TestTrackingGen(TestCase):
    """tracking_gen() produces an 8-character tracking number."""

    def test_tracking_gen(self):
        self.assertEqual(len(tracking_gen()), 8)
class TestViews(TestCase):
    """Smoke tests: GET on each route returns 200, or 405 for POST-only routes."""

    def test_home_get(self):
        response = self.client.get(url_for("home"))
        self.assertEqual(response.status_code, 200)

    def test_add_order_get(self):
        response = self.client.get(url_for("add_order"))
        self.assertEqual(response.status_code, 200)

    def test_view_order_get(self):
        response = self.client.get(url_for("view_order", id=1))
        self.assertEqual(response.status_code, 200)

    def test_register_get(self):
        response = self.client.get(url_for("register"))
        self.assertEqual(response.status_code, 200)

    def test_update_order_get(self):
        response = self.client.get(url_for("update_order", id=1))
        self.assertEqual(response.status_code, 200)

    def test_delete_get(self):
        # delete is POST-only: GET must be rejected with 405.
        response = self.client.get(url_for("delete", id=1))
        self.assertEqual(response.status_code, 405)

    def test_delivered_get(self):
        # delivered is POST-only: GET must be rejected with 405.
        response = self.client.get(url_for("delivered", id=1))
        self.assertEqual(response.status_code, 405)
class TestCreateUser(TestCase):
    """Registering via the form creates a second user with the posted fields."""

    def test_create_user(self):
        response = self.client.post(
            url_for("register"),
            data=dict(
                email="test2@gmail.com",
                name="Test2",
                house_number="82",
                postcode="G2 8PX",
                phone="0788888888",
            ),
            follow_redirects=True,
        )
        # The seeded user is id=1, so the new registration gets id=2.
        user = Users.query.filter_by(id=2).first()
        self.assertEqual("test2@gmail.com", user.email)
        self.assertEqual("Test2", user.name)
        self.assertEqual("82", user.house_number)
        self.assertEqual("G2 8PX", user.postcode)
        self.assertEqual("0788888888", user.phone)
class TestDuplicateEmail(TestCase):
    """Registering with an already-used email.

    NOTE(review): this only exercises the route — there are no assertions on
    the response or the database; consider asserting the rejection behaviour.
    """

    def test_duplicate_email(self):
        response = self.client.post(
            url_for("register"),
            data=dict(
                email="test@gmail.com",
                name="Test",
                house_number="82",
                postcode="G2 8PX",
                phone="0788888888",
            ),
            follow_redirects=True,
        )
class TestAddOrder(TestCase):
    """Posting an existing user's email creates an order linked to that user."""

    def test_add_order(self):
        response = self.client.post(
            url_for("add_order", id=1),
            data=dict(email="test@gmail.com"),
            follow_redirects=True,
        )
        order = Orders.query.filter_by(id=1).first()
        user = Users.query.filter_by(id=1).first()
        self.assertEqual(1, order.customer_id)
        # A fresh order starts as "order placed" with no tracking number yet.
        self.assertEqual("order placed", order.order_status)
        self.assertEqual(None, order.tracking_num)
        self.assertIn(order, user.orders)
class TestAddOrderNoUser(TestCase):
    def test_add_order_no_user(self):
        """Attempt to place an order for an email that matches no user.

        TODO(review): no assertion — add a check on the expected error
        response and that no Orders row was created.
        """
        response = self.client.post(
            url_for("add_order"), data=dict(email="nonexistingemail@gmail.com")
        )
class TestViewOrder(TestCase):
    def test_view_order(self):
        """The order-detail page should render the fixture customer's details.

        Note: ``data`` on a GET request is sent as the request body, which the
        view almost certainly ignores — the assertions on the response body are
        what actually exercise the page.
        """
        response = self.client.get(
            url_for("view_order", id=1),
            data=dict(
                id="0006",
                name="Test",
                house_number="8",
                postcode="G3 8PX",  # fixed typo: was "postode"
                phone="07999999999",
            ),
        )
        self.assertIn(b"0001", response.data)
        self.assertIn(b"Test", response.data)
        self.assertIn(b"8", response.data)
        self.assertIn(b"G3 8PX", response.data)
        self.assertIn(b"07999999999", response.data)
class TestUpdateOrder(TestCase):
    def test_update_order(self):
        """Posting an update changes the status and issues a tracking number."""
        payload = {"status": "out for delivery", "tracking_num_len": 8}
        self.client.post(url_for("update_order", id=1), data=payload)
        order = Orders.query.filter_by(id=1).first()
        self.assertEqual("out for delivery", order.order_status)
        self.assertEqual(len(order.tracking_num), 8)
class TestDelivered(TestCase):
    def test_delivered(self):
        """Marking an order delivered updates its status."""
        self.client.post(url_for("delivered", id=1))
        updated = Orders.query.filter_by(id=1).first()
        self.assertEqual("delivered", updated.order_status)
class TestDelete(TestCase):
    def test_delete(self):
        """Deleting an order removes it from the database."""
        self.client.post(url_for("delete", id=1))
        self.assertIsNone(Orders.query.filter_by(id=1).first())
|
4,913 | 21fb9622add4d19b2914118e3afd3867b2368a50 | #/usr/bin/env python3
def nth_prime(n):
    """Return the n-th prime number (1-indexed: nth_prime(1) == 2).

    Args:
        n: which prime to return; must be >= 1.

    Returns:
        The n-th prime as an int.

    Raises:
        ValueError: if n < 1 (the original silently returned 2).
    """
    if n < 1:
        raise ValueError("n must be >= 1")
    primes = []
    candidate = 2
    while len(primes) < n:
        # Trial division only by primes <= sqrt(candidate): any composite
        # number has a prime factor no larger than its square root, so the
        # original's division by *every* known prime was wasted work.
        if all(candidate % p != 0 for p in primes if p * p <= candidate):
            primes.append(candidate)
        candidate += 1
    return primes[-1]
if __name__ == "__main__":
    # Interactive entry point: ask which prime to compute and print it.
    n = int(input("Which one? "))
    print(nth_prime(n))
|
4,914 | 6bd423223e1ec2bb3a213158ac6da3a6483b531f | from django.db import models
from accounts.models import User
from cmdb.models.base import IDC
from cmdb.models.asset import Server, NetDevice
class CPU(models.Model):
    """Accessory catalogue entry for a CPU model."""
    # Example version string: Intel(R) Xeon(R) Gold 5118 CPU @ 2.30GHz
    version = models.CharField('型号版本', max_length=100, unique=True)  # model/version string
    speed = models.PositiveSmallIntegerField('频率MHz')  # clock speed in MHz
    process = models.PositiveSmallIntegerField('线程数')  # thread count
    created_date = models.DateTimeField('创建时间', auto_now_add=True)  # row creation time
    class Meta:
        db_table = 'cmdb_acc_cpu'
        verbose_name = u'配件CPU表'
        verbose_name_plural = u'配件CPU表'
class Memory(models.Model):
    """Accessory catalogue entry for a RAM module (type/size/speed must be unique together)."""
    ram_type = models.CharField('内存类型', max_length=4, choices=(('ddr3', 'DDR3'), ('ddr4', 'DDR4'), ('ddr5', 'DDR5')))  # RAM generation
    ram_size = models.PositiveSmallIntegerField('内存容量(G)')  # capacity in GB
    speed = models.PositiveSmallIntegerField('速率(MT/s)')  # transfer rate in MT/s
    created_date = models.DateTimeField('创建时间', auto_now_add=True)  # row creation time
    class Meta:
        db_table = 'cmdb_acc_memory'
        unique_together = ('ram_type', 'ram_size', 'speed')
        verbose_name = u'配件内存表'
        verbose_name_plural = u'配件内存表'
class Disk(models.Model):
    """Accessory catalogue entry for a hard drive (interface/capacity/rpm/form factor)."""
    device_type = models.CharField('硬盘类型', max_length=4, choices=(('sata', 'SATA'), ('sas', 'SAS'), ('ssd', 'SSD')))  # interface/technology
    capacity = models.PositiveSmallIntegerField('容量(G)')  # capacity in GB
    rpm = models.PositiveSmallIntegerField('转率')  # rotational speed
    dimensions = models.CharField('尺寸(英寸)', max_length=3, choices=(('2.5', '2.5寸'), ('3.5', '3.5寸')))  # form factor in inches
    created_date = models.DateTimeField('创建时间', auto_now_add=True)  # row creation time
    class Meta:
        db_table = 'cmdb_acc_disk'
        unique_together = ('device_type', 'capacity', 'rpm', 'dimensions')
        verbose_name = u'配件硬盘表'
        verbose_name_plural = u'配件硬盘表'
class Caddy(models.Model):
    """Accessory catalogue entry for a drive caddy (tray)."""
    # value -> display-name map, reused as the `choices` for the field below
    caddy_dimensions = {
        '2.5s': '2.5寸 R740', '2.5': '2.5寸', '3.5': '3.5寸'
    }
    dimensions = models.CharField('尺寸(英寸)', max_length=4, choices=caddy_dimensions.items(), unique=True)  # form factor
    created_date = models.DateTimeField('创建时间', auto_now_add=True)  # row creation time
    class Meta:
        db_table = 'cmdb_acc_caddy'
        verbose_name = u'配件硬盘托架表'
        verbose_name_plural = u'配件硬盘托架表'
class NetworkAdapter(models.Model):
    """Accessory catalogue entry for a network interface card, keyed by link speed."""
    speed = models.CharField('网卡速率', max_length=6, choices=(('100MbE', '百兆'), ('GbE', '千兆'), ('10GbE', '万兆')), unique=True)  # link speed class
    created_date = models.DateTimeField('创建时间', auto_now_add=True)  # row creation time
    class Meta:
        db_table = 'cmdb_acc_network_adapter'
        verbose_name = u'配件网卡表'
        verbose_name_plural = u'配件网卡表'
class NetworkCable(models.Model):
    """Accessory catalogue entry for an Ethernet cable (category + length)."""
    cat = models.CharField('网线类型', max_length=2, choices=(('5', '5类线'), ('5e', '超5类线'), ('6', '6类线'), ('6e', '超6类线')))  # cable category (Cat5/5e/6/6e)
    length = models.PositiveSmallIntegerField('长度(米)')  # length in meters
    created_date = models.DateTimeField('创建时间', auto_now_add=True)  # row creation time
    class Meta:
        db_table = 'cmdb_acc_network_cable'
        unique_together = ('cat', 'length')
        verbose_name = u'配件网线表'
        verbose_name_plural = u'配件网线表'
class OpticalTransceiver(models.Model):
    # Small form-factor pluggable transceiver 小型可插拔光模块 (SFP)
    """Accessory catalogue entry for an optical transceiver (SFP module).

    Example vendor spec this model summarizes:
    Mfg. Compatibility: Cisco
    Part Number: SFP-10G-LR-10pk
    Form Factor: SFP+
    TX Wavelength: 1310nm
    Reach: 10km
    Cable Type: SMF
    Rate Category: 10GBase
    Interface Type: LR
    DDM: Yes
    Connector Type: Dual-LC
    """
    information = models.CharField('综述介绍', max_length=20, blank=True, null=True)  # free-form description
    mode = models.CharField('模式', max_length=6, choices=(('single', '单模'), ('multi', '多模')))  # single/multi mode fiber
    reach = models.FloatField('最大传输距离(km)')  # max reach in km
    rate = models.CharField('传输速率', max_length=6, choices=(('100MbE', '百兆'), ('GbE', '千兆'), ('10GbE', '万兆')))  # data rate class
    image = models.ImageField(u'图片', upload_to='images/accessory/%Y%m%d', null=True, blank=True)  # optional photo
    created_date = models.DateTimeField('创建时间', auto_now_add=True)  # row creation time
    class Meta:
        db_table = 'cmdb_acc_optical_transceiver'
        unique_together = ('mode', 'reach', 'rate')
        verbose_name = u'配件光模块表'
        verbose_name_plural = u'配件光模块表'
class JumpWire(models.Model):
    """Accessory catalogue entry for a fiber patch (jump) cable."""
    information = models.CharField('综述介绍', max_length=20, blank=True, null=True)  # free-form description
    mode = models.CharField('模式', max_length=6, choices=(('single', '单模'), ('multi', '多模')))  # single/multi mode fiber
    interface = models.CharField('光纤接口', max_length=6, choices=(('lc', '小方头'), ('sc', '大方头'), ('fc', '圆头')))  # connector type (LC/SC/FC)
    length = models.PositiveSmallIntegerField('长度(米)')  # length in meters
    image = models.ImageField(u'图片', upload_to='images/accessory/%Y%m%d', null=True, blank=True)  # optional photo
    created_date = models.DateTimeField('创建时间', auto_now_add=True)  # row creation time
    class Meta:
        db_table = 'cmdb_acc_jump_wire'
        unique_together = ('mode', 'interface', 'length')
        verbose_name = u'配件跳线表'
        verbose_name_plural = u'配件跳线表'
# Accessory kind code -> Chinese display name; reused as the `choices`
# source for Accessory.mode and InventoryRecord.accessory.
accessory_item = {
    'cpu': 'CPU', 'memory': '内存', 'disk': '硬盘', 'caddy': '硬盘托架', 'network_adapter': '网卡', 'network_cable': '网线',
    'transceiver': '光模块', 'jump_wire': '跳线'
}
class Accessory(models.Model):
    """One physical accessory item in stock, typed by `mode` + `mode_id`."""
    storehouse = models.ForeignKey(IDC, on_delete=models.CASCADE, help_text='仓库')  # warehouse (IDC) holding the item
    mode = models.CharField('配件类型', max_length=20, choices=accessory_item.items())  # accessory kind code
    # NOTE(review): mode_id is an untyped primary key into whichever catalogue
    # table `mode` names (CPU, Memory, ...) — no FK integrity is enforced;
    # confirm this soft reference is intentional.
    mode_id = models.IntegerField('配件型号表主键ID')
    manufacturer = models.CharField('硬件制造商', max_length=20, blank=True, null=True)  # hardware manufacturer
    sn = models.CharField('Serial Number', max_length=50, blank=True, null=True)
    vendor = models.CharField('采购渠道(供应商)', max_length=20)  # purchase channel / supplier
    trade_date = models.DateField('采购时间', blank=True, null=True)  # purchase date
    expired_date = models.DateField('过保时间', blank=True, null=True)  # warranty expiry date
    comment = models.CharField('备注', max_length=50, blank=True, null=True)  # remarks
    is_active = models.BooleanField('是否可用', default=True)  # whether the item is usable
    created_date = models.DateTimeField('创建时间', auto_now_add=True)  # row creation time
    class Meta:
        db_table = 'cmdb_acc_accessory'
        verbose_name = u'配件详细表'
        verbose_name_plural = u'配件详细表'
class UseRecord(models.Model):
    """
    Records which asset uses an accessory: CPU/memory/disk/NIC/transceiver
    parts need to be traceable to the server or network device they are
    installed in (or removed from).
    """
    accessory = models.ForeignKey(Accessory, on_delete=models.CASCADE, help_text='配件')  # the accessory involved
    # Exactly one of server / net_device is expected to be set per record.
    server = models.ForeignKey(Server, on_delete=models.CASCADE, help_text='服务器', blank=True, null=True)
    net_device = models.ForeignKey(NetDevice, on_delete=models.CASCADE, help_text='网络设备', blank=True, null=True)
    operate = models.CharField('操作', max_length=7, choices=(('install', '安装'), ('remove', '取下')), default='install')  # install vs. remove
    created_date = models.DateTimeField('创建时间', auto_now_add=True)  # row creation time
    class Meta:
        db_table = 'cmdb_acc_use_record'
        verbose_name = u'配件使用记录表'
        verbose_name_plural = u'配件使用记录表'
class InventoryRecord(models.Model):
    """Audit log of stock movements: purchases, check-outs and returns of accessories."""
    accessory = models.CharField('配件', max_length=20, choices=accessory_item.items())  # accessory kind code
    operate = models.CharField('操作', max_length=8, choices=(('purchase', '采购'), ('receive', '领用'), ('revert', '归还')))  # purchase / receive / return
    server = models.ForeignKey(Server, on_delete=models.CASCADE, help_text='服务器', blank=True, null=True)  # target server, if any
    net_device = models.ForeignKey(NetDevice, on_delete=models.CASCADE, help_text='网络设备', blank=True, null=True)  # target network device, if any
    content = models.CharField('内容', max_length=250, blank=True, null=True)  # free-form details
    user = models.ForeignKey(User, on_delete=models.CASCADE, help_text='操作员')  # operator who performed the action
    created_date = models.DateTimeField('创建时间', auto_now_add=True)  # row creation time
    class Meta:
        db_table = 'cmdb_acc_inventory_record'
        verbose_name = u'配件进货及消费记录表'
        verbose_name_plural = u'配件进货及消费记录表'
|
4,915 | 9f8d79d141d414c1256e39f58e59f97711acfee4 | #!/usr/bin/env python3
"""
Main chat API module
"""
import json
import os
import signal
import traceback
import tornado.escape
import tornado.gen
import tornado.httpserver
import tornado.ioloop
import tornado.locks
import tornado.web
from jsonschema.exceptions import ValidationError
from db import DB, DatabaseError
from logging_utils import get_logger, init_logging
from messages import MessagesNewAPI
from messages import MessagesUpdatesAPI
from users import UsersAPI
from chats import ChatsAPI, ChatsUserAPI
from contacts import ContactsAPI
LOGGER = get_logger(__name__)
# Server version is injected via the VERSION env var at deploy time.
SERVER_VERSION = os.getenv('VERSION', 'unknown')
PUBLIC_API_PORT = 8888
DATABASE_LOCATION = os.getenv('DATABASE_LOCATION', '/tmp/cryptochat_db.json')
# Grace period (seconds) given to in-flight requests during shutdown.
_SHUTDOWN_TIMEOUT = 3
class BaseHandler(tornado.web.RequestHandler):
    """Base handler setting CORS headers.

    Also holds one shared API instance per endpoint (class attributes are
    assigned once in main()) and provides the common request pipeline in
    handle_request().
    """
    messages_new_api = None
    messages_updates_api = None
    users_api = None
    chats_api = None
    chats_user_api = None
    contacts_new_api = None
    def data_received(self, chunk):
        # Streamed request bodies are not used by this API.
        pass
    def set_default_headers(self):
        # Permissive CORS so browser clients on any origin can call the API.
        self.set_header("Access-Control-Allow-Origin", "*")
        self.set_header("Access-Control-Allow-Headers", "Content-Type")
    def options(self):
        """Answer OPTIONS request."""
        self.finish()
    def get_post_data(self):
        """extract input JSON from POST request

        Returns the parsed object, or None when the body is missing or not
        valid JSON.
        """
        json_data = ''
        # check if JSON is passed as a file or as a body of POST request
        if self.request.files:
            json_data = self.request.files['file'][0][
                'body']  # pick up only first file (index 0)
        elif self.request.body:
            json_data = self.request.body
        try:
            data = json.loads(json_data)
        except ValueError:
            data = None
        return data
    async def handle_request(self, api_endpoint, api_version):
        """Takes care of validation of input and execution of POST and GET methods.

        Raises tornado.web.HTTPError for validation/value errors (rendered by
        write_error) and re-raises DatabaseError for the framework to handle.
        NOTE(review): a request whose JSON parses to a falsy value (e.g. {})
        is also treated as malformed by the `if data:` check — confirm that
        is intended.
        """
        code = 400
        data = self.get_post_data()
        request_method = self.request.method.lower()
        if data:
            try:
                # will call process_get or process_post methods for the given API
                res = await getattr(api_endpoint, 'process_' + request_method)(api_version, data)
                code = 200
            except ValidationError as validerr:
                # Prefix the message with the offending field name if known.
                if validerr.absolute_path:
                    res = '%s : %s' % (
                        validerr.absolute_path.pop(), validerr.message)
                else:
                    res = '%s' % validerr.message
                LOGGER.error('ValidationError: %s', res)
                raise tornado.web.HTTPError(reason=res)
            except ValueError as valuerr:
                res = str(valuerr)
                LOGGER.error('ValueError: %s', res)
                raise tornado.web.HTTPError(reason=res)
            except DatabaseError as dberr:
                # err_id lets operators correlate the logged payload with the report.
                err_id = dberr.__hash__()
                res = str(dberr.reason)
                LOGGER.error(res)
                LOGGER.info("Input data for <%s>: %s", err_id, data)
                raise dberr
            except Exception as err:  # pylint: disable=broad-except
                err_id = err.__hash__()
                res = 'Internal server error <%s>:' \
                    'please include this error id in bug report.' % err_id
                code = 500
                LOGGER.exception(res)
                LOGGER.info("Input data for <%s>: %s", err_id, data)
                raise tornado.web.HTTPError(reason=res)
        else:
            res = 'Error: malformed input JSON.'
            LOGGER.error(res)
            raise tornado.web.HTTPError(reason=res)
        # raise tornado.web.HTTPError(status_code=444, reason='error happened')
        # Success path only: error branches above all raise before this point.
        self.set_status(code)
        self.write(res)
    def write_error(self, status_code, **kwargs):
        """Render any HTTPError as a JSON error envelope (with traceback in debug)."""
        self.set_header('Content-Type', 'application/json')
        if self.settings.get("serve_traceback") and "exc_info" in kwargs:
            # in debug mode, try to send a traceback
            lines = []
            for line in traceback.format_exception(*kwargs["exc_info"]):
                lines.append(line)
            self.finish(json.dumps({
                'error': {
                    'code': status_code,
                    'message': self._reason,
                    'traceback': lines,
                }
            }))
        else:
            self.finish(json.dumps({
                'error': {
                    'code': status_code,
                    'message': self._reason,
                }
            }))
class MainHandler(BaseHandler):
    """Handler for the API root."""
    def get(self):
        """Returns the root endpoint of the API."""
        # The root carries no API itself; point clients at the real endpoints.
        self.write(
            '{"error": "cryptochat-server main page, '
            'please refer to /api/message/new or /api/message/updates"}')
class MessageNewHandler(BaseHandler):
    """Post a new message to the chat room."""
    async def post(self):
        """
        Add a new message to the server.
        """
        # Validation and dispatch are shared: BaseHandler.handle_request (API v1).
        await self.handle_request(self.messages_new_api, 1)
class MessageUpdatesHandler(BaseHandler):
    """Long-polling request for new messages.
    Waits until new messages are available before returning anything.
    """
    async def post(self):
        """Checks for the new message updates, waits until
        new messages are available."""
        await self.handle_request(self.messages_updates_api, 1)
    # def on_connection_close(self):
    #     self.wait_future.cancel()
class UsersHandler(BaseHandler):
    """Handler class providing /users POST requests."""
    async def post(self):
        """Adds a new user to the database."""
        await self.handle_request(self.users_api, 1)
    async def get(self):
        """Returns details of particular user."""
        # GET also reads a JSON body: handle_request dispatches on the HTTP verb.
        await self.handle_request(self.users_api, 1)
class ChatsHandler(BaseHandler):
    """Handler providing /chats POST requests"""
    async def post(self):
        """Adds a new chat to the database."""
        await self.handle_request(self.chats_api, 1)
    async def get(self):
        """Returns details of particular chat."""
        await self.handle_request(self.chats_api, 1)
class ChatsUserHandler(BaseHandler):
    """Handler providing /chats/user GET requests"""
    async def get(self):
        """Returns chats for the given user."""
        await self.handle_request(self.chats_user_api, 1)
class ContactsNewHandler(BaseHandler):
    """Handler providing /contacts POST requests"""
    async def post(self):
        """Adds a new contact to the database"""
        await self.handle_request(self.contacts_new_api, 1)
    async def get(self):
        """Returns details of particular contact."""
        await self.handle_request(self.contacts_new_api, 1)
class Application(tornado.web.Application):
    """ main cryptochat application class """
    def __init__(self):
        # URL routing table: one handler class per public endpoint.
        handlers = [
            (r"/", MainHandler),
            (r"/api/message/new", MessageNewHandler),
            (r"/api/message/updates", MessageUpdatesHandler),
            (r"/api/users", UsersHandler),
            (r"/api/chats", ChatsHandler),
            (r"/api/chats/user", ChatsUserHandler),
            (r"/api/contacts", ContactsNewHandler),
        ]
        # NOTE(review): debug=True enables autoreload/template reload — confirm
        # this is intended outside development (tracebacks stay hidden via
        # serve_traceback=False).
        tornado.web.Application.__init__(self, handlers, debug=True, serve_traceback=False)
def main():
    """ The main function. It creates cryptochat application, run everything."""
    async def shutdown():
        # Stop accepting new connections, give in-flight requests a grace
        # period, then stop the IO loop.
        server.stop()
        await tornado.gen.sleep(_SHUTDOWN_TIMEOUT)
        tornado.ioloop.IOLoop.current().stop()
        LOGGER.info("Server was successfully shut down.")
    def exit_handler(sig, frame):  # pylint: disable=unused-argument
        def get_sig_name(sig):
            # Reverse-map a signal number to its SIG* name for logging.
            return dict((k, v) for v, k in reversed(sorted(signal.__dict__.items()))
                        if v.startswith('SIG') and not v.startswith('SIG_')).pop(sig)
        LOGGER.warning("Registered %s, shutting down.", get_sig_name(sig))
        # Schedule the async shutdown on the IO loop (safe from a signal handler).
        tornado.ioloop.IOLoop.instance().add_callback_from_signal(shutdown)
    signal.signal(signal.SIGTERM, exit_handler)
    signal.signal(signal.SIGINT, exit_handler)
    init_logging()
    cryptochat_db = DB(DATABASE_LOCATION)
    cryptochat_app = Application()
    server = tornado.httpserver.HTTPServer(cryptochat_app)
    server.bind(PUBLIC_API_PORT)
    server.start()
    LOGGER.info("Starting cryptochat (version %s).", SERVER_VERSION)
    # All handlers share one API instance per endpoint via BaseHandler class attributes.
    BaseHandler.messages_new_api = MessagesNewAPI(cryptochat_db)
    BaseHandler.messages_updates_api = MessagesUpdatesAPI(cryptochat_db)
    BaseHandler.users_api = UsersAPI(cryptochat_db)
    BaseHandler.chats_api = ChatsAPI(cryptochat_db)
    BaseHandler.chats_user_api = ChatsUserAPI(cryptochat_db)
    BaseHandler.contacts_new_api = ContactsAPI(cryptochat_db)
    tornado.ioloop.IOLoop.current().start()
if __name__ == "__main__":
    # Script entry point: start the chat server.
    main()
|
4,916 | 8b965fd91396735e0153390b4eff540d3aac3aff | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2020, Lukas Bestle <project-ansible@lukasbestle.com>
# Copyright: (c) 2017, Michael Heap <m@michaelheap.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
module: mas
short_description: Manage Mac App Store applications with mas-cli
description:
- Installs, uninstalls and updates macOS applications from the Mac App Store using the C(mas-cli).
version_added: '0.2.0'
author:
- Michael Heap (@mheap)
- Lukas Bestle (@lukasbestle)
options:
id:
description:
- The Mac App Store identifier of the app(s) you want to manage.
- This can be found by running C(mas search APP_NAME) on your machine.
type: list
elements: int
state:
description:
- Desired state of the app installation.
- The C(absent) value requires root permissions, also see the examples.
type: str
choices:
- absent
- latest
- present
default: present
upgrade_all:
description:
- Upgrade all installed Mac App Store apps.
type: bool
default: "no"
aliases: ["upgrade"]
requirements:
- macOS 10.11+
- "mas-cli (U(https://github.com/mas-cli/mas)) 1.5.0+ available as C(mas) in the bin path"
- The Apple ID to use already needs to be signed in to the Mac App Store (check with C(mas account)).
notes:
- This module supports C(check_mode).
'''
EXAMPLES = '''
- name: Install Keynote
community.general.mas:
id: 409183694
state: present
- name: Install Divvy with command mas installed in /usr/local/bin
community.general.mas:
id: 413857545
state: present
environment:
PATH: /usr/local/bin:{{ ansible_facts.env.PATH }}
- name: Install a list of apps
community.general.mas:
id:
- 409183694 # Keynote
- 413857545 # Divvy
state: present
- name: Ensure the latest Keynote version is installed
community.general.mas:
id: 409183694
state: latest
- name: Upgrade all installed Mac App Store apps
community.general.mas:
upgrade_all: yes
- name: Install specific apps and also upgrade all others
community.general.mas:
id:
- 409183694 # Keynote
- 413857545 # Divvy
state: present
upgrade_all: yes
- name: Uninstall Divvy
community.general.mas:
id: 413857545
state: absent
become: yes # Uninstallation requires root permissions
'''
RETURN = r''' # '''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from distutils.version import StrictVersion
import os
class Mas(object):
    """Thin wrapper around the `mas` CLI.

    Tracks install/upgrade/uninstall counts for the final module result and
    caches the installed/outdated app lists for the duration of one run.
    """
    def __init__(self, module):
        self.module = module
        # Initialize data properties
        self.mas_path = self.module.get_bin_path('mas')
        self._checked_signin = False
        self._installed = None  # Populated only if needed
        self._outdated = None  # Populated only if needed
        self.count_install = 0
        self.count_upgrade = 0
        self.count_uninstall = 0
        self.result = {
            'changed': False
        }
        self.check_mas_tool()
    def app_command(self, command, id):
        ''' Runs a `mas` command on a given app; command can be 'install', 'upgrade' or 'uninstall' '''
        if not self.module.check_mode:
            if command != 'uninstall':
                self.check_signin()
            rc, out, err = self.run([command, str(id)])
            if rc != 0:
                self.module.fail_json(
                    msg="Error running command '{0}' on app '{1}': {2}".format(command, str(id), out.rstrip())
                )
        # No error or dry run
        # Dynamic attribute update bumps count_install/count_upgrade/count_uninstall,
        # including in check mode, so the reported change counts stay accurate.
        self.__dict__['count_' + command] += 1
    def check_mas_tool(self):
        ''' Verifies that the `mas` tool is available in a recent version '''
        # Is the `mas` tool available at all?
        if not self.mas_path:
            self.module.fail_json(msg='Required `mas` tool is not installed')
        # Is the version recent enough?
        rc, out, err = self.run(['version'])
        if rc != 0 or not out.strip() or StrictVersion(out.strip()) < StrictVersion('1.5.0'):
            self.module.fail_json(msg='`mas` tool in version 1.5.0+ needed, got ' + out.strip())
    def check_signin(self):
        ''' Verifies that the user is signed in to the Mac App Store '''
        # Only check this once per execution
        if self._checked_signin:
            return
        rc, out, err = self.run(['account'])
        if out.split("\n", 1)[0].rstrip() == 'Not signed in':
            self.module.fail_json(msg='You must be signed in to the Mac App Store')
        self._checked_signin = True
    def exit(self):
        ''' Exit with the data we have collected over time '''
        msgs = []
        if self.count_install > 0:
            msgs.append('Installed {0} app(s)'.format(self.count_install))
        if self.count_upgrade > 0:
            msgs.append('Upgraded {0} app(s)'.format(self.count_upgrade))
        if self.count_uninstall > 0:
            msgs.append('Uninstalled {0} app(s)'.format(self.count_uninstall))
        if msgs:
            self.result['changed'] = True
            self.result['msg'] = ', '.join(msgs)
        self.module.exit_json(**self.result)
    def get_current_state(self, command):
        ''' Returns the list of all app IDs; command can either be 'list' or 'outdated' '''
        rc, raw_apps, err = self.run([command])
        rows = raw_apps.split("\n")
        if rows[0] == "No installed apps found":
            rows = []
        apps = []
        for r in rows:
            # Format: "123456789 App Name"
            r = r.split(' ', 1)
            if len(r) == 2:
                apps.append(int(r[0]))
        return apps
    def installed(self):
        ''' Returns the list of installed apps '''
        # Populate cache if not already done
        if self._installed is None:
            self._installed = self.get_current_state('list')
        return self._installed
    def is_installed(self, id):
        ''' Checks whether the given app is installed '''
        return int(id) in self.installed()
    def is_outdated(self, id):
        ''' Checks whether the given app is installed, but outdated '''
        return int(id) in self.outdated()
    def outdated(self):
        ''' Returns the list of installed, but outdated apps '''
        # Populate cache if not already done
        if self._outdated is None:
            self._outdated = self.get_current_state('outdated')
        return self._outdated
    def run(self, cmd):
        ''' Runs a command of the `mas` tool '''
        cmd.insert(0, self.mas_path)
        # Second positional arg is check_rc=False: callers inspect rc themselves.
        return self.module.run_command(cmd, False)
    def upgrade_all(self):
        ''' Upgrades all installed apps and sets the correct result data '''
        outdated = self.outdated()
        if not self.module.check_mode:
            self.check_signin()
            rc, out, err = self.run(['upgrade'])
            if rc != 0:
                self.module.fail_json(msg='Could not upgrade all apps: ' + out.rstrip())
        # Counted outside the check-mode guard so dry runs report would-be changes.
        self.count_upgrade += len(outdated)
def main():
    """Module entry point: parse parameters and drive install/uninstall/upgrade."""
    module = AnsibleModule(
        argument_spec=dict(
            id=dict(type='list', elements='int'),
            state=dict(type='str', default='present', choices=['absent', 'latest', 'present']),
            upgrade_all=dict(type='bool', default=False, aliases=['upgrade']),
        ),
        supports_check_mode=True
    )
    mas = Mas(module)
    if module.params['id']:
        apps = module.params['id']
    else:
        apps = []
    state = module.params['state']
    upgrade = module.params['upgrade_all']
    # Run operations on the given app IDs
    # (sorted(set(...)) deduplicates and makes the order deterministic)
    for app in sorted(set(apps)):
        if state == 'present':
            if not mas.is_installed(app):
                mas.app_command('install', app)
        elif state == 'absent':
            if mas.is_installed(app):
                # Ensure we are root
                if os.getuid() != 0:
                    module.fail_json(msg="Uninstalling apps requires root permissions ('become: yes')")
                mas.app_command('uninstall', app)
        elif state == 'latest':
            if not mas.is_installed(app):
                mas.app_command('install', app)
            elif mas.is_outdated(app):
                mas.app_command('upgrade', app)
    # Upgrade all apps if requested
    mas._outdated = None  # Clear cache: per-app upgrades above may have staled it
    if upgrade and mas.outdated():
        mas.upgrade_all()
    # Exit with the collected data
    mas.exit()
if __name__ == '__main__':
    # Ansible module entry point.
    main()
|
4,917 | 7da5a7476c807619bed805cb892774c23c04c6f7 | from django import forms
class LoginForm(forms.Form):
    """Username/password login form; widgets carry Bootstrap's form-control class."""
    # Field names are Spanish ("usuario"/"contraseña") and are part of the
    # submitted form's field names — do not rename without updating templates.
    usuario=forms.CharField(label="Usuario",max_length=20, required=True, widget=forms.TextInput(
        attrs={'class':'form-control'}
    ))
    contraseña=forms.CharField(label="Contraseña",max_length=20, widget=forms.PasswordInput(
        attrs={'class':'form-control'}
    ))
def readfasta(fasta):
    """Parse a FASTA file into a dict mapping sequence name -> sequence string.

    The header line's leading '>' is stripped; multi-line sequences are
    concatenated in order of appearance.

    Args:
        fasta: path to the FASTA file.

    Returns:
        dict of {name: sequence}.
    """
    seqs = {}
    # 'with' guarantees the handle is closed (the original leaked it) and
    # 'handle' avoids shadowing the builtin input().
    with open(fasta, 'r') as handle:
        name = None
        for line in handle:
            line = line.rstrip()
            if line.startswith('>'):
                name = line[1:]
                seqs[name] = []
            elif name is not None:
                # Ignore any sequence data appearing before the first header.
                seqs[name].append(line)
    for name in seqs:
        seqs[name] = ''.join(seqs[name])
    return seqs
# NOTE: this script is Python 2 (print statements, dict.iteritems,
# dict.values() indexing). It builds a majority-vote consensus over the
# aligned sequences in cons.fasta; all sequences are assumed equal length.
seqs = readfasta('cons.fasta')
length = len(seqs.values()[0])
# Per-position occurrence counts for each nucleotide.
nts = dict(A = [0] * length, C = [0] * length, G = [0] * length, T = [0] * length)
consensus = [None] * length
for name in seqs:
    n = 0
    for char in seqs[name]:
        if char in nts: nts[char][n] += 1
        else: print 'error'  # non-ACGT character (e.g. gap or N) is only reported
        n += 1
# Pick the most frequent nucleotide at every position (ties: dict order wins).
for n in range(0, length):
    max_n = 0
    max_nt = ''
    for nt in nts:
        if nts[nt][n] > max_n:
            max_n = nts[nt][n]
            max_nt = nt
    consensus[n] = max_nt
consensus = ''.join(consensus)
print consensus
# Print the count profile for each nucleotide (Rosalind CONS output format).
for k, v in nts.iteritems():
    print str(k) + ": " + ' '.join(map(lambda k: str(k), v))
4,919 | 6e8ef901fc614ecbba25df01f84a43c429f25cf6 | #!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as np
# Sample size per simulated dataset and number of simulation replications.
n_points = 100
n_sims = 1000
def simulate_one_realisation(size=None):
    """Draw one simulated dataset from a Normal(mean=1, sd=2) distribution.

    Args:
        size: number of points to draw. Defaults to the module-level
            ``n_points`` so existing zero-argument callers are unchanged.

    Returns:
        1-D numpy array of ``size`` samples.
    """
    if size is None:
        size = n_points
    return np.random.normal(1, 2, size=size)
def infer(sample):
    """Return point estimates for one sample: its mean and (population) std."""
    estimates = {}
    estimates['mean'] = np.mean(sample)
    estimates['std'] = np.std(sample)
    return estimates
# Monte-Carlo study: simulate n_sims datasets, estimate mean/std for each,
# then look at the sampling distribution of those estimates.
inference = [infer(simulate_one_realisation()) for _ in range(n_sims)]
# Quartiles (25/50/75%) of the estimated means across replications.
means = np.percentile([x['mean'] for x in inference], [25, 50, 75])
print(means)
plt.hist([x['mean'] for x in inference], bins=25)
plt.show()  # blocks until the window is closed
standard_error = np.percentile([x['std'] for x in inference], [25, 50, 75])
print(standard_error)
plt.hist([x['std'] for x in inference], bins=25)
plt.show()
# The sample standard deviation and the estimated standard error of the sample
# mean both have a slight right skew, but the skew of the sample standard
# deviation is much stronger.
# The sample standard deviation histogram has many modes, whereas the sample
# mean histogram is closer to symmetric with a single mode.
|
4,920 | 6f1b08a5ae1a07a30d89f3997461f4f97658f364 | import logging
from .const import (
DOMAIN,
CONF_SCREENS
)
from typing import Any, Callable, Dict, Optional
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.typing import (
ConfigType,
DiscoveryInfoType,
HomeAssistantType,
)
from homeassistant.core import callback
from homeassistant.helpers.event import async_track_state_change
from .dgus_protocol import create_protocol
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(
    hass: HomeAssistantType,
    config: ConfigType,
    async_add_entities: Callable,
    discovery_info: Optional[DiscoveryInfoType] = None,
) -> None:
    """Set up one DGUSScreen entity per screen entry in the platform config."""
    sensors = [DGUSScreen(hass, screen) for screen in config[CONF_SCREENS]]
    async_add_entities(sensors, update_before_add=True)
class StateConverters:
    """Convert Home Assistant entity states into integer values written to screen VPs."""
    @staticmethod
    def extract_attr(state, attr):
        """Return ``state.attributes[attr]`` if ``attr`` is set, else the raw state value."""
        if attr:
            return state.attributes[attr]
        else:
            return state.as_dict()['state']
    @staticmethod
    def send_int(state, settings, protocol):
        """Write the state (coerced to int) to the VP given by ``settings['vp']``."""
        vp = settings['vp']
        attr = settings.get('attribute', None)
        try:
            value = int(float(StateConverters.extract_attr(state, attr)))
            protocol.write_vp(vp, value)
        except Exception as er:
            _LOGGER.error("Can't send value: %s", str(er))
    @staticmethod
    def send_map(state, settings, protocol):
        """Map the state through ``settings['map']`` and write the result to the VP.

        Wrapped in try/except for consistency with send_int: a missing map key,
        a non-integer mapped value, or a serial error is logged instead of
        crashing the state-change listener (the original let these propagate).
        """
        vp = settings['vp']
        map_state = settings['map']
        attr = settings.get('attribute', None)
        try:
            key = str(StateConverters.extract_attr(state, attr))
            value = int(map_state[key])
            protocol.write_vp(vp, value)
        except Exception as er:
            _LOGGER.error("Can't send mapped value: %s", str(er))
class DGUSScreen(Entity):
    """Entity representing one DGUS serial touch screen.

    Mirrors configured Home Assistant entity states onto screen VPs and
    fires a bus event for values the screen sends back.
    """
    def __init__(self, hass, screen):
        self._state = None
        self._hass = hass
        self._name = screen['name']
        # Display settings per tracked entity, keyed by entity_id.
        self._state_track_settings = {
            entry['entity_id']: entry for entry in screen.get('show_states', [])}
        try:
            self._protocol = create_protocol(
                screen['port_name'], screen['bound_rate'], self.on_data)
        except Exception as er:
            # Port failure is only logged; _protocol stays unset in that case.
            _LOGGER.error("Can't open serial port %s, : %s",
                          screen['port_name'], str(er))
        # NOTE(review): screen['show_states'] is accessed directly here while
        # the dict above uses .get(..., []) — a config without 'show_states'
        # would raise KeyError at this line; confirm the key is mandatory.
        entiti_ids = [entry['entity_id'] for entry in screen['show_states']]
        async_track_state_change(hass, entiti_ids, self.state_listener)
    def state_listener(self, entity, old_state, new_state):
        # Dispatch on the converter type configured for this entity.
        settings = self._state_track_settings[entity]
        if settings['type'] == 'int':
            StateConverters.send_int(
                new_state, settings, self._protocol.protocol)
        elif settings['type'] == 'map':
            StateConverters.send_map(
                new_state, settings, self._protocol.protocol)
    @property
    def name(self):
        return self._name
    @property
    def state(self):
        # Never updated after __init__; the entity state stays None.
        return self._state
    def on_data(self, vp, value):
        """fire event for data, received from screen"""
        eventName = self.name + "_set_vp"
        self._hass.bus.fire(eventName, {"vp": vp, "value": value})
|
4,921 | 93fe16e5a97ec2652c4f6b8be844244d9776ea2e | from tkinter import *
# Everything in tkinter is a widget; the Tk root window must be created first.
root = Tk()
# Create two Label widgets as children of the root window.
myLabel1 = Label(root, text="Hello User!")
myLabel2 = Label(root, text="Welcome to medBOT")
# Place the labels using the grid geometry manager: grid positions widgets
# relative to each other by (row, column) rather than by absolute pixels.
myLabel1.grid(row=0, column=0)
myLabel2.grid(row=1, column=0)
# Start the event loop (blocks until the window is closed).
root.mainloop()
|
4,922 | 98d2196439a8dc3d511d176e61897aa67663a0b5 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: NVLGPSStatus.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='NVLGPSStatus.proto',
package='',
syntax='proto2',
serialized_options=None,
serialized_pb=_b('\n\x12NVLGPSStatus.proto\"\x8d\x03\n\x0cNVLGPSStatus\x12\x12\n\ntracker_id\x18\x01 \x02(\x0c\x12\x12\n\ngps_active\x18\x02 \x02(\x08\x12\x10\n\x08\x64\x61te_day\x18\x03 \x01(\x05\x12\x12\n\ndate_month\x18\x04 \x01(\x05\x12\x11\n\tdate_year\x18\x05 \x01(\x05\x12\x12\n\ntime_hours\x18\x06 \x01(\x05\x12\x14\n\x0ctime_minutes\x18\x07 \x01(\x05\x12\x14\n\x0ctime_seconds\x18\x08 \x01(\x05\x12\x19\n\x11time_microseconds\x18\t \x01(\x05\x12\x10\n\x08latitude\x18\n \x01(\x01\x12\x11\n\tlongitude\x18\x0b \x01(\x01\x12\x1f\n\x17speed_over_ground_knots\x18\x0c \x01(\x02\x12\x1b\n\x13track_angle_degrees\x18\r \x01(\x02\x12\x1a\n\x12magnetic_variation\x18\x0e \x01(\x02\x12\x12\n\nfuel_level\x18\x0f \x01(\x05\x12\x15\n\rvoltage_level\x18\x10 \x01(\x02\x12\x17\n\x0fvehicle_running\x18\x11 \x01(\x08')
)
_NVLGPSSTATUS = _descriptor.Descriptor(
name='NVLGPSStatus',
full_name='NVLGPSStatus',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='tracker_id', full_name='NVLGPSStatus.tracker_id', index=0,
number=1, type=12, cpp_type=9, label=2,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='gps_active', full_name='NVLGPSStatus.gps_active', index=1,
number=2, type=8, cpp_type=7, label=2,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='date_day', full_name='NVLGPSStatus.date_day', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='date_month', full_name='NVLGPSStatus.date_month', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='date_year', full_name='NVLGPSStatus.date_year', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='time_hours', full_name='NVLGPSStatus.time_hours', index=5,
number=6, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='time_minutes', full_name='NVLGPSStatus.time_minutes', index=6,
number=7, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='time_seconds', full_name='NVLGPSStatus.time_seconds', index=7,
number=8, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='time_microseconds', full_name='NVLGPSStatus.time_microseconds', index=8,
number=9, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='latitude', full_name='NVLGPSStatus.latitude', index=9,
number=10, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='longitude', full_name='NVLGPSStatus.longitude', index=10,
number=11, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='speed_over_ground_knots', full_name='NVLGPSStatus.speed_over_ground_knots', index=11,
number=12, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='track_angle_degrees', full_name='NVLGPSStatus.track_angle_degrees', index=12,
number=13, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='magnetic_variation', full_name='NVLGPSStatus.magnetic_variation', index=13,
number=14, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='fuel_level', full_name='NVLGPSStatus.fuel_level', index=14,
number=15, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='voltage_level', full_name='NVLGPSStatus.voltage_level', index=15,
number=16, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='vehicle_running', full_name='NVLGPSStatus.vehicle_running', index=16,
number=17, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=23,
serialized_end=420,
)
DESCRIPTOR.message_types_by_name['NVLGPSStatus'] = _NVLGPSSTATUS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
NVLGPSStatus = _reflection.GeneratedProtocolMessageType('NVLGPSStatus', (_message.Message,), dict(
DESCRIPTOR = _NVLGPSSTATUS,
__module__ = 'NVLGPSStatus_pb2'
# @@protoc_insertion_point(class_scope:NVLGPSStatus)
))
_sym_db.RegisterMessage(NVLGPSStatus)
# @@protoc_insertion_point(module_scope)
|
4,923 | 3b26181097025add5919e752aa53e57eea49c943 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
##########
# websocket-client
# https://pypi.python.org/pypi/websocket-client/
# sudo -H pip install websocket-client
#####
from websocket import create_connection

# Open a websocket connection to the ESP board on the local network.
ws = create_connection( "ws://192.168.1.132:81/python" )

# Colour payload sent to the board.
msg = '#0000FF'

# Fixed: the original used Python-2-only ``print`` statements; these
# single-argument ``print()`` calls behave identically under both
# Python 2 and Python 3.
print("Envoi d’un message à l’ESP")
print( msg )
ws.send( msg )
print("Fin de l’envoi\n")

print("Réception...")
result = ws.recv()
print("Reçu : '%s'" % result)

ws.close()
|
4,924 | cde62c5032109bb22aa81d813e30097dad80a9c3 | # -*- coding: utf-8 -*-
#
# Akamatsu CMS
# https://github.com/rmed/akamatsu
#
# MIT License
#
# Copyright (c) 2020 Rafael Medina García <rafamedgar@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""This module contains user profile views."""
from flask import current_app, flash, redirect, render_template, url_for
from flask_babel import _
from flask_login import current_user, fresh_login_required, login_required
from sqlalchemy.exc import IntegrityError
from akamatsu import crypto_manager, db
from akamatsu.views.admin import bp_admin
from akamatsu.forms import PasswordResetForm, ProfileForm
@bp_admin.route('/profile', methods=['GET', 'POST'])
@login_required
def profile_edit():
    """Show and process the profile edition form for the logged-in user.

    GET renders the form pre-filled from ``current_user``; POST applies
    the submitted values and commits.  A duplicate email is reported on
    the form's email field; any other failure rolls back and flashes a
    generic error.  All paths render the same template.
    """
    form = ProfileForm(obj=current_user)

    if form.validate_on_submit():
        form.populate_obj(current_user)

        try:
            db.session.commit()
            flash(_('Profile updated correctly'), 'success')

        except IntegrityError:
            # Email already exists.  Attach the message to the field itself:
            # the original wrote ``form.errors.email.append(...)``, which
            # raises AttributeError because ``form.errors`` is a plain dict.
            db.session.rollback()
            form.email.errors.append(_('Email is already registered'))

        except Exception:
            # Catch anything unknown
            db.session.rollback()
            flash(_('Failed to update profile, contact an administrator'), 'error')

    return render_template('admin/profile/edit.html', form=form)
@bp_admin.route('/profile/change-password', methods=['GET', 'POST'])
@fresh_login_required
def change_password():
    """Show form to update user password.

    Requires confirming current password (fresh login).  On success the
    user is redirected back to the profile editor; on failure the change
    is rolled back and the form is shown again with an error flash.
    """
    form = PasswordResetForm()

    if not form.validate_on_submit():
        return render_template('admin/profile/change_password.html', form=form)

    # Store the new password hash on the user record.
    current_user.password = crypto_manager.hash(form.password.data)

    try:
        db.session.commit()
    except Exception:
        db.session.rollback()
        current_app.logger.exception('Failed to update user password')
        flash(_('Error updating password, contact an administrator'), 'error')
        return render_template('admin/profile/change_password.html', form=form)

    flash(_('Password updated correctly'), 'success')
    return redirect(url_for('admin.profile_edit'))
|
4,925 | 9e8ddf6c35ebad329e1f5a48513e4bfaae0d9a6f | import collections
import datetime
import os
import pickle
import random
import time
from lastfm_utils import PlainRNNDataHandler
from test_util import Tester
# Identifiers of the supported datasets.
reddit = "subreddit"
lastfm = "lastfm"
instacart = "instacart"
#
# Choose dataset here
#
dataset = lastfm
#
# Specify the correct path to the dataset
#
dataset_path = os.path.expanduser('~') + '/datasets/'+dataset+'/4_train_test_split.pickle'
# One log file per calendar day of testing.
date_now = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d')
log_file = './testlog/'+str(date_now)+'-testing'
# Batch size is irrelevant to these baselines; it only exists because the
# data handler and test_util require one.
BATCHSIZE = 2
datahandler = PlainRNNDataHandler(dataset_path, BATCHSIZE, log_file)
num_train_batches = datahandler.get_num_training_batches()
num_test_batches = datahandler.get_num_test_batches()
num_items = datahandler.get_num_items()
#
# MAX_SESSION_LENGTH -1. Change this if you change the length in preprocessing
#
# Number of ranked predictions produced per time step.
num_predictions = 19
# Log dataset and baseline model
def log_config(baseline):
    """Log (and print) which dataset and baseline are being evaluated."""
    message = "\n".join([
        "------------------------------------------------------------------------",
        "DATASET: " + dataset,
        "BASELINE: " + baseline,
    ])
    datahandler.log_config(message)
    print(message)
# Create sequence of predictions for one session, with the 'most recent' baseline
def most_recent_sequence_predicions(sequence, sequence_length):
    """Per-step predictions for one session with a 'recently seen' stack.

    The stack is seeded with ``num_predictions`` random item ids; each
    observed item is promoted to (or inserted at) the front.  Returns a
    list with one top-k prediction list per time step.
    """
    stack = random.sample(range(1, num_items), num_predictions)
    predictions = []
    for step in range(sequence_length):
        item = sequence[step]
        if item in stack:
            stack.remove(item)
        stack.insert(0, item)
        predictions.append(stack[:num_predictions])
    return predictions
# The 'most recent' baseline. A stack where the most recent item in the session is pushed on top.
def most_recent():
    """Evaluate the 'most recent' baseline over the entire test set.

    Batches are pulled until one comes back smaller than half of
    BATCHSIZE -- presumably the data handler's end-of-data signal
    (mirrors the loop condition in the other baselines; TODO confirm).
    """
    log_config("most_recent")
    datahandler.reset_user_batch_data()
    tester = Tester()
    # x: input sequences, y: targets, sl: per-session lengths (assumed
    # from how they are indexed below -- verify against test_util).
    x, y, sl = datahandler.get_next_test_batch()
    while len(x) > int(BATCHSIZE/2):
        prediction_batch = []
        for i in range(len(x)):
            prediction_batch.append(most_recent_sequence_predicions(x[i], sl[i]))
        tester.evaluate_batch(prediction_batch, y, sl)
        x, y, sl = datahandler.get_next_test_batch()
    test_stats, _1, _2 = tester.get_stats_and_reset()
    print(test_stats)
    datahandler.log_test_stats(0, 0, test_stats)
# The 'most popular' baseline. Count frequence of all items, and predict the top k (20) most frequent items
def most_popular():
    """Evaluate the 'most popular' baseline.

    Counts item frequencies over the training batches, then predicts the
    same global top-k list at every test time step.
    """
    log_config("most_popular")
    datahandler.reset_user_batch_data()
    # One counter slot per item id (ids are assumed 1-based, hence +1).
    popularity_count = [0]*(num_items+1)
    tester = Tester()
    # Training
    x, y, sl = datahandler.get_next_train_batch()
    while len(x) > int(BATCHSIZE/2):
        for i in range(len(x)):
            sequence_length = sl[i]+1
            items = x[i][:sequence_length]
            for item in items:
                popularity_count[item] += 1
        x, y, sl = datahandler.get_next_train_batch()
    # Item ids sorted by ascending popularity; keep the most popular
    # num_predictions, best first.
    top_k = sorted(range(len(popularity_count)), key=lambda i:popularity_count[i])
    top_k = top_k[-num_predictions:]
    top_k = list(reversed(top_k))
    # Testing
    datahandler.reset_user_batch_data()
    x, y, sl = datahandler.get_next_test_batch()
    while len(x) > int(BATCHSIZE/2):
        prediction_batch = []
        for i in range(len(x)):
            sequence_predictions = []
            # The same global list is predicted for every time step.
            for j in range(sl[i]):
                sequence_predictions.append(top_k)
            prediction_batch.append(sequence_predictions)
        tester.evaluate_batch(prediction_batch, y, sl)
        x, y, sl = datahandler.get_next_test_batch()
    test_stats, _1, _2 = tester.get_stats_and_reset()
    print(test_stats)
    datahandler.log_test_stats(0, 0, test_stats)
# Item-kNN baseline. Count cooccurences of items. Predict items with highest cooccurences with the current item
def knn():
    """Evaluate the item-kNN baseline.

    Counts within-session co-occurrences during training, then for each
    test item predicts the items that co-occurred with it most often.
    """
    global num_train_batches
    log_config("kNN")
    datahandler.reset_user_batch_data()
    # cooccurrances[i] maps item j -> number of sessions where j followed i.
    cooccurrances = []
    for i in range(num_items):
        cooccurrances.append({})
    # Training
    x, y, sl = datahandler.get_next_train_batch()
    while len(x) > int(BATCHSIZE/2):
        # Progress indicator (counts down the remaining training batches).
        print("train", num_train_batches)
        num_train_batches -= 1
        for b in range(len(x)):
            sequence_length = sl[b]+1
            items = x[b][:sequence_length]
            # For each item in the session, increment cooccurences with the remaining items in the session
            for i in range(len(items)-1):
                for j in range(i+1, len(items)):
                    if items[j] not in cooccurrances[items[i]]:
                        cooccurrances[items[i]][items[j]] = 0
                    cooccurrances[items[i]][items[j]] += 1
        x, y, sl = datahandler.get_next_train_batch()
    # Find the highest cooccurences: preds[i] is item i's top-k neighbour
    # list, most frequent co-occurrence first.
    preds = [None]*num_items
    for i in range(num_items):
        d = cooccurrances[i]
        d = list(d.items())
        d = sorted(d, key=lambda x:x[1])
        d = [x[0] for x in d[-num_predictions:]]
        preds[i] = list(reversed(d))
    # Free the big counting structure before testing.
    del(cooccurrances)
    #Testing
    tester = Tester()
    datahandler.reset_user_batch_data()
    x, y, sl = datahandler.get_next_test_batch()
    while len(x) > int(BATCHSIZE/2):
        prediction_batch = []
        for b in range(len(x)):
            sequence_predictions = []
            for i in range(sl[b]):
                current_item = x[b][i]
                sequence_predictions.append(preds[current_item])
            prediction_batch.append(sequence_predictions)
        tester.evaluate_batch(prediction_batch, y, sl)
        x, y, sl = datahandler.get_next_test_batch()
    test_stats, _1, _2 = tester.get_stats_and_reset()
    print(test_stats)
    datahandler.log_test_stats(0, 0, test_stats)
most_recent()
most_popular()
knn() |
4,926 | 9fa1dab7cb0debf363ae0864af1407c87aad063a | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from nova.compute import multi_cell_list
from nova import test
class TestUtils(test.NoDBTestCase):
    """Unit tests for the cross-cell record sorting helpers.

    compare_records is expected to return a cmp()-style value:
    negative when the first record sorts first, positive when the
    second does, zero when they are equivalent under the sort keys.
    """

    def test_compare_simple(self):
        """Single sort key: direction flips the sign of the comparison."""
        dt1 = datetime.datetime(2015, 11, 5, 20, 30, 00)
        dt2 = datetime.datetime(1955, 10, 25, 1, 21, 00)
        inst1 = {'key0': 'foo', 'key1': 'd', 'key2': 456, 'key4': dt1}
        inst2 = {'key0': 'foo', 'key1': 's', 'key2': 123, 'key4': dt2}
        # Equal key0, inst == inst2
        ctx = multi_cell_list.RecordSortContext(['key0'], ['asc'])
        self.assertEqual(0, ctx.compare_records(inst1, inst2))
        # Equal key0, inst == inst2 (direction should not matter)
        ctx = multi_cell_list.RecordSortContext(['key0'], ['desc'])
        self.assertEqual(0, ctx.compare_records(inst1, inst2))
        # Ascending by key1, inst1 < inst2
        ctx = multi_cell_list.RecordSortContext(['key1'], ['asc'])
        self.assertEqual(-1, ctx.compare_records(inst1, inst2))
        # Descending by key1, inst2 < inst1
        ctx = multi_cell_list.RecordSortContext(['key1'], ['desc'])
        self.assertEqual(1, ctx.compare_records(inst1, inst2))
        # Ascending by key2, inst2 < inst1
        ctx = multi_cell_list.RecordSortContext(['key2'], ['asc'])
        self.assertEqual(1, ctx.compare_records(inst1, inst2))
        # Descending by key2, inst1 < inst2
        ctx = multi_cell_list.RecordSortContext(['key2'], ['desc'])
        self.assertEqual(-1, ctx.compare_records(inst1, inst2))
        # Ascending by key4, inst1 > inst2
        ctx = multi_cell_list.RecordSortContext(['key4'], ['asc'])
        self.assertEqual(1, ctx.compare_records(inst1, inst2))
        # Descending by key4, inst1 < inst2
        ctx = multi_cell_list.RecordSortContext(['key4'], ['desc'])
        self.assertEqual(-1, ctx.compare_records(inst1, inst2))

    def test_compare_multiple(self):
        """Multiple keys: an equal leading key defers to the next key."""
        # key0 should not affect ordering, but key1 should
        inst1 = {'key0': 'foo', 'key1': 'd', 'key2': 456}
        inst2 = {'key0': 'foo', 'key1': 's', 'key2': 123}
        # Should be equivalent to ascending by key1
        ctx = multi_cell_list.RecordSortContext(['key0', 'key1'],
                                                ['asc', 'asc'])
        self.assertEqual(-1, ctx.compare_records(inst1, inst2))
        # Should be equivalent to descending by key1
        ctx = multi_cell_list.RecordSortContext(['key0', 'key1'],
                                                ['asc', 'desc'])
        self.assertEqual(1, ctx.compare_records(inst1, inst2))

    def test_wrapper(self):
        """RecordWrapper exposes the comparison through rich operators."""
        inst1 = {'key0': 'foo', 'key1': 'd', 'key2': 456}
        inst2 = {'key0': 'foo', 'key1': 's', 'key2': 123}
        # Should sort by key1
        ctx = multi_cell_list.RecordSortContext(['key0', 'key1'],
                                                ['asc', 'asc'])
        iw1 = multi_cell_list.RecordWrapper(ctx, inst1)
        iw2 = multi_cell_list.RecordWrapper(ctx, inst2)
        # Check this both ways to make sure we're comparing against -1
        # and not just nonzero return from cmp()
        self.assertTrue(iw1 < iw2)
        self.assertFalse(iw2 < iw1)
        # Should sort reverse by key1
        ctx = multi_cell_list.RecordSortContext(['key0', 'key1'],
                                                ['asc', 'desc'])
        iw1 = multi_cell_list.RecordWrapper(ctx, inst1)
        iw2 = multi_cell_list.RecordWrapper(ctx, inst2)
        # Check this both ways to make sure we're comparing against -1
        # and not just nonzero return from cmp()
        self.assertTrue(iw1 > iw2)
        self.assertFalse(iw2 > iw1)
|
4,927 | 795f936423965063c44b347705c53fd1c306692f | # Generated by Django 3.0.3 on 2020-05-30 05:32
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration (makemigrations, 2020-05-30).

    Makes the two password-reset email fields on ``Site`` optional:
    ``blank=True`` with an empty-string default.
    """

    dependencies = [
        ('people', '0110_auto_20200530_0631'),
    ]

    operations = [
        migrations.AlterField(
            model_name='site',
            name='password_reset_email_from',
            field=models.CharField(blank=True, default='', max_length=100),
        ),
        migrations.AlterField(
            model_name='site',
            name='password_reset_email_title',
            field=models.CharField(blank=True, default='', max_length=100),
        ),
    ]
|
4,928 | be37a7596850050af58f735e60bdf13594715caf | a,b,c,d=map(int,input().split())
# a <= b and c <= d describe the intervals [a, b] and [c, d].  The product
# x*y over those intervals always attains its maximum at one of the four
# endpoint pairs, so taking the max of the corner products covers every
# sign combination the original if/elif ladder enumerated by hand
# (verified case by case against the original branches).
ans = max(a * c, a * d, b * c, b * d)
print(ans) |
4,929 | b453c8e9cc50066d1b5811493a89de384a000f37 | from django.shortcuts import render, redirect
from datetime import datetime
from fichefrais.models import FicheFrais, Etat, LigneFraisForfait, LigneFraisHorsForfait, Forfait
def home_admin(request):
    """Main menu view for administrators.

    Redirects anonymous visitors to the login page; otherwise renders
    the home_admin.html template with the expense-report model managers
    exposed to the template.
    """
    if not request.user.is_authenticated():
        return redirect("login")

    context = {
        "title": "Accueil",
        "user": request.user,
        # Model managers handed straight to the template for querying.
        "fiche_frais": FicheFrais.objects,
        "lignes_frais_forfait": LigneFraisForfait.objects,
        "lignes_frais_hors_forfait": LigneFraisHorsForfait.objects,
        "etat": Etat.objects,
        "today": datetime.now(),
        "frais_forfait": Forfait.objects,
    }
    return render(request, "fichefrais/administrateur/home_admin.html", context)
|
4,930 | f0ff15a2392b439a54c5ec304192117c08978755 | from api.serializers.cart import CartSerializer
from api.serializers.product import ProductSerializer, ProductPopular
from api.serializers.type import TypeSerializer
from api.serializers.user import UserCreationSerializer, UserSerializer
from api.serializers.history import HistorySerializer
from api.serializers.order import OrderSerializer
from api.serializers.comment import CommentSerializer
from api.serializers.reply import ReplySerializer
from api.serializers.reason import ReasonSerializer
from api.serializers.waitinglist import WaitinglistSerializer
|
4,931 | d5efbbb6e818e797652f304f3d022e04be245778 | import os
import unittest
import tempfile
from bpython import config
TEST_THEME_PATH = os.path.join(os.path.dirname(__file__), "test.theme")
class TestConfig(unittest.TestCase):
    """Tests for bpython theme and ini-file loading."""

    def test_load_theme(self):
        """load_theme fills the colour scheme and keeps supplied defaults."""
        struct = config.Struct()
        struct.color_scheme = dict()
        config.load_theme(struct, TEST_THEME_PATH, struct.color_scheme, dict())
        expected = {"keyword": "y"}
        # assertEquals is a deprecated alias; assertEqual is the
        # documented spelling.
        self.assertEqual(struct.color_scheme, expected)

        defaults = {"name": "c"}
        expected.update(defaults)
        config.load_theme(struct, TEST_THEME_PATH, struct.color_scheme, defaults)
        self.assertEqual(struct.color_scheme, expected)

    def test_load_config(self):
        """loadini reads keybindings from an ini file and applies defaults."""
        struct = config.Struct()
        with tempfile.NamedTemporaryFile() as f:
            # (The original also wrote an empty string first -- a no-op.)
            f.write('[keyboard]\nhelp = C-h\n'.encode('utf8'))
            f.flush()
            config.loadini(struct, f.name)
            self.assertEqual(struct.help_key, 'C-h')
            self.assertEqual(struct.backspace_key, '')
|
4,932 | 2e5bbc8c6a5eac2ed71c5d8619bedde2e04ee9a6 | __version__ = '1.1.3rc0'
|
4,933 | e12905efa0be7d69e2719c05b40d18c50e7e4b2e | import re
# Wordcount: count the occurrences of each word in that phrase.
def word_count(phrase):
    """Count the occurrences of each word in *phrase*.

    Matching is case-insensitive; any run of non-word characters (and
    underscores) acts as a word separator.

    Returns a dict mapping each word to its number of occurrences.
    """
    phrase = re.sub(r'\W+|_', ' ', phrase.lower(), flags=re.UNICODE)
    # Single O(n) pass; the original called list.count() once per word,
    # which is O(n^2) overall.
    counts = {}
    for word in phrase.split():
        counts[word] = counts.get(word, 0) + 1
    return counts
|
4,934 | 59a75f78c7a146dcf55d43be90f71abce2bcf753 | from tkinter import *
# Main application window.
root = Tk()
root.title("Calculator")
# Single-line display for operands and results, spanning all four columns.
e = Entry(root, width = 50, borderwidth = 5)
e.grid(row = 0, column = 0, columnspan = 4, padx = 10, pady = 20)
def button_click(number):
    """Append the pressed digit to the current contents of the entry."""
    current = e.get()
    e.delete(0, END)
    e.insert(0, current + str(number))
def _start_operation(operation):
    """Remember the chosen operation and first operand, then clear the entry.

    The four operator buttons below all did this with copy-pasted bodies;
    they now share this single helper.
    """
    global first_num
    global math
    math = operation
    first_num = e.get()
    e.delete(0, END)

def button_add():
    """Handler for the '+' button."""
    _start_operation("addition")

def button_mul():
    """Handler for the '*' button."""
    _start_operation("multiplication")

def button_sub():
    """Handler for the '-' button."""
    _start_operation("subtraction")

def button_div():
    """Handler for the '/' button."""
    _start_operation("division")
def button_equal():
    """Apply the stored operation to the stored operand and the entry value."""
    second = e.get()
    e.delete(0, END)
    # ``math`` holds exactly one operation name, so an elif chain is
    # equivalent to the original run of independent ifs.
    if math == "addition":
        e.insert(0, int(first_num) + int(second))
    elif math == "multiplication":
        e.insert(0, int(first_num) * int(second))
    elif math == "subtraction":
        e.insert(0, int(first_num) - int(second))
    elif math == "division":
        e.insert(0, int(first_num) / int(second))
def clear():
    """Erase everything currently shown in the entry widget."""
    e.delete(0, END)
#creating buttons
button_1 = Button(root, text = "1", height = 5, width = 10,command = lambda:button_click(1))
button_2 = Button(root, text = "2", height = 5, width = 10, command = lambda:button_click(2))
button_3 = Button(root, text = "3", height = 5, width = 10, command = lambda:button_click(3))
button_4 = Button(root, text = "4", height = 5, width = 10, command = lambda:button_click(4))
button_5 = Button(root, text = "5", height = 5, width = 10, command = lambda:button_click(5))
button_6 = Button(root, text = "6", height = 5, width = 10, command = lambda:button_click(6))
button_7 = Button(root, text = "7", height = 5, width = 10, command = lambda:button_click(7))
button_8 = Button(root, text = "8", height = 5, width = 10, command = lambda:button_click(8))
button_9 = Button(root, text = "9", height = 5, width = 10, command = lambda:button_click(9))
button_0 = Button(root, text = "0", height = 5, width = 10, command = lambda:button_click(0))
button_add = Button(root, text = "+", height = 5, width = 10, bg = "#A1CAE2", command = button_add)
button_mul = Button(root, text = "*", height = 5, width = 10, bg = "#A1CAE2", command = button_mul)
button_sub = Button(root, text = "-", height = 5, width = 10, bg = "#A1CAE2", command = button_sub)
button_div = Button(root, text = "/", height = 5, width = 10, bg = "#A1CAE2", command = button_div)
button_equal = Button(root, text = "=", height = 5, width = 10, bg = "#A1CAE2", command = button_equal)
button_clear = Button(root, text = "Clear", height = 5, width = 10, bg = "#A1CAE2", command = clear)
#placing buttons
button_1.grid(row = 3, column = 0)
button_2.grid(row = 3, column = 1)
button_3.grid(row = 3, column = 2)
button_4.grid(row = 2, column = 0)
button_5.grid(row = 2, column = 1)
button_6.grid(row = 2, column = 2)
button_7.grid(row = 1, column = 0)
button_8.grid(row = 1, column = 1)
button_9.grid(row = 1, column = 2)
button_0.grid(row = 4, column = 0)
button_add.grid(row = 4, column = 1)
button_sub.grid(row = 1, column = 4)
button_mul.grid(row = 2, column = 4)
button_div.grid(row = 3, column = 4)
button_equal.grid(row = 4, column = 2)
button_clear.grid(row = 4, column = 4)
root.mainloop() |
4,935 | 929f580e8e559f8309e19f72208bf4ff0d537668 | x = str(input("please input your name:"))
y = int(input("please input your age:"))
# Derive the current year instead of hard-coding 2017, so the
# "year you turn 100" answer stays correct in later years.
import datetime
p = datetime.date.today().year - y + 100
print("your name is:"+x)
print (p) |
4,936 | 00af9627242648a5a16a34a18bfc117945f1bc08 | import requests
if __name__ == "__main__":
    # Individual datacake webhook url -- change this to the webhook url
    # of your datacake device/product.
    datacake_url = "https://api.datacake.co/integrations/api/ae6dd531-4cf6-4966-b5c9-6c43939aae90/"

    # The serial number travels inside the payload so Datacake can route
    # the report to the matching device.
    serial = "python0001"

    # Some random demo readings.
    number_of_persons_a = 234
    number_of_persons_b = 345
    additional_payload = "bla bla"
    some_data = 23.456
    a_boolean = True

    # Assemble the JSON body first, then perform the API call.
    payload = {
        "number_of_persons_a": number_of_persons_a,
        "number_of_persons_b": number_of_persons_b,
        "additional_payload": additional_payload,
        "some_data": some_data,
        "a_boolean": a_boolean,
        "serial": serial
    }
    r = requests.post(datacake_url, json=payload)
    print(r)
|
4,937 | 4d31357936ce53b2be5f9a952b99df58baffe7ea | import webbrowser
import time
# Count down from 10, printing one number per second.
x = 10
while x > 0:
    print(x)
    time.sleep(1)
    x = x - 1
# Celebrate exactly once when the countdown reaches zero.  The original
# used ``while x == 0``, which never terminates and re-opens the browser
# tab forever.
if x == 0:
    print("MEOW")
    webbrowser.open("https://www.youtube.com/watch?v=IuysY1BekOE")
|
4,938 | fc04623db0d07f3a0a55ad49a74643a74e5203a6 | from sys import stdin
def main():
    # NOTE: Python 2 source (print statements, iterator .next());
    # run it under Python 2 only.
    lines = stdin
    # First line: n values follow, and we want the k-th largest.
    n, k = map(int, lines.next().split())
    if k > n:
        # Fewer than k elements -- no answer.
        print -1
    else:
        arr = map(int, lines.next().split())
        arr.sort(reverse = True)
        # The k-th largest value, printed twice as "x x".
        print "%d %d" % (arr[k - 1], arr[k - 1])
main()
|
4,939 | b377a652eec55b03f689a5097bf741b18549cba0 | #상관분석
"""
유클리디안 거리 공식의 한계점: 특정인의 점수가 극단적으로 높거나 낮다면 제대로된 결과를 도출해내기 어렵다.
=>상관분석:두 변수간의 선형적 관계를 분석하겠다는 의미
"""
#BTS와 유성룡 평점, 이황, 조용필
import matplotlib as mpl
mpl.rcParams['axes.unicode_minus']=False #한글 깨짐 방지
from matplotlib import font_manager, rc
import matplotlib.pyplot as plt
from math import sqrt
font_name = font_manager.FontProperties(fname='c:/Windows/Fonts/malgun.ttf').get_name()
rc('font',family=font_name)
critics = {
'조용필': {
'택시운전사': 2.5,
'겨울왕국': 3.5,
'리빙라스베가스': 3.0,
'넘버3': 3.5,
'사랑과전쟁': 2.5,
'세계대전': 3.0,
},
'BTS': {
'택시운전사': 1.0,
'겨울왕국': 4.5,
'리빙라스베가스': 0.5,
'넘버3': 1.5,
'사랑과전쟁': 4.5,
'세계대전': 5.0,
},
'강감찬': {
'택시운전사': 3.0,
'겨울왕국': 3.5,
'리빙라스베가스': 1.5,
'넘버3': 5.0,
'세계대전': 3.0,
'사랑과전쟁': 3.5,
},
'을지문덕': {
'택시운전사': 2.5,
'겨울왕국': 3.0,
'넘버3': 3.5,
'세계대전': 4.0,
},
'김유신': {
'겨울왕국': 3.5,
'리빙라스베가스': 3.0,
'세계대전': 4.5,
'넘버3': 4.0,
'사랑과전쟁': 2.5,
},
'유성룡': {
'택시운전사': 3.0,
'겨울왕국': 4.0,
'리빙라스베가스': 2.0,
'넘버3': 3.0,
'세계대전': 3.5,
'사랑과전쟁': 2.0,
},
'이황': {
'택시운전사': 3.0,
'겨울왕국': 4.0,
'세계대전': 3.0,
'넘버3': 5.0,
'사랑과전쟁': 3.5,
},
'이이': {'겨울왕국': 4.5, '사랑과전쟁': 1.0,
'넘버3': 4.0},
}
def drawGraph(data, name1, name2):
    """Scatter-plot the ratings two reviewers gave to movies both rated.

    data  -- ratings dict of the form {reviewer: {movie: score}}
    name1 -- reviewer plotted on the x axis
    name2 -- reviewer plotted on the y axis
    """
    plt.figure(figsize=(14, 8))
    xs = []  # name1's scores for the commonly rated movies
    ys = []  # name2's scores for the same movies
    # Fixed: the original iterated/indexed the module-level ``critics``
    # dict instead of the ``data`` parameter, so the argument was ignored.
    for movie in data[name1]:
        if movie in data[name2]:  # both reviewers rated this movie
            xs.append(data[name1][movie])
            ys.append(data[name2][movie])
            plt.text(data[name1][movie], data[name2][movie], movie)
    plt.plot(xs, ys, 'ro')
    plt.axis([0, 6, 0, 6])
    plt.xlabel(name1)
    plt.ylabel(name2)
    # plt.show()
drawGraph(critics, 'BTS','유성룡')
drawGraph(critics, '이황','조용필') #이황과 조용필의 상관계수가 높게 나옴
## 피어슨 상관계수:x,y의 변화하는 정도를 -1~1 사이로 기술한 통계치, x,y가 함께 변화하는 정도(공분산)/(x가 변하는 정도*y가 변하는 정도)
def sim_pearson(data, name1, name2):
    """Pearson correlation between two reviewers over their common movies.

    data  -- ratings dict {reviewer: {movie: score}}
    Raises ZeroDivisionError when the reviewers share no movies or one
    of them has zero variance over the shared movies (as the original did).
    """
    n = 0
    sum_x = sum_y = 0
    sum_x_sq = sum_y_sq = 0
    sum_xy = 0
    for movie, score in data[name1].items():
        if movie not in data[name2]:
            continue  # only movies rated by both count
        other = data[name2][movie]
        sum_x += score
        sum_y += other
        sum_x_sq += score ** 2
        sum_y_sq += other ** 2
        sum_xy += score * other
        n += 1
    numerator = sum_xy - (sum_x * sum_y) / n
    denominator = sqrt((sum_x_sq - sum_x ** 2 / n) *
                       (sum_y_sq - sum_y ** 2 / n))
    return numerator / denominator
print("BTS와 유성룡 피어슨 상관계수:", sim_pearson(critics,'BTS','유성룡'))
print("이황과 조용필 피어슨 상관계수:",sim_pearson(critics,'이황','조용필'))
#딕셔너리를 수행하면서 기준(BTS)과 다른 데이터(그외 관객)와의 상관계수->내림차순 정렬
def top_match(data, name, index=2, sim_function=sim_pearson):
    """Return the ``index`` reviewers most similar to ``name``.

    data         -- ratings dict {reviewer: {movie: score}}
    name         -- the reviewer everyone else is compared against
    index        -- how many of the closest reviewers to return
    sim_function -- similarity measure taking (data, name_a, name_b)

    Result: list of (similarity, reviewer) tuples, most similar first.
    """
    scored = [(sim_function(data, name, other), other)
              for other in data
              if other != name]
    scored.sort(reverse=True)
    return scored[:index]
#BTS와 성향이 가장 비슷한 3명 추출
print(top_match(critics, 'BTS',3))
#영화를 추천하는 시스템 구성, 예상되는 평점 출력
"""
*추천 시스템 구성 순서*
1)자신을 제외한 나머지 사람들과의 평점에 대한 유사도를 구함
추측되는 평점 = 유사도*(다른사람의)영화평점
2)추측되는 평점들의 총합을 구함
3)추측되는 평점들의 총합/유사도의 총합 =>모든 사람들을 근거로 했을 때 예상되는 평점이 추출됨
4)아직 안본 영화를 대상으로 예상되는 평점을 구하여, 예상되는 평점이 가장 높은 영화를 추천.
"""
def getRecommendation(data, person, sim_function=sim_pearson):
    """Recommend the unseen movie with the best similarity-weighted score.

    For every movie ``person`` has not rated, accumulate each other
    reviewer's rating weighted by that reviewer's similarity, then divide
    by the total similarity to get a predicted rating.  Returns the title
    with the highest prediction.
    """
    weighted = {}   # movie -> sum of (similarity * other's rating)
    sim_total = {}  # movie -> sum of similarities contributing above
    result = top_match(data, person, len(data))
    print("중간:", result)
    for sim, other in result:
        if sim < 0:
            continue  # skip negatively correlated reviewers
        for movie, rating in data[other].items():
            if movie in data[person]:
                continue  # person has already seen this one
            weighted[movie] = weighted.get(movie, 0) + sim * rating
            sim_total[movie] = sim_total.get(movie, 0) + sim
    ranked = [(total / sim_total[movie], movie)
              for movie, total in weighted.items()]
    ranked.sort(reverse=True)
    return ranked[0][1]
print("이이님에게는 ",getRecommendation(critics,'이이'),"영화를 가장 추천합니다.")
# Take the movies the anchor person (이이) has not seen, compute an expected
# rating for each, and recommend the movie with the highest expected rating.
# movie="가나다라"
# score_dic={}
# score_dic.setdefault(movie,0)
# print(score_dic)
# output ==> {'가나다라': 0}
4,940 | 5c15252611bee9cd9fbb5d91a19850c242bb51f1 | import json
import yaml
import argparse
import sys
def json2yaml(json_input, yaml_input):
    """Convert the JSON file at ``json_input`` to YAML at ``yaml_input``,
    echoing the YAML and a summary banner to stdout.
    """
    with open(json_input, 'r') as json_file:
        json_data = json.load(json_file)
    # BUG FIX: close (and flush) the YAML file before reading it back; the
    # original left every handle open, risking a truncated read-back of
    # still-buffered data.
    with open(yaml_input, 'w') as yaml_file:
        yaml.safe_dump(json_data, yaml_file, allow_unicode=True, default_flow_style=False)
    with open(yaml_input, 'r') as yaml_file:
        # load_all is lazy, so dump_all must consume it inside the `with`.
        yaml_data = yaml.load_all(yaml_file, Loader=yaml.FullLoader)
        print("\n" + yaml.dump_all(yaml_data))
    print("\n############################################################################")
    print("\nOUTPUT: JSON FILE " + json_input.split('/')[-1] + " CONVERTED TO YAML FILE " + yaml_input.split('/')[-1] + "\n")
    print("############################################################################\n")
def yaml2json(json_input, yaml_input):
    """Convert the YAML file at ``yaml_input`` to JSON at ``json_input``,
    echoing the JSON and a summary banner to stdout.
    """
    # BUG FIX: use context managers throughout; the original closed only the
    # JSON output handle and leaked the two read handles.
    with open(yaml_input, 'r') as yaml_file:
        yaml_data = yaml.safe_load(yaml_file)
    with open(json_input, 'w') as json_file:
        json.dump(yaml_data, json_file, indent=2)
    with open(json_input, 'r') as json_file:
        json_data = json_file.read()
    print("\n" + json_data)
    print("\n############################################################################")
    print("\nOUTPUT: YAML FILE " + yaml_input.split('/')[-1] + " CONVERTED TO JSON FILE " + json_input.split('/')[-1] + "\n")
    print("############################################################################\n")
def run():
    """Parse the command line and dispatch to json2yaml / yaml2json.

    Flags: -j/--json FILE and -y/--yaml FILE (both required) and
    -m/--mode {j2y,json2yaml,y2j,yaml2json}. With no arguments, or with
    -h/--help or -u/--usage, prints help/usage and exits with status 1.
    """
    argParse = argparse.ArgumentParser(description="CONVERT JSON TO YAML & YAML TO JSON")
    argParse.add_argument('-u', '--usage', help="COMMAND USAGE FORMAT")
    req_args_grp = argParse.add_argument_group('REQUIRED ARGUMENTS')
    req_args_grp.add_argument('-j', '--json', help="JSON FILE", required=True)
    req_args_grp.add_argument('-y', '--yaml', help="YAML FILE", required=True)
    req_args_grp.add_argument('-m', '--mode', help="CONVERSION MODE", choices=['j2y', 'json2yaml', 'y2j', 'yaml2json'], required=True)
    if len(sys.argv) == 1 or '-h' in sys.argv or '--help' in sys.argv:
        argParse.print_help()
        sys.exit(1)
    if '-u' in sys.argv or '--usage' in sys.argv:
        argParse.print_usage()
        sys.exit(1)
    # BUG FIX: the old `elif '-j' in sys.argv or '--json' in sys.argv and ...`
    # mixed `or`/`and` without parentheses, so e.g. passing only `--yaml`
    # silently did nothing. Let argparse itself enforce the required flags.
    arguments = argParse.parse_args()
    if arguments.mode in ('j2y', 'json2yaml'):
        json2yaml(arguments.json, arguments.yaml)
    else:  # choices= guarantees 'y2j' or 'yaml2json' here
        yaml2json(arguments.json, arguments.yaml)

if __name__ == '__main__':
    run()
4,941 | ec395b93cecf8431fd0df1aa0151ebd32244c367 |
class RetModel(object):
    """Uniform API response envelope: a status code, a human-readable
    message, and an optional payload.

    Note: the constructor keyword is ``message`` but the attribute is
    exposed as ``msg``.
    """

    def __init__(self, code=0, message="success", data=None):
        self.code = code   # 0 means success by default
        self.msg = message
        self.data = data   # arbitrary payload, None when absent
4,942 | 1f63ce2c791f0b8763aeae15df4875769f6de848 | # Generated by Django 2.1.7 on 2019-03-23 17:14
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django 2.1.7 migration: adds `hours` and `status`
    # columns to the `tasks` model of the `currency_exchange` app.

    dependencies = [
        ('currency_exchange', '0007_auto_20190323_1751'),
    ]

    operations = [
        migrations.AddField(
            model_name='tasks',
            name='hours',
            # High-precision decimal (12 fractional digits, 24 total).
            field=models.DecimalField(decimal_places=12, default=0, max_digits=24),
        ),
        migrations.AddField(
            model_name='tasks',
            name='status',
            # Free-form status string; new rows start as 'in progress'.
            field=models.CharField(default='in progress', max_length=100),
        ),
    ]
|
4,943 | e59a51641dc2966b0170678de064e2845e170cf5 | from typing import Tuple, List
import math
class Point:
    """A 2-D point with a list of geometric constraints attached to it."""

    def __init__(self, x, y):
        self.x = x
        self.y = y
        self.constraints = []  # constraints that reference this point

    def __str__(self):
        return "({}, {})".format(self.x, self.y)
class Line:
    """A 2-D line segment between two points.

    Each endpoint may be given as a `Point` or as an (x, y) tuple/list,
    which is converted to a `Point`. Any other type raises TypeError.
    """

    def __init__(self, point1, point2):
        if isinstance(point1, Point):
            self.p1 = point1
        elif isinstance(point1, (Tuple, List)):
            self.p1 = Point(*point1)
        else:
            raise TypeError("Incorrect types")
        if isinstance(point2, Point):
            self.p2 = point2
        # BUG FIX: this branch previously re-tested `point1`, so a Point
        # first argument plus an invalid second argument slipped through
        # (and a tuple second argument was unpacked from the wrong value).
        elif isinstance(point2, (Tuple, List)):
            self.p2 = Point(*point2)
        else:
            raise TypeError("Incorrect types")
        self.constraints = []  # constraints that reference this segment

    def middle(self):
        """Return the midpoint of the segment as a new Point."""
        x = (self.p1.x + self.p2.x) / 2
        y = (self.p1.y + self.p2.y) / 2
        return Point(x, y)

    def length(self):
        """Euclidean length of the segment."""
        return math.sqrt((self.p2.x - self.p1.x)**2 + (self.p2.y - self.p1.y)**2)

    def tang(self):
        """Slope (dy/dx); raises ZeroDivisionError for vertical segments."""
        return (self.p2.y - self.p1.y) / (self.p2.x - self.p1.x)

    def __str__(self):
        return f"p1={self.p1} p2={self.p2}"
class Constraints:
    """Base class for geometric constraints; concrete rules subclass it."""

    def __init__(self):
        pass
class Parallelism(Constraints):
    """Constrain ``line2`` to be parallel to ``line1``.

    ``get_const`` moves line2's second endpoint so line2 takes line1's
    slope while keeping its own length; line1 is never modified.
    """

    def __init__(self, line1, line2):
        super().__init__()
        self.line1 = line1  # reference line (slope source)
        self.line2 = line2  # line whose p2 endpoint is moved

    def get_const(self):
        """Relocate line2.p2 to p1 + (dx, dy) along line1's direction."""
        dx = self.line2.length() / math.sqrt(1 + self.line1.tang()**2)
        dy = self.line1.tang() * dx
        # BUG FIX: the y coordinate was previously offset from the *old*
        # p2.y instead of p1.y, so the result was neither parallel to
        # line1 nor length-preserving.
        self.line2.p2.x = self.line2.p1.x + dx
        self.line2.p2.y = self.line2.p1.y + dy
# Demo: build two segments, then force the second to be parallel to the first
# and print its new endpoints.
p1 = Point(0, 0)
p2 = Point(2, 6)
p3 = Point(7, 0)
p4 = Point(10, 6)
line11 = Line(p1, p2)
line22 = Line(p3, p4)
parall = Parallelism(line11, line22)
parall.get_const()
print(line22)
|
4,944 | 8917481957ecd4c9692cfa93df0b759feaa344af | while True:
print("running")
|
4,945 | bb81027ed5311e625591d98193997e5c7b533b70 | """
This is the interface that allows for creating nested lists.
You should not implement it, or speculate about its implementation
class NestedInteger(object):
def isInteger(self):
# @return {boolean} True if this NestedInteger holds a single integer,
# rather than a nested list.
def getInteger(self):
# @return {int} the single integer that this NestedInteger holds,
# if it holds a single integer
# Return None if this NestedInteger holds a nested list
def getList(self):
# @return {NestedInteger[]} the nested list that this NestedInteger holds,
# if it holds a nested list
# Return None if this NestedInteger holds a single integer
"""
# Version 1: DFS Recursive
class Solution(object):
    # @param {NestedInteger[]} nestedList a list of NestedInteger Object
    # @return {int} an integer
    def depthSum(self, nestedList):
        """Sum every integer in the nested list, weighted by its depth."""
        return self.dfs(nestedList, 1)

    def dfs(self, nestedList, depth):
        """Recursively accumulate integer * depth over one nesting level."""
        total = 0
        for element in nestedList:
            if element.isInteger():
                total += element.getInteger() * depth
            else:
                total += self.dfs(element.getList(), depth + 1)
        return total
# Version 2: BFS, Non-Recursive
class Solution(object):
    # @param {NestedInteger[]} nestedList a list of NestedInteger Object
    # @return {int} an integer
    def depthSum(self, nestedList):
        """Iterative BFS: each queue generation is one nesting level deeper."""
        if not nestedList:
            return 0
        from queue import Queue
        pending = Queue()
        for element in nestedList:
            pending.put(element)
        total = 0
        depth = 1
        while not pending.empty():
            # Drain exactly one level before bumping the depth weight.
            for _ in range(pending.qsize()):
                element = pending.get()
                if element.isInteger():
                    total += element.getInteger() * depth
                else:
                    for child in element.getList():
                        pending.put(child)
            depth += 1
        return total
4,946 | 458bc2b5f843e4c5bb3f9180ab2cbec7409b8d3e | # dates.py
"""Date/time parsing and manipulation functions
"""
# Some people, when confronted with a problem, think
# "I know, I'll use regular expressions."
# Now they have two problems.
# -- Jamie Zawinski
import datetime as dt
import time
import re
# Lower-case month names; matching is case-insensitive because the compiled
# patterns use re.IGNORECASE.
_months = [
    'january',
    'february',
    'march',
    'april',
    'may',
    'june',
    'july',
    'august',
    'september',
    'october',
    'november',
    'december',
]

# Formatting directives and corresponding regular expression
# Note: 'B' and 'b' both capture into group 'b', and 'I' (12-hour) captures
# into group 'H', so consumers see one canonical group name per field.
_regexps = {
    'B': r'(?P<b>' + '|'.join(_months) + ')',
    'b': r'(?P<b>' + '|'.join(m[0:3] for m in _months) + ')',
    'm': r'(?P<m>\d\d?)',
    'd': r'(?P<d>\d\d?)',
    'Y': r'(?P<Y>\d\d\d\d)',
    'y': r'(?P<y>\d\d)',
    'I': r'(?P<H>0?[1-9]|1[012])',
    'H': r'(?P<H>[01]?[0-9]|2[0-3])',
    'M': r'(?P<M>[0-5]\d)',
    'S': r'(?P<S>[0-5]\d)',
    'f': r'(?P<f>\d+)',
    'p': r'(?P<p>am|pm)',
}

# Support date formats and examples
_date_formats = [
    'B d, Y', # October 15, 2006
    'b d, Y', # Oct 15, 2006
    'B d Y', # October 15 2006
    'b d Y', # Oct 15 2006
    'B d', # October 15
    'b d', # Oct 15
    'Y/m/d', # 2006/10/15
    'Y-m-d', # 2006-10-15
    'm/d/Y', # 10/15/2006
    'm-d-Y', # 10-15-2006
    'm/d/y', # 10/15/06
    'm-d-y', # 10-15-06
    'y/m/d', # 06/10/15
    'y-m-d', # 06-10-15
]

# Supported time formats and examples
_time_formats = [
    'I:M:S.f p', # 3:05:29.108 PM
    'H:M:S.f', # 15:05:29.108
    'I:M:S p', # 3:05:29 PM
    'H:M:S', # 15:05:29
    'I:M p', # 3:05 PM
    'H:M', # 15:05
]
class CannotParse(Exception):
    """Raised when a string cannot be parsed as a date and/or time."""
    pass
def parse(string, format):
    """Attempt to parse the given string as a date in the given format.

    This is similar to `datetime.strptime`, but this can handle date strings
    with trailing characters. If it still fails to parse, raise a
    `CannotParse` exception.

    Examples::

        >>> parse('2010/08/28', '%Y/%m/%d')
        datetime.datetime(2010, 8, 28, 0, 0)
        >>> parse('2010/08/28 extra stuff', '%Y/%m/%d')
        datetime.datetime(2010, 8, 28, 0, 0)
        >>> parse('2010/08/28', '%m/%d/%y')
        Traceback (most recent call last):
        CannotParse: time data '2010/08/28' does not match format '%m/%d/%y'
    """
    # Count the number of spaces in the format string (N), and
    # truncate everything after the (N+1)th space
    spaces = format.count(' ') + 1
    string = ' '.join(string.split()[:spaces])
    # BUG FIX: `except ValueError, err` is Python-2-only syntax and a
    # SyntaxError on Python 3; `as` works on Python 2.6+ and 3.
    try:
        return dt.datetime.strptime(string, format)
    except ValueError as err:
        raise CannotParse(str(err))
def format_regexp(simple_format):
    r"""Translate a simplified date/time format into ``(format, regexp)``.

    ``format`` is the equivalent `strptime` format string, and ``regexp``
    is a regular expression matching dates or times in that format. The
    simplified syntax is a subset of `strptime` directives with the
    leading ``%`` characters removed; other characters pass through.

    Examples::

        >>> format_regexp('Y/m/d')
        ('%Y/%m/%d', '(?P<Y>\\d\\d\\d\\d)/(?P<m>\\d\\d?)/(?P<d>\\d\\d?)')
        >>> format_regexp('H:M:S')
        ('%H:%M:%S', '(?P<H>[01]?[0-9]|2[0-3]):(?P<M>[0-5]\\d):(?P<S>[0-5]\\d)')
    """
    fmt_parts = []
    rx_parts = []
    for char in simple_format:
        if char in _regexps:
            fmt_parts.append('%' + char)
            rx_parts.append(_regexps[char])
        else:
            fmt_parts.append(char)
            rx_parts.append(char)
    return (''.join(fmt_parts), ''.join(rx_parts))
def _compiled_format_regexps(date_formats, time_formats):
    """Return ``(format, compiled_regexp)`` pairs for every combination
    of ``date_formats`` and ``time_formats``.

    Order matters downstream: 'date time' combinations first, then
    date-only formats, then time-only formats.
    """
    combos = [df + ' ' + tf for df in date_formats for tf in time_formats]
    combos += list(date_formats)
    combos += list(time_formats)
    pairs = []
    for combo in combos:
        fmt, rx = format_regexp(combo)
        pairs.append((fmt, re.compile(rx, re.IGNORECASE)))
    return pairs
def guess_format(string):
    """Try to guess the date/time format of ``string``, or raise a
    `CannotParse` exception. Leading and trailing text may surround the
    date/time.

    Examples::

        >>> guess_format('2010/01/28 13:25:49')
        '%Y/%m/%d %H:%M:%S'
        >>> guess_format('01/28/10 1:25:49 PM')
        '%m/%d/%y %I:%M:%S %p'
        >>> guess_format('Aug 15 2009 15:24')
        '%b %d %Y %H:%M'
        >>> guess_format('[[2010-09-25 14:19:24]]')
        '%Y-%m-%d %H:%M:%S'
    """
    for fmt, regexp in _compiled_format_regexps(_date_formats, _time_formats):
        if regexp.search(string):
            return fmt
    # No supported format matched anywhere in the string.
    raise CannotParse("Could not guess date/time format in: %s" % string)
def guess_file_date_format(filename):
    """Scan ``filename`` line by line with `guess_format` and return the
    format string of the first date/time found. Raise `CannotParse` if
    the file contains none.
    """
    # BUG FIX: use a context manager; the original iterated over a bare
    # open() and never closed the file handle.
    with open(filename) as fh:
        for line in fh:
            try:
                return guess_format(line)
            except CannotParse:
                pass  # keep scanning subsequent lines
    raise CannotParse("No date/time strings found in '%s'" % filename)
def date_chop(line, dateformat='%m/%d/%y %I:%M:%S %p', resolution=60):
    """Given a ``line`` of text, get a date/time formatted as ``dateformat``,
    and return a `datetime` object truncated to the nearest ``resolution``
    seconds. If ``line`` fails to match ``dateformat``, a `CannotParse`
    exception is raised.

    Examples::

        >>> date_chop('1976/05/19 12:05:17', '%Y/%m/%d %H:%M:%S', 60)
        datetime.datetime(1976, 5, 19, 12, 5)
        >>> date_chop('1976/05/19 12:05:17', '%Y/%m/%d %H:%M:%S', 3600)
        datetime.datetime(1976, 5, 19, 12, 0)
    """
    timestamp = parse(line, dateformat)
    # Round the timestamp to the given resolution:
    # first convert to seconds-since-epoch...
    epoch_seconds = int(time.mktime(timestamp.timetuple()))
    # BUG FIX: use floor division. On Python 3, `/` is true division, so
    # `(s / r) * r` no longer truncated to the resolution boundary.
    rounded_seconds = (epoch_seconds // resolution) * resolution
    # ...then convert back to a datetime.
    return dt.datetime.fromtimestamp(rounded_seconds)
|
4,947 | 640eae824e43e394bf0624dd4cf7dcec78f43604 | #Eyal Reis - 203249354
from view import View
def main():
    """Program entry point: build the View and start its Tk event loop."""
    view = View()
    view.root.mainloop()


if __name__ == "__main__":
    main()
|
4,948 | 4a886437727ed6b48206e12b686a59a1d2a1c489 | # Counts number of dumbbell curls in the video
import cv2
import mediapipe as mp
import base
import math
import numpy as np
class PoseEstimator(base.PoseDetector):
    """Pose detector specialised for counting dumbbell-curl repetitions.

    Tracks the angle at a middle landmark (e.g. the elbow), converts it to
    a 0-100% curl-progress value, and credits a half-rep at each end of the
    range so a full down-up cycle counts as one rep.
    """

    def __init__(self, mode=False, upperBody = False, smooth=True, detectConf=.5, trackConf=.5,
                 outFile="output.mp4", outWidth=720, outHeight=1280):
        super().__init__(mode, upperBody, smooth, detectConf, trackConf, outFile, outWidth, outHeight)
        self.count = 0  # completed reps (accumulated in two 0.5 steps)
        self.dir = 0    # 0 = moving toward the top, 1 = moving toward the bottom

    def findAngle(self, img, p1, p2, p3, draw=True):
        """Return the angle (degrees, in [0, 360)) at landmark p2 formed by
        p1-p2-p3, optionally drawing the joint overlay on ``img``.

        p1/p2/p3 index into self.lms; entries are unpacked as [id, x, y]
        (presumably filled by base.PoseDetector.findPosition — confirm).
        """
        x1,y1 = self.lms[p1][1:]
        x2,y2 = self.lms[p2][1:]
        x3,y3 = self.lms[p3][1:]
        # Difference of the two segment bearings, normalized to [0, 360).
        angle = math.degrees(math.atan2(y3-y2,x3-x2) - math.atan2(y1-y2,x1-x2))
        if angle<0:
            angle += 360
        if draw:
            # Segments, filled+outlined circles at each joint, and the angle label.
            cv2.line(img, (x1,y1), (x2,y2), (255,255,255) ,2)
            cv2.line(img, (x3,y3), (x2,y2), (255,255,255) ,2)
            cv2.circle(img, (x1,y1), 8, (0,0,255), cv2.FILLED)
            cv2.circle(img, (x1,y1), 12, (0,0,255), 2)
            cv2.circle(img, (x2,y2), 8, (0,0,255), cv2.FILLED)
            cv2.circle(img, (x2,y2), 12, (0,0,255), 2)
            cv2.circle(img, (x3,y3), 8, (0,0,255), cv2.FILLED)
            cv2.circle(img, (x3,y3), 12, (0,0,255), 2)
            cv2.putText(img, str(int(angle)), (x2-40,y2+50), cv2.FONT_HERSHEY_PLAIN, 2, (255,0,255), 2)
        return angle

    def countReps(self, img, p1, p2, p3):
        """Update the rep counter from the current joint angle and draw the
        count plus a progress bar onto ``img``."""
        angle = self.findAngle(img, p1, p2, p3)
        # Map the raw angle range 210..320 degrees onto 0..100% progress.
        perc = np.interp(angle, (210,320), (0,100))
        color = (0,255,0)
        # Top of the movement: credit a half-rep once per direction change.
        if perc > 95:
            color = (0,0,255)
            if self.dir == 0:
                self.count += .5
                self.dir = 1
        # Bottom of the movement: credit the other half-rep.
        if perc == 0:
            color = (255,0,0)
            if self.dir == 1:
                self.count += .5
                self.dir = 0
        cv2.putText(img, f'{int(self.count)}', (30,120), cv2.FONT_HERSHEY_PLAIN, 9, (255,0,0), 4)
        # Vertical progress bar: 0% at y=800 (bottom), 100% at y=200 (top).
        bar = np.interp(perc, (0,100), (800,200))
        cv2.rectangle(img, (50,200), (100,800), color, 3)
        cv2.rectangle(img, (50,int(bar)), (100,800), color, cv2.FILLED)
        cv2.putText(img, f'{int(perc)}%', (30,870), cv2.FONT_HERSHEY_PLAIN, 4, (255,0,0), 4)
def main():
    """Run curl counting on media/1.mp4, showing annotated frames until the
    user presses 'q'. Note: cap.read()'s success flag is ignored, so the
    loop will error out when the video ends rather than exiting cleanly."""
    cap = cv2.VideoCapture("media/1.mp4")
    estimator = PoseEstimator()
    while True:
        _, img = cap.read()
        img = cv2.resize(img, (720, 1280))
        img = estimator.findPose(img)
        lms = estimator.findPosition(img, draw=False)
        # Landmarks 11/13/15 are presumably shoulder/elbow/wrist — confirm
        # against the mediapipe pose landmark map.
        if len(lms)>28:
            estimator.countReps(img,11,13,15)
        # estimator.writeFrame(img)
        cv2.imshow("Correct Pose Estimation", img)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            cap.release()
            cv2.destroyAllWindows()
            break

if __name__ == "__main__":
    main()
4,949 | 9f478df4ff19cfe6c6559b6489c874d49377b90e | """
A module to generate simulated 2D time-series SOSS data
Authors: Joe Filippazzo
"""
import os
from pkg_resources import resource_filename
import multiprocessing
import time
from functools import partial
import warnings
import numpy as np
from astropy.io import fits
from bokeh.plotting import figure, show
from hotsoss import utils
from svo_filters import svo
from scipy.interpolate import interp1d
from scipy.ndimage.interpolation import rotate
from scipy.interpolate import interp2d, RectBivariateSpline
try:
import webbpsf
except ImportError:
print("Could not import `webbpsf` package. Functionality limited.")
warnings.simplefilter('ignore')
def calculate_psf_tilts():
    """
    Calculate the tilt of the psf at the center of each column
    using all binned pixels in the given wavelength calibration file
    for both orders and save to file
    """
    for order in [1, 2]:
        # Output file for this order's 2048 tilt angles
        path = 'files/SOSS_PSF_tilt_order{}.npy'.format(order)
        psf_file = resource_filename('awesimsoss', path)

        # Dimensions of the SUBSTRIP256 subarray
        subarray = 'SUBSTRIP256'
        X = range(2048)
        Y = range(256)

        # Get the wave map
        wave_map = utils.wave_solutions(subarray, order).astype(float)

        # Get the y-coordinate of the trace polynomial in this column
        # (center of the trace)
        coeffs = trace_polynomials(subarray=subarray, order=order)
        trace = np.polyval(coeffs, X)

        # Interpolate to get the wavelength value at the center
        wave = interp2d(X, Y, wave_map)

        # Get the wavelength of the trace center in each column
        trace_wave = []
        for x, y in zip(X, trace):
            trace_wave.append(wave(x, y)[0])

        # For each column wavelength (defined by the wavelength at
        # the trace center) define an isowavelength contour
        angles = []
        for n, x in enumerate(X):
            w = trace_wave[x]

            # Edge cases: sentinel wavelengths beyond either end of the trace.
            # NOTE(review): `trace_wave[x-1]` wraps to the last element when
            # x == 0 rather than raising IndexError — confirm intent.
            try:
                w0 = trace_wave[x-1]
            except IndexError:
                w0 = 0
            try:
                w1 = trace_wave[x+1]
            except IndexError:
                w1 = 10

            # Define the width of the wavelength bin as half-way
            # between neighboring points
            dw0 = np.mean([w0, w])
            dw1 = np.mean([w1, w])

            # Get the coordinates of all the pixels in that range
            yy, xx = np.where(np.logical_and(wave_map >= dw0, wave_map < dw1))

            # Find the angle between the vertical and the tilted wavelength bin
            if len(xx) >= 1:
                angle = get_angle([xx[-1], yy[-1]], [x, trace[x]])
            else:
                angle = 0

            # Don't flip them upside down
            angle = angle % 180

            # Add to the array
            angles.append(angle)

        # Save the file
        np.save(psf_file, np.array(angles))
        print('Angles saved to', psf_file)
def nuke_psfs(tilts=True, raw=True, final=True):
    """Regenerate all psf data products from scratch.

    tilts: recompute the per-column psf tilt angles
    raw:   recompute the raw WebbPSF psf cubes for each filter
    final: rebuild the rotated/interpolated psf cubes for trace assembly
    """
    if tilts:
        calculate_psf_tilts()
    for filt in ('CLEAR', 'F277W'):
        if raw:
            generate_SOSS_psfs(filt)
        if final:
            SOSS_psf_cube(filt=filt, generate=True)
def generate_SOSS_ldcs(wavelengths, ld_profile, grid_point, model_grid='', subarray='SUBSTRIP256', n_bins=100, plot=False, save=''):
    """
    Generate a lookup table of limb darkening coefficients for full
    SOSS wavelength range

    Parameters
    ----------
    wavelengths: sequence
        The wavelengths at which to calculate the LDCs
    ld_profile: str
        A limb darkening profile name supported by
        `ExoCTK.ldc.ldcfit.ld_profile()`
    grid_point: dict, sequence
        The stellar parameters [Teff, logg, FeH] or stellar model
        dictionary from `ExoCTK.modelgrid.ModelGrid.get()`
    n_bins: int
        The number of bins to break up the grism into
    save: str
        The path to save to file to

    Returns
    -------
    np.ndarray or None
        (n_wavelengths, n_coefficients) coefficient array, or None when
        exoctk is unavailable or grid_point cannot be resolved.

    Example
    -------
    from awesimsoss.sim2D import awesim
    lookup = awesim.soss_ldc('quadratic', [3300, 4.5, 0])
    """
    # exoctk is an optional dependency; bail out quietly when missing.
    try:
        from exoctk import modelgrid
        from exoctk.limb_darkening import limb_darkening_fit as lf
    except ImportError:
        return

    # Get the model grid
    if not isinstance(model_grid, modelgrid.ModelGrid):
        model_grid = modelgrid.ModelGrid(os.environ['MODELGRID_DIR'], resolution=700)

    # Load the model grid
    # NOTE(review): this unconditionally rebuilds the grid, discarding the one
    # created (or passed in) just above — looks redundant; confirm intent.
    model_grid = modelgrid.ModelGrid(os.environ['MODELGRID_DIR'], resolution=700, wave_rng=(0.6, 2.8))

    # Get the grid point
    if isinstance(grid_point, (list, tuple, np.ndarray)):
        grid_point = model_grid.get(*grid_point)

    # Abort if no stellar dict
    if not isinstance(grid_point, dict):
        print('Please provide the grid_point argument as [Teff, logg, FeH] or ExoCTK.modelgrid.ModelGrid.get(Teff, logg, FeH).')
        return

    # Break the bandpass up into n_bins pieces
    bandpass = svo.Filter('NIRISS.GR700XD', n_bins=n_bins, verbose=False)

    # Calculate the LDCs
    ldc_results = lf.ldc(None, None, None, model_grid, [ld_profile],
                         bandpass=bandpass, grid_point=grid_point.copy(),
                         mu_min=0.08, verbose=False)

    # Interpolate the LDCs to the desired wavelengths
    coeff_table = ldc_results[ld_profile]['coeffs']
    coeff_cols = [c for c in coeff_table.colnames if c.startswith('c')]
    coeffs = [np.interp(wavelengths, coeff_table['wavelength'], coeff_table[c]) for c in coeff_cols]

    return np.array(coeffs).T
def generate_SOSS_psfs(filt):
    """
    Generate a cube of the psf at 100 wavelengths from the min to the max
    wavelength and write it to files/SOSS_<filt>_PSF.fits (a primary HDU
    with the psf cube plus a 'WAV' extension with the wavelengths in um).

    Parameters
    ----------
    filt: str
        The filter to use, ['CLEAR', 'F277W']
    """
    # Destination file inside the installed package
    file = resource_filename('awesimsoss', 'files/SOSS_{}_PSF.fits'.format(filt))

    # Get the NIRISS class from webbpsf and set the filter
    ns = webbpsf.NIRISS()
    ns.filter = filt
    ns.pupil_mask = 'GR700XD'

    # Get the min and max wavelengths, clipped to the instrument's limits
    wavelengths = utils.wave_solutions('SUBSTRIP256').flatten()
    wave_min = np.max([ns.SHORT_WAVELENGTH_MIN * 1E6, np.min(wavelengths[wavelengths > 0])])
    wave_max = np.min([ns.LONG_WAVELENGTH_MAX * 1E6, np.max(wavelengths[wavelengths > 0])])

    # webbpsf.calc_datacube can only handle 100 but that's sufficient
    W = np.linspace(wave_min, wave_max, 100)*1E-6

    # Calculate the psfs
    print("Generating SOSS psfs. This takes about 8 minutes...")
    start = time.time()
    PSF = ns.calc_datacube(W, oversample=1)[0].data
    print("Finished in", time.time()-start)

    # Make the HDUList
    psfhdu = fits.PrimaryHDU(data=PSF)
    wavhdu = fits.ImageHDU(data=W*1E6, name='WAV')
    hdulist = fits.HDUList([psfhdu, wavhdu])

    # Write the file
    hdulist.writeto(file, overwrite=True)
    hdulist.close()
def get_angle(pf, p0=np.array([0, 0]), pi=None):
    """Compute angle (in degrees) for pf-p0-pi corner

    Parameters
    ----------
    pf: sequence
        The coordinates of a point on the rotated vector
    p0: sequence
        The coordinates of the pivot
    pi: sequence
        The coordinates of the fixed vector (defaults to p0 + [0, 1],
        i.e. the vertical)

    Returns
    -------
    float
        The signed angle in degrees
    """
    if pi is None:
        pi = p0 + np.array([0, 1])
    v0 = np.array(pf) - np.array(p0)
    v1 = np.array(pi) - np.array(p0)
    # BUG FIX: `np.math` was an undocumented alias for the stdlib math
    # module and was removed in NumPy 2.0; np.arctan2 is equivalent here.
    angle = np.arctan2(np.linalg.det([v0, v1]), np.dot(v0, v1))
    return np.degrees(angle)
def get_SOSS_psf(wavelength, filt='CLEAR', psfs=None, cutoff=0.005, plot=False):
    """
    Retrieve the SOSS psf for the given wavelength, scale the total flux
    to 1, and (optionally) plot it.

    Parameters
    ----------
    wavelength: float
        The wavelength to retrieve [um]; clamped to the interpolator's range
    filt: str
        The filter to use, ['CLEAR', 'F277W']
    psfs: scipy.interpolate.interp1d object (optional)
        The wavelength -> psf interpolator; built from the packaged psf
        file when omitted
    cutoff: float
        Currently unused (background-removal threshold, see commented line)
    plot: bool
        Also plot the psf

    Returns
    -------
    np.ndarray
        The 2D psf for the input wavelength
    """
    if psfs is None:
        # Load the packaged psf cube lazily and build the interpolator.
        file = resource_filename('awesimsoss', 'files/SOSS_{}_PSF.fits'.format(filt))
        cube = fits.getdata(file).swapaxes(-1, -2)
        wave = fits.getdata(file, ext=1)
        psfs = interp1d(wave, cube, axis=0, kind=3)

    # Clamp the wavelength to the interpolator's support.
    if wavelength < psfs.x[0]:
        wavelength = psfs.x[0]
    if wavelength > psfs.x[-1]:
        wavelength = psfs.x[-1]

    # Interpolate, then normalize the psf to unit total flux.
    psf = psfs(wavelength)
    psf *= 1./np.sum(psf)

    # Remove background (disabled)
    # psf[psf < cutoff] = 0

    if plot:
        fig = figure()
        fig.image([psf], x=0, y=0, dw=psf.shape[0], dh=psf.shape[1])
        show(fig)

    # BUG FIX: the psf was previously returned only when plot=False; the
    # plotting branch fell off the end and returned None.
    return psf
def make_frame(psfs):
    """
    Overlay a sequence of psf slices onto a single 2D SOSS frame.

    Slice n is added with its left edge at column n of a padded canvas,
    then the 38-pixel apron on each side is trimmed off.

    Parameters
    ----------
    psfs: sequence
        The per-column psf slices, each 76 columns wide

    Returns
    -------
    np.ndarray
        The (256, 2048) composite frame
    """
    canvas = np.zeros((256, 2124))
    for col, psf in enumerate(psfs):
        canvas[:, col:col + 76] += psf
    # Drop the 38-pixel padding on either side.
    return canvas[:, 38:-38]
def psf_lightcurve(psf, ld_coeffs, rp, time, tmodel, plot=False):
    """
    Expand a single-wavelength psf into a time series, optionally
    modulated by a batman transit light curve.

    Parameters
    ----------
    psf: sequence
        The flux-scaled psf for the given wavelength
    ld_coeffs: sequence
        The limb darkening coefficients (None for no planet)
    rp: float
        The planet radius (None for no planet)
    time: sequence
        The time axis for the TSO
    tmodel: batman.transitmodel.TransitModel
        The transit model of the planet (None for no planet)
    plot: bool
        Unused; retained for interface compatibility

    Returns
    -------
    np.ndarray
        The psf replicated along the time axis, shape
        (len(time),) + psf.shape, scaled by the light curve if a full
        planet description was supplied
    """
    # One copy of the psf per time step.
    flux = np.tile(psf, (len(time), 1, 1))

    # Apply the transit only when the full planet description is present;
    # the string comparison avoids importing batman when it is absent.
    has_planet = (ld_coeffs is not None and rp is not None
                  and str(type(tmodel)) == "<class 'batman.transitmodel.TransitModel'>")
    if has_planet:
        # Set the wavelength dependent orbital parameters and evaluate.
        tmodel.u = ld_coeffs
        tmodel.rp = rp
        lightcurve = tmodel.light_curve(tmodel)
        flux *= lightcurve[:, None, None]

    return flux
def psf_tilts(order):
    """
    Load (computing first, if necessary) the psf tilt angles for an order.

    Parameters
    ----------
    order: int
        The order to use, [1, 2]

    Returns
    -------
    np.ndarray
        The angle from the vertical of the psf in each of the 2048 columns
    """
    if order not in [1, 2]:
        raise ValueError('Only orders 1 and 2 are supported.')
    psf_file = resource_filename(
        'awesimsoss', 'files/SOSS_PSF_tilt_order{}.npy'.format(order))
    # Build the tilt file on first use.
    if not os.path.exists(psf_file):
        calculate_psf_tilts()
    return np.load(psf_file)
def put_psf_on_subarray(psf, y, frame_height=256):
    """Place a 2D psf onto a subarray column, centered at row ``y``.

    Parameters
    ----------
    psf: sequence
        The 2D (square) psf
    y: float
        The grid y value to place the center of the psf
    frame_height: int
        The number of rows in the output frame

    Returns
    -------
    np.ndarray
        The (frame_height, psf-width) frame with the interpolated psf;
        samples falling outside the psf footprint are zeroed
    """
    # Create spline generator
    dim = psf.shape[0]
    mid = (dim - 1.0) / 2.0
    # BUG FIX: `np.float` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `float` is exactly what it aliased.
    arr = np.arange(dim, dtype=float)
    spline = RectBivariateSpline(arr, arr, psf.T, kx=3, ky=3, s=0)

    # Create output frame, shifted so the psf center lands on row y
    yg, xg = np.indices((frame_height, dim), dtype=np.float64)
    yg += mid - y

    # Resample onto the subarray
    frame = spline.ev(xg, yg)

    # Fill extrapolated (out-of-footprint) points with zeros
    extrapol = (((xg < -0.5) | (xg >= dim - 0.5)) |
                ((yg < -0.5) | (yg >= dim - 0.5)))
    frame[extrapol] = 0

    return frame
def SOSS_psf_cube(filt='CLEAR', order=1, subarray='SUBSTRIP256', generate=False):
    """
    Generate/retrieve a data cube of shape (3, 2048, 76, 76) which is a
    76x76 pixel psf for 2048 wavelengths for each trace order. The PSFs
    are scaled to unity and rotated to reproduce the trace tilt at each
    wavelength then placed on the desired subarray.

    Parameters
    ----------
    filt: str
        The filter to use, ['CLEAR', 'F277W']
    order: int
        The trace order
    subarray: str
        The subarray to use, ['SUBSTRIP96', 'SUBSTRIP256', 'FULL']
    generate: bool
        Generate a new cube (writes .npy chunks) instead of loading one

    Returns
    -------
    np.ndarray
        An array of the SOSS psf at 2048 wavelengths for each order
        (only when generate=False; generation writes files and returns None)
    """
    if generate:
        print('Coffee time! This takes about 5 minutes.')

        # Mean wavelength per column; CLEAR has two orders, F277W one
        wavelengths = np.mean(utils.wave_solutions(subarray), axis=1)[:2 if filt == 'CLEAR' else 1]
        coeffs = trace_polynomials(subarray)

        # Locate the packaged raw psf cube
        psf_path = 'files/SOSS_{}_PSF.fits'.format(filt)
        psf_file = resource_filename('awesimsoss', psf_path)

        # Load the SOSS psf cube
        cube = fits.getdata(psf_file).swapaxes(-1, -2)
        wave = fits.getdata(psf_file, ext=1)

        # Initialize the wavelength -> psf interpolator
        psfs = interp1d(wave, cube, axis=0, kind=3)
        trace_cols = np.arange(2048)

        # Run datacube
        for n, wavelength in enumerate(wavelengths):

            # Evaluate the trace polynomial in each column to get the y-position of the trace center
            trace_centers = np.polyval(coeffs[n], trace_cols)

            # Don't calculate order2 for F277W or order 3 for either
            if (n == 1 and filt.lower() == 'f277w') or n == 2:
                pass

            else:
                # Get the psf for each column (parallel across 8 workers)
                print('Calculating order {} SOSS psfs for {} filter...'.format(n+1, filt))
                start = time.time()
                pool = multiprocessing.Pool(8)
                func = partial(get_SOSS_psf, filt=filt, psfs=psfs)
                raw_psfs = np.array(pool.map(func, wavelength))
                pool.close()
                pool.join()
                del pool
                print('Finished in {} seconds.'.format(time.time()-start))

                # Get the PSF tilt at each column
                # NOTE(review): this uses the `order` argument, not n+1, even
                # though the loop spans multiple orders — confirm intent.
                angles = psf_tilts(order)

                # Rotate the psfs to reproduce the trace tilt
                print('Rotating order {} SOSS psfs for {} filter...'.format(n+1, filt))
                start = time.time()
                pool = multiprocessing.Pool(8)
                func = partial(rotate, reshape=False)
                rotated_psfs = np.array(pool.starmap(func, zip(raw_psfs, angles)))
                pool.close()
                pool.join()
                del pool
                print('Finished in {} seconds.'.format(time.time()-start))

                # Scale psfs to unit total flux
                rotated_psfs = np.abs(rotated_psfs)
                scale = np.nansum(rotated_psfs, axis=(1, 2))[:, None, None]
                rotated_psfs = rotated_psfs/scale

                # Split it into 4 chunks to be below Github file size limit
                chunks = rotated_psfs.reshape(4, 512, 76, 76)
                for N, chunk in enumerate(chunks):
                    idx0 = N*512
                    idx1 = idx0+512
                    centers = trace_centers[idx0:idx1]

                    # Interpolate the psfs onto the subarray
                    print('Interpolating chunk {}/4 for order {} SOSS psfs for {} filter onto subarray...'.format(N+1, n+1, filt))
                    start = time.time()
                    pool = multiprocessing.Pool(8)
                    data = zip(chunk, centers)
                    subarray_psfs = pool.starmap(put_psf_on_subarray, data)
                    pool.close()
                    pool.join()
                    del pool
                    print('Finished in {} seconds.'.format(time.time()-start))

                    # Get the filepath
                    filename = 'files/SOSS_{}_PSF_order{}_{}.npy'.format(filt, n+1, N+1)
                    file = resource_filename('awesimsoss', filename)

                    # Delete the file if it exists
                    if os.path.isfile(file):
                        os.system('rm {}'.format(file))

                    # Write the data
                    np.save(file, np.array(subarray_psfs))
                    print('Data saved to', file)

    else:
        # Get the chunked data and concatenate back into one cube
        full_data = []
        for chunk in [1, 2, 3, 4]:
            path = 'files/SOSS_{}_PSF_order{}_{}.npy'.format(filt, order, chunk)
            file = resource_filename('awesimsoss', path)
            full_data.append(np.load(file))

        return np.concatenate(full_data, axis=0)
|
4,950 | 4cb5dcf0d943ef15421bb6bced65804533d232e3 | import mysql.connector
import hashlib
import time
from datetime import datetime
from datetime import timedelta
from pymongo import MongoClient
from pymongo import IndexModel, ASCENDING, DESCENDING
class MongoManager:
    """Thin wrapper around a MongoDB collection used as a crawled-page cache."""

    def __init__(self, server_ip='localhost', client=None, expires=timedelta(days=30)):
        """
        server_ip: host to connect to when no client is supplied
        client: mongo database client
        expires: timedelta of amount of time before a cache entry is
        considered expired (currently stored but not enforced)
        """
        # If a client object is not passed, connect to mongodb at the
        # default port on `server_ip`.
        self.client = MongoClient(server_ip, 27017) if client is None else client
        # Collection that stores cached webpages — the equivalent of a
        # table in a relational database.
        self.db = self.client.spider
        # Create the url index if the db is empty.
        # BUG FIX: `is 0` compared object identity, not value; use == 0.
        if self.db.locations.count() == 0:
            self.db.mfw.create_index([("url", ASCENDING)])

    def query_by_url(self, url):
        """Return the cursor of cached records for `url`, else None."""
        records = self.db.mfw.find({'url': url})
        if records:
            return records
        else:
            return None

    def insert_page(self, url, html):
        """Cache one fetched page under its url."""
        # BUG FIX: the original passed bare `key: value` pairs to insert(),
        # which is a SyntaxError; the document must be a dict.
        self.db.mfw.insert({
            'url': url,
            'html': html
        })

    def clear(self):
        """Drop the entire cache collection."""
        self.db.mfw.drop()
if __name__ == '__main__':
    # Smoke test: requires a MongoDB instance listening on localhost:27017.
    mongo_mgr = MongoManager()
4,951 | 6a9d64b1ef5ae8e9d617c8b0534e96c9ce7ea629 |
import os
import config
############################
# NMJ_RNAI LOF/GOF GENE LIST
def nmj_rnai_set_path():
    """Path to the NMJ RNAi screen hit list under config.datadir."""
    return os.path.join(config.datadir, 'NMJ RNAi Search File.txt')
def nmj_rnai_gain_of_function_set_path():
    """Path to the gain-of-function FBgn id list under config.datadir."""
    return os.path.join(config.datadir, 'NMJ_RNAi_gain_of_function_flybase_ids.txt')
def get_nmj_rnai_genes():
    '''
    Return a list of flybase gene ids.

    The hits from three different screens (Aaron D'Antonio, Sanyal and
    Featherstone). This contains FBgn IDs, which can be converted to gene
    symbols using flybase ID converter
    '''
    path = nmj_rnai_set_path()
    # BUG FIX: `print path` is Python 2-only syntax; the call form below
    # behaves identically on Python 2 and 3.
    print(path)
    with open(path) as fh:
        # Skip first line (the header) and drop blank lines.
        genes = [line.strip() for i, line in enumerate(fh) if i > 0 and line.strip()]
    return genes
def get_nmj_rnai_gain_of_function_genes():
    '''
    Return a list of flybase gene ids.

    The gain of function genes should be a curated subset of the NMJ RNAi
    genes. They were defined in a file Elizabeth McNeill sent,
    "NMJ RNAi Gainoffunctionscreens.xlsx".

    The hits from three different screens (Aaron D'Antonio, Sanyal and
    Featherstone). This contains FBgn IDs, which can be converted to gene
    symbols using flybase ID converter.
    '''
    path = nmj_rnai_gain_of_function_set_path()
    # BUG FIX: `print path` is Python 2-only syntax; the call form below
    # behaves identically on Python 2 and 3.
    print(path)
    with open(path) as fh:
        # Skip first line (the header) and drop blank lines.
        genes = [line.strip() for i, line in enumerate(fh) if i > 0 and line.strip()]
    return genes
|
4,952 | 3b96cc4ef538a06251958495e36fe5dbdf80c13d | import asyncio
def callback():
    """Demo callback: simply reports that the loop invoked it."""
    print('callback invoked')
def stopper(loop):
    """Demo callback that announces itself, then halts the given loop."""
    print('stopper invoked')
    loop.stop()
# Drive the demo: schedule both callbacks, then spin the loop until
# stopper() calls loop.stop(); the loop is always closed afterwards.
event_loop = asyncio.get_event_loop()
try:
    print('registering callbacks')
    # call_soon queues callbacks FIFO for the next loop iteration.
    event_loop.call_soon(callback)
    event_loop.call_soon(stopper, event_loop)
    print('entering event loop')
    event_loop.run_forever()
finally:
    print('closing event loop')
    event_loop.close()
|
4,953 | a01f812584e4cee14c9fe15e9fb6ede4ae3e937a | import os
import pickle
from matplotlib import pyplot as plt
# Locate the project folder relative to the current working directory.
cwd = os.path.join(os.getcwd(), 'DEDA_2020SS_Crypto_Options_RND_HD',
                   'CrypOpt_RiskNeutralDensity')
data_path = os.path.join(cwd, 'data') + '/'
# Trading day whose pre-computed results are plotted below.
day = '2020-03-11'
# SECURITY(review): pickle.load executes arbitrary code; only open trusted
# result files produced by this project.
res = pickle.load(open(data_path + 'results_{}.pkl'.format(day), 'rb'))
# ---------------------------------------------------------------------- SMILES
# Volatility smiles: one panel per maturity on a 2x4 grid.
fig1, axes = plt.subplots(2, 4, figsize=(10, 7))
flat_smile_axes = axes.flatten()
for key, ax in zip(sorted(res), flat_smile_axes):
    print(key, ax)
    entry = res[key]
    ax.plot(entry['df'].M, entry['df'].iv, '.')
    ax.plot(entry['M'], entry['smile'])
    ax.text(0.99, 0.99, r'$\tau$ = ' + str(key),
            horizontalalignment='right',
            verticalalignment='top',
            transform=ax.transAxes)
for idx in (0, 4):
    flat_smile_axes[idx].set_ylabel('implied volatility')
for idx in (4, 5, 6, 7):
    flat_smile_axes[idx].set_xlabel('moneyness')
plt.tight_layout()
fig1.savefig(os.path.join(cwd, '{}_smiles.png'.format(day)), transparent=True)
# ------------------------------------------------------------------------ RNDs
# Risk-neutral densities: one panel per maturity on a 2x4 grid.
fig2, axes = plt.subplots(2, 4, figsize=(10, 7))
flat_rnd_axes = axes.flatten()
for key, ax in zip(sorted(res), flat_rnd_axes):
    print(key, ax)
    ax.plot(res[key]['K'][::-1], res[key]['q'])
    ax.text(0.99, 0.99, r'$\tau$ = ' + str(key),
            horizontalalignment='right',
            verticalalignment='top',
            transform=ax.transAxes)
    ax.set_yticks([])
for idx in (0, 4):
    flat_rnd_axes[idx].set_ylabel('risk neutral density')
for idx in (4, 5, 6, 7):
    flat_rnd_axes[idx].set_xlabel('spot price')
plt.tight_layout()
fig2.savefig(os.path.join(cwd, '{}_RND.png'.format(day)), transparent=True)
# ----------------------------------------------------------------- DERIVATIVES
# Smile fit plus first/second derivatives: one panel per maturity.
fig3, axes = plt.subplots(2, 4, figsize=(10, 7))
flat_deriv_axes = axes.flatten()
for key, ax in zip(sorted(res), flat_deriv_axes):
    print(key, ax)
    entry = res[key]
    ax.plot(entry['M'], entry['smile'])
    ax.plot(entry['M'], entry['first'])
    ax.plot(entry['M'], entry['second'])
    ax.text(0.99, 0.01, r'$\tau$ = ' + str(key),
            horizontalalignment='right',
            verticalalignment='bottom',
            transform=ax.transAxes)
    ax.set_yticks([])
for idx in (0, 4):
    flat_deriv_axes[idx].set_ylabel('implied volatility')
for idx in (4, 5, 6, 7):
    flat_deriv_axes[idx].set_xlabel('moneyness')
plt.tight_layout()
fig3.savefig(os.path.join(cwd, '{}_derivatives.png'.format(day)), transparent=True)
# ----------------------------------------------------------------- TAU PROCESS
# One summary figure (smile fit, derivatives, density) per maturity tau.
for key in res:
    data = res[key]
    fig4, axes = plt.subplots(1, 3, figsize=(10, 4))
    ax_fit, ax_deriv, ax_rnd = axes
    ax_fit.plot(data['df'].M, data['df'].iv, '.', c='r')
    ax_fit.plot(data['M'], data['smile'])
    ax_fit.set_xlabel('moneyness')
    ax_fit.set_ylabel('implied volatility')
    ax_deriv.plot(data['M'], data['smile'])
    ax_deriv.plot(data['M'], data['first'])
    ax_deriv.plot(data['M'], data['second'])
    ax_deriv.set_xlabel('moneyness')
    ax_deriv.set_ylabel('implied volatility')
    ax_rnd.plot(data['S'], data['q'])
    ax_rnd.set_xlabel('spot price')
    ax_rnd.set_ylabel(r'risk neutral density')
    ax_rnd.set_yticks([])
    plt.tight_layout()
    fig4.savefig(os.path.join(cwd, '{}_T{}.png'.format(day, key)), transparent=True)
|
4,954 | d89f0ef24d8e8d23a77cbbb0ae8723c7dec8c00a | class Config:
DEBUG = False
TESTING = False
# mysql+pymysql://user:password@host:port/database
# SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://gjp:976431@49.235.194.73:3306/test'
SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://root:root@127.0.0.1:3306/mydb'
SQLALCHEMY_TRACK_MODIFICATIONS = True
SECRET_KEY = 'hdfjds38948938bmbfsd90008'
class DevelopmentConfig(Config):
    # Local development: enables the debugger/reloader and traceback pages.
    DEBUG = True
    ENV = 'development'
class ProductionConfig(Config):
    # NOTE(review): Flask-SQLAlchemy reads SQLALCHEMY_DATABASE_URI, not
    # DATABASE_URI -- confirm whether this empty setting is intended.
    DATABASE_URI = ''
class TestingConfig(Config):
    # Testing mode: exceptions propagate to the test client.
    TESTING = True
|
4,955 | dc928da92dc7e8a37a7f32dd4a579fd09b89eb01 | __author__ = 'jacek gruzewski'
#!/user/bin/python3.4
"""
To do: throw exceptions rather than calling sys.exit(1)
"""
############################################################
# IMPORTS
############################################################
# Python's libraries
import time
import sys
import logging
import os
import requests
# AWS Boto library
from boto import ec2, route53, exception
#####################################################################
# Static data and configuration
#####################################################################
# Static AWS Rest service for getting instance details
AWS_METADATA = 'http://169.254.169.254/latest/meta-data/instance-id'
log_path = '/var/log/'
file_name = 'blue-green-deploy'
#####################################################################
# Functions
#####################################################################
def read_config_file(logger):
    """Load deployment settings from aws_config.py, falling back to env vars.

    :param logger: logger used to report missing configuration.
    :return: dict with the deployment parameters. When aws_config.py is
        incomplete, a minimal dict with only the credentials found in
        AWS_DEFAULT_REGION / AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY is
        returned; the process exits when those are missing too.
    """
    # Config file imports
    import aws_config

    try:
        # Checking if all attributes were set.
        domain = getattr(aws_config, "domain")
        config = {
            'reg': getattr(aws_config, "region"),
            'access': getattr(aws_config, "access_key"),
            'secret': getattr(aws_config, "secret_key"),
            'srv': getattr(aws_config, "instance_name"),
            'domain': domain,
            'alias': getattr(aws_config, "live_record_name") + "." + domain,
            'image': getattr(aws_config, "ami_id"),
            'key': getattr(aws_config, "key_pair"),
            'sec': [getattr(aws_config, "security_group")],
            'subnet': getattr(aws_config, "subnet_id"),
            'type': getattr(aws_config, "instance_size"),
            'shutdown': getattr(aws_config, "shutdown_behavior"),
            'dry-run': getattr(aws_config, "dry_run")
        }
    except AttributeError as at_err:
        # Falling back to environment variables. Worth a try!
        logger.error('Could not read parameters from aws_config.py file. [%s]', at_err)
        # BUG FIX: os.environ[...] raises KeyError when unset (it never
        # returns None), and `config` was left unbound on this path, making
        # the final return a NameError. Use .get() and build a minimal dict.
        region = os.environ.get('AWS_DEFAULT_REGION')
        aws_access_key = os.environ.get('AWS_ACCESS_KEY_ID')
        aws_secret_key = os.environ.get('AWS_SECRET_ACCESS_KEY')
        if region is None or aws_access_key is None or aws_secret_key is None:
            # At least we tried.
            logger.error('Could not find AWS credentials in local variables')
            sys.exit(1)
        logger.info('Got AWS credentials from local variables')
        config = {'reg': region, 'access': aws_access_key, 'secret': aws_secret_key}
    return config
def set_up_logging(path, file):
    """Configure root logging to console and to '<path>/<file>.log'.

    Returns the root logger so callers can attach further handlers.
    """
    logging.basicConfig(level=logging.WARN)
    formatter = logging.Formatter("%(asctime)s [%(levelname)-5.5s] %(message)s")
    file_handler = logging.FileHandler("{0}/{1}.log".format(path, file))
    file_handler.setFormatter(formatter)
    root = logging.getLogger()
    root.addHandler(file_handler)
    return root
def connect_to_aws(region, aws_access_key, aws_secret_key):
    """Open EC2 and Route53 connections with the supplied credentials.

    :param:
        region: AWS region
        aws_access_key: AWS Access Key
        aws_secret_key: AWS Secret Key
    :return: dict mapping 'ec2' and 'route53' to their connection handles;
        the process exits when either connection cannot be established.
    """
    ec2_conn = ec2.connect_to_region(region_name=region,
                                     aws_access_key_id=aws_access_key,
                                     aws_secret_access_key=aws_secret_key)
    route53_conn = route53.Route53Connection(aws_access_key_id=aws_access_key,
                                             aws_secret_access_key=aws_secret_key)
    # Guard clauses: bail out early when either handle is missing.
    if ec2_conn is None:
        logging.error('Could not connect to Ec2 with this parameters: %s, %s, <secret key>', region, aws_access_key)
        sys.exit(1)
    logging.info('Connected to AWS EC2 [%s]', region)
    if route53_conn is None:
        logging.error('Could not connect to Route53 with this parameters: %s, <secret key>', aws_access_key)
        sys.exit(1)
    logging.info('Connected to AWS Route53')
    return {'ec2': ec2_conn, 'route53': route53_conn}
def get_specific_instances(ec2_conn, tag_key, tag_value, instance_state):
    """Fetch instances matching one tag and one state filter.

    :param
        ec2_conn: Connection to AWS EC2.
        tag_key: Name of the tag.
        tag_value: Value of the tag.
        instance_state: "running" / "pending" / "stopped" (or a list of them).
    :return: list of matching boto Instance objects.
    """
    filters = {
        "tag:{0}".format(tag_key): tag_value,
        "instance-state-name": instance_state,
    }
    return ec2_conn.get_only_instances(filters=filters)
def create_new_instance(ec2_conn, image_id, ssh_key, sec_group, subnet_id, env, instance_name, user_data=None,
                        instance_size='t2.micro', shutdown='stop', dry_run=False):
    """
    Launch a new EC2 instance for *env* unless one is already running there.

    :param
        ec2_conn: connection to AWS EC2 service
        image_id: Amazon Machine Image ID with all your software
        ssh_key: AWS key pair name
        sec_group: Security group ID list that should be allocated
        subnet_id: Subnet ID in which your instance should be created
        env: Environment (blue / green / old_app)
        instance_name: Name tag value
        user_data: Cloud-Init script that will run once
        instance_size: String with instance size
        shutdown: 'stop' or terminate shutdown behaviour
        dry_run: True or False. If True, it will not make any changes.
    :return: instance list if created, 'OK' on a dry run, or None when an
        instance with this environment tag is already running.
    """
    # Guard: refuse to launch when an instance with the same Environment tag
    # is already running or pending.
    instances = get_specific_instances(ec2_conn, "Environment", env, ["running", "pending"])
    if not instances:
        # No clash -- create the new instance.
        try:
            reservations = ec2_conn.run_instances(image_id,
                                                  key_name=ssh_key,
                                                  user_data=user_data,
                                                  instance_type=instance_size,
                                                  subnet_id=subnet_id,
                                                  security_group_ids=sec_group,
                                                  instance_initiated_shutdown_behavior=shutdown,
                                                  dry_run=dry_run)
            if reservations is not None and not dry_run:
                # When instance was created, we have to assign tags.
                tag_new_instance(reservations.instances[0], instance_name, env)
            else:
                LOGGER.error('Something went wrong when creating new instance.')
                sys.exit(1)
        except exception.EC2ResponseError:
            # boto reports a successful dry run by raising EC2ResponseError.
            if dry_run:
                LOGGER.warn('New instance would be created and this tags should be assigned')
                LOGGER.warn('Name: %s' % instance_name)
                LOGGER.warn('Environment: %s' % env)
                LOGGER.warn('Deployment Date: %s' % time.strftime("%d-%m-%Y"))
                return 'OK'
            else:
                LOGGER.error('Something went wrong when creating new instance.')
                try:
                    # Last chance - waiting 1 minute to tag instance.
                    # NOTE(review): if run_instances itself raised,
                    # `reservations` is unbound here and this retry raises
                    # NameError instead -- confirm the intended recovery.
                    time.sleep(60)
                    tag_new_instance(reservations.instances[0], instance_name, env)
                except exception.EC2ResponseError:
                    sys.exit(1)
    else:
        # Looks like there was another instance running with the same tags.
        LOGGER.warn('There is another instance running with %s environment tag (id: %s).' % (env, instances[0]))
        return None
    return reservations.instances
def tag_instance(instance, tag_name, tag_key):
    """Replace a tag on *instance*: drop the old entry, write the new value.

    :param
        instance: Instance that should be tagged.
        tag_name: Name of the tag.
        tag_key: New value for the tag.
    :return: None
    """
    name = '{0}'.format(tag_name)
    value = '{0}'.format(tag_key)
    instance.remove_tag(name)
    instance.add_tag(name, value)
def tag_new_instance(instance, instance_name, environment):
    """Apply the standard tag set to a freshly created instance.

    :param
        instance: Instance that should be tagged.
        instance_name: Name of the instance.
        environment: blue or green.
    :return: None
    """
    for tag, value in (('Name', instance_name),
                       ('Environment', environment),
                       ('Deployment Date', time.strftime("%d-%m-%Y"))):
        instance.add_tag(tag, value)
def stop_instance(aws_connection, env, domain, live_alias, tag, dry_run=False):
    """
    Stop the previously-live instance once it is no longer behind live DNS.

    :param
        aws_connection: dict with 'ec2' and 'route53' connection handles.
        env: blue or green, whichever instance you want to stop (cross check).
        domain: your domain.
        live_alias: your external DNS record pointing to the live web server.
        tag: one-entry dict whose value becomes the new Environment tag.
        dry_run: True or False. If True, it will not make any changes.
    :return: boolean result.
    """
    result = False
    # Collapse the tag dict to its value, e.g. {'Environment': 'old-app'} -> 'old-app'.
    tag = ''.join(tag.values())
    # Gets the running instance for this environment.
    instances = get_specific_instances(aws_connection.get('ec2'), "Environment", env, "running")
    # Safety cross-check: never stop the environment that is still live.
    if check_which_is_live(aws_connection.get('route53'), domain, live_alias) != (env + "." + domain) and instances:
        # Instance is not live
        try:
            aws_connection.get('ec2').stop_instances(instance_ids=[instances[0].id], dry_run=dry_run)
            tag_instance(instances[0], 'Environment', tag)
        except exception.EC2ResponseError:
            # On a dry run boto raises; report what would have happened.
            LOGGER.warn('Instance %s would be stopped and tagged with Environment:%s' % (instances[0].id, tag))
        result = True
    else:
        if dry_run:
            LOGGER.warning('Old instance with tag %s would be stopped.' % env)
        else:
            LOGGER.error('Could not stop the old instance. It looks like it is live or doesnt exist. '
                         'I tried to stop %s instance.' % env)
    return result
def check_which_is_live(route53_conn, domain, live_alias):
    """Resolve which sub-alias (blue.<domain> or green.<domain>) is live.

    :param
        route53_conn: Connection to AWS Route53 service.
        domain: Your domain.
        live_alias: Your external DNS record pointing to the live web server.
    :return: fqdn the live A record currently aliases to.
    """
    zone = route53_conn.get_zone(domain)
    return zone.get_a(live_alias).alias_dns_name
def get_env(fqdn, domain):
    """Strip the domain suffix off *fqdn*, leaving the environment label.

    :param
        fqdn: Fully Qualified Domain Name, e.g. 'blue.example.com'.
        domain: Your domain name, e.g. 'example.com'.
    :return: environment label (blue or green).
    """
    return fqdn.replace("." + domain, "")
def swap_dns(live_alias, future_value, alias_dns_name, zone, records):
    """UPSERT the A record for *live_alias* and commit the change set.

    :param
        live_alias: DNS record name to update.
        future_value: record value to add (alias fqdn or raw IP).
        alias_dns_name: alias target, or None for a plain A record.
        zone: handle to the hosted zone.
        records: ResourceRecordSets change set for that zone.
    :return: the AWS commit response; exits the process on any failure.
    """
    try:
        upsert = records.add_change(action='UPSERT',
                                    name=live_alias,
                                    ttl=300,
                                    type='A',
                                    alias_dns_name=alias_dns_name,
                                    alias_hosted_zone_id=zone.id,
                                    alias_evaluate_target_health=False)
        upsert.add_value(future_value)
        return records.commit()
    except Exception as ex:
        LOGGER.error('Could not swap dns entry for %s. Exception: %s' % (live_alias, ex))
        sys.exit(1)
def swap_live_with_staging(aws_connection, domain, current_live, live_alias, blue_alias, green_alias, dry_run=False):
    """Point the live alias at whichever colour is currently staging.

    :param
        aws_connection: dict with 'route53' (and 'ec2') connection handles.
        domain: Your domain.
        current_live: blue.<domain> or green.<domain>, whichever is live now.
        live_alias: Your external DNS record pointing to the live web server.
        dry_run: True or False. If True, it will not make any changes.
    :return: Result of the change (AWS respond), or 'OK' on a dry run.
    """
    route53_conn = aws_connection.get('route53')
    zone = route53_conn.get_zone(domain)
    records = route53.record.ResourceRecordSets(connection=route53_conn, hosted_zone_id=zone.id)
    # Whichever colour is NOT live becomes the new target.
    target = green_alias if current_live == blue_alias else blue_alias
    if dry_run:
        LOGGER.warn('DNS record %s would be updated with %s' % (live_alias, target))
        return 'OK'
    # NOTE(review): a pre-swap health check (simple_check) existed here but
    # was disabled in the original; re-enable once staging exposes a health URL.
    return swap_dns(live_alias, target, target, zone, records)
def assign_to_staging(route53_conn, domain, current_live, instance_public_ip, live_alias, blue_alias, green_alias,
                      dry_run=False):
    """Attach the new instance's public IP to the staging colour's DNS record.

    :param
        route53_conn: Connection to AWS Route53 service.
        domain: Your domain.
        current_live: blue.<domain> or green.<domain>, whichever is live now.
        instance_public_ip: Public IP of the newly created staging instance.
        dry_run: True or False. If True, it will not make any changes.
    :return: Result of the change (AWS respond), or 'OK' on a dry run.
    """
    zone = route53_conn.get_zone(domain)
    records = route53.record.ResourceRecordSets(connection=route53_conn, hosted_zone_id=zone.id)
    if dry_run:
        LOGGER.warn('Public IP %s would be assigned to %s' % (instance_public_ip, live_alias))
        return 'OK'
    # The record of the colour that is NOT live gets the fresh IP.
    staging_alias = blue_alias if current_live == green_alias else green_alias
    return swap_dns(staging_alias, instance_public_ip, None, zone, records)
def delete_old_instance(ec2_conn, tag, dry_run=False):
    """
    Terminate the single stopped instance carrying *tag*.

    :param
        ec2_conn: Connection to AWS EC2 service.
        tag: one-entry dict {tag_name: tag_value} identifying the instance.
        dry_run: True or False. If True, boto raises instead of acting.
    :return: True when terminated (or when nothing matched -- deliberately
        non-fatal); False when termination failed.
    """
    result = False
    # Filters instances with the given tag and only in stopped state.
    instances = get_specific_instances(ec2_conn, ''.join(tag.keys()), ''.join(tag.values()), "stopped")
    # BUG FIX: `len(instances) is 1` relied on CPython small-int identity
    # caching; use a real equality comparison.
    if len(instances) == 1:
        old = instances[0]
        LOGGER.debug("I am going to delete %s" % old.id)
        try:
            deleted_old = ec2_conn.terminate_instances(instance_ids=[old.id], dry_run=dry_run)
            # terminate_instances returns the terminated instances; verify we
            # removed exactly the one we targeted.
            if deleted_old[0].id == old.id:
                LOGGER.info('Deleted %s' % deleted_old[0].id)
                result = True
        except exception.EC2ResponseError as ex:
            if dry_run:
                LOGGER.error('Instance %s would be deleted.' % old.id)
            else:
                LOGGER.error('Something went wrong when deleting old instance.')
                LOGGER.error(ex)
    else:
        # Zero or several matches: warn but keep the deployment going.
        LOGGER.warn('No old instance or more than 1 instance was found. I hope you are aware of that. Continue.')
        result = True  # I am returning true because it shouldn't be a big issue
    return result
def wait_for_public_ip(ec2_conn, instance_id):
    """Poll EC2 for the instance's public IP (every 10 s, up to 24 tries,
    i.e. roughly 4 minutes).

    :param
        ec2_conn: Connection to AWS EC2 service.
        instance_id: ID of the instance to watch.
    :return: the public IP as a string; logs and exits when it never appears.
    """
    for _ in range(24):
        instance = ec2_conn.get_only_instances(instance_ids=[instance_id])[0]
        if instance.ip_address is not None:
            # We got it!
            return str(instance.ip_address)
        # Still not available -- wait before the next poll.
        time.sleep(10)
    # Unfortunately we couldn't get a public IP: log and exit.
    instance = ec2_conn.get_only_instances(instance_ids=[instance_id])[0]
    LOGGER.error('Cannot get Public IP from instance %s' % instance.id)
    sys.exit(1)
def simple_check(url):
    """
    Poll ``http://<url>`` until it answers 200 OK.

    Retries up to 10 times at 60-second intervals (roughly 10 minutes).

    :param
        url: host (and optional path) to probe, without scheme.
    :return: True once a 200 response is seen; False on a connection error
        or when all attempts are exhausted.
    """
    # BUG FIX: the original never incremented its loop counter, so a server
    # that kept answering non-200 looped (and slept) forever.
    for attempt in range(10):
        try:
            r = requests.head('http://' + url)
            LOGGER.debug(r.status_code)
            if r.status_code == 200:
                return True
            # Not ready yet -- wait before the next probe.
            time.sleep(60)
        except requests.ConnectionError:
            LOGGER.error("Failed to get respond code from %s - attempt #%s" % (url, attempt + 1))
            return False
    return False
def write_to_file(to_write):
    """Persist *to_write* to 'parameters.properties' in the working directory.

    BUG FIX: the original left the file handle open; a context manager
    guarantees it is flushed and closed even on error.
    """
    with open('parameters.properties', 'w') as f:
        f.write(to_write)
def switch(region, access_key, secret_key, tag, domain, live_url, blue_alias, green_alias, dry_run=False):
    """Promote staging to live: swap the DNS alias, then stop the old box.

    :param
        region / access_key / secret_key: AWS credentials.
        tag: one-entry dict used to re-tag the stopped instance.
        domain, live_url, blue_alias, green_alias: DNS configuration.
        dry_run: True or False. If True, it will not make any changes.
    :return: the Route53 change result from the swap.
    """
    # 1. Connect to AWS.
    aws_conn = connect_to_aws(region, access_key, secret_key)
    # 2. Work out which colour is live right now.
    live = check_which_is_live(aws_conn.get('route53'), domain, live_url)
    # 3. Swap DNS so the other colour goes live.
    result = swap_live_with_staging(aws_conn, domain, live, live_url, blue_alias, green_alias, dry_run)
    # 4. Grace period so in-flight connections drain, then stop the old box.
    time.sleep(300)
    stop_instance(aws_conn, get_env(live, domain), domain, live_url, tag, dry_run)
    return result
def roll_back(region, access_key, secret_key, tag, domain, live_alias, blue_alias, green_alias, dry_run=False):
    """
    Roll back a deployment: restart the instance carrying *tag*, put it back
    on the staging record, then swap DNS and stop the current live box.

    :param
        region / access_key / secret_key: AWS credentials.
        tag: one-entry dict {tag_name: tag_value} identifying the old instance.
        domain, live_alias, blue_alias, green_alias: DNS configuration.
        dry_run: True or False. If True, it will not make any changes.
    :return: boolean status (False when the old instance failed to start).
    """
    result = True
    # 1. Connects to AWS
    aws_conn = connect_to_aws(region, access_key, secret_key)
    # 2. Get the old instance (stopped or already running) and figure out
    #    which environment is live at the moment.
    old_instance = get_specific_instances(aws_conn.get('ec2'), ''.join(tag.keys()), ''.join(tag.values()),
                                          ['stopped', 'running'])
    current_live = check_which_is_live(aws_conn.get('route53'), domain, live_alias)
    env = get_env(current_live, domain)
    # 3. Do the Magic ;)
    if not old_instance:
        LOGGER.error('No instance with tag %s was found. No chance to roll back Sir!' % ''.join(tag.values()))
    else:
        try:
            if dry_run:
                LOGGER.warning('Instance %s would be started and tagged with %s' % (old_instance, env))
            else:
                # Start old instance and tag it with the colour opposite to
                # the one that is currently live.
                old_instance[0].start()
                tag_instance(old_instance[0], 'Environment', 'blue' if env == 'green' else 'green')
                # Refresh its public IP as it could change across stop/start.
                instance_public_ip = wait_for_public_ip(aws_conn.get('ec2'), old_instance[0].id)
                # NOTE(review): dry_run is forced to False here while the
                # following calls forward the caller's flag -- confirm intent.
                assign_to_staging(aws_conn.get('route53'), domain, current_live, instance_public_ip, live_alias,
                                  blue_alias, green_alias, dry_run=False)
                swap_live_with_staging(aws_conn, domain, current_live, live_alias, blue_alias, green_alias, dry_run)
                stop_instance(aws_conn, env, domain, live_alias, tag, dry_run)
        except exception.EC2ResponseError:
            LOGGER.error('Could not start %s instance.' % old_instance)
            result = False
    return result
def deployment_stage(region, access_key, secret_key, srv_name, domain, live_url, blue_alias, green_alias, tag, image_id,
                     ssh_key, sec_group, subnet_id, instance_size, shutdown, dry_run=False):
    """
    Deliver a new instance behind the staging DNS record (blue / green).

    :param
        region: region to which you want to deploy your instance
        access_key: AWS Access Key
        secret_key: AWS Secret Key
        srv_name: how you want to call your web server
        domain: your domain
        live_url: DNS record for your live website
        blue_alias: blue fqdn
        green_alias: green fqdn
        tag: one-entry dict identifying the old (stopped) instance to delete
        image_id: Amazon Machine Image ID with all your software
        ssh_key: AWS key pair name
        sec_group: security group ID list that should be allocated
        subnet_id: subnet ID in which your instance should be created
        instance_size: string with instance size
        shutdown: shutdown behaviour ('stop' or terminate)
        dry_run: True or False. If True, it will not make any changes.
    :return: string "<env>.<domain>: <public ip>" of the staging server
    """
    # 1. Connects to AWS
    aws_connections = connect_to_aws(region, access_key, secret_key)
    # 2. Delete the old instance, which should already be stopped.
    deleted = delete_old_instance(aws_connections.get('ec2'), tag, dry_run)
    # 3. Check which environment (blue/green) is live; stage the other one.
    live = check_which_is_live(aws_connections.get('route53'), domain, live_url)
    if live == blue_alias:
        env = 'green'
    else:
        env = 'blue'
    # 4. If deleted then we can create new instance
    if dry_run:
        # Dry run: report what would happen, then stop the whole script.
        create_new_instance(aws_connections.get('ec2'), image_id, ssh_key, sec_group, subnet_id, env, srv_name, None,
                            instance_size, shutdown, dry_run)
        assign_to_staging(aws_connections.get('route53'), domain, live, "127.0.0.1", live_url, blue_alias,
                          green_alias, dry_run)
        sys.exit(0)
    elif deleted:
        staging_instance = create_new_instance(aws_connections.get('ec2'), image_id, ssh_key, sec_group, subnet_id, env,
                                               srv_name, None, instance_size, shutdown, dry_run)
        # 5. Assign right dns alias only if we managed to create instance in previous step
        if staging_instance is None:
            # There were some problems with creating new instance
            LOGGER.error('Could not create new instance.')
            sys.exit(1)
        else:
            # Everything was all right. Waiting for Public IP
            if staging_instance[0].ip_address is None:
                # Public IP is not available straight away; poll for it.
                public_ip = wait_for_public_ip(aws_connections.get('ec2'), staging_instance[0].id)
                if public_ip is None:
                    LOGGER.error('Cannot get Public IP from instance %s' % staging_instance[0].id)
                    sys.exit(1)
            else:
                # Or maybe it is? :)
                public_ip = staging_instance[0].ip_address
            assign_to_staging(aws_connections.get('route53'), domain, live, public_ip, live_url, blue_alias, green_alias,
                              dry_run)
            # Hand the staging IP to the CI pipeline via a properties file.
            write_to_file("staging-server = " + public_ip)
    # NOTE(review): when `deleted` is False (and not a dry run) `public_ip`
    # is never assigned, so this return raises NameError -- confirm whether
    # that path should exit earlier instead.
    return str(env + "." + domain + ": " + public_ip)
# Module-level logger, created at import time so every function can use it.
LOGGER = set_up_logging(log_path, file_name)
|
4,956 | a54c8ab63c1e0f50d254d6c97ca3f167db7142e9 | import sys
sys.path.append('../')
from IntcodeComputer.intcode import Program
if __name__ == '__main__':
    # Load the comma-separated intcode program from disk and execute it.
    input_file = 'input.txt'
    with open(input_file) as handle:
        program = Program([int(token) for token in handle.readline().split(',')])
    program.run()
    # Final memory image after the run.
    result = program.instructions
|
4,957 | 394ebfe25bbf8eaf427509f28a82a98b9b481b63 | from .dispatch import dispatch_expts |
4,958 | ea25aedc4728c18ac3d5da22c76cb7f1ef65e827 | from tw.core import *
|
4,959 | 02b760b16cdcd42f8d8d7222b439da87fb8076a3 | import numpy as np
from flask import Flask,request,render_template
import pickle
from werkzeug.serving import run_simple
# Flask app; templates live in ./template (non-default folder name).
app = Flask(__name__, template_folder='template')
# SECURITY: pickle.load executes arbitrary code from model.pkl -- only ship a
# model file you trust. FIX: close the file handle (the original leaked it).
with open("model.pkl", 'rb') as _model_file:
    model = pickle.load(_model_file)
@app.route('/')
def home():
    # Landing page: render template/index.html with no context.
    return render_template('index.html')
@app.route('/predict', methods=['POST'])
def predict():
    """Read the posted form fields, run the model, re-render the page."""
    features = [int(value) for value in request.form.values()]
    batch = [np.array(features)]
    prediction = model.predict(batch)
    return render_template('index.html', prediction_text=prediction)
if __name__ == "__main__":
run_simple('localhost',8001,app,use_reloader=False) |
4,960 | 27c364ccf4a6703f74c95ebb386f8ced38b1eafd | try:
from zcrmsdk.src.com.zoho.crm.api.dc.data_center import DataCenter
except Exception as e:
from .data_center import DataCenter
class EUDataCenter(DataCenter):
    """
    This class represents the properties of Zoho CRM in EU Domain.
    """

    @classmethod
    def PRODUCTION(cls):
        """
        This method represents the Zoho CRM Production environment in EU domain
        :return: An instance of Environments
        """
        # Environment bundles the API base URL with the shared IAM and
        # file-upload endpoints below.
        return DataCenter.Environment("https://www.zohoapis.eu", cls().get_iam_url(), cls().get_file_upload_url())

    @classmethod
    def SANDBOX(cls):
        """
        This method represents the Zoho CRM Sandbox environment in EU domain
        :return: An instance of Environment
        """
        return DataCenter.Environment("https://sandbox.zohoapis.eu", cls().get_iam_url(), cls().get_file_upload_url())

    @classmethod
    def DEVELOPER(cls):
        """
        This method represents the Zoho CRM Developer environment in EU domain
        :return: An instance of Environment
        """
        return DataCenter.Environment("https://developer.zohoapis.eu", cls().get_iam_url(), cls().get_file_upload_url())

    def get_iam_url(self):
        # OAuth token endpoint shared by all EU environments.
        return "https://accounts.zoho.eu/oauth/v2/token"

    def get_file_upload_url(self):
        # Content/file-upload endpoint shared by all EU environments.
        return "https://content.zohoapis.eu"
|
4,961 | 0e8a11c5b5a95929c533597d79ee4f3d037c13e0 | """
bets.models
Models relating to bets placed
"""
import datetime
from mongoengine import *
from decimal import Decimal
from app.groups.models import Group
from app.users.models import User
from app.matches.models import Match
from app.project.config import CURRENCIES
class GroupMatch(Document):
    """Associate each match with the group."""
    group = ReferenceField(Group)
    match = ReferenceField(Match)
    # Betting deadline for this match within the group.
    cutoff = DateTimeField()
    # BUG FIX: `default=datetime.datetime.now()` called now() once at import
    # time, stamping every document with the module-load time; pass the
    # callable so it is evaluated per document.
    created = DateTimeField(default=datetime.datetime.now)
    meta = {
        'indexes': ['group', 'match']
    }

    def __str__(self):
        return "%s: %s" % (self.match, self.group)

    def time_remaining(self):
        """Timedelta until the betting cutoff (negative once it has passed)."""
        return self.cutoff - datetime.datetime.now()

    def amount_bet(self, user):
        """If the user has bet any amount on this match,
        return the amount, or 0"""
        try:
            return Bet.objects.get(group_match=self, user=user).amount
        except Bet.DoesNotExist:
            return Decimal(0)
class Bet(Document):
    """Bet that a user has placed on a single group match."""
    OUTCOME = (
        (-1, 'Team 2 wins'),
        (0, 'Draw'),
        (1, 'Team 1 wins'),
    )
    group_match = ReferenceField(GroupMatch)
    user = ReferenceField(User)
    amount = DecimalField()
    currency = StringField(max_length=3, choices=CURRENCIES)
    outcome = IntField(choices=OUTCOME)
    # BUG FIX: pass the callable, not an import-time timestamp.
    created = DateTimeField(default=datetime.datetime.now)
    meta = {
        'indexes': ['user']
    }

    def __str__(self):
        # BUG FIX: the original read ``self.bet``, an attribute this class
        # never defines (AttributeError); the intended field is group_match.
        return "%s: %s" % (self.group_match, self.user)

    def pot(self):
        """Total amount staked by everyone on this bet's match."""
        bets = Bet.objects(group_match=self.group_match)
        return sum(map(lambda x: x.amount, bets))
class WinnerBet(Document):
    """Bet placed at the beginning of the tournament on who
    will win the worldcup"""
    user = ReferenceField(User)
    # NOTE(review): this references User, not a Team document -- confirm
    # whether a Team model exists; the field name suggests it should.
    team = ReferenceField(User)
    amount = DecimalField()
    currency = StringField(max_length=3, choices=CURRENCIES)
    cutoff = DateTimeField()
    # BUG FIX: callable default instead of an import-time now() timestamp.
    created = DateTimeField(default=datetime.datetime.now)
    meta = {
        'indexes': ['user', 'team']
    }

    def __str__(self):
        return u"%s: %s" % (str(self.user), str(self.team))
|
4,962 | 16bf4583b872f038edccbac4e567c1854d65e216 | import tensorflow as tf
import numpy as np
def safe_nanmax(x):
    """nanmax that stays quiet on all-NaN input (returns NaN, no warning).

    BUG FIX: ``np.warnings`` was an accidental re-export removed in
    NumPy 1.25; use the stdlib ``warnings`` module directly.
    """
    import warnings
    with warnings.catch_warnings():
        warnings.filterwarnings('ignore',
                                r'All-NaN (slice|axis) encountered')
        return np.nanmax(x)
def safe_nanargmax(x):
    """nanargmax that returns NaN (instead of raising) for all-NaN/empty input."""
    if np.all(np.isnan(x)):
        # np.nanargmax would raise ValueError on this input.
        return np.nan
    return np.nanargmax(x)
def upper_triangular_flat(A):
    """Return the strictly-upper-triangular entries of A as a flat tensor."""
    like_ones = tf.ones_like(A)
    upper_incl_diag = tf.matrix_band_part(like_ones, 0, -1)
    diag_only = tf.matrix_band_part(like_ones, 0, 0)
    # Strict upper triangle = upper triangle minus the diagonal.
    keep = tf.cast(upper_incl_diag - diag_only, dtype=tf.bool)
    return tf.boolean_mask(A, keep)
def pairwise_distances(embeddings, squared=False):
    """Compute the 2D matrix of distances between all the embeddings.

    Args:
        embeddings: tensor of shape (batch_size, embed_dim)
        squared: Boolean. If true, output is the pairwise squared euclidean
            distance matrix; if false, the pairwise euclidean distance matrix.

    Returns:
        tensor of shape (batch_size, batch_size)
    """
    gram = tf.matmul(embeddings, tf.transpose(embeddings))
    squared_norms = tf.diag_part(gram)
    # ||a - b||^2 = ||a||^2 - 2 <a, b> + ||b||^2
    dists = tf.expand_dims(squared_norms, 1) - 2.0 * gram + tf.expand_dims(squared_norms, 0)
    # Numerical noise can push tiny distances slightly below zero.
    dists = tf.maximum(dists, 0.0)
    if squared:
        return dists
    # Offset exact zeros before sqrt so its gradient stays finite there,
    # then zero those entries again afterwards.
    zero_mask = tf.to_float(tf.equal(dists, 0.0))
    dists = tf.sqrt(dists + zero_mask * 1e-16)
    return dists * (1.0 - zero_mask)
def contrastive_score(labels, dist, thresholds, metric="accuracy"):
    """Score pair predictions (dist < threshold) against binary labels.

    Args:
        labels: 0/1 tensor; 1 means the pair is a "same" pair.
        dist: distances, one per labelled pair.
        thresholds: scalar or 1-D tensor of decision thresholds; results
            are computed per threshold (axis 0 is reduced).
        metric: a name or list of names among accuracy/precision/recall/
            specificity/f1/bacc/tp/tn/pcp/pcn/cp/cn/total.

    Returns:
        A single tensor when `metric` is a string, otherwise a dict keyed
        by metric name.

    Raises:
        NotImplementedError: if a requested metric name is unknown.
    """
    d = {}
    if isinstance(metric, list):
        for m in metric:
            d[m] = True
    else:
        d[metric] = True
    res = {}
    if "total" in d:
        res["total"] = tf.size(labels)
    if "f1" in d:
        precision = contrastive_score(
            labels, dist, thresholds, metric="precision")
        recall = contrastive_score(labels, dist, thresholds, metric="recall")
        res["f1"] = 2 * precision * recall / (precision + recall)
    if "bacc" in d:
        specificity = contrastive_score(
            labels, dist, thresholds, metric="specificity")
        recall = contrastive_score(labels, dist, thresholds, metric="recall")
        # BUG FIX: this result was stored under the literal key "metric",
        # so requesting "bacc" in a metric list returned a dict without a
        # "bacc" entry; store it under its own name.
        res["bacc"] = (specificity + recall) / 2
    # Broadcast (pairs x 1) distances against (1 x thresholds).
    th = tf.reshape(thresholds, [1, -1])
    dist = tf.reshape(dist, [-1, 1])
    labels = tf.cast(tf.reshape(labels, [-1, 1]), tf.int32)
    pred = tf.cast(dist < th, tf.int32)
    tp = pred * labels
    tn = (1 - pred) * (1 - labels)
    corr = tp + tn
    tp = tf.reduce_sum(tf.cast(tp, tf.float32), axis=0)
    tn = tf.reduce_sum(tf.cast(tn, tf.float32), axis=0)
    pred = tf.cast(pred, tf.float32)
    corr = tf.cast(corr, tf.float32)
    labels = tf.cast(labels, tf.float32)
    if "accuracy" in d:
        res["accuracy"] = tf.reduce_mean(corr, axis=0)
    if "precision" in d:
        res["precision"] = tp / tf.reduce_sum(pred, axis=0)
    if "recall" in d:
        res["recall"] = tp / tf.reduce_sum(labels)
    if "specificity" in d:
        res["specificity"] = tn / tf.reduce_sum(1 - labels)
    if "tp" in d:
        res["tp"] = tp
    if "tn" in d:
        res["tn"] = tn
    if "pcp" in d:
        res["pcp"] = tf.reduce_sum(pred, axis=0)
    if "pcn" in d:
        res["pcn"] = tf.reduce_sum(1 - pred, axis=0)
    if "cp" in d:
        res["cp"] = tf.reduce_sum(labels)
    if "cn" in d:
        res["cn"] = tf.reduce_sum(1 - labels)
    if len(d) != len(res):
        raise NotImplementedError("some metrics were not implemented")
    if not isinstance(metric, list):
        return next(iter(res.values()))
    return res
def triplet_score(labels, embeddings, thresholds, metric="accuracy"):
    """Expand (label, embedding) batches into pairs and score them."""
    distance_matrix = pairwise_distances(embeddings)
    column_labels = tf.reshape(labels, [-1, 1])
    # 1 where two samples share an identity, 0 otherwise.
    same_identity = tf.cast(
        tf.equal(column_labels, tf.transpose(column_labels)), tf.int32)
    # Each unordered pair appears exactly once in the strict upper triangle.
    pair_labels = upper_triangular_flat(same_identity)
    pair_dist = upper_triangular_flat(distance_matrix)
    return contrastive_score(pair_labels, pair_dist, thresholds, metric=metric)
class BatchScorer:
    """Accumulates pair-classification counts across batches.

    Subclasses implement `score` to compute per-batch counts; `result`
    folds the running totals into a final metric (one value per
    threshold when the counts are threshold-indexed arrays).
    """

    def __init__(self):
        self._tp = 0      # true positives
        self._tn = 0      # true negatives
        self._pcp = 0     # predicted positives
        self._pcn = 0     # predicted negatives
        self._cp = 0      # condition (ground-truth) positives
        self._cn = 0      # condition (ground-truth) negatives
        self._total = 0   # number of scored pairs

    def score(self, y_true, y_pred, metric):
        """Return a dict of batch-level counts; implemented by subclasses."""
        raise NotImplementedError()

    def handle(self, y_true, y_pred):
        """Score one batch and add its counts to the running totals."""
        d = self.score(y_true, y_pred,
                       ["tp", "tn", "pcp", "pcn", "cp", "cn", "total"])
        self._tp += d["tp"]
        self._tn += d["tn"]
        self._pcp += d["pcp"]
        self._pcn += d["pcn"]
        self._cp += d["cp"]
        self._cn += d["cn"]
        self._total += d["total"]

    def result(self, metric):
        """Compute `metric` from the accumulated counts."""
        # `np.warnings` was removed in NumPy 1.24; use the stdlib module.
        # Division warnings (0/0 -> nan on arrays) are deliberately ignored.
        import warnings
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            if metric == "accuracy":
                return (self._tp + self._tn) / self._total
            if metric == "precision":
                return self._tp / self._pcp
            if metric == "recall":
                return self._tp / self._cp
            if metric == "specificity":
                return self._tn / self._cn
            if metric == "f1":
                precision = self.result("precision")
                recall = self.result("recall")
                return 2 * precision * recall / (precision + recall)
            if metric == "bacc":
                recall = self.result("recall")
                specificity = self.result("specificity")
                return (recall + specificity) / 2
            raise NotImplementedError()
class ContrastiveBatchScorer(BatchScorer):
    """BatchScorer that evaluates (labels, distances) batches in TensorFlow."""

    def __init__(self, margin, *args, **kwargs):
        # Threshold(s) at which a distance is classified as "same".
        self._margin = margin
        # BUG FIX: the previous version also opened a `tf.Session()` here
        # that was never used and never closed (a resource leak); `score`
        # builds its own short-lived session instead.
        super().__init__(*args, **kwargs)

    def score(self, y_true, y_pred, metric):
        """Run contrastive_score on an isolated graph and return numpy values."""
        graph = tf.Graph()
        with tf.Session(graph=graph) as sess:
            with graph.as_default():
                return sess.run(
                    contrastive_score(
                        tf.convert_to_tensor(y_true, tf.float32),
                        tf.convert_to_tensor(y_pred, tf.float32),
                        tf.convert_to_tensor(self._margin, tf.float32),
                        metric=metric))
class TripletBatchScorer(ContrastiveBatchScorer):
    """Scores (label, embedding) batches via triplet_score."""

    def score(self, y_true, y_pred, metric):
        # Build an isolated graph per call so repeated scoring does not
        # keep growing a shared default graph.
        graph = tf.Graph()
        with tf.Session(graph=graph) as sess:
            with graph.as_default():
                return sess.run(
                    triplet_score(
                        tf.convert_to_tensor(y_true, tf.float32),
                        tf.convert_to_tensor(y_pred, tf.float32),
                        tf.convert_to_tensor(self._margin, tf.float32),
                        metric=metric))
class FlatPairBatchScorer(ContrastiveBatchScorer):
    """Scorer for embeddings stacked as [all firsts; all seconds] on axis 0."""

    def score(self, y_true, y_pred, metric):
        # y_pred holds both members of every pair, stacked along axis 0.
        assert y_pred.shape[0] == y_true.shape[0] * 2
        first_half, second_half = np.split(y_pred, 2)
        pair_distances = np.linalg.norm(first_half - second_half, axis=1)
        return super().score(y_true, pair_distances, metric)
class ContrastiveOnKerasMetric:
    """Keras-compatible callable metric wrapping contrastive_score."""

    def __init__(self, margin, metric="accuracy"):
        # Keras reads __name__ to label the metric in training logs.
        self.__name__ = "contrastive_{}".format(metric)
        self._margin = margin
        self._metric = metric

    def __call__(self, labels, embeddings):
        margin_tensor = tf.convert_to_tensor(self._margin)
        return contrastive_score(
            labels, embeddings, margin_tensor, metric=self._metric)
class TripletOnKerasMetric:
    """Keras-compatible callable metric wrapping triplet_score."""

    def __init__(self, margin, metric="accuracy"):
        # Keras reads __name__ to label the metric in training logs.
        self.__name__ = "triplet_{}".format(metric)
        self._margin = margin
        self._metric = metric

    def __call__(self, labels, embeddings):
        margin_tensor = tf.convert_to_tensor(self._margin)
        return triplet_score(
            labels, embeddings, margin_tensor, metric=self._metric)
class OfflineMetric:
    """Base class for metrics evaluated outside the Keras training loop."""

    def __init__(self, *args, **kwargs):
        # Mirror Keras' convention of naming metrics through __name__.
        self.__name__ = self.name()

    def name(self):
        """Return the metric's display name(s); subclasses must override."""
        raise NotImplementedError()

    def handle_batch(self, model, x, labels, pred):
        """Consume one batch of predictions; subclasses must override."""
        raise NotImplementedError()

    def result(self):
        """Return the accumulated metric value(s); subclasses must override."""
        raise NotImplementedError()

    def reset(self):
        """Clear accumulated state before a new evaluation pass."""
        pass
class SimilarityValidationMetric(OfflineMetric):
    """Offline validation metric for similarity models over a threshold sweep.

    Each name in `metric` is evaluated at every threshold in `margin`;
    when `argmax` is given, all metrics are reported at the threshold
    that maximizes the `argmax` metric, and that threshold is appended
    to the result tuple.
    """

    def __init__(self,
                 margin,
                 *args,
                 id="sim",
                 metric=["accuracy"],
                 argmax=None,
                 **kwargs):
        self._margin = np.array(margin)
        # argmax only makes sense over a 1-D threshold sweep and must be
        # one of the requested metrics.
        assert argmax is None or (self._margin.ndim == 1 and argmax in metric)
        self._metric = metric if isinstance(metric, list) else [metric]
        self._argmax = argmax
        self._scorer = None  # set by subclass reset()
        self._id = id
        # BUG FIX: this was `super().__init__(self, *args, **kwargs)`,
        # passing the instance to OfflineMetric.__init__ a second time as
        # a positional argument.
        super().__init__(*args, **kwargs)

    def name(self):
        """Return the tuple of log names, one per metric (plus argmax)."""
        metrics = list(
            map(lambda x: "val_{}_{}".format(self._id, x), self._metric))
        if self._argmax is not None:
            metrics.append("val_{}_argmax_{}".format(self._id, self._argmax))
        return tuple(metrics)

    def handle_batch(self, model, x, labels, pred):
        self._scorer.handle(labels, pred)

    def result(self):
        if self._argmax is None:
            # Report each metric at its own best threshold.
            metrics = map(lambda x: safe_nanmax(self._scorer.result(x)),
                          self._metric)
            return tuple(metrics)
        else:
            # Report every metric at the threshold that maximizes the
            # argmax metric, and append that threshold.
            argmax = safe_nanargmax(self._scorer.result(self._argmax))
            metrics = map(lambda x: self._scorer.result(x)[argmax],
                          self._metric)
            return tuple(metrics) + (self._margin[argmax], )
class TripletValidationMetric(SimilarityValidationMetric):
    """Validation metric backed by a TripletBatchScorer."""

    def __init__(self, *args, id="triplet", **kwargs):
        super().__init__(*args, id=id, **kwargs)

    def reset(self):
        # Fresh scorer per evaluation pass so totals restart from zero.
        self._scorer = TripletBatchScorer(self._margin)
class ContrastiveValidationMetric(SimilarityValidationMetric):
    """Validation metric backed by a ContrastiveBatchScorer."""

    def __init__(self, *args, id="contrastive", **kwargs):
        super().__init__(*args, id=id, **kwargs)

    def reset(self):
        # Fresh scorer per evaluation pass so totals restart from zero.
        self._scorer = ContrastiveBatchScorer(self._margin)
class FlatPairValidationMetric(SimilarityValidationMetric):
    """Validation metric backed by a FlatPairBatchScorer."""

    def __init__(self, *args, id="fpair", **kwargs):
        super().__init__(*args, id=id, **kwargs)

    def reset(self):
        # Fresh scorer per evaluation pass so totals restart from zero.
        self._scorer = FlatPairBatchScorer(self._margin)
|
4,963 | 79e8ed64058dda6c8d7bacc08727bc978088ad2d | # %%
# Exploratory data analysis of the winemag 130k reviews dataset using
# dataprep.eda, structured as notebook-style "# %%" cells.
import pandas as pd
import numpy as np
from dataprep.eda import plot
from dataprep.eda import plot_correlation
from dataprep.eda import plot_missing
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style="whitegrid", color_codes=True)
sns.set(font_scale=1)
# %%
# Minimal Processing: drop the CSV's unnamed index column, rows without a
# price, and exact duplicate rows.
wines = pd.read_csv("winemag-data-130k-v2.csv")
wines.columns  # NOTE: bare expression -- only displays output in a notebook
wines.drop(columns='Unnamed: 0', inplace=True)
wines.dropna(axis='index', subset=['price'], inplace=True)
wines.drop_duplicates(inplace=True)
# %%
# Overall Distribution
plot(wines)
# %% # Price Dist -> Clean
plot(wines, "price")
# %%
plot(wines, "points")
# %%
plot(wines, "price", "points")
# %%
plot_correlation(wines, "price", "points")
# %%
plot_missing(wines)
# %%
plot_missing(wines, "price", "points")
# %%
plot_correlation(wines, "price")
# %%
# END EDA
# %%
|
4,964 | d1fe06766958e8532c49d33e887d6c4996573c22 | #!/usr/bin/env python
import optparse
import os
import shutil
import sys
from AutoCrab.AutoCrab2 import core
def main():
    """Parse CLI options and dispatch each positional arg as an autocrab command.

    With -r/--recursive, CRAB job files and directories are searched
    recursively by the core handler.
    """
    parser = optparse.OptionParser()
    parser.add_option("-r", "--recursive", dest="recursive", action="store_true", help="Recursively look for CRAB job files and directories.")
    (opts, args) = parser.parse_args()
    for arg in args:
        if not core.isValidCommand(arg):
            # Parenthesized print behaves identically under Python 2 and 3
            # for a single argument, so this file no longer hard-requires
            # the Python 2 print statement.
            print("Error: unrecognized autocrab command.")
        else:
            core.doAutoCrab(arg, opts.recursive)


if __name__ == '__main__':
    main()
|
4,965 | 0f1bad350faaff6aab339944b4d24c4801fa8c64 | from itertools import count, islice
from math import sqrt
def is_prime(x):
    """Return True when x is a prime number."""
    if x < 2:
        return False
    # Trial division up to sqrt(x): any factor above the square root
    # pairs with one below it.
    for candidate in range(2, int(sqrt(x)) + 1):
        if x % candidate == 0:
            return False
    return True


def primes(x):
    """Lazily yield the first x primes (as an islice iterator)."""
    return islice((p for p in count() if is_prime(p)), x)
# Last ten of the first 1000 primes, and their sum.
print(list(primes(1000))[-10:])
print(sum(primes(1000)))
# any()/all() truth-table demonstrations.
print(any([True, True]))
print(any([True, False]))
print(any([False, False])) # is there a TRUE
print(all([True, True])) # are all of them TRUE
print(all([True, False]))
print(all([False, False]))
print("Is there a prime between 1328 and 1361:", any(is_prime(x) for x in range(1328, 1361)))
# Hourly temperature samples; Tuesday/Wednesday derived from Monday.
monday = [11, 12, 13, 14, 15, 16, 17, 17, 17, 16, 16, 15, 14, 13, 12, 11, 11]
tuesday = [x*2-10 for x in monday]
print(monday, tuesday)
# zip() pairs the readings hour by hour as tuples.
for item in zip(monday, tuesday):
    print(item, type(item))
for d1, d2 in zip(monday, tuesday):
    print(f"Hourly average is {(d1 + d2)/2}°C")
wednesday = [x*2-20 for x in tuesday]
for temps in zip(monday, tuesday, wednesday):
    print(f"min={min(temps):4.1f}\t max={max(temps):4.1f}\t avg={sum(temps)/len(temps):4.1f}")
from itertools import chain
# chain() concatenates the three lists lazily (consumed once below).
temperatures = chain(monday, tuesday, wednesday)
print(monday, tuesday, wednesday) # concatenation
print(list(temperatures)) # lazy concatenation
from md_lucas import lucas
from time import perf_counter as tc
start = tc()
# Print each prime Lucas number together with the elapsed time.
for x in (p for p in lucas() if is_prime(p)):
    print(x, "time:", tc()-start)
4,966 | 836df02495ee581f138050be6b7a7a076ea899eb | from .base import * # noqa
from .base import env
# exemple https://github.com/pydanny/cookiecutter-django/blob/master/%7B%7Bcookiecutter.project_slug%7D%7D/config/settings/production.py
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env("DJANGO_SECRET_KEY")
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
# BUG FIX: ALLOWED_HOSTS must be a flat list of hostname strings; the
# previous `[x.split(':') for x in ...]` (copied from the ADMINS
# "name:email" format) produced a list of lists, which Django's host
# validation can never match.
ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS')
# https://docs.djangoproject.com/en/dev/ref/settings/#admins
# Who to send emails to when errors arise, as "name:email" pairs.
ADMINS = [x.split(':') for x in env.list('DJANGO_ADMINS')]
# DATABASES
# ------------------------------------------------------------------------------
DATABASES["default"] = env.db("DATABASE_URL")  # noqa F405
DATABASES["default"]["ATOMIC_REQUESTS"] = True  # noqa F405
DATABASES["default"]["CONN_MAX_AGE"] = env.int("CONN_MAX_AGE", default=60)  # noqa F405
# CACHES
# ------------------------------------------------------------------------------
# With cPanel, to be determined what can be configured with
# xtremcache/varnish and django-varnish.
# NOTE: LocMemCache is per-process -- entries are not shared between
# workers; switch to the commented Redis backend for a shared cache.
CACHES = {
    "default": {
        "BACKEND": "django.core.cache.backends.locmem.LocMemCache",
        "LOCATION": "",
    },
    # "default": {
    #     "BACKEND": "django_redis.cache.RedisCache",
    #     "LOCATION": env("XTREM_CACHE_URL"),
    #     "OPTIONS": {
    #         "CLIENT_CLASS": "django_redis.client.DefaultClient",
    #         # Mimicing memcache behavior.
    #         # http://jazzband.github.io/django-redis/latest/#_memcached_exceptions_behavior
    #         "IGNORE_EXCEPTIONS": True,
    #     },
    # }
}
# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-ssl-redirect
SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-secure
SESSION_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-secure
CSRF_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/topics/security/#ssl-https
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-seconds
# TODO: set this to 60 seconds first and then to 518400 once you prove the former works
SECURE_HSTS_SECONDS = 60
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-include-subdomains
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
    "DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True
)
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-preload
SECURE_HSTS_PRELOAD = env.bool("DJANGO_SECURE_HSTS_PRELOAD", default=True)
# https://docs.djangoproject.com/en/dev/ref/middleware/#x-content-type-options-nosniff
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
    "DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True
)
|
4,967 | 33e9e45fbe0e3143d75d34c1db283c01e2693f68 | import json
import pandas as pd
import matplotlib.pyplot as plt
# Load the per-year goal records, tagging each record with its player key.
# Use a context manager so the file is closed even if json.load raises
# (the previous open/close pair leaked the handle on error).
with open('Maradona-goals.json') as f:
    jsonObject = json.load(f)

rows = []
for player_name, goal_records in jsonObject.items():
    for record in goal_records:
        record.update({'player': player_name})
        rows.append(record)

df = pd.DataFrame(rows)
labels = df["year"]
width = 0.75

# Bar chart of club vs. country goals per year.
fig = plt.figure(figsize=(16,8))
ax = fig.add_subplot(1,1,1)
ax.set_xticks(labels)
ax.set_xticklabels(labels, rotation=45)
ax.set_yticks(range(0,45))
ax.bar(labels, df["club_goals"], width, label='Club')
ax.bar(labels, df["country_goals"], width, label='Country')
#ax.grid(color='LIGHTGRAY')
ax.set_ylabel('Goals')
ax.set_xlabel('Years')
ax.set_title('Goals by year')
ax.legend()
plt.show()
|
4,968 | 1b773f2ca01f07d78d2d7edc74cd2df6630aa97a | import pandas as pd
import numpy as np
# I'm adding these to avoid any type of NA value.
missing_values = ["n/a", "na", "--", " ?","?"]
# Name		Data Type	Meas.	Description
# ----		---------	-----	-----------
# Sex		nominal			M, F, and I (infant)
# Length		continuous	mm	Longest shell measurement
# Diameter	continuous	mm	perpendicular to length
# Height		continuous	mm	with meat in shell
# Whole weight	continuous	grams	whole abalone
# Shucked weight	continuous	grams	weight of meat
# Viscera weight	continuous	grams	gut weight (after bleeding)
# Shell weight	continuous	grams	after being dried
# Rings		integer			+1.5 gives the age in years
names=["Sex", "Length", "Diameter", "Height", "Whole-w", "Shucked-w","Viscera-w", "Shell-w", "Rings"]
#reading the data
data = pd.read_csv('data/abalone.data', names=names,na_values=missing_values)
#data= pd.notnull(data)
#checking the dataset
#print(data.tail)
#print(data.shape)
#I'll split the data here since both of the questions needs same type of it.
from sklearn.model_selection import train_test_split
data = data.sample(frac=1, random_state=42) #mixing database for more random sample
#let's label the data with its ages according to
#output: class label
#(less than 8 in age belongs to class 1 (young), between 8 and 12 to class 2 (middle-aged), greater than 12 to class 3 (old))
#rings +1.5 gives the age so lets update the data accordingly
col_name='Rings'
# np.select picks the first matching condition; `.between` is inclusive
# at both ends, so Rings == 6.5 / 10.5 fall into "middle-aged".
conditions = [
    data[col_name] < 8- float(1.5), #adding 1,5 will give the age so doing like that will work as well
    data[col_name].between(6.5,(12-1.5)),
    12-1.5 < data[col_name]
]
labels = ["young","middle-aged","old"]
data['age']=np.select(conditions,labels)
# Rings is fully encoded in 'age' now, so drop it from the features.
data.drop("Rings", axis=1, inplace=True)
def tt(X, y, sample):
    """Split X/y into train/validation sets, using `sample` as the train size."""
    # Fixed random_state keeps the split reproducible across calls.
    split = train_test_split(X, y, train_size=sample, random_state=1)
    X_train, X_valid, y_train, y_valid = split
    return {
        "X_train": X_train,
        "X_valid": X_valid,
        "y_train": y_train,
        "y_valid": y_valid,
    }
4,969 | 38f7c529cd0a8d85de266c6a932e6c8342aee273 | # -*- coding: utf-8 -*-
'''
=======================================================================
AutoTest Team Source File.
Copyright(C), Changyou.com
-----------------------------------------------------------------------
Created: 2017/3/2 by ChengLongLong
-----------------------------------------------------------------------
Description:
-----------------------------------------------------------------------
History:
2017/3/2
=======================================================================
''' |
4,970 | 55ea522b096b189ff67b0da0058af777b0a910e3 | """These are views that are used for viewing and editing characters."""
from django.contrib import messages
from django.contrib.auth.mixins import UserPassesTestMixin,\
LoginRequiredMixin, PermissionRequiredMixin
from django.db import transaction
from django.db.models import F
from django.http import HttpResponseRedirect
from django.template.loader import render_to_string
from django.urls import reverse, reverse_lazy
from django.views import View
from django.views.generic.edit import FormMixin, CreateView, UpdateView
from django.views.generic import DeleteView, DetailView, FormView, ListView
from rest_framework.status import HTTP_412_PRECONDITION_FAILED
from rest_framework.authentication import SessionAuthentication
from rest_framework.permissions import BasePermission
from rest_framework.response import Response
from rest_framework.views import APIView
from talesofvalor import get_query
from talesofvalor.events.models import Event
from talesofvalor.players.models import Registration
from talesofvalor.skills.models import Header, HeaderSkill
from .models import Character
from .forms import CharacterForm, CharacterSkillForm,\
CharacterConceptApproveForm, CharacterHistoryApproveForm
class OwnsCharacter(BasePermission):
    """
    The current user is staff or owns the character that is being manipulated.
    """
    message = "You don't own this character"

    def has_object_permission(self, request, view, obj):
        # BUG FIX: DRF passes the request and view in explicitly --
        # `self.request` / `self.kwargs` do not exist on a BasePermission
        # and raised AttributeError (denying everyone) before.
        if request.user.has_perm('players.view_any_player'):
            return True
        try:
            player = Character.objects.get(pk=view.kwargs['pk']).player
            return (player.user == request.user)
        except Character.DoesNotExist:
            return False
class CharacterCreateView(LoginRequiredMixin, CreateView):
    """Create a new character, defaulting ownership to the requesting player."""
    model = Character
    form_class = CharacterForm

    def get_initial(self):
        # Get the initial dictionary from the superclass method
        initial = super(CharacterCreateView, self).get_initial()
        # Copy the dictionary so we don't accidentally change a mutable dict
        initial = initial.copy()
        # default to getting the player from the query string; fall back
        # to the requesting user's own player.
        try:
            initial['player'] = self.request.GET['player']
        except KeyError:
            initial['player'] = self.request.user.player
        # etc...
        return initial

    def get_form_kwargs(self):
        # The form needs the requesting user (e.g. to decide which fields
        # to expose).
        kwargs = super().get_form_kwargs()
        kwargs['user'] = self.request.user  # pass the 'user' in kwargs
        return kwargs

    def get_success_url(self):
        # Send the player straight to skill selection for the new character.
        return reverse(
            'characters:character_skill_update',
            kwargs={'pk': self.object.pk}
        )

    def form_valid(self, form):
        """
        If this form is valid, then add the current player to the character
        if the current user is not an admin.
        If the user doesn't have any other active characters, set this one
        to active.
        """
        if not self.request.user.has_perm('players.view_any_player'):
            form.instance.player = self.request.user.player
        if not form.instance.player.character_set.filter(active_flag=True).exists():
            form.instance.active_flag = True
        messages.info(self.request, 'New Character, "{}" created.'.format(
            form.instance.name
        ))
        return super().form_valid(form)
class CharacterUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
    """Edit an existing character; staff or the owning player only."""
    model = Character
    form_class = CharacterForm

    def test_func(self):
        # Staff may edit any character; players may only edit their own.
        if self.request.user.has_perm('players.view_any_player'):
            return True
        try:
            player = Character.objects.get(pk=self.kwargs['pk']).player
            return (player.user == self.request.user)
        except Character.DoesNotExist:
            return False
        return False

    def get_form_kwargs(self):
        kwargs = super().get_form_kwargs()
        kwargs['user'] = self.request.user  # pass the 'user' in kwargs
        return kwargs

    def get_success_url(self):
        return reverse(
            'characters:character_detail',
            kwargs={'pk': self.object.pk}
        )
class CharacterDeleteView(
        PermissionRequiredMixin,
        UserPassesTestMixin,
        DeleteView
):
    """
    Removes a character permanently.

    Removing a character may have strange effects on other views.
    """
    model = Character
    permission_required = ('characters.change_character', )
    success_url = reverse_lazy('characters:character_list')

    def test_func(self):
        # Staff may delete any character; players may only delete their own.
        if self.request.user.has_perm('players.view_any_player'):
            return True
        try:
            player = Character.objects.get(pk=self.kwargs['pk']).player
            return (player.user == self.request.user)
        except Character.DoesNotExist:
            return False
        return False
class CharacterResetView(
        PermissionRequiredMixin,
        UserPassesTestMixin,
        View
):
    """
    Resets a character's skills to none and returns their points to them.
    """
    model = Character
    permission_required = ('characters.change_character', )
    success_url = reverse_lazy('characters:character_list')

    def test_func(self):
        # Staff may reset any character; players may only reset their own.
        if self.request.user.has_perm('players.view_any_player'):
            return True
        try:
            player = Character.objects.get(pk=self.kwargs['pk']).player
            return (player.user == self.request.user)
        except Character.DoesNotExist:
            return False
        return False

    def get(self, request, *args, **kwargs):
        """
        Send the user back to the originating page or back to the
        character they are resetting.
        """
        # All-or-nothing: refund the spent CP and clear skills/headers
        # together so a failure can't leave the character half-reset.
        with transaction.atomic():
            character = self.model.objects.get(pk=self.kwargs['pk'])
            character.cp_available += character.cp_spent
            character.cp_spent = 0
            character.save(update_fields=['cp_available', 'cp_spent'])
            character.characterskills_set.all().delete()
            character.headers.clear()
        messages.info(self.request, 'Character skills reset for {}.'.format(
            character.name
        ))
        return HttpResponseRedirect(
            self.request.META.get(
                'HTTP_REFERER',
                reverse(
                    'characters:character_detail',
                    kwargs={'pk': self.kwargs['pk']}
                )
            )
        )
class CharacterSetActiveView(
        LoginRequiredMixin,
        UserPassesTestMixin,
        View
):
    """
    Set the active character for the character's player to the sent id.
    """
    model = Character
    fields = '__all__'

    def test_func(self):
        # Staff may activate any character; players only their own.
        if self.request.user.has_perm('players.view_any_player'):
            return True
        try:
            player = Character.objects.get(pk=self.kwargs['pk']).player
            return (player.user == self.request.user)
        except Character.DoesNotExist:
            return False
        return False

    def get(self, request, *args, **kwargs):
        """
        Send the user back to the originating page or back to the
        character they are setting active.
        """
        character = self.model.objects.get(pk=self.kwargs['pk'])
        # Only one character per player may be active at a time: clear the
        # flag across the player's characters, then set it here.
        character.player.character_set.update(active_flag=False)
        character.active_flag = True
        character.save()
        messages.info(self.request, 'Active Character changed to {}.'.format(
            character.name
        ))
        return HttpResponseRedirect(
            self.request.META.get(
                'HTTP_REFERER',
                reverse(
                    'characters:character_detail',
                    kwargs={'pk': self.kwargs['pk']}
                )
            )
        )
class CharacterSkillUpdateView(
        LoginRequiredMixin,
        UserPassesTestMixin,
        FormMixin,
        DetailView):
    """
    Allow a user to update their chosen skills.
    """
    template_name = 'characters/character_skill_form.html'
    form_class = CharacterSkillForm
    model = Character

    def test_func(self):
        # Staff may edit skills of any character; players only their own.
        if self.request.user.has_perm('players.view_any_player'):
            return True
        try:
            player = Character.objects.get(pk=self.kwargs['pk']).player
            return (player.user == self.request.user)
        except Character.DoesNotExist:
            return False

    def get_success_url(self):
        return reverse(
            'characters:character_detail',
            kwargs={'pk': self.object.pk}
        )

    def get_form_kwargs(self):
        kwargs = super().get_form_kwargs()
        # Hidden headers sort last so the visible ones lead the form.
        self.skills = Header.objects\
            .order_by('hidden_flag', 'category', 'name')\
            .all()
        kwargs.update({'skills': self.skills})
        return kwargs

    def get_context_data(self, **kwargs):
        # BUG FIX: this forwarded **self.kwargs (the URL kwargs) instead of
        # the context kwargs the framework passes in.
        context = super().get_context_data(**kwargs)
        # remove skills not in the hash.
        available_skills = self.object.skillhash.keys()
        # BUG FIX: the permission is namespaced 'players.' everywhere else
        # in this module; 'player.view_any_player' could never match.
        context['skills'] = filter(
            lambda x: x.id in available_skills
            or self.request.user.has_perm('players.view_any_player'),
            self.skills)
        context['skill_hash'] = self.object.skillhash
        # add the bare skills granted by the rules
        context['granted_skills'] = self.object.skill_grants
        return context

    def post(self, request, *args, **kwargs):
        self.object = self.get_object()
        form = self.get_form()
        if form.is_valid():
            return self.form_valid(form)
        else:
            return self.form_invalid(form)

    def form_valid(self, form):
        """
        Form is valid. Save the skills to that character and remove the
        appropriate number of character points.
        """
        return super().form_valid(form)
class ResetPointsView(
        PermissionRequiredMixin,
        View
):
    """
    Resets the points for the season.
    """
    permission_required = ('characters.reset_points', )

    def get(self, request, *args, **kwargs):
        """
        Send the user back to the originating page or back to the main
        page if the referrer isn't set.
        """
        # Zero every character's transferred-CP counter in a single UPDATE.
        Character.objects.all().update(cp_transferred=0)
        messages.info(self.request, 'Point cap reset!')
        return HttpResponseRedirect(
            self.request.META.get(
                'HTTP_REFERER',
                '/'
            )
        )
'''
Put the AJAX work for Characters here
'''
class CharacterAddHeaderView(APIView):
    '''
    AJAX endpoint: buy a header for a character.

    On success the header's CP cost is deducted and the rendered skill
    list for the new header is returned for insertion into the page.
    '''
    authentication_classes = [SessionAuthentication]
    permission_classes = [OwnsCharacter]

    def post(self, request, format=None):
        header_id = int(request.POST.get('header_id', 0))
        character_id = int(request.POST.get('character_id', 0))
        # NOTE(review): cp_available comes from the client; consider using
        # the server-side character.cp_available instead -- confirm.
        cp_available = int(request.POST.get('cp_available', 0))
        # get the character and then see if the header is allowed
        header = Header.objects.get(pk=header_id)
        character = Character.objects.get(pk=character_id)
        # Default to error.
        content = {
            'error': "prerequisites not met"
        }
        status = None
        # if the prerequisites are met, add the header to the user and return
        # the list of skills
        if character.check_header_prerequisites(header):
            # see if the character has enough points to add the header
            if (cp_available - header.cost) >= 0:
                character.cp_available -= header.cost
                character.cp_spent += header.cost
                character.headers.add(header)
                character.save()
                # Render the header's skill rows so the client can splice
                # them into the skill form without a full reload.
                skill_item_template_string = render_to_string(
                    "characters/includes/character_skill_update_item.html",
                    {
                        'header': header,
                        'header_skills': header.skills.all(),
                        'header_costs': character.skillhash[header.id]
                    },
                    request
                )
                # Negative delta: the client subtracts the cost from its
                # displayed available CP.
                content = {
                    'success': header.cost * -1,
                    'skills': skill_item_template_string
                }
            else:
                content = {
                    'error': "You don't have enough points available for this character to add this header."
                }
                status = HTTP_412_PRECONDITION_FAILED
        else:
            status = HTTP_412_PRECONDITION_FAILED
        return Response(content, status)
class CharacterDropHeaderView(APIView):
    '''
    AJAX endpoint: drop a previously bought header from a character and
    refund its CP cost.
    '''
    authentication_classes = [SessionAuthentication]
    permission_classes = [OwnsCharacter]

    def post(self, request, format=None):
        header_id = int(request.POST.get('header_id', 0))
        character_id = int(request.POST.get('character_id', 0))
        # get the character and header
        header = Header.objects.get(pk=header_id)
        character = Character.objects.get(pk=character_id)
        # Default to error.
        content = {
            'error': "Header is not already bought!"
        }
        status = None
        content['header_list'] = []
        # if the character has the header, drop it and refund the CP
        if header in character.headers.all():
            character.cp_available += header.cost
            character.cp_spent -= header.cost
            character.headers.remove(header)
            # BUG FIX: the CP refund was never persisted -- unlike the add
            # view, this path never called save(), so cp_available/cp_spent
            # reverted on the next load. (Also removed a leftover debug
            # print and an unused template render.)
            character.save()
            # Positive delta: the client adds the refund to its displayed
            # available CP.
            content = {
                'success': header.cost,
            }
        else:
            status = HTTP_412_PRECONDITION_FAILED
        return Response(content, status)
class CharacterAddSkillView(APIView):
    '''
    AJAX endpoint: buy (vector=+1) or refund (vector=-1) one rank of a
    skill for a character.
    '''
    authentication_classes = [SessionAuthentication]
    permission_classes = [OwnsCharacter]

    def post(self, request, format=None):
        skill_id = int(request.POST.get('skill_id', 0))
        header_id = int(request.POST.get('header_id', 0))
        character_id = int(request.POST.get('character_id', 0))
        cp_available = int(request.POST.get('cp_available', 0))
        try:
            vector = int(request.POST.get('vector'))
        except (TypeError, ValueError):
            # BUG FIX: int(None) raises TypeError and int('junk') raises
            # ValueError -- the old `except AttributeError` never matched.
            # Also return a real DRF Response instead of a bare dict.
            return Response(
                {'error': "No change indicated"},
                HTTP_412_PRECONDITION_FAILED
            )
        # get the character and then see if the skill is allowed
        header_skill = HeaderSkill.objects.get(skill_id=skill_id, header_id=header_id)
        character = Character.objects.get(pk=character_id)
        # check that the skill is allowed.
        # if the prerequisites are met, update the skill count and return
        # the CP delta; otherwise, return an error.
        content = {
            'success': "testing right now"
        }
        status = None
        if character.check_skill_prerequisites(header_skill.skill, header_skill.header):
            # since vector is the direction, we want to reverse it when
            # dealing with what we want to change for the available points
            # see if the character has enough points to add the skill
            cost = character.skill_cost(header_skill) * vector
            if (cp_available - cost) >= 0:
                # when this is returned, change the available costs
                (character_skill, created) = character.characterskills_set.get_or_create(
                    skill=header_skill
                )
                if character_skill.count and (character_skill.count + vector < 0):
                    content = {
                        'error': f"You don't have any points in {header_skill.skill}"
                    }
                    status = HTTP_412_PRECONDITION_FAILED
                else:
                    content = {
                        'success': cost * -1
                    }
                    # F() expressions keep the updates atomic at the DB level.
                    character_skill.count = F('count') + vector
                    character_skill.save()
                    character.cp_spent = F('cp_spent') + cost
                    character.cp_available = F('cp_available') - cost
                    character.save()
            else:
                content = {
                    'error': "You don't have enough points available to purchase this skill . . ."
                }
                status = HTTP_412_PRECONDITION_FAILED
        else:
            status = HTTP_412_PRECONDITION_FAILED
        return Response(content, status)
class CharacterDetailView(LoginRequiredMixin, UserPassesTestMixin, DetailView):
    """
    Show the details for a character.

    From here you can edit the details of a character or choose skills.
    """
    model = Character
    fields = '__all__'

    def test_func(self):
        # Staff may view any character; players may only view their own.
        if self.request.user.has_perm('players.view_any_player'):
            return True
        try:
            player = Character.objects.get(pk=self.kwargs['pk']).player
            return (player.user == self.request.user)
        except Character.DoesNotExist:
            return False
        return False
class CharacterConceptApproveView(PermissionRequiredMixin, FormView):
    """
    Approve the concept for a character.

    Grant the CP for the character.
    Set the concept approved flag.
    """
    permission_required = 'players.change_any_player'
    form_class = CharacterConceptApproveForm

    def form_valid(self, form):
        # Award the 3 CP concept bonus to the owning player, then flag the
        # concept as approved.
        self.object = Character.objects.get(pk=form.cleaned_data['character_id'])
        self.object.player.cp_available += 3
        self.object.player.save(update_fields=['cp_available'])
        self.object.concept_approved_flag = True
        self.object.save(update_fields=['concept_approved_flag'])
        messages.info(self.request, f"{self.object} concept approved!")
        return super().form_valid(form)

    def form_invalid(self, form):
        # Surface each validation error as a flash message and bounce the
        # user back to the character detail page.
        self.object = Character.objects.get(pk=form.cleaned_data['character_id'])
        for key, error in form.errors.items():
            messages.error(self.request, error.as_text())
        return HttpResponseRedirect(reverse(
            'characters:character_detail',
            kwargs={'pk': self.object.pk}
        ))

    def get_success_url(self):
        return reverse(
            'characters:character_detail',
            kwargs={'pk': self.object.pk}
        )
class CharacterHistoryApproveView(PermissionRequiredMixin, FormView):
    """
    Approve the history for a character.

    Grant the CP for the character.
    Set the history approved flag.
    """
    permission_required = 'players.change_any_player'
    form_class = CharacterHistoryApproveForm

    def form_valid(self, form):
        # Award the 3 CP history bonus to the owning player, then flag the
        # history as approved.
        self.object = Character.objects.get(pk=form.cleaned_data['character_id'])
        self.object.player.cp_available += 3
        self.object.player.save(update_fields=['cp_available'])
        self.object.history_approved_flag = True
        self.object.save(update_fields=['history_approved_flag'])
        messages.info(self.request, f"{self.object} history approved!")
        return super().form_valid(form)

    def form_invalid(self, form):
        # Surface each validation error as a flash message and bounce the
        # user back to the character detail page.
        self.object = Character.objects.get(pk=form.cleaned_data['character_id'])
        for key, error in form.errors.items():
            messages.error(self.request, error.as_text())
        return HttpResponseRedirect(reverse(
            'characters:character_detail',
            kwargs={'pk': self.object.pk}
        ))

    def get_success_url(self):
        return reverse(
            'characters:character_detail',
            kwargs={'pk': self.object.pk}
        )
class CharacterListView(LoginRequiredMixin, ListView):
    """
    Show the list of characters.
    From here, you can view, edit, delete a character.
    """
    model = Character
    paginate_by = 25
    def get_queryset(self):
        """Filter characters by a free-text ``criteria`` query string and by
        the two approval flags from GET parameters.

        NOTE(review): the flag filters test the raw GET string for
        truthiness, so ``?history_approved_flag=false`` still filters to
        approved characters -- confirm the frontend only sends the parameter
        when the box is checked.
        """
        queryset = super().get_queryset()
        criteria = self.request.GET.get('criteria', '')
        if (criteria.strip()):
            # get_query builds an OR'd icontains query across these fields.
            entry_query = get_query(
                criteria,
                ['name', 'description', 'concept', 'history', 'player_notes']
            )
            queryset = queryset.filter(entry_query)
        history_approved_flag = self.request.GET.get('history_approved_flag', False)
        if history_approved_flag:
            queryset = queryset.filter(history_approved_flag=True)
        concept_approved_flag = self.request.GET.get('concept_approved_flag', False)
        if concept_approved_flag:
            queryset = queryset.filter(concept_approved_flag=True)
        return queryset
    def get_context_data(self, **kwargs):
        '''
        Add the current GET parameters to the context so the filter form can
        re-render with its previous values.
        '''
        # get the context data to add to.
        context_data = super().get_context_data(**kwargs)
        context_data.update(**self.request.GET)
        # return the resulting context
        return context_data
class CharacterPrintListView(LoginRequiredMixin, ListView):
    """
    Show a list of characters to print.

    Limited to active, non-NPC characters whose players are registered for
    the requested event (or the next upcoming event when none is given).
    """
    model = Character
    template_name = "characters/character_print_list.html"
    def get_queryset(self):
        """Return printable characters for the event in the URL kwargs."""
        queryset = super().get_queryset()  # filter by event
        event_id = self.kwargs.get('event_id', None)
        if not event_id:
            # Default to the next scheduled event.
            event_id = Event.next_event().id
        player_ids = Registration.objects.filter(event__id=event_id).values_list('player_id', flat=True)
        queryset = queryset.filter(player__id__in=player_ids, npc_flag=False, active_flag=True)
        return queryset
|
4,971 | c5842b17b2587149cd13448593a6ed31b091ba77 | import sys
from sklearn.svm import SVC
from sklearn.model_selection import KFold,cross_validate,GridSearchCV
from data_prepr import data_preprocessing
import numpy as np
def main():
    """Tune SVM hyperparameters with a cross-validated grid search.

    Usage: ``python svm_parameter_tuning.py <input_file>``

    Loads the dataset via data_preprocessing, grid-searches RBF and linear
    SVMs with 10-fold CV, prints the best score and parameters, and writes
    them as a dict literal (HYPERPARAMETER_VALUES) to
    ./hyperparameter_values.py.

    Fixes vs. original: Python 2-only ``print`` statements and
    ``dict.iteritems()`` replaced with Python 3 equivalents, and the output
    file is now closed deterministically via a context manager.
    """
    # if dataset is not provided on call terminate
    if len(sys.argv) < 2:
        print("usage: python svm_parameter_tuning.py <input_file> ")
        sys.exit()
    # pass dataset and get the matrix containing the data vectors and targets
    ret_value = data_preprocessing(sys.argv[1])
    data_matrix = ret_value[0]
    category_labels = ret_value[1]
    # k-fold iterator used to score each parameter combination
    k_fold = KFold(n_splits=10)
    # grid search over powers of two, the usual range for SVM C/gamma tuning
    c_range = [np.power(2.0, i) for i in range(-5, 10)]
    gamma_range = [np.power(2.0, i) for i in range(-10, -5)]
    param_grid = [
        {'kernel': ['rbf'], 'gamma': gamma_range, 'C': c_range},
        {'kernel': ['linear'], 'C': c_range},
    ]
    clf = GridSearchCV(SVC(), param_grid, cv=k_fold, scoring='accuracy', n_jobs=-1)
    clf.fit(data_matrix, category_labels)
    # print chosen hyperparameters
    print("Best accuracy achieved:" + str(clf.best_score_) + " with below settings.")
    for key, value in clf.best_params_.items():
        print(key + ":" + str(value))
    # save best hyperparameter values on a dictionary in hyperparameter_values.py;
    # repr() of the dict produces the same 'key': value literal the original
    # built by hand.
    with open('./hyperparameter_values.py', 'w') as output:
        output.write('HYPERPARAMETER_VALUES=' + repr(clf.best_params_))
if __name__ == '__main__':
main() |
4,972 | 3eeed39bf775e2ac1900142b348f20d15907c6e6 | """
@author Lucas
@date 2019/3/29 21:46
"""
# Binary search over a sorted list
def search(nums, target):
    """Return the index of target in the sorted list nums, or -1 if absent.

    Classic iterative binary search: O(log n) comparisons, O(1) space.
    """
    lo, hi = 0, len(nums) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        if nums[mid] < target:
            lo = mid + 1
        elif nums[mid] > target:
            hi = mid - 1
        else:
            return mid
    return -1
if __name__ == '__main__':
    # Smoke test: expected output is 5 (index of 12 in the sorted list).
    print(search([-1, 0, 3, 5, 9, 12], 12))
|
4,973 | d8e2613b45b3f4a24db0b07a01061c6057c9feed | from lredit import *
# customization of MainWindow
def configure(window):
    """Customize the LREdit MainWindow at startup.

    Sets editor display/behaviour options, key bindings, the extension and
    user menus, user-defined commands, filename-to-mode associations, and
    registers an extra "ini" editing mode.

    :param window: the MainWindow instance LREdit passes to this hook.
    """
    #----------------------------------------------
    # Generic edit config
    # tab width and indent width
    Mode.tab_width = 4
    # make TAB character visible
    Mode.show_tab = True
    # make space character visible
    Mode.show_space = False
    # make full-width space character visible
    Mode.show_wspace = True
    # make line-end visible
    Mode.show_lineend = True
    # make end-of-fileline visible
    Mode.show_fileend = True
    # cancel selection when text is copied into clipboard
    Mode.cancel_selection_on_copy = False
    # copy current line if text is not selected
    Mode.copy_line_if_not_selected = True
    # cut current line if text is not selected
    Mode.cut_line_if_not_selected = True
    #----------------------------------------------
    # Specific mode config
    # use space character instead of TAB
    PythonMode.tab_by_space = True
    #----------------------------------------------
    # key binding
    # F3 : search next
    window.keymap[ "F3" ] = window.command.SearchNext
    # Shift-F3 : search previous
    window.keymap[ "S-F3" ] = window.command.SearchPrev
    #----------------------------------------------
    # extension menu
    window.ext_menu_items = [
        ( "Another Pane",     "C-W", window.command.AnotherPane ),
        ( "Project Files",    "C-P", window.command.ProjectFileList ),
        ( "Recent Files",     "C-H", window.command.RecentFileList ),
        ( "Bookmark List",    "C-M", window.command.BookmarkList ),
        ( "Document List",    "C-D", window.command.DocumentList ),
        ( "Outline Analysis", "C-O", window.command.Outline ),
        ( "Search Result",    "C-S", window.command.SearchResultList ),
    ]
    #----------------------------------------------
    # user defined command
    def command_MyTool1(info):
        # print to log pane
        print( "Hello World!" )
    def command_MyTool2(info):
        # insert text into active edit
        edit = window.activeEditPane().edit
        edit.modifyText( text="Hello World!" )
    window.launcher.command_list += [
        ( "MyTool1", command_MyTool1 ),
        ( "MyTool2", command_MyTool2 ),
    ]
    #----------------------------------------------
    # user menu
    def command_MyMenu(info):
        items = [
            ( "My Tool 1", "C-1", command_MyTool1 ),
            ( "My Tool 2", "C-2", command_MyTool2 ),
        ]
        window.menu( None, items )
    window.keymap[ "C-T" ] = command_MyMenu
    #----------------------------------------------
    # customization of menu bar
    # add [Tool] > [Extra]
    window.insertMenu( ("tool","custom_tools_end"),
        MenuNode(
            "extra", "&Extra",
            items=[
                MenuNode( "focus_left",  "Focus to &Left",  window.command.FocusLeftEdit ),
                MenuNode( "focus_right", "Focus to &Right", window.command.FocusRightEdit ),
            ]
        )
    )
    # open specified document
    class command_SwitchDocument:
        def __init__( self, doc ):
            self.doc = doc
        def __call__( self, info ):
            window.activeOpen( doc=self.doc )
    # function to display opened documents
    def menuitems_Documents():
        items = []
        i = 0
        items.append( MenuNode( separator=True ) )
        for edit in window.edit_list:
            name = edit.doc.getName()
            items.append( MenuNode( "doc_%d"%i, name, command_SwitchDocument(edit.doc) ) )
            i+=1
        items.append( MenuNode( separator=True ) )
        return items
    # add menu items of documents at the bottom of [View] menu
    window.appendMenu( ("view",), menuitems_Documents )
    #----------------------------------------------
    # misc tools
    # remove continuing overlapped lines
    def command_Unique(info):
        edit = window.activePane().edit
        # one-element list so the nested filter callback can rebind the value
        previous_line = [None]
        def func( text, info ):
            if previous_line[0]==text:
                return False
            else:
                previous_line[0]=text
                return True
        edit.filterLines(func)
    # search by previous condition and bookmark the found lines
    def command_SearchAndBookmark(info):
        if not window.search_object: return
        edit = window.activePane().edit
        point = edit.pointDocumentBegin()
        count = 0
        while point:
            point = edit.search( search_object=window.search_object, point=point, direction=1, move_cursor=False, select=False, hitmark=False, paint=False, message=False )
            if point:
                edit.bookmark( point.line, [ 1 ], paint=False )
                # continue searching from the start of the next line
                point.line += 1
                point.index = 0
                count += 1
        msg = "found %d lines" % ( count )
        window.setStatusMessage( msg, 3000 )
        window.paint()
    window.launcher.command_list += [
        ( "Unique", command_Unique ),
        ( "SearchAndBookmark", command_SearchAndBookmark ),
    ]
    #----------------------------------------------
    # association between filename pattern and mode
    # NOTE: patterns are tried in order; the final "*" entry is the fallback.
    window.fileext_list = [
        ( "*.ini", "ini" ),
        ( "*.py *.pyw *.pys", "python" ),
        ( "*.pl", "perl" ),
        ( "*.js", "javascript" ),
        ( "*.cpp *.cc *.cxx *.hpp *.hh *.hxx *.h", "c++" ),
        ( "*.c *.h", "c" ),
        ( "*.mm *.h", "objective-c++" ),
        ( "*.m *.h", "objective-c" ),
        ( "*.cs", "c#" ),
        ( "*.java", "java" ),
        ( "*.vert *.frag *.geo", "glsl" ),
        ( "*.xml", "xml" ),
        ( "*.html *.htm", "html" ),
        ( "makefile *.mk", "makefile" ),
        ( "*.bat", "batch" ),
        ( "*.sql", "sql" ),
        ( "*", "text" ),
    ]
    #----------------------------------------------
    # add mode
    # lexer class of Ini file
    class IniLexer(RegexLexer):
        def __init__(self):
            RegexLexer.__init__(self)
            self.rule_map['root'] = [
                (r'\s+', Token_Text),
                (r'[;#].*?$', Token_Comment),
                (r'\[.*?\]$', Token_Keyword),
                (r'(.*?)([ \t]*=[ \t]*)(.*?)$', ( Token_Name, Token_Text, Token_String) ),
                (None, Token_Text)
            ]
    # mode definition of Ini file
    class IniMode(Mode):
        name = "ini"
        def __init__(self):
            Mode.__init__(self)
            self.lexer = IniLexer()
            self.completion = WordCompletion()
        @staticmethod
        def staticconfigure(window):
            Mode.staticconfigure(window)
            callConfigFunc("staticconfigure_IniMode",window)
        def configure( self, edit ):
            Mode.configure( self, edit )
            callConfigFunc("configure_IniMode",self,edit)
    # add ini file mode
    window.mode_list.append( IniMode )
    # association of ini filename pattern
    window.fileext_list.insert( 0, ( "*.ini", "ini" ) )
    #----------------------------------------------
    # customization of PythonMode
    # NOTE(review): these two hooks are defined inside configure(); if
    # callConfigFunc looks them up at module level they will never be found
    # here -- confirm they are meant to be module-level functions.
    # configuration of PythonMode object (such as key binding)
    def configure_PythonMode( mode, edit ):
        # F6 : output 'Hello' in log pane
        def command_Print( info ):
            print( "Hello" )
        edit.keymap[ "F6" ] = command_Print
    # static configuration of PythonMode class (such as menu bar customization)
    def staticconfigure_PythonMode(window):
        # command to insert 'Hello Python!' into active edit area
        def command_InsertHello(info):
            edit = window.activeEditPane().edit
            edit.modifyText( text="Hello Python!" )
        # function to check if the active edit area is Python mode
        def isPythonMode():
            return isinstance( window.activeEditPaneMode(), PythonMode )
        # add menu item to display only when active mode is Python mode
        window.insertMenu( ("tool","custom_tools_end"), MenuNode( "insert_hello", "Insert &Hello", command_InsertHello, visible=isPythonMode ) )
|
4,974 | 953186a330ae9dff15c037b556746590d748c7ad | from django import forms
class SignupAliasForm(forms.Form):
    """Form collecting a user alias plus the e-mail secret authorizing it."""
    alias = forms.CharField(max_length=20, required=True)
    email_secret = forms.CharField(max_length=100, required=True)
4,975 | f831b77850dfe22232092f66705e36970828a75b | import os
import re
import click
import pandas as pd
from pymongo import MongoClient
from pathlib import Path, PurePath
def extract_dir_name(input_file):
    """Return *input_file* as a string with its final extension removed.

    Used to derive the per-workbook output directory: ``'book.xlsx'`` ->
    ``'book'``, ``'a.tar.gz'`` -> ``'a.tar'``.

    Fixes vs. original: the old ``'.'.join(fname.split('.')[:-1])`` split on
    every dot in the whole path, so an extensionless name yielded ``''`` and
    a dotted *directory* name was truncated. ``os.path.splitext`` only
    strips a trailing extension from the last path component.

    :param input_file: file name or path (str or path-like)
    :return: the path without its final extension
    """
    fname = str(PurePath(input_file))
    root, _ext = os.path.splitext(fname)
    return root
def prep_file_name(path, file):
    """Build a sanitized output name from the original path and file name.

    * joins path and file with ``~``
    * converts to lowercase
    * replaces spaces with underscores
    * strips every character outside ``a-z 0-9 - _ ! . ~``

    Fixes vs. original: the character-class pattern is now a raw string; the
    old non-raw ``'\\-'`` escape triggers a DeprecationWarning (and is a
    SyntaxError in future Python versions). The matched set is unchanged.

    :param path: the path part of the new file name
    :param file: the original file name
    :return: sanitized name
    """
    name = str(path) + '~' + str(file)
    name = name.lower()
    name = name.replace(' ', '_')
    name = re.sub(r'[^a-z0-9\-_!.~]+', '', name)
    return name
def open_dir(input_path, patterns):
    """
    Lazily yield every file under *input_path* matching one of *patterns*.

    :param input_path: the starting directory to search recursively
    :param patterns: file extensions to glob over (e.g. ['xls', 'csv'])
    :return: generator of pathlib.Path objects for all found files
    """
    base = Path(input_path)
    for extension in patterns:
        yield from base.glob('**/*.' + extension)
def shred_sheets(subdomain, audit_date, input_file, _format):
    """
    Open an Excel workbook and export every sheet to the requested format(s).

    :param subdomain: constant column value appended to every row
    :param audit_date: constant column value appended to every row
    :param input_file: the path to the excel book
    :param _format: 'mongo', 'json', 'csv', or 'all' ('all' covers json+csv,
        not mongo)
    :return: None; output goes to files beside the workbook or into MongoDB
    """
    name = extract_dir_name(input_file)
    fname = PurePath(input_file).name.__str__()
    try:
        os.makedirs(name)
    except:  # NOTE(review): bare except intended for "already exists", but also hides real failures (permissions etc.)
        pass
    wb = pd.ExcelFile(input_file)
    for ws in wb.sheet_names:
        data = pd.read_excel(input_file, sheet_name=ws)
        # add constants
        data.index.names = ['ix']
        data['subdomin'] = subdomain  # NOTE(review): 'subdomin' looks like a typo for 'subdomain'; downstream consumers may depend on it -- confirm before renaming
        data['audit_date'] = audit_date
        # strip chars we don't want in colum names
        cols = data.columns
        renamed = []
        for col in cols:
            col = re.sub('[^a-zA-Z0-9]', '', col)
            renamed.append(col)
        data.columns = renamed
        # build output formats
        if _format == 'mongo':
            # NOTE(review): MongoDB host, database and collection are hard-coded.
            client = MongoClient('mongodb://localhost:27017/')
            db = client.Sitebulb
            cl = db.August5
            try:
                cl.insert_many(data.to_dict('records'))
            except Exception as e:
                click.secho(f'\nERROR in [{input_file},{ws}] -- {e}', fg='red')
                continue
        if _format == 'json' or _format == 'all':
            try:
                new_file = os.path.join(name, fname + '~' + ws + '.json')
                data.to_json(new_file, orient="records")
            except Exception as e:
                click.secho(f'\nERROR in [{input_file},{ws}] -- {e}', fg='red')
                continue
        if _format == 'csv' or _format == 'all':
            try:
                new_file = os.path.join(name, fname + '~' + ws + '.csv')
                data.to_csv(new_file)
            except Exception as e:
                click.secho(f'\nERROR in [{input_file},{ws}] -- {e}', fg='red')
                continue
|
4,976 | a1f0eced5d122fe8557ebc4d707c87b4194513e3 | import subprocess
import logging
import time
import argparse
import threading
import os
import matplotlib.pyplot as plt
import numpy as np
import argparse
def runWeka(wekapath, modelpath, datapath):
    """Run a saved Weka MultilayerPerceptron model on a test set.

    :param wekapath: Weka installation directory (must contain weka.jar)
    :param modelpath: serialized model passed to Weka via -l
    :param datapath: test set passed to Weka via -T
    :return: raw stdout of the Weka process (bytes under Python 3)
    """
    os.chdir(wekapath)  # NOTE(review): changes the process-wide cwd as a side effect
    proc = subprocess.Popen(['/usr/bin/java', '-classpath', 'weka.jar', 'weka.classifiers.functions.MultilayerPerceptron', '-l', modelpath, '-T', datapath, '-p', '0'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = proc.communicate()
    return out
"""
Test offline trained model in Weka on a test set
"""
if __name__ == '__main__':
    #Input arguments
    my_arg_parser = argparse.ArgumentParser()
    my_arg_parser.add_argument("-p","--weka-path", help="Path to Weka application folder", dest="wekapath")
    my_arg_parser.add_argument("-m","--weka-model", help="Path to Weka serialized model", dest="modelpath")
    my_arg_parser.add_argument("-d","--weka-dataset", help="Path to testset", default="", dest="datapath")
    my_args = my_arg_parser.parse_args()
    #wekapath="/home/mkulin/Desktop/eWINE/Experiments/Repository/Testing/Weka_stable-3-6/weka/"
    #modelpath="/home/mkulin/Desktop/eWINE/Experiments/Repository/Testing/Neural_network_MACperf_prediction.model"
    #datapath="/home/mkulin/Desktop/eWINE/Experiments/Repository/Testing/802_15_4_perf_30s_testset_Weka.csv"
    predictions=runWeka(my_args.wekapath, my_args.modelpath, my_args.datapath)
    # Parse Weka's "-p 0" prediction dump: skip the 5 header lines, then each
    # row is "<instance> <actual> <predicted> <error>".
    # NOTE(review): runWeka returns bytes under Python 3, so split('\n')
    # assumes a Python 2 interpreter -- confirm, or decode() first.
    k=1
    matrix = []
    for row in predictions.split('\n'):
        if k<6:
            k=k+1
            continue
        else:
            if row=='':
                continue
            instance, actual, predicted, error=row.split()
            matrix.append([int(instance), float(actual), float(predicted)])
    matrix=np.array(matrix)
    matrix[:,2][matrix[:,2]<0]=0 #disable negative predictions
    #Visualize results: actual vs predicted packet loss rate per instance
    plt.style.use('ggplot')
    f=plt.figure(1)
    plt.plot(matrix[:,0], matrix[:,1], label='actual', color='red')
    plt.plot(matrix[:,0], matrix[:,2], label='predicted', color='royalblue')
    plt.xlabel('Instance number')
    plt.ylabel('Packet Loss Rate')
    plt.grid(True)
    plt.legend(loc=1)
    plt.show()
|
4,977 | 3f5ae2b25fc506b980de3ee87c952ff699e10003 | import numpy as np
import cv2
import sys
import math
# Detect and pair up Harris corners of the target field in a test image.
#
# Fixes vs. original: ported Python 2-only syntax to Python 3
# (`print` statement -> print(), `sys.maxint` -> `sys.maxsize`), replaced
# `np.int0` (removed in NumPy 2.0) with an explicit `astype(np.intp)`, and
# removed dead commented-out code.
cap = cv2.VideoCapture(0)

while True:
    # The camera frame is read but immediately replaced by a static test
    # image; the read is kept so the capture buffer keeps draining.
    _, img = cap.read()
    img = cv2.imread("zielfeld_mit_Zeugs_2.png")

    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

    ret, thresh1 = cv2.threshold(gray, 70, 255, cv2.THRESH_BINARY)
    th2 = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 11, 2)
    th3 = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)

    # Mask pixels whose HSV channels all lie in [1, upper].
    upper = 120
    lower_red = np.array([1, 1, 1])
    upper_red = np.array([upper, upper, upper])
    mask = cv2.inRange(hsv, lower_red, upper_red)
    maskedImg = cv2.bitwise_and(img, img, mask=mask)
    thresh1_d = cv2.dilate(maskedImg, None)
    thresh1_e = cv2.erode(thresh1_d, None)

    # Harris corner response; dilated only for marking the corners.
    dst = cv2.cornerHarris(np.float32(gray), 2, 3, 0.04)
    dst = cv2.dilate(dst, None)
    edge_indices = np.transpose(np.where(dst >= 0.01 * dst.max()))
    ret, dst = cv2.threshold(dst, 0.01 * dst.max(), 255, 0)
    dst = np.uint8(dst)

    # Find centroids of the corner blobs and refine to sub-pixel accuracy.
    ret, labels, stats, centroids = cv2.connectedComponentsWithStats(dst)
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.001)
    corners = cv2.cornerSubPix(gray, np.float32(centroids), (5, 5), (-1, -1), criteria)

    # Stack centroid/refined coordinates and sort the candidates by y.
    res = np.hstack((centroids, corners))
    res = res.astype(np.intp)  # was np.int0, removed in NumPy 2.0
    res = res[res[:, 1].argsort()]

    # Walk the candidates, chaining each corner to its nearest neighbour;
    # a run of `required_matches` similar nearest-distances is accepted.
    num_edges = len(res)
    required_matches = 4
    threshold = 10
    prev_dist = 0
    dist_to_nearest = sys.maxsize  # was sys.maxint (Python 2 only)
    num_matches = 0
    next_idx = 0
    i = 0
    while i < int(math.ceil(num_edges / 2.0)):
        idxs = [i + 1, i + 2, num_edges - i - 1, num_edges - i - 2]
        for j in idxs:
            # Manhattan distance between candidate i and candidate j.
            d1 = int(math.fabs(res[i, 0] - res[j, 0])) + int(math.fabs(res[i, 1] - res[j, 1]))
            if d1 < dist_to_nearest:
                dist_to_nearest = d1
                next_idx = j
        within_threshold = (prev_dist + threshold) > dist_to_nearest and (prev_dist - threshold) < dist_to_nearest
        print(i, next_idx, dist_to_nearest)
        if i == 0:
            within_threshold = True
            prev_dist = dist_to_nearest
        if within_threshold:
            img[res[i, 1], res[i, 0]] = [0, 0, 255]  # mark matched corner in red
            num_matches += 1
            if num_matches == required_matches:
                break
            i = next_idx
        else:
            prev_dist = dist_to_nearest
            num_matches = 0
            i += 1
        dist_to_nearest = sys.maxsize

    cv2.imshow('video', img)
    if cv2.waitKey(1) == 27:  # ESC key quits
        break

cap.release()
cv2.destroyAllWindows()
|
4,978 | 0878bfa1151371ff3aaa59f8be5ea9af74ada331 | from django import forms
class UploadForm(forms.Form):
    """Single-field form for uploading a JSON file with shipment data."""
    # The label is user-facing Russian text ("JSON with shipment data").
    file = forms.FileField(label="Json с данными об отправлении")
|
4,979 | 7955479c70de679cfb7575c8bd9208d00a4893df | from channels.generic.websocket import WebsocketConsumer, AsyncWebsocketConsumer
from asgiref.sync import async_to_sync
from channels.layers import get_channel_layer
import json
class AsyncConsumer(AsyncWebsocketConsumer):
    """Async chat consumer.

    When both participants are connected to the same room, messages are
    broadcast to the room group ("chat.message"); otherwise they are pushed
    directly to the peer's own group ("push.message").

    Fixes vs. original: the bare ``except:`` used to initialise the registry
    (which could mask unrelated errors) is replaced by ``setdefault``, the
    pointless list copy in ``receive`` is removed, and comments are
    translated to English. All runtime strings are unchanged.
    """

    # Class-level registry: room name -> set of live consumer instances.
    chats = dict()

    async def connect(self):
        """Join the room's channel-layer group and register this consumer."""
        self.room_name = self.scope['url_route']['kwargs']['room_name']
        # The group name is built directly from the user-supplied room name,
        # without any quoting or escaping.
        self.room_group_name = 'chat_%s' % self.room_name
        # Add the new connection to the group.
        await self.channel_layer.group_add(self.room_group_name, self.channel_name)
        # Register in the class-level registry.
        AsyncConsumer.chats.setdefault(self.room_name, set()).add(self)
        print('长度', len(AsyncConsumer.chats[self.room_name]))
        await self.accept()

    async def disconnect(self, close_code):
        """Leave the group and deregister this consumer."""
        # Remove the closed connection from the group.
        await self.channel_layer.group_discard(self.room_group_name, self.channel_name)
        AsyncConsumer.chats[self.room_name].remove(self)
        print('关闭后', len(AsyncConsumer.chats[self.room_name]))

    async def receive(self, text_data):
        """Parse an incoming '; '-separated message and forward it.

        Expected fields, in order: sender room number, receiver room number,
        sender avatar URL, message body.
        """
        text_data_json = {'message': text_data}
        message = text_data_json['message']
        data = message.split('; ')
        to_user = data[1]
        print(to_user)
        length = len(AsyncConsumer.chats[self.room_name])
        print(length, '链接数')
        if length == 2:
            # Both participants are in this room: broadcast to the room group.
            print(self.room_group_name, '走到2')
            await self.channel_layer.group_send(
                self.room_group_name,
                {'type': 'chat.message',        # dispatched to chat_message (two-way)
                 'connections': length,         # current connection count
                 'user_room_number': data[0],   # sender's room number
                 'mper_room_number': data[1],   # receiver's room number
                 'user_header_image': data[2],  # sender's avatar
                 'message': data[3],            # message body
                 'group': self.room_name        # group name
                 }
            )
        else:
            # Peer is not in this room: push directly to the peer's own group.
            print(to_user, '走到1')
            await self.channel_layer.group_send(
                'chat_%s' % to_user,
                {
                    "type": "push.message",     # dispatched to push_message (one-way)
                    'connections': length,
                    'user_room_number': data[0],
                    'mper_room_number': data[1],
                    'user_header_image': data[2],
                    'message': data[3],
                    'group': self.room_name
                }
            )

    async def chat_message(self, event):
        """Relay a group-broadcast chat event to this websocket client."""
        print(event)
        await self.send(text_data=json.dumps({
            'type': 'chat.message',
            'connections': event['connections'],
            'user_room_number': event['user_room_number'],
            'mper_room_number': event['mper_room_number'],
            'user_header_image': event['user_header_image'],
            'message': event['message'],
            'group': event['group'],
        }))

    async def push_message(self, event):
        """Relay a direct push event to this websocket client."""
        print(event)
        await self.send(text_data=json.dumps({
            'type': 'push.message',
            'connections': event['connections'],
            'user_room_number': event['user_room_number'],
            'mper_room_number': event['mper_room_number'],
            'user_header_image': event['user_header_image'],
            'message': event['message'],
            'group': event['group'],
        }))
# Helper intended to be called from synchronous (non-consumer) code
from channels.layers import get_channel_layer
def push(username, event):
    """Synchronously send a ``push.message`` event to group *username*.

    NOTE(review): the payload shape here ({"event": ...}) does not match the
    flat keys AsyncConsumer.push_message reads -- confirm before relying on
    this helper.
    """
    channel_layer = get_channel_layer()
    async_to_sync(channel_layer.group_send)(
        username,
        {
            "type": "push.message",
            "event": event
        }
    )
|
4,980 | 39b9106a3b0305db8cc7316be3b76e58e5577b92 | #adapted from https://github.com/DeepLearningSandbox/DeepLearningSandbox/tree/master/transfer_learning
import os
import sys
import glob
import argparse
import matplotlib.pyplot as plt
from keras.applications.imagenet_utils import preprocess_input
from keras.models import Model
from keras.layers import GlobalAveragePooling2D,Dropout,Convolution2D,Activation
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import SGD
from squeezenet import fire_module,SqueezeNet
IM_WIDTH, IM_HEIGHT = 227, 227  # fixed input size for SqueezeNet
NB_EPOCHS = 3                   # default epoch count (override with --nb_epoch)
BAT_SIZE = 32                   # default batch size (override with --batch_size)
def get_nb_files(dir):
    """Count files inside the immediate subdirectories of *dir*.

    Used to count dataset samples laid out as dir/<class>/<file>.
    Returns 0 when *dir* does not exist.
    """
    if not os.path.exists(dir):
        return 0
    total = 0
    for root, subdirs, _files in os.walk(dir):
        for sub in subdirs:
            total += len(glob.glob(os.path.join(root, sub, "*")))
    return total
def setup_to_transfer_learn(model):
    """Mark every layer of *model* as frozen (not trainable).

    Compilation is deliberately left to the caller, which compiles after
    attaching the new classification head.
    """
    for frozen_layer in model.layers:
        frozen_layer.trainable = False
def add_new_last_layer(base_model, nb_classes):
    """Attach a SqueezeNet-style classification head to *base_model*.

    Head: dropout -> 1x1 conv (nb_classes filters) -> relu -> global
    average pooling -> softmax.

    :param base_model: headless model whose output feeds the new head
    :param nb_classes: number of output classes
    :return: a new Model from base_model's input to the softmax output
    """
    head = base_model.output
    head = Dropout(0.5, name='drop9')(head)
    head = Convolution2D(nb_classes, (1, 1), padding='valid', name='conv10')(head)
    head = Activation('relu', name='relu_conv10')(head)
    head = GlobalAveragePooling2D()(head)
    head = Activation('softmax')(head)
    return Model(inputs=base_model.input, outputs=head)
def setup_to_finetune(model):
    """Freeze the first 11 layers, unfreeze the rest, and recompile with a
    slow SGD optimizer for fine-tuning.

    Layer-count notes from the author: 5 layers in the final output, 7 per
    fire module; layer index 11 leaves the last fire modules trainable.
    """
    for layer_index, layer in enumerate(model.layers):
        layer.trainable = layer_index >= 11
    model.compile(optimizer=SGD(lr=0.0001, momentum=0.9),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
def train(args):
    """Two-phase transfer learning on SqueezeNet.

    Phase 1: freeze the base network, attach a new head, train with rmsprop.
    Phase 2: unfreeze the upper layers (setup_to_finetune) and continue with
    a slow SGD. Saves the final model to args.output_model_file.

    NOTE(review): fit_generator and class_weight="auto" are Keras 1 APIs --
    confirm the pinned Keras version before upgrading.
    """
    nb_train_samples = get_nb_files(args.train_dir)
    # One class per top-level subdirectory of the training dir.
    nb_classes = len(glob.glob(args.train_dir + "/*"))
    nb_val_samples = get_nb_files(args.val_dir)
    nb_epoch = int(args.nb_epoch)
    batch_size = int(args.batch_size)
    steps_per_epoch = nb_train_samples/batch_size
    validation_steps = nb_val_samples/batch_size
    train_datagen = ImageDataGenerator(
        preprocessing_function=preprocess_input
    )
    test_datagen = ImageDataGenerator(
        preprocessing_function=preprocess_input
    )
    train_generator = train_datagen.flow_from_directory(
        args.train_dir,
        target_size = (IM_WIDTH,IM_HEIGHT),
        batch_size = batch_size,
        shuffle=True
    )
    val_generator = test_datagen.flow_from_directory(
        args.val_dir,
        target_size = (IM_WIDTH,IM_HEIGHT),
        batch_size = batch_size,
        shuffle=True
    )
    base_model = SqueezeNet()
    # Phase 1: transfer learning with the base frozen.
    setup_to_transfer_learn(base_model)
    model = add_new_last_layer(base_model,nb_classes)
    model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
    history_tl = model.fit_generator(
        generator=train_generator,
        epochs=nb_epoch,
        steps_per_epoch=steps_per_epoch,
        validation_data=val_generator,
        validation_steps = validation_steps,
        class_weight="auto"
    )
    # Phase 2: fine-tune the upper layers.
    setup_to_finetune(model)
    history_ft = model.fit_generator(
        generator=train_generator,
        epochs=nb_epoch,
        steps_per_epoch=steps_per_epoch,
        validation_data=val_generator,
        validation_steps=validation_steps,
        class_weight="auto"
    )
    model.save(args.output_model_file)
    if args.plot:
        plot_training(history_ft)
def plot_training(history):
    """Save accuracy and loss curves from a Keras History object.

    Writes accuracy_plot.png and loss_plot.png to the working directory.
    """
    hist = history.history
    accuracy = hist['acc']
    validation_accuracy = hist['val_acc']
    training_loss = hist['loss']
    validation_loss = hist['val_loss']
    epochs = range(len(accuracy))

    plt.plot(epochs, accuracy, 'r.')
    plt.plot(epochs, validation_accuracy, 'r')
    plt.title('Training and validation accuracy')
    plt.savefig("accuracy_plot.png")
    plt.close()

    plt.plot(epochs, training_loss, 'r.')
    plt.plot(epochs, validation_loss, 'r-')
    plt.title('Training and validation loss')
    plt.savefig("loss_plot.png")
if __name__=="__main__":
a = argparse.ArgumentParser()
a.add_argument("--train_dir")
a.add_argument("--val_dir")
a.add_argument("--nb_epoch", default=NB_EPOCHS)
a.add_argument("--batch_size", default=BAT_SIZE)
a.add_argument("--output_model_file", default="inceptionv3-ft.model")
a.add_argument("--plot", action="store_true")
args = a.parse_args()
if args.train_dir is None or args.val_dir is None:
a.print_help()
sys.exit(1)
if (not os.path.exists(args.train_dir)) or (not os.path.exists(args.val_dir)):
print("directories do not exist")
sys.exit(1)
train(args) |
4,981 | 2bf5ec4b4c0f0eed8364dcc9f1be599a804846f2 | # Generated by Django 2.2.7 on 2019-11-23 18:40
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: change Ml.file to an ImageField uploading to 'images'."""

    dependencies = [
        ('ml', '0003_auto_20191123_1835'),
    ]
    operations = [
        migrations.AlterField(
            model_name='ml',
            name='file',
            field=models.ImageField(upload_to='images'),
        ),
    ]
|
4,982 | da783355c5f888a66f623fa7eeeaf0e4e9fcfa48 | # Generated by Django 3.0.5 on 2020-05-18 12:50
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: make Order.fianl_code nullable/blank.

    NOTE(review): 'fianl_code' is presumably a typo for 'final_code', but the
    name is frozen in migration history -- renaming requires a new
    RenameField migration, not an edit here.
    """

    dependencies = [
        ('cart', '0010_auto_20200518_1718'),
    ]
    operations = [
        migrations.AlterField(
            model_name='order',
            name='fianl_code',
            field=models.PositiveIntegerField(blank=True, null=True),
        ),
    ]
|
4,983 | a521220ac287a840b5c69e2d0f33daa588132083 | from cryptography.exceptions import UnsupportedAlgorithm
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.serialization import load_ssh_public_key
from ingredients_http.schematics.types import ArrowType, KubeName
from schematics import Model
from schematics.exceptions import ValidationError
from schematics.types import UUIDType, IntType, StringType
from deli.kubernetes.resources.v1alpha1.keypair.keypair import Keypair
class ParamsKeypair(Model):
    """URL/path parameters identifying a single keypair by name."""
    keypair_name = KubeName(required=True)
class ParamsListKeypair(Model):
    """Pagination parameters for listing keypairs."""
    # Page size, capped at 100.
    limit = IntType(default=100, max_value=100, min_value=1)
    # Opaque cursor: ID of the last keypair from the previous page.
    marker = UUIDType()
class RequestCreateKeypair(Model):
    """Request body for creating a keypair: a name and an SSH public key."""
    name = KubeName(required=True, min_length=3)
    public_key = StringType(required=True)

    def validate_public_key(self, data, value):
        """Schematics field validator: reject keys that cannot be parsed as
        OpenSSH-format public keys.

        :raises ValidationError: if the key is malformed or uses an
            unsupported serialization.
        """
        try:
            load_ssh_public_key(value.encode(), default_backend())
        except ValueError:
            raise ValidationError("public_key could not be decoded or is not in the proper format")
        except UnsupportedAlgorithm:
            raise ValidationError("public_key serialization type is not supported")
        return value
class ResponseKeypair(Model):
    """API response representation of a keypair."""
    name = KubeName(required=True, min_length=3)
    public_key = StringType(required=True)
    created_at = ArrowType(required=True)
    updated_at = ArrowType(required=True)

    @classmethod
    def from_database(cls, keypair: Keypair):
        """Build a response model from a Keypair database resource."""
        model = cls()
        model.name = keypair.name
        model.public_key = keypair.public_key
        model.created_at = keypair.created_at
        model.updated_at = keypair.updated_at
        return model
|
4,984 | 572a098053ebae4f42cd020d1003cc18eceb6af0 | # encoding: utf-8
'''
Created on Nov 26, 2015
@author: tal
Based in part on:
Learn math - https://github.com/fchollet/keras/blob/master/examples/addition_rnn.py
See https://medium.com/@majortal/deep-spelling-9ffef96a24f6#.2c9pu8nlm
"""
Modified by Pavel Surmenok
'''
import argparse
import numpy as np
from keras.layers import Activation, TimeDistributed, Dense, RepeatVector, Dropout
from keras.layers import recurrent
from keras.models import Sequential
from keras.callbacks import ModelCheckpoint, TensorBoard, CSVLogger, LambdaCallback
from numpy.random import seed as random_seed
from numpy.random import randint as random_randint
import os
import pickle
from data import DataSet
random_seed(123)  # Reproducibility

# Parameters for the model and dataset
DATASET_FILENAME = 'data/dataset/news.2011.en.shuffled'
NUMBER_OF_EPOCHS = 100000
RNN = recurrent.LSTM  # NOTE(review): unused below -- the code uses recurrent.LSTM directly
INPUT_LAYERS = 2
OUTPUT_LAYERS = 2
AMOUNT_OF_DROPOUT = 0.3
BATCH_SIZE = 32
SAMPLES_PER_EPOCH = 65536
HIDDEN_SIZE = 700
INITIALIZATION = "he_normal" # : Gaussian initialization scaled by fan_in (He et al., 2014)
NUMBER_OF_CHARS = 100 # 75 -- NOTE(review): not referenced in this file; CHARS below defines the vocabulary
CHARS = list("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ .")
INVERTED = True  # feed input sequences reversed (common seq2seq trick)
# Checkpoint / logging file locations.
MODEL_CHECKPOINT_DIRECTORYNAME = 'models'
MODEL_CHECKPOINT_FILENAME = 'weights.{epoch:02d}-{val_loss:.2f}.hdf5'
MODEL_DATASET_PARAMS_FILENAME = 'dataset_params.pickle'
MODEL_STARTING_CHECKPOINT_FILENAME = 'weights.hdf5'
CSV_LOG_FILENAME = 'log.csv'
def generate_model(output_len, chars=None):
    """Build and compile the encoder-decoder spelling model.

    Encoder: INPUT_LAYERS stacked LSTMs over one-hot character input.
    Decoder: the encoding repeated output_len times, OUTPUT_LAYERS LSTMs,
    then a per-timestep softmax over the character set.

    NOTE(review): the ``init=`` keyword is the Keras 1 API (renamed
    kernel_initializer in Keras 2) -- confirm the pinned Keras version.

    :param output_len: length of the target (corrected) sequence
    :param chars: character vocabulary; defaults to module-level CHARS
    :return: a compiled Sequential model
    """
    print('Build model...')
    chars = chars or CHARS
    model = Sequential()
    # "Encode" the input sequence using an RNN, producing an output of HIDDEN_SIZE
    # note: in a situation where your input sequences have a variable length,
    # use input_shape=(None, nb_feature).
    for layer_number in range(INPUT_LAYERS):
        model.add(recurrent.LSTM(HIDDEN_SIZE, input_shape=(None, len(chars)), init=INITIALIZATION,
                                 return_sequences=layer_number + 1 < INPUT_LAYERS))
        model.add(Dropout(AMOUNT_OF_DROPOUT))
    # For the decoder's input, we repeat the encoded input for each time step
    model.add(RepeatVector(output_len))
    # The decoder RNN could be multiple layers stacked or a single layer
    for _ in range(OUTPUT_LAYERS):
        model.add(recurrent.LSTM(HIDDEN_SIZE, return_sequences=True, init=INITIALIZATION))
        model.add(Dropout(AMOUNT_OF_DROPOUT))
    # For each of step of the output sequence, decide which character should be chosen
    model.add(TimeDistributed(Dense(len(chars), init=INITIALIZATION)))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
class Colors(object):
    """ANSI terminal escape codes for colouring sample printouts."""
    ok = '\033[92m'     # green
    fail = '\033[91m'   # red
    close = '\033[0m'   # reset attributes
def show_samples(model, dataset, epoch, logs, X_dev_batch, y_dev_batch):
    """Print 10 random dev-set predictions so errors can be eyeballed.

    The (epoch, logs) parameters exist to match the LambdaCallback
    on_epoch_end signature; they are unused here.
    """
    for _ in range(10):
        ind = random_randint(0, len(X_dev_batch))
        row_X, row_y = X_dev_batch[np.array([ind])], y_dev_batch[np.array([ind])]
        preds = model.predict_classes(row_X, verbose=0)
        q = dataset.character_table.decode(row_X[0])
        correct = dataset.character_table.decode(row_y[0])
        guess = dataset.character_table.decode(preds[0], calc_argmax=False)
        if INVERTED:
            print('Q', q[::-1]) # inverted back!
        else:
            print('Q', q)
        print('A', correct)
        # Green check for an exact match, red cross otherwise.
        print(Colors.ok + '☑' + Colors.close if correct == guess else Colors.fail + '☒' + Colors.close, guess)
        print('---')
def iterate_training(model, dataset, initial_epoch):
    """Train ``model`` on ``dataset`` with checkpointing, logging and sampling.

    Wires up four callbacks (best-only checkpointing, TensorBoard, CSV metric
    logging, and a per-epoch sample printout) and runs the generator-based
    training loop starting at ``initial_epoch``.

    NOTE(review): ``samples_per_epoch``/``nb_epoch``/``nb_val_samples`` are
    legacy Keras 1.x fit_generator arguments -- confirm the Keras version.
    """
    checkpoint = ModelCheckpoint(MODEL_CHECKPOINT_DIRECTORYNAME + '/' + MODEL_CHECKPOINT_FILENAME,
                                 save_best_only=True)
    tensorboard = TensorBoard()
    csv_logger = CSVLogger(CSV_LOG_FILENAME)
    # A fixed 1000-sample dev batch is captured once so the per-epoch sample
    # printout is drawn from the same pool every epoch.
    X_dev_batch, y_dev_batch = next(dataset.dev_set_batch_generator(1000))
    show_samples_callback = LambdaCallback(
        on_epoch_end=lambda epoch, logs: show_samples(model, dataset, epoch, logs, X_dev_batch, y_dev_batch))
    train_batch_generator = dataset.train_set_batch_generator(BATCH_SIZE)
    validation_batch_generator = dataset.dev_set_batch_generator(BATCH_SIZE)
    model.fit_generator(train_batch_generator,
                        samples_per_epoch=SAMPLES_PER_EPOCH,
                        nb_epoch=NUMBER_OF_EPOCHS,
                        validation_data=validation_batch_generator,
                        nb_val_samples=SAMPLES_PER_EPOCH,
                        callbacks=[checkpoint, tensorboard, csv_logger, show_samples_callback],
                        verbose=1,
                        initial_epoch=initial_epoch)
def save_dataset_params(dataset):
    """Pickle the dataset's vocabulary and target length into the checkpoint directory."""
    payload = {'chars': dataset.chars, 'y_max_length': dataset.y_max_length}
    params_path = MODEL_CHECKPOINT_DIRECTORYNAME + '/' + MODEL_DATASET_PARAMS_FILENAME
    with open(params_path, 'wb') as handle:
        pickle.dump(payload, handle)
def main_news(checkpoint_filename=None, dataset_params_filename=None, initial_epoch=1):
    """Entry point: build (or resume) the model and run training.

    :param checkpoint_filename: optional weights file to resume from.
    :param dataset_params_filename: optional pickled params from a previous
        run; when given, the current dataset must match them exactly.
    :param initial_epoch: epoch number to resume training at.
    """
    dataset = DataSet(DATASET_FILENAME)
    if not os.path.exists(MODEL_CHECKPOINT_DIRECTORYNAME):
        os.makedirs(MODEL_CHECKPOINT_DIRECTORYNAME)
    if dataset_params_filename is not None:
        # Resuming: refuse to continue if the vocabulary or target length
        # changed, since the saved weights would no longer line up.
        with open(dataset_params_filename, 'rb') as f:
            dataset_params = pickle.load(f)
        assert dataset_params['chars'] == dataset.chars
        assert dataset_params['y_max_length'] == dataset.y_max_length
    else:
        save_dataset_params(dataset)
    model = generate_model(dataset.y_max_length, dataset.chars)
    if checkpoint_filename is not None:
        model.load_weights(checkpoint_filename)
    iterate_training(model, dataset, initial_epoch)
if __name__ == '__main__':
    # Command-line interface for starting or resuming a training run.
    parser = argparse.ArgumentParser(description='Trains a deep spelling model.')
    parser.add_argument('--checkpoint', type=str,
                        help='Filename of a model checkpoint to start the training from.')
    parser.add_argument('--datasetparams', type=str,
                        help='Filename of a file with dataset params to load for continuing model training.')
    # NOTE(review): this CLI default (0) differs from main_news' own default
    # initial_epoch of 1 -- confirm which is intended.
    parser.add_argument('--initialepoch', type=int,
                        help='Initial epoch parameter for continuing model training.', default=0)
    args = parser.parse_args()
    main_news(args.checkpoint, args.datasetparams, args.initialepoch)
|
4,985 | d56e313318635788ae5b3d3a3f767450ab2f2296 | # Copyright 2011 Isaku Yamahata
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for Block Device utility functions.
"""
from unittest import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import units
from nova import block_device
from nova.compute import api as compute_api
from nova import context
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit import fake_block_device
from nova.tests.unit import matchers
from nova.volume import cinder
class BlockDeviceTestCase(test.NoDBTestCase):
    """Unit tests for the helper functions in nova.block_device."""
    def setUp(self):
        """Build a representative set of new-style BDMs used by several tests:

        two local blank devices (one swap, one ephemeral), a boot volume,
        a snapshot-backed volume and a no-device entry.
        """
        super(BlockDeviceTestCase, self).setUp()
        BDM = block_device.BlockDeviceDict
        self.new_mapping = [
            BDM({'id': 1, 'instance_uuid': uuids.instance,
                 'device_name': '/dev/sdb1',
                 'source_type': 'blank',
                 'destination_type': 'local',
                 'delete_on_termination': True,
                 'volume_size': 1,
                 'guest_format': 'swap',
                 'boot_index': -1}),
            BDM({'id': 2, 'instance_uuid': uuids.instance,
                 'device_name': '/dev/sdc1',
                 'source_type': 'blank',
                 'destination_type': 'local',
                 'volume_size': 10,
                 'delete_on_termination': True,
                 'boot_index': -1}),
            BDM({'id': 3, 'instance_uuid': uuids.instance,
                 'device_name': '/dev/sda1',
                 'source_type': 'volume',
                 'destination_type': 'volume',
                 'volume_id': 'fake-volume-id-1',
                 'connection_info': "{'fake': 'connection_info'}",
                 'boot_index': 0}),
            BDM({'id': 4, 'instance_uuid': uuids.instance,
                 'device_name': '/dev/sda2',
                 'source_type': 'snapshot',
                 'destination_type': 'volume',
                 'connection_info': "{'fake': 'connection_info'}",
                 'snapshot_id': 'fake-snapshot-id-1',
                 'volume_id': 'fake-volume-id-2',
                 'boot_index': -1}),
            BDM({'id': 5, 'instance_uuid': uuids.instance,
                 'no_device': True,
                 'device_name': '/dev/vdc'}),
        ]
    def test_properties(self):
        """properties_root_device_name prefers the explicit root_device_name
        key over the 'root' entry in the mappings list."""
        root_device0 = '/dev/sda'
        root_device1 = '/dev/sdb'
        mappings = [{'virtual': 'root',
                     'device': root_device0}]
        properties0 = {'mappings': mappings}
        properties1 = {'mappings': mappings,
                       'root_device_name': root_device1}
        self.assertIsNone(block_device.properties_root_device_name({}))
        self.assertEqual(root_device0,
                         block_device.properties_root_device_name(properties0))
        self.assertEqual(root_device1,
                         block_device.properties_root_device_name(properties1))
    def test_ephemeral(self):
        """is_ephemeral requires a numeric suffix; ephemeral_num extracts it."""
        self.assertFalse(block_device.is_ephemeral('ephemeral'))
        self.assertTrue(block_device.is_ephemeral('ephemeral0'))
        self.assertTrue(block_device.is_ephemeral('ephemeral1'))
        self.assertTrue(block_device.is_ephemeral('ephemeral11'))
        self.assertFalse(block_device.is_ephemeral('root'))
        self.assertFalse(block_device.is_ephemeral('swap'))
        self.assertFalse(block_device.is_ephemeral('/dev/sda1'))
        self.assertEqual(0, block_device.ephemeral_num('ephemeral0'))
        self.assertEqual(1, block_device.ephemeral_num('ephemeral1'))
        self.assertEqual(11, block_device.ephemeral_num('ephemeral11'))
        self.assertFalse(block_device.is_swap_or_ephemeral('ephemeral'))
        self.assertTrue(block_device.is_swap_or_ephemeral('ephemeral0'))
        self.assertTrue(block_device.is_swap_or_ephemeral('ephemeral1'))
        self.assertTrue(block_device.is_swap_or_ephemeral('swap'))
        self.assertFalse(block_device.is_swap_or_ephemeral('root'))
        self.assertFalse(block_device.is_swap_or_ephemeral('/dev/sda1'))
    def test_mappings_prepend_dev(self):
        """'/dev/' is prepended only to swap/ephemeral entries lacking it."""
        mapping = [
            {'virtual': 'ami', 'device': '/dev/sda'},
            {'virtual': 'root', 'device': 'sda'},
            {'virtual': 'ephemeral0', 'device': 'sdb'},
            {'virtual': 'swap', 'device': 'sdc'},
            {'virtual': 'ephemeral1', 'device': 'sdd'},
            {'virtual': 'ephemeral2', 'device': 'sde'}]
        expected = [
            {'virtual': 'ami', 'device': '/dev/sda'},
            {'virtual': 'root', 'device': 'sda'},
            {'virtual': 'ephemeral0', 'device': '/dev/sdb'},
            {'virtual': 'swap', 'device': '/dev/sdc'},
            {'virtual': 'ephemeral1', 'device': '/dev/sdd'},
            {'virtual': 'ephemeral2', 'device': '/dev/sde'}]
        prepended = block_device.mappings_prepend_dev(mapping)
        self.assertEqual(sorted(expected, key=lambda v: v['virtual']),
                         sorted(prepended, key=lambda v: v['virtual']))
    def test_strip_dev(self):
        self.assertEqual('sda', block_device.strip_dev('/dev/sda'))
        self.assertEqual('sda', block_device.strip_dev('sda'))
        self.assertIsNone(block_device.strip_dev(None))
    def test_strip_prefix(self):
        """strip_prefix removes both '/dev/' and the sd/xvd/vd/hd prefix."""
        self.assertEqual('a', block_device.strip_prefix('/dev/sda'))
        self.assertEqual('a', block_device.strip_prefix('a'))
        self.assertEqual('a', block_device.strip_prefix('xvda'))
        self.assertEqual('a', block_device.strip_prefix('vda'))
        self.assertEqual('a', block_device.strip_prefix('hda'))
        self.assertIsNone(block_device.strip_prefix(None))
    def test_get_device_letter(self):
        """The device letter is what remains after prefix and partition digits."""
        self.assertEqual('', block_device.get_device_letter(''))
        self.assertEqual('a', block_device.get_device_letter('/dev/sda1'))
        self.assertEqual('b', block_device.get_device_letter('/dev/xvdb'))
        self.assertEqual('d', block_device.get_device_letter('/dev/d'))
        self.assertEqual('a', block_device.get_device_letter('a'))
        self.assertEqual('b', block_device.get_device_letter('sdb2'))
        self.assertEqual('c', block_device.get_device_letter('vdc'))
        self.assertEqual('c', block_device.get_device_letter('hdc'))
        self.assertIsNone(block_device.get_device_letter(None))
    def test_generate_device_name(self):
        """Indexes are rendered base-26 (a..z, aa, ab, ...) after the prefix."""
        expected = (
            ('vda', ("vd", 0)),
            ('vdaa', ("vd", 26)),
            ('vdabc', ("vd", 730)),
            ('vdidpok', ("vd", 4194304)),
            ('sdc', ("sd", 2)),
            ('sdaa', ("sd", 26)),
            ('sdiw', ("sd", 256)),
            ('hdzz', ("hd", 701))
        )
        for res, args in expected:
            self.assertEqual(res, block_device.generate_device_name(*args))
    def test_volume_in_mapping(self):
        """volume_in_mapping matches swap, ephemerals and volumes, not root."""
        swap = {'device_name': '/dev/sdb',
                'swap_size': 1}
        ephemerals = [{'num': 0,
                       'virtual_name': 'ephemeral0',
                       'device_name': '/dev/sdc1',
                       'size': 1},
                      {'num': 2,
                       'virtual_name': 'ephemeral2',
                       'device_name': '/dev/sdd',
                       'size': 1}]
        block_device_mapping = [{'mount_device': '/dev/sde',
                                 'device_path': 'fake_device'},
                                {'mount_device': '/dev/sdf',
                                 'device_path': 'fake_device'}]
        block_device_info = {
                'root_device_name': '/dev/sda',
                'swap': swap,
                'ephemerals': ephemerals,
                'block_device_mapping': block_device_mapping}
        def _assert_volume_in_mapping(device_name, true_or_false):
            in_mapping = block_device.volume_in_mapping(
                    device_name, block_device_info)
            self.assertEqual(true_or_false, in_mapping)
        _assert_volume_in_mapping('sda', False)
        _assert_volume_in_mapping('sdb', True)
        _assert_volume_in_mapping('sdc1', True)
        _assert_volume_in_mapping('sdd', True)
        _assert_volume_in_mapping('sde', True)
        _assert_volume_in_mapping('sdf', True)
        _assert_volume_in_mapping('sdg', False)
        _assert_volume_in_mapping('sdh1', False)
    def test_get_root_bdm(self):
        """Only boot_index == 0 marks the root BDM; missing/None/-1 do not."""
        root_bdm = {'device_name': 'vda', 'boot_index': 0}
        bdms = [root_bdm,
                {'device_name': 'vdb', 'boot_index': 1},
                {'device_name': 'vdc', 'boot_index': -1},
                {'device_name': 'vdd'}]
        self.assertEqual(root_bdm, block_device.get_root_bdm(bdms))
        self.assertEqual(root_bdm, block_device.get_root_bdm([bdms[0]]))
        self.assertIsNone(block_device.get_root_bdm(bdms[1:]))
        self.assertIsNone(block_device.get_root_bdm(bdms[2:]))
        self.assertIsNone(block_device.get_root_bdm(bdms[3:]))
        self.assertIsNone(block_device.get_root_bdm([]))
    def test_get_bdm_ephemeral_disk_size(self):
        # Only the 10G blank/local non-swap device in self.new_mapping counts.
        size = block_device.get_bdm_ephemeral_disk_size(self.new_mapping)
        self.assertEqual(10, size)
    def test_get_bdm_swap_list(self):
        swap_list = block_device.get_bdm_swap_list(self.new_mapping)
        self.assertEqual(1, len(swap_list))
        self.assertEqual(1, swap_list[0].get('id'))
    def test_get_bdm_local_disk_num(self):
        size = block_device.get_bdm_local_disk_num(self.new_mapping)
        self.assertEqual(2, size)
    def test_new_format_is_swap(self):
        expected_results = [True, False, False, False, False]
        for expected, bdm in zip(expected_results, self.new_mapping):
            res = block_device.new_format_is_swap(bdm)
            self.assertEqual(expected, res)
    def test_new_format_is_ephemeral(self):
        expected_results = [False, True, False, False, False]
        for expected, bdm in zip(expected_results, self.new_mapping):
            res = block_device.new_format_is_ephemeral(bdm)
            self.assertEqual(expected, res)
    def test_validate_device_name(self):
        # Whitespace, non-strings, None and over-long names are all rejected.
        for value in [' ', 10, None, 'a' * 260]:
            self.assertRaises(exception.InvalidBDMFormat,
                              block_device.validate_device_name,
                              value)
    def test_validate_and_default_volume_size(self):
        # Negative, non-numeric and non-integer sizes are all rejected.
        bdm = {}
        for value in [-1, 'a', 2.5]:
            bdm['volume_size'] = value
            self.assertRaises(exception.InvalidBDMFormat,
                              block_device.validate_and_default_volume_size,
                              bdm)
    def test_get_bdms_to_connect(self):
        """exclude_root_mapping=True filters out the boot_index == 0 BDM."""
        root_bdm = {'device_name': 'vda', 'boot_index': 0}
        bdms = [root_bdm,
                {'device_name': 'vdb', 'boot_index': 1},
                {'device_name': 'vdc', 'boot_index': -1},
                {'device_name': 'vde', 'boot_index': None},
                {'device_name': 'vdd'}]
        self.assertNotIn(root_bdm, block_device.get_bdms_to_connect(bdms,
                                                    exclude_root_mapping=True))
        self.assertIn(root_bdm, block_device.get_bdms_to_connect(bdms))
class TestBlockDeviceDict(test.NoDBTestCase):
    """Tests for BlockDeviceDict construction, validation and the
    conversions between API, legacy and new-style BDM formats."""
    def setUp(self):
        """Build the same logical set of BDMs in three representations:

        * api_mapping -- as received from the REST API ('uuid' keys)
        * new_mapping -- internal BlockDeviceDict objects
        * legacy_mapping -- old-style dicts ('virtual_name' etc.)
        plus image-sourced variants used by the legacy-conversion tests.
        """
        super(TestBlockDeviceDict, self).setUp()
        BDM = block_device.BlockDeviceDict
        self.api_mapping = [
            {'id': 1, 'instance_uuid': uuids.instance,
             'device_name': '/dev/sdb1',
             'source_type': 'blank',
             'destination_type': 'local',
             'delete_on_termination': True,
             'guest_format': 'swap',
             'boot_index': -1},
            {'id': 2, 'instance_uuid': uuids.instance,
             'device_name': '/dev/sdc1',
             'source_type': 'blank',
             'destination_type': 'local',
             'delete_on_termination': True,
             'boot_index': -1},
            {'id': 3, 'instance_uuid': uuids.instance,
             'device_name': '/dev/sda1',
             'source_type': 'volume',
             'destination_type': 'volume',
             'uuid': 'fake-volume-id-1',
             'boot_index': 0},
            {'id': 4, 'instance_uuid': uuids.instance,
             'device_name': '/dev/sda2',
             'source_type': 'snapshot',
             'destination_type': 'volume',
             'uuid': 'fake-snapshot-id-1',
             'boot_index': -1},
            {'id': 5, 'instance_uuid': uuids.instance,
             'no_device': True,
             'device_name': '/dev/vdc'},
        ]
        self.new_mapping = [
            BDM({'id': 1, 'instance_uuid': uuids.instance,
                 'device_name': '/dev/sdb1',
                 'source_type': 'blank',
                 'destination_type': 'local',
                 'delete_on_termination': True,
                 'guest_format': 'swap',
                 'boot_index': -1}),
            BDM({'id': 2, 'instance_uuid': uuids.instance,
                 'device_name': '/dev/sdc1',
                 'source_type': 'blank',
                 'destination_type': 'local',
                 'delete_on_termination': True,
                 'boot_index': -1}),
            BDM({'id': 3, 'instance_uuid': uuids.instance,
                 'device_name': '/dev/sda1',
                 'source_type': 'volume',
                 'destination_type': 'volume',
                 'volume_id': 'fake-volume-id-1',
                 'connection_info': "{'fake': 'connection_info'}",
                 'boot_index': 0}),
            BDM({'id': 4, 'instance_uuid': uuids.instance,
                 'device_name': '/dev/sda2',
                 'source_type': 'snapshot',
                 'destination_type': 'volume',
                 'connection_info': "{'fake': 'connection_info'}",
                 'snapshot_id': 'fake-snapshot-id-1',
                 'volume_id': 'fake-volume-id-2',
                 'boot_index': -1}),
            BDM({'id': 5, 'instance_uuid': uuids.instance,
                 'no_device': True,
                 'device_name': '/dev/vdc'}),
        ]
        self.legacy_mapping = [
            {'id': 1, 'instance_uuid': uuids.instance,
             'device_name': '/dev/sdb1',
             'delete_on_termination': True,
             'virtual_name': 'swap'},
            {'id': 2, 'instance_uuid': uuids.instance,
             'device_name': '/dev/sdc1',
             'delete_on_termination': True,
             'virtual_name': 'ephemeral0'},
            {'id': 3, 'instance_uuid': uuids.instance,
             'device_name': '/dev/sda1',
             'volume_id': 'fake-volume-id-1',
             'connection_info': "{'fake': 'connection_info'}"},
            {'id': 4, 'instance_uuid': uuids.instance,
             'device_name': '/dev/sda2',
             'connection_info': "{'fake': 'connection_info'}",
             'snapshot_id': 'fake-snapshot-id-1',
             'volume_id': 'fake-volume-id-2'},
            {'id': 5, 'instance_uuid': uuids.instance,
             'no_device': True,
             'device_name': '/dev/vdc'},
        ]
        self.new_mapping_source_image = [
            BDM({'id': 6, 'instance_uuid': uuids.instance,
                 'device_name': '/dev/sda3',
                 'source_type': 'image',
                 'destination_type': 'volume',
                 'connection_info': "{'fake': 'connection_info'}",
                 'volume_id': 'fake-volume-id-3',
                 'boot_index': -1}),
            BDM({'id': 7, 'instance_uuid': uuids.instance,
                 'device_name': '/dev/sda4',
                 'source_type': 'image',
                 'destination_type': 'local',
                 'connection_info': "{'fake': 'connection_info'}",
                 'image_id': 'fake-image-id-2',
                 'boot_index': -1}),
        ]
        self.legacy_mapping_source_image = [
            {'id': 6, 'instance_uuid': uuids.instance,
             'device_name': '/dev/sda3',
             'connection_info': "{'fake': 'connection_info'}",
             'volume_id': 'fake-volume-id-3'},
        ]
    def test_init(self):
        """Field filtering and defaulting behavior of the constructor."""
        def fake_validate(obj, dct):
            pass
        self.stub_out('nova.block_device.BlockDeviceDict._fields',
                      set(['field1', 'field2']))
        self.stub_out('nova.block_device.BlockDeviceDict._db_only_fields',
                      set(['db_field1', 'db_field2']))
        self.stub_out('nova.block_device.BlockDeviceDict._validate',
                      fake_validate)
        # Make sure db fields are not picked up if they are not
        # in the original dict
        dev_dict = block_device.BlockDeviceDict({'field1': 'foo',
                                                 'field2': 'bar',
                                                 'db_field1': 'baz'})
        self.assertIn('field1', dev_dict)
        self.assertIn('field2', dev_dict)
        self.assertIn('db_field1', dev_dict)
        self.assertNotIn('db_field2', dev_dict)
        # Make sure all expected fields are defaulted
        dev_dict = block_device.BlockDeviceDict({'field1': 'foo'})
        self.assertIn('field1', dev_dict)
        self.assertIn('field2', dev_dict)
        self.assertIsNone(dev_dict['field2'])
        self.assertNotIn('db_field1', dev_dict)
        self.assertNotIn('db_field2', dev_dict)
        # Unless they are not meant to be
        dev_dict = block_device.BlockDeviceDict({'field1': 'foo'},
            do_not_default=set(['field2']))
        self.assertIn('field1', dev_dict)
        self.assertNotIn('field2', dev_dict)
        self.assertNotIn('db_field1', dev_dict)
        self.assertNotIn('db_field2', dev_dict)
        # Passing kwargs to constructor works
        dev_dict = block_device.BlockDeviceDict(field1='foo')
        self.assertIn('field1', dev_dict)
        self.assertIn('field2', dev_dict)
        self.assertIsNone(dev_dict['field2'])
        dev_dict = block_device.BlockDeviceDict(
                {'field1': 'foo'}, field2='bar')
        self.assertEqual('foo', dev_dict['field1'])
        self.assertEqual('bar', dev_dict['field2'])
    def test_init_prepend_dev_to_device_name(self):
        """Bare device names get '/dev/' prepended; None stays None."""
        bdm = {'id': 3, 'instance_uuid': uuids.instance,
               'device_name': 'vda',
               'source_type': 'volume',
               'destination_type': 'volume',
               'volume_id': 'fake-volume-id-1',
               'boot_index': 0}
        bdm_dict = block_device.BlockDeviceDict(bdm)
        self.assertEqual('/dev/vda', bdm_dict['device_name'])
        bdm['device_name'] = '/dev/vdb'
        bdm_dict = block_device.BlockDeviceDict(bdm)
        self.assertEqual('/dev/vdb', bdm_dict['device_name'])
        bdm['device_name'] = None
        bdm_dict = block_device.BlockDeviceDict(bdm)
        self.assertIsNone(bdm_dict['device_name'])
    def test_init_boolify_delete_on_termination(self):
        # Make sure that when delete_on_termination is not passed it's
        # still set to False and not None
        bdm = {'id': 3, 'instance_uuid': uuids.instance,
               'device_name': 'vda',
               'source_type': 'volume',
               'destination_type': 'volume',
               'volume_id': 'fake-volume-id-1',
               'boot_index': 0}
        bdm_dict = block_device.BlockDeviceDict(bdm)
        self.assertFalse(bdm_dict['delete_on_termination'])
    def test_validate(self):
        """Constructor validation: unknown fields, missing source_type,
        malformed device names, sizes, booleans and boot indexes."""
        self.assertRaises(exception.InvalidBDMFormat,
                          block_device.BlockDeviceDict,
                          {'bogus_field': 'lame_val'})
        lame_bdm = dict(self.new_mapping[2])
        del lame_bdm['source_type']
        self.assertRaises(exception.InvalidBDMFormat,
                          block_device.BlockDeviceDict,
                          lame_bdm)
        # no_device=True entries are exempt from the source_type requirement.
        lame_bdm['no_device'] = True
        block_device.BlockDeviceDict(lame_bdm)
        lame_dev_bdm = dict(self.new_mapping[2])
        lame_dev_bdm['device_name'] = "not a valid name"
        self.assertRaises(exception.InvalidBDMFormat,
                          block_device.BlockDeviceDict,
                          lame_dev_bdm)
        lame_dev_bdm['device_name'] = ""
        self.assertRaises(exception.InvalidBDMFormat,
                          block_device.BlockDeviceDict,
                          lame_dev_bdm)
        # Numeric strings are coerced to int for volume_size.
        cool_volume_size_bdm = dict(self.new_mapping[2])
        cool_volume_size_bdm['volume_size'] = '42'
        cool_volume_size_bdm = block_device.BlockDeviceDict(
            cool_volume_size_bdm)
        self.assertEqual(42, cool_volume_size_bdm['volume_size'])
        lame_volume_size_bdm = dict(self.new_mapping[2])
        lame_volume_size_bdm['volume_size'] = 'some_non_int_string'
        self.assertRaises(exception.InvalidBDMFormat,
                          block_device.BlockDeviceDict,
                          lame_volume_size_bdm)
        truthy_bdm = dict(self.new_mapping[2])
        truthy_bdm['delete_on_termination'] = '1'
        truthy_bdm = block_device.BlockDeviceDict(truthy_bdm)
        self.assertTrue(truthy_bdm['delete_on_termination'])
        verbose_bdm = dict(self.new_mapping[2])
        verbose_bdm['boot_index'] = 'first'
        self.assertRaises(exception.InvalidBDMFormat,
                          block_device.BlockDeviceDict,
                          verbose_bdm)
    def test_from_legacy(self):
        for legacy, new in zip(self.legacy_mapping, self.new_mapping):
            self.assertThat(
                block_device.BlockDeviceDict.from_legacy(legacy),
                matchers.IsSubDictOf(new))
    def test_from_legacy_mapping(self):
        """from_legacy_mapping creates an image BDM from image_ref unless a
        root device in the mapping (or no_root) takes precedence."""
        def _get_image_bdms(bdms):
            return [bdm for bdm in bdms if bdm['source_type'] == 'image']
        def _get_bootable_bdms(bdms):
            return [bdm for bdm in bdms
                    if (bdm['boot_index'] is not None and
                        bdm['boot_index'] >= 0)]
        new_no_img = block_device.from_legacy_mapping(self.legacy_mapping)
        self.assertEqual(0, len(_get_image_bdms(new_no_img)))
        for new, expected in zip(new_no_img, self.new_mapping):
            self.assertThat(new, matchers.IsSubDictOf(expected))
        new_with_img = block_device.from_legacy_mapping(
            self.legacy_mapping, 'fake_image_ref')
        image_bdms = _get_image_bdms(new_with_img)
        boot_bdms = _get_bootable_bdms(new_with_img)
        self.assertEqual(1, len(image_bdms))
        self.assertEqual(1, len(boot_bdms))
        self.assertEqual(0, image_bdms[0]['boot_index'])
        self.assertEqual('image', boot_bdms[0]['source_type'])
        new_with_img_and_root = block_device.from_legacy_mapping(
            self.legacy_mapping, 'fake_image_ref', 'sda1')
        image_bdms = _get_image_bdms(new_with_img_and_root)
        boot_bdms = _get_bootable_bdms(new_with_img_and_root)
        self.assertEqual(0, len(image_bdms))
        self.assertEqual(1, len(boot_bdms))
        self.assertEqual(0, boot_bdms[0]['boot_index'])
        self.assertEqual('volume', boot_bdms[0]['source_type'])
        new_no_root = block_device.from_legacy_mapping(
            self.legacy_mapping, 'fake_image_ref', 'sda1', no_root=True)
        self.assertEqual(0, len(_get_image_bdms(new_no_root)))
        self.assertEqual(0, len(_get_bootable_bdms(new_no_root)))
    def test_from_api(self):
        for api, new in zip(self.api_mapping, self.new_mapping):
            # from_api never sets connection_info, and maps the API 'uuid'
            # to snapshot_id OR volume_id (never both).
            new['connection_info'] = None
            if new['snapshot_id']:
                new['volume_id'] = None
            self.assertThat(
                block_device.BlockDeviceDict.from_api(api, False),
                matchers.IsSubDictOf(new))
    def test_from_api_invalid_blank_id(self):
        # A 'blank' source may not carry a uuid.
        api_dict = {'id': 1,
                    'source_type': 'blank',
                    'destination_type': 'volume',
                    'uuid': 'fake-volume-id-1',
                    'delete_on_termination': True,
                    'boot_index': -1}
        self.assertRaises(exception.InvalidBDMFormat,
                          block_device.BlockDeviceDict.from_api, api_dict,
                          False)
    def test_from_api_invalid_source_to_local_mapping(self):
        # image -> local needs image_uuid_specified (second arg) to be True.
        api_dict = {'id': 1,
                    'source_type': 'image',
                    'destination_type': 'local',
                    'uuid': 'fake-volume-id-1'}
        self.assertRaises(exception.InvalidBDMFormat,
                          block_device.BlockDeviceDict.from_api, api_dict,
                          False)
    def test_from_api_valid_source_to_local_mapping(self):
        api_dict = {'id': 1,
                    'source_type': 'image',
                    'destination_type': 'local',
                    'volume_id': 'fake-volume-id-1',
                    'uuid': 1,
                    'boot_index': 0}
        retexp = block_device.BlockDeviceDict(
            {'id': 1,
             'source_type': 'image',
             'image_id': 1,
             'destination_type': 'local',
             'volume_id': 'fake-volume-id-1',
             'boot_index': 0})
        self.assertEqual(retexp,
                         block_device.BlockDeviceDict.from_api(api_dict, True))
    def test_from_api_valid_source_to_local_mapping_with_string_bi(self):
        # A string boot_index ('0') is coerced to the int 0.
        api_dict = {'id': 1,
                    'source_type': 'image',
                    'destination_type': 'local',
                    'volume_id': 'fake-volume-id-1',
                    'uuid': 1,
                    'boot_index': '0'}
        retexp = block_device.BlockDeviceDict(
            {'id': 1,
             'source_type': 'image',
             'image_id': 1,
             'destination_type': 'local',
             'volume_id': 'fake-volume-id-1',
             'boot_index': 0})
        self.assertEqual(retexp,
                         block_device.BlockDeviceDict.from_api(api_dict, True))
    def test_from_api_invalid_image_to_destination_local_mapping(self):
        api_dict = {'id': 1,
                    'source_type': 'image',
                    'destination_type': 'local',
                    'uuid': 'fake-volume-id-1',
                    'volume_type': 'fake-lvm-1',
                    'boot_index': 1}
        ex = self.assertRaises(exception.InvalidBDMFormat,
                               block_device.BlockDeviceDict.from_api,
                               api_dict, False)
        self.assertIn('Mapping image to local is not supported', str(ex))
    def test_from_api_invalid_volume_type_to_destination_local_mapping(self):
        api_dict = {'id': 1,
                    'source_type': 'volume',
                    'destination_type': 'local',
                    'uuid': 'fake-volume-id-1',
                    'volume_type': 'fake-lvm-1'}
        ex = self.assertRaises(exception.InvalidBDMFormat,
                               block_device.BlockDeviceDict.from_api,
                               api_dict, False)
        self.assertIn('Specifying a volume_type with destination_type=local '
                      'is not supported', str(ex))
    def test_from_api_invalid_specify_volume_type_with_source_volume_mapping(
            self):
        api_dict = {'id': 1,
                    'source_type': 'volume',
                    'destination_type': 'volume',
                    'uuid': 'fake-volume-id-1',
                    'volume_type': 'fake-lvm-1'}
        ex = self.assertRaises(exception.InvalidBDMFormat,
                               block_device.BlockDeviceDict.from_api,
                               api_dict, False)
        self.assertIn('Specifying volume type to existing volume is '
                      'not supported', str(ex))
    def test_image_mapping(self):
        """get_image_mapping strips DB bookkeeping fields from a BDM."""
        removed_fields = ['id', 'instance_uuid', 'connection_info',
                          'created_at', 'updated_at', 'deleted_at', 'deleted']
        for bdm in self.new_mapping:
            mapping_bdm = fake_block_device.FakeDbBlockDeviceDict(
                    bdm).get_image_mapping()
            for fld in removed_fields:
                self.assertNotIn(fld, mapping_bdm)
    def _test_snapshot_from_bdm(self, template):
        """Shared assertions: a snapshot BDM inherits size, termination
        policy, device name and bus/type/boot_index from its template."""
        snapshot = block_device.snapshot_from_bdm('new-snapshot-id', template)
        self.assertEqual('new-snapshot-id', snapshot['snapshot_id'])
        self.assertEqual('snapshot', snapshot['source_type'])
        self.assertEqual('volume', snapshot['destination_type'])
        self.assertEqual(template.volume_size, snapshot['volume_size'])
        self.assertEqual(template.delete_on_termination,
                         snapshot['delete_on_termination'])
        self.assertEqual(template.device_name, snapshot['device_name'])
        for key in ['disk_bus', 'device_type', 'boot_index']:
            self.assertEqual(template[key], snapshot[key])
    def test_snapshot_from_bdm(self):
        for bdm in self.new_mapping:
            self._test_snapshot_from_bdm(objects.BlockDeviceMapping(**bdm))
    def test_snapshot_from_object(self):
        for bdm in self.new_mapping[:-1]:
            obj = objects.BlockDeviceMapping()
            obj = objects.BlockDeviceMapping._from_db_object(
                   None, obj, fake_block_device.FakeDbBlockDeviceDict(
                       bdm))
            self._test_snapshot_from_bdm(obj)
class GetBDMImageMetadataTestCase(test.NoDBTestCase):
    """Tests for block_device.get_bdm_image_metadata, which derives image
    metadata from the boot volume or snapshot of a legacy BDM list."""
    def setUp(self):
        super().setUp()
        self.compute_api = compute_api.API()
        self.context = context.RequestContext('fake', 'fake')
    def _test_get_bdm_image_metadata__bootable(self, is_bootable=False):
        """Non-bootable boot volumes must raise; bootable ones yield the
        default (empty) image metadata."""
        block_device_mapping = [{
            'id': 1,
            'device_name': 'vda',
            'no_device': None,
            'virtual_name': None,
            'snapshot_id': None,
            'volume_id': '1',
            'delete_on_termination': False,
        }]
        expected_meta = {
            'min_disk': 0, 'min_ram': 0, 'properties': {}, 'size': 0,
            'status': 'active',
        }
        def get_vol_data(*args, **kwargs):
            return {'bootable': is_bootable}
        with mock.patch.object(
            self.compute_api.volume_api, 'get', side_effect=get_vol_data,
        ):
            if not is_bootable:
                self.assertRaises(
                    exception.InvalidBDMVolumeNotBootable,
                    block_device.get_bdm_image_metadata,
                    self.context,
                    self.compute_api.image_api,
                    self.compute_api.volume_api,
                    block_device_mapping)
            else:
                meta = block_device.get_bdm_image_metadata(
                    self.context, self.compute_api.image_api,
                    self.compute_api.volume_api, block_device_mapping)
                self.assertEqual(expected_meta, meta)
    def test_get_bdm_image_metadata__non_bootable(self):
        self._test_get_bdm_image_metadata__bootable(False)
    def test_get_bdm_image_metadata__bootable(self):
        self._test_get_bdm_image_metadata__bootable(True)
    def test_get_bdm_image_metadata__basic_property(self):
        """volume_image_metadata values are surfaced in the image meta."""
        block_device_mapping = [{
            'id': 1,
            'device_name': 'vda',
            'no_device': None,
            'virtual_name': None,
            'snapshot_id': None,
            'volume_id': '1',
            'delete_on_termination': False,
        }]
        fake_volume = {
            'volume_image_metadata': {
                'min_ram': 256, 'min_disk': 128, 'foo': 'bar',
            },
        }
        with mock.patch.object(
            self.compute_api.volume_api, 'get', return_value=fake_volume,
        ):
            meta = block_device.get_bdm_image_metadata(
                self.context, self.compute_api.image_api,
                self.compute_api.volume_api, block_device_mapping)
            self.assertEqual(256, meta['min_ram'])
            self.assertEqual(128, meta['min_disk'])
            self.assertEqual('active', meta['status'])
            self.assertEqual('bar', meta['properties']['foo'])
    def test_get_bdm_image_metadata__snapshot_basic_property(self):
        """For snapshot-backed BDMs the metadata comes from the snapshot's
        source volume (get_snapshot then get on its volume_id)."""
        block_device_mapping = [{
            'id': 1,
            'device_name': 'vda',
            'no_device': None,
            'virtual_name': None,
            'snapshot_id': '2',
            'volume_id': None,
            'delete_on_termination': False,
        }]
        fake_volume = {
            'volume_image_metadata': {
                'min_ram': 256, 'min_disk': 128, 'foo': 'bar',
            },
        }
        fake_snapshot = {'volume_id': '1'}
        with test.nested(
            mock.patch.object(
                self.compute_api.volume_api, 'get',
                return_value=fake_volume),
            mock.patch.object(
                self.compute_api.volume_api, 'get_snapshot',
                return_value=fake_snapshot),
        ) as (volume_get, volume_get_snapshot):
            meta = block_device.get_bdm_image_metadata(
                self.context, self.compute_api.image_api,
                self.compute_api.volume_api, block_device_mapping)
            self.assertEqual(256, meta['min_ram'])
            self.assertEqual(128, meta['min_disk'])
            self.assertEqual('active', meta['status'])
            self.assertEqual('bar', meta['properties']['foo'])
            volume_get_snapshot.assert_called_once_with(
                self.context, block_device_mapping[0]['snapshot_id'])
            volume_get.assert_called_once_with(
                self.context, fake_snapshot['volume_id'])
    @mock.patch.object(
        cinder.API, 'get',
        side_effect=exception.CinderConnectionFailed(reason='error'))
    def test_get_bdm_image_metadata__cinder_down(self, mock_get):
        # A cinder outage propagates as CinderConnectionFailed.
        bdms = [
            objects.BlockDeviceMapping(
                **fake_block_device.FakeDbBlockDeviceDict({
                    'id': 1,
                    'volume_id': 1,
                    'source_type': 'volume',
                    'destination_type': 'volume',
                    'device_name': 'vda',
                })
            )
        ]
        self.assertRaises(
            exception.CinderConnectionFailed,
            block_device.get_bdm_image_metadata,
            self.context,
            self.compute_api.image_api,
            self.compute_api.volume_api,
            bdms, legacy_bdm=True)
class GetImageMetadataFromVolumeTestCase(test.NoDBTestCase):
    """Tests for block_device.get_image_metadata_from_volume."""
    def test_inherit_image_properties(self):
        properties = {'fake_prop': 'fake_value'}
        volume = {'volume_image_metadata': properties}
        image_meta = block_device.get_image_metadata_from_volume(volume)
        self.assertEqual(properties, image_meta['properties'])
    def test_image_size(self):
        # Volume size is reported in GiB; image size must be in bytes.
        volume = {'size': 10}
        image_meta = block_device.get_image_metadata_from_volume(volume)
        self.assertEqual(10 * units.Gi, image_meta['size'])
    def test_image_status(self):
        # The derived image is always reported as active.
        volume = {}
        image_meta = block_device.get_image_metadata_from_volume(volume)
        self.assertEqual('active', image_meta['status'])
    def test_values_conversion(self):
        # String min_ram/min_disk values are converted to ints.
        properties = {'min_ram': '5', 'min_disk': '7'}
        volume = {'volume_image_metadata': properties}
        image_meta = block_device.get_image_metadata_from_volume(volume)
        self.assertEqual(5, image_meta['min_ram'])
        self.assertEqual(7, image_meta['min_disk'])
    def test_suppress_not_image_properties(self):
        """Glance bookkeeping keys must not leak into 'properties'."""
        properties = {
            'min_ram': '256', 'min_disk': '128', 'image_id': 'fake_id',
            'image_name': 'fake_name', 'container_format': 'ami',
            'disk_format': 'ami', 'size': '1234', 'checksum': 'fake_checksum',
        }
        volume = {'volume_image_metadata': properties}
        image_meta = block_device.get_image_metadata_from_volume(volume)
        self.assertEqual({}, image_meta['properties'])
        self.assertEqual(0, image_meta['size'])
        # volume's properties should not be touched
        self.assertNotEqual({}, properties)
|
4,986 | f5331b56abea41873bd3936028471d0da1c58236 | #Developer: Chritian D. Goyes
"""Print the user's name and their age computed from a birth year."""
myName = 'Christian D. Goyes'
myDate = 1998  # birth year
year = 2020    # reference year used for the age calculation
age = year - myDate
# Bug fix: the original printed the age where the name belonged and left the
# age out entirely ("yourname is: 22 and your are years old").
print("your name is:", myName, "and you are", age, "years old")
4,987 | 7ea1ee7c55cd53f7137c933790c3a22957f0ffea | from django.db import models
from django.core.validators import RegexValidator, MaxValueValidator
# from Delivery.models import Delivery
# from Customers.models import Customer, Address, Order, Item
# Create your models here.
class Restaurant(models.Model):
    """A restaurant with its location, contact details and rating."""
    Restaurant_ID = models.AutoField(primary_key=True)
    Restaurant_Name = models.CharField(max_length=250)
    # Static upload path; a per-restaurant path (ID/Name) was sketched in the
    # original but never implemented.
    Restaurant_Logo = models.ImageField(upload_to='Restaurants/Pictures/Logo')
    Restaurant_Area = models.CharField(max_length=250)
    # NOTE(review): an int default on a CharField works but yields an int in
    # Python until the row is saved -- consider default='132658'.
    Restaurant_Pin = models.CharField(max_length=6, default=132658)
    Restaurant_City = models.CharField(max_length=250)
    Restaurant_State = models.CharField(max_length=250)
    # Validates international phone numbers of 9-15 digits, optional leading +.
    Restaurant_Regex = RegexValidator(regex=r'^\+?1?\d{9,15}$',
                                      message="Phone number must be entered in the format:" +
                                      " '+999999999'. Up to 15 digits allowed.")
    Restaurant_Num = models.CharField(validators=[Restaurant_Regex], max_length=17)
    Restaurant_Email = models.CharField(max_length=250)
    Restaurant_Ratings_Count = models.IntegerField()
    # Bug fix: the validator was previously passed positionally, making it the
    # field's verbose_name, so the 0-10 cap was never enforced.
    Restaurant_Rating = models.IntegerField(validators=[MaxValueValidator(10)])
class FoodCategory(models.Model):
    """A named category that groups Food items (see Food.Food_Category_ID)."""
    FoodCategory_ID = models.AutoField(primary_key=True)
    FoodCategory_Name = models.CharField(max_length=250)
class Food(models.Model):
    """A menu item belonging to one Restaurant and one FoodCategory.

    Deleting the restaurant or the category cascades to its food items.
    """
    Food_ID = models.AutoField(primary_key=True)
    Food_Name = models.CharField(max_length=250)
    Food_Pic = models.ImageField(upload_to='Restaurants/Pictures/Food')
    Food_Category_ID = models.ForeignKey(FoodCategory, on_delete=models.CASCADE)
    # Price and discount are whole currency units / whole-number amounts.
    Food_Price = models.IntegerField()
    Food_Discount = models.IntegerField(default=0)
    Food_Res_ID = models.ForeignKey(Restaurant, on_delete=models.CASCADE)
|
4,988 | 4f93af104130f5a7c853ee0e7976fd52847e588a | from django.db.models.signals import post_save
from django.dispatch import receiver
from django.contrib.auth import get_user_model
from .models import Profile
User = get_user_model()
# NOTE(review): post_save fires for any User save, including superuser
# creation -- the original comment claiming it "wan't run on creating
# superuser" looks inaccurate; verify against the project's user flow.
@receiver(post_save, sender=User)
def save_profile(sender, created, instance, **kwargs):
    """Create a Profile for each newly created User.

    Acts only when `created` is True so later saves of an existing user do
    not attempt to create duplicate profiles.
    """
    if created:
        # Fix: objects.create() already INSERTs the row; the follow-up
        # profile.save() issued a redundant UPDATE and has been removed.
        Profile.objects.create(user=instance)
|
4,989 | 0125abab0312d8f007e76ee710348efc9daae31e | # First, we'll import pandas, a data processing and CSV file I/O library
import pandas as pd
# We'll also import seaborn, a Python graphing library
import warnings # current version of seaborn generates a bunch of warnings that we'll ignore
warnings.filterwarnings("ignore")
import seaborn as sns
import matplotlib.pyplot as plt
# Seaborn defaults: white background, colour-code shorthand ('b', 'g', ...).
sns.set(style="white", color_codes=True)
# Load the dataset into a pandas DataFrame (CSV expected in the working dir).
iris = pd.read_csv("finalOutputV1.csv")
# Box plot of petal length grouped by species; show() blocks until closed.
sns.boxplot(x="Species", y="PetalLengthCm", data=iris)
plt.show()
4,990 | 739921a6a09edbb81b442f4127215746c601a69a | # Written by Jagannath Bilgi <jsbilgi@yahoo.com>
import sys
import json
import re
"""
Program accepts *.md document and converts to csv in required format
Program parse line by line and uses recursive method to traverse from leaf to root.
Single turn object (string, int etc) is used as point of return from recursion.
"""
# Defaults come from 'awesome-transform.param': line 1 = input file path,
# line 2 = output file path. CLI arguments override them positionally.
default_input_file = ''
default_output_file = ''
no_of_parameters = len(sys.argv)
if no_of_parameters == 1:
    f = open('awesome-transform.param')
    for lno, fname in enumerate(f):
        if lno == 0:
            default_input_file = fname.rstrip()
        else:
            default_output_file = fname.rstrip()
    f.close()
# argv[1] (when present) is the input file, argv[2] the output file.
if no_of_parameters < 2:
    input_file = default_input_file
else:
    input_file = sys.argv[1]
if no_of_parameters < 3:
    output_file = default_output_file
else:
    output_file = sys.argv[2]
# Despite the .md naming, the input is parsed as JSON.
with open(input_file) as f:
    json_data = json.load(f)
def obj_rec(obj, t, flag=0, acc=''):
    """Recursively flatten a JSON node into accumulated CSV text.

    obj  -- current node: a scalar leaf, a list, or a dict
    t    -- comma-joined path of ancestor keys (category column)
    flag -- 0 for the initial call, 1 while recursing
    acc  -- CSV text accumulated so far

    NOTE(review): the leaf branch returns `acc` unconditionally, which makes
    everything after that `return` in the branch unreachable -- the
    title/url extracted from a bare scalar leaf is computed and then
    discarded. Left untouched here; confirm intent before changing.
    """
    v_obj = type(obj)
    r = ''
    if type(obj) not in [dict, list, map]:
        # Scalar leaf: pull the last "[title](http...)" markdown link parts.
        ref_url = re.findall(r'\((http.*?)\)', obj)
        ref_title = re.findall(r'\[[^\[\]]*\]', obj)
        if ref_url:
            url = ref_url[len(ref_url)-1].strip('[]')
            title = ref_title[len(ref_title)-1].strip('[]')
            url_title = title + ',' + url
        else:
            url = ''
            title = ''
            url_title = title + ',' + url
        return acc
        # --- unreachable from here to the end of this branch (see note) ---
        if acc:
            if flag == 0:
                return acc + '\n'
            else:
                return acc + url_title + ',' + '"' + t + '"' + '\n'
        else:
            if flag == 0:
                return ',,"' + url_title + ',' + t + '"' + '\n'
            else:
                return ',' + url_title + ',' + '"' + t + '"' + '\n'
    elif v_obj == list:
        # Fold over the list: process the head, recurse on the tail.
        if obj:
            return obj_rec(obj[1:], t, flag, obj_rec(obj[0], t, 1, acc))
        else:
            return acc
    elif v_obj == dict:
        if bool(obj):
            for o in obj:
                k = o
                oo = obj[o]
                if type(oo) in [list, dict]:
                    # Container value: recurse with the key appended to the path.
                    r = obj_rec(oo, t + ',' + o, 1, acc)
                    acc = ""
                else:
                    # Scalar value: extract markdown link parts as in the leaf case.
                    ref_url = re.findall(r'\((http.*?)\)', oo)
                    ref_title = re.findall(r'\[[^\[\]]*\]', oo)
                    if ref_url:
                        url = ref_url[len(ref_url) - 1].strip('[]')
                        title = ref_title[len(ref_title)-1].strip('[]')
                        url_title = title + ',' + url
                    else:
                        url = ''
                        title = ''
                        url_title = title + ',' + url
                    if not acc:
                        sep = ','
                    else:
                        sep = ''
                    if not url:
                        r = sep + url_title + ',' + '"' + t + ',' + k + '",\n'
                # Only the first key is processed per pass; the dict shrinks
                # by one key each recursion.
                break
            del obj[k]
            if not obj:
                sep = '\n'
            else:
                sep = ''
            return obj_rec(obj, t, flag, acc + r + sep)
        else:
            return acc[:-1]
# One converted chunk per top-level key; the key text before any '(' seeds
# the category path, and the leading separator char from obj_rec is dropped.
itemlist = []
for o in json_data:
    itemlist.append((obj_rec(json_data[o], o.split("(", 1)[0])[1:]))
with open(output_file, 'w') as outfile:
    outfile.writelines(["%s\n" % item for item in itemlist])
4,991 | 0ced42c8bfaad32fc2b397326150e6c7bc5cedab | import torch
from training import PointNetTrain, PointAugmentTrain, Model
#from PointAugment.Augment.config import opts
from data_utils.dataloader import DataLoaderClass
from mpl_toolkits import mplot3d
import matplotlib.pyplot as plt
import numpy as np
import yaml
def visualize_batch(pointclouds, pred_labels, labels, categories):
    """Render a grid of 3-D scatter plots, one subplot per point cloud.

    Each subplot is titled with the ground-truth and predicted category and
    drawn green when they agree, red otherwise. Blocks on plt.show().
    """
    n_clouds = len(pointclouds)
    grid_cols = 5
    grid_rows = max(1, n_clouds // grid_cols)
    figure = plt.figure(figsize=(8, n_clouds / 2))
    for pos, cloud in enumerate(pointclouds):
        truth = categories[int(labels[pos].item())]
        guess = categories[int(pred_labels[pos])]
        points = cloud.cpu().numpy()
        axes = figure.add_subplot(grid_rows, grid_cols, pos + 1, projection='3d')
        axes.scatter(points[:, 0], points[:, 1], points[:, 2],
                     c='g' if truth == guess else 'r', s=2)
        axes.axis('off')
        axes.set_title('GT: {0}\nPred: {1}'.format(truth, guess))
    plt.show()
if __name__ == '__main__':
    # Experiment entry point: load the YAML config, pick a device, build the
    # ModelNet10 dataloaders and run PointNet evaluation. The training call
    # and the PointAugment variant are kept commented out for reference.
    with open("config.yaml", "r") as yamlfile:
        config = yaml.load(yamlfile, Loader=yaml.FullLoader)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # PointNet
    training_instance_2 = PointNetTrain(config['MODEL']['POINTNET'], device)
    modelnet10_dataloader = DataLoaderClass(config['DATA']['MODELNET10'], config['MODEL']['POINTNET']['TRAINING'])
    #training_instance_2.train(modelnet10_dataloader.trainloader, modelnet10_dataloader.validloader, adv = False)
    training_instance_2.test(modelnet10_dataloader.validloader)
    # Point Augment
    #training_instance_1 = PointAugmentTrain(config['MODEL']['POINT_AUGMENT'], device)
    #modelnet10_dataloader = DataLoaderClass(config['DATA']['MODELNET10'], config['MODEL']['POINTNET']['TRAINING'])
    #training_instance_1.train(modelnet10_dataloader.trainloader, modelnet10_dataloader.validloader, adv = False)
    #training_instance_1.test(modelnet10_dataloader.validloader)
|
4,992 | f4ea36c3154f65c85647da19cfcd8a058c507fe1 | from flask import Flask
from apis import api
# Application entry point: build the Flask app, load settings from the
# local `config` module and attach the API object defined in `apis`.
app = Flask(__name__)
app.config.from_object('config')
api.init_app(app)

if __name__ == "__main__":
    # Development server only; use a proper WSGI server in production.
    app.run()
4,993 | 1a561ca0268d084c8fdde5de65ce0c7e68154eec | # -*- coding: utf-8 -*-
import urllib
from urllib2 import HTTPError
from datetime import datetime
from flask.views import MethodView
from flask.ext.login import current_user, login_required
from flask.ext.paginate import Pagination as PaginationBar
from flask import render_template, redirect, url_for, request, jsonify, flash, current_app, abort
from koushihime.auth.models import UserOperation, User, Role
from koushihime.auth.constants import Permission, Operation
from koushihime.utils import Pagination, admin_required, Env
from koushihime.utils.moegirl import MoegirlQuery, MoegirlImage
from . import main
from utils import recent_have_pushed, have_auto_catched
from models import WaitingQueue, BanList, RulePushCount
from forms import PushForm, AddUserForm, EditProfileForm, AdminEditProfileForm, BanKeywordForm, CookieForm
@main.before_request
def before_request():
    """Gate every request on this blueprint.

    Anonymous users are sent to the login page, blocked users see the block
    page, and everyone else gets their last-seen timestamp refreshed.
    """
    if current_user.is_anonymous:
        return redirect(url_for('auth.login'))
    elif current_user.is_blocked:
        return render_template('main/auth/block.html')
    else:
        current_user.last_seen = datetime.utcnow()
        current_user.save()
class Index(MethodView):
    """Landing page; supplies the Weibo OAuth app key and encoded callback."""

    def get(self):
        # Defensive re-check; before_request already redirects anonymous users.
        if not current_user:
            return redirect(url_for("auth.login"))
        config = current_app.config["WEIBO_AUTH_CONFIG"]
        # URL-encode the callback so it survives being an OAuth query param.
        callback = urllib.quote(config["CALLBACK"])
        app_key = config["APP_KEY"]
        return render_template('main/index.html', callback=callback, app_key=app_key)
class Update(MethodView):
    """Pending-push queue: paginated listing plus promote/delete AJAX actions."""
    decorators = [login_required]

    def get(self, page):
        per_page = 10
        # Highest cutting_weight first -- that entry is pushed next.
        unpushed_entry = WaitingQueue.query.order_by(WaitingQueue.cutting_weight.desc()).all()
        pagination = Pagination(unpushed_entry, per_page)
        current_page = pagination.page(page)
        foot_bar = PaginationBar(css_framework='bootstrap3', link_size='sm',
                                 show_single_page=True, page=page,
                                 per_page=per_page, total=len(unpushed_entry),
                                 format_total=True, format_number=True)
        result = {
            "titles": current_page,
            "current_time": datetime.utcnow(),
            "pushtime": 10,
            "deltime": 999,
            "page": page,
            "per_page": per_page,
            "pagination": foot_bar
        }
        return render_template('main/update.html', **result)

    def post(self, page):
        """Handle AJAX: 'post' bumps an entry to the top of the queue,
        'del' removes it and records the operation."""
        data = request.get_json()
        if data['action'] == 'post':
            title = data["title"]
            env = Env()
            current_weight = env.get("CUTTING_WEIGHT_INIT")
            entry = WaitingQueue.query.filter_by(title=title).first()
            if entry:
                # FIXME: the weight can still be bumped even when the entry
                # is already the highest-weighted one. (translated)
                entry.cutting_weight = current_weight + 1
                entry.save()
                env.set("CUTTING_WEIGHT_INIT", entry.cutting_weight)
        elif data['action'] == 'del':
            title = data['title']
            UserOperation(user_id=current_user.id, operation=Operation.DELETE, title=title).save()
            query = WaitingQueue.query.filter_by(title=data['title']).first()
            if query:
                query.delete()
        response = jsonify({'result': True})
        return response
class ManualUpdate(MethodView):
    """Manually push a Moegirl entry into the waiting queue.

    Fixes over the original: every failed check now *returns* after
    flash()-ing -- previously execution fell through and the push happened
    anyway (including for users without the MANUAL_PUSH permission) -- and
    the namespace check uses `==` instead of identity `is`.
    """
    decorators = [login_required]

    def __init__(self):
        self.form = PushForm

    def get(self):
        return render_template('main/mupdate.html', form=self.form(), pushtime=10)

    def post(self):
        # Permission gate: the early return is the fix; the push used to
        # proceed even when this check failed.
        if not current_user.can(Permission.MANUAL_PUSH):
            flash(u"你没有权限")
            return redirect(url_for('main.mupdate'))
        form = self.form(request.form)
        if not form.validate():
            flash(u"条目格式有问题,请检查并重新填写")
            return redirect(url_for('main.mupdate'))
        title = form.pushtitle.data
        result = self.check_push_validate(title.encode("utf-8"))
        if not result:
            flash(u"推送条目被ban,或者已经在24小时之内推送过,或者已经进入待推送列表")
            return redirect(url_for('main.mupdate'))
        try:
            image = MoegirlImage(title)
        except HTTPError as e:
            flash(u"请求萌百错误,错误码如下{},请联系管理员".format(e))
            return redirect(url_for('main.mupdate'))
        if not image.path:
            flash(u"无法取得图片,请重试")
            return redirect(url_for('main.mupdate'))
        # Give the entry the highest cutting weight seen so far, so it sits
        # at the head of the queue.
        entry = WaitingQueue(title=title, image=image.path)
        env = Env()
        current_weight = env.get("CUTTING_WEIGHT_INIT")
        entry.cutting_weight = current_weight + 1
        entry.save()
        env.set("CUTTING_WEIGHT_INIT", entry.cutting_weight)
        UserOperation(user_id=current_user.id, title=title, operation=Operation.PUSH).save()
        if form.industry.data:
            # Immediate push requested: run the cron job inline.
            try:
                from koushihime.crontab import push
                push()
            except Exception as e:
                flash(u"推送失败: {}".format(str(e)))
        flash(u"操作成功,词条将立即推送")
        return redirect(url_for('main.mupdate'))

    @staticmethod
    def check_push_validate(title):
        """Return True when `title` may be pushed: main namespace (0), not
        banned by category or regex, not recently pushed or auto-caught."""
        moegirl_entry = MoegirlQuery(title)
        namespace = moegirl_entry.get_namespace()
        # Fix: `is 0` relied on CPython small-int caching; compare by value.
        if namespace == 0:
            baned_from_moegirl = moegirl_entry.banned_moegirl_category()
            baned_from_regex = moegirl_entry.ban_from_regex()
            has_pushed = recent_have_pushed(title.decode("utf-8"))  # TODO: switch to auto-bubbling (translated)
            has_catched = have_auto_catched(title.decode("utf-8"))
            result = baned_from_moegirl is False \
                and has_pushed is False \
                and has_catched is False \
                and baned_from_regex is False
            return result
        else:
            return False
class UserInfo(MethodView):
    """Profile page, visible only to the profile owner or an administrator."""
    decorators = [login_required]

    def get(self, username):
        # Guard clause: anyone who is neither the owner nor an admin is
        # rejected before any lookup happens.
        is_admin = current_user.can(Permission.ADMINISTER)
        if not (current_user.username == username or is_admin is True):
            abort(403)
        user_info = User.query.filter_by(username=username, deleted=False).first()
        if not user_info:
            abort(404)
        return render_template('main/user.html', u=user_info, username=user_info.username)
class UserList(MethodView):
    """Admin user management: listing, creation, and edit/delete AJAX actions."""
    decorators = [login_required, admin_required]

    def __init__(self):
        self.form = AddUserForm

    def get(self):
        userlist = User.query.filter_by(deleted=False).all()
        return render_template('main/userlist.html', userlist=userlist, form=self.form())

    def post(self):
        data = request.get_json()
        if data:
            if data['action'] == 'edit':
                username = data['username']
            else:
                username = data['username']
                # Fix: filter_by(...).first() returns None for a missing
                # user, so `.delete()` raises AttributeError -- catch only
                # that instead of the original bare `except:` which hid
                # every failure (including programming errors).
                try:
                    User.query.filter_by(username=username, deleted=False).first().delete()
                except AttributeError:
                    flash(u'用户不存在')
            return jsonify({"status": 302, "location": url_for('main.editprofile', username=username)})
        elif request.form:
            self.add_user()
        return redirect('userlist')

    def add_user(self):
        """Create a user from the posted AddUserForm; flash on duplicate
        email or unknown role name."""
        form = self.form(request.form)
        if form.validate():
            role = Role.query.filter_by(name=form.role.data).first()
            if role:
                if not User.query.filter_by(email=form.email.data).first():
                    user = User(email=form.email.data, username=form.username.data,
                                role=role, password=form.password.data)
                    user.save()
                else:
                    flash(u'已经存在该用户')
            else:
                flash(u'不存在该用户组')
        return redirect(url_for('main.userlist'))
class EditProfile(MethodView):
    """Edit your own profile, or -- as an administrator -- any user's."""
    decorators = [login_required]

    def __init__(self):
        self.form = EditProfileForm
        self.admin_form = AdminEditProfileForm

    def get(self, username):
        if not username:  # user edits their own profile
            form = self.form()
            form.email.data = current_user.email
            form.about_me.data = current_user.aboutme
        else:
            if current_user.can(Permission.ADMINISTER):
                user_info = User.query.filter_by(username=username, deleted=False).first()
                if user_info:
                    form = self.admin_form()
                    form.email.data = user_info.email
                    form.about_me.data = user_info.aboutme
                    form.role.data = user_info.role.name
                else:
                    flash(u'用户不存在')
                    return redirect(url_for('main.index'))
            else:
                abort(403)
        return render_template('main/edit_profile.html', form=form, u=current_user)

    def post(self, username):
        if not username:
            form = self.form(request.form)
            user = current_user
        else:
            if current_user.can(Permission.ADMINISTER):
                # Fix: use the admin form here -- change_profile(admin=True)
                # reads form.role.data, which only AdminEditProfileForm has
                # (get() already used admin_form for this branch).
                form = self.admin_form(request.form)
                user = User.query.filter_by(username=username, deleted=False).first()
                if user:
                    if not current_user.verify_password(form.oripassword.data):
                        flash(u'管理员密码输入错误')
                        return redirect(url_for('main.editprofile', username=username))
                else:
                    flash(u'用户不存在')
                    return redirect(url_for('main.index'))
            else:
                abort(403)
        self.change_profile(user, form, True if username else False)
        return redirect(url_for('main.user', username=username))

    @staticmethod
    def change_profile(user, form, admin=False):
        """Apply form fields to `user`; role changes only for admin edits.

        NOTE(review): password/email are overwritten unconditionally, even
        when the posted fields are blank -- confirm this is intended.
        """
        user.password = form.password.data
        user.email = form.email.data
        user.aboutme = form.about_me.data
        if admin:
            # Fix: filter_by() returns a Query; without .first() the Query
            # object itself was assigned to user.role.
            new_role = Role.query.filter_by(name=form.role.data).first()
            if new_role:
                user.role = new_role
        user.save()
class OperationLog(MethodView):
    """Paginated audit log of user operations, newest first. Admin only."""
    decorators = [login_required, admin_required]

    def get(self, page):
        per_page = 10
        count = UserOperation.query.count()
        query = UserOperation.query.order_by(UserOperation.id.desc())\
            .paginate(page=page, per_page=per_page, error_out=False)
        foot_bar = PaginationBar(css_framework='bootstrap3', link_size='sm',
                                 show_single_page=False, page=page, per_page=per_page,
                                 total=count, format_total=True, format_number=True)
        # Operation is passed through so the template can render op names.
        return render_template('main/log.html', records=query.items,
                               page=page, per_page=per_page, pagination=foot_bar, Operation=Operation)
class KeywordBan(MethodView):
    """Manage banned keywords: paginated listing, add (form) and delete (AJAX)."""
    decorators = [login_required, admin_required]

    def __init__(self):
        self.form = BanKeywordForm

    def get(self, page):
        per_page = 10
        count = BanList.query.filter_by(deleted=False).count()
        # TODO: cache keywords in config to cut down query count (translated)
        pagination = BanList.query.filter_by(deleted=False)\
            .paginate(page=page, per_page=per_page, error_out=False)
        foot_bar = PaginationBar(css_framework='bootstrap3', link_size='sm',
                                 show_single_page=False, page=page, per_page=per_page,
                                 total=count, format_total=True, format_number=True)
        template_param = {
            'keywords': pagination.items,
            'page': page,
            'per_page': per_page,
            'pagination': foot_bar,
            'form': self.form()
        }
        return render_template('main/ban.html', **template_param)

    def post(self, page):
        """JSON body deletes a keyword; form body adds (or revives) one."""
        data = request.get_json()
        if data:
            keyword = data['keyword']
            result = BanList.query.filter_by(rule=keyword).first()
            if result:
                # Remove the attached push-count row before the rule itself.
                if result.status:
                    result.status.delete()
                result.delete()
                flash(u'成功删除关键词')
            else:
                flash(u'该关键词不存在')
            return jsonify({"status": 302, "location": url_for('main.ban')})
        elif request.form:
            form = self.form(request.form)
            if form.validate():
                exist = BanList.query.filter_by(rule=form.keyword.data).first()
                if not exist:
                    ban = BanList(rule=form.keyword.data, time_limit=form.time_limit.data)
                    ban.save()
                    status = RulePushCount(rule_id=ban.id, count=ban.time_limit)
                    status.save()
                    flash(u'添加关键词成功')
                else:
                    # Soft-deleted rule: revive it with the new time limit.
                    if exist.deleted is True:
                        exist.deleted = False
                        exist.time_limit = form.time_limit.data
                        exist.save()
                        status = RulePushCount(rule_id=exist.id, count=exist.time_limit)
                        status.save()
                    else:
                        flash(u'重复添加关键词')
            return redirect(url_for('main.ban'))
# TODO: deprecated
class WeiboAuthCallback(MethodView):
    """Weibo OAuth callback handler; the token exchange is disabled
    (commented out) so fresh_access() currently always succeeds."""
    decorators = [login_required, admin_required]

    def get(self):
        self.auth_code = request.args.get("code")
        result = self.fresh_access()
        if result is True:
            return render_template('main/success.html')
        else:
            return render_template('main/failed.html', e=result)

    def fresh_access(self):
        """Exchange self.auth_code for an access token (disabled).

        Returns True on success, or the caught exception on failure.
        """
        # config = current_app.config["WEIBO_AUTH_CONFIG"]
        # callback = config["CALLBACK"]
        # app_key = config["APP_KEY"]
        # app_secret_key = config["APP_SECRET"]
        try:
            pass
            # client = APIClient(app_key=app_key, app_secret=app_secret_key, redirect_uri=callback)
            # token_data = client.request_access_token(self.auth_code)
            # access_token, expires_in = token_data.access_token, token_data.expires_in
        except BaseException as e:
            return e
        # config["ACCESS_TOKEN"] = access_token
        # config["EXPIRE_TIME"] = expires_in
        # env = Env()
        # env.set("ACCESS_TOKEN", access_token)
        # env = Env()
        # env.set("EXPIRE_TIME", expires_in)
        return True
class Cookie(MethodView):
    """Admin page that stores the scraping Cookie in the Env store."""
    decorators = [login_required, admin_required]

    def __init__(self):
        self.form = CookieForm

    def get(self):
        return render_template('main/cookie.html', form=self.form(), pushtime=10)

    def post(self):
        form = self.form(request.form)
        # Fix: the original flashed on an invalid form but fell through and
        # saved the cookie (and flashed success) anyway; return early now.
        if not form.validate():
            flash(u"表单不合法")
            return redirect(url_for('main.cookie'))
        cookie = form.cookie.data
        env = Env()
        env.set("COOKIE", cookie)
        flash(u"设置 Cookie 成功")
        return redirect(url_for('main.cookie'))
|
4,994 | 38751da57ad7c786e9fc0722faf065380e5f7e60 | # Generated by Django 3.1.1 on 2020-10-10 07:38
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: adds the Notification model.

    sender/receiver are OneToOne links to the user model and are set to
    NULL when the linked user is deleted.
    """

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('socialapp', '0004_mesage_creation_date'),
    ]

    operations = [
        migrations.CreateModel(
            name='Notification',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('content', models.TextField(max_length=200)),
                ('creation_date', models.DateTimeField(auto_now_add=True)),
                ('receiver', models.OneToOneField(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='receiver_not', to=settings.AUTH_USER_MODEL)),
                ('sender', models.OneToOneField(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='sender_not', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
4,995 | efba815fe64cddb5315b17b2cbaf1d3fc38c11ee | from django.db import models
import string
import random
def id_generator(size=32, chars=string.ascii_uppercase + string.digits):
    """Return a random `size`-char string unique among Item.random_str values.

    Used as the default callable for Item.random_str.
    """
    while True:
        ran = ''.join(random.choice(chars) for _ in range(size))
        # Fix: `.exists()` lets the DB answer with LIMIT 1 instead of
        # fetching every matching row just to len() it, as the original
        # `len(Item.objects.filter(...)) == 0` did.
        if not Item.objects.filter(random_str=ran).exists():
            return ran
# Create your models here.
class Item(models.Model):
    """A shop item whose sale `price` derives from original_price * markup
    on first save (when not set explicitly)."""
    name = models.CharField(max_length=999, unique=True)
    description = models.TextField(blank=True)
    # Random identifier; id_generator checks for collisions at generation
    # time, though there is no DB-level unique constraint here.
    random_str = models.CharField(max_length=999, default=id_generator)
    # NOTE(review): FloatField for money is lossy; DecimalField would be safer.
    original_price = models.FloatField()
    markup_percentage = models.PositiveIntegerField(default=120)
    price = models.FloatField(blank=True)
    discount_percentage = models.PositiveIntegerField(default=0)
    # TODO: sizes (translated from Estonian "suurused")
    img = models.ImageField()
    img_2 = models.ImageField(null=True, blank=True)
    img_3 = models.ImageField(null=True, blank=True)
    img_4 = models.ImageField(null=True, blank=True)

    def save(self, *args, **kwargs):
        # Derive the sale price once, only when it was not supplied.
        if self.price is None:
            self.price = self.original_price * self.markup_percentage / 100
        super(Item, self).save(*args, **kwargs)

    def __str__(self):
        # Discounted items show the reduced price and the discount percent.
        if self.discount_percentage == 0:
            return self.name + " - " + str(self.price) + "€"
        else:
            return self.name + " - " + str( self.price*((100-self.discount_percentage)/100) ) + "€ - DISCOUNT " + str(self.discount_percentage) + "%"
4,996 | da98835e48a759cbe7bd29ddba1fac20c006827d | from ortools.sat.python import cp_model
import os
import math
import csv
import sys
def ortoolsSolverReduceVar(num, cap, refill, fun, goal):
    """CP-SAT model of the token/refill game; full int32 variable domains.

    num    -- number of rounds
    cap    -- token capacity (tokens start full and are clamped at cap)
    refill -- tokens granted after each round
    fun    -- per-round fun coefficient multiplied by tokens played
    goal   -- minimum acceptable total fun
    Returns [status name, token values, play values, total fun, solve time];
    the value slots are None when the model is infeasible.
    """
    model = cp_model.CpModel()
    # token[i]: tokens held at the start of round i; play[i]: tokens spent.
    token = [model.NewIntVar(-2147483648, 2147483647, 't%i' % i)
             for i in range(1, num + 1)]
    play = [model.NewIntVar(-2147483648, 2147483647, 'q%i' % i)
            for i in range(1, num + 1)]
    # compare[i] <=> refilling after round i would exceed the cap.
    compare = [model.NewBoolVar('c%i' % i)
               for i in range(1, num + 1)]
    total_fun = sum([fun[i] * play[i] for i in range(num)])
    model.Add(total_fun >= goal)
    model.Add(token[0] == cap)
    for i in range(num):
        model.Add(token[i] - play[i] + refill > cap).OnlyEnforceIf(compare[i])
        model.Add(token[i] - play[i] + refill <=
                  cap).OnlyEnforceIf(compare[i].Not())
        model.Add(play[i] >= 1)
        model.Add(play[i] <= token[i])
    for i in range(1, num):
        # Token recurrence: clamp at cap on overflow, else carry + refill.
        model.Add(token[i] == cap).OnlyEnforceIf(compare[i - 1])
        model.Add(token[i] == token[i - 1] - play[i - 1] +
                  refill).OnlyEnforceIf(compare[i - 1].Not())
    model.Maximize(total_fun)
    solver = cp_model.CpSolver()
    status = solver.Solve(model)
    sat = solver.StatusName()
    time = solver.UserTime()
    if status == cp_model.INFEASIBLE:
        token = None
        play = None
        total_fun = None
    else:
        token = [solver.Value(token[i]) for i in range(num)]
        play = [solver.Value(play[i]) for i in range(num)]
        total_fun = solver.Value(total_fun)
    return [sat, token, play, total_fun, time]
def ortoolsSolverRange(num, cap, refill, fun, goal):
    """Variant of the token/refill model with tight variable domains.

    token/play are restricted to [1, cap] and total_fun is a dedicated
    IntVar in [-100, 1000].
    NOTE(review): the [-100, 1000] bound silently constrains total fun;
    instances whose optimum falls outside it will report infeasible.
    Same return shape as ortoolsSolverReduceVar.
    """
    model = cp_model.CpModel()
    token = [model.NewIntVar(1, cap, 't%i' % i)
             for i in range(1, num + 1)]
    play = [model.NewIntVar(1, cap, 'q%i' % i)
            for i in range(1, num + 1)]
    # compare[i] <=> refilling after round i would exceed the cap.
    compare = [model.NewBoolVar('c%i' % i)
               for i in range(1, num + 1)]
    total_fun = model.NewIntVar(-100, 1000, 'total_fun')
    model.Add(total_fun == sum([fun[i] * play[i] for i in range(num)]))
    model.Add(total_fun >= goal)
    model.Add(token[0] == cap)
    for i in range(num):
        model.Add(token[i] - play[i] + refill > cap).OnlyEnforceIf(compare[i])
        model.Add(token[i] - play[i] + refill <=
                  cap).OnlyEnforceIf(compare[i].Not())
        model.Add(play[i] >= 1)
        model.Add(play[i] <= token[i])
    for i in range(1, num):
        # Token recurrence: clamp at cap on overflow, else carry + refill.
        model.Add(token[i] == cap).OnlyEnforceIf(compare[i - 1])
        model.Add(token[i] == token[i - 1] - play[i - 1] +
                  refill).OnlyEnforceIf(compare[i - 1].Not())
    model.Maximize(total_fun)
    solver = cp_model.CpSolver()
    status = solver.Solve(model)
    sat = solver.StatusName()
    time = solver.UserTime()
    if status == cp_model.INFEASIBLE:
        token = None
        play = None
        total_fun = None
    else:
        token = [solver.Value(token[i]) for i in range(num)]
        play = [solver.Value(play[i]) for i in range(num)]
        total_fun = solver.Value(total_fun)
    return [sat, token, play, total_fun, time]
def ortoolsSolverNeg(num, cap, refill, fun, goal):
    """Variant handling negative fun rounds: play exactly 1 token there.

    neg[i] is tied to the sign of fun[i] (a Python constant, so the two
    constraints below effectively fix neg[i]); rounds with negative fun are
    forced to play[i] == 1, minimising their damage to the objective.
    Same return shape as ortoolsSolverReduceVar.
    """
    model = cp_model.CpModel()
    token = [model.NewIntVar(-2147483648, 2147483647, 't%i' % i)
             for i in range(1, num + 1)]
    play = [model.NewIntVar(-2147483648, 2147483647, 'q%i' % i)
            for i in range(1, num + 1)]
    compare = [model.NewBoolVar('c%i' % i)
               for i in range(1, num + 1)]
    # neg[i] <=> fun[i] < 0 (see docstring).
    neg = [model.NewBoolVar('n%i' % i)
           for i in range(1, num + 1)]
    total_fun = model.NewIntVar(-2147483648, 2147483647, 'total_fun')
    model.Add(total_fun == sum([fun[i] * play[i] for i in range(num)]))
    model.Add(total_fun >= goal)
    model.Add(token[0] == cap)
    for i in range(num):
        model.Add(token[i] - play[i] + refill > cap).OnlyEnforceIf(compare[i])
        model.Add(token[i] - play[i] + refill <=
                  cap).OnlyEnforceIf(compare[i].Not())
        model.Add(fun[i] < 0).OnlyEnforceIf(neg[i])
        model.Add(fun[i] >= 0).OnlyEnforceIf(neg[i].Not())
        model.Add(play[i] <= token[i])
        model.Add(play[i] == 1).OnlyEnforceIf(neg[i])
        model.Add(play[i] >= 1).OnlyEnforceIf(neg[i].Not())
    for i in range(1, num):
        # Token recurrence: clamp at cap on overflow, else carry + refill.
        model.Add(token[i] == cap).OnlyEnforceIf(compare[i - 1])
        model.Add(token[i] == token[i - 1] - play[i - 1] +
                  refill).OnlyEnforceIf(compare[i - 1].Not())
    model.Maximize(total_fun)
    solver = cp_model.CpSolver()
    status = solver.Solve(model)
    sat = solver.StatusName()
    time = solver.UserTime()
    if status == cp_model.INFEASIBLE:
        token = None
        play = None
        total_fun = None
    else:
        token = [solver.Value(token[i]) for i in range(num)]
        play = [solver.Value(play[i]) for i in range(num)]
        total_fun = solver.Value(total_fun)
    return [sat, token, play, total_fun, time]
def ortoolsSolverComb(num, cap, refill, fun, goal):
    """Combined variant: tight [1, cap] domains plus negative-fun handling.

    Merges ortoolsSolverRange's bounded domains with ortoolsSolverNeg's
    neg[i] machinery; total_fun stays a raw linear expression. Same return
    shape as ortoolsSolverReduceVar.
    """
    model = cp_model.CpModel()
    token = [model.NewIntVar(1, cap, 't%i' % i)
             for i in range(1, num + 1)]
    play = [model.NewIntVar(1, cap, 'q%i' % i)
            for i in range(1, num + 1)]
    compare = [model.NewBoolVar('c%i' % i)
               for i in range(1, num + 1)]
    # neg[i] <=> fun[i] < 0 (fun[i] is a constant, so neg[i] is fixed).
    neg = [model.NewBoolVar('n%i' % i)
           for i in range(1, num + 1)]
    total_fun = sum([fun[i] * play[i] for i in range(num)])
    model.Add(total_fun >= goal)
    model.Add(token[0] == cap)
    for i in range(num):
        model.Add(token[i] - play[i] + refill > cap).OnlyEnforceIf(compare[i])
        model.Add(token[i] - play[i] + refill <=
                  cap).OnlyEnforceIf(compare[i].Not())
        model.Add(fun[i] < 0).OnlyEnforceIf(neg[i])
        model.Add(fun[i] >= 0).OnlyEnforceIf(neg[i].Not())
        model.Add(play[i] <= token[i])
        model.Add(play[i] == 1).OnlyEnforceIf(neg[i])
        model.Add(play[i] >= 1).OnlyEnforceIf(neg[i].Not())
    for i in range(1, num):
        # Token recurrence: clamp at cap on overflow, else carry + refill.
        model.Add(token[i] == cap).OnlyEnforceIf(compare[i - 1])
        model.Add(token[i] == token[i - 1] - play[i - 1] +
                  refill).OnlyEnforceIf(compare[i - 1].Not())
    model.Maximize(total_fun)
    solver = cp_model.CpSolver()
    status = solver.Solve(model)
    sat = solver.StatusName()
    time = solver.UserTime()
    if status == cp_model.INFEASIBLE:
        token = None
        play = None
        total_fun = None
    else:
        token = [solver.Value(token[i]) for i in range(num)]
        play = [solver.Value(play[i]) for i in range(num)]
        total_fun = solver.Value(total_fun)
    return [sat, token, play, total_fun, time]
if __name__ == '__main__':
    # Usage: python solver.py <instance-file>
    # The instance file's first five lines are Python assignments defining
    # num, cap, refill, fun and goal.
    file = sys.argv[1]
    # SECURITY: exec() on file contents runs arbitrary code from the input
    # file. Kept for compatibility with the existing instance format, but a
    # data format (json/csv) should replace it for untrusted inputs.
    # Fix: `with` guarantees the file handle is closed even if exec raises.
    with open(file) as f:
        for i in range(5):
            exec(f.readline())
    [sat, token, play, total_fun, time] = ortoolsSolverComb(
        num, cap, refill, fun, goal)
    print('Status:', sat)
    if sat == 'OPTIMAL':
        print('Maximum total fun:', total_fun)
|
4,997 | 43db8ed10face1c668aeadd3cbc5b13f87fb0126 | import os
import time
import torch
from torch.utils.data import DataLoader
from torchvision.datasets import SVHN
from torchvision.transforms import ToTensor
from lib.utils import Logger, normal_logpdf, sumflat, print_model_info, tanh_to_uint8, get_optimizer
from lib.vae import VAE
def train(hp):
    """Train a VAE on SVHN according to hyperparameters `hp`.

    `hp` must provide: out_dir, use_cuda, z_dim, opt_name, lr, opt_kwargs,
    epochs, ckpt_freq, sample_freq. Writes scalar logs, periodic
    checkpoints and a FINISHED marker file under hp.out_dir. Training
    aborts early if the loss becomes NaN.
    """
    os.makedirs(hp.out_dir, exist_ok=True)
    device = torch.device('cuda' if hp.use_cuda else 'cpu')
    dataset = SVHN(root='svhn', split='train', download=True, transform=ToTensor())
    eval_dataset = SVHN(root='svhn', split='test', download=True, transform=ToTensor())
    model = VAE(hp.z_dim).to(device)
    print_model_info(model)
    opt = get_optimizer(hp.opt_name, model.parameters(), lr=hp.lr, **hp.opt_kwargs)
    logger = Logger(hp.out_dir)
    total_step = 0
    error_occured = False
    start_time = time.time()
    stats = {
        'loss': [],
        'loss_kl': [],
        'loss_rec': [],
        'eval_loss': [],
        'start_time': start_time,
        'epoch_times': [],
    }
    for epoch in range(1, hp.epochs+1):
        loader = DataLoader(dataset=dataset, batch_size=256, shuffle=True)
        for x, _ in loader:
            total_step += 1
            # Scale images from [0, 1] to [-1, 1] (tanh_to_uint8 is used on
            # the way back out, so the decoder output lives in this range).
            x = x.to(device) * 2 - 1.0
            z, mu, sigma, x_hat = model(x)
            # Squared-error reconstruction term, summed over non-batch dims.
            loss_rec = 0.5 * sumflat((x - x_hat) ** 2)
            # Single-sample KL estimate: log q(z|x) - log p(z) at sampled z.
            loss_kl = normal_logpdf(z, mu, sigma) - normal_logpdf(z)
            loss = (loss_rec + loss_kl).mean()
            if torch.isnan(loss).item():
                error_occured = True
                break
            opt.zero_grad()
            loss.backward()
            opt.step()
            if total_step % 10 == 0:
                stats['loss'].append(loss.cpu().item())
                stats['loss_rec'].append(loss_rec.cpu().mean().item())
                stats['loss_kl'].append(loss_kl.cpu().mean().item())
                logger.log_scalars({
                    'train/loss': stats['loss'][-1],
                    'train/loss_rec': stats['loss_rec'][-1],
                    'train/loss_kl': stats['loss_kl'][-1],
                }, total_step)
                # \r + end='' keeps the progress line updating in place.
                print(f'\rep {epoch:02d} step {total_step:03d} '
                      f'loss {stats["loss"][-1]:.2f} '
                      f'loss_rec {stats["loss_rec"][-1]:.2f} '
                      f'loss_kl {stats["loss_kl"][-1]:.2f} '
                      f'({time.time() - start_time:.2f} sec) '
                      '        ',
                      end='', flush=True)
        print()
        if error_occured:
            print('NaN detected -- Ending training!')
            break
        stats['epoch_times'].append(time.time())
        eval_loss = evaluate(model=model, dataset=eval_dataset, logger=logger,
                             step=total_step, epoch=epoch, device=device, hparams=hp)
        stats['eval_loss'].append(eval_loss.cpu().mean().item())
        if epoch % hp.ckpt_freq == 0 or epoch == hp.epochs:
            torch.save(
                {
                    'model_state_dict': model.state_dict(),
                    'epoch': epoch,
                    'total_step': total_step,
                    'stats': stats,
                    'hparams': vars(hp),
                },
                os.path.join(hp.out_dir, f'ckpt_ep={epoch:03d}.pt'))
    end_time = time.time()
    # Marker file signals a completed run (and records wall-clock timing).
    with open(os.path.join(hp.out_dir, 'FINISHED'), 'w') as f:
        f.write(f'Started: {start_time}\n')
        f.write(f'Finished: {end_time}\n')
        f.write(f'Total time: {end_time - start_time:.2f}\n')
@torch.no_grad()
def evaluate(*, model: torch.nn.Module, dataset, logger: Logger, step: int, epoch: int, device, hparams):
    """Compute per-example VAE loss over `dataset`.

    On sample epochs (epoch % hparams.sample_freq == 0, or the final
    epoch) also logs a 6x6 grid of reconstructions and prior samples from
    the first batch. Returns the concatenated per-example losses on CPU and
    restores model.train() before returning.
    """
    loader = DataLoader(dataset=dataset, batch_size=256, shuffle=False, drop_last=False)
    model.eval()
    losses = []
    for i, (x, _) in enumerate(loader):
        # Same [-1, 1] scaling and loss terms as in train().
        x = x.to(device) * 2 - 1.0
        z, mu, sigma, x_hat = model(x)
        loss_rec = 0.5 * sumflat((x - x_hat) ** 2)
        loss_kl = normal_logpdf(z, mu, sigma) - normal_logpdf(z)
        loss = loss_rec + loss_kl
        losses.append(loss.cpu())
        if i == 0 and (epoch % hparams.sample_freq == 0 or epoch == hparams.epochs):
            n = 6
            # Decode fresh prior samples for the 'samples' grid.
            samples = model.decoder(torch.randn(n**2, hparams.z_dim, device=device))
            logger.log_image_grid('reconstructions', tanh_to_uint8(x_hat[:n**2]), step, nrow=n)
            logger.log_image_grid('samples', tanh_to_uint8(samples), step, nrow=n)
    losses = torch.cat(losses)
    logger.log_scalar('eval/loss', losses.mean().item(), step)
    model.train()
    return losses
4,998 | 9f38148c19f0cb9522725d9eb27c91f70055cba1 | import sys
# Iron-bar/laser cutting puzzle: count the pieces a parenthesis string
# produces, where "()" is a laser and longer "(...)" spans are bars.
sys.stdin = open("input.txt", "r")  # local testing: read input from a file
stick = input()
cnt = 0
temp = []
for i, s in enumerate(stick):
    # '(' is always pushed (start of a bar, or the first half of a laser)
    if s == '(':
        temp.append(s)
    else:
        # "()" pair => a laser: it cuts every currently open bar once
        if stick[i-1] == '(':
            temp.pop()
            cnt += len(temp)
        # otherwise this ')' ends a bar: its final piece counts once
        else:
            temp.pop()
            cnt += 1
print(cnt)
|
4,999 | a9302dbf724f9548411fbf2959f36b4cc5742ff8 | import os
from os.path import join
import json
import pandas as pd
import time
import numpy as np
import torch
def str2bool(v):
    """Parse a boolean-ish argparse value.

    Accepts an actual bool (returned unchanged) or strings like
    'yes'/'no', 'true'/'false', 't'/'f', 'y'/'n', '1'/'0' (case-insensitive).
    Raises argparse.ArgumentTypeError for anything unrecognised.
    """
    # Fix: the module never imported argparse, so the raise below crashed
    # with NameError instead of the intended ArgumentTypeError.
    import argparse
    if isinstance(v, bool):
        return v
    if v.lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    elif v.lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    else:
        raise argparse.ArgumentTypeError('Boolean value expected.')
def str_or_none(v):
    """Map None or the literal string 'none' (any case) to None; return the
    string unchanged otherwise."""
    if v is None or v.lower() == 'none':
        return None
    return v
# helper functions for LDA arguments
def dic2name(dic):
    """Serialise a dict to a stable 'key-value_key-value' string, keys sorted."""
    pieces = ["{}-{}".format(key, dic[key]) for key in sorted(dic)]
    return '_'.join(pieces)
def name2dic(s):
    """Inverse of dic2name: parse 'k-v_k-v' back into a dict of strings.

    Only the first two '-'-separated pieces of each pair are used, matching
    the original split-based behaviour.
    """
    result = {}
    for pair in s.split('_'):
        pieces = pair.split('-')
        result[pieces[0]] = pieces[1]
    return result
def get_valid_types(TYPENAME):
    """Return the list of valid types for `TYPENAME` as declared in
    $BASEPATH/configs/types.json (KeyError if the name is absent)."""
    with open(join(os.environ['BASEPATH'], 'configs', 'types.json'), 'r') as typefile:
        valid_types = json.load(typefile)[TYPENAME]
    return valid_types
def df_index_gen(f, table=False):
    """Index a frame by table_id built from locator + dataset_id.

    Joins the two id columns with '+' into a new table_id column, strips
    the namespace prefix from field_id for column-level frames
    (table=False), drops the source columns and sets table_id as index.
    """
    f.loc[:, 'table_id'] = f['locator'] + '+' + f['dataset_id']
    if not table:
        # Keep only the part after the last ':' of field_id.
        f.loc[:, 'field_id'] = f['field_id'].str.split(':').str[-1]
    f = f.drop(columns=['locator', 'dataset_id']).set_index('table_id')
    return f
# load dataframe from pickle or create pickle file
def load_tmp_df(load_path, tmp_path, name, table=False):
    """Load `<name>.csv` from load_path, caching the indexed frame as a
    pickle under tmp_path.

    When `<tmp_path>/<name>.pkl` exists it is loaded directly; otherwise
    the CSV is read, passed through df_index_gen (with `table` forwarded)
    and pickled for the next call. Returns the DataFrame.
    """
    start = time.time()
    pkl_file = join(tmp_path, "{}.pkl".format(name))
    if os.path.exists(pkl_file):
        print("{} pickle file found, loading...".format(pkl_file))
        df = pd.read_pickle(pkl_file)
    else:
        # Cache miss: process the CSV and save the pickle for next time.
        print("{} pickle file not found, creating...".format(pkl_file))
        df = pd.read_csv(join(load_path, "{}.csv".format(name)))
        df = df_index_gen(df, table)
        df.to_pickle(pkl_file)
    print("{} Load complete. Time {}".format(name, time.time()-start))
    return df
def logSumExpTensor(vec):
    """Numerically stable log-sum-exp over all non-batch dimensions.

    Flattens `vec` to (batch, -1) and reduces along dim 1, subtracting each
    row's maximum before exponentiating so exp() cannot overflow.
    """
    flat = vec.view(vec.size(0), -1)
    row_max, _ = flat.max(dim=1)
    shifted = flat - row_max.unsqueeze(1)
    return row_max + shifted.exp().sum(dim=1).log()
def logNormalizeTensor(a):
    """Subtract the log-partition from `a` so each batch element sums to
    one in probability space. Supports 2-D (batch, k) and 3-D
    (batch, k, k) inputs."""
    denom = logSumExpTensor(a)
    rank = len(a.size())
    if rank == 2:
        denom = denom.view(-1, 1).expand(-1, a.size()[1])
    elif rank == 3:
        denom = denom.view(a.size()[0], 1, 1).expand(-1, a.size()[1], a.size()[2])
    return a - denom
def logNormalize(a):
    """Row-normalise log-probabilities: subtract each row's log-sum-exp so
    every row exponentiates to a distribution summing to 1."""
    log_z = np.logaddexp.reduce(a, 1)
    return (a.T - log_z).T
def logDot(a, b):
    """Matrix product of two matrices given in log space, computed stably.

    Both operands are shifted by their global maxima before exponentiating
    so exp() cannot overflow; the shifts are added back after the log.
    """
    shift_a = np.amax(a)
    shift_b = np.amax(b)
    prod = np.dot(np.exp(a - shift_a), np.exp(b - shift_b))
    np.log(prod, out=prod)
    prod += shift_a + shift_b
    return prod
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.