index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
5,200 | ee7c63f36b4720566389826680b90c6f68de85b2 | #! /usr/bin/env python3
"""Publishes joint trajectory to move robot to given pose"""
import rospy
from trajectory_msgs.msg import JointTrajectory
from trajectory_msgs.msg import JointTrajectoryPoint
from std_srvs.srv import Empty
import argparse
import time
def argumentParser(argument):
    """Parse the kinova_robotType CLI argument and derive robot properties.

    Returns a (prefix, joint_count, finger_count) tuple decoded from the
    robot-type string; characters 3 and 5 encode DOF and finger count.
    The *argument* parameter is unused (rospy.myargv() is consulted instead).
    """
    parser = argparse.ArgumentParser(description='Drive robot joint to command position')
    parser.add_argument('kinova_robotType', metavar='kinova_robotType', type=str, default='j2n6a300',
                        help='kinova_RobotType is in format of: [{j|m|r|c}{1|2}{s|n}{4|6|7}{s|a}{2|3}{0}{0}]. eg: j2n6a300 refers to jaco v2 6DOF assistive 3fingers. Please be noted that not all options are valided for different robot types.')
    # rospy.myargv() strips ROS remapping arguments before argparse sees them.
    cli_args = rospy.myargv()
    parsed = parser.parse_args(cli_args[1:])
    robot_type = parsed.kinova_robotType
    return robot_type, int(robot_type[3]), int(robot_type[5])
def moveJoint (jointcmds,prefix,nbJoints):
    """Publish a 5-second joint trajectory driving the arm to *jointcmds*."""
    pub = rospy.Publisher('/' + prefix + '/effort_joint_trajectory_controller/command',
                          JointTrajectory, queue_size=1)
    traj = JointTrajectory()
    traj.header.stamp = rospy.Time.now() + rospy.Duration.from_sec(0.0)
    waypoint = JointTrajectoryPoint()
    waypoint.time_from_start = rospy.Duration.from_sec(5.0)
    for idx in range(nbJoints):
        traj.joint_names.append(prefix + '_joint_' + str(idx + 1))
        waypoint.positions.append(jointcmds[idx])
        waypoint.velocities.append(0)
        waypoint.accelerations.append(0)
        waypoint.effort.append(0)
    traj.points.append(waypoint)
    # Publish repeatedly (50 times at 100 Hz) so a late subscriber still
    # receives the command at least once.
    rate = rospy.Rate(100)
    for _ in range(50):
        pub.publish(traj)
        rate.sleep()
def moveFingers (jointcmds,prefix,nbJoints):
    """Publish a 5-second finger trajectory driving the gripper to *jointcmds*."""
    pub = rospy.Publisher('/' + prefix + '/effort_finger_trajectory_controller/command',
                          JointTrajectory, queue_size=1)
    traj = JointTrajectory()
    traj.header.stamp = rospy.Time.now() + rospy.Duration.from_sec(0.0)
    waypoint = JointTrajectoryPoint()
    waypoint.time_from_start = rospy.Duration.from_sec(5.0)
    for idx in range(nbJoints):
        traj.joint_names.append(prefix + '_joint_finger_' + str(idx + 1))
        waypoint.positions.append(jointcmds[idx])
        waypoint.velocities.append(0)
        waypoint.accelerations.append(0)
        waypoint.effort.append(0)
    traj.points.append(waypoint)
    # Publish repeatedly (500 times at 100 Hz) so the command is not missed.
    rate = rospy.Rate(100)
    for _ in range(500):
        pub.publish(traj)
        rate.sleep()
if __name__ == '__main__':
    try:
        rospy.init_node('move_robot_using_trajectory_msg')
        prefix, nbJoints, nbfingers = argumentParser(None)
        # Allow gazebo to launch before commanding the robot.
        time.sleep(5)
        # Unpause the physics
        rospy.wait_for_service('/gazebo/unpause_physics')
        unpause_gazebo = rospy.ServiceProxy('/gazebo/unpause_physics', Empty)
        resp = unpause_gazebo()
        # Home pose differs for 6-DOF vs 7-DOF arms.
        if (nbJoints==6):
            #home robots
            moveJoint ([0.0,2.9,1.3,4.2,1.4,0.0],prefix,nbJoints)
        else:
            moveJoint ([0.0,2.9,0.0,1.3,4.2,1.4,0.0],prefix,nbJoints)
        moveFingers ([1,1,1],prefix,nbfingers)
    except rospy.ROSInterruptException:
        print("program interrupted before completion")
|
5,201 | 8bf0141cee2832134d61e49652330c7d21583dcd | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import ABCMeta, abstractmethod
import numpy as np
from deeprl.trainers import BaseTrainer
from deeprl.callbacks import EGreedyDecay
from deeprl.policy import EGreedyPolicy
class BaseDQNTrainer(BaseTrainer):
    """Shared DQN training logic: e-greedy action selection and batch updates.

    Concrete subclasses supply the TD target via :meth:`get_q_target`.
    """
    # NOTE(review): __metaclass__ only takes effect on Python 2; the
    # __future__ imports above suggest this file targets 2/3 compatibility.
    __metaclass__ = ABCMeta

    def __init__(self, config, agent, env):
        super(BaseDQNTrainer, self).__init__(config, agent, env)
        self.discount_factor = config.discount_factor
        # Epsilon-greedy exploration; epsilon is decayed by the callback below.
        self.policy = EGreedyPolicy(config.e)
        self.callbacks.append(EGreedyDecay(self.policy, config.e_min, config.e_decay))

    def choose_action(self, state):
        """Pick an action e-greedily from the model's Q-values for *state*."""
        q_value = self.agent.model.get_one_q(state)
        return self.policy.choose_action(q_value)

    def update_model(self, batch):
        """Train the model on a batch of (s, a, r, s', done) tuples; return loss."""
        batch_s = np.array([i[0] for i in batch])
        batch_a = np.array([i[1] for i in batch])
        batch_r = np.array([i[2] for i in batch])
        batch_s1 = np.array([i[3] for i in batch])
        batch_done = np.array([i[4] for i in batch])
        q_target = self.get_q_target(batch_r, batch_s1, batch_done)
        loss = self.agent.model.train(batch_s, batch_a, q_target)
        return loss

    @abstractmethod
    def get_q_target(self, batch_rewards, batch_next_states, batch_dones):
        """Compute target Q-values; implemented by concrete DQN variants."""
        pass
|
5,202 | 898ff6e38e80419d61ec4bbde827e8ca729eb19a | from cache_replacement.double_linked_list import DoubleLinkedList
from cache_replacement.node import Node
class LRUCache:
    """Least-recently-used cache: hash map for O(1) lookup plus a doubly
    linked list for O(1) recency ordering.

    The list front holds the most recently used node; eviction removes from
    the other end (assumes DoubleLinkedList.remove() with no argument pops
    the tail -- confirm against cache_replacement.double_linked_list).
    """

    def __init__(self, capacity):
        self.capacity = capacity
        self.size = 0
        self.cache_map = {}  # key -> Node, for O(1) access
        self.cache_list = DoubleLinkedList(capacity=capacity)

    def get(self, key):
        """Return the cached value for *key* (or -1) and mark it most recent."""
        if key not in self.cache_map:
            return -1
        node = self.cache_map.get(key)
        # Promote to the front: most recently used.
        self.cache_list.remove(node)
        self.cache_list.append_front(node)
        return node.value

    def put(self, key, value):
        """Insert or update *key*; evict the LRU entry when at capacity."""
        if key in self.cache_map:
            old_node = self.cache_map.get(key)
            self.cache_list.remove(old_node)
            new_node = Node(key, value)
            # BUG FIX: an updated key must become the *most* recently used
            # (front). The previous code used append(), pushing it to the
            # back and making a freshly updated key the next eviction victim.
            self.cache_list.append_front(new_node)
            self.cache_map[key] = new_node
        else:
            if self.size == self.capacity:
                # Evict the least recently used node.
                old_node = self.cache_list.remove()
                self.cache_map.pop(old_node.key)
            else:
                self.size += 1
            new_node = Node(key, value)
            self.cache_list.append_front(new_node)
            self.cache_map[key] = new_node
|
5,203 | f96a7bef48e7df2899343029a2fae9697125a5b2 | # Generated by Django 2.2.6 on 2020-06-18 14:16
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drops the ComprasEnc.empleado field."""
    dependencies = [
        ('gestionadmin', '0133_auto_20200618_1339'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='comprasenc',
            name='empleado',
        ),
    ]
|
5,204 | d9f08e770dacaa86a03d553afd78fdcd725efb62 | """"""
import random
import nbformat
from textwrap import dedent
from pybryt.preprocessors import IntermediateVariablePreprocessor
def test_preprocessor():
    """IntermediateVariablePreprocessor lifts intermediate function calls
    into uniquely named variables; the seeded RNG makes the generated
    var_XXXXXX names deterministic so the output can be compared exactly.
    """
    nb = nbformat.v4.new_notebook()
    nb.cells.append(nbformat.v4.new_code_cell(dedent("""\
        a = True
        b = False
        f = lambda x: not x
        g = f(a) + f(b)
        if f(a) and f(b):
            print("hi")
        if f(a) or f(b):
            print("hi")
        if a or b:
            print("bye")
        l = [f(i) for i in [a, b]]
        f = lambda x: [not i for i in l]
        l = [a, b]
        if all(f(l)):
            print("ok")
        else:
            l = any(f(l))
    """)))
    ivp = IntermediateVariablePreprocessor()
    # Seed so the randomly generated variable names below are reproducible.
    random.seed(42)
    nb = ivp.preprocess(nb)
    print(nb.cells[0].source)
    assert len(nb.cells) == 1
    # Note how short-circuit operands (and/or) are only evaluated
    # conditionally in the expanded form.
    assert nb.cells[0].source.strip() == dedent("""\
        a = True
        b = False
        f = (lambda x: (not x))
        var_HBRPOI = f(a)
        var_G8F1CB = f(b)
        g = (var_HBRPOI + var_G8F1CB)
        var_FNO6B9 = f(a)
        if (var_FNO6B9):
            var_M80O2R = f(b)
        if (var_FNO6B9 and var_M80O2R):
            var_AK1VRJ = print('hi')
            var_AK1VRJ
        var_NVGFYG = f(a)
        if (not (var_NVGFYG)):
            var_WWQC38 = f(b)
        if (var_NVGFYG or var_WWQC38):
            var_HYF9SX = print('hi')
            var_HYF9SX
        if (a or b):
            var_MECOSF = print('bye')
            var_MECOSF
        l = [f(i) for i in [a, b]]
        f = (lambda x: [(not i) for i in l])
        l = [a, b]
        var_KXWNRE = f(l)
        var_K8PK3Y = all(var_KXWNRE)
        if var_K8PK3Y:
            var_R9OUDO = print('ok')
            var_R9OUDO
        else:
            var_CUZREN = f(l)
            l = any(var_CUZREN)
    """).strip()
|
5,205 | 6d543e9e24debaff7640006a3836c59ec0096255 | #H##############################################################
# FILENAME : rec.py
#
# DESCRIPTION :
# Classifies text using defined regular expressions
#
# PUBLIC FUNCTIONS :
# int processToken( string )
#
# NOTES :
# This function uses specific critera to classify
# Criteria described in README.md
#
# Copyright 2018, Jacob Wilkins. All rights reserved.
#
# AUTHOR : Jacob Wilkins START DATE : 24 Sep 18
#
#H#
import re
import sys
def processToken(token) :
    """Classify *token* as ID, INT, or FP via regexes and print the verdict.

    Patterns are tried in order; the first match wins. Criteria are
    described in README.md.
    """
    classifiers = [
        (re.compile(r'^([$]|[|]|[a-z])[A-Z0-9]*$'), 'ID'),
        (re.compile(r'^([|][A-Z0-9]*[|])$'), 'ID'),
        (re.compile(r'^(%)([0-9]|[A-Fa-f])+$'), 'INT'),
        (re.compile(r'^[0-9]+[.][0-9]+$'), 'FP'),
    ]
    for pattern, label in classifiers:
        if pattern.match(token):
            print('>%s< matches %s' % (token, label))
            return
    print('>%s< does not match' % (token))
def main() :
    """Read whitespace-separated tokens from the file named in argv[1]
    and classify each one with processToken()."""
    fName = sys.argv[1]
    print('processing tokens from ' + fName + ' ...')
    with open(fName, 'r') as fp :
        # Normalize CRLF line endings before splitting into lines.
        lines = fp.read().replace('\r', '').split('\n')
        for line in lines :
            for token in line.split() :
                processToken(token)

if (__name__ == '__main__') :
    main()
|
def calcula_norma(x):
    """Return the Euclidean (L2) norm of the iterable of numbers *x*."""
    # Generator expression avoids materializing a temporary list of squares.
    return sum(e ** 2 for e in x) ** (1 / 2)
5,207 | 00d2a29774a4278b1b022571b3f16c88224f08fc | import requests
from lxml import html
# Pretend to be a desktop Chrome browser so the sites serve full pages.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36'}
mail_ru_link = "http://mail.ru"
lenta_link = "https://lenta.ru/"
# --- mail.ru front page: collect news links and titles ---
req = requests.get(mail_ru_link, headers=headers).text
root = html.fromstring(req)
news = []
links = root.xpath(
    "//div[@class='tabs__content']/*/div[contains(@class, 'news-item')]/a[@name]/@href | "
    "//div[@class='tabs__content']/*/div[contains(@class, 'news-item')]/*/a[contains(@href, 'https://')]/@href")
titles = root.xpath("//div[@class='tabs__content']/*/div[contains(@class, 'news-item')]/a[@name]/*/*/h3/text() | "
                    "//div[@class='tabs__content']/*/div[contains(@class, 'news-item')]/*/a[contains(@href, 'https://')]/text()")
if len(links) > 0:
    # NOTE(review): assumes links and titles align one-to-one; titles[i]
    # raises IndexError if the two xpaths return different counts -- verify.
    for i, l in enumerate(links):
        article = {'link': l, 'title': titles[i], 'source': mail_ru_link}
        news.append(article)
else:
    print("Error")
# --- lenta.ru front page: same collection, relative links joined manually ---
req = requests.get(lenta_link, headers=headers).text
root = html.fromstring(req)
links = root.xpath(
    "//div[@class='item']/a/@href")
titles = root.xpath("//div[@class='item']/a/text()")
if len(links) > 0:
    for i, l in enumerate(links):
        article = {'link': lenta_link + l, 'title': titles[i], 'source': lenta_link}
        news.append(article)
else:
    print("Error")
print(news)
5,208 | 3596ef12ce407a8d84319daa38a27a99ed0de763 | '''
Author: Dustin Spicuzza
Date: 3/22/2012
Description:
This mode only feeds another robot, does not move or anything
'''
class FeedOnlyAutonomousMode(object):
    """Autonomous mode that only feeds balls to a partner robot.

    The robot does not drive anywhere; each update cycle it simply runs the
    chamber and feeder so balls are expelled continuously.
    """

    # Shown to the user on the SmartDashboard; must be descriptive & unique.
    MODE_NAME = "Feed Only"
    DEFAULT = False

    def __init__(self, drive, ramp_arm, ball_handler, robot_manager):
        """Store the robot components this mode interacts with."""
        self.drive = drive
        self.ramp_arm = ramp_arm
        self.ball_handler = ball_handler
        self.robot_manager = robot_manager

    def OnEnable(self):
        """Called when autonomous mode is enabled; nothing to initialize."""
        pass

    def OnDisable(self):
        """Called when autonomous mode exits; nothing to clean up."""
        pass

    def Update(self, time_elapsed):
        """Called every ~10ms with the seconds elapsed in autonomous mode.

        Component Update() methods are invoked elsewhere automatically; here
        we only keep pushing balls out to the partner robot.
        """
        self.ball_handler.chamber.Remove()
        self.ball_handler.feeder.Expel()
5,209 | 496d52a984bb8c0e72948ab0c8db5e6035427a68 | #returns true if given date is a leap year, false otherwise
def is_leap_year(date):
    """Return True when *date* (a year number) is a Gregorian leap year."""
    # Divisible by 4, except century years, which must be divisible by 400.
    return date % 4 == 0 and (date % 100 != 0 or date % 400 == 0)
|
5,210 | 7fdddf98fc7b588e9b8816ffa22bc24f715d7efe | class Solution(object):
def isIsomorphic(self, s, t):
"""
:type s: str
:type t: str
:rtype: bool
"""
n1 = len(s)
n2 = len(t)
if n1 != n2:
return False
else:
map1 = {}
map2 = {}
for i in range(n1):
if s[i] not in map1 and t[i] not in map2:
map1.update({s[i]: t[i]})
map2.update({t[i]: s[i]})
elif s[i] not in map1 or t[i] not in map2 or map1[s[i]] != t[i] or map2[t[i]] != s[i]:
return False
return True
# Ad-hoc smoke test: 'bb' vs 'ab' is not isomorphic, so this prints False.
solution = Solution()
s = 'bb'
t = 'ab'
print(solution.isIsomorphic(s, t))
5,211 | 03aa33861def30a46de85c5b309878a1180a760f | contador_pares = 0
contador_impares = 0
for i in range(100):
numero = int(input('Digite um valor:'))
if numero % 2 == 0:
contador_pares += 1
else:
contador_impares += 1
print('A quantidade de números pares é igual a:',contador_pares)
print('A quantidade de números ímpares é igual a:',contador_impares)
|
5,212 | e70c25ce1d61437aacfe7fad0a51e096e1ce4f5d |
# Re-export the StringTemplate module contents; 'language' is the declared
# public name. NOTE(review): Python 2 implicit-relative star import --
# confirm module resolution on the target interpreter.
__all__ = ['language']
from StringTemplate import *
|
5,213 | c52ad4040c14471319939605c400ff4d4ad982a7 | # Stanley H.I. Lio
# hlio@hawaii.edu
# All Rights Reserved. 2018
import logging, time, sys
from serial import Serial
from . import aanderaa_3835
from . import aanderaa_4330f
from . import aanderaa_4531d
from . import aanderaa_4319a
logger = logging.getLogger(__name__)
# works with 3835 (DO), 4330F (DO), 4531D (DO), and 4319A (EC)
def aanderaa_read_universal(port, max_retry=3, parsers=[aanderaa_4531d.parse_4531d, aanderaa_4330f.parse_4330f, aanderaa_3835.parse_3835, aanderaa_4319a.parse_4319a]):
    """Poll an Aanderaa sensor on *port* with 'do sample' and parse the reply.

    Each parser in *parsers* is tried until one returns a non-empty dict;
    junk/empty/error responses are retried up to *max_retry* times.
    (The default parser list is never mutated, so the mutable default is safe.)

    Returns:
        dict of parsed readings, or None if every attempt failed.
    """
    logger.debug('aanderaa_read_universal()')
    with Serial(port, 9600, timeout=2) as ser:
        r = None
        for _ in range(max_retry):
            ser.flush()
            ser.write(b'\r\ndo sample\r\n')
            try:
                line = ser.readline()
                # readline() returns bytes; iterating yields *ints*.
                line = filter(lambda c: c <= 0x7f, line)  # drop non-ASCII bytes
                # BUG FIX: the XON/XOFF control characters were compared as
                # the strings '\x11'/'\x13' against int byte values, so they
                # were never stripped; compare against the integer values.
                line = bytearray(filter(lambda c: c not in (0x11, 0x13), line))
                line = line.decode().strip()
                if len(line) <= 0:
                    logger.debug('(no response)')
                    continue
                elif any([c in line for c in '#*']):
                    logger.debug('(junk)')
                    logger.debug(line)
                    logger.debug([ord(c) for c in line])
                    continue
                elif 'SYNTAX ERROR' in line:
                    logger.debug('(SYNTAX ERROR)')
                    logger.debug([ord(c) for c in line])
                    continue
                else:
                    for f in parsers:
                        # Consistency fix: use the module logger, not the root
                        # logger, like every other message in this function.
                        logger.debug(f)
                        try:
                            r = f(line)
                            if r is not None and len(r) > 0:
                                break
                        except ValueError:
                            logger.debug('(valueerror)')
                    else:
                        # No parser succeeded (loop exhausted without break):
                        # give the instrument a moment before retrying.
                        time.sleep(1.29)
                        ser.flush()
            except UnicodeDecodeError:
                logger.exception('UnicodeDecodeError: {}'.format(line))
                ser.flush()
            if r is not None and len(r.keys()):
                break
            time.sleep(1.17)
            ser.flush()
    return r
if '__main__' == __name__:
    logger.setLevel(logging.INFO)
    logging.basicConfig(level=logging.INFO)
    DEFAULT_PORT = '/dev/ttyS1'
    # Prompt for the serial port; empty input falls back to the default.
    PORT = input('PORT=? (default={})'.format(DEFAULT_PORT)).strip()
    if len(PORT) <= 0:
        PORT = DEFAULT_PORT
    # Poll the sensor forever until Ctrl-C.
    while True:
        try:
            print(aanderaa_read_universal(PORT))
        except KeyboardInterrupt:
            print('user interrupted')
            break
|
5,214 | ab6c3d3c6faa2d1fe5e064dbdebd8904b9434f15 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 14 09:54:28 2020
@author: rushirajsinhparmar
"""
import matplotlib.pyplot as plt
from skimage import io
import numpy as np
from skimage.filters import threshold_otsu
import cv2
# Load the input texture as a single-channel grayscale float image.
img = io.imread("texture.png", as_gray=True)
##################################################
#Variance - not a great way to quantify texture
from scipy import ndimage
k=7
# Local variance via E[x^2] - E[x]^2 over a k x k uniform window.
img_mean = ndimage.uniform_filter(img, (k, k))
img_sqr_mean = ndimage.uniform_filter(img**2, (k, k))
img_var = img_sqr_mean - img_mean**2
plt.imshow(img_var, cmap='gray')
#######################################################
#GABOR - A great filter for texture but usually efficient
#if we know exact parameters. Good choice for generating features
#for machine learning
ksize=45
theta=np.pi/2
kernel = cv2.getGaborKernel((ksize, ksize), 5.0, theta, 10.0, 0.9, 0, ktype=cv2.CV_32F)
filtered_image = cv2.filter2D(img, cv2.CV_8UC3, kernel)
plt.imshow(filtered_image, cmap='gray')
###########################################################
#Entropy
#Entropy quantifies disorder.
#Since cell region has high variation in pixel values the entropy would be
#higher compared to scratch region
from skimage.filters.rank import entropy
from skimage.morphology import disk
entropy_img = entropy(img, disk(15))
plt.imshow(entropy_img)
#use otsu to threshold high vs low entropy regions.
plt.hist(entropy_img.flat, bins=100, range=(0,7)) #.flat returns the flattened numpy array (1D)
thresh = threshold_otsu(entropy_img)
#binarize the entropy image
binary = entropy_img <= thresh
plt.imshow(binary)
#Sum all pixels in the scratch region (values =1)
scratch_area = np.sum(binary == 1)
print("Scratched area is: ", scratch_area, "Square pixels")
scale = 0.45 # microns/pixel
print("Scratched area in sq. microns is: ", scratch_area*((scale)**2), "Square pixels")
5,215 | a53d7b4c93fa49fb0162138d4a262fe7a5546148 | import requests
import os
from bs4 import BeautifulSoup
from urllib.parse import urljoin
# Download destination: ./malware_album under the current working directory.
CURRENT_DIR = os.getcwd()
DOWNLOAD_DIR = os.path.join(CURRENT_DIR, 'malware_album')
os.makedirs(DOWNLOAD_DIR, exist_ok=True)
# Root index of the malware-image album to crawl.
url = 'http://old.vision.ece.ucsb.edu/~lakshman/malware_images/album/'
class Extractor(object):
    """HTML extraction helpers for the malware-image album pages."""
    def __init__(self, html, base_url):
        # base_url anchors relative hrefs found in this document.
        self.soup = BeautifulSoup(html, "html5lib")
        self.base_url = base_url
    def get_album(self):
        """Yield (family_name, absolute_url) for each gallery on the index page."""
        galaries = self.soup.find("div", {"id": "galleries"})
        table = galaries.find("table")
        families = table.find_all('a', href=True)
        for family in families:
            family_name = family.text.strip()
            if family_name != "":
                yield family_name, urljoin(self.base_url, family['href'])
    def get_image_table(self):
        """Yield the relative href of each per-image page in this page's table."""
        tables = self.soup.find('table')
        for td in tables.find_all('td'):
            image_atag = td.find('a', href=True)
            if image_atag is not None:
                yield image_atag['href']
    def get_pages(self):
        """Yield each distinct pagination ('index...') link on this page."""
        pages = self.soup.find_all('a', href=True)
        seen = list()
        for page in pages:
            if page is not None:
                if 'index' in page['href']:
                    page_url = page['href']
                    # Deduplicate: pagination links repeat on the page.
                    if page_url not in seen:
                        seen.append(page_url)
                        yield page_url
    def get_image_link(self):
        """
        return downloadable image's url

        Returns (img src attribute, image name taken from the second <b> tag).
        """
        table = self.soup.find('table')
        image_tag = table.find('img')
        image_name = self.soup.find_all("b")[1].text
        return image_tag['src'], image_name
def fetch(image_url, image_name, folder):
    """Stream *image_url* to folder/image_name in 1 KiB chunks."""
    r = requests.get(image_url, stream=True)
    image_file = os.path.join(folder, image_name)
    try:
        with open(image_file, 'wb') as f:
            for chunk in r.iter_content(1024):
                f.write(chunk)
    finally:
        # BUG FIX: a streamed Response holds its connection open until
        # closed; `del r` did not guarantee release. Close explicitly.
        r.close()
def extract_image(page_html, family_url, folder):
    """
    Extract image from page

    For every image page linked from *page_html*, fetch that page, resolve
    the actual image URL, and download it into *folder*.
    """
    image_extractor = Extractor(page_html, family_url)
    for url in image_extractor.get_image_table():
        image_page_url = urljoin(family_url, url)
        imres = requests.get(image_page_url)
        image_page_extractor = Extractor(imres.text, image_page_url)
        image_src, image_name = image_page_extractor.get_image_link()
        image_link = urljoin(image_page_url, image_src)
        print(image_link, image_name)
        # Download image
        fetch(image_link, image_name, folder)
def download(url):
    """Crawl the album index at *url* and download every family's images.

    Creates one subdirectory of DOWNLOAD_DIR per malware family and walks
    every pagination page of each family gallery.
    """
    res = requests.get(url)
    parser = Extractor(res.text, url)
    # for each family, fetch image
    for family, family_url in parser.get_album():
        family_folder = os.path.join(DOWNLOAD_DIR, family)
        print(family_folder)
        # BUG FIX: exist_ok makes re-runs resumable instead of crashing on
        # an already-created family directory (matches DOWNLOAD_DIR setup).
        os.makedirs(family_folder, exist_ok=True)
        res = requests.get(family_url)
        if res.status_code == 200:
            page_extractor = Extractor(res.text, family_url)
            count = 1
            print('Page ', count)
            extract_image(res.text, family_url, family_folder)  # first page
            for page in page_extractor.get_pages():
                page_url = urljoin(family_url, page)
                count += 1
                print("Page ", count)
                r = requests.get(page_url)
                extract_image(r.text, family_url, family_folder)
        else:
            print('%s has status code: %s' % (family, res.status_code))
# Crawl the whole album when run as a script.
if __name__ == '__main__':
    download(url)
|
5,216 | f3b466dc5b6149be82b096791ca8445faf169380 | # Generated by Django 3.2 on 2021-05-03 17:13
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds delivery flag, payment id, and ref
    code fields to the Order model."""
    dependencies = [
        ('orders', '0005_alter_orderitem_price'),
    ]
    operations = [
        migrations.AddField(
            model_name='order',
            name='being_delivered',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='order',
            name='payment_id',
            field=models.CharField(blank=True, max_length=150),
        ),
        migrations.AddField(
            model_name='order',
            name='ref_code',
            field=models.CharField(blank=True, max_length=20, null=True),
        ),
    ]
|
5,217 | 7e1dd242c60ee12dfc4130e379fa35ae626a4d63 | #!/usr/bin/env python3
data = None
with open('./01-data.txt') as f:
data = f.read().splitlines()
ss = {}
s = 0
ss[s] = True
def check(data):
global ss
global s
for line in data:
s += int(line)
if ss.get(s, False):
return s
ss[s] = True
return None
v = check(data)
print('after first pass:', s)
while v is None:
v = check(data)
print('first duplicate:', v) |
5,218 | 73d056d4ab0d268841156b21dfc2c54b5fb2f5f1 | """Support for binary sensor using I2C abelectronicsiopi chip."""
from custom_components.abelectronicsiopi.IOPi import IOPi
import voluptuous as vol
from homeassistant.components.binary_sensor import PLATFORM_SCHEMA, BinarySensorEntity
from homeassistant.const import DEVICE_DEFAULT_NAME
import homeassistant.helpers.config_validation as cv
# Configuration keys and defaults for the IO Pi binary-sensor platform.
CONF_INVERT_LOGIC = "invert_logic"
CONF_I2C_ADDRESS = "i2c_address"
CONF_PINS = "pins"
CONF_PULL_MODE = "pull_mode"
DEFAULT_INVERT_LOGIC = False
DEFAULT_I2C_ADDRESS = 0x20
DEFAULT_PULL_MODE = True
# Mapping of pin number -> friendly sensor name.
_SENSORS_SCHEMA = vol.Schema({cv.positive_int: cv.string})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_PINS): _SENSORS_SCHEMA,
        vol.Optional(CONF_INVERT_LOGIC, default=DEFAULT_INVERT_LOGIC): cv.boolean,
        vol.Optional(CONF_PULL_MODE, default=DEFAULT_PULL_MODE): cv.boolean,
        vol.Optional(CONF_I2C_ADDRESS, default=DEFAULT_I2C_ADDRESS): vol.Coerce(int),
    }
)
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the abelectronicsiopi binary sensors."""
    pull_mode = config[CONF_PULL_MODE]
    invert_logic = config[CONF_INVERT_LOGIC]
    # One shared IOPi bus instance serves all configured pins.
    iopi = IOPi(config.get(CONF_I2C_ADDRESS), True)
    binary_sensors = []
    pins = config[CONF_PINS]
    for pin_num, pin_name in pins.items():
        binary_sensors.append(abelectronicsiopiBinarySensor(pin_name, pin_num, pull_mode, invert_logic, iopi))
    # True -> ask Home Assistant to call update() before first use.
    add_devices(binary_sensors, True)
class abelectronicsiopiBinarySensor(BinarySensorEntity):
    """Binary sensor backed by one input pin of an AB Electronics IO Pi bus."""

    iobus = None      # shared IOPi bus instance
    targetpin = None  # pin number on the bus
    _state = False    # last value read from the pin

    def __init__(self, pinname, pin, pull_mode, invert_logic, bus):
        """Configure the pin as an (optionally inverted, pulled-up) input."""
        self._state = None
        self._name = pinname
        self.targetpin = pin
        self.iobus = bus
        # Enable/disable the internal pull-up before switching to input mode.
        self.iobus.set_pin_pullup(self.targetpin, 1 if pull_mode else 0)
        self.iobus.set_pin_direction(self.targetpin, 1)  # 1 = input
        self.iobus.invert_pin(self.targetpin, 1 if invert_logic else 0)

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def is_on(self):
        """Return the state cached by the last update().

        BUG FIX: this property previously performed an I2C read on every
        access, duplicating update(). Entity properties should be cheap;
        Home Assistant polls update() to refresh the state.
        """
        return self._state

    def update(self):
        """Read the GPIO pin and cache its state."""
        self._state = self.iobus.read_pin(self.targetpin)
|
5,219 | b83310c18294def950cef6710c7644c7e8a3208f | # #Create a function that takes a text file and returns the number of words
# ___ count_words filepath
# w.. o.. ? ? __ file # read
# strng = ?.r..
# strng_list = ?.s.. " "
# r.. l.. ?
#
# print ? "words1.txt"
|
5,220 | b9cce77d4d2b9ff5563d17927e21166f9c870e3d | from os.path import abspath, dirname, join, basename
import numpy as np
import cv2
import xiuminglib as xm
logger, thisfile = xm.config.create_logger(abspath(__file__))
class EXR():
    """Reads EXR files.
    EXR files can be generic or physically meaningful, such as depth, normal, etc.
    When data loaded are physically meaningful, these methods assume the EXR files
    are produced by :mod:`xiuminglib.blender.render` and hence follow certain formats.
    Args:
        exr_path (str, optional): Path to the EXR file.
    Attributes:
        exr_f (str): Path to the EXR file.
        data (dict): Data loaded.
    """
    def __init__(self, exr_path=None):
        self.exr_f = exr_path
        # Load eagerly when a path is given.
        if self.exr_f is not None:
            self.data = self.load()
    def load(self):
        r"""Loads an EXR as a dictionary of NumPy arrays.
        Requires writing a .npz to ``/tmp/`` and then loading it, because
        the conversion process has to be done in Python 2.x as a subprocess call,
        unfortunately. If :math:`\leq3` channels, can use OpenCV for in-memory loading.
        Returns:
            dict: Loaded EXR data.
        """
        from time import time
        from subprocess import Popen
        logger_name = thisfile + '->EXR:load()'
        assert self.exr_f is not None, "Set the exr_f first"
        # Timestamp in the temp filename avoids collisions between runs.
        npz_f = '/tmp/%s_t%s.npz' % \
            (basename(self.exr_f).replace('.exr', ''), time())
        # Convert to .npz
        # cv2.imread() can't load more than three channels from .exr even with IMREAD_UNCHANGED
        # Has to go through IO. Maybe there's a better way?
        cwd = join(dirname(abspath(__file__)), '..', '..', 'cli')
        bash_cmd = 'python2 exr2npz.py %s %s' % (self.exr_f, npz_f)
        process = Popen(bash_cmd.split(), cwd=cwd)
        _, _ = process.communicate()
        # Load this .npz
        data = np.load(npz_f)
        logger.name = logger_name
        logger.info("Loaded %s", self.exr_f)
        return data
    def extract_depth(self, alpha_exr, outpath, vis=False):
        """Combines a raw (aliased) depth map and its alpha map into anti-aliased depth.
        Output has black background, with bright values for closeness to the camera.
        If the alpha map is anti-aliased, the result depth map will be nicely anti-aliased.
        Args:
            alpha_exr (str): Path to the EXR file of the anti-aliased alpha map.
            outpath (str): Path to the result .npy file.
            vis (bool, optional): Whether to visualize the raw values as an image.
        Writes
            - A .npy file containing an aliased depth map and its alpha map.
            - If ``vis``, a .png image of anti-aliased depth.
        """
        logger_name = thisfile + '->EXR:extract_depth()'
        dtype = 'uint8'
        dtype_max = np.iinfo(dtype).max
        # Load alpha
        arr = cv2.imread(alpha_exr, cv2.IMREAD_UNCHANGED)
        assert (arr[:, :, 0] == arr[:, :, 1]).all() and (arr[:, :, 1] == arr[:, :, 2]).all(), \
            "A valid alpha map must have all three channels the same"
        alpha = arr[:, :, 0]
        # Load depth
        arr = cv2.imread(self.exr_f, cv2.IMREAD_UNCHANGED)
        assert (arr[..., 0] == arr[..., 1]).all() and (arr[..., 1] == arr[..., 2]).all(), \
            "A valid depth map must have all three channels the same"
        depth = arr[..., 0] # these raw values are aliased, so only one crazy big value
        if not outpath.endswith('.npy'):
            outpath += '.npy'
        np.save(outpath, np.dstack((arr, alpha)))
        if vis:
            # Foreground pixels are those strictly closer than the far plane.
            is_fg = depth < depth.max()
            max_val = depth[is_fg].max()
            depth[depth > max_val] = max_val # cap background depth at the object maximum depth
            min_val = depth.min()
            im = dtype_max * (max_val - depth) / (max_val - min_val) # [0, dtype_max]
            # Anti-aliasing
            bg = np.zeros(im.shape)
            im = np.multiply(alpha, im) + np.multiply(1 - alpha, bg)
            cv2.imwrite(outpath[:-4] + '.png', im.astype(dtype))
        logger.name = logger_name
        logger.info("Depth image extractd to %s", outpath)
    def extract_normal(self, outpath, vis=False):
        """Converts an RGBA EXR normal map to a .npy normal map.
        The background is black, complying with industry standards (e.g., Adobe AE).
        Args:
            outpath (str): Path to the result .npy file.
            vis (bool, optional): Whether to visualize the normal vectors as an image.
        Writes
            - A .npy file containing an aliased normal map and its alpha map.
            - If ``vis``, a .png visualization of anti-aliased normals.
        """
        # NOTE(review): name omits the 'EXR:' prefix used by sibling methods.
        logger_name = thisfile + '->extract_normal()'
        dtype = 'uint8'
        dtype_max = np.iinfo(dtype).max
        # Load RGBA .exr
        data = self.data
        arr = np.dstack((data['R'], data['G'], data['B']))
        alpha = data['A']
        if not outpath.endswith('.npy'):
            outpath += '.npy'
        np.save(outpath, np.dstack((arr, alpha)))
        if vis:
            # [-1, 1]
            im = (1 - (arr / 2 + 0.5)) * dtype_max
            # [0, dtype_max]
            bg = np.zeros(im.shape)
            alpha = np.dstack((alpha, alpha, alpha))
            im = np.multiply(alpha, im) + np.multiply(1 - alpha, bg)
            # [..., ::-1] converts RGB to BGR for cv2.imwrite.
            cv2.imwrite(outpath[:-4] + '.png', im.astype(dtype)[..., ::-1])
        logger.name = logger_name
        logger.info("Normal image extractd to %s", outpath)
    def extract_intrinsic_images_from_lighting_passes(self, outdir, vis=False):
        """Extract intrinsic images from an EXR of lighting passes into multiple .npy files.
        Args:
            outdir (str): Directory to save the result .npy files to.
            vis (bool, optional): Whether to visualize the values as images.
        Writes
            - albedo.npy (and its visualization if ``vis``).
            - shading.npy (ditto).
            - specularity.npy (ditto).
            - recon.npy (ditto): reconstruction by combining albedo, shading, and specularity.
            - composite.npy (ditto): composite by Blender.
        """
        logger_name = thisfile + '->extract_intrinsic_images_from_lighting_passes()'
        xm.general.makedirs(outdir)
        data = self.data
        def collapse_passes(components):
            # Sum the listed render passes channel-wise into one RGBA array.
            ch_arrays = []
            for ch in ['R', 'G', 'B']:
                comp_arrs = []
                for comp in components:
                    comp_arrs.append(data[comp + '.' + ch])
                ch_array = np.sum(comp_arrs, axis=0) # sum components
                ch_arrays.append(ch_array)
            # Handle alpha channel
            first_alpha = data[components[0] + '.A']
            for ci in range(1, len(components)):
                assert (first_alpha == data[components[ci] + '.A']).all(), \
                    "Alpha channels of all passes must be the same"
            ch_arrays.append(first_alpha)
            return np.dstack(ch_arrays)
        # Albedo
        albedo = collapse_passes(['diffuse_color', 'glossy_color'])
        np.save(join(outdir, 'albedo.npy'), albedo)
        if vis:
            xm.vis.matrix_as_image(albedo, outpath=join(outdir, 'albedo.png'))
        # Shading
        shading = collapse_passes(['diffuse_indirect', 'diffuse_direct'])
        np.save(join(outdir, 'shading.npy'), shading)
        if vis:
            xm.vis.matrix_as_image(shading, join(outdir, 'shading.png'))
        # Specularity
        specularity = collapse_passes(['glossy_indirect', 'glossy_direct'])
        np.save(join(outdir, 'specularity.npy'), specularity)
        if vis:
            xm.vis.matrix_as_image(specularity, join(outdir, 'specularity.png'))
        # Reconstruction vs. ...
        recon = np.multiply(albedo, shading) + specularity
        recon[:, :, 3] = albedo[:, :, 3] # can't add up alpha channels
        np.save(join(outdir, 'recon.npy'), recon)
        if vis:
            xm.vis.matrix_as_image(recon, join(outdir, 'recon.png'))
        # ... composite from Blender, just for sanity check
        composite = collapse_passes(['composite'])
        np.save(join(outdir, 'composite.npy'), composite)
        if vis:
            xm.vis.matrix_as_image(composite, join(outdir, 'composite.png'))
        logger.name = logger_name
        logger.info("Intrinsic images extracted to %s", outdir)
def main():
    """Unit tests that can also serve as example usage."""
    tmp_dir = xm.constants['dir_tmp']
    exr_f = join(tmp_dir, 'test.exr')
    exr = EXR(exr_f)
    exr.extract_normal(join(tmp_dir, 'test.png'), vis=True)

if __name__ == '__main__':
    main()
|
5,221 | c1374a048187807deac5d28dda4fbc7beeccf8f5 | import pygame as pg
# Open a 640x380 pygame window; 'screen' is the display surface.
screen = pg.display.set_mode((640, 380))
|
5,222 | eedd909e777a4127b5fd55108805314b3b196dd1 | import sys
import memo
from StringIO import StringIO
import inspect
# Accepted alternative spellings for each canonical assignment function name.
alternate_dict = {}
alternate_dict['cartesian_to_polar'] = ['cartesian_to_polar','cartesianToPolar','cartesion_to_polar','Polar_Coordinates']
alternate_dict['mercator'] = ['mercator','mercator_projection','mecartor','Mercator','Mercator_projection']
alternate_dict['sort3'] = ['sort3','sort','Sort3']
# Test input vectors (positional args) for each canonical function.
input_dict = {}
input_dict['cartesian_to_polar'] = [[0,0],[-5,0],[0,7],[1.25,1.25],[-1.75,-2.68]]
input_dict['mercator'] = [[0,0,0],[0,-33.9337,-18.8645],[-33.9337,-33.9337,-18.8645],[23,348.8,360],]
input_dict['sort3'] = [[1,2,3],[1,1,1],[-200,-10,-7236],[3.3,2.2,1.4],[-1,0,1]]
def printError(message):
    # Render an exception message as red HTML. (Python 2 print statement.)
    print "<div style=\"color:red;\">Exception:</br>", message, "</div>"
def printMethodName(name):
    # Render the method name as a blue HTML heading.
    print "<h3 style=\"color:blue;\">" , name,"</h3>"
def printResults(results):
    # Render a result summary as an HTML paragraph.
    print "<p>" , results,"</p>"
def printCode(code):
    # Render source code preformatted in HTML.
    print "<pre>" , code,"</pre>"
def find_test_method(test_method):
    # Return the name in the student's test_module matching one of the
    # accepted spellings of test_method, or None if absent.
    for method in dir(test_module): # search for method "method_name" in the test file
        if (method in alternate_dict[test_method]):
            return method
    return None
def printFailTest(inputs, correct, answer):
    # Report a failing test case: the input, the expected and actual values.
    print "<div style=\"color:red;\">Fail test: input: ", str(inputs), "</div>"
    print "<div> Correct answer: ", str(correct) + "</div>"
    print "<div> Answer: ", str(answer), "</div>"
def test_approx(test_method):
method = find_test_method(test_method)
if (method == None):
printError("Method not found.")
printResults(dir(test_module))
return ""
printCode(inspect.getsource( eval("test_module." + method)))
func_memo = eval("memo." + test_method)
func_test = eval("test_module." + method)
num_tests_passed = 0 # counts the number of tests passed
passed = True
# run tests
for i, value in enumerate(input_dict[test_method]):
try:
ans_memo = func_memo(*value)
ans_test = func_test(*value)
if (len(ans_memo) == len(ans_test)):
for c in range(len(ans_memo)):
if ( not equals_approx(ans_memo[c], ans_test[c])):
passed = False
break
else:
passed = False
if (passed):
num_tests_passed = num_tests_passed+1
else:
printFailTest(str(value), str(ans_memo), str(ans_test))
except Exception, err:
printError("Test crashed on input: " + str(value) + " Exception: " + str(err))
passed = False
return str(num_tests_passed)+ "/"+ str(i+1) + " tests passed"
def capture(func, *args, **kwargs):
capture = StringIO()
save_stdout = sys.stdout
sys.stdout = capture
try:
result = func(*args)
except Exception, err:
result = str(err)
sys.stdout = save_stdout
value = capture.getvalue()
return (result, value)
def equals(value1, value2):
    """Exact equality, returned as a plain bool (replaces the redundant
    if/else True/False ladder)."""
    return bool(value1 == value2)
def equals_approx(value1, value2):
    """Approximate equality: True when the absolute difference is below
    1e-5 (replaces the redundant if/else True/False ladder)."""
    return bool(abs(value1 - value2) < 0.00001)
if __name__ == '__main__':
    try:
        test_module_name = sys.argv[1]  # filename of test module
        # Import the student's module with stdout captured so stray prints
        # during import do not pollute the grading page.
        params = [test_module_name, None, None, alternate_dict.keys(), -1]
        result = capture(__import__, *params)
        test_module = result[0]
        # capture() returns an error string instead of a module on failure.
        if (not isinstance(test_module, str)):
            for method in alternate_dict:  # for each method to be tested
                try:
                    printMethodName(method)
                    printResults(test_approx(method))
                except Exception, err:
                    printError("Error testing method " + method + ": " + str(err))
        else:
            printError("Error importing module due to " + test_module)
            # Show the student's file with line numbers for debugging.
            f = open(test_module_name + ".py", 'r')
            print "<pre>"
            for i, value in enumerate(f):
                print i + 1, "\t", value,
            print "</pre>"
    except Exception, err:
        printError("Error importing module due to " + str(err))
|
5,223 | 86c03fa85ac405a148be13325efeaaf691d9ec26 | #!/usr/bin/env python
def get_attachment_station_coords(station):
    """Return the [x, y, z] gantry coordinates of a tool-attachment station.

    Unknown station names map to the origin [0.0, 0.0, 0.0]. A fresh list
    is returned on every call.
    """
    known_stations = {
        "gripper1": [0.48, 0.05, 0.161],
        "gripper2": [0.28, 0.05, 0.13],
        "syringe": [0.405, 0.745, 0.213],
    }
    # Move the gantry to the coordinates
    return list(known_stations.get(station, [0.0, 0.0, 0.0]))
def get_station_coords(station):
    """Return the [x, y, z] gantry coordinates of a named work station.

    Unknown station names map to the origin [0.0, 0.0, 0.0]. A fresh list
    is returned on every call.
    """
    known_stations = {
        "steaks": [0.4625, 0.375, 0.14],
        "griddle": [0.73, 0.375, 0.05],
        "steak_flip_initial": [0.73, 0.7, 0.166],
        "steak_flip_done": [0.73, 0.33, 0.166],
        "steak_flip_drop": [0.73, 0.6, 0.05],
        # "plate" was previously [1.11, 0.75, 0.1]
        "plate": [1.11, 0.35, 0.1],
        "oil": [0.9, 0.375, 0.08],
    }
    return list(known_stations.get(station, [0.0, 0.0, 0.0]))
5,224 | 8640de519ebf7f95588ac40b55662da85ffc926e | import urllib
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib import messages
from django.utils.translation import gettext as _
from .forms import CountryForm
from .models import Countries
from django.utils.timezone import datetime
from django.contrib.auth.decorators import login_required
# COUNTRIES LIST
@login_required(login_url='user_login')
def countries_list(request):
    """Render the list page showing every country."""
    all_countries = Countries.objects.all()
    return render(request, 'map_app/countries/list.html',
                  {'countries': all_countries})
# CREATE COUNTRY
@login_required(login_url='user_login')
def countries_add(request):
    """Show the country creation form (GET) and handle submission (POST).

    On a valid POST the country is saved and the user is redirected back
    to the empty form; an invalid POST re-renders the bound form with
    its validation errors.
    """
    if request.method == 'POST':
        form = CountryForm(request.POST or None)
        if form.is_valid():
            form.save()
            messages.success(
                request, 'the country has been added successfuly :) ')
            return redirect('countries_add')
    else:
        form = CountryForm()
    return render(request, 'map_app/countries/add.html', {'form': form})
# DETAILS OF COUNTRY
@login_required(login_url='user_login')
def country_details(request, id):
    """Render the detail page for one country; 404 when the id is unknown."""
    selected = get_object_or_404(Countries, id=id)
    return render(request, 'map_app/countries/details.html',
                  {'country': selected})
# UPDATE COUNTRY
@login_required(login_url='user_login')
def country_edit(request, id):
    """Edit an existing country; 404 when the id is unknown."""
    country = get_object_or_404(Countries, id=id)
    if request.method == 'POST':
        form = CountryForm(request.POST or None, instance=country)
        if form.is_valid():
            # Stamp the update time manually before persisting.
            inst = form.save(commit=False)
            # NOTE(review): datetime.utcnow() is naive; if USE_TZ is enabled
            # this should probably be django.utils.timezone.now() — confirm.
            inst.updated_at = datetime.utcnow()
            inst.save()
            messages.success(
                request, 'the country has been updated successfuly :) ')
            return redirect('countries_list')
    else:
        # GET: pre-populate the form with the current values.
        form = CountryForm(instance=country)
    context = {
        'country': country,
        'form': form,
    }
    return render(request, 'map_app/countries/edit.html', context)
# DELETE COUNTRY
@login_required(login_url='user_login')
def country_delete(request, id):
    """Confirm (GET) and perform (POST) deletion of a country."""
    country = get_object_or_404(Countries, id=id)
    if request.method == 'POST':
        country.delete()
        messages.success(
            request, 'the country has been deleted in successfuly')
        return redirect('home')
    return render(request, 'map_app/countries/delete.html',
                  {'country': country})
|
5,225 | bda28e5a0cb8a3dddea58c9c59a165b31274ac03 | """AOC Day 13"""
import pathlib
import time
TEST_INPUT = """6,10
0,14
9,10
0,3
10,4
4,11
6,0
6,12
4,1
0,13
10,12
3,4
3,0
8,4
1,10
2,14
8,10
9,0
fold along y=7
fold along x=5"""
def read_input(input_path: str) -> str:
    """take input file path and return a str with the file's content,
    stripped of surrounding whitespace"""
    return pathlib.Path(input_path).read_text().strip()
def extract(input_data: str) -> tuple:
    """Parse the raw input into (dots, folds): a set of (x, y) coordinates
    and an ordered list of ('x'|'y', position) fold instructions."""
    dots_part, folds_part = input_data.split('\n\n')
    sheet = {tuple(int(v) for v in line.split(','))
             for line in dots_part.split('\n')}
    folds = []
    for line in folds_part.split('\n'):
        eq = line.index('=')
        folds.append((line[eq - 1], int(line[eq + 1:])))
    return (sheet, folds)
def fold(sheet: set, direction: str, axis: int):
    """Reflect every dot beyond the fold line back onto the kept half and
    return the resulting (deduplicated) dot set."""
    return {
        (2 * axis - x if direction == 'x' and x > axis else x,
         2 * axis - y if direction == 'y' and y > axis else y)
        for x, y in sheet
    }
def part1(entries: tuple) -> int:
    """part1 solver: number of dots visible after applying only the first
    fold instruction."""
    sheet, folds = entries
    axis_dir, axis_pos = folds[0]
    return len(fold(sheet, axis_dir, axis_pos))
def part2(entries: tuple) -> str:
    """part2 solver: apply every fold and render the surviving dots as
    ASCII art ('#' = dot), one row per line."""
    sheet, instructions = entries
    for axis_dir, axis_pos in instructions:
        sheet = fold(sheet, axis_dir, axis_pos)
    width = max(p[0] for p in sheet) + 1
    height = max(p[1] for p in sheet) + 1
    rows = [''.join('#' if (x, y) in sheet else ' ' for x in range(width))
            for y in range(height)]
    # Trailing newline after the last row, matching the original output.
    return '\n'.join(rows) + '\n'
def test_input_day_13():
    """pytest testing function: part1 on the sample input must be 17."""
    entries = extract(TEST_INPUT)
    assert part1(entries) == 17
def test_bench_day_13(benchmark):
    """pytest-benchmark function: time a full solve of the real input."""
    benchmark(main)
def main():
    """main function: solve both parts for the real input and report the
    elapsed wall-clock time."""
    here = pathlib.Path(__file__)
    input_path = str(here.resolve().parent.parent) + "/inputs/" + str(here.stem)
    start_time = time.time()
    entries = extract(read_input(input_path))
    print("Part 1: %d" % part1(entries))
    print("Part 2:\n%s" % part2(entries))
    print("Execution time: %f" % (time.time() - start_time))
if __name__ == "__main__":
main()
|
5,226 | b80b997f802c7ed4f0a838030703a314f2383c9d | import pygame
from clobber.constants import GREY, ROWS, WHITE, SQUARE_SIZE, COLS, YELLOW, BLACK
from clobber.piece import Piece
class Board:
    """Clobber game board.

    Every square starts occupied, yellow and black pieces alternating in a
    checker pattern; a square holds 0 once its piece has moved away.
    """
    def __init__(self):
        # 2-D list (ROWS x COLS) of Piece objects; 0 marks an empty square.
        self.board = []
        self.selected_piece = None
        self.create_board()

    def draw_squares(self, win):
        """Paint the checkered background: grey fill with white squares."""
        win.fill(GREY)
        for row in range(ROWS):
            for col in range(row % 2, COLS, 2):
                pygame.draw.rect(win, WHITE, (row * SQUARE_SIZE, col * SQUARE_SIZE,
                                 SQUARE_SIZE, SQUARE_SIZE))

    def create_board(self):
        """Fill every square, alternating YELLOW and BLACK per row parity."""
        for row in range(ROWS):
            self.board.append([])
            for col in range(COLS):
                if col % 2 == ((row + 1) % 2):
                    self.board[row].append(Piece(row, col, YELLOW))
                else:
                    self.board[row].append(Piece(row, col, BLACK))

    def draw(self, win):
        """Draw the background and every piece still on the board."""
        self.draw_squares(win)
        for row in range(ROWS):
            for col in range(COLS):
                piece = self.board[row][col]
                if piece != 0:
                    piece.draw(win)

    def move(self, piece, row, col):
        """Move piece onto (row, col), replacing whatever occupied that
        square; the vacated square becomes 0."""
        self.board[piece.row][piece.col], self.board[row][col] = 0, self.board[piece.row][piece.col]
        piece.move(row, col)

    def get_piece(self, row, col):
        """Return the cell contents at (row, col): a Piece or 0."""
        return self.board[row][col]

    def get_valid_moves(self, piece):
        """Return the (row, col) of each orthogonal neighbour holding an
        opposing piece — the only legal clobber moves for this piece."""
        moves = []
        # left
        if piece.col != 0:
            watch_p = self.board[piece.row][piece.col - 1]
            if watch_p != 0 and watch_p.color != piece.color:
                moves.append((watch_p.row, watch_p.col))
        # top
        if piece.row != 0:
            watch_p = self.board[piece.row - 1][piece.col]
            if watch_p != 0 and watch_p.color != piece.color:
                moves.append((watch_p.row, watch_p.col))
        # right
        if piece.col != len(self.board[piece.row]) - 1:
            watch_p = self.board[piece.row][piece.col + 1]
            if watch_p != 0 and watch_p.color != piece.color:
                moves.append((watch_p.row, watch_p.col))
        # down
        if piece.row != len(self.board) - 1:
            watch_p = self.board[piece.row + 1][piece.col]
            if watch_p != 0 and watch_p.color != piece.color:
                moves.append((watch_p.row, watch_p.col))
        return moves

    def evaluate(self):
        """Heuristic score: live yellow pieces minus live black pieces,
        where a piece is 'live' when is_dead() is False."""
        amount_of_yellow = 0
        amount_of_black = 0
        for row in self.board:
            for piece in row:
                if piece == 0:
                    continue
                if piece.color == YELLOW:
                    if not self.is_dead(piece):
                        amount_of_yellow += 1
                else:
                    if not self.is_dead(piece):
                        amount_of_black += 1
        return amount_of_yellow - amount_of_black

    def is_dead(self, piece):
        """True when the piece is isolated: all four orthogonal neighbours
        are empty.

        NOTE(review): a piece surrounded only by same-colour pieces also has
        no legal move (see get_valid_moves) yet is still counted 'alive'
        here — confirm this is intended for the evaluation heuristic.
        """
        # left
        if piece.col != 0:
            watch_p = self.board[piece.row][piece.col - 1]
            if watch_p != 0:
                return False
        # top
        if piece.row != 0:
            watch_p = self.board[piece.row - 1][piece.col]
            if watch_p != 0:
                return False
        # right
        if piece.col != len(self.board[piece.row]) - 1:
            watch_p = self.board[piece.row][piece.col + 1]
            if watch_p != 0:
                return False
        # down
        if piece.row != len(self.board) - 1:
            watch_p = self.board[piece.row + 1][piece.col]
            if watch_p != 0:
                return False
        return True

    def get_all_pieces(self, color):
        """Return every remaining piece of the given colour."""
        pieces = []
        for row in self.board:
            for piece in row:
                if piece != 0 and piece.color == color:
                    pieces.append(piece)
        return pieces

    def winner(self):
        """Return BLACK when no piece on the board has a legal move,
        otherwise None (game still in progress).

        NOTE(review): the winner is always reported as BLACK regardless of
        whose turn it is — verify against the game loop's semantics.
        """
        for row in self.board:
            for piece in row:
                if piece != 0:
                    if len(self.get_valid_moves(piece)) != 0:
                        return None
        return BLACK
|
5,227 | bd6c72c3215265a349c5f47573063a9288f64198 | from django.shortcuts import render
from rest_framework import status
from rest_framework.decorators import api_view, renderer_classes
from rest_framework.renderers import BrowsableAPIRenderer, JSONRenderer
from rest_framework.response import Response
from feedback.models import Feedback
from feedback.serializers import FeedbackSerializer
@api_view(['GET', 'POST'])
@renderer_classes([JSONRenderer, BrowsableAPIRenderer])
def feedback_list(request, format=None):
    """
    List all feedback or create a new feedback
    """
    if request.method == 'GET':
        serializer = FeedbackSerializer(Feedback.objects.all(), many=True)
        return Response(serializer.data)
    elif request.method == 'POST':
        serializer = FeedbackSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors,
                            status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
def feedback_index(request):
    """Render the plain HTML index listing all feedback entries."""
    return render(request, 'feedback_index.html',
                  {'feedback': Feedback.objects.all()})
|
5,228 | ea323a8398ceff8496e7f8d0f365d50f3115e954 | from django.contrib import admin
# from .models import Product, Client
from .models import Board
# Expose the Board model in the Django admin site.
admin.site.register(Board)
# admin.site.register(Product)
# # admin.site.register(Price)
# admin.site.register(Client)
# # Register your models here.
|
5,229 | d2632461fcdc39509610b96d43dd1ec42dae362f | import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
__author__ = 'alexglenday'
def group(list_df: list, df_col_index: int=0, seaborn_context: str='poster'):
    """Plot the df_col_index-th column of every DataFrame on one shared axes.

    Each series is labelled with its source column name. seaborn_context
    sets the seaborn plotting context (e.g. 'poster').
    """
    sns.set_context(seaborn_context)
    # Idiom: build the label list with a comprehension instead of append().
    df_labels = [df.columns[df_col_index] for df in list_df]
    df_all = pd.DataFrame({label: df.iloc[:, df_col_index]
                           for df, label in zip(list_df, df_labels)})
    df_all.plot()
def individual(list_df: list, seaborn_context: str='poster'):
    """Plot each DataFrame on its own axes, under the given seaborn context."""
    sns.set_context(seaborn_context)
    for frame in list_df:
        frame.plot()
|
5,230 | a0a6bd5de39a7599f7872639cdf3a59b8cda5498 | from processing.DLDataEngineering import DLDataEngineering
from sklearn.preprocessing import OneHotEncoder
import pandas as pd
import numpy as np
import h5py
import os
from scipy.ndimage import gaussian_filter
#Deep learning packages
import tensorflow as tf
#from tensorflow import keras
from tensorflow.keras.layers import Input, Conv2D, Dropout, Activation, UpSampling2D, GlobalMaxPooling2D, multiply
from tensorflow.keras.backend import max
from tensorflow.keras.preprocessing.image import ImageDataGenerator
#from tensorflow import keras
from sklearn.metrics import f1_score,roc_auc_score
import matplotlib.pyplot as plt
import cartopy.feature as cf
import cartopy.crs as ccrs
import cartopy
from keras_unet_collection import models, base, utils
class DLModeler(object):
def __init__(self,model_path,hf_path,num_examples,
class_percentages,predictors,model_args,
model_type):
self.model_path = model_path
self.hf_path = hf_path
self.num_examples = num_examples
self.class_percentages = class_percentages
self.model_args = model_args
self.model_type = model_type
long_predictors = []
#Shorten predictor names
for predictor in predictors:
if "_" in predictor:
predictor_name = predictor.split('_')[0].upper() + predictor.split('_')[-1]
elif " " in predictor:
predictor_name = ''.join([v[0].upper() for v in predictor.split()])
else: predictor_name = predictor
long_predictors.append(predictor_name)
self.predictors = np.array(long_predictors)
#Class to read data and standardize
self.dldataeng = DLDataEngineering(self.model_path,self.hf_path,
self.num_examples,self.class_percentages,self.predictors,
self.model_args)
return
def train_models(self,member,train_dates,valid_dates):
"""
Function that reads and extracts pre-processed 2d member data
from an ensemble to train a convolutional neural net (cnn) or
UNET.
The model data is standardized before being input to the cnn,
with the observation data in the shape (# examples, # classes).
Args:
member (str): ensemble member data that trains a DL model
"""
train_data, train_label = self.dldataeng.extract_training_data(member,
train_dates,self.model_type)
#valid_data, valid_label = self.dldataeng.extract_validation_data(member,valid_dates,self.model_type)
valid_data, valid_label = [],[]
if self.model_type == 'CNN':
onehot_encoder = OneHotEncoder(sparse=False,categories='auto')
encoded_label = onehot_encoder.fit_transform(train_label.reshape(-1, 1))
self.train_CNN(member,train_data,encoded_label,valid_data,valid_label)
elif 'UNET' in self.model_type:
#train_label[train_label >= 50.] = 50.
#log_train_label = np.log((train_label+1.0))
self.train_UNET(member,train_data,train_label,valid_data,valid_label)
return
def train_UNET(self,member,trainX,trainY,validX,validY):
model_file = self.model_path + f'/{member}_{self.model_args}_{self.model_type}.h5'
'''
if os.path.exists(model_file):
del trainX,trainY,validX,validY
unet = tf.keras.models.load_model(model_file,compile=False)
print(f'\nOpening {model_file}\n')
#self.validate_UNET(model,validX,validY,threshold_file)
return
'''
print('\nTraining {0} models'.format(member))
print('Training data shape {0}'.format(np.shape(trainX)))
print('Training label data shape {0}\n'.format(np.shape(trainY)))
#print('Validation data shape {0}'.format(np.shape(validX)))
#print('Validation label data shape {0}\n'.format(np.shape(validY)))
model_obj_params = {'input_size':np.shape(trainX[0]),'n_labels':1,
'stack_num_down':2, 'stack_num_up':1, 'activation':'LeakyReLU',
'output_activation':'ReLU', 'batch_norm':False, 'pool':True,
'unpool':False, 'name':f'{self.model_type}'}
if self.model_type == 'UNET':
model_obj_params['filter_num'] = [16, 32, 64, 128]# 256]
unet_model_obj = models.unet_2d
compile_params = {'loss': 'mean_squared_error'}
else:
compile_params = {'loss': ['mean_squared_error',
'mean_squared_error','mean_squared_error',
'mean_squared_error','mean_squared_error'],
'loss_weights':[0.25, 0.25, 0.25, 0.25, 1.0]}
if self.model_type == 'UNET2plus':
plus_model_params = {'filter_num':[16, 32, 64, 128, 256],
'deep_supervision':True}
model_obj_params.update(plus_model_params)
unet_model_obj = models.unet_plus_2d
elif self.model_type == 'UNET3plus':
plus_model_params = {'filter_num_downi':[16, 32, 64, 128, 256],
'filter_num_skip':'auto', 'filter_num_aggregate':'auto',
'deep_supervision':True}
model_obj_params.update(plus_model_params)
unet_model_obj = models.unet_3plus_2d
try: unet_model = unet_model_obj(**model_obj_params)
except:
print(f"{self.model_type} Model type not found.")
return
unet_model.compile(**compile_params,optimizer=tf.keras.optimizers.Adam(lr=1e-4))
print(unet_model.summary())
#Augment data
aug = ImageDataGenerator(
rotation_range=10,zoom_range=0.15,
width_shift_range=0.2,height_shift_range=0.2,
fill_mode="nearest")
#Fit UNET
n_epochs = 15
bs = 256
conv_hist = unet_model.fit(
aug.flow(trainX,trainY,batch_size=bs),
steps_per_epoch=len(trainX)/bs,
epochs=n_epochs,verbose=1)
'''
pred_s = trainX[0].reshape(1,input_shape[0],
input_shape[1],input_shape[2])
prediction = unet.predict(pred_s)[0,:,:,:]
print(prediction.shape)
plt.imshow(prediction)
plt.colorbar()
plt.show()
return
'''
#Save trained model
unet_model.save(model_file)
print(f'Writing out {model_file}')
#Clear graphs
tf.keras.backend.clear_session()
#self.validate_UNET(model,validX,validY,threshold_file)
return
def train_CNN(self,member,input_data):
"""
Function to train a convolutional neural net (CNN) for random
training data and associated labels.
Args:
member (str): Ensemble member
trainX (tuple): Tuple of (train data, train labels,
validation data, validation labels)
"""
trainX,trainY,validX,validY = input_data
print('\nTraining {0} models'.format(member))
print('Training data shape {0}'.format(np.shape(trainX)))
print('Training label data shape {0}\n'.format(np.shape(trainY)))
print('Validation data shape {0}'.format(np.shape(validX)))
print('Validation label data shape {0}\n'.format(np.shape(validY)))
model_file = self.model_path + f'/{member}_{self.model_args}_CNN_model.h5'
print(model_file)
if not os.path.exists(model_file):
# Clear graphs
tf.keras.backend.clear_session()
#Initiliaze Convolutional Neural Net (CNN)
model = models.Sequential()
input_shape = np.shape(trainX[0])
#First layer: input shape (y,x,# variables)
#Add noise
model.add(layers.GaussianNoise(0.01, input_shape=(input_shape)))
for filters in [32,64,128]:
model.add(layers.Conv2D(filters, (3,3),padding='same'))
model.add(layers.Conv2D(filters, (3,3),padding='same'))
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU(alpha=0.3))
model.add(layers.MaxPooling2D())
#Flatten the last convolutional layer
model.add(layers.Flatten())
model.add(layers.Dense(256))
model.add(layers.LeakyReLU(alpha=0.3))
model.add(layers.Dense(4,activation='softmax'))
#Compile neural net
model.compile(optimizer='adam',loss='categorical_crossentropy',
metrics=[tf.keras.metrics.AUC()])
print(model.summary())
#fit neural net
n_epochs = 10
bs = 256
#augment data
aug = imagedatagenerator(
rotation_range=10,zoom_range=0.15,
width_shift_range=0.2,height_shift_range=0.2,
fill_mode="nearest")
train_generator = aug.flow(trainx,trainy,batch_size=bs)
conv_hist = model.fit(
train_generator,steps_per_epoch=len(trainx) // bs,
epochs=n_epochs,verbose=1,class_weight=self.class_percentages)
#save trained model
model.save(model_file)
print(f'Writing out {model_file}')
else:
model = tf.keras.models.load_model(model_file)
print(f'\nOpening {model_file}\n')
del trainY,trainX
threshold_file = self.model_path + f'/{member}_{self.model_args}_CNN_model_threshold.h5'
if os.path.exists(threshold_file):
del validX,validY
return
self.validate_CNN(model,validX,validY,threshold_file)
return
    def validate_CNN(self, model, validX, validY, threshold_file):
        """Pick the severe-hail probability threshold with the best
        validation AUC and write it to threshold_file as a CSV."""
        print()
        # Predict on validation data
        cnn_preds = model.predict(validX)
        sev_hail = cnn_preds[:, 2]
        sig_hail = cnn_preds[:, 3]
        # Combine the severe hail and sig severe hail classes
        sev_prob_preds = sev_hail + sig_hail
        print('Max probability', np.nanmax(sev_prob_preds))
        # Classify labels as severe hail (label >= 2) or not
        true_preds = np.where(validY >= 2, 1, 0)
        del validX, validY
        df_best_score = pd.DataFrame(np.zeros((1, 1)), columns=['Size Threshold'])
        # Find threshold with the highest validation AUC score
        auc_score = []
        thresholds = np.arange(0.1, 1.01, 0.02)
        for t in thresholds:
            threshold_preds = np.where(sev_prob_preds >= t, 1, 0)
            auc_score.append(roc_auc_score(true_preds, threshold_preds))
        print(auc_score)
        # Output threshold with highest AUC
        df_best_score['Size Threshold'] = thresholds[np.argmax(auc_score)]
        print(df_best_score)
        df_best_score.to_csv(threshold_file)
        print(f'Writing out {threshold_file}')
        return
    def predict_model(self, member, patch_map_conversion_indices,
                      total_map_shape, subset_map_shape, date, patch_radius,
                      forecast_grid_path,
                      lon_grid, lat_grid):
        """
        Function that opens a pre-trained convolutional neural net (cnn).
        and predicts hail probability forecasts for a single ensemble member.
        Args:
            Right now only includes severe hail prediction, not sig-severe
        """
        ##################
        # Load in any saved DL model files
        ##################
        # Clear any saved DL graphs
        tf.keras.backend.clear_session()
        # Load DL model
        model_file = self.model_path + f'/{member}_{self.model_args}_{self.model_type}.h5'
        DL_model = tf.keras.models.load_model(model_file, compile=False)
        if self.model_type == 'CNN':
            # Use minimum prob threshold chosen with validation data
            threshold_file = self.model_path + f'/{member}_{self.model_args}_CNN_model_threshold.h5'
            if not os.path.exists(threshold_file):
                print('No thresholds found')
                return
            # Threshold currently hard-coded to 0 (CSV read disabled).
            prob_thresh = 0  # pd.read_csv(threshold_file).loc[0,'size_threshold']+0.05
            print(prob_thresh)
            total_count = 0
        ##################
        # Extract forecast data (#hours, #patches, nx, ny, #variables)
        ##################
        forecast_data = self.dldataeng.read_files('forecast', member, date, [None], [None])
        if forecast_data is None:
            print('No forecast data found')
            return
        ##################
        # Standardize hourly data
        ##################
        standard_forecast_data = np.array([self.dldataeng.standardize_data(member, forecast_data[hour])
                                           for hour in np.arange(forecast_data.shape[0])])
        del forecast_data
        ##################
        # Produce gridded hourly hail forecast
        ##################
        # NaN-initialized flat grid: (#hours, total_map cells).
        total_grid = np.empty((standard_forecast_data.shape[0],
                               total_map_shape[0] * total_map_shape[1])) * np.nan
        for hour in np.arange(standard_forecast_data.shape[0]):
            print(hour)
            # Predict probability of severe hail
            DL_prediction = np.array(DL_model.predict(standard_forecast_data[hour]))
            ######
            # Will need to fix CNN code to reflect the conversion inds are in
            # patches x (patch_radius*patch_radius) instead of (patches*radius*radius)
            #####
            if self.model_type == 'CNN':
                # NOTE(review): `cnn_preds` and `map_conversion_inds` are
                # undefined in this branch (should presumably be
                # DL_prediction and patch_map_conversion_indices) — this
                # path raises NameError as written; confirm and fix.
                severe_proba_indices = np.where((cnn_preds[:, 2] + cnn_preds[:, 3]) >= prob_thresh)[0]
                severe_patches = np.zeros(subset_map_shape)
                # If no hourly severe hail predicted, continue
                if len(severe_proba_indices) < 1:
                    continue
                severe_patches[severe_proba_indices] = np.full((patch_radius, patch_radius), 1)
                total_grid[hour, map_conversion_inds] = severe_patches.ravel()
                print(hour, len(severe_proba_indices), np.nanmax((cnn_preds[:, 2] + cnn_preds[:, 3])))
                total_count += len(severe_proba_indices)
                print('Total severe probs:', total_count)
                print()
            elif 'UNET' in self.model_type:
                for patch in np.arange(standard_forecast_data.shape[1]):
                    patch_indices = patch_map_conversion_indices[patch]
                    # Gets rid of overlapping edges
                    overlap_pt = 4
                    # If unet3+ then the last output tensor is the correct one
                    if DL_prediction.ndim > 4:
                        hourly_patch_data = DL_prediction[-1, patch, overlap_pt:-overlap_pt,
                                                          overlap_pt:-overlap_pt, 0].ravel()
                    else:
                        hourly_patch_data = DL_prediction[patch, overlap_pt:-overlap_pt,
                                                          overlap_pt:-overlap_pt, 0].ravel()
                    total_grid[hour, patch_indices] = hourly_patch_data
            del DL_prediction
        del standard_forecast_data
        output_data = total_grid.reshape((total_grid.shape[0],) + total_map_shape)
        date_outpath = forecast_grid_path + f'{date[0][:-5]}/'
        # Output gridded forecasts
        if not os.path.exists(date_outpath):
            os.makedirs(date_outpath)
        gridded_out_file = date_outpath + f'{member}_{date[0]}_forecast_grid.h5'
        print(f'Writing out {gridded_out_file}')
        with h5py.File(gridded_out_file, 'w') as hf:
            hf.create_dataset("data", data=output_data,
                              compression='gzip', compression_opts=6)
        return
def dice_loss(y_true, y_pred):
    """Soft Dice loss on logits: 1 - 2*sum(t*p) / sum(t + p)."""
    truth = tf.cast(y_true, tf.float32)
    probs = tf.math.sigmoid(y_pred)
    overlap = 2 * tf.reduce_sum(truth * probs)
    total = tf.reduce_sum(truth + probs)
    return 1 - overlap / total
'''
From: https://idiotdeveloper.com/unet-segmentation-in-tensorflow/
'''
def down_block(x, filters, kernel_size=(3, 3)):
    """Encoder stage: two Conv2D+LeakyReLU+BatchNorm blocks, then 2x2 max
    pooling. Returns (pre-pool features for the skip connection, pooled
    output). Bug fix: `layers` was an undefined name — use tf.keras.layers.
    """
    c = tf.keras.layers.Conv2D(filters, kernel_size, padding='same')(x)
    c = tf.keras.layers.LeakyReLU(alpha=0.2)(c)
    c = tf.keras.layers.BatchNormalization()(c)
    c = tf.keras.layers.Conv2D(filters, kernel_size, padding='same')(c)
    c = tf.keras.layers.LeakyReLU(alpha=0.2)(c)
    c = tf.keras.layers.BatchNormalization()(c)
    p = tf.keras.layers.MaxPooling2D((2, 2))(c)
    return c, p
def up_block(x, skip, filters, kernel_size=(3, 3)):
    """Decoder stage: bilinear 2x upsample, concatenate the encoder skip
    tensor, then two Conv2D+LeakyReLU+BatchNorm blocks. Bug fix: `layers`
    was an undefined name — use tf.keras.layers.
    """
    up = tf.keras.layers.UpSampling2D(size=(2, 2), interpolation='bilinear')(x)
    concat = tf.keras.layers.Concatenate()([up, skip])
    c = tf.keras.layers.Conv2D(filters, kernel_size, padding='same')(concat)
    c = tf.keras.layers.LeakyReLU(alpha=0.2)(c)
    c = tf.keras.layers.BatchNormalization()(c)
    c = tf.keras.layers.Conv2D(filters, kernel_size, padding='same')(c)
    c = tf.keras.layers.LeakyReLU(alpha=0.2)(c)
    c = tf.keras.layers.BatchNormalization()(c)
    return c
def bottleneck(x, filters, kernel_size=(3, 3)):
    """Bottleneck between encoder and decoder: two Conv2D+LeakyReLU+
    BatchNorm blocks with no pooling. Bug fix: `layers` was an undefined
    name — use tf.keras.layers.
    """
    c = tf.keras.layers.Conv2D(filters, kernel_size, padding='same')(x)
    c = tf.keras.layers.LeakyReLU(alpha=0.2)(c)
    c = tf.keras.layers.BatchNormalization()(c)
    c = tf.keras.layers.Conv2D(filters, kernel_size, padding='same')(c)
    c = tf.keras.layers.LeakyReLU(alpha=0.2)(c)
    c = tf.keras.layers.BatchNormalization()(c)
    return c
|
5,231 | 56ed5bb22d77f4d8c061f97d832a60ed9a106549 | from trac.db import DatabaseManager
def do_upgrade(env, ver, cursor):
    """Change schema name from taskboard_schema to agiletools_version
    """
    # Rename the stored schema-version key in Trac's `system` table so the
    # plugin's upgrade machinery finds it under its new name.
    cursor.execute('UPDATE system SET name=%s WHERE name=%s',
                   ("agiletools_version", "taskboard_schema"))
|
5,232 | 0cec92bbfad87020baf5ef1bd005e64bc9a6ed01 | # @Author: Chen yunsheng(Leo YS CHen)
# @Location: Taiwan
# @E-mail:leoyenschen@gmail.com
# @Date: 2017-02-14 00:11:27
# @Last Modified by: Chen yunsheng
import click
from qstrader import settings
from qstrader.compat import queue
from qstrader.price_parser import PriceParser
from qstrader.price_handler.yahoo_daily_csv_bar import YahooDailyCsvBarPriceHandler
from qstrader.strategy import Strategies, DisplayStrategy
from qstrader.risk_manager.example import ExampleRiskManager
from qstrader.portfolio_handler import PortfolioHandler
from qstrader.compliance.example import ExampleCompliance
from qstrader.execution_handler.ib_simulated import IBSimulatedExecutionHandler
from qstrader.statistics.simple import SimpleStatistics
from qstrader.trading_session.backtest import Backtest
#====================================================
# Put this script's own directory first on sys.path so the local
# custom_strategy / custom_position modules shadow any installed ones.
import os,sys
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
dir = os.path.dirname(os.path.abspath(__file__))  # NOTE(review): shadows builtin dir()
sys.path.insert(0,dir)
# Debug output of the resolved paths.
print("parentdir")
print(parentdir)
print("dir")
print(dir)
from custom_strategy import CustomStrategy
from custom_position import CustomPositionSizer
def run(config, testing, tickers, filename):
    """Assemble all QSTrader components, run the backtest, save statistics.

    Args:
        config: settings object (provides CSV_DATA_DIR etc.).
        testing: bool forwarded to simulate_trading.
        tickers: list of ticker symbols to backtest.
        filename: path for the pickled statistics output ('' to skip naming).
    Returns:
        The results of backtest.simulate_trading().
    """
    # Set up variables needed for backtest
    events_queue = queue.Queue()
    csv_dir = config.CSV_DATA_DIR
    initial_equity = PriceParser.parse(500000.00)
    # Use Yahoo Daily Price Handler
    price_handler = YahooDailyCsvBarPriceHandler(
        csv_dir, events_queue, tickers
    )
    # Use the custom strategy, wrapped with a display strategy for output
    strategy = CustomStrategy(tickers, events_queue)
    strategy = Strategies(strategy, DisplayStrategy())
    # Use an example Position Sizer
    position_sizer = CustomPositionSizer()
    # Use an example Risk Manager
    risk_manager = ExampleRiskManager()
    # Use the default Portfolio Handler
    portfolio_handler = PortfolioHandler(
        initial_equity, events_queue, price_handler,
        position_sizer, risk_manager
    )
    # Use the ExampleCompliance component
    compliance = ExampleCompliance(config)
    # Use a simulated IB Execution Handler
    execution_handler = IBSimulatedExecutionHandler(
        events_queue, price_handler, compliance
    )
    # Use the default Statistics
    statistics = SimpleStatistics(config, portfolio_handler)
    # Set up the backtest
    backtest = Backtest(
        price_handler, strategy,
        portfolio_handler, execution_handler,
        position_sizer, risk_manager,
        statistics, initial_equity
    )
    results = backtest.simulate_trading(testing=testing)
    statistics.save(filename)
    return results
"""
@click.command()
@click.option('--config', default=settings.DEFAULT_CONFIG_FILENAME, help='Config filename')
@click.option('--testing/--no-testing', default=False, help='Enable testing mode')
@click.option('--tickers', default='SP500TR', help='Tickers (use comma)')
@click.option('--filename', default='', help='Pickle (.pkl) statistics filename')
"""
def main(config, testing, tickers, filename):
    """Split the comma-separated ticker string, load settings, run backtest.

    config: path to a QSTrader config file; testing: bool; tickers:
    comma-separated ticker symbols; filename: statistics output path.
    """
    tickers = tickers.split(",")
    config = settings.from_file(config, testing)
    run(config, testing, tickers, filename)
if __name__ == "__main__":
main(settings.DEFAULT_CONFIG_FILENAME,False,'SP500TR','')
|
5,233 | dad78d7948fb1038f9cf66732f39c18a18f2a3c8 | from microbit import *
import speech
# Speak forever in a Dalek voice; try changing the parameters
# (speed/pitch/throat/mouth) to alter the sound.
while True:
    speech.say("I am a DALEK - EXTERMINATE", speed=120, pitch=100, throat=100, mouth=200)
|
5,234 | 0926606a222e1277935a48ba7f0ea886fb4e298a | from faker import Faker
from generators.uniform_distribution_gen import UniformDistributionGen
from generators.random_relation_gen import RandomRelationGen
from base.field_base import FieldBase
from generators.normal_distribution_gen import NormalDistributionGen
from generators.first_name_generator import FirstNameGenerator
from generators.last_name_generator import LastNameGenerator
from generators.universal_function_generator import UniversalFunctionGenerator
from generators.print_relations_generator import PrintRelationsGenerator
from base.model_base import ModelBase
from base.class_base import ClassBase
class A:
    """Demo entity: a string field plus a reference to a C instance."""
    def __init__(self) -> None:
        self.alpha: str = ""  # filled in by a generator during model fill
        self.C: C = None      # assigned a random C instance by RandomRelationGen
class B:
    """Demo entity: a string field plus a reference to a C instance."""
    def __init__(self) -> None:
        self.alpha: str = ""  # filled in by a generator during model fill
        self.C: C = None      # assigned a random C instance by RandomRelationGen
class C:
    """Demo entity with four string fields forming a dependency chain
    (alpha depends on beta, beta on gamma, gamma on delta)."""
    def __init__(self) -> None:
        self.alpha: str = ""
        self.beta: str = ""
        self.gamma: str = ""
        self.delta: str = ""
if __name__ == "__main__":
model = ModelBase()
# Person
cb_a = ClassBase(model, A, 10)
cb_b = ClassBase(model, B, 10)
cb_c = ClassBase(model, C, 10)
FieldBase(cb_a, PrintRelationsGenerator(),
"alpha", related_fields=["C.alpha", "C.beta", "C.gamma"])
FieldBase(cb_a, RandomRelationGen(cb_c), "C")
FieldBase(cb_b, PrintRelationsGenerator(),
"alpha", related_fields=["C.alpha", "C.beta", "C.gamma"])
FieldBase(cb_b, RandomRelationGen(cb_c), "C")
FieldBase(cb_c, PrintRelationsGenerator(),
"alpha", related_fields=["beta"])
FieldBase(cb_c, PrintRelationsGenerator(),
"beta", related_fields=["gamma"])
FieldBase(cb_c, PrintRelationsGenerator(),
"gamma", related_fields=["delta"])
FieldBase(cb_c, UniversalFunctionGenerator(
f=Faker().paragraph, nb_sentences=1),
"delta")
model.create_instances()
model.map_field_graph_full()
model.print_generation_order()
model.draw_field_graph()
model.fill_in_instances()
print("")
|
5,235 | cdc32e7c767097a0eb0def71e55f0276982d6a96 | #!/usr/bin/env python
# coding: utf-8
# In[19]:
import numpy as np
import pandas as pd
class simple_nn():
    '''
    Simple neural network with two hidden layers (tanh, then sigmoid) and a
    softmax output, supporting multi-class classification (> 2 classes).
    Refactored from the 2-layer notebook version by Julian Stier / Sahib Julka
    with one extra hidden layer.
    -----------------------------------------------------------------------
    OUTPUT:
        weights      - dict of trained parameters; pass it to predict()
        loss_res     - loss value recorded after each training batch
        accuracy_res - accuracy (%) recorded after each training batch
    -----------------------------------------------------------------------
    INPUT:
        simple_nn(input_dim, output_dim, lr, num_epochs, decay_rate)
            input_dim  - input dimension of the NN
            output_dim - number of output classes
            lr         - initial learning rate
            num_epochs - number of epochs to iterate over
            decay_rate - decay rate for the learning rate
        Example:
            model = simple_nn(2, 2, 0.01, 2, 0.5)
            weights, loss_res, accuracy_res = model.train(X_train, y_train, 10, batch_size=50)
            y_hat = model.predict(weights, X_test)
    '''
    def __init__(self, nn_input_dim, nn_output_dim, lr, epochs, decay_rate):
        self.nn_input_dim = nn_input_dim    # input layer dimensionality
        self.nn_output_dim = nn_output_dim  # output layer dimensionality
        self.lr_init = lr                   # initial learning rate
        self.epochs = epochs
        self.decay_rate = decay_rate        # learning-rate decay factor
        self.reg_lambda = 0.01              # L2 regularization strength
    def init_weights(self, nn_hdim):
        """Randomly initialize weights, scaled by sqrt(fan-in) to aid convergence."""
        np.random.seed(0)
        self.W1 = np.random.rand(self.nn_input_dim, nn_hdim) / np.sqrt(self.nn_input_dim)
        self.b1 = np.random.rand(1, nn_hdim)
        self.W2 = np.random.rand(nn_hdim, nn_hdim) / np.sqrt(nn_hdim)
        self.b2 = np.random.rand(1, nn_hdim)
        # W3/b3 belong to the extra (third) layer added over the original NN
        self.W3 = np.random.rand(nn_hdim, self.nn_output_dim) / np.sqrt(nn_hdim)
        self.b3 = np.random.rand(1, self.nn_output_dim)
        return self.W1, self.b1, self.W2, self.b2, self.W3, self.b3
    def sigmoid(self, x):
        return 1 / (1 + np.exp(-x))
    def sigmoid_deriv(self, x):
        """Derivative of sigmoid w.r.t. its pre-activation input x."""
        f = 1 / (1 + np.exp(-x))
        return f * (1 - f)
    def softmax(self, x):
        exp_scores = np.exp(x)
        return exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
    def tanh_deriv(self, x):
        # expects x = tanh(z): d/dz tanh(z) = 1 - tanh(z)**2 = 1 - x**2
        return 1 - np.power(x, 2)
    def lr_decay(self, epoch):
        """Inverse-time decay of the learning rate."""
        return self.lr_init / (1 + self.decay_rate * epoch)
    def forward_prop(self, W1, b1, W2, b2, W3, b3, x):
        """Propagate x through tanh -> sigmoid -> softmax; return all intermediates."""
        z1 = x.dot(W1) + b1
        a1 = np.tanh(z1)
        z2 = a1.dot(W2) + b2
        a2 = self.sigmoid(z2)
        z3 = a2.dot(W3) + b3
        a3 = self.softmax(z3)
        return z1, a1, z2, a2, z3, a3
    def backward_prop(self, z1, a1, z2, a2, z3, a3, W1, W2, W3, x, y):
        """Backpropagate the cross-entropy error; return parameter gradients."""
        # gradient of cross-entropy w.r.t. softmax input: probs - one_hot(y)
        delta4 = a3
        delta4[range(self.batch_size), y] -= 1
        dW3 = (a2.T).dot(delta4)
        db3 = np.sum(delta4, axis=0, keepdims=True)
        # bug fix: sigmoid_deriv expects the PRE-activation z2; the original
        # passed a2 = sigmoid(z2), computing sigmoid'(sigmoid(z2)).
        delta3 = delta4.dot(W3.T) * self.sigmoid_deriv(z2)
        dW2 = (a1.T).dot(delta3)
        db2 = np.sum(delta3, axis=0, keepdims=True)
        # tanh_deriv correctly takes a1 = tanh(z1) (it computes 1 - x**2)
        delta2 = delta3.dot(W2.T) * self.tanh_deriv(a1)
        dW1 = np.dot(x.T, delta2)
        db1 = np.sum(delta2, axis=0)
        return dW1, db1, dW2, db2, dW3, db3
    def params_update(self, W1, b1, W2, b2, W3, b3, dW1, db1, dW2, db2, dW3, db3):
        """Apply L2 regularization to weight gradients, then gradient-descent step."""
        dW3 += self.reg_lambda * W3
        dW2 += self.reg_lambda * W2
        dW1 += self.reg_lambda * W1
        W1 += -self.lr * dW1
        b1 += -self.lr * db1
        W2 += -self.lr * dW2
        b2 += -self.lr * db2
        W3 += -self.lr * dW3
        b3 += -self.lr * db3
        return W1, b1, W2, b2, W3, b3
    def train(self, X, y, nn_hdim, batch_size):
        """Mini-batch gradient descent; returns (model dict, loss list, accuracy list)."""
        W1, b1, W2, b2, W3, b3 = self.init_weights(nn_hdim)
        self.batch_size = batch_size
        loss_res = []
        accuracy_res = []
        self.model = {}
        num_batches = X.shape[0] // self.batch_size
        for epoch in range(0, self.epochs):
            print('epochs', epoch)
            # decay the learning rate once per epoch (epoch 0 keeps lr_init)
            if epoch == 0:
                self.lr = self.lr_init
            else:
                self.lr = self.lr_decay(epoch)
            for batch_num in range(num_batches):
                print('batch_num', batch_num)
                start = batch_num * self.batch_size
                end = (batch_num + 1) * self.batch_size
                self.x_batched = X[start:end]
                self.y_batched = np.array(y[start:end])
                z1, a1, z2, a2, z3, a3 = self.forward_prop(W1, b1, W2, b2, W3, b3, self.x_batched)
                dW1, db1, dW2, db2, dW3, db3 = self.backward_prop(z1, a1, z2, a2, z3, a3, W1, W2, W3, self.x_batched, self.y_batched)
                W1, b1, W2, b2, W3, b3 = self.params_update(W1, b1, W2, b2, W3, b3, dW1, db1, dW2, db2, dW3, db3)
                self.model = {'W1': W1, 'b1': b1, 'W2': W2, 'b2': b2, 'W3': W3, 'b3': b3}
                # metrics must use the UPDATED weights on the same batch
                loss, acc = self.metrics(W1, W2, W3, b1, b2, b3, self.x_batched, self.y_batched)
                loss_res.append(loss)
                accuracy_res.append(acc)
        return self.model, loss_res, accuracy_res
    def metrics(self, W1, W2, W3, b1, b2, b3, X, y):
        """Return (loss, accuracy) of the current weights on (X, y)."""
        z1, a1, z2, a2, z3, a3 = self.forward_prop(W1, b1, W2, b2, W3, b3, X)
        loss = self.calculate_loss(a3, y, W1, W2, W3)
        acc = self.calculate_accuracy(a3, y)
        return loss, acc
    def calculate_loss(self, a3, y, W1, W2, W3):
        """Mean cross-entropy over the batch plus L2 regularization term."""
        corect_logprobs = -np.log(a3[range(self.batch_size), y])
        data_loss = np.sum(corect_logprobs)
        data_loss += self.reg_lambda / 2 * (np.sum(np.square(W1)) + np.sum(np.square(W2)) + np.sum(np.square(W3)))
        return 1. / self.batch_size * data_loss
    def calculate_accuracy(self, a3, y_true):
        """Percentage of samples whose argmax prediction matches y_true."""
        y_hat = np.argmax(a3, axis=1)
        correct = sum(y_true == y_hat)
        # removed unused `incorrect` local
        return correct / len(y_true) * 100
    def predict(self, model, x):
        """Return predicted class indices for x using a trained weight dict."""
        W1, b1, W2, b2, W3, b3 = model['W1'], model['b1'], model['W2'], model['b2'], model['W3'], model['b3']
        z1, a1, z2, a2, z3, a3 = self.forward_prop(W1, b1, W2, b2, W3, b3, x)
        return np.argmax(a3, axis=1)
|
5,236 | 8ae64c65d6d5dc9f2a99aeceff31657deff06c15 | import sys
import os
sys.path.append(os.pardir)
from ch03.softmax import softmax
from ch04.cross_entropy_error_batch import cross_entropy_error
import numpy as np
class SoftmaxWithLossLayer:
    """Combined softmax + cross-entropy loss layer.

    x -> [Softmax] -> y -> [CrossEntropyError with t] -> out
    (The textbook version also keeps a `loss` field; here the loss is
    simply returned from forward().)
    """
    def __init__(self):
        # cached by forward() for use in backward()
        self.y = None  # softmax output
        self.t = None  # teacher data
    def forward(self, x, t):
        """Apply softmax to *x* and return the cross-entropy loss against *t*."""
        self.t = t
        self.y = softmax(x)
        return cross_entropy_error(self.y, self.t)
    def backward(self, dout=1):
        """Gradient of the loss w.r.t. x: (y - t) averaged over the batch."""
        return (self.y - self.t) / self.t.shape[0]
if __name__ == '__main__':
    # Demo: run forward/backward once on a single sample, then on a batch.
    softmax_with_loss_layer = SoftmaxWithLossLayer()
    # forward(non-batch)
    x = np.array([5, 1, 0])  # scores; the largest lines up with t's hot index
    t = np.array([1, 0, 0])
    loss = softmax_with_loss_layer.forward(x, t)
    print('loss = {0}'.format(loss))
    # backward
    dout = 1
    doutdx = softmax_with_loss_layer.backward(dout)
    print('doutdx = {0}'.format(doutdx))
    # forward(batch)
    xs = np.array([[5, 1, 0], [3, 0, 2], [1, 1, 5], [4, 1, 1]])  # x[1] and x[2] have large difference with t
    ts = np.array([[1, 0, 0], [0, 1, 0], [1, 0, 0], [1, 0, 0]])
    loss = softmax_with_loss_layer.forward(xs, ts)
    print('loss = {0}'.format(loss))
    # backward
    dout = 1
    doutdx = softmax_with_loss_layer.backward(dout)
    print('doutdx = {0}'.format(doutdx))
|
def guguPrint(n):
    """Print the multiplication (gugudan) table for *n*, factors 1 through 9."""
    header = '*' * 30
    rows = ['{} X {} = {}'.format(n, k, n * k) for k in range(1, 10)]
    print('\n'.join([header] + rows))
if __name__ =="__main__":
    # Announce direct execution (Korean: "Executed as Main").
    print('Main으로 실행되었음')
5,238 | 7127df5515e93e27b431c57bec1709475fec8388 | #!/usr/bin/env python
# set up parameters that we care about
PACKAGE = 'jsk_pcl_ros'
from dynamic_reconfigure.parameter_generator_catkin import *
from math import pi
gen = ParameterGenerator()
gen.add("segment_connect_normal_threshold", double_t, 0,
        "threshold of normal to connect clusters", 0.9, 0.0, 1.0)
gen.add("ewma_tau", double_t, 0,
        "tau parameter of EWMA to connect clusters", 0.2, 0.0, 1.0)
gen.add("outlier_threshold", double_t, 0, "outlier threshold", 0.01, 0.0, 0.1)
gen.add("max_iterations", int_t, 0, "maximum iteration", 100, 1, 10000)
# bug fix: description was copy-pasted from max_iterations
gen.add("min_indices", int_t, 0, "minimum number of indices", 1000, 1, 10000)
exit(gen.generate(PACKAGE, "jsk_pcl_ros", "LineSegmentCollector"))
|
5,239 | 358a4948ac1f60e0966328cebf401777042c3d0e | from app.routes import home
from .home import bp as home
from .dashboard import bp as dashboard |
5,240 | 9e6fd6620b4ec6a574d7948fb0d14b0a2ad0d24e | # -*- coding: utf-8 -*-
u"""Hellweg execution template.
:copyright: Copyright (c) 2017 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
from pykern import pkcollections
from pykern import pkio
from pykern.pkdebug import pkdc, pkdp
from rslinac import solver
from sirepo import simulation_db
from sirepo.template import template_common, hellweg_dump_reader
import math
import numpy as np
import os.path
import py.path
import re
HELLWEG_DUMP_FILE = 'all-data.bin'
HELLWEG_SUMMARY_FILE = 'output.txt'
HELLWEG_INI_FILE = 'defaults.ini'
HELLWEG_INPUT_FILE = 'input.txt'
#: Simulation type
SIM_TYPE = 'hellweg'
WANT_BROWSER_FRAME_CACHE = True
# lattice element is required so make it very short and wide drift
_DEFAULT_DRIFT_ELEMENT = 'DRIFT 1e-16 1e+16 2' + "\n"
_HELLWEG_PARSED_FILE = 'PARSED.TXT'
_REPORT_STYLE_FIELDS = ['colorMap', 'notes']
_SCHEMA = simulation_db.get_schema(SIM_TYPE)
def background_percent_complete(report, run_dir, is_running):
    """Report simulation progress; full frame info once the dump file exists."""
    if is_running:
        return {
            'percentComplete': 0,
            'frameCount': 0,
        }
    dump_file = _dump_file(run_dir)
    if os.path.exists(dump_file):
        beam_header = hellweg_dump_reader.beam_header(dump_file)
        last_update_time = int(os.path.getmtime(dump_file))
        frame_count = beam_header.NPoints
        return {
            'lastUpdateTime': last_update_time,
            'percentComplete': 100,
            'frameCount': frame_count,
            'summaryData': _summary_text(run_dir),
        }
    # not running and no dump file: the run failed; surface the parser error
    return {
        'percentComplete': 100,
        'frameCount': 0,
        'error': _parse_error_message(run_dir)
    }
def extract_beam_histrogram(report, run_dir, frame):
    """Histogram of one beam coordinate at the given dump frame.

    NOTE(review): 'histrogram' is a typo; name kept for API compatibility.
    """
    beam_info = hellweg_dump_reader.beam_info(_dump_file(run_dir), frame)
    points = hellweg_dump_reader.get_points(beam_info, report.reportType)
    hist, edges = np.histogram(points, template_common.histogram_bins(report.histogramBins))
    return {
        'title': _report_title(report.reportType, 'BeamHistogramReportType', beam_info),
        'x_range': [edges[0], edges[-1]],
        'y_label': 'Number of Particles',
        'x_label': hellweg_dump_reader.get_label(report.reportType),
        'points': hist.T.tolist(),
    }
def extract_beam_report(report, run_dir, frame):
    """Heatmap of two beam coordinates ('x-y' in reportType) at one frame."""
    data = simulation_db.read_json(run_dir.join(template_common.INPUT_BASE_NAME))
    model = data.models.beamAnimation
    model.update(report)
    beam_info = hellweg_dump_reader.beam_info(_dump_file(run_dir), frame)
    # reportType encodes the two plotted fields, e.g. 'x-y'
    x, y = report.reportType.split('-')
    values = [
        hellweg_dump_reader.get_points(beam_info, x),
        hellweg_dump_reader.get_points(beam_info, y),
    ]
    model['x'] = x
    model['y'] = y
    return template_common.heatmap(values, model, {
        'x_label': hellweg_dump_reader.get_label(x),
        'y_label': hellweg_dump_reader.get_label(y),
        'title': _report_title(report.reportType, 'BeamReportType', beam_info),
        'z_label': 'Number of Particles',
        'summaryData': _summary_text(run_dir),
    })
def extract_parameter_report(report, run_dir):
    """Plot two structure parameters (y1, y2 from reportType) against z."""
    s = solver.BeamSolver(
        os.path.join(str(run_dir), HELLWEG_INI_FILE),
        os.path.join(str(run_dir), HELLWEG_INPUT_FILE))
    s.load_bin(os.path.join(str(run_dir), HELLWEG_DUMP_FILE))
    y1_var, y2_var = report.reportType.split('-')
    x_field = 'z'
    x = s.get_structure_parameters(_parameter_index(x_field))
    y1 = s.get_structure_parameters(_parameter_index(y1_var))
    y1_extent = [np.min(y1), np.max(y1)]
    y2 = s.get_structure_parameters(_parameter_index(y2_var))
    y2_extent = [np.min(y2), np.max(y2)]
    return {
        'title': _enum_text('ParameterReportType', report.reportType),
        'x_range': [x[0], x[-1]],
        'y_label': hellweg_dump_reader.get_parameter_label(y1_var),
        'x_label': hellweg_dump_reader.get_parameter_label(x_field),
        'x_points': x,
        'points': [
            y1,
            y2,
        ],
        # shared y-range covering both series
        'y_range': [min(y1_extent[0], y2_extent[0]), max(y1_extent[1], y2_extent[1])],
        'y1_title': hellweg_dump_reader.get_parameter_title(y1_var),
        'y2_title': hellweg_dump_reader.get_parameter_title(y2_var),
    }
def extract_particle_report(report, run_dir):
    """Plot per-particle traces of one coordinate (reportType) along z."""
    x_field = 'z0'
    particle_info = hellweg_dump_reader.particle_info(_dump_file(run_dir), report.reportType, int(report.renderCount))
    x = particle_info['z_values']
    return {
        'title': _enum_text('ParticleReportType', report.reportType),
        'x_range': [np.min(x), np.max(x)],
        'y_label': hellweg_dump_reader.get_label(report.reportType),
        'x_label': hellweg_dump_reader.get_label(x_field),
        'x_points': x,
        'points': particle_info['y_values'],
        'y_range': particle_info['y_range'],
    }
def fixup_old_data(data):
    """Migrate older saved simulations in place to the current schema."""
    for m in ('beamAnimation', 'beamHistogramAnimation', 'parameterAnimation', 'particleAnimation'):
        if m not in data.models:
            data.models[m] = pkcollections.Dict({})
        template_common.update_model_defaults(data.models[m], m, _SCHEMA)
    if 'solenoidFile' not in data['models']['solenoid']:
        data['models']['solenoid']['solenoidFile'] = ''
    if 'beamDefinition' not in data['models']['beam']:
        # older sims predate beamDefinition; default to the
        # transverse/longitudinal form with empty file fields
        beam = data['models']['beam']
        beam['beamDefinition'] = 'transverse_longitude'
        beam['cstCompress'] = '0'
        beam['transversalFile2d'] = ''
        beam['transversalFile4d'] = ''
        beam['longitudinalFile1d'] = ''
        beam['longitudinalFile2d'] = ''
        beam['cstFile'] = ''
    template_common.organize_example(data)
def get_animation_name(data):
    """Return the fixed animation model name; *data* is unused."""
    return 'animation'
def get_application_data(data):
    """Dispatch an application-data request by its 'method' field.

    Raises:
        RuntimeError: if the method is not recognized.
    """
    if data['method'] == 'compute_particle_ranges':
        return template_common.compute_field_range(data, _compute_range_across_files)
    # raise instead of `assert False` so the check survives `python -O`,
    # matching the RuntimeError style used elsewhere in this module
    raise RuntimeError('unknown application data method: {}'.format(data['method']))
def lib_files(data, source_lib):
    """Return full paths (under *source_lib*) of lib files used by *data*."""
    return template_common.filename_to_path(_simulation_files(data), source_lib)
def get_simulation_frame(run_dir, data, model_data):
    """Dispatch a frame request to the extractor matching data['modelName']."""
    frame_index = int(data['frameIndex'])
    if data['modelName'] == 'beamAnimation':
        # two arg layouts exist: version '1' and the current (default) one
        args = template_common.parse_animation_args(
            data,
            {
                '1': ['reportType', 'histogramBins', 'startTime'],
                '': ['reportType', 'histogramBins', 'plotRangeType', 'horizontalSize', 'horizontalOffset', 'verticalSize', 'verticalOffset', 'isRunning', 'startTime'],
            },
        )
        return extract_beam_report(args, run_dir, frame_index)
    elif data['modelName'] == 'beamHistogramAnimation':
        args = template_common.parse_animation_args(
            data,
            {'': ['reportType', 'histogramBins', 'startTime']},
        )
        return extract_beam_histrogram(args, run_dir, frame_index)
    elif data['modelName'] == 'particleAnimation':
        args = template_common.parse_animation_args(
            data,
            {'': ['reportType', 'renderCount', 'startTime']},
        )
        return extract_particle_report(args, run_dir)
    elif data['modelName'] == 'parameterAnimation':
        args = template_common.parse_animation_args(
            data,
            {'': ['reportType', 'startTime']},
        )
        return extract_parameter_report(args, run_dir)
    raise RuntimeError('unknown animation model: {}'.format(data['modelName']))
def models_related_to_report(data):
    """What models are required for this data['report']
    Args:
        data (dict): simulation
    Returns:
        list: Named models, model fields or values (dict, list) that affect report
    """
    r = data['report']
    if r == 'animation':
        return []
    res = template_common.report_fields(data, r, _REPORT_STYLE_FIELDS) + [
        'beam',
        'ellipticalDistribution',
        'energyPhaseDistribution',
        'solenoid',
        'sphericalDistribution',
        'twissDistribution',
    ]
    # lib file modification times force regeneration when the files change
    for f in template_common.lib_files(data):
        res.append(f.mtime())
    return res
def python_source_for_model(data, model):
    """Return a standalone python script that runs this simulation via rslinac."""
    return '''
from rslinac import solver
{}
with open('input.txt', 'w') as f:
    f.write(input_file)
with open('defaults.ini', 'w') as f:
    f.write(ini_file)
s = solver.BeamSolver('defaults.ini', 'input.txt')
s.solve()
s.save_output('output.txt')
'''.format(_generate_parameters_file(data, is_parallel=len(data.models.beamline)))
def remove_last_frame(run_dir):
    """Intentionally a no-op for this simulation type."""
    pass
def validate_delete_file(data, filename, file_type):
    """Returns True if the filename is in use by the simulation data."""
    # file_type is unused here; all hellweg lib files are checked together
    return filename in _simulation_files(data)
def write_parameters(data, run_dir, is_parallel):
    """Render and write the hellweg parameters file into run_dir.
    Args:
        data (dict): input
        run_dir (py.path): where to write
        is_parallel (bool): run in background?
    """
    pkio.write_text(
        run_dir.join(template_common.PARAMETERS_PYTHON_FILE),
        _generate_parameters_file(
            data,
            run_dir,
            is_parallel,
        ),
    )
def _compute_range_across_files(run_dir, data):
    """Compute [min, max] for every beam-report field across all dump frames.

    Returns a dict mapping field name to [min, max], or to an empty list
    when no values exist for that field (or no dump file is present).
    """
    res = {}
    for v in _SCHEMA.enum.BeamReportType:
        # each report type is an 'x-y' field pair
        x, y = v[0].split('-')
        res[x] = []
        res[y] = []
    dump_file = _dump_file(run_dir)
    if not os.path.exists(dump_file):
        return res
    beam_header = hellweg_dump_reader.beam_header(dump_file)
    # range() works on both python 2 and 3; xrange was python-2 only
    for frame in range(beam_header.NPoints):
        beam_info = hellweg_dump_reader.beam_info(dump_file, frame)
        for field in res:
            values = hellweg_dump_reader.get_points(beam_info, field)
            if not len(values):
                pass
            elif len(res[field]):
                res[field][0] = min(min(values), res[field][0])
                res[field][1] = max(max(values), res[field][1])
            else:
                res[field] = [min(values), max(values)]
    return res
def _dump_file(run_dir):
    """Absolute path of the hellweg binary dump inside *run_dir*."""
    return os.path.join(str(run_dir), HELLWEG_DUMP_FILE)
def _enum_text(enum_name, v):
    """Return the display label for schema enum value *v* of *enum_name*.

    Raises:
        RuntimeError: if *v* is not a value of the enum.
    """
    enum_values = _SCHEMA['enum'][enum_name]
    for entry in enum_values:
        value, label = entry[0], entry[1]
        if value == v:
            return label
    raise RuntimeError('invalid enum value: {}, {}'.format(enum_values, v))
def _generate_beam(models):
    """Format the hellweg BEAM command for the selected beam definition."""
    # BEAM SPH2D 0.564 -15 5 NORM2D 0.30 0.0000001 90 180
    beam_def = models.beam.beamDefinition
    if beam_def == 'transverse_longitude':
        return 'BEAM {} {}'.format(_generate_transverse_dist(models), _generate_longitude_dist(models))
    if beam_def == 'cst_pit':
        return 'BEAM CST_PIT {} {}'.format(
            template_common.lib_file_name('beam', 'cstFile', models.beam.cstFile),
            'COMPRESS' if models.beam.cstCompress else '',
        )
    if beam_def == 'cst_pid':
        return 'BEAM CST_PID {} {}'.format(
            template_common.lib_file_name('beam', 'cstFile', models.beam.cstFile),
            _generate_energy_phase_distribution(models.energyPhaseDistribution),
        )
    raise RuntimeError('invalid beam def: {}'.format(beam_def))
def _generate_cell_params(el):
#TODO(pjm): add an option field to select auto-calculate
if el.attenuation == 0 and el.aperture == 0:
return '{} {} {}'.format(el.phaseAdvance, el.phaseVelocity, el.acceleratingInvariant)
return '{} {} {} {} {}'.format(el.phaseAdvance, el.phaseVelocity, el.acceleratingInvariant, el.attenuation, el.aperture)
def _generate_charge(models):
if models.beam.spaceCharge == 'none':
return ''
return 'SPCHARGE {} {}'.format(models.beam.spaceCharge.upper(), models.beam.spaceChargeCore)
def _generate_current(models):
return 'CURRENT {} {}'.format(models.beam.current, models.beam.numberOfParticles)
def _generate_energy_phase_distribution(dist):
return '{} {} {}'.format(
dist.meanPhase,
dist.phaseLength,
dist.phaseDeviation if dist.distributionType == 'gaussian' else '',
)
def _generate_lattice(models):
res = ''
for el in models.beamline:
if el.type == 'powerElement':
res += 'POWER {} {} {}'.format(el.inputPower, el.frequency, el.phaseShift)
elif el.type == 'cellElement':
res += 'CELL {}'.format(_generate_cell_params(el))
has_cell_or_drift = True
elif el.type == 'cellsElement':
res += 'CELLS {} {}'.format(el.repeat, _generate_cell_params(el))
has_cell_or_drift = True
elif el.type == 'driftElement':
res += 'DRIFT {} {} {}'.format(el.length, el.radius, el.meshPoints)
has_cell_or_drift = True
elif el.type == 'saveElement':
#TODO(pjm): implement this
pass
else:
raise RuntimeError('unknown element type: {}'.format(el.type))
res += "\n"
return res
def _generate_longitude_dist(models):
dist_type = models.beam.longitudinalDistribution
if dist_type == 'norm2d':
dist = models.energyPhaseDistribution
if dist.distributionType == 'uniform':
return 'NORM2D {} {} {} {}'.format(
dist.meanEnergy, dist.energySpread, dist.meanPhase, dist.phaseLength)
if dist.distributionType == 'gaussian':
return 'NORM2D {} {} {} {} {} {}'.format(
dist.meanEnergy, dist.energySpread, dist.energyDeviation, dist.meanPhase, dist.phaseLength, dist.phaseDeviation)
raise RuntimeError('unknown longitudinal distribution type: {}'.format(models.longitudinalDistribution.distributionType))
if dist_type == 'file1d':
return 'FILE1D {} {}'.format(
template_common.lib_file_name('beam', 'longitudinalFile1d', models.beam.longitudinalFile1d),
_generate_energy_phase_distribution(models.energyPhaseDistribution),
)
if dist_type == 'file2d':
return 'FILE2D {}'.format(template_common.lib_file_name('beam', 'transversalFile2d', beam.transversalFile2d))
raise RuntimeError('unknown longitudinal distribution: {}'.format(models.beam.longitudinalDistribution))
def _generate_options(models):
if models.simulationSettings.allowBackwardWaves == '1':
return 'OPTIONS REVERSE'
return ''
def _generate_parameters_file(data, run_dir=None, is_parallel=False):
    """Render the hellweg input from the simulation models via jinja.

    run_dir is unused here; kept for the template-API signature.
    """
    template_common.validate_models(data, _SCHEMA)
    v = template_common.flatten_data(data['models'], {})
    v['optionsCommand'] = _generate_options(data['models'])
    v['solenoidCommand'] = _generate_solenoid(data['models'])
    v['beamCommand'] = _generate_beam(data['models'])
    v['currentCommand'] = _generate_current(data['models'])
    v['chargeCommand'] = _generate_charge(data['models'])
    if is_parallel:
        v['latticeCommands'] = _generate_lattice(data['models'])
    else:
        # report runs don't need the real lattice; a tiny wide drift
        # satisfies hellweg's requirement for at least one element
        v['latticeCommands'] = _DEFAULT_DRIFT_ELEMENT
    return template_common.render_jinja(SIM_TYPE, v)
def _generate_solenoid(models):
solenoid = models.solenoid
if solenoid.sourceDefinition == 'none':
return ''
if solenoid.sourceDefinition == 'values':
#TODO(pjm): latest version also has solenoid.fringeRegion
return 'SOLENOID {} {} {}'.format(
solenoid.fieldStrength, solenoid.length, solenoid.z0)
if solenoid.sourceDefinition == 'file':
return 'SOLENOID {}'.format(
template_common.lib_file_name('solenoid', 'solenoidFile', solenoid.solenoidFile))
raise RuntimeError('unknown solenoidDefinition: {}'.format(solenoid.sourceDefinition))
def _generate_transverse_dist(models):
    """Format the transverse part of the BEAM command."""
    dist_type = models.beam.transversalDistribution
    if dist_type == 'twiss4d':
        dist = models.twissDistribution
        return 'TWISS4D {} {} {} {} {} {}'.format(
            dist.horizontalAlpha, dist.horizontalBeta, dist.horizontalEmittance,
            dist.verticalAlpha, dist.verticalBeta, dist.verticalEmittance)
    if dist_type == 'sph2d':
        dist = models.sphericalDistribution
        # NOTE: mutates the model so a flat curvature always emits factor 0
        if dist.curvature == 'flat':
            dist.curvatureFactor = 0
        return 'SPH2D {} {} {}'.format(dist.radialLimit, dist.curvatureFactor, dist.thermalEmittance)
    if dist_type == 'ell2d':
        dist = models.ellipticalDistribution
        return 'ELL2D {} {} {} {}'.format(dist.aX, dist.bY, dist.rotationAngle, dist.rmsDeviationFactor)
    beam = models.beam
    if dist_type == 'file2d':
        return 'FILE2D {}'.format(template_common.lib_file_name('beam', 'transversalFile2d', beam.transversalFile2d))
    if dist_type == 'file4d':
        return 'FILE4D {}'.format(template_common.lib_file_name('beam', 'transversalFile4d', beam.transversalFile4d))
    raise RuntimeError('unknown transverse distribution: {}'.format(dist_type))
def _parameter_index(name):
    """Delegate to hellweg_dump_reader's structure-parameter index lookup."""
    return hellweg_dump_reader.parameter_index(name)
def _parse_error_message(run_dir):
    """Extract the first ERROR: line from the hellweg parser output.

    Returns a fallback message when the parsed file is missing or contains
    no error line.
    """
    path = os.path.join(str(run_dir), _HELLWEG_PARSED_FILE)
    if not os.path.exists(path):
        return 'No elements generated'
    text = pkio.read_text(str(path))
    for line in text.split("\n"):
        # raw string so \s is an explicit regex escape, not a python escape
        match = re.search(r'^ERROR:\s(.*)$', line)
        if match:
            return match.group(1)
    return 'No output generated'
def _report_title(report_type, enum_name, beam_info):
    """Compose '<label>, z=... cm'; z is scaled by 100 (presumably m -> cm - TODO confirm)."""
    return '{}, z={:.4f} cm'.format(
        _enum_text(enum_name, report_type),
        100 * hellweg_dump_reader.get_parameter(beam_info, 'z'))
def _simulation_files(data):
    """Return the lib file names referenced by the simulation's models."""
    res = []
    solenoid = data.models.solenoid
    if solenoid.sourceDefinition == 'file' and solenoid.solenoidFile:
        res.append(template_common.lib_file_name('solenoid', 'solenoidFile', solenoid.solenoidFile))
    beam = data.models.beam
    if beam.beamDefinition == 'cst_pit' or beam.beamDefinition == 'cst_pid':
        res.append(template_common.lib_file_name('beam', 'cstFile', beam.cstFile))
    if beam.beamDefinition == 'transverse_longitude':
        if beam.transversalDistribution == 'file2d':
            res.append(template_common.lib_file_name('beam', 'transversalFile2d', beam.transversalFile2d))
        elif beam.transversalDistribution == 'file4d':
            res.append(template_common.lib_file_name('beam', 'transversalFile4d', beam.transversalFile4d))
        if beam.longitudinalDistribution == 'file1d':
            res.append(template_common.lib_file_name('beam', 'longitudinalFile1d', beam.longitudinalFile1d))
        if beam.longitudinalDistribution == 'file2d':
            res.append(template_common.lib_file_name('beam', 'longitudinalFile2d', beam.longitudinalFile2d))
    return res
def _summary_text(run_dir):
    """Read the hellweg text summary produced in *run_dir*."""
    return pkio.read_text(os.path.join(str(run_dir), HELLWEG_SUMMARY_FILE))
|
5,241 | b0064a5cd494d5ad232f27c63a4df2c56a4c6a66 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-10-28 17:50
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial migration (auto-generated by Django 1.9.1): creates the EMR,
    EMRNote, EMRTrackedMetric and EMRVitals tables."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='EMR',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('emergency', models.CharField(default='', max_length=10)),
            ],
        ),
        migrations.CreateModel(
            name='EMRNote',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('dateCreated', models.DateTimeField(default=django.utils.timezone.now)),
                ('comments', models.CharField(default='', max_length=500)),
                ('emr', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='emr.EMR')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='EMRTrackedMetric',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('dateCreated', models.DateTimeField(default=django.utils.timezone.now)),
                ('label', models.CharField(default='', max_length=200)),
                ('comments', models.CharField(default='', max_length=500)),
                ('emr', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='emr.EMR')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='EMRVitals',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('dateCreated', models.DateTimeField(default=django.utils.timezone.now)),
                ('restingBPM', models.IntegerField(default=0)),
                ('bloodPressure', models.CharField(default='', max_length=10)),
                ('height', models.FloatField(default=0)),
                ('weight', models.FloatField(default=0)),
                ('age', models.IntegerField(default=0)),
                ('comments', models.CharField(default='', max_length=1000)),
                ('emr', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='emr.EMR')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
|
5,242 | 07cce6802ab3259dbc78ab86a8dd6d6a4a617c7e | from django.db import models
# Create your models here.
class Remedio(models.Model):
    """A medicine: unique name, price, optional details and a photo."""
    nome = models.CharField(max_length=100, unique=True, help_text='Nome')
    valor = models.FloatField(null=False, help_text='Valor')
    detalhe = models.CharField(max_length=500, null=True)
    foto = models.ImageField(upload_to='media')
    def __str__(self):
        return self.nome
5,243 | e976f7e423d75f7fc8a3d5cd597bdd9358ae317e | from flask import logging
from flask_sqlalchemy import SQLAlchemy
from passlib.apps import custom_app_context as pwd_context
logger = logging.getLogger(__name__)
db = SQLAlchemy() # flask-sqlalchemy
class User(db.Model):
    """API user with a passlib-hashed password."""
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(32), index=True)
    password_hash = db.Column(db.String(128))
    def hash_password(self, password):
        """Hash and store *password* (plaintext is never persisted)."""
        self.password_hash = pwd_context.encrypt(password)
    def verify_password(self, password):
        """Return True when *password* matches the stored hash."""
        return pwd_context.verify(password, self.password_hash)
class Weather(db.Model):
    """One timestamped weather-station measurement sample."""
    id = db.Column(db.Integer, primary_key=True)
    timestamp = db.Column(db.DateTime(timezone=True))
    pressure = db.Column(db.Float)
    inTemp = db.Column(db.Float)
    outTemp = db.Column(db.Float)
    windDir = db.Column(db.Float)
    windSpeed = db.Column(db.Float)
    outHumidity = db.Column(db.Float)
    inHumidity = db.Column(db.Float)
    rain = db.Column(db.Float)
    def save(self):
        """Persist this sample in its own commit."""
        db.session.add(self)
        db.session.commit()
class Webcam(db.Model):
    """A timestamped webcam image stored as a binary blob."""
    id = db.Column(db.Integer, primary_key=True)
    timestamp = db.Column(db.DateTime(timezone=True))
    data = db.Column(db.LargeBinary)
    def save(self):
        """Persist this image in its own commit."""
        db.session.add(self)
        db.session.commit()
    def delete(self):
        """Remove this image in its own commit."""
        db.session.delete(self)
        db.session.commit()
def setup_api_user():
    """Create the fixed API user if it does not already exist.

    SECURITY NOTE(review): credentials are hard-coded in source; they should
    be moved to configuration/secrets management.
    """
    username = "weatherstation"
    password = "umevohvoori2zaew2choKaeshooPho"
    if User.query.filter_by(username=username).first() is not None:
        return
    user = User(username=username)
    user.hash_password(password)
    db.session.add(user)
    db.session.commit()
    logger.info("User created")
def init_db(app):
    """Bind SQLAlchemy to *app*, create tables, seed the API user and
    optionally bootstrap data; returns the db instance."""
    db.app = app
    db.init_app(app)
    db.create_all()
    setup_api_user()
    if app.config["SQLALCHEMY_BOOTSTRAP_DATA"]:
        import_from_json()
    return db
def import_from_json():
    """Placeholder for bootstrapping data from JSON; intentionally empty."""
    pass
|
5,244 | 87c200796e1fac508a43e899c0ed53878b8c1d88 | from Smooth import smoothing
def n_grams(unigramsFile, bigramsFile, parameterization, sentences):
    """Disambiguate a target word using unigram/bigram weights.

    For every sentence, take a window (previous word, target, next word),
    substitute the two candidate lemmas from the parameterization file,
    score each candidate with the matching unigram/bigram weights and print
    the more probable window.

    Bug fixes vs. the original:
      * `unigram1`/`unigram2` accumulators are now initialized (previously
        NameError on first use);
      * the two candidate windows are independent copies (previously both
        names aliased one list, so the second substitution clobbered the
        first);
      * membership tests now check every window token (`(a or b or c) in x`
        only tested the first truthy token);
      * the next-word guard uses len(sentence), not len(sentences);
      * bigram matching compares consecutive token pairs;
      * ties fall back to candidate 1 so `lema` is always bound.
    """
    words = []
    param = []
    unigrams = []
    bigrams = []
    with open(parameterization) as p:  # parameterization file
        data = p.read().split()
        word = data[0]
        param.append(data[1])
        param.append(data[2])
        param.append(data[4])
    with open(unigramsFile) as u:  # unigram weights
        for line in u.readlines():
            values = line.split()
            if values[0] in param:
                unigrams.append(values)
    with open(bigramsFile) as b:  # bigram weights
        for line in b.readlines():
            values = line.split()
            if values[0] in param or values[1] in param:
                bigrams.append(values)
    with open(sentences) as f:  # text with one sentence per line
        for line in f.readlines():
            sentence = line.split()
            # assumes every sentence contains the target word - TODO confirm
            index = sentence.index(word)
            aux = []
            if index > 0:
                aux.append(sentence[index - 1])
            aux.append(sentence[index])
            if index + 1 < len(sentence):  # fixed: was len(sentences)
                aux.append(sentence[index + 1])
            words.append(aux)
    for w in words:
        unigram1 = unigram2 = 0.0  # fixed: were never initialized
        bigram1 = bigram2 = 0.0
        print(w)
        option1 = list(w)  # fixed: independent copies, not aliases
        option1[option1.index(word)] = param[1]
        option2 = list(w)
        option2[option2.index(word)] = param[2]
        for unigram in unigrams:
            if any(token in unigram for token in option1):
                unigram1 += float(unigram[1])
            elif any(token in unigram for token in option2):
                unigram2 += float(unigram[1])
        for bigram in bigrams:
            if any(option1[i:i + 2] == bigram[0:2] for i in range(len(option1) - 1)):
                bigram1 += float(bigram[2])
            elif any(option2[i:i + 2] == bigram[0:2] for i in range(len(option2) - 1)):
                bigram2 += float(bigram[2])
        # original rule: option1 wins iff max(u1, b1) > max(u2, b2);
        # tie-break on option1 so the result is always defined
        lema = option2 if max(unigram2, bigram2) > max(unigram1, bigram1) else option1
        print("O lema mais provavel para" + str(w) + "e: " + str(lema))
|
5,245 | 2c4eb07a32c6903ae31006f42c13c55e6cc42eb5 | __version__ = "alph 1.0"
|
5,246 | 5810739300067e8f207d09bf971484a278372a9a | """asks the user for english words to latinize"""
def latinize_word(word):
    """Apply "bee latin" to a single word.

    Consonant-initial words have their first letter moved to the end
    followed by 'uzz'; everything else (vowel-initial words, digits,
    punctuation) gets 'buzz' appended.  The result is lower-cased.
    """
    if not word:
        # FIX: an empty string previously raised IndexError on word[0].
        return word
    if word[0].lower() in 'bcdfghjklmnpqrstvwxyz':
        word = word[1:] + word[0] + 'uzz'
    else:
        word += 'buzz'
    return word.lower()
def latinize_sentence(sentence):
    """Translate every whitespace-separated word of *sentence* into bee latin."""
    translated = (latinize_word(token) for token in sentence.split())
    return " ".join(translated)
def main():
    """Prompt for English sentences until 'q' is entered, printing each one in bee latin."""
    prompt = 'Enter English sentence: '
    while True:
        english_sentence = input(prompt)
        if english_sentence == 'q':
            break
        print(f'Bee latin = {latinize_sentence(english_sentence)}')
    print(latinize_word('goodbye'))
main() |
5,247 | 1a4132358fa9bd4cd74970286ec8bb212b1857cd | from __future__ import absolute_import, print_function
from django.db import models
from django.utils import timezone
from sentry.db.models import (
Model,
BaseManager,
UUIDField,
sane_repr,
)
class MonitorLocation(Model):
    """Named location record, persisted in ``sentry_monitorlocation``."""
    # Marks the model as part of Sentry's core export set.
    __core__ = True
    # Stable external identifier, auto-generated on first save.
    guid = UUIDField(unique=True, auto_add=True)
    name = models.CharField(max_length=128)
    date_added = models.DateTimeField(default=timezone.now)
    # Manager caches per-guid lookups.
    objects = BaseManager(cache_fields=('guid', ))
    class Meta:
        app_label = 'sentry'
        db_table = 'sentry_monitorlocation'
    __repr__ = sane_repr('guid', 'name')
|
5,248 | 74dd9151195fef41862c2793621172518f1f486d | from django.shortcuts import render,redirect
from .forms import UserRegisterForm, IsEmri ,TestForm,PDF_Rapor
from django.contrib import messages
from django.contrib.auth import authenticate, login ,logout
from django.http import HttpResponseRedirect, HttpResponse ,JsonResponse
from django.urls import reverse
from django.db.models import Max
from django.contrib.auth.models import User
from .models import Emir , Test, Bildirim, Uretim, Valf
from .models import Valf_montaj,Valf_test,Valf_govde,Valf_fm200,Valf_havuz,Valf_final_montaj
from django.contrib.auth.decorators import login_required
import json, platform, base64, datetime, os
from django.utils import timezone
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.csrf import csrf_exempt
from django.core.files.storage import FileSystemStorage
from django.template.loader import render_to_string
from weasyprint import HTML
from django.conf import settings
from django.core.files.storage import FileSystemStorage
from base64 import b64decode
# Create your views here.
#mac = platform.machine()[:3] # eğer device ras pi ise 'arm' döner
server = '192.168.1.38:8000'
def get_client_ip(request):
    """Return the client IP, preferring the first X-Forwarded-For entry."""
    forwarded = request.META.get('HTTP_X_FORWARDED_FOR')
    ip = forwarded.split(',')[0] if forwarded else request.META.get('REMOTE_ADDR')
    print(ip)
    return ip
def bildirim(request):
    """Return the notifications of the last 14 days as a JSON list."""
    now = timezone.now()
    window_start = now - timezone.timedelta(days=14)
    rows = Bildirim.objects.filter(zaman__range=[window_start, now])
    bildirims = [entry for entry in rows.values()]
    print(bildirims)
    return JsonResponse(bildirims, safe=False)
@login_required
def index(request):
    """Dashboard: per active work order, count the valves at each production stage."""
    grup = request.user.grup
    birim = request.user.birim
    emirler = Emir.objects.filter(durum="Aktif")

    def stage_count(emir_id, field):
        # Valves of this order that already have the given stage record.
        qs = Valf.objects.filter(is_emri_id=emir_id).filter(**{field + '__isnull': False})
        return qs.values_list(field, flat=True).count() or 0

    l = []
    for e in emirler.values():
        l.append({
            'is_emri': e['is_emri'],
            'valfmontaj': stage_count(e['id'], 'valf_montaj_id'),
            'valftest': stage_count(e['id'], 'valf_test_id'),
            'valfgovde': stage_count(e['id'], 'valf_govde_id'),
            'fm200': stage_count(e['id'], 'fm200_azot_id'),
            'havuztest': stage_count(e['id'], 'havuz_id'),
            'finalmontaj': stage_count(e['id'], 'valf_final_montaj_id'),
        })
    print(l)
    return render(request,'index.html', { 'grup' : grup, "emirler" : emirler, 'birim': birim,'server' : server,'uretims':l})
@login_required
def arama(request):
    """Search view: list test/production records filtered by type and work order."""
    mac = request.user_agent.os.family
    q = request.GET.get('q') or request.GET.get('uretim')
    emir = request.GET.get('emir')
    emirs = Emir.objects.all()
    media_url = settings.MEDIA_URL
    if q:
        aranan = q
    elif emir:
        aranan = "isemri"
    else:
        aranan = ""
        print('bos')
    grup = request.user.grup
    birim = request.user.birim
    testler = Test.objects.filter(tur=q)
    print(q)
    # Map the production-stage keyword to its model; anything else falls
    # back to the generic Uretim table filtered by type.
    stage_models = {
        "valfmontaj": Valf_montaj,
        "valfgovde": Valf_govde,
        "fm200": Valf_fm200,
        "havuztest": Valf_havuz,
        "finalmontaj": Valf_final_montaj,
    }
    if q in stage_models:
        uretims = stage_models[q].objects.all()
    else:
        uretims = Uretim.objects.filter(tur=q)
    print(uretims)
    emirler = Emir.objects.all() if emir == "tumu" else Emir.objects.filter(is_emri=emir)
    return render(request,'arama.html',{ 'mac' : mac , 'testler' : testler , 'grup': grup,"emirler": emirler, "aranan": aranan, "emirs":emirs, 'birim': birim,'media_url':media_url,"uretims":uretims,'server' : server})
@login_required
@csrf_exempt
def giriskalite(request):
    """Incoming quality-control entry view.

    Handles POSTed test results: bulk pressure ('basinc') and manometer
    ('manometre') rows, and lower/upper nipple and copper-membrane /
    safety-valve lots (with optional PDF upload).  Always re-renders the
    QC page afterwards.
    """
    mac = request.user_agent.os.family
    grup = request.user.grup
    birim = request.user.birim
    #Test.objects.all().delete() # uncomment to delete all test results
    fullname = request.user.first_name + ' ' + request.user.last_name
    if request.method == 'POST':
        if request.POST.dict()['tur'] == 'basinc':
            # Pressure test: 'veri' is a JSON list of
            # [serial_no, open_value, close_value, accepted] rows.
            veris = json.loads(request.POST.dict()['veri'])
            for veri in veris:
                t = Test(tur='basinc',seri_no = veri[0] , acma = veri[1] , kapatma = veri[2], kabul_durumu = veri[3], testi_yapan = fullname)
                t.save(force_insert=True)
        elif request.POST.dict()['tur'] == 'manometre':
            # Manometer test: [serial_no, reading, accepted] rows.
            veris = json.loads(request.POST.dict()['veri'])
            for veri in veris:
                t = Test(tur='manometre',seri_no = veri[0] , okunan_deger = veri[1], kabul_durumu = veri[2] ,testi_yapan = fullname)
                t.save(force_insert=True)
        elif request.POST.dict()['tur'] == 'altnipel':
            # Lower nipple: validate the serial-number range before saving.
            print(request.POST)
            kontrolResult= nipelSeriNoKontrol(request)
            if kontrolResult == True :
                if request.FILES:
                    upload_file = request.FILES['file']
                    fs = FileSystemStorage()
                    fs.save(upload_file.name,upload_file)
                next_lot_no = getNextLotNo( request.POST.dict()['tur'])
                t = Test(tur='altnipel',lot_no = next_lot_no , pdf_ismi = request.POST.get('pdf_ismi') ,baslangic_seri_no = request.POST.get('baslangic_seri_no'),bitis_seri_no = request.POST.get('bitis_seri_no'), kabul_durumu = request.POST.get('kabulAlt'),testi_yapan = fullname)
                t.save(force_insert=True)
                messages.success(request,'Alt nipel testi başarıyla kaydedildi.')
        elif request.POST.dict()['tur'] == 'ustnipel':
            # Upper nipple: same flow as the lower-nipple branch.
            print(request.POST)
            kontrolResult= nipelSeriNoKontrol(request)
            if kontrolResult == True :
                if request.FILES:
                    upload_file = request.FILES['file']
                    fs = FileSystemStorage()
                    fs.save(upload_file.name,upload_file)
                next_lot_no = getNextLotNo( request.POST.dict()['tur'])
                t = Test(tur='ustnipel',lot_no = next_lot_no , pdf_ismi = request.POST.get('pdf_ismi') ,baslangic_seri_no = request.POST.get('baslangic_seri_no'),bitis_seri_no = request.POST.get('bitis_seri_no'), kabul_durumu = request.POST.get('kabulUst'),testi_yapan = fullname)
                t.save(force_insert=True)
                messages.success(request,'Üst nipel testi başarıyla kaydedildi.')
        elif request.POST.dict()['tur'] == 'bakirmembran':
            # Copper membrane / safety valve: the concrete type comes from
            # the 'test_tur' field, so one branch records both kinds.
            print(request.POST)
            next_lot_no = getNextLotNo( request.POST.get('test_tur') )
            if request.FILES:
                upload_file = request.FILES['file']
                fs = FileSystemStorage()
                fs.save(upload_file.name,upload_file)
            t = Test(tur=request.POST.get('test_tur'), lot_no = next_lot_no, pdf_ismi = request.POST.get('pdf_ismi') ,test_basinci = request.POST.get('test_basinci'),
            patlama_basinci = request.POST.get('patlama_basinci'), kabul_durumu = request.POST.get('kabulBak'),testi_yapan = fullname)
            t.save(force_insert=True)
            if(request.POST.get('test_tur') =='bakirmembran'):
                messages.success(request,'Bakır membran testi başarıyla kaydedildi.')
            else:
                messages.success(request,'Emniyet ventili testi başarıyla kaydedildi.')
        """
        elif request.POST.get('tur') == 'emniyet':
            print(request.POST)
            if request.FILES:
                upload_file = request.FILES['file']
                fs = FileSystemStorage()
                fs.save(upload_file.name,upload_file)
            next_lot_no = getNextLotNo( request.POST.dict()['tur'])
            t = Test(tur='emniyet',lot_no =next_lot_no, pdf_ismi = request.POST.get('pdf_ismi') ,test_basinci = request.POST.get('test_basinci'), patlama_basinci = request.POST.get('patlama_basinci'),kabul_durumu = request.POST.get('kabulEmn'),testi_yapan = fullname)
            t.save(force_insert=True)
            messages.success(request,'Emniyet ventili testi başarıyla kaydedildi.')
        """
    return render(request,'giris-kalite-kontrol.html',{ 'mac' : mac , 'grup': grup, 'birim': birim,'server' : server})
def getNextLotNo(tur):
    """Return the next free lot number for the given test type (1-based)."""
    latest = Test.objects.filter(tur=tur).order_by('-lot_no').first()
    if latest is None:
        return 1
    return latest.lot_no + 1
def nipelSeriNoKontrol(request):
    """Validate a nipple serial-number range from the POSTed form.

    Returns False (after queuing a warning message) when the start number
    is greater than the end number, or when the new range overlaps a
    previously recorded range for the same test type; True otherwise.
    """
    baslangic_seri_no = request.POST.get('baslangic_seri_no')
    bitis_seri_no = request.POST.get('bitis_seri_no')
    if int(baslangic_seri_no) > int(bitis_seri_no):
        # FIX: dropped the dead `errorFlag` local -- it was written but
        # never read anywhere.
        messages.warning(request,'Başlangıç seri numarası, bitiş seri numarasından büyük olamaz!')
        return False
    testler = Test.objects.filter(tur=request.POST.dict()['tur'] )
    # New range as a set for O(1) overlap membership checks.
    yeni_aralik = set(range(int(baslangic_seri_no), int(bitis_seri_no) + 1))
    for test in testler:
        mevcut_aralik = range(int(test.baslangic_seri_no), int(test.bitis_seri_no) + 1)
        if yeni_aralik.intersection(mevcut_aralik):
            messages.warning(request,'Seri numarası aralığı mevcut bir seri numarası aralığı ile çakışmaktadır!')
            return False
    return True
@login_required
@csrf_exempt
def uretimkontrol(request):
    """Production-control view.

    Dispatches POSTed production events -- production start ('valfmontaj'),
    valve assembly ('kurlenme'), valve test, body assembly ('valfgovde'),
    FM200/nitrogen fill ('fm200'), pool test ('havuztest') and final
    assembly ('finalmontaj') -- creating the matching records and advancing
    each valve through its stages, then renders the production-control page
    with the currently curing batches and the active work orders.
    """
    mac = request.user_agent.os.family
    ip = get_client_ip(request)
    # NOTE(review): this comparison has no effect; it was probably meant to
    # be an assignment (`ip = '192.168.1.36'`) used as a debug override --
    # confirm and either assign or delete.
    ip == '192.168.1.36'
    grup = request.user.grup
    birim = request.user.birim
    #Uretim.objects.all().delete() # you can uncomment this to delete the production records
    fullname = request.user.first_name + ' ' + request.user.last_name
    if request.method == 'POST':
        if request.POST.dict()['tur'] == 'valfmontaj':
            veris = json.loads(request.POST.dict()['veri'])
            print(veris)
            t = Uretim(tur='valfmontaj' , okunan_deger = veris[0] ,personel = request.user.get_full_name())
            t.save(force_insert=True)
            # Announce production start.
            b = Bildirim(tur="baslangic",kisi = request.user.get_full_name())
            b.save(force_insert=True)
        elif request.POST.dict()['tur'] == 'kurlenme':
            veris = json.loads(request.POST.dict()['veri'])
            '''neval
            if not Uretim.objects.all():
                vsn = 1
            else:
                a = Uretim.objects.all().order_by('-vsn').values()[0]
                s = a['vsn']
                vsn = s + 1
            v = Valf(vsn=vsn, is_emri=veris[0])
            v.save(force_insert=True)
            e = Emir.objects.get(is_emri=veris[0])
            e.durum = 'Aktif'
            e.save()
            t = Uretim(tur='montaj_kurlenme' ,vsn = vsn, is_emri = veris[0] ,personel = request.user.get_full_name(),alt_nipel_no = veris[1],bakir_membran_no = veris[2],ust_nipel_no = veris[3],manometre_no = veris[4],basincanahtari_no = veris[5],montaj_kurlenme_zamani=timezone.now()+timezone.timedelta(minutes=10))
            t.save(force_insert=True)
            return HttpResponse(str(vsn))
            '''
            print("deneme")
            # this part will be revised later (Berker)
            # e = Emir.objects.get(is_emri=veris[0])
            # e.durum = 'Aktif'
            # e.save()
            is_emri_adi=veris[0]
            emir=Emir.objects.get(is_emri= is_emri_adi)
            personel_id=request.user.id
            alt_nipel_no = veris[1]
            bakir_membran_no = veris[2]
            ust_nipel_no = veris[3]
            manometre_no = veris[4]
            basincanahtari_no = veris[5]
            sibop = veris[6]
            print("deneme2")
            try:
                kayit_tarihi=timezone.now()
                #kurlenme_bitis=timezone.now()+timezone.timedelta(minutes=10)
                valf_montaj = Valf_montaj(montaj_personel_id= personel_id, alt_nipel_no=alt_nipel_no,bakir_membran_no=bakir_membran_no,ust_nipel_no=ust_nipel_no,manometre_no=manometre_no,basincanahtari_no=basincanahtari_no,montaj_tarihi=kayit_tarihi,sibop=sibop)
                valf_montaj.save()
                # New valve starts its life attached to this assembly record.
                valf = Valf(is_emri=emir,valf_montaj=valf_montaj)
                valf.save()
                # The new valve id is echoed back to the client.
                return HttpResponse(str(valf.id))
            except Exception as err:
                print(" KAyıt HAstası > ", err)
        elif request.POST.dict()['tur'] == 'valftest':
            try:
                valf_seri_no = json.loads(request.POST.dict()['valf_seri_no'])
                uygun = json.loads(request.POST.dict()['uygun'])
                valf = Valf.objects.get(id=valf_seri_no )
                personel_id=User.objects.get(id=request.user.id)
                test_tarihi=timezone.now()
                acma = str(uygun)
                kapama = str(uygun)
                sebep = str(uygun)
                if (uygun==True):
                    sebep=None
                valf_test= Valf_test( test_personel=personel_id,test_tarihi=test_tarihi,uygun=uygun)
                valf_test.save()
                valf.valf_test=valf_test
                valf.save()
            except Exception as err:
                print(err)
        elif request.POST.dict()['tur'] == 'valfgovde':
            veri = json.loads(request.POST.dict()['veri'])
            '''neval
            v = Valf.objects.get(vsn=veri[3])
            is_emri = v.is_emri
            print('veri[5],sodyum miktarı:: ',veri[5] )
            t = Uretim.objects.get(vsn=veri[3])
            t.tur='govde_kurlenme'
            t.tork_degeri = veri[0]
            t.uygunluk = veri[1]
            t.sebep = veri[2]
            t.tsn = veri[4]
            t.personel = request.user.get_full_name()
            t.govde_kurlenme_zamani=timezone.now()+timezone.timedelta(minutes=10)
            # t = Uretim(tur='valfgovde',tork_degeri = veri[0] ,is_emri=is_emri, uygunluk = veri[1] , sebep = veri[2],
            #            vsn = veri[3],tsn = veri[4], personel = request.user.get_full_name(),govde_kurlenme_zamani=timezone.now()+timezone.timedelta(minutes=10))
            t.save()
            '''
            valf_seri_no=veri[3]
            valf = Valf.objects.get(id=valf_seri_no )
            valf.durum='valf_govde'
            valf.save()
            personel_id=request.user.id
            kayit_tarihi=timezone.now()
            # Body cure takes 10 minutes from now.
            kurlenme_bitis=timezone.now()+timezone.timedelta(minutes=10)
            tork=veri[0]
            tup_seri_no=veri[4]
            sodyum_miktari=veri[5]
            uygunluk=veri[1]
            sebep=veri[2]
            if (uygunluk=='on'):
                sebep=None
            valf_govde= Valf_govde(valf=valf, personel_id=personel_id,kayit_tarihi=kayit_tarihi,kurlenme_bitis=kurlenme_bitis,tork=tork,tup_seri_no=tup_seri_no,sodyum_miktari=sodyum_miktari,uygunluk=uygunluk,sebep=sebep)
            valf_govde.save()
        elif request.POST.dict()['tur'] == 'fm200':
            veri = json.loads(request.POST.dict()['veri'])
            '''neval
            v = Valf.objects.get(vsn=veri[4])
            is_emri = v.is_emri
            print(veri)
            t = Uretim.objects.get(vsn=veri[4])
            t.tur='fm200_kurlenme'
            t.bos_agirlik = veri[0]
            t.rekorlu_agirlik = veri[1]
            t.fm200 = veri[2]
            t.azot = veri[3]
            t.personel = request.user.get_full_name()
            t.fm200_kurlenme_zamani=timezone.now()+timezone.timedelta(minutes=10)
            t.save()
            '''
            valf_seri_no=veri[4]
            valf = Valf.objects.get(id=valf_seri_no )
            valf.durum='valf_fm200'
            valf.save()
            personel_id=request.user.id
            kayit_tarihi=timezone.now()
            kurlenme_bitis=timezone.now()+timezone.timedelta(minutes=10)
            bos_agirlik =veri[0]
            rekorlu_agirlik=veri[1]
            fm200 = veri[2]
            azot = veri[3]
            valf_fm200= Valf_fm200(valf=valf, personel_id=personel_id,kayit_tarihi=kayit_tarihi,kurlenme_bitis=kurlenme_bitis, bos_agirlik =bos_agirlik,rekorlu_agirlik=rekorlu_agirlik, fm200 = fm200,azot = azot)
            valf_fm200.save()
        elif request.POST.dict()['tur'] == 'havuztest':
            veri = json.loads(request.POST.dict()['veri'])
            '''neval
            print(veri)
            v = Valf.objects.get(vsn=veri[0])
            is_emri = v.is_emri
            t = Uretim(tur='havuztest',vsn = veri[0],tsn = veri[0],is_emri=is_emri , uygunluk = veri[1] ,
                acma = veri[2], kapatma = veri[3],sebep = veri[4], personel = request.user.get_full_name())
            t.save(force_insert=True)
            '''
            print("veri",veri)
            valf_seri_no=veri[0]
            valf = Valf.objects.get(id=valf_seri_no )
            valf.durum='valf_havuz_test'
            valf.save()
            personel_id=request.user.id
            kayit_tarihi=timezone.now()
            uygunluk= veri[1]
            tup_cidar_sicaklik =veri[2]
            tup_basinc = veri[3]
            sebep=veri[4]
            if (uygunluk):
                sebep=None
            valf_havuz= Valf_havuz(valf=valf, personel_id=personel_id,kayit_tarihi=kayit_tarihi,tup_cidar_sicaklik=tup_cidar_sicaklik, tup_basinc =tup_basinc,uygunluk=uygunluk, sebep = sebep)
            valf_havuz.save()
        elif request.POST.dict()['tur'] == 'finalmontaj':
            veri = json.loads(request.POST.dict()['veri'])
            '''neval
            print(veri)
            v = Valf.objects.get(vsn=veri[1])
            is_emri = v.is_emri
            t = Uretim.objects.get(vsn=veri[1])
            t.tur='finalmontaj'
            t.etiket_seri_no = veri[0]
            t.fsn = veri[2]
            t.funye_seri_omaj = veri[3]
            t.basinc_anahtari_omaj = veri[4]
            t. personel = request.user.get_full_name()
            #t = Uretim(tur='finalmontaj',etiket_seri_no = veri[0],is_emri=is_emri , vsn = veri[1] , fsn = veri[2],
            #           funye_seri_omaj = veri[3],basinc_anahtari_omaj = veri[4], personel = request.user.get_full_name())
            t.save()
            tup_sayisi_str=Emir.objects.filter(is_emri=is_emri).values()[0]['tup_sayisi']
            '''
            valf_seri_no=veri[1]
            valf = Valf.objects.get(id=valf_seri_no )
            valf.durum='valf_final_montaj'
            valf.save()
            personel_id=request.user.id
            kayit_tarihi=timezone.now()
            etiket_seri_no = veri[0]
            funye_seri_no = veri[2]
            funye_seri_omaj = veri[3]
            basinc_anahtari_omaj = veri[4]
            valf_final_montaj= Valf_final_montaj(valf=valf, personel_id=personel_id,kayit_tarihi=kayit_tarihi,etiket_seri_no = etiket_seri_no,funye_seri_no = funye_seri_no ,funye_seri_omaj = funye_seri_omaj,basinc_anahtari_omaj = basinc_anahtari_omaj)
            valf_final_montaj.save()
            # Close the work order once every tube's valve reached final assembly.
            emir = Emir.objects.get(is_emri=valf.is_emri)
            emir_tup_sayisi = int(emir.tup_sayisi )
            emir_biten_valf_sayi = Valf.objects.filter(is_emri=emir,durum='valf_final_montaj').count()
            print('emir_biten_valf_sayi',emir_biten_valf_sayi)
            print('emir_tup_sayisi',emir_tup_sayisi)
            if(emir_biten_valf_sayi == emir_tup_sayisi):
                emir.durum = 'Bitmiş'
                emir.save()
                b = Bildirim(tur = "bitis" , kisi = request.user.get_full_name())
                b.save(force_insert=True)
    now = timezone.now()
    #montajkurlenmesi=Valf_montaj.objects.filter(kurlenme_bitis_tarihi__gte=now)
    montajkurlenmesi=Valf_montaj.objects.all()
    # govdekurlenmesi=Valf_govde.objects.filter(kurlenme_bitis__gte=now)
    fm200kurlenmesi=Valf_fm200.objects.filter(fm200_kurlenme_bitis_tarihi__gte=now)
    #acikemirleri= Emir.objects.filter(durum__in=("Aktif","Başlanmamış"))
    acikemirleri=Emir.objects.filter(durum='Aktif').values()
    aktifemirler= Emir.objects.filter(durum="Aktif")
    #### done to avoid duplicate work-order entries ###############
    govde_emir = list(dict.fromkeys(Valf.objects.filter(valf_govde_id__isnull=False).values_list('is_emri_id',flat=True)))
    fm200_emir = list(dict.fromkeys(Valf.objects.filter(fm200_azot_id__isnull=False).values_list('is_emri_id',flat=True)))
    ###################################################
    #return render(request,'uretim-kontrol.html',{ 'acikemirleri':acikemirleri, 'grup': grup, 'birim': birim, 'ip': ip,'now':now, 'kurlenmes':montajkurlenmesi,'fm200kurlenmes':fm200kurlenmesi, 'govdekurlenmes': govdekurlenmesi ,'server' : server})
    return render(request,'uretim-kontrol.html',{'grup': grup, 'birim': birim, 'ip': ip,'now':now,'server':server, 'acikemirleri':acikemirleri,'fm200kurlenmes':fm200kurlenmesi,'kurlenmes':montajkurlenmesi,'aktifemirler':aktifemirler,'govde_emir':govde_emir,'fm200_emir':fm200_emir})
@csrf_exempt
def acikisemirleri(request):
    """Return the names of open work orders (active or not yet started) as JSON.

    FIX: the view built the list but never returned a response (a Django
    view returning None raises an error); it now returns the list as JSON,
    matching the other JSON endpoints in this module.
    """
    emirler = Emir.objects.filter(durum__in=("Aktif","Başlanmamış"))
    veri = [o['is_emri'] for o in emirler.values()]
    return JsonResponse(veri, safe=False)
@login_required
@csrf_exempt
def isemri(request):
    """Work-order view: create new work orders and reorder their priorities.

    A POST with tur='oncelik' carries a {work_order: priority} JSON map
    used to renumber priorities; any other POST is treated as a new
    work-order form submission.  GET renders the form pre-filled with the
    current user's full name.
    """
    mac = request.user_agent.os.family
    grup = request.user.grup
    birim = request.user.birim
    #Emir.objects.all().delete()
    fullname = request.user.first_name + ' ' + request.user.last_name
    emirler = Emir.objects.all()
    form = IsEmri(request.POST)
    if request.method == 'POST':
        if 'tur' in request.POST.dict():
            if request.POST.dict()['tur'] == 'oncelik':
                # Priority re-ordering: apply each order's new priority.
                veri = json.loads(request.POST.dict()['veri'])
                print(veri)
                for key in veri:
                    em = Emir.objects.get(is_emri=key)
                    em.oncelik = veri[key]
                    em.save()
                o = Bildirim(tur="oncelik")
                o.save()
                return HttpResponse('onceliktamam')
        else:
            if form.is_valid():
                # New orders are appended to the end of the priority queue.
                if not Emir.objects.all():
                    son_oncelik = 1
                else:
                    a = Emir.objects.all().order_by('-oncelik').values()[0]
                    s = a['oncelik']
                    son_oncelik = s + 1
                emir = form.save()
                emir.refresh_from_db()
                emir.is_emri = form.cleaned_data.get('is_emri')
                emir.urun_kodu = form.cleaned_data.get('urun_kodu')
                emir.baslangic = form.cleaned_data.get('baslangic')
                emir.bitis = form.cleaned_data.get('bitis')
                emir.emri_veren = form.cleaned_data.get('emri_veren')
                emir.tup_govde_turu = form.cleaned_data.get('tup_govde_turu')
                emir.valf_turu = form.cleaned_data.get('valf_turu')
                emir.renk = form.cleaned_data.get('renk')
                emir.emniyet_ventil_turu = form.cleaned_data.get('emniyet_ventil_turu')
                emir.siparis = form.cleaned_data.get('siparis')
                emir.fm200bosagirlikmindeger= form.cleaned_data.get('fm200bosagirlikmindeger')
                emir.fm200bosagirlikmaxdeger = form.cleaned_data.get('fm200bosagirlikmaxdeger')
                emir.fm200dolummiktarimindeger= form.cleaned_data.get('fm200dolummiktarimindeger')
                emir.fm200dolummiktarimaxdeger = form.cleaned_data.get('fm200dolummiktarimaxdeger')
                #if(request.user.grup == "planlama"):
                # Notify everyone that a new work order arrived.
                t = Bildirim(tur = "is emri",emri_veren_grup = grup, emri_veren = request.user.get_full_name(), is_emri = form.cleaned_data.get('is_emri'))
                t.save(force_insert=True)
                emir.oncelik = son_oncelik
                messages.success(request,'Emir başarıyla eklendi!')
                emir.save()
                form.full_clean()
                return(HttpResponseRedirect(reverse('isemri')))
            else:
                messages.warning(request,'İş emri eklenemedi.Lütfen tekrar deneyin!Hata: {}'.format(form.errors))
    else:
        form = IsEmri()
        form.fields["emri_veren"].initial = fullname
    return render(request,'is-emri.html', { 'form' : form , 'emirler': emirler , 'mac' : mac , 'fullname' : fullname ,'grup' : grup , 'birim': birim,'server' : server})
#@login_required
def yetkilendirme(request):
    """User-authorization view: IT managers/engineers create new accounts.

    NOTE(review): `grup` and `birim` are hard-coded to 'Yönetici'/'IT'
    instead of being read from the logged-in user (the real lookups are
    commented out below), so the permission check always passes, and the
    @login_required decorator is disabled -- this looks like leftover
    debug state; confirm before shipping.
    """
    mac = request.user_agent.os.family
    #grup = "yonetici"#request.user.grup
    #birim = request.user.birim
    grup = "Yönetici"
    birim = "IT"
    kullanicilar = User.objects.all()
    if grup == 'Yönetici' and birim == 'IT' or grup == 'Mühendis' and birim == 'IT':
        if request.method == 'POST':
            form = UserRegisterForm(request.POST)
            if form.is_valid(): #and profile_form.is_valid():
                user = form.save()
                user.refresh_from_db()
                user.first_name = form.cleaned_data.get('first_name')
                user.last_name = form.cleaned_data.get('last_name')
                user.grup = form.cleaned_data.get('grup')
                user.save()
                username = form.cleaned_data.get('username')
                password = form.cleaned_data.get('password1')
                messages.success(request,'{} isimli kullanıcı {} isimli gruba eklendi!'.format(username,user.grup))
                return(HttpResponseRedirect(reverse('yetkilendirme')))
            else:
                print(form.errors)
        else:
            form = UserRegisterForm()
        return render(request,'kullanici-yetkilendirme.html',{'form':form,'kullanicilar':kullanicilar , 'mac' : mac , 'grup' : grup, 'birim': birim,'server' : server})
    else:
        return(HttpResponseRedirect(reverse('403')))
@login_required
def performans(request):
    """Render the personnel-performance page with the full user list."""
    context = {
        'mac': request.user_agent.os.family,
        'grup': request.user.grup,
        'birim': request.user.birim,
        'kullanicilar': User.objects.all(),
        'server': server,
    }
    return render(request, 'performans.html', context)
@login_required
@csrf_exempt
def yazdir(request):
    """Print view.

    POST returns the work orders with the requested status as a JSON list
    of "name  timestamp" strings; GET renders the print page.
    """
    mac = request.user_agent.os.family
    grup = request.user.grup
    birim = request.user.birim
    # NOTE(review): the role check is disabled -- `if True:` always takes
    # this branch, leaving the 403 redirect below unreachable; confirm
    # whether the commented condition should be restored.
    if True:#grup == 'Yönetici' and birim == 'IT':
        if request.method == 'POST':
            i = Emir.objects.filter(durum=request.POST['durum'])
            temp = []
            for obj in i.values():
                times = obj['emir_zamani'].strftime("%d %B %Y (%H:%M:%S)")
                temp.append(obj['is_emri'] + " " + times)
            veri = list(temp)
            return JsonResponse(veri,safe=False)
        return render(request,'yazdir.html',{ 'mac' : mac , 'grup':grup, 'birim': birim,'server' : server})
    else:
        return(HttpResponseRedirect(reverse('403')))
@login_required
def ulogout(request):
    """Log the current user out and send them back to the login page."""
    logout(request)
    return HttpResponseRedirect(reverse('ulogin'))
@csrf_exempt
def ulogin(request):
    """Authenticate the posted credentials; on success go to the search page."""
    if request.method != 'POST':
        return render(request,'login.html',{})
    username = request.POST.get('username')
    password = request.POST.get('password')
    user = authenticate(username=username,password=password)
    if user is not None and user.is_active:
        login(request,user)
        print('{} kullanıcısı tarafından başarılı giriş'.format(username))
        return redirect('arama')
    if user is not None:
        # Account exists but is deactivated.
        messages.warning(request,'Kullanıcı adınızı yada parolanızı yanlış girdiniz.')
    else:
        print("Birisi login olmayı denedi ve başarısız oldu!")
        messages.warning(request,'Kullanıcı adınızı yada parolanızı yanlış girdiniz.')
    return HttpResponseRedirect(reverse('ulogin'))
def _403(request):
    """Render the shared 'forbidden' page."""
    context = {}
    return render(request, '403.html', context)
def handler404(request, exception):
    """Custom 404 handler; reuses the 403 template with a 404 status code."""
    return render(request, '403.html', status=404)
@csrf_exempt
def kullanicijson(request):
    """Look a user up by username and return its profile fields as JSON."""
    username = request.POST.get('username')
    rows = User.objects.filter(username=username).values('first_name','last_name','username','grup')
    return JsonResponse(list(rows), safe=False)
@csrf_exempt
def kullanicisil(request):
    """Delete the user with the POSTed username.

    FIX: ``QuerySet.delete()`` returns a ``(total, per_model)`` tuple,
    which is always truthy, so the old ``if sildi:`` check reported
    'silindi' even when nothing was deleted.  Check the deleted-row count
    instead.
    """
    username = request.POST.get('username')
    print(username)
    deleted_count, _ = User.objects.filter(username=username).delete()
    if deleted_count:
        return HttpResponse('silindi')
    else:
        return HttpResponse('silinemedi')
@csrf_exempt
def kullaniciduzelt(request):
    """Update an existing user's profile fields from the POSTed JSON blob."""
    bilgi = json.loads(request.POST.get('bilgi'))
    # 'eskisi' carries the current username; the remaining keys replace it.
    hesap = User.objects.get(username=bilgi["eskisi"])
    for alan in ("username", "first_name", "last_name", "grup", "birim"):
        setattr(hesap, alan, bilgi[alan])
    hesap.save()
    return HttpResponse('duzeltildi')
@csrf_exempt
def passwordreset(request):
    """Set a new password ('ps1') for the POSTed username."""
    yeni_parola = request.POST.get('ps1')
    hedef = request.POST.get('username')
    if hedef:
        hesap = User.objects.get(username=hedef)
        hesap.set_password(yeni_parola)
        hesap.save()
        return HttpResponse('parola değiştirildi')
    return HttpResponse('bir hata var')
def get_first_and_lastname(username):
    """Return "First Last" for *username*, or a placeholder when not found.

    FIX: fetch the user once instead of running two identical queries, and
    narrow the bare ``except:`` (which even swallowed KeyboardInterrupt /
    SystemExit) to ``except Exception``.
    """
    try:
        user = User.objects.filter(username=username).first()
        return "{} {}".format(user.first_name, user.last_name)
    except Exception:
        # Missing user (first() returned None) or lookup failure.
        return 'isim soyisim'
@csrf_exempt
def pdf(request):
    """Render the production-history PDF for a valve and stream it inline.

    Expects ``qr`` (work-order token, first word is the order name) and
    ``vsn`` (valve id) in the query string; gathers every production-stage
    record for that valve, renders the PDF template via WeasyPrint and
    returns it as an inline ``application/pdf`` response.

    FIXES vs. the previous version:
    - the Valf row is fetched once instead of seven identical queries
    - the ~25 copy-pasted bare try/except blocks are replaced by one
      best-effort helper (bare ``except:`` also swallowed SystemExit)
    - ``valfTestUygun``'s except-branch re-evaluated the failing expression
      and crashed when the test record was missing; it now falls back to ''
    - the template context set the ``valfgovdePersonel`` key twice, the
      second time with the *test* personnel; the duplicate is removed
    - dropped the unreachable second ``return response``
    """
    def _field(getter, default=''):
        # Best-effort accessor: any missing related record yields *default*.
        try:
            return getter()
        except Exception:
            return default

    if request.GET.get('qr'):
        qr = request.GET.get('qr')
        print(qr.split(" ")[0])
        i = qr.split(" ")[0]
    print("---------------------")
    valf_no = request.GET.get('vsn')
    valf = Valf.objects.filter(id=valf_no).first()
    Valf_montaj_Data = Valf_montaj.objects.filter(id=valf.valf_montaj_id).first()
    Valf_fm200_Data = Valf_fm200.objects.filter(id=valf.fm200_azot_id).first()
    Valf_havuz_Data = Valf_havuz.objects.filter(id=valf.havuz_id).first()
    Valf_final_Data = Valf_final_montaj.objects.filter(id=valf.valf_final_montaj_id).first()
    Valf_test_Data = Valf_test.objects.filter(id=valf.valf_test_id).first()
    Valf_govde_Data = Valf_govde.objects.filter(id=valf.valf_govde_id).first()
    Emir_Data = Emir.objects.filter(is_emri=i).first()
    urun_seri_no = Valf_final_montaj.objects.filter(id=valf.valf_final_montaj_id).values_list('urun_seri_no', flat=True).first()
    print("---------------------")

    valfmontajPersonel = _field(lambda: get_first_and_lastname(User.objects.filter(id=Valf_montaj_Data.montaj_personel_id).first().username))
    valfmontajTarih = _field(lambda: Valf_montaj_Data.montaj_tarihi)
    altnipelno = _field(lambda: Valf_montaj_Data.alt_nipel_no)
    ustnipelno = _field(lambda: Valf_montaj_Data.ust_nipel_no)
    switchno = _field(lambda: Valf_montaj_Data.basincanahtari_no)
    manometreno = _field(lambda: Valf_montaj_Data.manometre_no)
    siboplotno = _field(lambda: Valf_montaj_Data.sibop)
    valftestPersonel = _field(lambda: get_first_and_lastname(User.objects.filter(id=Valf_test_Data.test_personel_id).first().username))
    valftestTarih = _field(lambda: Valf_test_Data.test_tarihi)
    valfTestUygun = _field(lambda: 'Uygun' if Valf_test_Data.uygun == True else 'Uygun Değil')
    valfgovdePersonel = _field(lambda: get_first_and_lastname(User.objects.filter(id=Valf_govde_Data.govde_personel_id).first().username))
    valfgovdeTarih = _field(lambda: Valf_govde_Data.govde_tarihi)
    valfGovdeUygun = _field(lambda: 'Uygun' if Valf_govde_Data.uygunluk == True else 'Uygun Değil')
    fm200Personel = _field(lambda: get_first_and_lastname(User.objects.filter(id=Valf_fm200_Data.fm200_personel_id).first().username))
    fm200Tarih = _field(lambda: Valf_fm200_Data.kayit_tarihi)
    bosAgirlik = _field(lambda: Valf_fm200_Data.bos_agirlik)
    doluAgirlik = _field(lambda: Valf_fm200_Data.dolu_agirlik)
    bar = _field(lambda: Valf_fm200_Data.bar)
    havuztestPersonel = _field(lambda: get_first_and_lastname(User.objects.filter(id=Valf_havuz_Data.havuz_personel_id).first().username))
    havuztestTarih = _field(lambda: Valf_havuz_Data.kayit_tarihi)
    havuzTestUygun = _field(lambda: 'Uygun' if Valf_havuz_Data.uygunluk == True else 'Uygun Değil')
    finalmontajPersonel = _field(lambda: get_first_and_lastname(User.objects.filter(id=Valf_final_Data.personel_id).first().username))
    finalmontajTarih = _field(lambda: Valf_final_Data.kayit_tarihi)
    membranTipi = _field(lambda: Emir_Data.valf_turu)
    ventilTipi = _field(lambda: Emir_Data.emniyet_ventil_turu)
    tugovdetipi = _field(lambda: Emir_Data.tup_govde_turu)

    print(valftestPersonel, Emir_Data.emniyet_ventil_turu)
    veri = "veri"
    html_string = render_to_string('external/pdf-template.html', {
        'veri': veri, "qr": urun_seri_no,
        'valfmontajPersonel': valfmontajPersonel, 'valfmontajTarih': valfmontajTarih,
        'valftestPersonel': valftestPersonel, 'valftestTarih': valftestTarih,
        'valfTestUygun': valfTestUygun, 'havuzTestUygun': havuzTestUygun,
        'valfgovdePersonel': valfgovdePersonel, 'valfgovdeTarih': valfgovdeTarih,
        'valfGovdeUygun': valfGovdeUygun,
        # Montaj/FM200/final stages have no stored accept flag yet.
        'valfMontajUygun': "Uygun*", 'fm200Uygun': "Uygun*", 'finalMontajUygun': "Uygun*",
        'fm200Personel': fm200Personel, 'fm200Tarih': fm200Tarih,
        'bosAgirlik': bosAgirlik, 'doluAgirlik': doluAgirlik,
        'havuztestPersonel': havuztestPersonel, 'havuztestTarih': havuztestTarih,
        'finalmontajPersonel': finalmontajPersonel, 'finalmontajTarih': finalmontajTarih,
        'altnipelno': altnipelno, 'ustnipelno': ustnipelno, 'switchno': switchno, 'manometreno': manometreno,
        'is_emri': i, 'membranTipi': membranTipi, 'ventilTipi': ventilTipi,
        'urunserino': urun_seri_no, 'bar': bar, 'tugovdetipi': tugovdetipi, 'siboplotno': siboplotno,
    }, request=request)
    html = HTML(string=html_string, base_url=request.build_absolute_uri())
    html.write_pdf(target='/tmp/' + qr + '.pdf')
    fs = FileSystemStorage('/tmp/')
    with fs.open(qr + '.pdf') as pdf:
        response = HttpResponse(pdf, content_type='application/pdf')
        response['Content-Disposition'] = 'inline; filename="pdf.pdf"'
        return response
#Test sonuçları
@csrf_exempt
def dashboard(request):
    """Return the test results of the last ``gun_sayisi`` days as JSON."""
    simdi = timezone.now()
    print(request.POST.get('gun_sayisi'))
    gun = int(request.POST.get('gun_sayisi'))
    baslangic = simdi - timezone.timedelta(days=gun)
    kayitlar = Test.objects.filter(test_tarihi__range=[baslangic, simdi])
    veri = [satir for satir in kayitlar.values()]
    print("dashboard", veri)
    return JsonResponse(veri, safe=False)
@csrf_exempt
def uretimdurum(request):
    """Return per-stage completed-valve counts (plus the order's tube total) as JSON."""
    i = request.POST.get('is_emri')
    print(i)
    veri = list()
    print(Valf.objects.filter(is_emri_id=i).values_list('valf_montaj_id',flat=True).count(),Valf.objects.filter(is_emri_id=i).filter(valf_test_id__isnull=False).values_list('valf_test_id',flat=True).count())
    try:
        # One entry per production stage, in fixed display order.
        for alan in ('valf_montaj_id', 'valf_test_id', 'valf_govde_id',
                     'fm200_azot_id', 'havuz_id', 'valf_final_montaj_id'):
            sayi = Valf.objects.filter(is_emri_id=i).filter(**{alan + '__isnull': False}).values_list(alan, flat=True).count()
            veri.append(sayi)
        veri.append(Emir.objects.filter(id=i).values()[0]['tup_sayisi'])
    except Exception as err:
        print(err)
        veri = [0,0,0,0,0,0,10]
    print(veri)
    return JsonResponse(veri,safe=False)
@csrf_exempt
def personeldurum(request):
    """Per-operator activity counters for the last POST['gun_sayisi'] days.

    Returns a JSON list of 12 counts for operator POST['personel']: six Test
    kinds (manometre, basinc, altnipel, ustnipel, bakirmembran, emniyet)
    followed by six Uretim steps (kurlenme, valftest, valfgovde, fm200,
    havuztest, finalmontaj). Falls back to a fixed list on any error.
    """
    p = request.POST.get('personel')
    g = request.POST.get('gun_sayisi')
    print(p, g)
    bugun = timezone.now()
    gun = int(g)
    kac_gun = bugun - timezone.timedelta(days=gun)
    veri = []
    try:
        # Base querysets once; the twelve near-identical filter chains of the
        # old version collapsed into two loops over the fixed type orders.
        tests = Test.objects.filter(test_tarihi__range=[kac_gun, bugun], testi_yapan=p)
        for tur in ('manometre', 'basinc', 'altnipel', 'ustnipel', 'bakirmembran', 'emniyet'):
            veri.append(tests.filter(tur=tur).count())
        works = Uretim.objects.filter(date__range=[kac_gun, bugun], personel=p)
        for tur in ('kurlenme', 'valftest', 'valfgovde', 'fm200', 'havuztest', 'finalmontaj'):
            veri.append(works.filter(tur=tur).count())
    except Exception as err:
        # Was a bare 'except:' that silently swallowed everything.
        print(err)
        veri = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10]
    print(veri)
    return JsonResponse(veri, safe=False)
@csrf_exempt
def tupTuru(request):
    """Return 'bos_agirlik;fm200;renk' for the Emir matching POST['is_emri'].

    On any failure (unknown work order, missing field) answers the literal
    body 'tur' instead.
    """
    if request.method == 'POST':
        try:
            u = Emir.objects.filter(is_emri=request.POST.dict()['is_emri']).first()
            bos_agirlik_miktari = u.bos_agirlik_miktari
            fm200_miktari = u.fm200_miktari
            renk = u.renk
            response = bos_agirlik_miktari + ';' + fm200_miktari + ';' + renk
            return HttpResponse(str(response))
        # Fix: the original 'except e:' raised NameError the moment any error
        # occurred (e.g. no matching Emir -> u is None), masking the real problem.
        except Exception as e:
            print(e)
        # Fix: a Django view must return an HttpResponse, not a bare str.
        return HttpResponse('tur')
@csrf_exempt
def getEmirNo(request):
    """Resolve an Emir primary key (POST 'veri') to its is_emri number.

    Answers the number as plain text, or 'NO' when the lookup fails.
    """
    if request.method == 'POST':
        emir_pk = request.POST.dict()['veri']
        print('getEmirNo', emir_pk)
        try:
            is_emri = Emir.objects.filter(id=emir_pk).values_list('is_emri', flat=True).first()
            return HttpResponse(str(is_emri))
        except:  # deliberately broad, mirrors the rest of this module
            return HttpResponse(str('NO'))
    return str('is_emri')
@csrf_exempt
def kontrolEt(request):
    """Check a scanned component against its incoming-goods test records.

    POST params: 'tur' (component type), 'veri' (scanned lot/serial value),
    'isemri' (work order, currently unused). Answers plain-text 'OK' or 'NO'.

    Most component types are a membership test of the scanned value in the
    matching ``Test`` column; 'valf_govde' instead verifies that the valve
    test linked to the given assembly id was marked suitable ('uygun').
    """
    if request.method == 'POST':
        tur = request.POST['tur']
        veri = request.POST['veri']
        isemri = request.POST['isemri']  # kept for request-contract parity
        r = "NO"
        # Component type -> (Test column holding accepted values, cast applied
        # to the scanned value first). Collapses seven copy-pasted branches.
        lookups = {
            'altnipel': ('lot_no', int),
            'ustnipel': ('baslangic_seri_no', str),
            'manometre': ('seri_no', str),
            'basinc': ('seri_no', str),
            'bakirmembran': ('lot_no', int),
            'emniyet': ('lot_no', str),
            'sibop': ('lot_no', int),
        }
        if tur in lookups:
            field, cast = lookups[tur]
            candidates = Test.objects.filter(tur=tur)
            try:
                if cast(veri) in candidates.values_list(field, flat=True):
                    r = 'OK'
                else:
                    r = 'NO'
            except Exception as e:
                # Bad cast (non-numeric scan) or DB trouble -> reject.
                print(e)
                r = 'NO'
        elif tur == 'valf_govde':
            try:
                valf_id = Valf.objects.filter(valf_montaj_id=veri).values_list('valf_test_id', flat=True).first()
                # Only a valve that has a test row AND passed it is accepted.
                if isinstance(valf_id, int) and \
                        Valf_test.objects.filter(id=valf_id).values_list('uygun', flat=True).first():
                    r = 'OK'
                else:
                    r = 'NO'
            except Exception:
                r = 'NO'
        return HttpResponse(r)
@csrf_exempt
def kurlenmeKontrol(request):
    """Answer 'OK'/'NO' for curing-time and PDF-readiness checks.

    POST params: 'tur' selects the check, 'veri' is the unit id (vsn or valve
    id depending on the check). 'OK' means the stored curing deadline has
    passed (or, for 'pdfkontrol', that a Valf row exists for the assembly).
    """
    if request.method == 'POST':
        r = "NO"
        tur = request.POST['tur']
        vsn = request.POST['veri']
        print('kurlenmeKontrol',tur,vsn)
        if(tur == 'montaj_kurlenme'):
            # Assembly curing: done once the stored deadline is in the past.
            try:
                u = Uretim.objects.filter(vsn=vsn)
                print(u,"---------------")
                if(u.values()[0]['montaj_kurlenme_zamani']<timezone.now()):
                    r = 'OK'
                else:
                    r = 'NO'
            except:
                r = 'NO'
        elif (tur=='govde_kurlenme'):
            # Body curing: same deadline pattern, different column.
            try:
                u = Uretim.objects.filter(vsn=vsn)
                print('govde_kurlenme_zamani',u.values()[0]['govde_kurlenme_zamani'])
                print('now',timezone.now())
                if(u.values()[0]['govde_kurlenme_zamani']<timezone.now()):
                    r = 'OK'
                else:
                    r = 'NO'
            except:
                r = 'NO'
        elif (tur=='valf_test'):
            # Valve test may start only after the assembly's curing end date.
            print("içerdeyim-----> Valf Test")
            try:
                print(vsn,"----------------------------")
                valf_montaj_id = Valf.objects.filter(id=vsn).first().valf_montaj_id
                print(valf_montaj_id)
                tarih = Valf_montaj.objects.filter(id=valf_montaj_id).first().kurlenme_bitis_tarihi
                print(tarih)
                print(type(timezone.now()),timezone.now())
                print(type(tarih),tarih)
                if(tarih<timezone.now()):
                    print("büyüktür")
                    r='OK'
                else:
                    print("küçük")
                    r='NO'
            except Exception as err:
                print('r',err)
                r='NO'
        elif (tur=='pdfkontrol'):
            # A certificate PDF can be produced once a Valf row references
            # this assembly id.
            print(vsn)
            try:
                if Valf.objects.filter(valf_montaj_id=vsn).count():
                    r='OK'
                else:
                    r='NO'
            except Exception as err:
                r='NO'
                print(err)
    # NOTE(review): on a non-POST request 'r' is never bound, so this raises
    # UnboundLocalError (a 500) — confirm whether GET should answer 'NO'.
    return HttpResponse(r)
@csrf_exempt
def newVSN(request):
    """Hand out the next free production serial number as plain text.

    The next vsn is max(existing vsn) + 1, or 1 when the Uretim table is
    still empty.
    """
    if request.method == 'POST':
        # .first() on the ordered queryset fetches a single row; the old code
        # evaluated the whole table once for the truthiness test and again
        # via values()[0].
        last_vsn = Uretim.objects.order_by('-vsn').values_list('vsn', flat=True).first()
        vsn = 1 if last_vsn is None else last_vsn + 1
        print(vsn)
        return HttpResponse(str(vsn))
@csrf_exempt
def hardreset(request):
print('Hard')
|
5,249 | 3e8860c22ff3092304df57aa7f5dbcb6ccda7dd8 | from pymongo import MongoClient
from modules.linkedinSearch import SearchClass
from config import Config
class LinkedinSearch:
    """Caches LinkedIn search results in MongoDB and serves queries from it."""
    def __init__(self):
        self.client = MongoClient(Config.MONGO_URI)
        db = self.client.linkedin_db
        self.collection = db.search
        self.dict = {}
        self.obj = SearchClass()
    def db_check(self, query):
        """Run a live search, store results not yet cached (deduplicated by
        'userid'), then answer the query from the database cache."""
        r = self.obj.search(query)
        print(r)
        stored = 0
        for profile in r['results']:
            if not self.collection.find_one({'userid': profile['userid']}):
                stored += 1
                self.collection.insert_one(profile)
        print('no. of stored pages', stored)
        # Fix: fetch BEFORE closing the client. The original called
        # self.client.close() and then queried the same client again inside
        # db_fetch(), which raises on PyMongo 4 (closed clients are unusable).
        results = self.db_fetch(query)
        self.client.close()
        return {'data': results}
    # ---------------------fetching total number of query pages from database----------------------------------------
    def db_fetch(self, query):
        """Text-search the cached profiles by name, best matches first.

        The Mongo '_id' field is stripped from every returned document so the
        result is JSON-serializable.
        """
        self.collection.create_index([("name", "text")])
        lst = []
        cursor = self.collection.find(
            {"$text": {"$search": query}},
            {'score': {'$meta': "textScore"}}).sort([('score', {'$meta': "textScore"})])
        # Fix: dropped the unused 'total = cursor.count()' and counter 'n' —
        # Cursor.count() was removed in PyMongo 4 and neither value was used.
        for doc in cursor:
            doc.pop('_id')
            lst.append(doc)
        print('fetched pages from db', len(lst))
        return lst
if __name__ == '__main__':
    # Manual smoke test — needs a reachable MongoDB at Config.MONGO_URI and
    # a working SearchClass backend.
    obj = LinkedinSearch()
    print(obj.db_check("mark"))
|
5,250 | 9bd55a2f224acfa2cb34d0ca14a25e8864d644b3 | import os, subprocess
def greet(name):
    """Return a greeting that identifies the serving host and parent PID.

    Handy behind a load balancer: the reply shows which instance answered.
    """
    # check_output returns bytes that normally end in a newline. rstrip('\n')
    # removes it without eating the final character when no newline is present
    # (the old [:-1] slice silently truncated the hostname in that case).
    hostname = subprocess.check_output("hostname").decode("utf-8").rstrip("\n")
    return "Hello, {}! I'm {}#{}.".format(name, hostname, os.getppid())
|
5,251 | 3d01910ae1c163067f4a23b3cca109a7d9e193d5 | # -*- encoding: utf-8 -*-
# NOTE(review): auto-generated IDE helper stub (restored signatures) that
# shadows the builtin BaseException — for editor introspection only, never
# meant to be imported at runtime.
class BaseException(object):
    """ Common base class for all exceptions """
    def with_traceback(self, tb): # real signature unknown; restored from __doc__
        """
        Exception.with_traceback(tb) --
        set self.__traceback__ to tb and return self.
        """
        pass
    def __delattr__(self, *args, **kwargs): # real signature unknown
        """ Implement delattr(self, name). """
        pass
    def __getattribute__(self, *args, **kwargs): # real signature unknown
        """ Return getattr(self, name). """
        pass
    def __init__(self, *args, **kwargs): # real signature unknown
        pass
    @staticmethod # known case of __new__
    def __new__(*args, **kwargs): # real signature unknown
        """ Create and return a new object.  See help(type) for accurate signature. """
        pass
    def __reduce__(self, *args, **kwargs): # real signature unknown
        pass
    def __repr__(self, *args, **kwargs): # real signature unknown
        """ Return repr(self). """
        pass
    def __setattr__(self, *args, **kwargs): # real signature unknown
        """ Implement setattr(self, name, value). """
        pass
    def __setstate__(self, *args, **kwargs): # real signature unknown
        pass
    def __str__(self, *args, **kwargs): # real signature unknown
        """ Return str(self). """
        pass
    # Placeholder properties standing in for the C-level attributes.
    args = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    __cause__ = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    """exception cause"""
    __context__ = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    """exception context"""
    __suppress_context__ = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    __traceback__ = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    __dict__ = None # (!) real value is "mappingproxy({'__repr__': <slot wrapper '__repr__' of 'BaseException' objects>, '__str__': <slot wrapper '__str__' of 'BaseException' objects>, '__getattribute__': <slot wrapper '__getattribute__' of 'BaseException' objects>, '__setattr__': <slot wrapper '__setattr__' of 'BaseException' objects>, '__delattr__': <slot wrapper '__delattr__' of 'BaseException' objects>, '__init__': <slot wrapper '__init__' of 'BaseException' objects>, '__new__': <built-in method __new__ of type object at 0x00007FFC49400810>, '__reduce__': <method '__reduce__' of 'BaseException' objects>, '__setstate__': <method '__setstate__' of 'BaseException' objects>, 'with_traceback': <method 'with_traceback' of 'BaseException' objects>, '__suppress_context__': <member '__suppress_context__' of 'BaseException' objects>, '__dict__': <attribute '__dict__' of 'BaseException' objects>, 'args': <attribute 'args' of 'BaseException' objects>, '__traceback__': <attribute '__traceback__' of 'BaseException' objects>, '__context__': <attribute '__context__' of 'BaseException' objects>, '__cause__': <attribute '__cause__' of 'BaseException' objects>, '__doc__': 'Common base class for all exceptions'})"
# __context__ :当在except子异常或finally子异常中引发(或重新引发)异常时,(既有多个try)
# __context__ 被自动设置为捕获的最后一个异常;如果没有处理新的异常,最终显示的回溯将包括最初的异常和最终的异常
# try:
# try:
# raise ValueError("ValueError")
# except ValueError as first:
# raise TypeError("TypeError") from first
# except TypeError as second:
# print("The exception was", repr(second))
# print("Its __context__ was", repr(second.__context__))
# print("Its __cause__ was", repr(second.__cause__))
#
# The exception was TypeError('TypeError')
# Its __context__ was ValueError('ValueError')
# Its __cause__ was ValueError('ValueError')
## 必须要有raise from
## context 为raise from 的 excepton
# try:
# try:
# raise AttributeError("1")
# except Exception as e1:
# raise AttributeError from e1
# except AttributeError as exc_1:
# print("context::",repr(exc_1.__context__))
# print("cause::",repr(exc_1.__cause__))
# AttributeError 是raise from e1 即context,cause为 e1
# print("context::",repr(exc_1.__context__))
# print("cause::",repr(exc_1.__cause__))
# try:
# try:
# try:
# raise AttributeError("1")
# except Exception as e1:
# raise AttributeError("2") from e1
# except AttributeError as e2:
# print("context::",repr(e2.__context__))
# print("cause::",repr(e2.__cause__))
# # context:: AttributeError('1')
# # cause:: AttributeError('1')
# raise AttributeError("3") from e2
# except AttributeError as e3:
# print("context::", repr(e3.__context__))
# print("cause::", repr(e3.__cause__))
# context:: AttributeError('2')
# cause:: AttributeError('2')
# with_traceback(tb)
# This method sets tb as the new traceback for the exception and returns the exception object.
# 即设置异常 的trackback
# try:
# raise AttributeError("1")
# except AttributeError as exc:
# import sys
# tb = sys.exc_info()[2]
# raise AttributeError("2")
# Traceback (most recent call last):
# File "F:/PYTHON_CODE/python-expections-learn/_base_exception.py", line 125, in <module>
# raise AttributeError("1")
# AttributeError: 1
#
# During handling of the above exception, another exception occurred:
#
# Traceback (most recent call last):
# File "F:/PYTHON_CODE/python-expections-learn/_base_exception.py", line 129, in <module>
# raise AttributeError("2")
# AttributeError: 2
# try:
# raise AttributeError("1")
# except AttributeError as exc:
# import sys
# tb = sys.exc_info()[2]
# raise AttributeError("2").with_traceback(tb)
# Traceback (most recent call last):
# File "F:/PYTHON_CODE/python-expections-learn/_base_exception.py", line 125, in <module>
# raise AttributeError("1")
# AttributeError: 1
#
# During handling of the above exception, another exception occurred:
#
# Traceback (most recent call last):
# File "F:/PYTHON_CODE/python-expections-learn/_base_exception.py", line 129, in <module>
# raise AttributeError("2").with_traceback(tb)
# File "F:/PYTHON_CODE/python-expections-learn/_base_exception.py", line 125, in <module>
# raise AttributeError("1")
# AttributeError: 2
# Demonstrates with_traceback(): re-raise a fresh exception while attaching
# the traceback captured from the one just handled.
# NOTE(review): this runs at import time and always terminates the module with
# AttributeError("3") — intentional for a teaching snippet, but it makes the
# file unimportable.
try:
    try:
        raise AttributeError("1")
    except AttributeError as exc1:
        raise AttributeError("2")
except AttributeError as exc2:
    import sys
    # Capture the traceback of the exception currently being handled ("2").
    tb = sys.exc_info()[2]
    raise AttributeError("3").with_traceback(tb)
# Traceback (most recent call last):
# File "F:/PYTHON_CODE/python-expections-learn/_base_exception.py", line 173, in <module>
# raise AttributeError("3").with_traceback(tb)
# File "F:/PYTHON_CODE/python-expections-learn/_base_exception.py", line 169, in <module>
# raise AttributeError("2") from exc1
# AttributeError: 3
|
5,252 | 4a09096abf073294afcf21b1eff9350329d4db33 | import json
import pika
import urllib.request
def validate_urls():
    """Consume URL-validation tasks from RabbitMQ's 'urlValidationQueue' forever.

    Each delivered message is handed to validate_url(); blocks in
    start_consuming() until interrupted.
    """
    connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
    channel = connection.channel()
    channel.queue_declare(queue='urlValidationQueue')
    # NOTE(review): positional-callback + no_ack is the pika < 1.0 signature;
    # pika >= 1.0 expects basic_consume(queue=..., on_message_callback=...,
    # auto_ack=...) — confirm the pinned pika version.
    channel.basic_consume(validate_url,
                          queue='urlValidationQueue',
                          no_ack=True)
    channel.start_consuming()
def validate_url(ch, method, properties, body):
    """RabbitMQ callback: check one GitHub link and report the result.

    *body* is a JSON message with 'url' (path under github.com) and 'id'
    (post id). The post service at localhost:5002 is then told whether the
    link was accessible.
    """
    message = json.loads(body)
    valid = True
    print(f'Got new URL to check: {message["url"]}.')
    try:
        urllib.request.urlopen('https://github.com/' + message["url"])
    # Fix: HTTPError is only raised for error statuses, so the old
    # 'if e.code != 200' test was dead code; and plain network failures
    # (URLError: DNS, refused connection) crashed the worker. HTTPError
    # subclasses URLError, so this single handler covers both.
    except urllib.error.URLError:
        valid = False
    print(f'Checking done. Link accessible: {valid}.')
    request = urllib.request.Request('http://localhost:5002/post/' + str(message["id"]) + '/update',
                                     json.dumps({'link_accessible': valid}).encode('utf8'), method='POST',
                                     headers={'content-type': 'application/json'})
    urllib.request.urlopen(request)
    print(f'Post status updated.')
if __name__ == '__main__':
    # Blocks forever consuming from RabbitMQ; stop with Ctrl-C.
    print("Validator worker started. Waiting for tasks to do...")
    validate_urls()
|
5,253 | d40e1cfa2ef43f698e846c25ac9f5471d69e71a0 | # Generated by Django 2.2.5 on 2020-01-05 04:05
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the News table.
    # Do not hand-edit retroactively once applied; add a new migration instead.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='News',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.TextField(verbose_name='Title')),
                ('body', models.TextField(verbose_name='Body')),
                ('view_count', models.IntegerField(verbose_name='View Count')),
                ('root_category', models.CharField(max_length=64, verbose_name='Root Category')),
                ('category', models.CharField(max_length=64, verbose_name='Category')),
                ('image', models.TextField(verbose_name='Image')),
                ('publish_time', models.TimeField(verbose_name='Publish Time')),
                ('publish_date', models.DateField(verbose_name='Date')),
                ('lead', models.TextField(verbose_name='Lead Text')),
            ],
        ),
    ]
|
5,254 | 45b56103db0a72ebbc7de340c4293e1f70552414 | #Проверяем, является ли введенная пользователем строка полиндромом
# Check whether a user-supplied string is a palindrome.
def is_palindrome(text):
    """Return True if *text* reads the same forwards and backwards."""
    return text == text[::-1]

if __name__ == '__main__':
    # Guarding the interactive part keeps the module importable (and testable).
    entered = input('Enter something: ')
    # Fix: user-facing messages spelled "polindrom"; corrected to "palindrome".
    if is_palindrome(entered):
        print('You entered a palindrome!')
    else:
        print('Your string is not a palindrome')
|
5,255 | 4eb3d94a5fd22fc29000ec32475de9cbae1c183a | # OpenWeatherMap API Key
# SECURITY(review): a live-looking OpenWeatherMap API key is committed to
# source control — rotate it and load it from an environment variable or an
# untracked config file instead.
api_key = "078c8443640961d5ce547c8269db5fd7"
|
5,256 | 2ffd0de2888872cfa664919fcfc54b8e60b03280 | #! /usr/local/env python
#coding:utf-8
import urllib.request
import urllib.error
# Demo: probe a URL and report the HTTP status code and/or failure reason.
# NOTE(review): performs a network request at import time.
try:
    urllib.request.urlopen("http://blog.csdn.net/jo_andy")
except urllib.error.URLError as e:
    # HTTPError carries .code; plain URLError (DNS, refused) only .reason.
    if hasattr(e,"code"):
        print(e.code)
    if hasattr(e,'reason'):
        print(e.reason)
5,257 | 7b4c2689ad1d4601a108dd8aa6e3c4d1e9730dc5 |
#Merge Sort
#O(nlogn)
#Merge Part
from __future__ import division #use for python2
def merge(A, B):
    """Merge two already-sorted lists A and B into one sorted list.

    Stable with respect to A: on ties the element from A is taken first.
    """
    (C, m, n) = ([], len(A), len(B))
    (i, j) = (0, 0)  # current positions in A, B
    while (i + j) < (m + n):  # i+j elements merged so far
        if i == m:  # A exhausted
            C.append(B[j])
            j = j + 1
        elif j == n:  # B exhausted
            C.append(A[i])
            i = i + 1
        elif A[i] <= B[j]:  # head of A smaller OR EQUAL
            # Fix: the original used strict '<' and '>' with an 'else: pass',
            # so equal heads advanced neither index -> infinite loop.
            C.append(A[i])
            i = i + 1
        else:  # head of B smaller
            C.append(B[j])
            j = j + 1
    return C
#A = range(0, 100, 2) # generate the lists
#B = range(1, 75, 2) # generate the lists
#print merge(A, B)
#print "\n"
#print len(A) + len(B)
#Sort Part
def mergeSort(A, left, right):
    """Return a sorted copy of the slice A[left:right]; A itself is untouched."""
    span = right - left
    if span <= 1:
        # Base case: an empty or single-element slice is already sorted.
        return A[left:right]
    midpoint = (left + right) // 2
    lower = mergeSort(A, left, midpoint)
    upper = mergeSort(A, midpoint, right)
    return merge(lower, upper)
# Fix: 'range(...) + range(...)' only works on Python 2 where range returns a
# list; wrapping in list() keeps the same value and runs on Python 3 too.
a = list(range(1, 100, 2)) + list(range(0, 100, 2))
#print a
#print mergeSort(a, 0, len(a))
|
5,258 | 240f5e9cbb38f319b6e03b1b7f9cae7655ac4385 | """
*** Three Number Sum ***
Write a function that takes in a non-empty array of distinct integers and an integer representing a target sum. The function
should find all triplets. The numbers in each triplet should be ordered in ascending order, and the triplets themeselves
should be ordered in ascending order with respect to the number they hold.
If no theree numbers sum up to the target sum, the function should return an empty array.
Sample Input:
array = [12, 3, 1, 2, -6, 5, -8, 6]
target sum = 0
Sample Output:
[[-8, 2, 6], [-8, 3, 5], [-6, 1, 5]]
"""
# O(n^2) time | O(n) space
# O(n^2) time | O(n) space
def threeNumberSum(array, targetSum):
    """Return every ascending triplet in *array* that sums to *targetSum*.

    The input list is sorted in place; the triplets come out ordered by their
    smallest, then middle, element. Returns [] when no triplet matches.
    """
    array.sort()
    triplets = []
    for anchor in range(len(array) - 2):
        lo, hi = anchor + 1, len(array) - 1
        # Squeeze the two pointers toward each other until they meet.
        while lo < hi:
            total = array[anchor] + array[lo] + array[hi]
            if total < targetSum:
                lo += 1
            elif total > targetSum:
                hi -= 1
            else:
                triplets.append([array[anchor], array[lo], array[hi]])
                lo += 1
                hi -= 1
    return triplets
# def threeNumberSum(array, targetSum):
# array.sort()
# current_pointer = 0
# answer = []
#
# while current_pointer < len(array)-2:
# #reset the left and right pointer every time the inner loop end
# left_pointer = current_pointer + 1
# right_pointer = len(array) - 1
#
# while left_pointer < right_pointer:
# sum = array[current_pointer] + array[left_pointer] + array[right_pointer]
# if sum == targetSum:
# answer.append([array[current_pointer], array[left_pointer], array[right_pointer]])
# #when we find the target sum, we need to move both pointer.
# left_pointer += 1
# right_pointer -= 1
# elif sum < targetSum:
# left_pointer += 1
# else:
# right_pointer -= 1
# current_pointer += 1
# return answer
#test
# Manual smoke test; expected output: [[-8, 2, 6], [-8, 3, 5], [-6, 1, 5]]
array = [12, 3, 1, 2, -6, 5, -8, 6]
target = 0
print(threeNumberSum(array, target))
|
5,259 | 6b2bd6954f188626fa857ffc37611d3f971d22e2 | from command import Command, is_command, CommandException
from event import Event
class ItemInfo(Command):
    """Command that reports details about an item the player can see."""
    @is_command
    def item_info(self, player, *args):
        """Build an 'item-info' Event for the item id in args[0].

        The id is looked up first in the player's inventory, then among the
        objects at the player's current location.

        Raises:
            CommandException: NOT_ENOUGH_ARGUMENTS when no item id was given,
                UNKNOWN_ITEM when the id matches nothing visible.
        """
        if len(args) == 0:
            raise CommandException(CommandException.NOT_ENOUGH_ARGUMENTS)
        item_id = args[0]
        if item_id in player.inventory:
            item = player.inventory[item_id]
        elif item_id in player.location.lobjects:
            item = player.location.lobjects[item_id]
        else:
            raise CommandException(CommandException.UNKNOWN_ITEM)
        return Event('item-info', {"item": item.to_dict()})
5,260 | bfcf6e241881c4f668f926e087ab0f7dcad61dee | from django import forms
from acl.models import Alert
class CreateAlertForm(forms.ModelForm):
    """ModelForm for creating Alert rows; hides bookkeeping fields from the user."""
    class Meta:
        model = Alert
        # Fields managed by the application rather than the submitter.
        # NOTE(review): 'CAMT_Reveiewer' mirrors the (misspelled) field name on
        # acl.models.Alert — it must stay in sync with the model.
        exclude = ['role','age_analysis','Date_Uploaded','alias_name','CAMT_Reveiewer','Date_Regularised','alert_message', 'Count2']
5,261 | d70d3d8eef711441ac89c2d98c72a5f95e0ab20d | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This script reads in video information frame-by-frame, and then calculates
visual edge information for each frame, storing the information in a vector.
This can be averaged within TRs in an fMRI analysis to 'regress out'
high-frequency visual information in the video.
@author: zreagh
"""
import cv2
import numpy as np
# Can uncomment this pyplot import for frame plotting - see below
#from matplotlib import pyplot as plt
# Define the paths to your video file and eventual JPEG image files
# Input video and the directory where the per-frame JPEGs are read from.
vidpath = '/Users/zreagh/Desktop/edge_vector_analysis/test.mov'
imgpath = '/Users/zreagh/Desktop/edge_vector_analysis/'
# Output CSV, opened at import time in the current working directory and
# written row-by-row by AnalyzeFrames(). NOTE(review): the handle is a module
# global and is never explicitly closed.
edge_outfile = open('edge_outfile.csv','w')
edge_outfile.write('frame,prop_edge_pix\n')
# Function to extract video info including frames
def AnalyzeFrames(vidpath):
    """Split the video at *vidpath* into JPEG frames and score their edges.

    Every frame is saved as '<imgpath>frameN.jpg'; for each one the fraction
    of Canny edge pixels is appended as a row to the module-level
    edge_outfile CSV.
    """
    print("\nGetting video info & writing out image files for each frame...\n")
    vidObj = cv2.VideoCapture(vidpath)
    fps = vidObj.get(cv2.CAP_PROP_FPS)
    print("Frames per second: {0}\n".format(fps))
    count = 0
    jpeglist = []  # image names, in frame order, for the edge pass below
    while True:
        success, frame = vidObj.read()
        if not success:
            # Fix: the original wrote the image BEFORE checking `success`,
            # producing one trailing empty file (the mysterious "empty 193rd
            # image" the old comments worked around with a hard-coded slice).
            break
        # Fix: write next to imgpath so the read-back below finds the files
        # regardless of the current working directory.
        cv2.imwrite(imgpath + "frame{0}.jpg".format(count), frame)
        jpeglist.append("frame{0}.jpg".format(count))
        count += 1
    vidObj.release()
    print('Total number of frames: {0}\n'.format(count))
    print('Video duration in seconds: {0}\n'.format(round(count / fps)))
    print("Analyzing visual edges and writing output file...\n")
    # With the phantom last frame gone, the whole list can be processed.
    for jpeg in jpeglist:
        img = cv2.imread(imgpath + jpeg, 0)
        edges = cv2.Canny(img, 100, 200)
        n_pix = np.sum(edges > -1)          # total pixel count of the frame
        n_white_pix = np.sum(edges == 255)  # pixels Canny marked as edges
        prop_edge_pix = float(n_white_pix / n_pix)
        edge_outfile.write('{0},{1}\n'.format(jpeg, prop_edge_pix))
    edge_outfile.flush()  # make all rows visible even if the process lingers
    print("Done! Check your output file: edge_outfile.csv")
# Do the damn thing
if __name__ == '__main__':
    # Entry point: analyze the hard-coded video path defined above.
    AnalyzeFrames(vidpath)
5,262 | 604c94e50b1fb9b5e451c4432113498410a4ac1f | #!/g/kreshuk/lukoianov/miniconda3/envs/inferno/bin/python3
# BASIC IMPORTS
import argparse
import os
import subprocess
import sys
import numpy as np
# INTERNAL IMPORTS
from src.datasets import CentriollesDatasetOn, CentriollesDatasetBags, GENdataset
from src.utils import get_basic_transforms, log_info, get_resps_transforms
import src.implemented_models as impl_models
# INFERNO IMPORTS
import torch
from inferno.trainers.basic import Trainer
from torch.utils.data import DataLoader
from inferno.trainers.callbacks.logging.tensorboard import TensorboardLogger
from inferno.trainers.callbacks.scheduling import AutoLR
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Run learning of simple CNN implementation')
parser.add_argument('--model_name', type=str, default='', help='Name of the model from models dir')
parser.add_argument('--test', action='store_true', help='Test this model on simpler dataset')
parser.add_argument('--features', action='store_true', help='Representation of repsponces')
parser.add_argument('--mil', action='store_true', help='Continue learning on the bag lavel')
parser.add_argument('--id', type=str, default='default', help='Unique net id to save')
parser.add_argument('--img_size', type=int, default=60, help='Size of input images')
args = parser.parse_args()
log_info('Params: ' + str(args))
if args.mil:
train_tr, test_tr = get_resps_transforms(features=args.features)
if args.test:
train_ds = GENdataset(transform=train_tr, bags=False, crop=True)
test_ds = GENdataset(train=False, transform=test_tr, bags=False, crop=True)
log_info('Artificial MIL data is used')
else:
train_ds = CentriollesDatasetBags(transform=train_tr,
inp_size=512, bags=False, crop=True)
test_ds = CentriollesDatasetBags(train=False, transform=test_tr,
inp_size=512, bags=False, crop=True)
log_info('MIL dataset is used')
else:
train_tr, test_tr = get_basic_transforms()
if args.test:
train_ds = CentriollesDatasetOn(transform=train_tr,
pos_dir='dataset/mnist/1',
neg_dir='dataset/mnist/0', inp_size=args.img_size)
test_ds = CentriollesDatasetOn(transform=test_tr,
pos_dir='dataset/mnist/1',
neg_dir='dataset/mnist/0', inp_size=args.img_size, train=False)
log_info('Test bags dataset is used')
else:
train_ds = CentriollesDatasetOn(transform=train_tr,
pos_dir='dataset/artificial/train_pos/',
neg_dir='dataset/artificial/train_neg/',
inp_size=args.img_size, all_data=True)
test_ds = CentriollesDatasetOn(transform=test_tr,
pos_dir='dataset/artificial/test_pos/',
neg_dir='dataset/artificial/test_neg/',
inp_size=args.img_size, all_data=True)
log_info('ILC dataset is used')
train_dl = DataLoader(train_ds, batch_size=4, shuffle=True, num_workers=0)
test_dl = DataLoader(test_ds, batch_size=4, shuffle=True, num_workers=0)
log_info('Datasets are initialized!')
# DIRS AND MODEL
exec("model = impl_models.%s" % (args.model_name))
model_dir = os.path.join('models', args.model_name)
curent_model_dir = os.path.join(model_dir, args.id)
log_info('Model will be saved to %s' % (curent_model_dir))
log_info(' + Number of params: {}'.format(sum([p.data.nelement() for p in model.parameters()])))
weight_dir = os.path.join(curent_model_dir, 'weights')
log_info('Weights will be saved to %s' % (weight_dir))
if not os.path.exists(weight_dir):
os.mkdir(weight_dir)
logs_dir = os.path.join(curent_model_dir, 'logs')
if not os.path.exists(logs_dir):
os.mkdir(logs_dir)
log_info('Logs will be saved to %s' % (logs_dir))
# Build trainer
logger = TensorboardLogger(log_scalars_every=(1, 'iteration'),
log_images_every=(np.inf, 'epochs'))
def log_histogram(self, tag, values, bins=1000):
pass
logger.log_histogram = log_histogram
trainer = Trainer(model)\
.build_criterion('CrossEntropyLoss') \
.build_metric('CategoricalError') \
.build_optimizer('Adam') \
.validate_every((2, 'epochs')) \
.save_every((5, 'epochs')) \
.save_to_directory(weight_dir) \
.set_max_num_epochs(10000) \
.build_logger(logger, log_directory=logs_dir) \
.register_callback(AutoLR(0.96, (1, 'epochs'), monitor_momentum=0.9,
monitor_while='validating',
consider_improvement_with_respect_to='best'))
# Bind loaders
trainer \
.bind_loader('train', train_dl) \
.bind_loader('validate', test_dl)
if torch.cuda.is_available():
trainer.cuda()
trainer.fit()
|
5,263 | 9cebce7f97a1848885883692cd0f494cce6bae7f | # Copyright 2019 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing class for AWS's Redshift Cluster Subnet Group."""
from absl import flags
from perfkitbenchmarker import resource
from perfkitbenchmarker import vm_util
FLAGS = flags.FLAGS
class RedshiftClusterSubnetGroup(resource.BaseResource):
  """Cluster Subnet Group associated with a Redshift cluster launched in a vpc.

  A cluster subnet group allows you to specify a set of subnets in your VPC.

  Attributes:
    name: A string name of the cluster subnet group.
    subnet_id: A string name of the subnet id associated with the group.
  """

  def __init__(self, cmd_prefix):
    super(RedshiftClusterSubnetGroup, self).__init__(user_managed=False)
    self.cmd_prefix = cmd_prefix
    self.name = 'pkb-' + FLAGS.run_uri
    self.subnet_id = ''

  def _Create(self):
    """Issues the AWS CLI call that creates the cluster subnet group."""
    description = 'Cluster Subnet Group for run uri {}'.format(FLAGS.run_uri)
    create_cmd = self.cmd_prefix + [
        'redshift', 'create-cluster-subnet-group',
        '--cluster-subnet-group-name', self.name,
        '--description', description,
        '--subnet-ids', self.subnet_id,
    ]
    vm_util.IssueCommand(create_cmd)

  def _Delete(self):
    """Deletes the cluster subnet group; missing groups are tolerated."""
    delete_cmd = self.cmd_prefix + [
        'redshift', 'delete-cluster-subnet-group',
        '--cluster-subnet-group-name', self.name,
    ]
    vm_util.IssueCommand(delete_cmd, raise_on_failure=False)
|
5,264 | b1d8a454e590dfa4afa257ca665376c320a4acb5 | # Generated by Django 3.0.8 on 2020-07-12 19:05
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: fixes the typo'd column name on Employee
    # (eAdddress -> eAddress).
    dependencies = [
        ('CRUD', '0001_initial'),
    ]
    operations = [
        migrations.RenameField(
            model_name='employee',
            old_name='eAdddress',
            new_name='eAddress',
        ),
    ]
|
5,265 | 4b5f58d471b05428caef3ca7a3bdc0d30a7e3881 |
from PrStatusWorker import PrStatusWorker
import threading
def initialize_worker():
    """Thread target: construct the PR status worker and begin polling."""
    status_worker = PrStatusWorker()
    status_worker.start_pr_status_polling()


print("Starting the PR status monitor worker thread...")
worker_thread = threading.Thread(target=initialize_worker, name="pr_status_worker")
worker_thread.start()
|
def phi(n):
    """Return Euler's totient of *n* using trial-division factorization.

    Applies the product formula phi(n) = n * prod(1 - 1/p) over the distinct
    prime factors p of n.
    """
    remaining = n
    result = n
    d = 2
    while remaining > 1:
        if remaining % d == 0:
            # Bug fix: the original subtracted int(remaining/d); the totient
            # product formula requires subtracting result // d for each new
            # prime factor d (e.g. phi(30) was computed as 9 instead of 8).
            result -= result // d
            while remaining % d == 0:
                remaining //= d
        d += 1
    return result
# Search for the n <= 999,999 maximising n / phi(n) (Project Euler 69 style).
m = (0, 1)
for n in range(2, 1000000):
    p = phi(n)
    # Tuples compare lexicographically, so the ratio n/p dominates; the second
    # element keeps the n that achieved it.
    m = max(m, (n/p, n))
    if n % 10000 == 0:
        # Progress indicator; the trial-division phi makes this loop slow.
        print(n)
# NOTE(review): range stops at 999,999 -- confirm whether n = 1,000,000
# should be included in the search.
print(m)
|
5,267 | 9ba74c7ecbd20c59883aff4efdc7e0369ff65daf | # Stubs for binascii
# Based on http://docs.python.org/3.2/library/binascii.html
import sys
from typing import Union, Text
# Version-dependent aliases for the accepted argument types.
if sys.version_info < (3,):
    # Python 2 accepts unicode ascii pretty much everywhere.
    _Bytes = Text
    _Ascii = Text
else:
    # But since Python 3.3 ASCII-only unicode strings are accepted by the
    # a2b_* functions.
    _Bytes = bytes
    _Ascii = Union[bytes, str]

# uuencode, base64 and quoted-printable codecs.
def a2b_uu(string: _Ascii) -> bytes: ...
if sys.version_info >= (3, 7):
    def b2a_uu(data: _Bytes, *, backtick: bool = ...) -> bytes: ...
else:
    def b2a_uu(data: _Bytes) -> bytes: ...
def a2b_base64(string: _Ascii) -> bytes: ...
if sys.version_info >= (3, 6):
    def b2a_base64(data: _Bytes, *, newline: bool = ...) -> bytes: ...
else:
    def b2a_base64(data: _Bytes) -> bytes: ...
def a2b_qp(string: _Ascii, header: bool = ...) -> bytes: ...
def b2a_qp(data: _Bytes, quotetabs: bool = ..., istext: bool = ..., header: bool = ...) -> bytes: ...
# binhex (hqx) codecs and CRC helpers.
def a2b_hqx(string: _Ascii) -> bytes: ...
def rledecode_hqx(data: _Bytes) -> bytes: ...
def rlecode_hqx(data: _Bytes) -> bytes: ...
def b2a_hqx(data: _Bytes) -> bytes: ...
def crc_hqx(data: _Bytes, crc: int) -> int: ...
def crc32(data: _Bytes, crc: int = ...) -> int: ...
# Hexadecimal codecs (hexlify/unhexlify alias b2a_hex/a2b_hex).
def b2a_hex(data: _Bytes) -> bytes: ...
def hexlify(data: _Bytes) -> bytes: ...
def a2b_hex(hexstr: _Ascii) -> bytes: ...
def unhexlify(hexlify: _Ascii) -> bytes: ...

class Error(Exception): ...
class Incomplete(Exception): ...
|
5,268 | 102ba5c1cb4beda6f9b82d37d9b343fe4f309cfb | #!/usr/bin/python
###########################################################################
#
# Copyright 2019 Dell, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
import sys
import time
import json
import ast
import cli_client as cc
from rpipe_utils import pipestr
from scripts.render_cli import show_cli_output
def prompt(msg):
    # Ask the operator to confirm a destructive action; exits the process
    # with status 0 when the answer is "n".
    # NOTE: raw_input implies this script targets Python 2.
    prompt_msg = msg + " [confirm y/N]: "
    x = raw_input(prompt_msg)
    while x.lower() != "y" and x.lower() != "n":
        print ("Invalid input, expected [y/N]")
        x = raw_input(prompt_msg)
    if x.lower() == "n":
        exit(0)
def invoke(func, args):
    # Dispatch a CLI handler name to the matching RESTCONF RPC.
    # Returns the HTTP response object, or None for unknown handler names.
    body = None
    aa = cc.ApiClient()
    if func == 'rpc_sonic_interface_clear_counters':
        keypath = cc.Path('/restconf/operations/sonic-interface:clear_counters')
        body = {"sonic-interface:input":{"interface-param":args[0]}}
        # Confirm with the operator first; prompt() raises SystemExit on "n".
        if args[0] == "all":
            prompt("Clear all Interface counters")
        elif args[0] == "PortChannel":
            prompt("Clear all PortChannel interface counters")
        elif args[0] == "Ethernet":
            prompt("Clear all Ethernet interface counters")
        else:
            prompt("Clear counters for " + args[0])
        return aa.post(keypath, body)
    else:
        return
def run(func, args):
    # Invoke the RPC and print a failure detail when the device reports a
    # non-zero status. (Python 2 print-statement syntax.)
    try:
        api_response = invoke(func,args)
        status = api_response.content["sonic-interface:output"]
        if status["status"] != 0:
            print status["status-detail"]
    # prompt() returns SystemExit exception when exit() is called
    except SystemExit:
        return
    except:
        # NOTE(review): bare except hides the real error (including a None
        # response from invoke) behind a generic message.
        print "%Error: Transaction Failure"
        return
if __name__ == '__main__':
    # argv[1] is the handler name; the remaining argv entries are its args.
    pipestr().write(sys.argv)
    run(sys.argv[1], sys.argv[2:])
|
5,269 | 5e7a589af69a604021ed9558fcce721a8e254fee | from .context import mango
from solana.publickey import PublicKey
def test_token_lookup():
    """SplTokenLookup resolves symbols from an in-memory token list."""
    token_entries = [
        ("So11111111111111111111111111111111111111112", "SOL", "Wrapped SOL", 9),
        ("EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v", "USDC", "USD Coin", 6),
        ("9n4nbM75f5Ui33ZbPYXn59EwSgE8CGsHtAeTH5YFeJ9E", "BTC",
         "Wrapped Bitcoin (Sollet)", 6),
        ("2FPyTwcZLUg1MDrwsyoP4D6s1tM7hAkHYRjkNb5w6Pxk", "ETH",
         "Wrapped Ethereum (Sollet)", 6),
    ]
    data = {
        "tokens": [
            {"address": address, "symbol": symbol, "name": name, "decimals": decimals}
            for address, symbol, name, decimals in token_entries
        ]
    }
    actual = mango.SplTokenLookup("test-filename", data)
    assert actual is not None
    assert actual.logger is not None
    eth = actual.find_by_symbol("ETH")
    assert eth is not None
    assert eth.name == "Wrapped Ethereum (Sollet)"
    btc = actual.find_by_symbol("BTC")
    assert btc is not None
    assert btc.name == "Wrapped Bitcoin (Sollet)"
def test_token_lookups_with_full_data():
    # Exercises SplTokenLookup against the full bundled token data file,
    # checking both symbol->mint and mint->symbol resolution.
    token_lookup = mango.SplTokenLookup.load(mango.SplTokenLookup.DefaultDataFilepath)
    assert token_lookup.find_by_symbol("BTC").mint == PublicKey("9n4nbM75f5Ui33ZbPYXn59EwSgE8CGsHtAeTH5YFeJ9E")
    assert token_lookup.find_by_symbol("ETH").mint == PublicKey("2FPyTwcZLUg1MDrwsyoP4D6s1tM7hAkHYRjkNb5w6Pxk")
    assert token_lookup.find_by_mint("AKJHspCwDhABucCxNLXUSfEzb7Ny62RqFtC9uNjJi4fq").symbol == "SRM-SOL"
    assert token_lookup.find_by_mint("Es9vMFrzaCERmJfrF4H2FYD4KCoNkY11McCe8BenwNYB").symbol == "USDT"
|
5,270 | 8753996c90ecea685e6312020dfd31fabb366138 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
class ClassMKB(models.Model):
    # International Classification of Diseases ("МКБ") catalogue entry,
    # stored as an adjacency-list tree via parent_id / parent_code.
    name = models.CharField(max_length=512,verbose_name = 'Наименование')
    code = models.CharField(max_length=20, null=True, blank=True,verbose_name = 'Код')
    # Integer id of the parent row (not a real ForeignKey).
    parent_id = models.IntegerField(null=True, blank=True)
    parent_code = models.CharField(max_length=100, null=True, blank=True,verbose_name = 'Код предка')
    # Number of entries in this group/subtree.
    node_count = models.SmallIntegerField(default=0, null=True, blank=True,verbose_name = 'Количество в группе')
    additional_info = models.TextField(null=True, blank=True,verbose_name = 'Дополнительно')

    class Meta:
        db_table = 'class_mkb'
        verbose_name = 'Международная класификация болезней'
        verbose_name_plural = 'Международная класификация болезней'

    def __unicode__(self):
        # Python 2-style display name.
        return self.name
# Read a case count, then for each case compare two integers and print the
# relation ('>', '<' or '=') in the judge's "#i rel" format.
count = int(input())
for case_number in range(1, count + 1):
    left, right = map(int, input().split())
    if left > right:
        relation = '>'
    elif left < right:
        relation = '<'
    else:
        relation = '='
    print(f'#{case_number} {relation}')
|
5,272 | 6c5c07dadbe7ec70a210ee42e756be0d710c0993 | #!/usr/local/bin/python
import cgi
import pymysql
import pymysql.cursors
import binascii
import os
from mylib import siteLines
import threading
def checkStringLine(ip, host, pagel, objects, title):
    """Record the site only if its IP (sans port) has not been seen before."""
    onlyIp = ip.split(":")[0]
    connection = siteLines()
    with connection.cursor() as cursor:
        # Security fix: `ip` comes from untrusted CGI input -- use a
        # parameterized query instead of f-string interpolation (SQL injection).
        sql = "SELECT `IP` FROM `sites` WHERE `IP`=%s"
        cursor.execute(sql, (onlyIp,))
        result = cursor.fetchone()
        if result is None:
            SiteStringLine(ip, host, pagel, objects, title)
def SiteStringLine(ip, host, pagel, objects, title):
    """Insert a new site record into the `sites` table."""
    connection = siteLines()
    with connection:
        with connection.cursor() as cursor:
            # Security fix: all values originate from untrusted CGI form
            # input -- bind them as parameters rather than f-string them
            # into the SQL text (SQL injection).
            sql = ("INSERT INTO `sites` (`IP`, `URL`, `PageLeight`, `Objects`, `Title`) "
                   "VALUES (%s, %s, %s, %s, %s)")
            cursor.execute(sql, (ip, host, pagel, objects, title))
        connection.commit()
form = cgi.FieldStorage()
# Log the raw form; use a context manager so the handle is closed (the
# original leaked an open file object on every request).
with open("gates.log", "a+", encoding="utf-8") as log_file:
    log_file.write(str(form) + "\n")
if "host" in form:
    # Removed dead locals: the original bound form.__contains__(...) results
    # (booleans) to ip/host/... and never used them.
    # Hand the database work to a background thread so the CGI response
    # is not blocked on MySQL.
    thread0 = threading.Thread(
        target=checkStringLine,
        args=(form["ip"].value, form["host"].value, form["pagel"].value,
              form["words"].value, form["title"].value))
    thread0.start()
|
5,273 | 22c2425f1dc14b6b0005ebf2231af8abf43aa2e1 | from flask import Flask, flash, abort, redirect, url_for, request, render_template, make_response, json, Response
import os, sys
import config
import boto.ec2.elb
import boto
from boto.ec2 import *
app = Flask(__name__)


@app.route('/')
def index():
    """Region-by-region summary of EC2 resources (instances, EBS, EIPs, ELBs)."""
    region_summaries = []  # renamed: the original shadowed the builtin `list`
    creds = config.get_ec2_conf()
    for region in config.region_list():
        conn = connect_to_region(region, aws_access_key_id=creds['AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds['AWS_SECRET_ACCESS_KEY'])
        zones = conn.get_all_zones()
        instances = conn.get_all_instance_status()
        instance_count = len(instances)
        ebs = conn.get_all_volumes()
        ebscount = len(ebs)
        unattached_ebs = 0
        unattached_eli = 0
        event_count = 0
        # Count instances that have scheduled events attached.
        for instance in instances:
            if instance.events:
                event_count += 1
        # Count EBS volumes not attached to any instance.
        for vol in ebs:
            if vol.attachment_state() is None:
                unattached_ebs += 1
        elis = conn.get_all_addresses()
        eli_count = len(elis)
        # Count elastic IPs not associated with an instance.
        for eli in elis:
            if not eli.instance_id:
                unattached_eli += 1
        connelb = boto.ec2.elb.connect_to_region(region, aws_access_key_id=creds['AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds['AWS_SECRET_ACCESS_KEY'])
        elb_count = len(connelb.get_all_load_balancers())
        region_summaries.append({'region': region, 'zones': zones, 'instance_count': instance_count, 'ebscount': ebscount, 'unattached_ebs': unattached_ebs, 'eli_count': eli_count, 'unattached_eli': unattached_eli, 'elb_count': elb_count, 'event_count': event_count})
    # Template keyword must stay `list` -- index.html references it by name.
    return render_template('index.html', list=region_summaries)
@app.route('/ebs_volumes/<region>/')
def ebs_volumes(region=None):
    """List the unattached EBS volumes in *region*."""
    creds = config.get_ec2_conf()
    conn = connect_to_region(region, aws_access_key_id=creds['AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds['AWS_SECRET_ACCESS_KEY'])
    ebs_vol = []
    for vol in conn.get_all_volumes():
        # attachment_state() is None for volumes not attached to an instance
        # (fixed `== None` comparison to the idiomatic `is None`).
        if vol.attachment_state() is None:
            ebs_vol.append({'id': vol.id, 'size': vol.size, 'iops': vol.iops, 'status': vol.status})
    return render_template('ebs_volume.html', ebs_vol=ebs_vol, region=region)
@app.route('/ebs_volumes/<region>/delete/<vol_id>')
def delete_ebs_vol(region=None,vol_id=None):
    # Delete the named EBS volume, then return to the region's volume list.
    creds = config.get_ec2_conf()
    conn = connect_to_region(region, aws_access_key_id=creds['AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds['AWS_SECRET_ACCESS_KEY'])
    # NOTE(review): encode('ascii') yields bytes on Python 3; this looks like
    # Python 2-era code -- confirm the boto version in use accepts bytes here.
    vol_id = vol_id.encode('ascii')
    vol_ids = conn.get_all_volumes(volume_ids=vol_id)
    for vol in vol_ids:
        vol.delete()
    return redirect(url_for('ebs_volumes', region=region))
@app.route('/elastic_ips/<region>/')
def elastic_ips(region=None):
    """List the elastic IPs in *region* that are not associated with an instance."""
    creds = config.get_ec2_conf()
    conn = connect_to_region(region, aws_access_key_id=creds['AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds['AWS_SECRET_ACCESS_KEY'])
    unassociated = [
        {'public_ip': address.public_ip, 'domain': address.domain}
        for address in conn.get_all_addresses()
        if not address.instance_id
    ]
    return render_template('elastic_ip.html', un_eli=unassociated, region=region)
@app.route('/elastic_ips/<region>/delete/<ip>')
def delete_elastic_ip(region=None,ip=None):
    """Release the given elastic IP, then return to the region's EIP list."""
    creds = config.get_ec2_conf()
    conn = connect_to_region(region, aws_access_key_id=creds['AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds['AWS_SECRET_ACCESS_KEY'])
    for address in conn.get_all_addresses(addresses=ip.encode('ascii')):
        address.release()
    return redirect(url_for('elastic_ips', region=region))
@app.route('/instance_events/<region>/')
def instance_events(region=None):
    # List instances in *region* that have a scheduled event, showing the
    # first event's code, description and time window.
    creds = config.get_ec2_conf()
    conn = connect_to_region(region, aws_access_key_id=creds['AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds['AWS_SECRET_ACCESS_KEY'])
    instances = conn.get_all_instance_status()
    instance_event_list = []
    for instance in instances:
        event = instance.events
        if event:
            # Only the first event per instance is surfaced.
            event_info = { 'instance_id' : instance.id, 'event' : instance.events[0].code, 'description' : instance.events[0].description, 'event_before' : instance.events[0].not_before, 'event_after': instance.events[0].not_after }
            instance_event_list.append(event_info)
    return render_template('instance_events.html', instance_event_list=instance_event_list)
if __name__ == '__main__':
    # NOTE(review): debug=True enables the interactive Werkzeug debugger --
    # do not expose this on 0.0.0.0 in production.
    app.debug = True
    app.run(host='0.0.0.0')
|
5,274 | d29c8ec737b8e962d381c8fdd0999e7e01847836 | import sys
import psyco
# Redirect stdin/stdout to local files for offline judging.
# NOTE(review): hard-coded absolute paths; these handles are never closed.
sys.stdin = open("/home/shiva/Learning/1.txt", "r")
sys.stdout = open("/home/shiva/Learning/2.txt", "w")
def compute(plus,minus,total,inp):
    # Reconstruct the '?' placeholders in the arithmetic puzzle string `inp`,
    # given the count of left-hand terms (`plus`), the count of '-' signs
    # (`minus`) and the stated right-hand `total`.
    if plus == 1 and minus == 0:
        print(total); return
    elif (plus == 1 and minus == 1):
        print("Impossible"); return
    elif (abs(plus-minus) > total):
        # NOTE(review): this branch evaluates the bare name `plus` (a no-op)
        # and falls through. It looks like an unfinished "Impossible" case;
        # confirm intended behavior before changing it.
        plus
    temp = total
    # Every '-' slot will be filled with 1, so fold them into the target sum.
    total += minus
    res = []
    if int(total/plus) > temp:
        print("Impossible"); return
    elif int(total%plus) == 0:
        # Evenly divisible: all positive slots get the same value.
        res = [int(total/plus) for i in range(0,plus)]
    else:
        # Otherwise the last slot absorbs the remainder.
        res = [int(total/(plus-1)) for i in range(0,plus-1)]
        res.append(total%(plus-1))
    j = 0
    prev = 0
    # Re-emit the expression, substituting reconstructed values for '?'.
    for i in inp.split():
        if j == 0:
            # The first token is always replaced by the first value.
            print(res[j],end=' ')
            j+=1
        elif i == '+' or i=='-':
            print(i,end=' ')
            prev = i
        elif i == '?':
            if prev == '+':
                print(res[j],end=' ')
                j+=1
            else:
                # Slots preceded by '-' always receive 1.
                print('1',end=' ')
        else:
            print(i,end=' ')
inp = input()
# `plus` counts left-hand terms (first term plus one per '+' sign);
# `minus` counts '-' signs; `total` captures the number after '='.
plus =1
minus = 0
total = 0
for i in inp.split():
    if i=='?' or i=='=':
        continue
    elif i == '+':
        plus+=1
    elif i == '-':
        minus +=1
    else:
        # NOTE(review): any literal number token overwrites `total`; assumes
        # the only literal in the input is the right-hand side -- confirm.
        total = int(i)
compute(plus,minus,total,inp)
|
5,275 | 479411727de14e8032b6d01cdb844632111af688 | import os
import argparse
from data.downloader import *
from data.utils import *
from data.danmaku import *
from utils import *
key = '03fc8eb101b091fb'  # Bilibili API app key passed to GetVideoInfo.

# Command-line options for the downloader.
parser = argparse.ArgumentParser(description = 'Download Video From Bilibili')
parser.add_argument('-d', type = str, help = 'dataset')
parser.add_argument('-o', type = str, default = 'dataset', help = 'output directory')
parser.add_argument('-f', type = str, default = 'mp4', help = 'format')
parser.add_argument('-c', type = str, default = '', help = 'country')
parser.add_argument('-q', type = int, default = 0, help = 'quality')
parser.add_argument('-i', action = 'store_true', default = False, help = 'ignore download')
args = parser.parse_args()

# NOTE(review): hard-coded session cookie; credentials should live outside
# the source tree.
cookie = dict()
cookie['DedeUserID'] = '347368229'
cookie['DedeUserID__ckMd5'] = '6e02ca142544e64c'
cookie['sid'] = 'ii8ca1k2'
cookie['SESSDATA'] = '1d13f39c%2C1544246349%2Cc62b611b'

aids, attr = download_list(os.path.join('list', args.d + '.txt'), os.path.join(args.o, args.d, 'video'), **cookie, ignore = args.i, quality = args.q, debug = True)
print('[*] Video Download Finished')

infos = dict()
for aid in aids:
    extra = dict()
    if 'ep' in aid:
        # Episode ids ('epNNN') map onto a page of the parent 'avNNN' video.
        epid = aid
        aid = attr['aid']
        fn = os.path.join(args.o, args.d, 'video', '{}.{}'.format(epid, args.f))
        page = int(epid[2:]) - int(attr['base'][2:]) + 1
        # NOTE(review): `page` is computed above but the literal 1 is passed
        # here -- likely should be `page`; confirm GetVideoInfo's signature.
        info = GetVideoInfo(aid.strip('av'), key, 1)
    else:
        fn = os.path.join(args.o, args.d, 'video', '{}.{}'.format(aid, args.f))
        info = GetVideoInfo(aid.strip('av'), key)
    extra['danmaku'] = request_danmaku(cid = info.cid)
    if 'country' in attr:
        # Region-locked entry: record the country and mark it incomplete.
        extra['country'] = attr['country']
        extra['complete'] = False
    else:
        capture = get_capture(fn)
        print('[*] Capture : {}'.format(fn))
        # Bug fix: the duration line was duplicated in the original.
        extra['duration'] = get_duration(capture = capture)
        extra['nframes'] = get_nframes(capture = capture)
        extra['fps'] = get_fps(capture = capture)
        extra['boundary'] = get_boundary(fn, capture, extra['nframes'], 'hecate')
        extra['positions'] = get_positions(extra['nframes'])
        extra['fpsegment'] = get_fpsegment(extra['boundary'])
        extra['score'] = get_score(**extra)
        extra['summary'] = get_summary(**extra)
        extra['complete'] = True
    for k, v in extra.items():
        setattr(info, k, v)
    infos[aid] = info
save_pickle(infos, '{}.info'.format(args.d))
|
5,276 | 754b34028780231c7eccb98cdf3e83bd615d843f | import pandas as pd
import os
from appia.processors.core import normalizer
from math import ceil
class Experiment:
    """Container for one chromatography experiment's HPLC and FPLC traces.

    Both traces are long-format pandas DataFrames; ``None`` means the trace
    is absent.
    """

    def __init__(self, id) -> None:
        self.id = id
        # Schema version recorded in the jsonify() document.
        self.version = 4
        self._hplc = None
        self._fplc = None

    @property
    def hplc(self):
        """Long-format HPLC trace, or None when absent."""
        try:
            return self._hplc
        except AttributeError:
            return None

    @hplc.setter
    def hplc(self, df):
        if isinstance(df, pd.DataFrame) or df is None:
            try:
                # Keep the trace in a canonical ordering.
                self._hplc = df.sort_values(by=["Normalization", "Channel", "mL"])
            except AttributeError:
                # df is None (no .sort_values); store it as-is.
                self._hplc = df
        else:
            raise TypeError("HPLC input is not a pandas dataframe")

    @property
    def fplc(self):
        """Long-format FPLC trace, or None when absent."""
        try:
            return self._fplc
        except AttributeError:
            return None

    @fplc.setter
    def fplc(self, df):
        if isinstance(df, pd.DataFrame) or df is None:
            self._fplc = df
        else:
            raise TypeError("FPLC input is not a pandas dataframe")

    @property
    def wide(self):
        """Raw HPLC signal as a wide (Time x 'Sample Channel') table."""
        wide = self.hplc.copy()
        wide = wide.loc[wide["Normalization"] == "Signal"]
        wide["Sample"] = wide["Sample"].astype(str) + " " + wide["Channel"]
        # Bug fix: the drop() result was previously discarded (a no-op).
        # pivot_table ignores the extra columns either way, so the returned
        # table is unchanged -- this just removes the dead statement.
        wide = wide.drop(["Channel", "Normalization"], axis=1)
        wide = wide.pivot_table(index="Time", columns="Sample", values="Value")
        return wide

    def __repr__(self):
        to_return = f'Experiment "{self.id}" with '
        if self.hplc is not None:
            to_return += "HPLC "
        if self.hplc is not None and self.fplc is not None:
            to_return += "and "
        if self.fplc is not None:
            to_return += "FPLC "
        if self.hplc is None and self.fplc is None:
            to_return += "no "
        to_return += "data"
        return to_return

    def extend_hplc(self, hplc):
        """Append more long-format HPLC rows to the stored trace."""
        if not isinstance(hplc, pd.DataFrame):
            raise TypeError(f"Tried to extend experiment hplc with {type(hplc)}")
        self.hplc = pd.concat([self.hplc, hplc])

    def show_tables(self):
        """Print both raw tables (debug helper)."""
        print("HPLC:")
        print(self.hplc)
        print("FPLC:")
        print(self.fplc)

    def jsonify(self):
        """Serialize to the dict layout expected by the uploader; absent
        traces become empty strings."""
        if self.hplc is not None:
            hplc_json = (
                self.hplc.pivot_table(
                    index=["mL", "Channel", "Time", "Normalization"],
                    columns="Sample",
                    values="Value",
                )
                .reset_index()
                .to_json()
            )
        else:
            hplc_json = ""
        if self.fplc is not None:
            fplc_json = self.fplc.to_json()
        else:
            fplc_json = ""
        doc = {
            "_id": self.id,
            "version": self.version,
            "hplc": hplc_json,
            "fplc": fplc_json,
        }
        return doc

    def renormalize_hplc(self, norm_range, strict):
        """Recompute the 'Normalized' values of the HPLC trace over norm_range.

        Raises ValueError when no HPLC data is present.
        """
        if self.hplc is None:
            raise ValueError("No HPLC data")
        # Widen by Normalization, renormalize per Sample/Channel group, then
        # melt back to long format.
        hplc = self.hplc.pivot(
            index=["mL", "Sample", "Channel", "Time"], columns=["Normalization"]
        )["Value"].reset_index()
        hplc = hplc.groupby(["Sample", "Channel"], group_keys=False).apply(
            lambda x: normalizer(x, norm_range, strict)
        )
        hplc = hplc.melt(
            id_vars=["mL", "Sample", "Channel", "Time"],
            value_vars=["Signal", "Normalized"],
            var_name="Normalization",
            value_name="Value",
        )
        self.hplc = hplc

    def renormalize_fplc(self, norm_range, strict):
        """Recompute the 'Normalized' values of the FPLC trace over norm_range.

        Raises ValueError when no FPLC data is present.
        """
        if self.fplc is None:
            raise ValueError("No FPLC data")
        fplc = self.fplc.pivot(
            index=["mL", "CV", "Fraction", "Channel", "Sample"],
            columns=["Normalization"],
        )["Value"].reset_index()
        fplc = fplc.groupby(["Sample", "Channel"], group_keys=False).apply(
            lambda x: normalizer(x, norm_range, strict)
        )
        fplc = fplc.melt(
            id_vars=["mL", "CV", "Channel", "Fraction", "Sample"],
            value_vars=["Signal", "Normalized"],
            var_name="Normalization",
            value_name="Value",
        )
        self.fplc = fplc

    def reduce_hplc(self, num_points):
        """Downsample the HPLC trace to at most *num_points* rows per
        sample/channel/normalization group."""

        def _thin(df, final_points):
            # Keep every k-th row; renamed from `reduction_factor`, which the
            # original also used as a local variable shadowing the function.
            step = ceil(df.shape[0] / final_points)
            return df[::step]

        try:
            self.hplc = self.hplc.groupby(
                ["Channel", "Sample", "Normalization"], group_keys=False, as_index=False
            ).apply(lambda x: _thin(x, num_points))
            self.hplc = self.hplc.reset_index(drop=True)
        except AttributeError:
            # No HPLC data; nothing to reduce.
            return

    def rename_channels(self, channel_name_dict):
        """Rename HPLC channel labels via an {old: new} mapping."""
        self.hplc = self.hplc.replace({"Channel": channel_name_dict})

    def hplc_csv(self, outfile):
        """Write long- and wide-format HPLC CSVs; return the long-format path."""
        if outfile[-4:] == ".csv":
            outfile = outfile[:-4]
        if self.hplc is not None:
            self.hplc.to_csv(outfile + "-long.csv", index=False)
            self.wide.to_csv(outfile + "-wide.csv", index=True)
            return outfile + "-long.csv"

    def fplc_csv(self, outfile):
        """Write the FPLC CSV; return its path."""
        if outfile[-4:] != ".csv":
            outfile = outfile + ".csv"
        if self.fplc is not None:
            self.fplc.to_csv(outfile, index=False)
            return outfile

    def save_csvs(self, path):
        """Write all CSVs under *path*; return (hplc_csv_path, fplc_csv_path)."""
        hplc_csv = self.hplc_csv(os.path.join(path, f"{self.id}_hplc"))
        fplc_csv = self.fplc_csv(os.path.join(path, f"{self.id}_fplc"))
        return hplc_csv, fplc_csv
def concat_experiments(exp_list):
    """Merge several Experiments into one Experiment with id 'concat'.

    HPLC samples are prefixed with their source experiment id; FPLC samples
    are replaced by it. Note: the source experiments' frames are modified
    in place.
    """
    hplc_frames = []
    for exp in exp_list:
        if exp.hplc is None:
            continue
        frame = exp.hplc
        frame["Sample"] = f"{exp.id}: " + frame["Sample"].astype(str)
        hplc_frames.append(frame)
    fplc_frames = []
    for exp in exp_list:
        if exp.fplc is None:
            continue
        frame = exp.fplc
        frame["Sample"] = exp.id
        fplc_frames.append(frame)
    combined = Experiment("concat")
    if hplc_frames:
        combined.hplc = pd.concat(hplc_frames)
    if fplc_frames:
        combined.fplc = pd.concat(fplc_frames)
    return combined
|
5,277 | 3bea4413a41a9eecb5e3184d090b646e17892b5c | from typing import List, Tuple
# Small worked example from the puzzle statement (not used below).
test_string = "2 3 0 3 10 11 12 1 1 0 1 99 2 1 1 2"
# Read the real puzzle input: one whitespace-separated line of integers.
with open('data/day8_input.txt', 'r') as fp:
    my_string = fp.read()
class Node:
    """A node in the Advent-of-Code day-8 license tree."""

    def __init__(self):
        # metadata: list of ints; children: list of Node.
        self.metadata = []
        self.children = []

    def checksum(self):
        """Sum of this node's own metadata entries."""
        return sum(self.metadata)

    def add_child(self, child):
        """Append *child* to this node's children."""
        self.children.append(child)

    def value(self):
        """Part-2 value: metadata sum for leaves, otherwise each metadata
        entry is a 1-based child index whose values are summed."""
        if not self.children:
            return self.checksum()
        total = 0
        for index in self.metadata:
            if 1 <= index <= len(self.children):
                total += self.children[index - 1].value()
        return total
def parse_string(my_string : str) -> List[int]:
    """Parse a whitespace-separated string of integers into a list.

    Generalized: uses str.split() with no argument so repeated spaces, tabs
    and a trailing newline (as produced by file.read()) are tolerated, where
    the original split(" ") raised ValueError on empty tokens.
    """
    return [int(token) for token in my_string.split()]
def parse_node(codes: List[int], idx : int) -> Tuple[Node, int]:
    """Recursively parse one node starting at *idx*; return (node, next_idx)."""
    child_count = codes[idx]
    meta_count = codes[idx + 1]
    node = Node()
    cursor = idx + 2
    for _ in range(child_count):
        child, cursor = parse_node(codes, cursor)
        node.add_child(child)
    node.metadata = list(codes[cursor:cursor + meta_count])
    return (node, cursor + meta_count)
codes = parse_string(my_string)
tree, _ = parse_node(codes, 0)

def checksum(node):
    # Part 1 answer: recursive sum of every node's metadata.
    c = node.checksum()
    for child in node.children:
        c += checksum(child)
    return c

print(checksum(tree))
# Part 2 answer: the root's indexed value.
print(tree.value())
5,278 | 85ac851e28dba3816f18fefb727001b8e396cc2b | # Алексей Головлев, группа БСБО-07-19
def lucky(ticket):
    """Compare *ticket*'s 'luckiness' against the module-global lastTicket.

    A 6-digit number is "lucky" when the sums of its first and last three
    digits match; the result reports whether both tickets agree.
    """
    def halves_match(number):
        digits = str(number)
        while len(digits) != 6:
            digits = '0' + digits
        values = [int(ch) for ch in digits]
        return sum(values[:3]) == sum(values[3:])

    same = halves_match(ticket) == halves_match(lastTicket)
    return 'Счастливый' if same else 'Несчастливый'
# Demonstration: lucky() compares against the module-global lastTicket.
lastTicket = 123456
print(lucky(100001))
lastTicket = 123321
print(lucky(100001))
5,279 | e732fa0e2b377a87b8b088303b277cc08cb695b3 | from flask import Flask, render_template, request, redirect, flash, session
from mysqlconnection import connectToMySQL
from flask_bcrypt import Bcrypt
import re
app = Flask(__name__)
bcrypt = Bcrypt(app)
# NOTE(review): hard-coded session secret; move to config/env for production.
app.secret_key = "something secret10"
DATABASE = "exam_quote_dash"
# Basic email-shape check used by /register and /update.
EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
# Schema reference:
# users:  id_users, first_name, last_name, email, password
# quotes: id_quotes, from_user, liked_from, content, author
@app.route("/")
def signin():
    # Landing page: combined login / registration form.
    return render_template("index.html")
@app.route("/register", methods=["POST"])
def register():
    """Validate the registration form, create the user with a bcrypt-hashed
    password, and start their session."""
    is_valid = True
    if len(request.form['first_name']) < 2:
        is_valid = False
        flash("please enter your first name.")
    if len(request.form['last_name']) < 2:
        is_valid = False
        flash("please enter your last name.")
    if not EMAIL_REGEX.match(request.form['email']):
        # Bug fix: an invalid email previously only flashed a message but did
        # not set is_valid = False, so registration still proceeded.
        is_valid = False
        flash("Invalid email address!")
    if len(request.form['password']) < 8:
        is_valid = False
        flash("password must be atleast 8 characters long.")
    if (request.form['password'] != request.form['confirm_password']):
        is_valid = False
        flash("passwords do not match.")
    if not is_valid:
        return redirect('/')
    flash("sucessfully added")
    mysql = connectToMySQL(DATABASE)
    pw_hash = bcrypt.generate_password_hash(request.form['password'])
    query = "INSERT INTO users (email, password, first_name, last_name) VALUES (%(em)s,%(pw)s,%(fn)s,%(ln)s);"
    data = {
        'em': request.form['email'],
        'pw': pw_hash,
        'fn': request.form['first_name'],
        'ln': request.form['last_name']
    }
    id_users = mysql.query_db(query, data)
    # Log the new user in immediately.
    session['id_users'] = id_users
    session['greeting'] = request.form['first_name']
    return redirect('/quotes')
@app.route('/login', methods=['POST'])
def login():
    # Look up the user by email and verify the bcrypt hash before creating
    # a session.
    mysql = connectToMySQL(DATABASE)
    query = "SELECT * FROM users WHERE email = %(em)s;"
    data = {
        'em': request.form['email']
    }
    result = mysql.query_db(query, data)
    if len(result) > 0:
        if bcrypt.check_password_hash(result[0]['password'], request.form['password']):
            session['id_users'] = result[0]['id_users']
            session['greeting'] = result[0]['first_name']
            return redirect('/quotes')
        else:
            flash("Email and/or password does not match.")
            return redirect('/')
    else:
        # NOTE(review): revealing that the email is unregistered enables
        # account enumeration; a generic message would be safer.
        flash("Please enter your registered Email.")
        return redirect('/')
@app.route('/success')
def success():
    """Show the success page; bounce anonymous visitors back to login."""
    if 'id_users' in session:
        return render_template('success.html')
    return redirect('/')
@app.route('/quotes')
def quotes():
    # Dashboard: every quote joined with its author's user row.
    # NOTE(review): unlike /success there is no login check here -- confirm
    # whether anonymous access is intended.
    mysql = connectToMySQL(DATABASE)
    query = "SELECT * FROM quotes JOIN users ON from_user = id_users;"
    join = mysql.query_db(query)
    return render_template('quotes.html', joined = join)
@app.route('/create', methods=['POST'])
def create():
    """Insert a new quote for the logged-in user after length validation."""
    is_valid = True
    if len(request.form['content']) < 10:
        flash("quotes are required to be longer than 10 characters.")
        # Bug fix: this line previously read `is_valid == False` (a no-op
        # comparison), so too-short quotes were still inserted.
        is_valid = False
    if is_valid:
        mysql = connectToMySQL(DATABASE)
        query = "INSERT INTO quotes (content, author, from_user) VALUES (%(quo)s, %(auth)s, %(from)s);"
        data = {
            'quo': request.form['content'],
            'auth': request.form['author'],
            'from': session['id_users']
        }
        mysql.query_db(query, data)
    return redirect('/quotes')
@app.route('/delete/<id>/<thing>')
def delete(id,thing):
    # Delete quote `id` only when `thing` (the quote owner's user id from the
    # URL) matches the logged-in user.
    # NOTE(review): ownership is trusted from the URL, not re-checked against
    # the quotes table -- a forged URL could delete another user's quote.
    if session['id_users'] == int(thing):
        mysql = connectToMySQL(DATABASE)
        query = "DELETE FROM quotes WHERE id_quotes = %(id)s;"
        data = {
            'id': id
        }
        mysql.query_db(query, data)
        return redirect('/quotes')
    else:
        flash("Unable to delete other's quotes")
        return redirect('/quotes')
@app.route("/edit")
def edit():
    # Show the account-edit form pre-filled with the logged-in user's row.
    mysql = connectToMySQL(DATABASE)
    query = "SELECT * From users WHERE id_users = %(id)s"
    data ={
        'id' : session['id_users']
    }
    users_table = mysql.query_db(query, data)
    return render_template('edit_account.html', users = users_table)
@app.route("/update", methods=["POST"])
def update():
    """Validate and persist changes to the logged-in user's account details."""
    is_valid = True
    if len(request.form['f_name']) < 3:
        is_valid = False
        flash("please enter your first name.")
    if len(request.form['l_name']) < 3:
        is_valid = False
        flash("please enter your last name.")
    if not EMAIL_REGEX.match(request.form['email']):
        # Bug fix: an invalid email previously only flashed a message but did
        # not set is_valid = False, so the update still proceeded.
        is_valid = False
        flash("Invalid email address!")
    if not is_valid:
        return redirect('/edit')
    flash("sucessfully updated")
    mysql = connectToMySQL(DATABASE)
    query = "UPDATE users Set first_name = %(fn)s, last_name = %(ln)s , email = %(em)s WHERE id_users = %(id)s;"
    data = {
        "fn": request.form["f_name"],
        "ln": request.form["l_name"],
        "em": request.form["email"],
        'id' : session['id_users']
    }
    mysql.query_db(query, data)
    # Refresh the displayed greeting with the (possibly changed) first name.
    session['greeting'] = request.form['f_name']
    return redirect('/quotes')
@app.route("/my_posts")
def my_post():
    """Show only the quotes created by the logged-in user."""
    mysql = connectToMySQL(DATABASE)
    data = {'id': session['id_users']}
    my_quotes = mysql.query_db(
        "SELECT * FROM quotes WHERE from_user = %(id)s;", data)
    return render_template('my_posts.html', quotes=my_quotes)
@app.route('/logout')
def logout():
    # Drop the entire session (user id and greeting) and return to login.
    session.clear()
    return redirect('/')
if __name__=="__main__":
    # Development server only; debug=True must not be used in production.
    app.run(debug=True)
5,280 | 3d49d03dbc38ee37eadd603b4b464b0e2e1a33d5 | import itertools
def permutations(string):
    """Return all distinct permutations of *string* (order unspecified)."""
    unique = set(itertools.permutations(string))
    return ["".join(chars) for chars in unique]
5,281 | a90db2073d43d54cbcc04e3000e5d0f2a2da4a55 | # Generated by Django 2.1.3 on 2020-06-05 23:06
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds UserIP.ip_attribution (CharField,
    # default ''). Do not edit applied migrations.

    dependencies = [
        ('index', '0005_userip_serial_number'),
    ]

    operations = [
        migrations.AddField(
            model_name='userip',
            name='ip_attribution',
            field=models.CharField(default='', max_length=8, verbose_name='ip地址'),
        ),
    ]
|
5,282 | f17ae8a44f8b032feac7c18fe39663054fea40c0 | import gc
import unittest
import numpy as np
from pydrake.autodiffutils import AutoDiffXd
from pydrake.common import RandomDistribution, RandomGenerator
from pydrake.common.test_utilities import numpy_compare
from pydrake.common.test_utilities.deprecation import catch_drake_warnings
from pydrake.common.value import Value
from pydrake.symbolic import Expression, Variable
from pydrake.systems.framework import (
BasicVector,
DiagramBuilder,
DiagramBuilder_,
InputPort,
TriggerType,
VectorBase,
)
from pydrake.systems.test.test_util import (
MyVector2,
)
from pydrake.systems.primitives import (
Adder, Adder_,
AddRandomInputs,
AffineSystem, AffineSystem_,
ConstantValueSource, ConstantValueSource_,
ConstantVectorSource, ConstantVectorSource_,
ControllabilityMatrix,
Demultiplexer, Demultiplexer_,
DiscreteDerivative, DiscreteDerivative_,
DiscreteTimeDelay, DiscreteTimeDelay_,
FirstOrderLowPassFilter,
FirstOrderTaylorApproximation,
Gain, Gain_,
Integrator, Integrator_,
IsControllable,
IsDetectable,
IsObservable,
IsStabilizable,
Linearize,
LinearSystem, LinearSystem_,
LinearTransformDensity, LinearTransformDensity_,
LogVectorOutput,
MatrixGain,
Multiplexer, Multiplexer_,
MultilayerPerceptron, MultilayerPerceptron_,
ObservabilityMatrix,
PassThrough, PassThrough_,
PerceptronActivationType,
PortSwitch, PortSwitch_,
RandomSource,
Saturation, Saturation_,
SharedPointerSystem, SharedPointerSystem_,
Sine, Sine_,
StateInterpolatorWithDiscreteDerivative,
StateInterpolatorWithDiscreteDerivative_,
SymbolicVectorSystem, SymbolicVectorSystem_,
TrajectoryAffineSystem, TrajectoryAffineSystem_,
TrajectoryLinearSystem, TrajectoryLinearSystem_,
TrajectorySource, TrajectorySource_,
VectorLog, VectorLogSink, VectorLogSink_,
WrapToSystem, WrapToSystem_,
ZeroOrderHold, ZeroOrderHold_,
)
from pydrake.trajectories import PiecewisePolynomial
def compare_value(test, a, b):
    """Assert (on *test*) that two values, vector or abstract, are equal."""
    # Compares a vector or abstract value.
    if isinstance(a, VectorBase):
        test.assertTrue(np.allclose(a.get_value(), b.get_value()))
        return
    test.assertEqual(type(a.get_value()), type(b.get_value()))
    test.assertEqual(a.get_value(), b.get_value())
class TestGeneral(unittest.TestCase):
def _check_instantiations(self, template, supports_symbolic=True):
default_cls = template[None]
self.assertTrue(template[float] is default_cls)
self.assertTrue(template[AutoDiffXd] is not default_cls)
if supports_symbolic:
self.assertTrue(template[Expression] is not default_cls)
    def test_instantiations(self):
        # TODO(eric.cousineau): Refine tests once NumPy functionality is
        # resolved for dtype=object, or dtype=custom is used.
        # Each primitive-system template must instantiate per scalar type;
        # systems that lack an Expression instantiation pass
        # supports_symbolic=False (see _check_instantiations).
        self._check_instantiations(Adder_)
        self._check_instantiations(AffineSystem_)
        self._check_instantiations(ConstantValueSource_)
        self._check_instantiations(ConstantVectorSource_)
        self._check_instantiations(Demultiplexer_)
        self._check_instantiations(DiscreteDerivative_)
        self._check_instantiations(DiscreteTimeDelay_)
        self._check_instantiations(Gain_)
        self._check_instantiations(Integrator_)
        self._check_instantiations(LinearSystem_)
        self._check_instantiations(LinearTransformDensity_,
                                   supports_symbolic=False)
        self._check_instantiations(Multiplexer_)
        self._check_instantiations(MultilayerPerceptron_)
        self._check_instantiations(PassThrough_)
        self._check_instantiations(PortSwitch_)
        self._check_instantiations(Saturation_)
        self._check_instantiations(SharedPointerSystem_)
        self._check_instantiations(Sine_)
        self._check_instantiations(StateInterpolatorWithDiscreteDerivative_)
        self._check_instantiations(SymbolicVectorSystem_)
        self._check_instantiations(TrajectoryAffineSystem_,
                                   supports_symbolic=False)
        self._check_instantiations(TrajectoryLinearSystem_,
                                   supports_symbolic=False)
        self._check_instantiations(TrajectorySource_)
        self._check_instantiations(VectorLogSink_)
        self._check_instantiations(WrapToSystem_)
        self._check_instantiations(ZeroOrderHold_)
def test_linear_affine_system(self):
# Just make sure linear system is spelled correctly.
A = np.identity(2)
B = np.array([[0], [1]])
f0 = np.array([[0], [0]])
C = np.array([[0, 1]])
D = [1]
y0 = [0]
system = LinearSystem(A, B, C, D)
context = system.CreateDefaultContext()
self.assertEqual(system.get_input_port(0).size(), 1)
self.assertEqual(context
.get_mutable_continuous_state_vector().size(), 2)
self.assertEqual(system.get_output_port(0).size(), 1)
self.assertTrue((system.A() == A).all())
self.assertTrue((system.B() == B).all())
self.assertTrue((system.f0() == f0).all())
self.assertTrue((system.C() == C).all())
self.assertEqual(system.D(), D)
self.assertEqual(system.y0(), y0)
self.assertEqual(system.time_period(), 0.)
x0 = np.array([1, 2])
system.configure_default_state(x0=x0)
system.SetDefaultContext(context)
np.testing.assert_equal(
context.get_continuous_state_vector().CopyToVector(), x0)
generator = RandomGenerator()
system.SetRandomContext(context, generator)
np.testing.assert_equal(
context.get_continuous_state_vector().CopyToVector(), x0)
system.configure_random_state(covariance=np.eye(2))
system.SetRandomContext(context, generator)
self.assertNotEqual(
context.get_continuous_state_vector().CopyToVector()[1], x0[1])
Co = ControllabilityMatrix(system)
self.assertEqual(Co.shape, (2, 2))
self.assertFalse(IsControllable(system))
self.assertFalse(IsControllable(system, 1e-6))
self.assertFalse(IsStabilizable(sys=system))
self.assertFalse(IsStabilizable(sys=system, threshold=1e-6))
Ob = ObservabilityMatrix(system)
self.assertEqual(Ob.shape, (2, 2))
self.assertFalse(IsObservable(system))
self.assertFalse(IsDetectable(sys=system))
self.assertFalse(IsDetectable(sys=system, threshold=1e-6))
system = AffineSystem(A, B, f0, C, D, y0, .1)
self.assertEqual(system.get_input_port(0), system.get_input_port())
self.assertEqual(system.get_output_port(0), system.get_output_port())
context = system.CreateDefaultContext()
self.assertEqual(system.get_input_port(0).size(), 1)
self.assertEqual(context.get_discrete_state_vector().size(), 2)
self.assertEqual(system.get_output_port(0).size(), 1)
self.assertTrue((system.A() == A).all())
self.assertTrue((system.B() == B).all())
self.assertTrue((system.f0() == f0).all())
self.assertTrue((system.C() == C).all())
self.assertEqual(system.D(), D)
self.assertEqual(system.y0(), y0)
self.assertEqual(system.time_period(), .1)
system.get_input_port(0).FixValue(context, 0)
linearized = Linearize(system, context)
self.assertTrue((linearized.A() == A).all())
taylor = FirstOrderTaylorApproximation(system, context)
self.assertTrue((taylor.y0() == y0).all())
new_A = np.array([[1, 2], [3, 4]])
new_B = np.array([[5], [6]])
new_f0 = np.array([[7], [8]])
new_C = np.array([[9, 10]])
new_D = np.array([[11]])
new_y0 = np.array([12])
system.UpdateCoefficients(
A=new_A, B=new_B, f0=new_f0, C=new_C, D=new_D, y0=new_y0
)
np.testing.assert_equal(new_A, system.A())
np.testing.assert_equal(new_B, system.B())
np.testing.assert_equal(new_f0.flatten(), system.f0())
np.testing.assert_equal(new_C, system.C())
np.testing.assert_equal(new_D, system.D())
np.testing.assert_equal(new_y0, system.y0())
system = MatrixGain(D=A)
self.assertTrue((system.D() == A).all())
system = TrajectoryAffineSystem(
PiecewisePolynomial(A),
PiecewisePolynomial(B),
PiecewisePolynomial(f0),
PiecewisePolynomial(C),
PiecewisePolynomial(D),
PiecewisePolynomial(y0),
.1)
self.assertEqual(system.get_input_port(0), system.get_input_port())
self.assertEqual(system.get_output_port(0), system.get_output_port())
context = system.CreateDefaultContext()
self.assertEqual(system.get_input_port(0).size(), 1)
self.assertEqual(context.get_discrete_state_vector().size(), 2)
self.assertEqual(system.get_output_port(0).size(), 1)
for t in np.linspace(0., 1., 5):
self.assertTrue((system.A(t) == A).all())
self.assertTrue((system.B(t) == B).all())
self.assertTrue((system.f0(t) == f0).all())
self.assertTrue((system.C(t) == C).all())
self.assertEqual(system.D(t), D)
self.assertEqual(system.y0(t), y0)
self.assertEqual(system.time_period(), .1)
x0 = np.array([1, 2])
system.configure_default_state(x0=x0)
system.SetDefaultContext(context)
np.testing.assert_equal(
context.get_discrete_state_vector().CopyToVector(), x0)
generator = RandomGenerator()
system.SetRandomContext(context, generator)
np.testing.assert_equal(
context.get_discrete_state_vector().CopyToVector(), x0)
system.configure_random_state(covariance=np.eye(2))
system.SetRandomContext(context, generator)
self.assertNotEqual(
context.get_discrete_state_vector().CopyToVector()[1], x0[1])
system = TrajectoryLinearSystem(
A=PiecewisePolynomial(A),
B=PiecewisePolynomial(B),
C=PiecewisePolynomial(C),
D=PiecewisePolynomial(D),
time_period=0.1)
self.assertEqual(system.time_period(), .1)
system.configure_default_state(x0=np.array([1, 2]))
system.configure_random_state(covariance=np.eye(2))
def test_linear_affine_system_empty_matrices(self):
# Confirm the default values for the system matrices in the
# constructor.
def CheckSizes(system, num_states, num_inputs, num_outputs):
self.assertEqual(system.num_continuous_states(), num_states)
self.assertEqual(system.num_inputs(), num_inputs)
self.assertEqual(system.num_outputs(), num_outputs)
# A constant vector system.
system = AffineSystem(y0=[2, 1])
CheckSizes(system, num_states=0, num_inputs=0, num_outputs=2)
# A matrix gain.
system = AffineSystem(D=np.eye(2))
CheckSizes(system, num_states=0, num_inputs=2, num_outputs=2)
system = LinearSystem(D=np.eye(2))
CheckSizes(system, num_states=0, num_inputs=2, num_outputs=2)
# Add an offset.
system = AffineSystem(D=np.eye(2), y0=[1, 2])
CheckSizes(system, num_states=0, num_inputs=2, num_outputs=2)
# An integrator.
system = LinearSystem(B=np.eye(2))
CheckSizes(system, num_states=2, num_inputs=2, num_outputs=0)
def test_linear_system_zero_size(self):
# Explicitly test #12633.
num_x = 0
num_y = 2
num_u = 2
A = np.zeros((num_x, num_x))
B = np.zeros((num_x, num_u))
C = np.zeros((num_y, num_x))
D = np.zeros((num_y, num_u))
self.assertIsNotNone(LinearSystem(A, B, C, D))
@numpy_compare.check_nonsymbolic_types
def test_linear_transform_density(self, T):
dut = LinearTransformDensity_[T](
distribution=RandomDistribution.kGaussian,
input_size=3,
output_size=3)
w_in = np.array([T(0.5), T(0.1), T(1.5)])
context = dut.CreateDefaultContext()
dut.get_input_port_w_in().FixValue(context, w_in)
self.assertEqual(dut.get_input_port_A().size(), 9)
self.assertEqual(dut.get_input_port_b().size(), 3)
self.assertEqual(dut.get_distribution(), RandomDistribution.kGaussian)
A = np.array([
[T(0.5), T(1), T(2)], [T(1), T(2), T(3)], [T(3), T(4), T(5)]])
dut.FixConstantA(context=context, A=A)
b = np.array([T(1), T(2), T(3)])
dut.FixConstantB(context=context, b=b)
dut.CalcDensity(context=context)
self.assertEqual(dut.get_output_port_w_out().size(), 3)
self.assertEqual(dut.get_output_port_w_out_density().size(), 1)
def test_vector_pass_through(self):
model_value = BasicVector([1., 2, 3])
system = PassThrough(vector_size=model_value.size())
context = system.CreateDefaultContext()
system.get_input_port(0).FixValue(context, model_value)
output = system.AllocateOutput()
input_eval = system.EvalVectorInput(context, 0)
compare_value(self, input_eval, model_value)
system.CalcOutput(context, output)
output_value = output.get_vector_data(0)
compare_value(self, output_value, model_value)
def test_default_vector_pass_through(self):
model_value = [1., 2, 3]
system = PassThrough(value=model_value)
context = system.CreateDefaultContext()
np.testing.assert_array_equal(
model_value, system.get_output_port().Eval(context))
def test_abstract_pass_through(self):
model_value = Value("Hello world")
system = PassThrough(abstract_model_value=model_value)
context = system.CreateDefaultContext()
system.get_input_port(0).FixValue(context, model_value)
output = system.AllocateOutput()
input_eval = system.EvalAbstractInput(context, 0)
compare_value(self, input_eval, model_value)
system.CalcOutput(context, output)
output_value = output.get_data(0)
compare_value(self, output_value, model_value)
def test_port_switch(self):
system = PortSwitch(vector_size=2)
a = system.DeclareInputPort(name="a")
system.DeclareInputPort(name="b")
context = system.CreateDefaultContext()
self.assertIsInstance(a, InputPort)
system.get_port_selector_input_port().FixValue(context, a.get_index())
def test_first_order_low_pass_filter(self):
filter1 = FirstOrderLowPassFilter(time_constant=3.0, size=4)
self.assertEqual(filter1.get_time_constant(), 3.0)
alpha = np.array([1, 2, 3])
filter2 = FirstOrderLowPassFilter(time_constants=alpha)
np.testing.assert_array_equal(filter2.get_time_constants_vector(),
alpha)
context = filter2.CreateDefaultContext()
filter2.set_initial_output_value(context, [0., -0.2, 0.4])
def test_gain(self):
k = 42.
input_size = 10
systems = [Gain(k=k, size=input_size),
Gain(k=k*np.ones(input_size))]
for system in systems:
context = system.CreateDefaultContext()
output = system.AllocateOutput()
def mytest(input, expected):
system.get_input_port(0).FixValue(context, input)
system.CalcOutput(context, output)
self.assertTrue(np.allclose(output.get_vector_data(
0).CopyToVector(), expected))
test_input = np.arange(input_size)
mytest(np.arange(input_size), k*np.arange(input_size))
def test_saturation(self):
system = Saturation((0., -1., 3.), (1., 2., 4.))
context = system.CreateDefaultContext()
output = system.AllocateOutput()
def mytest(input, expected):
system.get_input_port(0).FixValue(context, input)
system.CalcOutput(context, output)
self.assertTrue(np.allclose(output.get_vector_data(
0).CopyToVector(), expected))
mytest((-5., 5., 4.), (0., 2., 4.))
mytest((.4, 0., 3.5), (.4, 0., 3.5))
def test_trajectory_source(self):
ppt = PiecewisePolynomial.FirstOrderHold(
[0., 1.], [[2., 3.], [2., 1.]])
system = TrajectorySource(trajectory=ppt,
output_derivative_order=0,
zero_derivatives_beyond_limits=True)
context = system.CreateDefaultContext()
output = system.AllocateOutput()
def mytest(input, expected):
context.SetTime(input)
system.CalcOutput(context, output)
self.assertTrue(np.allclose(output.get_vector_data(
0).CopyToVector(), expected))
mytest(0.0, (2.0, 2.0))
mytest(0.5, (2.5, 1.5))
mytest(1.0, (3.0, 1.0))
ppt2 = PiecewisePolynomial.FirstOrderHold(
[0., 1.], [[4., 6.], [4., 2.]])
system.UpdateTrajectory(trajectory=ppt2)
mytest(0.0, (4.0, 4.0))
mytest(0.5, (5.0, 3.0))
mytest(1.0, (6.0, 2.0))
def test_symbolic_vector_system(self):
t = Variable("t")
x = [Variable("x0"), Variable("x1")]
u = [Variable("u0"), Variable("u1")]
system = SymbolicVectorSystem(time=t, state=x, input=u,
dynamics=[x[0] + x[1], t],
output=[u[1]],
time_period=0.0)
context = system.CreateDefaultContext()
self.assertEqual(context.num_continuous_states(), 2)
self.assertEqual(context.num_discrete_state_groups(), 0)
self.assertEqual(system.get_input_port(0).size(), 2)
self.assertEqual(system.get_output_port(0).size(), 1)
self.assertEqual(context.num_abstract_parameters(), 0)
self.assertEqual(context.num_numeric_parameter_groups(), 0)
self.assertTrue(system.dynamics_for_variable(x[0])
.EqualTo(x[0] + x[1]))
self.assertTrue(system.dynamics_for_variable(x[1])
.EqualTo(t))
def test_symbolic_vector_system_parameters(self):
t = Variable("t")
x = [Variable("x0"), Variable("x1")]
u = [Variable("u0"), Variable("u1")]
p = [Variable("p0"), Variable("p1")]
system = SymbolicVectorSystem(time=t, state=x, input=u,
parameter=p,
dynamics=[p[0] * x[0] + x[1] + p[1], t],
output=[u[1]],
time_period=0.0)
context = system.CreateDefaultContext()
self.assertEqual(context.num_continuous_states(), 2)
self.assertEqual(context.num_discrete_state_groups(), 0)
self.assertEqual(system.get_input_port(0).size(), 2)
self.assertEqual(system.get_output_port(0).size(), 1)
self.assertEqual(context.num_abstract_parameters(), 0)
self.assertEqual(context.num_numeric_parameter_groups(), 1)
self.assertEqual(context.get_numeric_parameter(0).size(), 2)
self.assertTrue(system.dynamics_for_variable(x[0])
.EqualTo(p[0] * x[0] + x[1] + p[1]))
self.assertTrue(system.dynamics_for_variable(x[1])
.EqualTo(t))
def test_wrap_to_system(self):
system = WrapToSystem(2)
system.set_interval(1, 1., 2.)
context = system.CreateDefaultContext()
output = system.AllocateOutput()
def mytest(input, expected):
system.get_input_port(0).FixValue(context, input)
system.CalcOutput(context, output)
self.assertTrue(np.allclose(output.get_vector_data(
0).CopyToVector(), expected))
mytest((-1.5, 0.5), (-1.5, 1.5))
mytest((.2, .3), (.2, 1.3))
def test_demultiplexer(self):
# Test demultiplexer with scalar outputs.
demux = Demultiplexer(size=4)
context = demux.CreateDefaultContext()
self.assertEqual(demux.num_input_ports(), 1)
self.assertEqual(demux.num_output_ports(), 4)
numpy_compare.assert_equal(demux.get_output_ports_sizes(),
[1, 1, 1, 1])
input_vec = np.array([1., 2., 3., 4.])
demux.get_input_port(0).FixValue(context, input_vec)
output = demux.AllocateOutput()
demux.CalcOutput(context, output)
for i in range(4):
self.assertTrue(
np.allclose(output.get_vector_data(i).get_value(),
input_vec[i]))
# Test demultiplexer with vector outputs.
demux = Demultiplexer(size=4, output_ports_size=2)
context = demux.CreateDefaultContext()
self.assertEqual(demux.num_input_ports(), 1)
self.assertEqual(demux.num_output_ports(), 2)
numpy_compare.assert_equal(demux.get_output_ports_sizes(), [2, 2])
demux.get_input_port(0).FixValue(context, input_vec)
output = demux.AllocateOutput()
demux.CalcOutput(context, output)
for i in range(2):
self.assertTrue(
np.allclose(output.get_vector_data(i).get_value(),
input_vec[2*i:2*i+2]))
# Test demultiplexer with different output port sizes.
output_ports_sizes = np.array([1, 2, 1])
num_output_ports = output_ports_sizes.size
input_vec = np.array([1., 2., 3., 4.])
demux = Demultiplexer(output_ports_sizes=output_ports_sizes)
context = demux.CreateDefaultContext()
self.assertEqual(demux.num_input_ports(), 1)
self.assertEqual(demux.num_output_ports(), num_output_ports)
numpy_compare.assert_equal(demux.get_output_ports_sizes(),
output_ports_sizes)
demux.get_input_port(0).FixValue(context, input_vec)
output = demux.AllocateOutput()
demux.CalcOutput(context, output)
output_port_start = 0
for i in range(num_output_ports):
output_port_size = output.get_vector_data(i).size()
self.assertTrue(
np.allclose(output.get_vector_data(i).get_value(),
input_vec[output_port_start:
output_port_start+output_port_size]))
output_port_start += output_port_size
def test_multiplexer(self):
my_vector = MyVector2(data=[1., 2.])
test_cases = [
dict(has_vector=False, mux=Multiplexer(num_scalar_inputs=4),
data=[[5.], [3.], [4.], [2.]]),
dict(has_vector=False, mux=Multiplexer(input_sizes=[2, 3]),
data=[[8., 4.], [3., 6., 9.]]),
dict(has_vector=True, mux=Multiplexer(model_vector=my_vector),
data=[[42.], [3.]]),
]
for case in test_cases:
mux = case['mux']
port_size = sum([len(vec) for vec in case['data']])
self.assertEqual(mux.get_output_port(0).size(), port_size)
context = mux.CreateDefaultContext()
output = mux.AllocateOutput()
num_ports = len(case['data'])
self.assertEqual(context.num_input_ports(), num_ports)
for j, vec in enumerate(case['data']):
mux.get_input_port(j).FixValue(context, vec)
mux.CalcOutput(context, output)
self.assertTrue(
np.allclose(output.get_vector_data(0).get_value(),
[elem for vec in case['data'] for elem in vec]))
if case['has_vector']:
# Check the type matches MyVector2.
value = output.get_vector_data(0)
self.assertTrue(isinstance(value, MyVector2))
def test_multilayer_perceptron(self):
mlp = MultilayerPerceptron(
layers=[1, 2, 3], activation_type=PerceptronActivationType.kReLU)
self.assertEqual(mlp.get_input_port().size(), 1)
self.assertEqual(mlp.get_output_port().size(), 3)
context = mlp.CreateDefaultContext()
params = np.zeros((mlp.num_parameters(), 1))
self.assertEqual(mlp.num_parameters(), 13)
self.assertEqual(mlp.layers(), [1, 2, 3])
self.assertEqual(mlp.activation_type(layer=0),
PerceptronActivationType.kReLU)
self.assertEqual(len(mlp.GetParameters(context=context)),
mlp.num_parameters())
mlp.SetWeights(context=context, layer=0, W=np.array([[1], [2]]))
mlp.SetBiases(context=context, layer=0, b=[3, 4])
np.testing.assert_array_equal(
mlp.GetWeights(context=context, layer=0), np.array([[1], [2]]))
np.testing.assert_array_equal(
mlp.GetBiases(context=context, layer=0), np.array([3, 4]))
params = np.zeros(mlp.num_parameters())
mlp.SetWeights(params=params, layer=0, W=np.array([[1], [2]]))
mlp.SetBiases(params=params, layer=0, b=[3, 4])
np.testing.assert_array_equal(
mlp.GetWeights(params=params, layer=0), np.array([[1], [2]]))
np.testing.assert_array_equal(
mlp.GetBiases(params=params, layer=0), np.array([3, 4]))
mutable_params = mlp.GetMutableParameters(context=context)
mutable_params[:] = 3.0
np.testing.assert_array_equal(mlp.GetParameters(context),
np.full(mlp.num_parameters(), 3.0))
global called_loss
called_loss = False
def silly_loss(Y, dloss_dY):
global called_loss
called_loss = True
# We must be careful to update the dloss in place, rather than bind
# a new matrix to the same variable name.
dloss_dY[:] = 1
# dloss_dY = np.array(...etc...) # <== wrong
return Y.sum()
dloss_dparams = np.zeros((13,))
generator = RandomGenerator(23)
mlp.SetRandomContext(context, generator)
mlp.Backpropagation(context=context,
X=np.array([1, 3, 4]).reshape((1, 3)),
loss=silly_loss,
dloss_dparams=dloss_dparams)
self.assertTrue(called_loss)
self.assertTrue(dloss_dparams.any()) # No longer all zero.
dloss_dparams = np.zeros((13,))
mlp.BackpropagationMeanSquaredError(context=context,
X=np.array([1, 3, 4]).reshape(
(1, 3)),
Y_desired=np.eye(3),
dloss_dparams=dloss_dparams)
self.assertTrue(dloss_dparams.any()) # No longer all zero.
Y = np.asfortranarray(np.eye(3))
mlp.BatchOutput(context=context, X=np.array([[0.1, 0.3, 0.4]]), Y=Y)
self.assertFalse(np.allclose(Y, np.eye(3)))
Y2 = mlp.BatchOutput(context=context, X=np.array([[0.1, 0.3, 0.4]]))
np.testing.assert_array_equal(Y, Y2)
mlp2 = MultilayerPerceptron(layers=[3, 2, 1],
activation_types=[
PerceptronActivationType.kReLU,
PerceptronActivationType.kTanh
])
self.assertEqual(mlp2.activation_type(0),
PerceptronActivationType.kReLU)
self.assertEqual(mlp2.activation_type(1),
PerceptronActivationType.kTanh)
Y = np.asfortranarray(np.full((1, 3), 2.4))
dYdX = np.asfortranarray(np.full((3, 3), 5.3))
context2 = mlp2.CreateDefaultContext()
mlp2.BatchOutput(context=context2, X=np.eye(3), Y=Y, dYdX=dYdX)
# The default context sets the weights and biases to zero, so the
# output (and gradients) should be zero.
np.testing.assert_array_almost_equal(Y, np.zeros((1, 3)))
np.testing.assert_array_almost_equal(dYdX, np.zeros((3, 3)))
mlp = MultilayerPerceptron(use_sin_cos_for_input=[True, False],
remaining_layers=[3, 2],
activation_types=[
PerceptronActivationType.kReLU,
PerceptronActivationType.kTanh
])
self.assertEqual(mlp.get_input_port().size(), 2)
np.testing.assert_array_equal(mlp.layers(), [3, 3, 2])
def test_random_source(self):
source = RandomSource(distribution=RandomDistribution.kUniform,
num_outputs=2, sampling_interval_sec=0.01)
self.assertEqual(source.get_output_port(0).size(), 2)
builder = DiagramBuilder()
# Note: There are no random inputs to add to the empty diagram, but it
# confirms the API works.
AddRandomInputs(sampling_interval_sec=0.01, builder=builder)
builder_ad = DiagramBuilder_[AutoDiffXd]()
AddRandomInputs(sampling_interval_sec=0.01, builder=builder_ad)
def test_constant_vector_source(self):
source = ConstantVectorSource(source_value=[1., 2.])
context = source.CreateDefaultContext()
source.get_source_value(context)
source.get_mutable_source_value(context)
def test_ctor_api(self):
"""Tests construction of systems for systems whose executions semantics
are not tested above.
"""
ConstantValueSource(Value("Hello world"))
DiscreteTimeDelay(update_sec=0.1, delay_time_steps=5, vector_size=2)
DiscreteTimeDelay(
update_sec=0.1, delay_time_steps=5,
abstract_model_value=Value("Hello world"))
with catch_drake_warnings(expected_count=2) as w:
DiscreteTimeDelay(update_sec=0.1, delay_timesteps=5, vector_size=2)
DiscreteTimeDelay(
update_sec=0.1, delay_timesteps=5,
abstract_model_value=Value("Hello world"))
ZeroOrderHold(period_sec=0.1, offset_sec=0.0, vector_size=2)
dut = ZeroOrderHold(period_sec=1.0, offset_sec=0.25,
abstract_model_value=Value("Hello world"))
self.assertEqual(dut.period(), 1.0)
self.assertEqual(dut.offset(), 0.25)
def test_shared_pointer_system_ctor(self):
dut = SharedPointerSystem(value_to_hold=[1, 2, 3])
readback = dut.get()
self.assertListEqual(readback, [1, 2, 3])
del dut
self.assertListEqual(readback, [1, 2, 3])
def test_shared_pointer_system_builder(self):
builder = DiagramBuilder()
self.assertListEqual(
SharedPointerSystem.AddToBuilder(
builder=builder, value_to_hold=[1, 2, 3]),
[1, 2, 3])
diagram = builder.Build()
del builder
readback = diagram.GetSystems()[0].get()
self.assertListEqual(readback, [1, 2, 3])
del diagram
self.assertListEqual(readback, [1, 2, 3])
def test_sine(self):
# Test scalar output.
sine_source = Sine(amplitude=1, frequency=2, phase=3,
size=1, is_time_based=True)
self.assertEqual(sine_source.get_output_port(0).size(), 1)
self.assertEqual(sine_source.get_output_port(1).size(), 1)
self.assertEqual(sine_source.get_output_port(2).size(), 1)
# Test vector output.
sine_source = Sine(amplitude=1, frequency=2, phase=3,
size=3, is_time_based=True)
self.assertEqual(sine_source.get_output_port(0).size(), 3)
self.assertEqual(sine_source.get_output_port(1).size(), 3)
self.assertEqual(sine_source.get_output_port(2).size(), 3)
sine_source = Sine(amplitudes=np.ones(2), frequencies=np.ones(2),
phases=np.ones(2), is_time_based=True)
self.assertEqual(sine_source.get_output_port(0).size(), 2)
self.assertEqual(sine_source.get_output_port(1).size(), 2)
self.assertEqual(sine_source.get_output_port(2).size(), 2)
def test_discrete_derivative(self):
discrete_derivative = DiscreteDerivative(num_inputs=5, time_step=0.5)
self.assertEqual(discrete_derivative.get_input_port(0).size(), 5)
self.assertEqual(discrete_derivative.get_output_port(0).size(), 5)
self.assertEqual(discrete_derivative.time_step(), 0.5)
self.assertTrue(discrete_derivative.suppress_initial_transient())
discrete_derivative = DiscreteDerivative(
num_inputs=5, time_step=0.5, suppress_initial_transient=False)
self.assertFalse(discrete_derivative.suppress_initial_transient())
def test_state_interpolator_with_discrete_derivative(self):
state_interpolator = StateInterpolatorWithDiscreteDerivative(
num_positions=5, time_step=0.4)
self.assertEqual(state_interpolator.get_input_port(0).size(), 5)
self.assertEqual(state_interpolator.get_output_port(0).size(), 10)
self.assertTrue(state_interpolator.suppress_initial_transient())
# test set_initial_position using context
context = state_interpolator.CreateDefaultContext()
state_interpolator.set_initial_position(
context=context, position=5*[1.1])
np.testing.assert_array_equal(
context.get_discrete_state(0).CopyToVector(),
np.array(5*[1.1]))
np.testing.assert_array_equal(
context.get_discrete_state(1).CopyToVector(),
np.array(5*[1.1]))
# test set_initial_position using state
context = state_interpolator.CreateDefaultContext()
state_interpolator.set_initial_position(
state=context.get_state(), position=5*[1.3])
np.testing.assert_array_equal(
context.get_discrete_state(0).CopyToVector(),
np.array(5*[1.3]))
np.testing.assert_array_equal(
context.get_discrete_state(1).CopyToVector(),
np.array(5*[1.3]))
state_interpolator = StateInterpolatorWithDiscreteDerivative(
num_positions=5, time_step=0.4, suppress_initial_transient=True)
self.assertTrue(state_interpolator.suppress_initial_transient())
@numpy_compare.check_nonsymbolic_types
def test_log_vector_output(self, T):
# Add various redundant loggers to a system, to exercise the
# LogVectorOutput bindings.
builder = DiagramBuilder_[T]()
kSize = 1
integrator = builder.AddSystem(Integrator_[T](kSize))
port = integrator.get_output_port(0)
loggers = []
loggers.append(LogVectorOutput(port, builder))
loggers.append(LogVectorOutput(src=port, builder=builder))
loggers.append(LogVectorOutput(port, builder, 0.125))
loggers.append(LogVectorOutput(
src=port, builder=builder, publish_period=0.125))
loggers.append(LogVectorOutput(port, builder, {TriggerType.kForced}))
loggers.append(LogVectorOutput(
src=port, builder=builder, publish_triggers={TriggerType.kForced}))
loggers.append(LogVectorOutput(
port, builder, {TriggerType.kPeriodic}, 0.125))
loggers.append(LogVectorOutput(
src=port, builder=builder,
publish_triggers={TriggerType.kPeriodic}, publish_period=0.125))
# Check the returned loggers by calling some trivial methods.
diagram = builder.Build()
context = diagram.CreateDefaultContext()
self.assertTrue(all(logger.FindLog(context).num_samples() == 0
for logger in loggers))
@numpy_compare.check_nonsymbolic_types
def test_vector_log(self, T):
kSize = 1
dut = VectorLog(kSize)
self.assertEqual(dut.get_input_size(), kSize)
dut.AddData(0.1, [22.22])
self.assertEqual(dut.num_samples(), 1)
self.assertEqual(dut.sample_times(), [0.1])
self.assertEqual(dut.data(), [22.22])
dut.Clear()
self.assertEqual(dut.num_samples(), 0)
# There is no good way from python to test the semantics of Reserve(),
# but test the binding anyway.
dut.Reserve(VectorLog.kDefaultCapacity * 3)
@numpy_compare.check_nonsymbolic_types
def test_vector_log_sink(self, T):
# Add various redundant loggers to a system, to exercise the
# VectorLog constructor bindings.
builder = DiagramBuilder_[T]()
kSize = 1
constructors = [VectorLogSink_[T]]
loggers = []
if T == float:
constructors.append(VectorLogSink)
for constructor in constructors:
loggers.append(builder.AddSystem(constructor(kSize)))
loggers.append(builder.AddSystem(constructor(input_size=kSize)))
loggers.append(builder.AddSystem(constructor(kSize, 0.125)))
loggers.append(builder.AddSystem(
constructor(input_size=kSize, publish_period=0.125)))
loggers.append(builder.AddSystem(
constructor(kSize, {TriggerType.kForced})))
loggers.append(builder.AddSystem(
constructor(input_size=kSize,
publish_triggers={TriggerType.kForced})))
loggers.append(builder.AddSystem(
constructor(kSize, {TriggerType.kPeriodic}, 0.125)))
loggers.append(builder.AddSystem(
constructor(input_size=kSize,
publish_triggers={TriggerType.kPeriodic},
publish_period=0.125)))
# Exercise all of the log access methods.
diagram = builder.Build()
context = diagram.CreateDefaultContext()
# FindLog and FindMutableLog find the same object.
self.assertTrue(
all(logger.FindLog(context) == logger.FindMutableLog(context)
for logger in loggers))
# Build a list of pairs of loggers and their local contexts.
loggers_and_contexts = [(x, x.GetMyContextFromRoot(context))
for x in loggers]
# GetLog and GetMutableLog find the same object.
self.assertTrue(
all(logger.GetLog(logger_context)
== logger.GetMutableLog(logger_context)
for logger, logger_context in loggers_and_contexts))
# GetLog and FindLog find the same object, given the proper contexts.
self.assertTrue(
all(logger.GetLog(logger_context) == logger.FindLog(context)
for logger, logger_context in loggers_and_contexts))
|
5,283 | 50f6bcb4d2223d864cca92778ab3483a2d2c3214 | __author__ = 'christopher'
import fabio
import pyFAI
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from pims.tiff_stack import TiffStack_tifffile as TiffStack
from skxray.io.save_powder_output import save_output
from xpd_workflow.mask_tools import *
# Load the detector geometry calibrated from the Ni standard (.poni file).
geo = pyFAI.load(
    '/mnt/bulk-data/research_data/USC_beamtime/08-05-2015/2015-08-05/Ni_STD/Ni_PDF_60s-00000.poni')
# Per-pixel q-resolution and q value for a 2048x2048 detector image.
dq = geo.deltaQ((2048, 2048))
q = geo.qArray((2048, 2048))
bins = 8000
# plt.imshow(dq)
# plt.show()
# AAA
# dq_mean = sts.binned_statistic(q.ravel(), dq.ravel(), bins=bins,
# range=[0, q.max()], statistic='mean')
# dq_median = sts.binned_statistic(q.ravel(), dq.ravel(), bins=bi
# range=[0, q.max()], statistic='median')
# plt.plot(dq_mean[1][:-1], dq_mean[0])
# plt.plot(dq_median[1][:-1], dq_median[0])
# plt.show()
# NOTE(review): despite the name, `r` is another q-array, not a radius
# array -- confirm whether rArray() was intended.
r = geo.qArray((2048, 2048))
nr = r / np.max(r)
# Synthetic test pattern: damped sine rings over the normalized q map.
img = np.sin(nr * np.pi * 3) * np.exp(-10 * nr)
ideal_img = dc(img)
smax = np.max(img)
smin = np.min(img)
bad_pixels = []
'''
for i in xrange(np.random.randint(1000, 2000)):
    x, y = np.random.randint(0, 2048), np.random.randint(0, 2048)
    if np.random.random() >= .5:
        img[x, y] = smax * 3
    else:
        img[x, y] = smin * 3
    bad_pixels.append([x, y])
'''
plt.imshow(img, vmin=smin, vmax=smax)
plt.show()
# plt.imshow(idsr - dsr)
# plt.show()
# ideal_median = sts.binned_statistic(q.ravel(), ideal_img.ravel(), bins=bins,
# range=[0, q.max()], statistic='median')
#
# ideal_mean = sts.binned_statistic(q.ravel(), ideal_img.ravel(), bins=bins,
# range=[0, q.max()], statistic='mean')
# ideal_std = sts.binned_statistic(q.ravel(), ideal_img.ravel(), bins=bins,
# range=[0, q.max()], statistic=np.std)
# median = sts.binned_statistic(q.ravel(), img.ravel(), bins=bins,
# range=[0, q.max()], statistic='median')
#
# mean = sts.binned_statistic(q.ravel(), img.ravel(), bins=bins,
# range=[0, q.max()], statistic='mean')
# std = sts.binned_statistic(q.ravel(), img.ravel(), bins=bins,
# range=[0, q.max()], statistic=np.std)
# plt.plot(ideal_mean[1][:-1], ideal_mean[0], label='ideal mean')
# plt.plot(ideal_median[1][:-1], ideal_median[0], label='ideal median')
# plt.plot(ideal_std[1][:-1], ideal_std[0], label='ideal std')
# plt.legend()
# plt.show()
# plt.plot(mean[1][:-1], mean[0], label='mean')
# plt.plot(median[1][:-1], median[0], label='median')
# # plt.plot(std[1][:-1], std[0], label='ideal std')
# plt.legend()
# plt.show()
# Ground-truth mask: pixels where the corrupted image differs from the ideal.
perfect_mask = (img - ideal_img) != 0
# Compare ring_blur_mask against the ground truth for several thresholds.
for i in [10,
          # 9, 8, 7, 6, 5, 4.5, 4
          ]:
    rbmsk = ring_blur_mask(img, geo, i)
    print i
    print 'good mask', np.sum(perfect_mask == rbmsk)
    print 'under masked', np.sum(perfect_mask > rbmsk)
    print 'over masked', np.sum(perfect_mask < rbmsk)
    print
# '''
plt.imshow(img, interpolation='none', origin='lower', aspect='auto')
for y, x in bad_pixels:
    plt.plot(x, y, 'ro', mfc='r', mec='r', ms=10)
for y, x in zip(
        np.where(rbmsk != 0)[0],
        np.where(rbmsk != 0)[1]
):
    plt.plot(x, y, 'go', mfc='g', mec='g', ms=5)
plt.show()
# '''
print q[1907, 173], q[173, 1907]
# Histogram of intensities within one q ring, with mean +/- std markers.
_, hist_bins, _ = plt.hist(img[np.where((q > 313.) & (q < 314.))], bins=50)
plt.axvline(np.mean(img[np.where((q > 313.) & (q < 314.))]), color='r')
plt.axvline(np.mean(img[np.where((q > 313.) & (q < 314.))]) + np.std(img[np.where((q > 313.) & (q < 314.))]))
plt.axvline(np.mean(img[np.where((q > 313.) & (q < 314.))]) - np.std(img[np.where((q > 313.) & (q < 314.))]))
# plt.hist(img[np.where((q > 287.) & (q < 288.) & (rbmsk != 1))],
# bins=50
# bins=hist_bins
# )
plt.show()
'''
mr = dc(q)
mr[rbmsk.astype(bool)] = -1
msk_median = sts.binned_statistic(mr.ravel(), img.ravel(), bins=bins,
                                  range=[0, mr.max()], statistic='median')
msk_mean = sts.binned_statistic(mr.ravel(), img.ravel(), bins=bins,
                                range=[0, mr.max()], statistic='mean')
msk_std = sts.binned_statistic(mr.ravel(), img.ravel(), bins=bins,
                               range=[0, mr.max()], statistic=np.std)
plt.plot(msk_mean[1][:-1], msk_mean[0], label='mean')
plt.plot(msk_median[1][:-1], msk_median[0], label='median')
# plt.plot(std[1][:-1], std[0], label='ideal std')
plt.legend()
plt.show()
# '''
|
5,284 | f45313e4e8f3ecba0c7dc0288d9d5ec4e26f0ba6 | # Goal: Let's Review
# Enter your code here. Read input from STDIN. Print output to STDOUT
# Read T test cases; for each string, print its even-indexed characters
# and its odd-indexed characters separated by a single space.
T = int(input())
for _ in range(T):
    s = input()
    # s[::2] collects indices 0, 2, 4, ...; s[1::2] collects 1, 3, 5, ...
    # Slicing avoids the quadratic string concatenation (and the shadowed
    # loop variable) of a manual character-by-character loop.
    print(s[::2], s[1::2])
5,285 | 7c82565a4184b2e779e2bb6ba70b497cc287af35 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 11 14:55:12 2019
@author: Furankyyy
"""
import numpy as np
import matplotlib.pyplot as plt
import timeit
###worst sort function###
#define the function that checks whether the list is in ascending order
def right_permutation(arr):
    """Return True if ``arr`` is sorted in non-decreasing order.

    Empty and single-element sequences are considered sorted.  (The
    original for/break/else version returned False for an empty input,
    an edge-case bug; it also only worked by checking ``i == len-2``
    inside the loop, which this ``all``-based form makes unnecessary.)
    """
    return all(arr[i] <= arr[i + 1] for i in range(len(arr) - 1))
#define the worst sort function
def worstsort(arr):
    """Bogosort: draw random permutations of ``arr`` until one is ordered.

    Expected O(n!) attempts for distinct elements; returns the first
    permutation that right_permutation accepts.
    """
    candidate = np.random.permutation(arr)
    while not right_permutation(candidate):
        candidate = np.random.permutation(arr)
    return candidate
#test cases
test1=[5,4,3,2,1]
test2=[1,2,3,4,5] #best case
test3=[2,2,2,2,2] #best case as well!
test4=[2] #only one element
print(worstsort(test1))
print(worstsort(test2))
print(worstsort(test3))
print(worstsort(test4))
#the best case is when the input list is already sorted, in this case, we only need to run the right_permutation once
#we have a for loop in right_permutation, so the best case complexity is O(n)
#given a random input of size n, the chance that the input x_k is correctly sorted is Pr(x_k) = 1/P_n = 1/n!
#since in this worst algorithm, we do not "remember" the permutations that we've already checked
#so each time, the Pr(sorted) remains the same 1/n!
#Then we would expect to have n! times to have the corrected sorted list
#the reason is that we have E[Pr(x1)+Pr(x2)+...+Pr(x_k)]=1, since Pr(x_k)=1/n!, we would expect k = n!
#this reasoning is the same as the random indicator variable in the book, where we have the pr(I) for each choice (permutation) and we sum them to find the expected value
#so the averaage case complexity is O(n!)
#to calculate what n is best for this function
def factorial(n):
    """Return n! iteratively (0! == 1)."""
    product = 1
    for k in range(2, n + 1):
        product *= k
    return product
x=np.arange(0,7,1)
y_factorial=list(map(factorial,x))
y_compare=x*x
plt.plot(x,y_factorial,label="Factorial of n")
plt.plot(x,y_compare,label="n square")
plt.title("Complexity comparison")
plt.legend()
#from the plot we can see that for algorithms with comlexity of O(n^2) and O(n!), the difference comes when n=5
#when n=4, the two algorithms do not vary that much, but when n=5, they have a >100 times difference
#therefore, this method is feasible when n<=4
#p.s. constants are discounted (they are relatively unimportant)
###median finder###
#the worst case for the median finder is that the elements in the input list are unique
#the best case is that all elements are the same --> no matter which we choose, it is the median
#to consider the times we try before stopping, we need to consider the worst case --> all elements are different
#then the chance to find the exact median is 1/n
#the number of elements lying in the input deviation range x is x//(100/n)+1 for this worst case
#explanation: divide the 100% to n parts, if all elements are different then each element takes the 1 part, the x//(range for 1 part)+1 is the num of elements lying in the range
#therefore, the probability of choosing the element in the range given by x is (x//(100/n)+1)/n
#I want to try the expected times of choosing the correct element(s) for the worst case
#Pr(failure) for 1 try is 1-(x//(100/n)+1)/n
#Pr(failure) for the first k try is (1-(x//(100/n)+1)/n)^k, which scales with x and n.
#so the Pr(at least one success) for the first k try is 1-Pr(failure)=1-(1-(x//(100/n)+1)/n)^k
#we want to find a k taht makes this Pr large enough
#so we want to find a small k minimizing Pr(failure) for the first k try
#to simplify the problem, we regard x as constant and assume the "//" is "/"
#(1-(x//(100/n)+1)/n)^k = ((n-xn/100-1)/n)^k =(1-x/100-1/n)^k
#x/100 is a constant
#-->(1-1/n)^k
#when n is sufficiently large, (1-1/n) is nearly 1
#it is extremely hard to succeed if n is very large, I set the limit of k at 10000, simply because my laptop's computational ability
def median_finder(arr, x):
    """Randomly probe ``arr`` for an element within ``x`` percent of the median.

    The acceptance window is the [50 - x/2, 50 + x/2] percentile band.
    Gives up after 10000 random draws; returns an error string when
    ``abs(x) > 0.5`` or when no draw lands inside the band.
    """
    if abs(x) > 0.5:  # x outside the accepted deviation range
        return "x not in the domain"
    lower = np.percentile(arr, 50 - x / 2)
    upper = np.percentile(arr, 50 + x / 2)
    # Up to 10000 random probes, exactly as many attempts as the original loop.
    for _ in range(10000):
        pick = np.random.randint(0, len(arr))
        if lower <= arr[pick] <= upper:
            return arr[pick]
    return "Tried enough times, still cannot find the value"
#test cases
test1=list(np.random.permutation(200))
test2=[4]*100
test3=[5]*1000
test4=test2+test3
print(median_finder(test1,0.5)) #worst case, exactly 2 elements in the range
print(median_finder(test2,0.5)) #best case
print(median_finder(test2,0)) #best case
print(median_finder(test3,0.5)) #best case
print(median_finder(test4,0)) #1000/1100 probability
print(median_finder(test4,0.5)) #same as above.
#time complexity
#best case running time is O(1)
#the time complexity of the worst case running time is E[k]=Sum(E[ki])
#E[ki]=Pr(correct)=(x//(100/n)+1)/n
#sum is from 1 to the limit tried k
#since x is between 0 and 0.5, we simply regard it as constant
#we also assume the "//" is "/"
#then the expression becomes: E[k]= k*(xn/100+1)/n
#as n goes to infinity, we can solve this by trying to use L'Hopital's rule
#the result is kx/100, which is a constant
#O(1)
data=np.empty((1,2))
for i in range(200,1200,50):
testlist=list(np.random.permutation(i))
time=timeit.timeit(stmt="median_finder(testlist,0.5)",setup="from __main__ import median_finder,testlist",number=100)
time=time/100
stack=np.array((time,i))
data=np.vstack((data,stack))
data=data[1:]
plt.figure()
plt.ylim(0,0.01)
plt.scatter(x=data[:,1],y=data[:,0])
plt.xlabel("Inputsize")
plt.ylabel("Running time")
plt.title("Median finder running time")
#from the plot we can see that the running time is almost constant --> O(1)
#space complexity is O(n), because each time we just store the (sorted) list of length n |
5,286 | e2572b48f7183353ba2aab0500130dc8a71a0b22 |
# coding: utf-8
# In[50]:
## Description
## Adds the Fibonacci numbers smaller than 4 million
## Weekly Journal
## When using while True, "break" MUST be used to avoid infinite loops
## Questions
## None
# Sum the Fibonacci numbers (starting 1, 2) not exceeding 4 million.
fib = [1, 2]
# Grow the sequence until the last term passes the limit.
while fib[-1] <= 4000000:
    fib.append(fib[-1] + fib[-2])
# The loop appended exactly one term over the limit; drop it.
# (Replaces the original's unused `flag`/`counter` bookkeeping and
# the fib[0:len(fib)-1] copy with an in-place pop.)
fib.pop()
total = sum(fib)
print(total)
|
5,287 | e8f05a66c642ef3b570130a2996ca27efb8b0cb5 | """Time client"""
import urllib.request
import json
from datetime import datetime
# make sure that module51-server.py service is running
TIME_URL = "http://localhost:5000/"
def ex51():
    """Fetch the current time from the local service and print it."""
    with urllib.request.urlopen(TIME_URL) as response:
        payload = json.loads(response.read())
    current = datetime.fromisoformat(payload["currentTime"])
    formatted = current.strftime("%H:%M:%S %Z %B %m %d")
    print("The current time is %s" % formatted)
if __name__ == "__main__":
ex51() |
5,288 | 829c833866198307d7d19c4a0cbe40299ee14eb9 | from botocore_eb.model import ServiceModel
from botocore_eb.exceptions import ParamValidationError
from botocore_eb.exceptions import DataNotFoundError
from botocore_eb.exceptions import OperationNotPageableError
from botocore_eb import xform_name
from botocore_eb.paginate import Paginator
import botocore_eb.validate
import botocore_eb.serialize
import botocore_eb.parsers
class ClientError(Exception):
    """Raised when a service call returns an error response."""

    MSG_TEMPLATE = (
        'An error occurred ({error_code}) when calling the {operation_name} '
        'operation: {error_message}')

    def __init__(self, error_response, operation_name):
        # Build the human-readable message from the parsed error payload.
        error = error_response['Error']
        message = self.MSG_TEMPLATE.format(
            error_code=error['Code'],
            error_message=error['Message'],
            operation_name=operation_name)
        super(ClientError, self).__init__(message)
        # Keep the raw response available for callers that need details.
        self.response = error_response
class ClientCreator(object):
    """Creates client objects for a service.

    Builds a client class dynamically from the service's JSON model:
    one method per API operation, plus pagination helpers.
    """
    def __init__(self, loader, endpoint_creator):
        # loader: resolves JSON service/paginator models.
        # endpoint_creator: builds the HTTP endpoint the client talks to.
        self._loader = loader
        self._endpoint_creator = endpoint_creator
    def create_client(self, service_name, region_name, is_secure=True,
                      endpoint_url=None, verify=None):
        """Create and return a ready-to-use client instance."""
        service_model = self._load_service_model(service_name)
        cls = self.create_client_class(service_name)
        client_args = self._get_client_args(service_model, region_name, is_secure,
                                            endpoint_url, verify)
        return cls(**client_args)
    def create_client_class(self, service_name):
        """Build (but do not instantiate) the dynamic client class."""
        service_model = self._load_service_model(service_name)
        methods = self._create_methods(service_model)
        py_name_to_operation_name = self._create_name_mapping(service_model)
        self._add_pagination_methods(service_model, methods,
                                     py_name_to_operation_name)
        # Subclass BaseClient, named after the service, with one method
        # per operation plus the pagination helpers.
        cls = type(service_name, (BaseClient,), methods)
        return cls
    def _add_pagination_methods(self, service_model, methods, name_mapping):
        # Captured by the closures below, which become client methods.
        loader = self._loader
        def get_paginator(self, operation_name):
            """Create a paginator for an operation.
            :type operation_name: string
            :param operation_name: The operation name. This is the same name
                as the method name on the client. For example, if the
                method name is ``create_foo``, and you'd normally invoke the
                operation as ``client.create_foo(**kwargs)``, if the
                ``create_foo`` operation can be paginated, you can use the
                call ``client.get_paginator("create_foo")``.
            :raise OperationNotPageableError: Raised if the operation is not
                pageable. You can use the ``client.can_paginate`` method to
                check if an operation is pageable.
            :rtype: L{botocore.paginate.Paginator}
            :return: A paginator object.
            """
            # Note that the 'self' in this method refers to the self on
            # BaseClient, not on ClientCreator.
            if not self.can_paginate(operation_name):
                raise OperationNotPageableError(operation_name=operation_name)
            else:
                actual_operation_name = name_mapping[operation_name]
                paginator = Paginator(
                    getattr(self, operation_name),
                    self._cache['page_config'][actual_operation_name])
                return paginator
        def can_paginate(self, operation_name):
            """Check if an operation can be paginated.
            :type operation_name: string
            :param operation_name: The operation name. This is the same name
                as the method name on the client. For example, if the
                method name is ``create_foo``, and you'd normally invoke the
                operation as ``client.create_foo(**kwargs)``, if the
                ``create_foo`` operation can be paginated, you can use the
                call ``client.get_paginator("create_foo")``.
            :return: ``True`` if the operation can be paginated,
                ``False`` otherwise.
            """
            # Lazily load the paginator config once per client instance.
            if 'page_config' not in self._cache:
                try:
                    page_config = loader.load_data('aws/%s/%s.paginators' % (
                        service_model.endpoint_prefix,
                        service_model.api_version))['pagination']
                    self._cache['page_config'] = page_config
                except DataNotFoundError:
                    # No paginator file means no operation is pageable.
                    self._cache['page_config'] = {}
            actual_operation_name = name_mapping[operation_name]
            return actual_operation_name in self._cache['page_config']
        methods['get_paginator'] = get_paginator
        methods['can_paginate'] = can_paginate
    def _load_service_model(self, service_name):
        # Parse the on-disk JSON description into a ServiceModel.
        json_model = self._loader.load_service_model('aws/%s' % service_name)
        service_model = ServiceModel(json_model)
        return service_model
    def _get_client_args(self, service_model, region_name, is_secure,
                         endpoint_url, verify):
        # A client needs:
        #
        # * serializer
        # * endpoint
        # * response parser
        protocol = service_model.metadata['protocol']
        serializer = botocore_eb.serialize.create_serializer(
            protocol, include_validation=True)
        endpoint = self._endpoint_creator.create_endpoint(
            service_model, region_name, is_secure=is_secure,
            endpoint_url=endpoint_url, verify=verify)
        # Requires `import botocore_eb.parsers` at module scope.
        response_parser = botocore_eb.parsers.create_parser(protocol)
        return {
            'serializer': serializer,
            'endpoint': endpoint,
            'response_parser': response_parser
        }
    def _create_methods(self, service_model):
        # One snake_case wrapper per API operation.
        op_dict = {}
        for operation_name in service_model.operation_names:
            py_operation_name = xform_name(operation_name)
            op_dict[py_operation_name] = self._create_api_method(
                py_operation_name, operation_name, service_model)
        return op_dict
    def _create_name_mapping(self, service_model):
        # py_name -> OperationName
        mapping = {}
        for operation_name in service_model.operation_names:
            py_operation_name = xform_name(operation_name)
            mapping[py_operation_name] = operation_name
        return mapping
    def _create_api_method(self, py_operation_name, operation_name,
                           service_model):
        def _api_call(self, **kwargs):
            # Serialize kwargs, perform the request, and surface non-2xx
            # responses as ClientError.
            operation_model = service_model.operation_model(operation_name)
            request_dict = self._serializer.serialize_to_request(
                kwargs, operation_model)
            http, parsed_response = self._endpoint.make_request(
                operation_model, request_dict)
            if http.status_code >= 300:
                raise ClientError(parsed_response, operation_name)
            else:
                return parsed_response
        _api_call.__name__ = str(py_operation_name)
        # TODO: docstrings.
        return _api_call
class BaseClient(object):
    """Runtime container for a dynamically built service client.

    Operation methods and pagination helpers are attached by ClientCreator;
    this base only holds the injected collaborators.
    """

    def __init__(self, serializer, endpoint, response_parser):
        # Per-instance scratch space (e.g. cached pagination config).
        self._cache = {}
        self._response_parser = response_parser
        self._endpoint = endpoint
        self._serializer = serializer
|
5,289 | ac6f2287390bdad8fe20cdc73c0063f685970cfb | import sys
# Number of values to read from stdin.
n = int(input())
# Seed the running extrema with sentinel values so any input replaces them.
min_number = sys.maxsize
max_number = -sys.maxsize
for i in range(0, n):
    num = int(input())
    if num > max_number:
        max_number = num
    if num < min_number:
        min_number = num
print(f"Max number: {max_number}")
print(f"Min number: {min_number}") |
5,290 | 7f406c1cd4d56da3a7d5f8739e0b65b0e61cf637 | import time
# Returns time in seconds for func(arg) to run
def time_func(func, arg):
    """Return the elapsed wall-clock seconds taken by ``func(arg)``.

    Uses time.perf_counter() instead of time.time(): perf_counter is
    monotonic and high-resolution, so the interval cannot go negative
    if the system clock is adjusted mid-measurement.
    """
    start = time.perf_counter()
    func(arg)
    return time.perf_counter() - start
|
5,291 | 46e2955756cf1aea902f31685b258ffd14b2e62b | # -*- coding: utf-8 -*-
# @Time : 2021/5/12 2:48 下午
# @Author : shaoguowen
# @Email : shaoguowen@tencent.com
# @FileName: train.py
# @Software: PyCharm
import argparse
from mmcv import Config
import trainers
# Parse the incoming command-line arguments.
# NOTE(review): parsing happens at import time, so importing this module
# anywhere else will fail without a `config` argv entry — consider moving
# it under the __main__ guard.
parser = argparse.ArgumentParser(description='Train IVQA model')
parser.add_argument('config', help='train config file path')
args = parser.parse_args()
# Load the mmcv-style training configuration from the given file.
config = Config.fromfile(args.config)
if __name__ == '__main__':
    # Look up the trainer class by name from the config and run it.
    trainer = getattr(trainers, config.trainer.trainer_name)(config, args.config)
    trainer.run()
|
5,292 | cb0b963c0e5aadcb67b5ee5f055fb9b6f21892fc | import pandemic as pd
from typing import Sequence
def save_gml(path: str, peers: Sequence[pd.Peer]) -> bool:
    """Write the peers' GML graph to ``path``.

    NOTE(review): returns True on *failure* and False on success — the
    opposite of the usual convention. Confirm callers expect this before
    inverting it.
    """
    try:
        with open(path, "w") as file:
            file.write(graph(peers))
    except Exception:
        return True
    return False
def print_gml(peers: Sequence[pd.Peer]) -> None:
    """Print the peers' GML graph to stdout."""
    print(graph(peers))
def graph(peers: Sequence[pd.Peer]) -> str:
    """Serialize ``peers`` as a directed GML graph (all nodes, then all edges)."""
    parts = ['graph [\n', '\tdirected 1\n']
    parts.extend(node(peer) for peer in peers)
    parts.extend(edge(peer) for peer in peers)
    parts.append(']\n')
    return ''.join(parts)
def node(peer: pd.Peer) -> str:
    """Render one GML node block for an infected peer ("" when uninfected).

    Return annotation added for consistency with the sibling ``edge`` and
    ``node_label`` helpers.
    """
    if peer.data_infection is None:
        return ""
    return(
        '\t' + 'node [' + '\n' +
        '\t' + '\t' + 'id {}'.format(peer.id) + '\n' +
        '\t' + '\t' + 'label "{}"'.format(node_label(peer)) + '\n' +
        '\t' + ']' + '\n'
    )
def node_label(peer: pd.Peer) -> str:
    """Label a node with its patch epoch; empty when no patch data exists."""
    if peer.data_patch is None:
        return ""
    return str(peer.data_patch.epoch)
def edge(peer: pd.Peer) -> str:
    """Render one GML edge block for an infected peer ("" when uninfected)."""
    infection = peer.data_infection
    if infection is None:
        return ""
    lines = [
        '\tedge [',
        '\t\tsource {}'.format(infection.source),
        '\t\ttarget {}'.format(infection.target),
        '\t\tlabel "{}"'.format(infection.epoch),
        '\t]',
    ]
    return '\n'.join(lines) + '\n'
|
5,293 | 71a5ba520f8bc42e80d8f4ce8cf332bdd5fb96de | /Users/apple/miniconda3/lib/python3.7/sre_constants.py |
5,294 | a8b5cf45e5f75ae4b493f5fc9bb4555319f1a725 | import pytest
from moa.primitives import NDArray, UnaryOperation, BinaryOperation, Function
from moa.yaccer import build_parser
@pytest.mark.parametrize("expression,result", [
("< 1 2 3>", NDArray(shape=(3,), data=[1, 2, 3], constant=False)),
])
def test_parse_vector(expression, result):
parser = build_parser(start='vector')
assert parser.parse(expression) == result
@pytest.mark.parametrize("expression, result", [
("const array A^3 <4 3 5>", NDArray(
shape=(4, 3, 5), data=None, constant=True, identifier='A')),
])
def test_parse_constant_arrays(expression, result):
parser = build_parser(start='constant_array')
assert parser.parse(expression) == result
@pytest.mark.parametrize("expression, result", [
("array Zasdf_asdf^1 <3>", NDArray(
shape=(3,), data=None, constant=False, identifier='Zasdf_asdf')),
])
def test_parse_arrays(expression, result):
parser = build_parser(start='array')
assert parser.parse(expression) == result
@pytest.mark.parametrize("expression, result", [
("j psi x", BinaryOperation(
operator='PSI',
left=NDArray(shape=None, data=None, constant=False, identifier='j'),
right=NDArray(shape=None, data=None, constant=False, identifier='x'))),
("A omega <1 2>", BinaryOperation(
operator='OMEGA',
left=NDArray(shape=None, data=None, constant=False, identifier='A'),
right=NDArray(shape=(2,), data=[1, 2], constant=False, identifier=None))),
("A omega B cat C", BinaryOperation(
operator='CAT',
left=BinaryOperation(
operator='OMEGA',
left=NDArray(shape=None, data=None, constant=False, identifier='A'),
right=NDArray(shape=None, data=None, constant=False, identifier='B')),
right=NDArray(shape=None, data=None, constant=False, identifier='C'))),
("(A omega B) cat C", BinaryOperation(
operator='CAT',
left=BinaryOperation(
operator='OMEGA',
left=NDArray(shape=None, data=None, constant=False, identifier='A'),
right=NDArray(shape=None, data=None, constant=False, identifier='B')),
right=NDArray(shape=None, data=None, constant=False, identifier='C'))),
("dim A cat B", BinaryOperation(
operator='CAT',
left=UnaryOperation(
operator='DIM',
right=NDArray(shape=None, data=None, constant=False, identifier='A')),
right=NDArray(shape=None, data=None, constant=False, identifier='B'))),
("dim (A cat B)", UnaryOperation(
operator='DIM',
right=BinaryOperation(
operator='CAT',
left=NDArray(shape=None, data=None, constant=False, identifier='A'),
right=NDArray(shape=None, data=None, constant=False, identifier='B')))),
])
def test_parse_terms_and_operators(expression, result):
parser = build_parser(start='term')
assert parser.parse(expression) == result
@pytest.mark.parametrize("expression, result", [
('main(){}', Function(arguments=[], statements=[], identifier='main')),
('foo_bar(array A^1 <5>){}', Function(
arguments=[NDArray(shape=(5,), data=None, constant=False, identifier='A')],
statements=[],
identifier='foo_bar')),
('BizBAZZ(array A^2 < 3 5>, array B^3 <6 5 8>){}', Function(
arguments=[
NDArray(shape=(3, 5), data=None, constant=False, identifier='A'),
NDArray(shape=(6, 5, 8), data=None, constant=False, identifier='B')],
statements=[],
identifier='BizBAZZ')),
('A_2_3_a(array A^2 <9 1>, array B^2 <3 1>, array ASDF^1 <9>){}', Function(
arguments=[
NDArray(shape=(9, 1), data=None, constant=False, identifier='A'),
NDArray(shape=(3, 1), data=None, constant=False, identifier='B'),
NDArray(shape=(9,), data=None, constant=False, identifier='ASDF')],
statements=[],
identifier='A_2_3_a')),
])
def test_parse_function(expression, result):
parser = build_parser(start='function')
assert parser.parse(expression) == result
|
5,295 | 7ab9c530035185ee2250f3f6ce8cde87bdfd9803 | from django.conf.urls import url
from . import consumers
websocket_urlpatterns = [
url(r'^account/home', consumers.NotificationConsumer),
url(r'^fund/(?P<fund>[\w-]+)', consumers.NotificationConsumer),
url(r'^websockets', consumers.StreamConsumer),
] |
5,296 | 011dd579bb076ec094e9e3085aa321883c484f1c | import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from WeatherDL.data_maker import dataset_maker
from WeatherDL.model_maker import model_3
# Extract data from data_maker
X, y = dataset_maker(window=5, forecast_day=1)
(X_train, X_test, y_train, y_test) = train_test_split(X, y, test_size=0.2, shuffle=False)
# Open model from model_maker
model = model_3((5, 8, 20, 6))
print(model.summary())
# Fit model, and extract training & validation metrics
history = model.fit(X_train, y_train,
validation_data=(X_test, y_test),
batch_size=5,
epochs=30,
verbose=2,
shuffle=False)
# Prediction
y_pred = model.predict(X_test)
# Data Visualization
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('MSE')
plt.legend(['Train', 'Test'], loc='upper right')
plt.show()
plt.plot(history.history['mean_absolute_error'])
plt.plot(history.history['val_mean_absolute_error'])
plt.title('MAE')
plt.legend(['Train', 'Test'], loc='upper right')
plt.show()
plt.plot(history.history['mean_absolute_percentage_error'])
plt.plot(history.history['val_mean_absolute_percentage_error'])
plt.title('MAPE')
plt.legend(['Train', 'Test'], loc='upper right')
plt.show()
|
5,297 | 47f88bc3836490e08f464f71351096b54118420e | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.web_perf.metrics import timeline_based_metric
from telemetry.web_perf.metrics.trace_event_stats import TraceEventStats
from telemetry.web_perf.metrics.trace_event_stats import TraceEventStatsInput
class IndexedDBTimelineMetric(timeline_based_metric.TimelineBasedMetric):
  """Metrics for IndexedDB operations.
  """

  # (event_name, metric_name, metric_description) for each traced IndexedDB
  # operation.  All entries share the 'IndexedDB' event category, 'ms' units
  # and the 'Browser' process; factored into a table to remove seven
  # copy-pasted AddInput calls.
  _EVENTS = (
      ('IndexedDBDatabase::GetOperation', 'idb-gets',
       'The duration of all "get" ops in IndexedDB'),
      ('IndexedDBDatabase::PutOperation', 'idb-puts',
       'The duration of all "put" ops in IndexedDB'),
      ('IndexedDBFactoryImpl::Open', 'idb-opens',
       'The duration of all "open" ops in IndexedDB'),
      ('IndexedDBTransaction::Commit', 'idb-transaction-commits',
       'The duration of all "commit" ops of transactions in IndexedDB.'),
      ('IndexedDBFactoryImpl::DeleteDatabase', 'idb-database-deletes',
       'The duration of all "delete" ops of IndexedDB databases.'),
      ('IndexedDBDatabase::OpenCursorOperation', 'idb-cursor-opens',
       'The duration of all "open" ops of IndexedDB cursors.'),
      ('IndexedDBCursor::CursorIterationOperation', 'idb-cursor-iterations',
       'The duration of all "iteration" ops of IndexedDB cursors.'),
  )

  def __init__(self):
    super(IndexedDBTimelineMetric, self).__init__()
    self._stats = TraceEventStats()
    for event_name, metric_name, description in self._EVENTS:
      self._stats.AddInput(TraceEventStatsInput(
          event_category='IndexedDB',
          event_name=event_name,
          metric_name=metric_name,
          metric_description=description,
          units='ms',
          process_name='Browser'))

  def AddResults(self, model, renderer_process, interactions, results):
    # Delegate to the aggregated per-event stats collector.
    self._stats.AddResults(model, renderer_process, interactions, results)
|
5,298 | 836c1d2083d18c68fe551278d2df4155edc64c8c | import cv2
import numpy as np
frameWidth = 640
frameHeight = 480
# capturing Video from Webcam
cap = cv2.VideoCapture(0)
cap.set(3, frameWidth)
cap.set(4, frameHeight)
cap.set(10, 150)
myColors = [[20,40,40,70,255,255],
[100,169,121,135,255,255],
[0, 90, 90, 41, 255, 255]]
color_value = [[255, 0, 0], [0, 255, 0], [14, 107, 237]]
x, y, w, h = 0, 0, 0, 0
my_points = []
def find_color(img, color_value, myColors):
    """Detect each configured HSV colour range in ``img``.

    Draws a filled marker circle on the module-level ``frame_copy`` for every
    colour and returns a list of [x, y, colour_index] entries for detections
    away from the origin.
    """
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    count = 0
    new_points = []
    for color in myColors:
        # Each entry is [h_min, s_min, v_min, h_max, s_max, v_max].
        lower = np.array(color[0:3])
        upper = np.array(color[3:6])
        mask = cv2.inRange(hsv, lower, upper)
        x, y = contour_detect(mask)
        # NOTE(review): draws on the global frame_copy, not on img.
        cv2.circle(frame_copy, (x,y), 20,color_value[count], -1)
        # (0, 0) is contour_detect's "nothing found" sentinel — skip it.
        if x != 0 and y != 0:
            new_points.append([x,y,count])
        count += 1
    return new_points
def contour_detect(mask):
    """Return the top-centre (x + w//2, y) of a detected contour in ``mask``.

    Keeps the bounding box of the *last* contour whose area exceeds 100 px;
    returns (0, 0) when no contour qualifies.
    """
    x,y,w,h = 0, 0, 0, 0
    contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    for cnt in contours:
        area = cv2.contourArea(cnt)
        # Ignore tiny noise contours.
        if area > 100:
            perimeter = cv2.arcLength(cnt, True)
            approx = cv2.approxPolyDP(cnt, 0.01*perimeter, True)
            x, y, w, h = cv2.boundingRect(approx)
    return x + w // 2, y
def canvas(my_points, color_value):
    """Redraw every stored [x, y, colour_index] point onto the global frame_copy."""
    for point in my_points:
        cv2.circle(frame_copy, (point[0], point[1]),
                   15, color_value[point[2]], -1)
# Main capture loop: read a mirrored frame, detect colours, and paint the
# accumulated points until the user presses 'q'.
while True:
    ret, frame = cap.read()
    frame = cv2.flip(frame, 1)
    frame_copy = frame.copy()
    new_point = find_color(frame, color_value, myColors)
    if len(new_point) != 0:
        for i in new_point:
            my_points.append(i)
    if len(my_points) != 0:
        canvas(my_points, color_value)
    cv2.imshow('frame', frame_copy)
    # BUG FIX: the original tested `cv2.waitKey(1) and 0xFF == ord('q')`,
    # where `0xFF == ord('q')` is always False, so pressing 'q' never
    # exited. Mask the keycode with bitwise AND instead.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# Release the camera handle (resource leak fix) before tearing down the GUI.
cap.release()
cv2.destroyAllWindows()
|
5,299 | afccd33e4c6bc5b7907a6af4ab698489fc9ea70d | from meross_iot.model.http.exception import HttpApiError
from logger import get_logger
from typing import Dict
from flask import Blueprint
from authentication import _user_login
from decorator import meross_http_api
from messaging import make_api_response
auth_blueprint = Blueprint('auth', __name__)
_LOGGER = get_logger(__name__)
@auth_blueprint.route('/Login', methods=['POST'])
@meross_http_api(login_required=False)
def login(api_payload: Dict, *args, **kwargs):
    """Authenticate a user from the POSTed payload and issue an API token.

    Expects ``email`` and ``password`` keys in the decoded payload; raises
    HttpApiError when either is missing. On success returns the token,
    MQTT key, user id and email in the standard API response envelope.
    """
    email = api_payload.get("email")
    password = api_payload.get("password")
    if email is None:
        raise HttpApiError("Missing email parameter")
    if password is None:
        raise HttpApiError("Missing password parameter")
    # _user_login raises on bad credentials; reaching here means success.
    user, token = _user_login(email, password)
    _LOGGER.info("User: %s successfully logged in" % email)
    data = {
        "token": str(token.token),
        "key": str(user.mqtt_key),
        "userid": str(user.user_id),
        "email": str(user.email)
    }
    return make_api_response(data=data)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.