index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
16,500 | 65f18a253948e3c713162277cb440e7d90d3a589 | from django.urls import path
from .views import IndexView, PollResultView, PollDetailView, vote
# Namespace for reversing, e.g. reverse('polls:detail').
app_name = 'polls'
# URLconf for the polls app.
urlpatterns = [
    path('', IndexView.as_view(), name='index'),
    # Class-based views capture the question primary key as `pk`.
    path('<int:pk>/', PollDetailView.as_view(), name='detail'),
    path('<int:pk>/results/', PollResultView.as_view(), name='result'),
    # `vote` is a function view; note it expects `question_id`, not `pk`.
    path('<int:question_id>/vote/', vote, name='vote'),
]
16,501 | 06a918611093557aba62170cc64f2517a74a5f56 | """
This is where the implementation of the plugin code goes.
The MyPythonPlugin-class is imported from both run_plugin.py and run_debug.py
"""
import sys
import logging
from webgme_bindings import PluginBase
# Module-level logger: INFO level, explicitly routed to stdout
# (StreamHandler defaults to stderr).
logger = logging.getLogger('MyPythonPlugin')
logger.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)  # By default it logs to stderr..
handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
class MyPythonPlugin(PluginBase):
    """Example WebGME plugin.

    Logs the active node's name, renames it to 'newName', and commits the
    change back to the 'master' branch.
    """

    def main(self):
        core = self.core
        active = self.active_node
        current_name = core.get_attribute(active, 'name')
        logger.info('ActiveNode at "{0}" has name {1}'.format(
            core.get_path(active), current_name))

        # Mutate the model, then persist it as a new commit.
        core.set_attribute(active, 'name', 'newName')
        commit_info = self.util.save(self.root_node, self.commit_hash,
                                     'master', 'Python plugin updated the model')
        logger.info('committed :{0}'.format(commit_info))
|
16,502 | 1f36131547a11c738a09e4b383c58fde01fe3d04 | import PyPDF2
# Rotate the first page of the meeting minutes PDF by 180 degrees clockwise
# and save it to a new file.
# Fix: use context managers so both file handles are closed (the original
# never closed the input file).
with open('pdf/meetingminutes.pdf', 'rb') as minutesFile:
    pdfReader = PyPDF2.PdfFileReader(minutesFile)
    page = pdfReader.getPage(0)
    # page.rotateClockwise(90)  # 90-degree variant, kept for reference
    page.rotateClockwise(180)
    pdfWriter = PyPDF2.PdfFileWriter()
    pdfWriter.addPage(page)
    # Write while the reader's underlying file is still open: PdfFileWriter
    # reads page data lazily from the source stream.
    with open('rotate2.pdf', 'wb') as resultPdfFile:
        pdfWriter.write(resultPdfFile)
|
16,503 | 4099b8c3b52df51e017fb75206340d36fb1f4493 | # [S/W ๋ฌธ์ ํด๊ฒฐ ๊ธฐ๋ณธ] 9์ผ์ฐจ - ์ค์์ํ
def inorder(n):
    """In-order traversal of the global tree (par/ch1/ch2 arrays).

    Appends each visited node's letter (par[n]) to the global string `word`.
    Index 0 marks a missing child, so it terminates the recursion.
    """
    global word
    if n <= 0:
        return
    inorder(ch1[n])
    word += par[n]
    inorder(ch2[n])
# Driver: 10 test cases read from stdin. Each case gives N nodes; a node line
# is "<index> <letter> [left-child] [right-child]" with 0-2 children.
T = 10
for tc in range(1, T+1):
    N = int(input())
    # par[i] holds node i's letter; ch1/ch2 hold child indices (0 = none).
    par = [0]*(N+1)
    ch1 = [0]*(N+1)
    ch2 = [0]*(N+1)
    for i in range(N):
        node = input().split()
        if len(node) == 4:
            # index, letter, left child, right child
            par[int(node[0])] = node[1]
            ch1[int(node[0])] = int(node[2])
            ch2[int(node[0])] = int(node[3])
        elif len(node) == 3:
            # index, letter, single (left) child
            par[int(node[0])] = node[1]
            ch1[int(node[0])] = int(node[2])
        else:
            # leaf: index, letter
            par[int(node[0])] = node[1]
    # Collect the in-order word starting from the root (node 1).
    word = ''
    inorder(1)
    print('#{} {}'.format(tc, word))
16,504 | c3f0e8a6efc31b466a3bf4841eaf0a9e220b7147 | from django.urls import path
from lesson_4 import views
# Routes for lesson 4: two creation endpoints and one lookup endpoint.
# NOTE(review): URL style is inconsistent (hyphens vs underscore in 'get_flower').
urlpatterns = [
    path('create-flower/', views.create_flower, name='create_flower'),
    path('create-client/', views.create_client, name='create_client'),
    path('get_flower/', views.get_flower, name='get_flower')
]
|
16,505 | 7296b2a3ecea4314912a6ac2de021a5e5d7e14bd |
import json
class ConfigObject(object):
    """In-memory representation of one configuration (version/type/redis/http).

    `redis` and `http` are plain dicts; the read-only properties below unpack
    the commonly-used fields as tuples.
    """

    def __init__(self, version, type, redis, http):
        self.version = version
        # `type` shadows the builtin, but the parameter name is part of the
        # constructor's keyword interface, so it is kept.
        self.type = type
        self.redis = redis
        self.http = http

    @property
    def get_http_properties(self):
        """(authmethod, port, addr) of the http section.

        Fix: the original decorated these with `@property.getter`, which
        raises a TypeError at class-definition time; `@property` is correct.
        """
        return self.http["authmethod"], self.http["port"], self.http["addr"]

    @property
    def get_redis_properties(self):
        """(port, addr) of the redis section."""
        return self.redis["port"], self.redis["addr"]

    def __str__(self):
        # Serialize only the known fields, in a stable structure.
        return json.dumps({
            "version": self.version,
            "type": self.type,
            "redis": {
                "port": self.redis["port"],
                "addr": self.redis["addr"]
            },
            "http": {
                "authmethod": self.http["authmethod"],
                "port": self.http["port"],
                "addr": self.http["addr"],
            }
        })
class Config(object):
    """Load a JSON config file, substituting defaults for missing fields.

    On success `self.config` is the parsed dict (with defaults filled in);
    if the file cannot be read or parsed, `self.config` is the default
    ConfigObject. (The type asymmetry mirrors the original behaviour.)
    """

    def __init__(self, config_location):
        # Defaults. Note the default http section is empty, so absent http
        # fields fall back to None instead of raising KeyError as the
        # original fallback code would have.
        self.default_config = ConfigObject(
            "0.1priv", "private", {"port": 6379, "addr": "127.0.0.1"}, http={})
        try:
            with open(config_location) as fd:
                # Fix: the original read `fd.readlines()` and then called
                # `o.join("")` on the resulting list, which always raises
                # AttributeError. json.load does the right thing directly.
                parsed = json.load(fd)
        except (IOError, ValueError):
            # Unreadable file or invalid JSON.
            print("IOError: Input file broken")
            print("Using default parameters...")
            self.config = self.default_config
            return
        # Guard against missing sections before the per-field checks (the
        # original raised an uncaught KeyError for a missing section).
        if not isinstance(parsed.get("redis"), dict):
            parsed["redis"] = {}
        if not isinstance(parsed.get("http"), dict):
            parsed["http"] = {}
        # Per-field validation. (The original compared error tags with `is`,
        # which is identity, not equality, and only worked by interning luck.)
        if parsed.get("version") is None:
            print("Version not found or not valid. Using default values...")
            parsed["version"] = self.default_config.version
        if parsed.get("type") is None:
            print("Type is not found or not valid. Using default values...")
            parsed["type"] = self.default_config.type
        if parsed["redis"].get("port") is None:
            print("Redis server port is not found or not valid. Using default values...")
            parsed["redis"]["port"] = self.default_config.redis["port"]
        if parsed["redis"].get("addr") is None:
            print("Redis server address is not found or not valid. Using default values...")
            parsed["redis"]["addr"] = self.default_config.redis["addr"]
        if parsed["http"].get("authmethod") is None:
            print("Http authentication method is not found or not valid. Using default values...")
            parsed["http"]["authmethod"] = self.default_config.http.get("authmethod")
        if parsed["http"].get("port") is None:
            print("Http server port is not found or not valid. Using the default values...")
            parsed["http"]["port"] = self.default_config.http.get("port")
        if parsed["http"].get("addr") is None:
            print("Http server address is not found or not valid. Using the default values... ")
            parsed["http"]["addr"] = self.default_config.http.get("addr")
        self.config = parsed
16,506 | 58471d9e039bfda05bce49e83b48b4393eb5882d | import socket
import sys
import util
from Crypto.Cipher import AES
Kp = b'secret_key_16bit'
IV = b'initial_vector_f'
BLOCK_SIZE = 16 # Bytes
def connect_to_node(host, port):
    """Open a TCP connection to (host, port) and return the connected socket."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((host, port))
    return sock
def choose_mode_of_operation():
    """Prompt until the user picks a mode; return 'ecb' or 'cfb' (lowercase).

    Fix: the prompt advertises uppercase "ECB/CFB" but the original only
    accepted lowercase input, so typing "ECB" re-prompted forever. Input is
    now stripped and lowercased before checking.
    """
    print('Choose mode of operation (ECB/CFB): ', end='')
    mode_of_operation = input().strip().lower()
    while mode_of_operation not in ('ecb', 'cfb'):
        print('Invalid mode of operation. Choose between ECB and CFB: ', end='')
        mode_of_operation = input().strip().lower()
    return mode_of_operation
def communication(server_sock, b_sock):
    """Relay a session key from the server to B, then stream the file to B
    encrypted block-by-block in the chosen mode (ECB or CFB).

    Protocol (as visible here): send mode to B, receive a 16-byte encrypted
    session key from the server, forward it to B, decrypt it locally with the
    pre-shared key Kp, wait for B's 1-byte go-ahead, then send the file.
    """
    mode_of_operation = choose_mode_of_operation()
    b_sock.send(mode_of_operation.encode())
    # Session key arrives ECB-encrypted under the pre-shared key Kp.
    encrypted_key = server_sock.recv(16)
    b_sock.send(encrypted_key)
    aes = AES.new(Kp, AES.MODE_ECB)
    key = aes.decrypt(encrypted_key)
    # waiting for B to initiate communication
    b_sock.recv(1)
    print('B initialized communication')
    end_of_file = False
    # NOTE(review): file handle is never closed; text mode assumes the
    # plaintext file is ASCII/BLOCK_SIZE-friendly — confirm.
    file = open(sys.argv[5], 'r')
    aes = AES.new(key, AES.MODE_ECB)
    # CFB feedback register starts at the shared IV.
    cfb_cipher = IV
    while not end_of_file:
        current_block = file.read(BLOCK_SIZE)
        if len(current_block) < BLOCK_SIZE:
            if len(current_block) == 0:
                break
            # Short final block: pad to a full block and stop after sending it.
            end_of_file = True
            current_block = util.pad(current_block, BLOCK_SIZE)
        print('Sending ', current_block)
        if mode_of_operation == 'ecb':
            b_sock.send(aes.encrypt(current_block.encode()))
        else:
            # CFB: ciphertext = E(previous ciphertext) XOR plaintext.
            cfb_cipher = util.byte_xor(aes.encrypt(cfb_cipher), current_block.encode())
            b_sock.send(cfb_cipher)
def main():
    """Entry point for node A: connect to the key server and to B, then run
    the encrypted file transfer.

    Usage: python A.py server_host server_port b_host b_port filename
    """
    try:
        server_host = sys.argv[1]
        server_port = int(sys.argv[2])
        server_sock = connect_to_node(server_host, server_port)
        b_host = sys.argv[3]
        b_port = int(sys.argv[4])
        b_sock = connect_to_node(b_host, b_port)
        communication(server_sock, b_sock)
        server_sock.close()
        b_sock.close()
    except IndexError:
        # Missing CLI arguments — show usage instead of a traceback.
        print('Parameters format: python A.py server_host server_port b_host b_port filename')

if __name__ == '__main__':
    main()
|
16,507 | d743941e5790d12ae6b868a0f827b0edb8d4a58b | # Generated by Django 2.2.3 on 2019-08-19 05:28
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds password-reset support to the user model: a request timestamp
    (for expiring requests) and the one-time request token."""

    dependencies = [
        ('userext', '0003_user_interface_wide_screen'),
    ]

    operations = [
        migrations.AddField(
            model_name='user',
            name='password_request_date',
            field=models.DateTimeField(blank=True, null=True, verbose_name='password change request timeout'),
        ),
        migrations.AddField(
            model_name='user',
            name='password_request_token',
            field=models.CharField(blank=True, default='', max_length=40, null=True, verbose_name='password change request token'),
        ),
    ]
|
16,508 | 3cc7e80728191957a50a754784a11d11a8b30148 | import pygame
from OpenGL.GL import *
from OpenGL.GLU import *
import math
import random
import graphics
import helpers
import mango
import other
import plat
import EZMenu
class Main:
    """Game shell (Python 2 / pygame + OpenGL).

    Sets up the window and fixed OpenGL state, loads a level, and runs the
    main loop (compute -> draw -> flip -> events) capped at self.FPS.
    The camera orbits the origin; A/Z pitch it up/down.
    """

    def __init__(self):
        fontSize = 72
        # EZMenu returns the chosen label ('Level 1'...); char [6] is the digit.
        level = int(EZMenu.EZMenu('Choose a level! :D',['Level 1','Level 2','Level 3'],fontSize)[6]) - 1 #Pretend levels are numbered 1-3 but they're really 0-2
        self.FPS = 80
        # Turned off because if there are bugs there's no escape!
        #fullscreen = EZMenu.EZMenu('Fullscreen?',['Yes Yes','No No No'],fontSize)
        self.fullscreen = False # fullscreen == 'Yes Yes'
        self.w = 800
        self.h = 800
        pygame.init()
        if self.fullscreen:
            # Fullscreen picks the native resolution; remember it for the viewport.
            s = pygame.display.set_mode((0,0),pygame.OPENGL|pygame.FULLSCREEN|pygame.HWSURFACE)
            self.w = s.get_width()
            self.h = s.get_height()
        else:
            s = pygame.display.set_mode((self.w,self.h),pygame.OPENGL)
        glViewport(0, 0, self.w, self.h)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        glMatrixMode(GL_MODELVIEW)
        glLoadIdentity()
        # NOTE(review): gluPerspective is applied while GL_MODELVIEW is
        # current — normally it belongs under GL_PROJECTION; confirm intended.
        gluPerspective(45,float(self.w)/self.h,1.0,3200.0)
        glClearColor(0,0,.25,0)
        glEnable(GL_BLEND)
        glBlendFunc(GL_SRC_ALPHA,GL_ONE_MINUS_SRC_ALPHA)
        glEnable(GL_POINT_SMOOTH)
        glPointSize(4)
        glClearDepth(1.0)
        glEnable(GL_DEPTH_TEST)
        self.objects = []
        # HACK: level files are executed as Python source and are expected to
        # populate self.objects. Only run trusted level files.
        exec(helpers.getLevelText(level))
        self.yRot = 0 #Rotation about y axis. Camera always looks at x=0,z=0
        self.xzRad = 1200
        self.camX = 0
        self.camY = 0
        self.camZ = 0
        # Camera pitch limits (degrees) and keyboard-driven pitch speed.
        self.minCamRotX = 5
        self.maxCamRotX = 45
        self.camRotX = 17.5
        self.camRotXChange = 0
        self.camRotXChangeSpeed = 0.125
        self.clock = pygame.time.Clock()

    def compute(self):
        """Advance one simulation step for every game object."""
        for anObject in self.objects:
            anObject.compute()

    def draw(self):
        """Render one frame: position the orbiting camera, draw all objects."""
        glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)
        glPushMatrix()
        # Apply pending pitch change, clamped to [minCamRotX, maxCamRotX].
        self.camRotX += self.camRotXChange
        if self.camRotX < self.minCamRotX:
            self.camRotX = self.minCamRotX
        if self.camRotX > self.maxCamRotX:
            self.camRotX = self.maxCamRotX
        glRotatef(self.camRotX,1,0,0)
        glRotatef(-self.yRot*180/math.pi,0,1,0)
        # Camera sits on a circle of radius xzRad around the origin.
        self.camX = self.xzRad*math.sin(self.yRot)
        self.camZ = self.xzRad*math.cos(self.yRot)
        glTranslatef(-self.camX,-self.camY,-self.camZ)
        for anObject in self.objects:
            anObject.draw()
        glPopMatrix()
        glFlush()

    def go(self):
        """Main loop until quit/escape; shows the frame rate in the caption."""
        self.done = 0
        while not self.done:
            self.compute()
            self.draw()
            pygame.display.flip()
            self.getEvents()
            self.clock.tick(self.FPS)
            pygame.display.set_caption('Mango 64 Jul 24 09 - framerate ' + str(self.clock.get_fps()) + '/' + str(self.FPS))
        pygame.quit()

    def getEvents(self):
        """Dispatch key events to every object implementing the handler.

        Objects without a given handler are skipped via AttributeError.
        NOTE(review): this also silently swallows AttributeErrors raised
        *inside* a handler — confirm that is acceptable.
        """
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                self.done = 1
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_UP:
                    for anObject in self.objects:
                        try:
                            anObject.upKeyDown()
                        except AttributeError:
                            pass
                if event.key == pygame.K_LEFT:
                    for anObject in self.objects:
                        try:
                            anObject.leftKeyDown()
                        except AttributeError:
                            pass
                if event.key == pygame.K_RIGHT:
                    for anObject in self.objects:
                        try:
                            anObject.rightKeyDown()
                        except AttributeError:
                            pass
                if event.key == pygame.K_DOWN:
                    for anObject in self.objects:
                        try:
                            anObject.downKeyDown()
                        except AttributeError:
                            pass
                if event.key == pygame.K_SPACE:
                    for anObject in self.objects:
                        try:
                            anObject.spaceKeyDown()
                        except AttributeError:
                            pass
                # A/Z tilt the camera up/down while held.
                if event.key == pygame.K_a:
                    self.camRotXChange = self.camRotXChangeSpeed
                if event.key == pygame.K_z:
                    self.camRotXChange = -self.camRotXChangeSpeed
                if event.key == pygame.K_ESCAPE:
                    self.done = 1
            if event.type == pygame.KEYUP:
                if event.key == pygame.K_UP:
                    for anObject in self.objects:
                        try:
                            anObject.upKeyUp()
                        except AttributeError:
                            pass
                if event.key == pygame.K_DOWN:
                    for anObject in self.objects:
                        try:
                            anObject.downKeyUp()
                        except AttributeError:
                            pass
                if event.key == pygame.K_LEFT:
                    for anObject in self.objects:
                        try:
                            anObject.leftKeyUp()
                        except AttributeError:
                            pass
                if event.key == pygame.K_RIGHT:
                    for anObject in self.objects:
                        try:
                            anObject.rightKeyUp()
                        except AttributeError:
                            pass
                if event.key == pygame.K_SPACE:
                    for anObject in self.objects:
                        try:
                            anObject.spaceKeyUp()
                        except AttributeError:
                            pass
                if event.key == pygame.K_a:
                    self.camRotXChange = 0
                if event.key == pygame.K_z:
                    self.camRotXChange = 0

    def win(self):
        """Called by level objects when the player wins; ends the loop."""
        print 'win'
        self.done = 1

    def lose(self):
        """Called by level objects when the player loses; ends the loop."""
        print 'lose'
        self.done = 1
# Fix: guard the entry point so importing this module does not immediately
# open a window and start the game loop.
if __name__ == '__main__':
    k = Main()
    k.go()
|
16,509 | fb83f0394be923bb72a1e36b1974deee6ae6a441 | #!/usr/bin/env python
import os

from setuptools import setup

here = os.path.abspath(os.path.dirname(__file__))

# Fix: read README through a context manager so the file handle is closed
# (the original `open(...).read()` leaked it).
with open(os.path.join(here, 'README.rst')) as readme_file:
    README = readme_file.read()

# Executing version.py defines __version__ without importing the package.
with open('sample/version.py') as version_file:
    exec(version_file.read())

setup(
    name='sample',
    version=__version__,
    description='Django Sample',
    long_description=README,
    author='leeeandroo',
    author_email='leeeandroo@gmail.com',
    include_package_data=True,
    url='https://bitbucket.org/username/sample.gui',
    classifiers=[
        'Programming Language :: Python',
        'Programming Language :: Python :: 3.7',
        'Framework :: Django',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',
    ],
    zip_safe=False,
    entry_points={
        'console_scripts': [
            'server = sample.wsgi:run',
        ]
    }
)
|
16,510 | 4c6a8f4a195698b46aa37f13b74d65e0d2318b5e | def searchLo(x, left, right):
if(left == right):
if(a[left] >= x): return left
else: return None
mid = int((left+right)/2)
if(a[mid] >= x):
if(mid == left): return mid
if(mid > left):
mostLeft = searchLo(x, left, mid-1)
if(mostLeft == None): return mid
else: return mostLeft
else: return searchLo(x, mid+1, right)
def searchHi(x, left, right, arr=None):
    """Return the largest index i in [left, right] with arr[i] <= x, or None.

    `arr` must be sorted ascending. Backward-compatible generalization: when
    `arr` is omitted the module-level list `a` is searched, as the original
    hard-coded global required.
    """
    data = a if arr is None else arr
    if left == right:
        return left if data[left] <= x else None
    mid = (left + right) // 2
    if data[mid] <= x:
        # mid qualifies; look for a later qualifying index first.
        mostRight = searchHi(x, mid + 1, right, data)
        return mid if mostRight is None else mostRight
    if mid != left:
        return searchHi(x, left, mid - 1, data)
    # mid == left and data[mid] > x: no qualifying index (the original
    # reached this case by falling off the end of the function).
    return None
# Driver: the first stdin line (declared size) is read but unused — the array
# length is taken from the actual values on the second line.
input()
a = list(map(int, input().split(" ")))
nQueries = int(input())
queriesArray = []
for q in range(nQueries):
    queriesArray.append(int(input()))
for q in range(nQueries):
    x = queriesArray[q]
    # For each query x: first index with a[i] >= x and last index with a[i] <= x.
    print(searchLo(x, 0, len(a)-1), searchHi(x, 0, len(a)-1))
|
16,511 | 1de139d4e2189298d50d82ce40afe6661830bb1b | from django.contrib import admin
from .models import FileFieldModel

# Expose FileFieldModel in the Django admin with the default ModelAdmin.
admin.site.register(FileFieldModel)
16,512 | 871b6ae319c656ceb61e173042ce6dc39162d218 | def f(s):
l=s.find("f(",28)
print s[:l+2]+repr(s)+s[-1]
if __name__ == '__main__':
f('def f(s):\n\tl=s.find("f(",28)\n\tprint s[:l+2]+repr(s)+s[-1]\n\nif __name__ == \'__main__\':\n\tf()')
|
16,513 | 26a7c213ab0cff44dceae1e99035ab82416350ec | """Add license field to rights
Revision ID: 844b6f4ed646
Revises: 59c8cd1935d7
Create Date: 2019-02-04 14:52:40.610752
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '844b6f4ed646'
down_revision = '59c8cd1935d7'
branch_labels = None
depends_on = None
def upgrade():
    """Add a nullable Unicode `license` column to the `rights` table."""
    op.add_column('rights',
        sa.Column('license', sa.Unicode, nullable=True)
    )
def downgrade():
    """Drop the `license` column added by this revision."""
    op.drop_column('rights', 'license')
|
16,514 | 2a818780a1924f0294aa7e259d08f6abe8221677 | import os
import numpy as np
import random
class Reader(object):
    """Base class for listfile-driven dataset readers.

    The listfile is a CSV whose first line is a header and whose remaining
    lines each describe one example; subclasses parse the lines and implement
    `read_example`.
    """

    def __init__(self, dataset_dir, listfile=None):
        self._dataset_dir = dataset_dir
        self._current_index = 0
        # Default to <dataset_dir>/listfile.csv when no explicit path given.
        path = os.path.join(dataset_dir, "listfile.csv") if listfile is None else listfile
        with open(path, "r") as lfile:
            lines = lfile.readlines()
        self._listfile_header = lines[0]
        self._data = lines[1:]

    def get_number_of_examples(self):
        """Number of examples listed (header excluded)."""
        return len(self._data)

    def random_shuffle(self, seed=None):
        """Shuffle examples in place, optionally seeding for reproducibility."""
        if seed is not None:
            random.seed(seed)
        random.shuffle(self._data)

    def read_example(self, index):
        raise NotImplementedError()

    def read_next(self):
        """Read examples sequentially, wrapping to the start at the end."""
        index = self._current_index
        self._current_index = (index + 1) % self.get_number_of_examples()
        return self.read_example(index)
class DecompensationReader(Reader):
    def __init__(self, dataset_dir, listfile=None):
        """ Reader for decompensation prediction task.
        :param dataset_dir: Directory where timeseries files are stored.
        :param listfile: Path to a listfile. If this parameter is left `None` then
                         `dataset_dir/listfile.csv` will be used.
        """
        Reader.__init__(self, dataset_dir, listfile)
        # Each listfile row is "<ts_filename>,<period t in hours>,<label y>".
        self._data = [line.split(',') for line in self._data]
        self._data = [(x, float(t), int(y)) for (x, t, y) in self._data]

    def _read_timeseries(self, ts_filename, time_bound):
        # Collect event rows up to `time_bound` hours; the epsilon tolerates
        # float rounding in the "Hours" column. Values stay as strings.
        ret = []
        with open(os.path.join(self._dataset_dir, ts_filename), "r") as tsfile:
            header = tsfile.readline().strip().split(',')
            assert header[0] == "Hours"
            for line in tsfile:
                mas = line.strip().split(',')
                t = float(mas[0])
                if t > time_bound + 1e-6:
                    break
                ret.append(np.array(mas))
        return (np.stack(ret), header)

    def read_example(self, index):
        """ Read the example with given index.
        :param index: Index of the line of the listfile to read (counting starts from 0).
        :return: Dictionary with the following keys:
            X : np.array
                2D array containing all events. Each row corresponds to a moment.
                First column is the time and other columns correspond to different
                variables.
            t : float
                Length of the data in hours. Note, in general, it is not equal to the
                timestamp of last event.
            y : int (0 or 1)
                Mortality within next 24 hours.
            header : array of strings
                Names of the columns. The ordering of the columns is always the same.
            name: Name of the sample.
        """
        if index < 0 or index >= len(self._data):
            raise ValueError("Index must be from 0 (inclusive) to number of examples (exclusive).")
        name = self._data[index][0]
        t = self._data[index][1]
        y = self._data[index][2]
        (X, header) = self._read_timeseries(name, t)
        return {"X": X,
                "t": t,
                "y": y,
                "header": header,
                "name": name}
class InHospitalMortalityReader(Reader):
    def __init__(self, dataset_dir, listfile=None, period_length=48.0):
        """ Reader for in-hospital mortality prediction task.
        :param dataset_dir: Directory where timeseries files are stored.
        :param listfile: Path to a listfile. If this parameter is left `None` then
                         `dataset_dir/listfile.csv` will be used.
        :param period_length: Length of the period (in hours) from which the prediction is done.
        """
        Reader.__init__(self, dataset_dir, listfile)
        # Each listfile row is "<ts_filename>,<label y>"; the period length is
        # fixed for the whole reader rather than stored per example.
        self._data = [line.split(',') for line in self._data]
        self._data = [(x, int(y)) for (x, y) in self._data]
        self._period_length = period_length

    def _read_timeseries(self, ts_filename):
        # Reads the whole timeseries (no time bound); values stay as strings.
        ret = []
        with open(os.path.join(self._dataset_dir, ts_filename), "r") as tsfile:
            header = tsfile.readline().strip().split(',')
            assert header[0] == "Hours"
            for line in tsfile:
                mas = line.strip().split(',')
                ret.append(np.array(mas))
        return (np.stack(ret), header)

    def read_example(self, index):
        """ Reads the example with given index.
        :param index: Index of the line of the listfile to read (counting starts from 0).
        :return: Dictionary with the following keys:
            X : np.array
                2D array containing all events. Each row corresponds to a moment.
                First column is the time and other columns correspond to different
                variables.
            t : float
                Length of the data in hours. Note, in general, it is not equal to the
                timestamp of last event.
            y : int (0 or 1)
                In-hospital mortality.
            header : array of strings
                Names of the columns. The ordering of the columns is always the same.
            name: Name of the sample.
        """
        if index < 0 or index >= len(self._data):
            raise ValueError("Index must be from 0 (inclusive) to number of lines (exclusive).")
        name = self._data[index][0]
        t = self._period_length
        y = self._data[index][1]
        (X, header) = self._read_timeseries(name)
        return {"X": X,
                "t": t,
                "y": y,
                "header": header,
                "name": name}
class LengthOfStayReader(Reader):
    def __init__(self, dataset_dir, listfile=None):
        """ Reader for length of stay prediction task.
        :param dataset_dir: Directory where timeseries files are stored.
        :param listfile: Path to a listfile. If this parameter is left `None` then
                         `dataset_dir/listfile.csv` will be used.
        """
        Reader.__init__(self, dataset_dir, listfile)
        # Each listfile row is "<ts_filename>,<period t in hours>,<target y>".
        self._data = [line.split(',') for line in self._data]
        self._data = [(x, float(t), float(y)) for (x, t, y) in self._data]

    def _read_timeseries(self, ts_filename, time_bound):
        # Collect event rows up to `time_bound` hours (epsilon for float
        # comparison); values stay as strings.
        ret = []
        with open(os.path.join(self._dataset_dir, ts_filename), "r") as tsfile:
            header = tsfile.readline().strip().split(',')
            assert header[0] == "Hours"
            for line in tsfile:
                mas = line.strip().split(',')
                t = float(mas[0])
                if t > time_bound + 1e-6:
                    break
                ret.append(np.array(mas))
        return (np.stack(ret), header)

    def read_example(self, index):
        """ Reads the example with given index.
        :param index: Index of the line of the listfile to read (counting starts from 0).
        :return: Dictionary with the following keys:
            X : np.array
                2D array containing all events. Each row corresponds to a moment.
                First column is the time and other columns correspond to different
                variables.
            t : float
                Length of the data in hours. Note, in general, it is not equal to the
                timestamp of last event.
            y : float
                Remaining time in ICU.
            header : array of strings
                Names of the columns. The ordering of the columns is always the same.
            name: Name of the sample.
        """
        if index < 0 or index >= len(self._data):
            raise ValueError("Index must be from 0 (inclusive) to number of lines (exclusive).")
        name = self._data[index][0]
        t = self._data[index][1]
        y = self._data[index][2]
        (X, header) = self._read_timeseries(name, t)
        return {"X": X,
                "t": t,
                "y": y,
                "header": header,
                "name": name}
class PhenotypingReader(Reader):
    def __init__(self, dataset_dir, listfile=None):
        """ Reader for phenotype classification task.
        :param dataset_dir: Directory where timeseries files are stored.
        :param listfile: Path to a listfile. If this parameter is left `None` then
                         `dataset_dir/listfile.csv` will be used.
        """
        Reader.__init__(self, dataset_dir, listfile)
        # Each listfile row is "<ts_filename>,<period t>,<label1>,<label2>,...".
        self._data = [line.split(',') for line in self._data]
        self._data = [(mas[0], float(mas[1]), list(map(int, mas[2:]))) for mas in self._data]

    def _read_timeseries(self, ts_filename):
        # Reads the whole timeseries (no time bound); values stay as strings.
        ret = []
        with open(os.path.join(self._dataset_dir, ts_filename), "r") as tsfile:
            header = tsfile.readline().strip().split(',')
            assert header[0] == "Hours"
            for line in tsfile:
                mas = line.strip().split(',')
                ret.append(np.array(mas))
        return (np.stack(ret), header)

    def read_example(self, index):
        """ Reads the example with given index.
        :param index: Index of the line of the listfile to read (counting starts from 0).
        :return: Dictionary with the following keys:
            X : np.array
                2D array containing all events. Each row corresponds to a moment.
                First column is the time and other columns correspond to different
                variables.
            t : float
                Length of the data in hours. Note, in general, it is not equal to the
                timestamp of last event.
            y : array of ints
                Phenotype labels.
            header : array of strings
                Names of the columns. The ordering of the columns is always the same.
            name: Name of the sample.
        """
        if index < 0 or index >= len(self._data):
            raise ValueError("Index must be from 0 (inclusive) to number of lines (exclusive).")
        name = self._data[index][0]
        t = self._data[index][1]
        y = self._data[index][2]
        (X, header) = self._read_timeseries(name)
        return {"X": X,
                "t": t,
                "y": y,
                "header": header,
                "name": name}
class MultitaskReader(Reader):
    def __init__(self, dataset_dir, listfile=None):
        """ Reader for multitask learning.
        :param dataset_dir: Directory where timeseries files are stored.
        :param listfile: Path to a listfile. If this parameter is left `None` then
                         `dataset_dir/listfile.csv` will be used.
        """
        Reader.__init__(self, dataset_dir, listfile)
        self._data = [line.split(',') for line in self._data]

        # Each task column is a ';'-separated string; for the sequence tasks
        # (los, decomp) the first half encodes masks and the second half labels.
        def process_ihm(x):
            # "pos;mask;label" -> [pos, mask, label]
            return list(map(int, x.split(';')))

        def process_los(x):
            x = x.split(';')
            if x[0] == '':
                return ([], [])
            return (list(map(int, x[:len(x)//2])), list(map(float, x[len(x)//2:])))

        def process_ph(x):
            return list(map(int, x.split(';')))

        def process_decomp(x):
            x = x.split(';')
            if x[0] == '':
                return ([], [])
            return (list(map(int, x[:len(x)//2])), list(map(int, x[len(x)//2:])))

        self._data = [(fname, float(t), process_ihm(ihm), process_los(los),
                       process_ph(pheno), process_decomp(decomp))
                      for fname, t, ihm, los, pheno, decomp in self._data]

    def _read_timeseries(self, ts_filename):
        # Reads the whole timeseries (no time bound); values stay as strings.
        ret = []
        with open(os.path.join(self._dataset_dir, ts_filename), "r") as tsfile:
            header = tsfile.readline().strip().split(',')
            assert header[0] == "Hours"
            for line in tsfile:
                mas = line.strip().split(',')
                ret.append(np.array(mas))
        return (np.stack(ret), header)

    def read_example(self, index):
        """ Reads the example with given index.
        :param index: Index of the line of the listfile to read (counting starts from 0).
        :return: Return dictionary with the following keys:
            X : np.array
                2D array containing all events. Each row corresponds to a moment.
                First column is the time and other columns correspond to different
                variables.
            t : float
                Length of the data in hours. Note, in general, it is not equal to the
                timestamp of last event.
            ihm : array
                Array of 3 integers: [pos, mask, label].
            los : array
                Array of 2 arrays: [masks, labels].
            pheno : array
                Array of 25 binary integers (phenotype labels).
            decomp : array
                Array of 2 arrays: [masks, labels].
            header : array of strings
                Names of the columns. The ordering of the columns is always the same.
            name: Name of the sample.
        """
        if index < 0 or index >= len(self._data):
            raise ValueError("Index must be from 0 (inclusive) to number of lines (exclusive).")
        name = self._data[index][0]
        (X, header) = self._read_timeseries(name)
        return {"X": X,
                "t": self._data[index][1],
                "ihm": self._data[index][2],
                "los": self._data[index][3],
                "pheno": self._data[index][4],
                "decomp": self._data[index][5],
                "header": header,
                "name": name}
|
16,515 | 2b8ae34471de28cc52c4cdd19ca82d62b0726de6 | ### ์๋ฃํ2 ###
'''
๋ฆฌ์คํธ ( list )
- ๋ณต์์ ๋ฐ์ดํฐ๋ฅผ ๋ฌถ์ด์ ์ ์ฅํ์ฌ ๋ฐ์ดํฐ์ ์ ์ฅ,์ ์ง,์ฌ์ฉ์
์ฉ์ดํ๊ฒ ํ๋ ์๋ฃ๊ตฌ์กฐ
[ํ์]
๋ฆฌ์คํธ๋ช
= [๋ฐ์ดํฐ1,๋ฐ์ดํฐ2,๋ฐ์ดํฐ3,....]
- ํ ์ธ์ด์ ๋ฐฐ์ด๊ณผ๋ ๋ค๋ฆ
๋๋ค.
'''
# ๋ฆฌ์คํธ ์์ฑํ๊ธฐ
listDatas1 = [] # ๋น ๋ฆฌ์คํธ
listDatas2 = [1, 2, 3, 4, 5] # ์ ์ ๋ฆฌ์คํธ
listDatas3 = [1.1, 2.2, 3.3, 4.4, 5.5] # ์ค์ ๋ฆฌ์คํธ
listDatas4 = ["ํ๋","๋์๊ธฐ","์์ผ"] # ๋ฌธ์์ด ๋ฆฌ์คํธ
listDatas5 = [7, 10.10 , "๋ฌธ์์ด"] # ๋ณตํฉ
listDatas6 = [[1, 2, 3],[4, 5, 6]] # ์ค์ฒฉ ๋ฆฌ์คํธ
print(listDatas1)
print(listDatas2)
print(listDatas3)
print(listDatas4)
print(listDatas5)
print(listDatas6)
print()
# ๋ฆฌ์คํธ ์์ ์ ๊ทผํ๊ธฐ
nums = [1, 2, 3, 4, 5]
print(nums) # ์ ์ฒด
print(nums[0]) # 0๋ฒ์งธ
print(nums[1]) # 1๋ฒ์งธ
print(nums[2]) # 2๋ฒ์งธ
print(nums[3]) # 3๋ฒ์งธ
print(nums[4]) # 4๋ฒ์งธ
print()
# ๋ฆฌ์คํธ ์์ ์์ ํ๊ธฐ
nums = [1,2,3]
print(nums[0])
print(nums[1])
print(nums[2])
nums[0] = 100 # 0๋ฒ์งธ ๊ฐ ์์
nums[1] = 200 # 1๋ฒ์งธ ๊ฐ ์์
nums[2] = 300 # 2๋ฒ์งธ ๊ฐ ์์
print(nums[0])
print(nums[1])
print(nums[2])
print()
# ๋ฆฌ์คํธ ์์ ์ญ์ ํ๊ธฐ
nums = [1,2,3]
print(nums)
nums[2] = 0 # 1) 0 ํน์ ์๋ฏธ์๋ ๋ฐ์ดํฐ ์ฝ์
print(nums)
del nums[2] # 2) del ํค์๋๋ฅผ ์ด์ฉํ์ฌ ์ค์ ๋ก ๋ฐ์ดํฐ ์ญ์
print(nums)
print()
# ๋ฆฌ์คํธ ์ธ๋ฑ์ฑ
strDatas = ["Python" , "C" , "JAVA" , "C++"]
print(strDatas[0] , strDatas[-4]) # Python
print(strDatas[1] , strDatas[-3]) # C
print(strDatas[2] , strDatas[-2]) # JAVA
print(strDatas[3] , strDatas[-1]) # C++
print()
# ์ค์ฒฉ ๋ฆฌ์คํธ ์ธ๋ฑ์ฑ
datas = [1,2,['a','b',['Python','is fun!']]]
print(datas)
print(datas[0])
print(datas[1])
print(datas[2])
print(datas[2][0])
print(datas[2][1])
print(datas[2][2])
print(datas[2][2][0])
print(datas[2][2][1])
print()
# ๋ฆฌ์คํธ ์ฌ๋ผ์ด์ฑ
nums = [10,20,30,40,50,60,70]
print(nums)
print(nums[:5]) # 0 ~ 4๋ฒ์งธ(-1) ๊น์ง
print(nums[5:]) # 5๋ฒ์งธ ๋ถํฐ ๋๊น์ง
print(nums[2:6]) # 2๋ฒ์งธ 5๋ฒ์งธ(-1)๊น์ง
print(nums[2:-1]) # 2๋ฒ์งธ๋ถํฐ 5๋ฒ์งธ ๊น์ง
print()
# ์ค์ฒฉ ๋ฆฌ์คํธ ์ฌ๋ผ์ด์ฑ
datas = [1,2,3,["Python" , "is fun"],4,5]
print(datas)
print(datas[2:5])
print()
# ๋ฆฌ์คํธ ๊ด๋ จ ํจ์
# append() : ์ถ๊ฐํ๊ธฐ ( 1๊ฐ๋ง ๊ฐ๋ฅ , ๋ฆฌ์คํธ์ ๋งจ๋ค์ ์ถ๊ฐ๋ง ๊ฐ๋ฅ )
nums = [1,2,3,4]
print(nums)
nums.append(5) # ๋งจ์์ 5 ์ถ๊ฐ
print(nums)
nums.append(1.5) # ๋งจ๋ค์ 1.5 ์ถ๊ฐ
print(nums)
#nums.append(3,4) # error ๋จ์ผ ๋ฐ์ดํฐ๋ง ์ถ๊ฐ๊ฐ๋ฅ
print()
# ์ญ์ ํ๊ธฐ 1) remove
nums = [1,2,3,4,5,6,7,8,9]
print(nums)
nums.remove(8) # ()์์ ๊ฐ์ ์ญ์ ํ๋ค. - 8 ์ญ์
print(nums)
nums.remove(5) # - 5์ญ์
print(nums)
print()
# ์ญ์ ํ๊ธฐ 2) pop
nums = [1,2,3,4,5,6,7,8,9]
print(nums)
nums.pop(8) # ()์์ ์ธ๋ฑ์ค๋ฅผ ์ญ์ ํ๋ค. - 8๋ฒ์งธ ์์นํ 9์ญ์
print(nums)
nums.pop(5) # - 5๋ฒ์งธ ์์นํ 6์ญ์
print(nums)
print()
# extend, insert, sort, count, index ๊ณต๋ถํด ๋ณด์ธ์.
'''
2) ํํ ( tuple )
- ํํ์ ๋ฐ์ดํฐ๋ค์ ๋ณํ๊ฐ ๋ถ๊ฐ๋ฅํ๋ค๋ ์ ์ ์ ์ธํ๊ณค
๋ฆฌ์คํธ์ ๊ฑฐ์ ๊ฐ๋ค.
- ๋ฆฌ์คํธ๊ด๋ จ ํจ์๊ฐ ํํ์๋ ์ ์ฉ๋์ง ์๋๋ค.
[ํ์]
ํํ๋ช
= (์์1,์์2,์์3)
'''
# ํํ ๋ง๋๋ ๋ฐฉ๋ฒ
tuple1 = ()
tuple2 = (1) # ๋จ์ผ๋ฐ์ดํฐ๋ ํํ์ด ์๋๋ค.
# ๋จ์ผ๋ฐ์ดํฐ๋ ์์ธ์ ์ผ๋ก ๋ค์๊ณผ ๊ฐ์ด ๋ง๋ ๋ค. tuple2 = (1,)
tuple3 = (1,2,3)
tuple4 = 1,2,3 # ์์ธ์ ์ผ๋ก ๋ณ์ํ๋์ ์ฌ๋ฌ๋ฐ์ดํฐ๋ฅผ ๋์
ํ๋ฉด
# ์๋์ ์ผ๋ก ํํ๋ก ๋ง๋ค์ด์ ์ ์ฅํ๋ค.
tuple5 = ([1,2,3],(1,2,3),1,2,3)
print( type(tuple1) , tuple1 )
print( type(tuple2) , tuple2 )
print( type(tuple3) , tuple3 )
print( type(tuple4) , tuple4 )
print( type(tuple5) , tuple5 )
print()
# ํํ ์์ ์์ ( ์์ ๋ถ๊ฐ๋ฅ )
tupleDatas = (1,2,3)
#tupleDatas[0] = 100 # ์์ ๋ถ๊ฐ
#tupleDatas[1] = 200
#print(tupleDatas)
#print()
# ํํ ์์ ์ ๊ฑฐ ( ์ ๊ฑฐ ๋ถ๊ฐ๋ฅ )
tupleDatas = (1,2,3)
#del tupleDatas[0] # ์ ๊ฑฐ ๋ถ๊ฐ๋ฅ
#print(tupleDatas)
#print()
# ํํ ์ธ๋ฑ์ฑ
strDatas = ("Python" , "C" , "JAVA" , "C++")
print(strDatas[0] , strDatas[-4]) # Python
print(strDatas[1] , strDatas[-3]) # C
print(strDatas[2] , strDatas[-2]) # JAVA
print(strDatas[3] , strDatas[-1]) # C++
print()
# ํํ ์ฌ๋ผ์ด์ฑ
nums = (10,20,30,40,50,60,70)
print(nums)
print(nums[:5]) # 0 ~ 4๋ฒ์งธ(-1) ๊น์ง
print(nums[5:]) # 5๋ฒ์งธ ๋ถํฐ ๋๊น์ง
print(nums[2:6]) # 2๋ฒ์งธ 5๋ฒ์งธ(-1)๊น์ง
print(nums[2:-1]) # 2๋ฒ์งธ๋ถํฐ 5๋ฒ์งธ ๊น์ง
print()
'''
3) ๋์
๋๋ฆฌ (์ฌ์ )
- ํค:๋ฒจ๋ฅ ํํ๋ก ๋ฐ์ดํฐ๋ฅผ ์ ์ฅํ๋ ์๋ฃ ๊ตฌ์กฐ
[ํ์]
๋ณ์ = {ํค1:๋ฒจ๋ฅ1 ,ํค2:๋ฒจ๋ฅ2,...}
'''
# ๋์
๋๋ฆฌ ๋ง๋ค๊ธฐ
#1) ํค๊ฐ์๋ ๋ฆฌ์คํธ์,์ฌ์ ํํ๋ฅผ ์ ์ธํ ์๋ฃํ์ด ๊ฐ๋ฅํฉ๋๋ค.
dict1 = {1:'python'}
dict2 = {1.1:'python'}
dict3 = {'string' : 'python'}
dict4 = {(1,2,3) : 'python'}
#dict5 = {[1,2,3] : 'python'} # error
#dict6 = {{'key':'value'} : 'python'} # error
print(dict1)
print(dict2)
print(dict3)
print(dict4)
print()
#2) ๋ฒจ๋ฅ๊ฐ์ ๋ชจ๋ ์๋ฃํ์ด ๊ฐ๋ฅํฉ๋๋ค.
dict1 = {'keyData' : 1}
dict2 = {'keyData' : 1.1}
dict3 = {'keyData' : 'stringData'}
dict4 = {'keyData' : [1,2,3]}
dict5 = {'keyData' : (1,2,3)}
dict6 = {'keyData' : {"subKeyData":"value"}}
print(dict1)
print(dict2)
print(dict3)
print(dict4)
print(dict5)
print(dict6)
print()
# ๋์
๋๋ฆฌ ๋ฐ์ดํฐ ์ ๊ทผ๋ฐฉ๋ฒ
client1 = {"name":"ํ๊ธธ๋" , "age":19 , "contact":"010-1234-5678"}
print(client1)
print(client1["name"] , client1.get("name"))
print(client1["age"] , client1.get("age"))
print(client1["contact"] , client1.get("contact"))
print()
# ๋์
๋๋ฆฌ์ ์์ ์ถ๊ฐ
client1['height'] = 180.5 #๋ณ์๋ช
[์๋ก์ด ํค] = ์๋ก์ด ๋ฒจ๋ฅ ํํ๋ก ์ถ๊ฐ
client1['weight'] = 80.5
print(client1)
print()
# ๋์
๋๋ฆฌ์ ์์ ์์
client1['contact'] = "010-9876-5432" #๋ณ์๋ช
[๊ธฐ์กดํค] = ์๋ก์ด ๋ฒจ๋ฅ
client1['age'] = 29
print(client1)
print()
# ๋์
๋๋ฆฌ์ ์์ ์ญ์
del client1['height'] # del ๋ณ์๋ช
[๊ธฐ์กด์ ํค๊ฐ]
del client1['weight']
print(client1)
print()
# ๋์
๋๋ฆฌ ๊ด๋ จ ํจ์
# keys() : ํค ๋ฆฌ์คํธ ๋ง๋ค๊ธฐ
client1 = { 'name' : '๊ท๋๋ฐ๋ก์ฌ',
'age' : 20,
'contact' : '010-1234-1234',
'grade' : 'A+' }
print(client1 .keys())
print(type(client1 .keys())) # dict_keysํํ๋ก ๋ฐํ๋๋ค.
print(list(client1.keys())) # ๋ฆฌ์คํธํํ๋ก ํ๋ณํ ํ ๊ฒฝ์ฐ
# ๋ฆฌ์คํธ ๋ฌธ๋ฒ์ด ์ ์ฉ๋๋ค. (์ถ๊ฐ ์์
์ด ๊ฐ๋ฅ)
print()
# values() : value ๋ฆฌ์คํธ ๋ง๋ค๊ธฐ
print(client1.values())
print(list(client1.values()))
print()
# items() : ํค,๋ฒจ๋ฅ ๋ชจ๋ ๊ฐ์ ธ์ค๊ธฐ
print(client1.items())
print()
|
16,516 | d9e185894fa2ef477a46403e6780bf77a5982380 | import torch
import numpy as np
import random
from collections import deque, namedtuple
from utils import sync_networks, conv2d_size_out
# One replay-buffer transition: (state, action, reward, next_state, done).
Experience = namedtuple('Experience',
                        ['state', 'action', 'reward', 'next_state', 'done'])
class DQN_Base_model(torch.nn.Module):
    """Shared scaffolding for DQN models.

    Provides epsilon-greedy action selection plus max/argmax over the
    Q-values produced by a subclass's `forward`.
    """

    def __init__(self, device, state_space, action_space, num_actions):
        """Store the environment/run parameters; subclasses build the layers."""
        super(DQN_Base_model, self).__init__()
        self.state_space = state_space
        self.action_space = action_space
        self.device = device
        self.num_actions = num_actions

    def build_model(self):
        # Subclasses must construct layers producing batch x num_actions.
        raise NotImplementedError

    def forward(self, state):
        raise NotImplementedError

    def max_over_actions(self, state):
        """(values, indices) of the per-sample maximum Q-value."""
        return torch.max(self(state.to(self.device)), dim=1)

    def argmax_over_actions(self, state):
        """Index of the per-sample best action."""
        return torch.argmax(self(state.to(self.device)), dim=1)

    def act(self, state, epsilon):
        """Epsilon-greedy: random action with prob. epsilon, else greedy."""
        if random.random() < epsilon:
            return self.action_space.sample()
        with torch.no_grad():
            batch = torch.Tensor(state).unsqueeze(0)
            chosen = self.argmax_over_actions(batch)
            action = chosen.cpu().detach().numpy().flatten()[0]
            assert self.action_space.contains(action)
        return action
class DQN_MLP_model(DQN_Base_model):
    """Fully-connected (MLP) DQN value network for flat state vectors."""

    def __init__(self, device, state_space, action_space, num_actions):
        """Build an MLP mapping flat states to per-action Q-values."""
        # initialize all parameters
        super(DQN_MLP_model, self).__init__(device, state_space, action_space,
                                            num_actions)
        # architecture: hidden (in, out) sizes; the input layer and the final
        # linear head are added in build_model
        self.layer_sizes = [(768, 768), (768, 768), (768, 512)]
        self.build_model()

    def build_model(self):
        # output should be in batchsize x num_actions
        # First layer takes in states
        layers = [
            torch.nn.Linear(self.state_space.shape[0], self.layer_sizes[0][0]),
            torch.nn.ReLU()
        ]
        for size in self.layer_sizes:
            layer = [torch.nn.Linear(size[0], size[1]), torch.nn.ReLU()]
            layers.extend(layer)
        # final head: last hidden width -> one Q-value per action (no ReLU)
        layers.append(torch.nn.Linear(self.layer_sizes[-1][1],
                                      self.num_actions))
        self.body = torch.nn.Sequential(*layers)
        trainable_parameters = sum(
            p.numel() for p in self.parameters() if p.requires_grad)
        print(f"Number of trainable parameters: {trainable_parameters}")

    def forward(self, state):
        q_value = self.body(state)
        return q_value
class DQN_CNN_model(DQN_Base_model):
    """Convolutional DQN value network (Nature-DQN style conv stack)."""

    def __init__(self,
                 device,
                 state_space,
                 action_space,
                 num_actions,
                 num_frames=4,
                 final_dense_layer=512,
                 input_shape=(84, 84)):
        """Build the conv + dense network.

        :param num_frames: number of stacked input frames (conv in-channels)
        :param final_dense_layer: width of the hidden dense layer
        :param input_shape: (H, W) of each input frame
        """
        super(DQN_CNN_model, self).__init__(device, state_space, action_space,
                                            num_actions)
        self.num_frames = num_frames
        self.final_dense_layer = final_dense_layer
        self.input_shape = input_shape
        self.build_model()

    def build_model(self):
        # Conv feature extractor; its output is flattened to
        # batch x (H' * W' * 64) before the dense head.
        self.body = torch.nn.Sequential(*[
            torch.nn.Conv2d(self.num_frames, 32, kernel_size=(8, 8), stride=4),
            torch.nn.ReLU(),
            torch.nn.Conv2d(32, 64, kernel_size=(4, 4), stride=2),
            torch.nn.ReLU(),
            torch.nn.Conv2d(64, 64, kernel_size=(3, 3), stride=1),
            torch.nn.ReLU()
        ])
        # Track the spatial size through the three convs to size the head.
        final_size = conv2d_size_out(self.input_shape, (8, 8), 4)
        final_size = conv2d_size_out(final_size, (4, 4), 2)
        final_size = conv2d_size_out(final_size, (3, 3), 1)
        self.head = torch.nn.Sequential(*[
            torch.nn.Linear(final_size[0] * final_size[1] * 64,
                            self.final_dense_layer),
            torch.nn.ReLU(),
            torch.nn.Linear(self.final_dense_layer, self.num_actions)
        ])
        trainable_parameters = sum(
            p.numel() for p in self.parameters() if p.requires_grad)
        print(f"Number of trainable parameters: {trainable_parameters}")

    def forward(self, state):
        cnn_output = self.body(state)
        q_value = self.head(cnn_output.reshape(cnn_output.size(0), -1))
        return q_value

    # fix (dead duplication): the original re-declared ``act`` with a body
    # byte-identical to DQN_Base_model.act; the override is removed so the
    # epsilon-greedy policy lives in exactly one place.
class DQN_agent:
    """DQN agent: online/target networks, replay buffer and training loss.

    Builds matching online and target networks (MLP or CNN), keeps the
    experience replay buffer, and implements the (double) DQN TD loss plus
    the linear epsilon-decay schedule.
    """

    def __init__(self,
                 device,
                 state_space,
                 action_space,
                 num_actions,
                 target_moving_average,
                 gamma,
                 replay_buffer_size,
                 epsilon_decay,
                 epsilon_decay_end,
                 warmup_period,
                 double_DQN,
                 model_type="mlp",
                 num_frames=None):
        """Create the agent.

        :param target_moving_average: averaging factor used by sync_networks
        :param gamma: discount factor
        :param replay_buffer_size: max number of stored experiences
        :param epsilon_decay: number of steps over which epsilon decays linearly
        :param epsilon_decay_end: floor value for epsilon
        :param warmup_period: steps with epsilon fixed at 1 before decay starts
        :param double_DQN: use the double-DQN target (online net picks actions)
        :param model_type: "mlp" or "cnn"
        :param num_frames: required for the CNN model (stacked input frames)
        """
        self.replay_buffer = deque(maxlen=replay_buffer_size)
        if model_type == "mlp":
            self.online = DQN_MLP_model(device, state_space, action_space,
                                        num_actions)
            self.target = DQN_MLP_model(device, state_space, action_space,
                                        num_actions)
        elif model_type == "cnn":
            assert num_frames
            self.num_frames = num_frames
            self.online = DQN_CNN_model(device,
                                        state_space,
                                        action_space,
                                        num_actions,
                                        num_frames=num_frames)
            self.target = DQN_CNN_model(device,
                                        state_space,
                                        action_space,
                                        num_actions,
                                        num_frames=num_frames)
        else:
            raise NotImplementedError(model_type)
        self.online = self.online.to(device)
        self.target = self.target.to(device)
        # Start the target net as an exact copy; it is only updated through
        # sync_networks and is never trained directly.
        self.target.load_state_dict(self.online.state_dict())
        self.target.eval()

        self.gamma = gamma
        self.target_moving_average = target_moving_average
        self.epsilon_decay = epsilon_decay
        self.epsilon_decay_end = epsilon_decay_end
        self.warmup_period = warmup_period
        self.device = device
        self.model_type = model_type
        self.double_DQN = double_DQN

    def loss_func(self, minibatch, writer=None, writer_step=None):
        """Compute the (double) DQN MSE TD loss for one Experience minibatch.

        :param minibatch: Experience namedtuple of batched numpy fields
        :param writer: optional tensorboard-style writer for scalar logging
        :param writer_step: global step used when logging
        :return: scalar MSE loss between TD targets and Q predictions
        """
        # Make tensors
        state_tensor = torch.from_numpy(np.array(
            minibatch.state, copy=True)).to(self.device, dtype=torch.float32)
        next_state_tensor = torch.from_numpy(
            np.array(minibatch.next_state, copy=True)).to(self.device,
                                                          dtype=torch.float32)
        action_tensor = torch.FloatTensor(minibatch.action).to(self.device, dtype=torch.float32)
        reward_tensor = torch.FloatTensor(minibatch.reward).to(self.device, dtype=torch.float32)
        # NOTE(review): done is kept as uint8 and mixed into float arithmetic
        # below; newer torch releases warn on uint8/float ops -- consider
        # converting to float32 here.
        done_tensor = torch.ByteTensor(minibatch.done).to(self.device, dtype=torch.uint8)

        # Get q value predictions for the actions that were actually taken
        q_pred_batch = self.online(state_tensor).gather(
            dim=1, index=action_tensor.long().unsqueeze(1)).squeeze(1)
        with torch.no_grad():
            if self.double_DQN:
                # Double DQN: online net selects the action, target net scores it.
                selected_actions = self.online.argmax_over_actions(
                    next_state_tensor)
                q_target = self.target(next_state_tensor).gather(
                    dim=1,
                    index=selected_actions.long().unsqueeze(1)).squeeze(1)
            else:
                q_target = self.target.max_over_actions(
                    next_state_tensor.detach()).values

        # TD target; (1 - done) zeroes the bootstrap term on terminal steps.
        q_label_batch = reward_tensor + (self.gamma) * (1 -
                                                        done_tensor) * q_target
        q_label_batch = q_label_batch.detach()

        # Logging
        if writer:
            writer.add_scalar('training/batch_q_label', q_label_batch.mean(),
                              writer_step)
            writer.add_scalar('training/batch_q_pred', q_pred_batch.mean(),
                              writer_step)
            writer.add_scalar('training/batch_reward', reward_tensor.mean(),
                              writer_step)
        return torch.nn.functional.mse_loss(q_label_batch, q_pred_batch)

    def sync_networks(self):
        """Moving-average update of the target network towards the online one."""
        sync_networks(self.target, self.online, self.target_moving_average)

    def set_epsilon(self, global_steps, writer=None):
        """Linear epsilon schedule: 1 during warmup, then decay to the floor."""
        if global_steps < self.warmup_period:
            self.online.epsilon = 1
            self.target.epsilon = 1
        else:
            self.online.epsilon = max(
                self.epsilon_decay_end,
                1 - (global_steps - self.warmup_period) / self.epsilon_decay)
            self.target.epsilon = max(
                self.epsilon_decay_end,
                1 - (global_steps - self.warmup_period) / self.epsilon_decay)
        if writer:
            writer.add_scalar('training/epsilon', self.online.epsilon,
                              global_steps)
|
16,517 | 416051cded9dd44cdc9ff180baab95c1f7d274e1 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# @FileName :download.py
# @Author :Shuranima
# @Time :2020/8/11 19:00
import os
import requests
from tqdm import tqdm
import time
from spider.ua import HEADERS
import random
def down_from_url(url, dst):
    """Download *url* to *dst* with HTTP-Range based resume support.

    :param str url: source URL
    :param str dst: destination file path (appended to when partially present)
    :return int: total file size in bytes on success/already-complete, or -1
        when the server answers with a status other than 200/206
    """
    # Copy the chosen header dict: random.choice returns a *reference* into
    # the module-level HEADERS list, and adding the Range key below would
    # otherwise permanently mutate that shared entry for all later calls.
    headers = dict(random.choice(HEADERS))
    with requests.get(url, headers=headers, stream=True) as req:  # size probe
        if 200 != req.status_code and 206 != req.status_code:
            return -1
        file_size = int(req.headers['content-length'])
    first_byte = os.path.getsize(dst) if os.path.exists(dst) else 0
    if first_byte >= file_size:  # already fully downloaded
        return file_size
    headers['Range'] = f"bytes={first_byte}-{file_size}"
    pbar = tqdm(total=file_size, initial=first_byte, unit='B', unit_scale=True, desc=dst)
    try:
        with requests.get(url, headers=headers, stream=True) as req:
            if 200 != req.status_code and 206 != req.status_code:
                return -1
            with open(dst, 'ab') as f:
                for chunk in req.iter_content(chunk_size=1024):
                    if chunk:
                        f.write(chunk)
                        # fix: advance by the actual chunk length -- the last
                        # chunk is usually shorter than 1024 bytes, so the
                        # original pbar.update(1024) overshot the total
                        pbar.update(len(chunk))
    finally:
        # close the bar even if the transfer raises mid-stream
        pbar.close()
    return file_size
def download(url, path, filename):
    """Download *url* into ``path/filename`` and print a transfer summary.

    Creates *path* when missing, delegates the resumable transfer to
    ``down_from_url`` and reports one of three outcomes: failed (-1),
    incomplete (file smaller than the reported size) or finished.
    """
    if not os.path.exists(path):
        os.makedirs(path)
    print('ๅฐๅ๏ผ' + url)
    print('ๅผๅงไธ่ฝฝ,' + filename)
    start_time = time.time()
    # bytes already on disk before the transfer (0 when starting fresh)
    if os.path.exists(path + '/' + filename):
        start_file_size = os.path.getsize(path + '/' + filename)
    else:
        start_file_size = 0
    mp4_size = down_from_url(url, path + '/' + filename)
    # bytes on disk after the transfer
    if os.path.exists(path + '/' + filename):
        end_file_size = os.path.getsize(path + '/' + filename)
    else:
        end_file_size = 0
    end_time = time.time()
    if -1 == mp4_size:
        # server refused the request (non-200/206 status)
        print(
            f"ไธ่ฝฝๅคฑ่ดฅ--ๆไปถๅๅงๅคงๅฐ๏ผ{start_file_size}/byte๏ผไธ่ฝฝๅคงๅฐ๏ผ{end_file_size - start_file_size}/byte๏ผ่ง้ขๅคงๅฐ๏ผ{mp4_size}๏ผ่ฑ่ดนๆถ้ด๏ผ{end_time - start_time}/s")
    elif end_file_size < mp4_size:
        # transfer ended early; a later call will resume from end_file_size
        print(
            f"ไธ่ฝฝๆชๅฎๆ--ๆไปถๅๅงๅคงๅฐ๏ผ{start_file_size}/byte๏ผไธ่ฝฝๅคงๅฐ๏ผ{end_file_size - start_file_size}/byte๏ผ่ง้ขๅคงๅฐ๏ผ{mp4_size}๏ผ่ฑ่ดนๆถ้ด๏ผ{end_time - start_time}/s")
    else:
        print(
            f"ไธ่ฝฝๅทฒๅฎๆ--ๆไปถๅๅงๅคงๅฐ๏ผ{start_file_size}/byte๏ผไธ่ฝฝๅคงๅฐ๏ผ{end_file_size - start_file_size}/byte๏ผ่ง้ขๅคงๅฐ๏ผ{mp4_size}๏ผ่ฑ่ดนๆถ้ด๏ผ{end_time - start_time}/s")
|
16,518 | 4d6ea00c6aefe3d8384c641cc472f7ea7487c31f | import numpy as np
from math import exp
import random
class Sampler(object):
    """Base class for BPR-style (user, positive item, negative item) samplers.

    ``data`` is a sparse user x item interaction matrix (CSR-like: rows
    expose ``getnnz()`` and ``.indices``) -- TODO confirm against callers.
    """

    def __init__(self, sample_negative_items_empirically):
        # when True, negatives are drawn from the empirical item popularity
        # distribution instead of uniformly over all items
        self.sample_negative_items_empirically = sample_negative_items_empirically

    def init(self, data, max_samples=None):
        """Bind the interaction matrix and an optional cap on sample count."""
        self.data = data
        self.num_users,self.num_items = data.shape
        self.max_samples = max_samples

    def sample_user(self):
        """Draw a user uniformly; must have some but not all items rated."""
        u = self.uniform_user()
        num_items = self.data[u].getnnz()
        assert(num_items > 0 and num_items != self.num_items)
        return u

    def sample_negative_item(self,user_items):
        """Rejection-sample an item the user has *not* interacted with."""
        j = self.random_item()
        while j in user_items:
            j = self.random_item()
        return j

    def uniform_user(self):
        return random.randint(0,self.num_users-1)

    def random_item(self):
        """sample an item uniformly or from the empirical distribution
        observed in the training data
        """
        if self.sample_negative_items_empirically:
            # just pick something someone rated!
            u = self.uniform_user()
            i = random.choice(self.data[u].indices)
        else:
            i = random.randint(0,self.num_items-1)
        return i

    def num_samples(self,n):
        """Clamp *n* to ``max_samples`` when a cap was configured."""
        if self.max_samples is None:
            return n
        return min(n,self.max_samples)
class UniformPairWithoutReplacement(Sampler):
    """Yield (user, positive item, negative item) triples, visiting the
    observed (user, item) pairs in uniformly-shuffled order without
    replacement."""

    def generate_samples(self, data, max_samples=None):
        """Generate up to ``num_samples(data.nnz)`` training triples.

        :param data: sparse user x item interaction matrix
        :param max_samples: optional cap on the number of triples
        :yields: (u, i, j) with i a rated item of u and j an unrated one
        """
        self.init(data, max_samples)
        # fix: materialise the index list -- in Python 3 ``range`` is
        # immutable and random.shuffle would raise TypeError
        idxs = list(range(self.data.nnz))
        random.shuffle(idxs)
        self.users, self.items = self.data.nonzero()
        self.users = self.users[idxs]
        self.items = self.items[idxs]
        self.idx = 0
        # fix: ``xrange`` is Python-2 only
        for _ in range(self.num_samples(self.data.nnz)):
            u = self.users[self.idx]
            i = self.items[self.idx]
            j = self.sample_negative_item(self.data[u])
            self.idx += 1
            # fix: the original yielded the undefined name ``y`` (NameError);
            # the sampled negative item ``j`` is what callers expect
            yield u, i, j
|
16,519 | 071559d0203ef4def52b2b7b4de69ca0a05fcc6f | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from articles.models import Article,Category,Tag
from users.models import User
from img.upload import delete_image_to_qiniu
from django.db.models.signals import post_delete
# Register your models here.
class ArticleAdmin(admin.ModelAdmin):
    """Admin for Article: regular staff only see/edit their own articles,
    superusers see everything.

    NOTE(review): extraction split several multi-byte comments (and one
    string literal) across physical lines, which made this file a
    SyntaxError; the lines are re-joined and the comments translated.
    """

    list_display = ['title', 'nickname', 'published_date', 'status', 'comments_count', 'view_count', ]
    list_filter = ['published_date']

    # When an Article row is deleted, also remove its image from qiniu storage.
    def delete_file(sender, instance, **kwargs):
        url = 'media/' + str(instance.img)
        delete_image_to_qiniu(url)
    post_delete.connect(delete_file, sender=Article)

    # Superusers get the full queryset; other staff only their own articles.
    def get_queryset(self, request):
        qs = super(ArticleAdmin, self).get_queryset(request)
        if request.user.is_superuser:
            return qs
        # ``user`` here is a related object of a related object; with a
        # normal foreign key this would simply be filter(user=request.user)
        return qs.filter(user_id=request.user.id)

    def save_model(self, request, obj, form, change):
        # stamp the creating user on first save only
        if not obj.id:
            obj.user_id = request.user.id
        obj.save()

    def nickname(self, obj):
        """Changelist column: the author's display nickname."""
        return User.objects.get(id=obj.user_id).nickname
    # NOTE(review): this label was a multi-byte literal split by extraction;
    # re-joined from the visible bytes -- confirm it still reads "author"
    # in the source encoding.
    nickname.short_description = u'ไฝ่'
admin.site.register(Article,ArticleAdmin)
admin.site.register(Category)
admin.site.register(Tag) |
16,520 | bac4ce4458e7027bed7f949dc71053b8b0031a53 | from PySide import QtGui
import sys
from BaseStation.ui.widgets.main_window import Main
from Robot.configuration.config import Config
from Robot.filler import country_repository_filler
from Robot.resources.kinect import Kinect
def init_ui():
    """Create the Qt application, show the main window and run the event
    loop until the window is closed (the process exits with Qt's code)."""
    application = QtGui.QApplication(sys.argv)
    main_window = Main()
    main_window.show()
    sys.exit(application.exec_())
if __name__ == '__main__':
    # Populate the country repository, load configuration and start the
    # Kinect sensor before bringing up the Qt UI.
    country_repository_filler.fill_repository()
    Config().load_config()
    Kinect().start()
    init_ui()
|
16,521 | 0b59dcc9ab32a582f2156ecdfd54feed5b77815e | # -*- coding:utf-8 -*-
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 10
_modified_time = 1412041326.883988
_enable_loop = True
_template_filename = u'/home/chodges/lager-pylons/lager/lager/lager/templates/base.mako'
_template_uri = u'/base.mako'
_source_encoding = 'utf-8'
from webhelpers.html import escape
_exports = []
def render_body(context, **pageargs):
    # AUTO-GENERATED by mako from templates/base.mako -- do not edit this
    # function by hand; change the template source instead.
    __M_caller = context.caller_stack._push_frame()
    try:
        __M_locals = __M_dict_builtin(pageargs=pageargs)
        self = context.get('self', UNDEFINED)
        __M_writer = context.writer()
        # static template text, then the head_tags and body template blocks
        __M_writer(u'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"\r\n"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">\r\n<html>\r\n <head>\r\n <link rel="stylesheet" type="text/css" href="/lumenate_style.css" />\r\n ')
        __M_writer(escape(self.head_tags()))
        __M_writer(u'\r\n </head>\r\n <body>\r\n ')
        __M_writer(escape(self.body()))
        __M_writer(u'\r\n </body>\r\n</html>')
        return ''
    finally:
        context.caller_stack._pop_frame()
"""
__M_BEGIN_METADATA
{"source_encoding": "utf-8", "line_map": {"32": 26, "16": 0, "22": 2, "23": 7, "24": 7, "25": 10, "26": 10}, "uri": "/base.mako", "filename": "/home/chodges/lager-pylons/lager/lager/lager/templates/base.mako"}
__M_END_METADATA
"""
|
16,522 | 5d7fe28f42a11704f5b94bdd9af1c212fd88e321 | import tkinter as tk
from tkinter import *
from tkinter.ttk import *
from tkinter import ttk
from tkcalendar import Calendar, DateEntry
from src.db import insert_expense
class Expense(tk.Frame):
    """Expense-entry screen: amount, date, category radio group and a note,
    persisted via ``insert_expense`` when Save is pressed."""

    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        self.controller = controller
        # create a frame for input of amount and date
        eframe = tk.LabelFrame(self)
        eframe.grid(row=0, column=0, columnspan=15)
        eframe.configure({"relief": "flat", "text": ""})
        # label for expenditure
        textlabel = ttk.Label(eframe, text="Expenditure", padding="5 5 5 5")
        textlabel.grid(row=1, column=2)
        # entry label for amount
        entrylabel = ttk.Entry(eframe)
        entrylabel.grid(row=1, column=5)
        # date using tk calender for taking date input
        datelabel = ttk.Label(eframe, text="Date", padding="5 5 5 5")
        datelabel.grid(row=2, column=2)
        cal = DateEntry(eframe, width=12, background='darkblue', date_pattern='mm/dd/y',
                        foreground='white', borderwidth=2)
        cal.grid(row=2, column=5, padx=15)
        # frame for category
        cframe = tk.LabelFrame(self)
        cframe.grid(row=0, column=16, rowspan=5, columnspan=10, sticky=tk.E, padx=20, pady=15)
        cframe.configure({"relief": "flat", "text": ""})
        clabel = ttk.Label(cframe, text="Category", padding="15 15 15 15")
        clabel.grid(row=1, column=18)
        # radio buttons to take input for category options
        # NOTE(review): ``category`` is created without an explicit master and
        # the bare ``category.get()`` discards its result -- presumably a
        # leftover; confirm the variable registers on the intended Tk root.
        category = StringVar()
        category.get()
        grocery = ttk.Radiobutton(cframe, text="Grocery", variable=category, value='grocery', padding="3 3 3 3")
        grocery.grid(row=1, column=22, sticky=tk.W)
        electricity = ttk.Radiobutton(cframe, text="Electricity", variable=category, value='electricity',
                                      padding="3 3 3 3")
        electricity.grid(row=2, column=22, sticky=tk.W)
        education = ttk.Radiobutton(cframe, text="Education", variable=category, value='education', padding="3 3 3 3")
        education.grid(row=3, column=22, sticky=tk.W)
        travel = ttk.Radiobutton(cframe, text="Travel", variable=category, value='travel', padding="3 3 3 3")
        travel.grid(row=4, column=22, sticky=tk.W)
        health = ttk.Radiobutton(cframe, text="Health", variable=category, value='health', padding="3 3 3 3")
        health.grid(row=5, column=22, sticky=tk.W)
        selfDevelopment = ttk.Radiobutton(cframe, text="Self - Development", variable=category, value='selfDevelopment',
                                          padding="3 3 3 3")
        selfDevelopment.grid(row=6, column=22, sticky=tk.W)
        luxury = ttk.Radiobutton(cframe, text="Luxury", variable=category, value='luxury',
                                 padding="3 3 3 3")
        luxury.grid(row=7, column=22, sticky=tk.W)
        other = ttk.Radiobutton(cframe, text="Other", variable=category, value='other',
                                padding="3 3 3 3")
        other.grid(row=8, column=22, sticky=tk.W)
        # frame for any description for source of expense
        extraframe = tk.LabelFrame(self, padx=10, pady=15)
        extraframe.grid(row=6, column=0, rowspan=5, columnspan=15)
        extraframe.configure({"relief": "flat", "text": ""})
        note_label = ttk.Label(extraframe, text="Note")
        note_label.grid(row=7, column=2, sticky=W)
        note = ttk.Entry(extraframe)
        note.grid(row=7, column=3, padx=10, sticky=W)
        # save button for entering info in database
        button = ttk.Button(extraframe, text="Save",
                            command=lambda: [insert_expense(entrylabel, category, cal, note), clear()])
        button.grid(row=9, column=10, sticky=tk.E)

        # TODO turn off category widget
        def clear():
            # reset the form after a successful save
            entrylabel.delete(0, END)
            # date_entrylabel.delete(0, END)
            note.delete(0, END)
16,523 | 1f772db2073124073246b8dc8b7b98179bcd74cd | import tensorflow as tf
import numpy as np
import pdb
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.datasets import fetch_openml
from sklearn.metrics import accuracy_score
#--------------------------------------------------------------------------------
# mnistใใผใฟใใญใผใใใ่ชฌๆๅคๆฐใจ็ฎ็ๅคๆฐใ่ฟใ
def load_mnist_data():
    """Fetch MNIST from OpenML; return (features scaled to [0,1], int labels)."""
    # load the MNIST data (downloads over the network on first call)
    mnist = fetch_openml('mnist_784', version=1,)
    # image data: 70000 x 784, pixel values 0-255
    xData = mnist.data.astype(np.float32)
    # normalise pixels to the range 0-1
    xData /= 255
    # 70000 integer labels
    yData = mnist.target.astype(np.int32)
    return xData, yData
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
# ้ใฟใฎๅๆๅ
def weight_variable(name, shape):
    """Create/fetch a TF variable initialised from N(0, 0.1) (weights)."""
    return tf.get_variable(name, shape, initializer=tf.random_normal_initializer(stddev=0.1))
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
# ใใคใขในใฎๅๆๅ
def bias_variable(name, shape):
    """Create/fetch a TF variable initialised to the constant 0.1 (biases)."""
    return tf.get_variable(name, shape, initializer=tf.constant_initializer(0.1))
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
# ็ทๅฝขๅๅธฐใขใใซ
def linear_regression(x_t, xDim, yDim, reuse=False):
    """Softmax (multinomial logistic) regression layer.

    Despite the name, this returns class probabilities (softmax of an affine
    transform), not a raw linear output.
    """
    with tf.variable_scope('linear_regression') as scope:
        if reuse:
            scope.reuse_variables()
        # initialise the weights
        w = weight_variable('w', [xDim, yDim])
        # initialise the bias
        b = bias_variable('b', [yDim])
        # affine transform followed by softmax
        y = tf.nn.softmax(tf.add(tf.matmul(x_t, w), b))
    return y
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
# ็ทๅฝขๅๅธฐใขใใซ
def classifier_model(x_t, xDim, yDim, reuse=False):
    """Two-layer MLP classifier: 128-unit ReLU hidden layer + softmax output."""
    with tf.variable_scope('classifier_model') as scope:
        if reuse:
            scope.reuse_variables()
        # hidden layer weights
        w1 = weight_variable('w1', [xDim, 128])
        # hidden layer bias
        b1 = bias_variable('b1', [128])
        # hidden layer: affine transform + ReLU
        h1 = tf.nn.relu(tf.add(tf.matmul(x_t, w1), b1))
        # output layer weights
        w2 = weight_variable('w2', [128, yDim])
        # output layer bias
        b2 = bias_variable('b2', [yDim])
        # output layer: affine transform + softmax -> class probabilities
        y = tf.nn.softmax(tf.add(tf.matmul(h1, w2), b2))
    return y
#--------------------------------------------------------------------------------
if __name__ == "__main__":
    # NOTE(review): extraction split several multi-byte Japanese comments
    # across physical lines, leaving bare text that made this script a
    # SyntaxError; the comments are re-joined and translated to English
    # below -- the executable statements are unchanged.

    # Load the MNIST data set.
    xData, yData = load_mnist_data()
    # Number of target categories (output dimensionality).
    label_num = 10
    # Convert integer labels to one-hot encoding.
    yData = np.squeeze(np.identity(label_num)[yData])
    yDim = yData.shape[1]
    # Split into training and test sets.
    xData_train, xData_test, yData_train, yData_test = train_test_split(xData, yData, test_size=0.2, random_state=42)
    #--------------------------------------------------------------------------------
    # TensorFlow variables.
    # Number of input features.
    xDim = xData.shape[1]
    # Placeholders for features (x_t) and targets (y_t).
    x_t = tf.placeholder(tf.float32, [None, xDim])
    y_t = tf.placeholder(tf.float32, [None, yDim])
    learning_rate = tf.constant(0.01, dtype=tf.float32)
    #--------------------------------------------------------------------------------
    # TensorFlow graph: shared-weight model applied to train and test inputs.
    output_train = classifier_model(x_t, xDim, yDim)
    output_test = classifier_model(x_t, xDim, yDim, reuse=True)
    # Loss (cross entropy).
    # NOTE(review): sigmoid_cross_entropy_with_logits is applied to *softmax
    # probabilities*, not raw logits; training still proceeds, but
    # softmax_cross_entropy on pre-softmax activations is the conventional
    # choice -- kept as-is to preserve behavior.
    loss_square_train = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=y_t, logits=output_train))
    loss_square_test = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=y_t, logits=output_test))
    # Optimiser.
    opt = tf.train.AdamOptimizer(learning_rate)
    training_step = opt.minimize(loss_square_train)
    #--------------------------------------------------------------------------------
    # Create the session and initialise all variables.
    sess = tf.Session()
    init = tf.global_variables_initializer()
    sess.run(init)
    #--------------------------------------------------------------------------------
    # Training / evaluation loop.
    # Loss and accuracy histories.
    loss_train_list = []
    loss_test_list = []
    accuracy_train_list = []
    accuracy_test_list = []
    # Number of epochs.
    nIte = 500
    # Evaluate on the test set once every test_rate epochs.
    test_rate = 10
    # Mini-batch size.
    BATCH_SIZE = 500
    # Sizes of the training and test sets.
    num_data_train = xData_train.shape[0]
    num_data_test = xData_test.shape[0]
    for ite in range(nIte):
        pern = np.random.permutation(num_data_train)
        for i in range(0, num_data_train, BATCH_SIZE):
            batch_x = xData_train[pern[i:i+BATCH_SIZE]]
            batch_y = yData_train[pern[i:i+BATCH_SIZE]]
            # Feed the mini-batch into the placeholders and take one
            # gradient-descent step.
            train_dict = {x_t: batch_x, y_t: batch_y}
            sess.run([training_step], feed_dict=train_dict)
            loss_train = sess.run(loss_square_train, feed_dict=train_dict)
            output = sess.run(output_train, feed_dict=train_dict)
            accuracy_train = accuracy_score(np.argmax(batch_y, axis=1), np.argmax(output, axis=1))
        # Every test_rate epochs: evaluate, record histories and log.
        if ite % test_rate == 0:
            test_dict = {x_t: xData_test, y_t: yData_test}
            loss_test = sess.run(loss_square_test, feed_dict=test_dict)
            accuracy_test = accuracy_score(np.argmax(yData_test, axis=1), np.argmax(sess.run(output_test, feed_dict=test_dict), axis=1))
            loss_train_list.append(loss_train)
            loss_test_list.append(loss_test)
            accuracy_train_list.append(accuracy_train)
            accuracy_test_list.append(accuracy_test)
            print('#{0}, train loss : {1}'.format(ite, loss_train))
            print('#{0}, test loss : {1}'.format(ite, loss_test))
    #--------------------------------------------------------------------------------
    # Plot the loss histories.
    fig = plt.figure()
    plt.xlabel('epoch')
    plt.plot(range(len(loss_train_list)), loss_train_list, label='train_loss')
    plt.plot(range(len(loss_test_list)), loss_test_list, label='test_loss')
    plt.legend()
    plt.show()
    # Plot the accuracy histories.
    fig = plt.figure()
    plt.xlabel('epoch')
    plt.plot(range(len(accuracy_train_list)), accuracy_train_list, label='train_accuracy')
    plt.plot(range(len(accuracy_test_list)), accuracy_test_list, label='test_accuracy')
    plt.legend()
    plt.show()
    #--------------------------------------------------------------------------------
16,524 | 47cff94cd4131fce63abfb8a772a6bd49662c4fc | #!python3
import paho.mqtt.client as mqtt #import the client1
import time
#import I2C_LCD_driver
def on_connect(client, userdata, flags, rc):
    """CONNACK callback: flag a successful broker connection on the client."""
    if rc != 0:
        print("Bad connection Returned code=", rc)
        return
    # Raise the flag the main loop polls while waiting for the broker.
    client.connected_flag = True
    print("connected OK")
def on_message(client, userdata, message):
    """Message callback: print a timestamp plus topic and payload."""
    print ("Time: %s" %time.strftime("%H:%M:%S"))
    # fix: MQTT payloads arrive as bytes under Python 3; concatenating them
    # directly onto a str (as the original did) raises TypeError there
    payload = message.payload.decode() if isinstance(message.payload, bytes) else message.payload
    print ("Message received: " + message.topic + ": " + payload)
#mylcd = I2C_LCD_driver.lcd()

# Per-class flag set by on_connect to signal a successful CONNACK.
mqtt.Client.connected_flag = False

# --- broker settings ---
broker = "192.168.1.10"
port = 1883
user = "pataridis"
password = "rs232"
# (topic, QoS) pairs to subscribe to
mytopiclist = [("dummytemp1", 0), ("hum1", 0), ("temp2", 0), ("hum2", 0)]

client = mqtt.Client("pytemp1")          # create new instance
client.username_pw_set(user, password=password)
client.on_connect = on_connect           # bind callbacks
client.on_message = on_message
client.loop_start()
print("Connecting to broker ", broker, "on port ", port)
client.connect(broker, port)
while not client.connected_flag:         # wait until on_connect fires
    print("Waiting for connection...")
    time.sleep(2)
print("Subscribing...")
client.subscribe(mytopiclist)
try:
    # idle forever; the network loop thread delivers messages
    while True:
        time.sleep(1)
except KeyboardInterrupt:
    # fix: the original used the Python 2 print *statement* here, which is a
    # SyntaxError under Python 3 (the rest of the file uses print())
    print("Exiting....")
    time.sleep(1)
    client.disconnect()
    client.loop_stop()
16,525 | 8d276bb33ff4489c3ac28a7befc35597160d47d3 | # Create three radio buttons widgets using tkinter module
import tkinter as tk
parent = tk.Tk()
parent.title("Radiobutton")
parent.geometry('350*200')
radio1 = tk.Radiobutton(parent, text='First',value=1)
radio2 = tk.Radiobutton(parent, text='Second', value=2)
radio3 = tk.Radiobutton(parent, text='Thrd', value=3)
radio1.grid(column = 0, row = 0)
radio2.grid(column = 1, row = 0)
|
16,526 | 562616504ac73912a1eeb1fef859b37ff95ae004 | import logging
import os
from abc import abstractmethod
import traceback
import celery.signals
from celery.task import Task
from utils.job_db import JobDb
from utils.object import Object
from utils.setup_logging import setup_logging
from utils.celery_client import celery_app
setup_logging()
@celery.signals.setup_logging.connect
def on_celery_setup_logging(**_):
    # underscore is a throwaway-variable, to avoid code style warning for
    # unused variable
    """
    Enable manual logging configuration, independent of celery.

    Connecting this (empty) handler to celery's ``setup_logging`` signal
    prevents celery from hijacking the root logger, so the configuration
    applied by ``setup_logging()`` above stays in effect.
    """
    pass
def merge_dicts(a, b, path=None):
    """
    Deep merge dict *b* into dict *a* (in place) and return *a*.

    Values that are themselves dictionaries are merged recursively; for any
    other key present in both, the entry from *b* overrides *a* as long as
    both values have the same type.  A type conflict raises ``Exception``
    naming the dotted path of the offending key.

    :param dict a: destination dict (mutated and returned)
    :param dict b: source dict whose entries override *a*
    :param list path: key-path accumulator used for error messages
    :return dict: the merged dict *a*
    """
    if path is None:
        path = []
    for key in b:
        if key in a:
            if isinstance(a[key], dict) and isinstance(b[key], dict):
                merge_dicts(a[key], b[key], path + [str(key)])
            # fix: the original compared type(a) with type(b) -- both are
            # always dicts at this point, so the conflict branch below was
            # unreachable and mismatched types were silently overwritten;
            # the per-key values are what must be compared
            elif type(a[key]) is type(b[key]):
                a[key] = b[key]
            else:
                raise Exception('Conflicting types at %s'
                                % '.'.join(path + [str(key)]))
        else:
            a[key] = b[key]
    return a
class BaseTask(Task):
    """
    Abstract base class for all tasks in cilantro.
    It provides parameter handling and utility methods for accessing
    the file system.
    Implementations should override the execute_task() method.
    Return values of execute_task() are saved under the 'result' key in the
    params dictionary. This allows reading task results at a later stage,
    i.e. in a following task or when querying the job status.
    """

    # shared working-directory root; must be present in the environment
    working_dir = os.environ['WORKING_DIR']
    params = {}
    results = {}
    work_path = None
    log = logging.getLogger(__name__)

    def __init__(self):
        self.job_db = JobDb()

    def _propagate_failure_to_ancestors(self, parent_id, error):
        """Mark *parent_id* and every ancestor job as failed with *error*."""
        self.job_db.update_job_state(parent_id, 'failure')
        self.job_db.add_job_error(parent_id, error)
        parent = self.job_db.get_job_by_id(parent_id)
        if 'parent_job_id' in parent:
            self._propagate_failure_to_ancestors(parent['parent_job_id'], error)

    def after_return(self, status, retval, task_id, args, kwargs, einfo):
        """
        Use celery default handler method to write update to our database.
        https://docs.celeryproject.org/en/latest/userguide/tasks.html#handlers

        On failure, records an error object (using ``self.error`` set in
        ``run``) and propagates the failure up the parent-job chain.
        """
        self.job_db.update_job_state(self.job_id, status.lower())
        if status == 'FAILURE':
            error_object = { 'job_id': self.job_id, 'job_name': self.name, 'message': self.error }
            self.job_db.add_job_error( self.job_id, error_object )
            if self.parent_job_id is not None:
                self._propagate_failure_to_ancestors(self.parent_job_id, error_object)
        self.job_db.close()

    def get_work_path(self):
        """Return this task's absolute work dir, creating it when missing."""
        abs_path = os.path.join(self.working_dir, self.work_path)
        if not os.path.exists(abs_path):
            os.mkdir(abs_path)
        return abs_path

    def run(self, prev_result=None, **params):
        """
        Run the task logic and handle possible errors.
        In case the task throws an exception, it is caught and the task
        is replaced by a special task for handling exceptions.
        This task writes the error to the job database and sets the job status
        to 'failed'.
        Exceptions coming from the exception handler are just reraised
        and not handled further.
        Exceptions coming from celery because of ignored tasks are reraised
        and not handled further.
        :param dict/list prev_result: (optional) result of the previous task
        :return dict: merged result of the task and previous tasks
        """
        self.results = {}
        self._init_params(params)
        self.job_db.update_job_state(self.job_id, 'started')
        if prev_result:
            self._add_prev_result_to_results(prev_result)
        # results can also be part of the params array in some cases
        if 'result' in params:
            self._add_prev_result_to_results(params['result'])
        try:
            task_result = self.execute_task()
        except celery.exceptions.Ignore:
            # Celery-internal Exception thrown when tasks are ignored/replaced
            raise
        except Exception as e:  # noqa: ignore bare except
            self.log.error(traceback.format_exc())
            # remembered so after_return can build the error object
            self.error = str(e)
            raise e
        return self._merge_result(task_result)

    def get_param(self, key):
        """Return required parameter *key*; raise KeyError naming the task."""
        try:
            return self.params[key]
        except KeyError:
            raise KeyError(f"Mandatory parameter {key} is missing"
                           f" for {self.__class__.__name__}")

    def get_result(self, key):
        """Return required accumulated result *key*; KeyError names the task."""
        try:
            return self.results[key]
        except KeyError:
            raise KeyError(f"Mandatory result {key} is missing"
                           f" for {self.__class__.__name__}")

    @abstractmethod
    def execute_task(self):
        """
        Execute the task.
        This method has to be implemented by all subclassed tasks and includes
        the actual implementation logic of the specific task.
        Results have to be dicts or lists of results and are merged recursively
        so that partial results in task chains accumulate and may be extended
        or modified by following tasks.
        Tasks do not have to return results, i.e. the result may be None.
        :return dict:
        """
        raise NotImplementedError("Execute Task method not implemented")

    def _add_prev_result_to_results(self, prev_result):
        # dict results merge recursively; list results are folded in
        # element by element; anything else (non-falsy) is a caller error
        if isinstance(prev_result, dict):
            self.results = merge_dicts(self.results, prev_result)
        elif isinstance(prev_result, list):
            for result in prev_result:
                self._add_prev_result_to_results(result)
        elif prev_result:
            raise KeyError("Wrong result type in previous task")

    def _merge_result(self, result):
        # only dict results are merged; any other return type is discarded
        # in favour of the accumulated results
        if isinstance(result, dict):
            return merge_dicts(self.results, result)
        else:
            return self.results

    def _init_params(self, params):
        """Validate and unpack the mandatory job parameters."""
        self.params = params
        try:
            self.job_id = params['job_id']
        except KeyError:
            raise KeyError("job_id has to be set before running a task")
        try:
            self.work_path = params['work_path']
        except KeyError:
            raise KeyError("work_path has to be set before running a task")
        try:
            self.parent_job_id = params['parent_job_id']
        except KeyError:
            # parent is optional: top-level jobs have no ancestor to notify
            self.parent_job_id = None
        self.log.debug(f"initialized params: {self.params}")
class FileTask(BaseTask):
    """
    Abstract base class for file based tasks.
    Subclasses have to override the process_file method that holds the
    actual conversion logic.
    """

    def execute_task(self):
        """Resolve the source file and target dir, then delegate to process_file."""
        file = self.get_param('work_path')
        try:
            target_rep = self.get_param('target')
        except KeyError:
            # default target representation: the name of the file's parent dir
            target_rep = os.path.basename(os.path.dirname(file))
        # target dir is a sibling of the work path's parent, named target_rep
        target_dir = os.path.join(
            os.path.dirname(os.path.dirname(self.get_work_path())),
            target_rep
        )
        os.makedirs(target_dir, exist_ok=True)
        self.process_file(file, target_dir)

    @abstractmethod
    def process_file(self, file, target_dir):
        """
        Process a single file.
        This method has to be implemented by all subclassed tasks and includes
        the actual implementation logic of the specific task.
        :param str file: The path to the file that should be processed
        :param str target_dir: The path of the target directory
        :return None:
        """
        raise NotImplementedError("Process file method not implemented")
class ObjectTask(BaseTask):
    """
    Abstract base class for object based tasks.

    Subclasses have to override the process_object method that holds the
    actual transformation logic.
    """

    def get_object(self):
        """Wrap this task's work path in a cilantro ``Object``."""
        return Object(self.get_work_path())

    def execute_task(self):
        """Run :meth:`process_object` on the object for the work path."""
        return self.process_object(self.get_object())

    @abstractmethod
    def process_object(self, obj):
        """
        Process a single object.

        This method has to be implemented by all subclassed tasks and includes
        the actual implementation logic of the specific task.

        :param Object obj: The cilantro object that should be processed
        :return dict:
        """
        raise NotImplementedError("Process object method not implemented")
|
16,527 | 211e67e41e4ee60e03b1d30e4880db7e33b86405 |
from ..utils import Object
class MessageContent(Object):
    """
    Contains the content of a message

    No parameters required.
    """

    # TDLib @type identifier for this (abstract) content kind.
    ID = "messageContent"

    def __init__(self, **kwargs):
        # Abstract base: concrete message-content subtypes carry the fields.
        pass

    @staticmethod
    def read(q: dict, *args) -> "MessageContent":
        """Deserialize *q* into the concrete MessageContent subtype named by
        its "@type" key; fall back to a bare MessageContent when absent."""
        if q.get("@type"):
            return Object.read(q)
        return MessageContent()
|
16,528 | 4da1c9ff67a51878eafe3f120219fef50d480c46 | from tkinter import *
import os
class GUI:
    """Tkinter login dialog that collects a server IP and a user name and
    then launches the chat client process."""

    def __init__(self):
        # Hidden root window; the login dialog is a Toplevel above it.
        self.Window = Tk()
        self.Window.withdraw()
        self.login = Toplevel()
        self.login.title("Login usando RPC")
        self.login.resizable(width=False, height=False)
        self.login.configure(width=400, height=300)
        self.pls = Label(self.login, text="Digite endereรงo IP", font=("Arial", 16), justify=CENTER)
        self.pls.place(relheight=0.1, relx=0.15, rely=0.1)
        self.labelIp = Label(self.login, text="IP: ")
        self.labelIp.place(relheight=0.1, relx=0.2, rely=0.2)
        self.entryIP = Entry(self.login)
        self.entryIP.place(relwidth=0.5, relheight=0.10, relx=0.30, rely=0.2)
        self.entryIP.focus()
        self.pls2 = Label(self.login, text="Digite seu nome", font=("Arial", 16), justify=CENTER)
        self.pls2.place(relheight=0.1, relx=0.15, rely=0.35)
        self.labelName = Label(self.login, text="Nome: ")
        self.labelName.place(relheight=0.1, relx=0.2, rely=0.45)
        self.entryName = Entry(self.login)
        self.entryName.place(relwidth=0.5, relheight=0.10, relx=0.30, rely=0.45)
        self.go = Button(self.login,
                         text="CONTINUE",
                         command=lambda: self.init_chat(self.entryIP.get(), self.entryName.get()))
        self.go.place(relx=0.4, rely=0.70)
        self.Window.mainloop()

    def init_chat(self, ip, name):
        """Close the login window and start the chat client with the
        entered ip and name.

        Security fix: the previous implementation interpolated the raw
        user input into an ``os.system`` shell string, allowing shell
        command injection (e.g. a name of ``; rm -rf ~``). Passing an
        argument list to subprocess delivers ip/name as plain arguments
        with no shell involved.
        """
        if ip == "" or ip is None or name == "" or name is None:
            return
        self.login.destroy()
        import subprocess
        subprocess.call(["python3", "client.py", ip, name])
if __name__ == '__main__':
    # Launching the GUI blocks inside mainloop(); the instance itself is
    # not needed afterwards.
    GUI()
|
16,529 | 09771c39a18773854f0be53104dfefb269241b5c | from __future__ import print_function
import sys, warnings
import deepsecurity
from deepsecurity.rest import ApiException
from playsound import playsound
from twilio.rest import Client
import csv
import time
# play a sound when a parameter changes and send a message
lists = []
def text():
    """Send an SMS via Twilio listing the changed agent records held in the
    module-level ``lists`` accumulator."""
    account_sid = ''
    auth_token = ''
    client = Client(account_sid, auth_token)
    body = "Some unauthorized changes were made to your agent: " + str(lists)
    message = client.messages.create(body=body, from_='+1', to='+1')
    print(message.sid)
def write(list):
    """Print the raw API response and write one CSV row per computer record
    into api.csv.

    The stringified response is split into lines; specific offsets are then
    popped per record (name, platform, policy, status) and formatted via
    the module-level ``format`` helper.

    NOTE(review): the hard-coded strides (42 lines per record while
    counting, 38 while popping) assume a fixed response layout -- verify
    against the current API version.
    """
    count = 0
    computers = 0
    ec2 = 0
    sourceFile = open('api.csv', 'w')
    csvreader = csv.writer(sourceFile, delimiter=',', lineterminator='\n')
    #csvreader.writerow(['Information: '])
    api = str(list).split('\n')
    api.pop(0)
    # First pass: echo every line and count whole records (ec2), using a
    # 4-line field group counter and a 42-line record counter.
    for i in api:
        print(i)
        #sourceFile.writelines(i)
        count = count + 1
        computers = computers + 1
        if (count == 4):
            #sourceFile.write('\n')
            count = 0
        if (computers == 42):
            print('\n')
            #csvreader.writerow([])
            computers = 0
            ec2 = ec2 + 1
    #csvreader.writerow([])
    csvreader.writerow(['COMPUTER NAME:', 'PLATFORM:', 'POLICY NAME:', 'STATUS:'])
    flag = 0
    # Second pass: pop the interesting fields for each record. Each pop
    # shifts the list, so the index arithmetic depends on this exact order.
    for x in range(ec2 + 1):
        flag = flag + 1
        name = api.pop((x * 38) + 12)
        name = format(name)
        sourceFile.write(name)
        platform = api.pop((x * 38) + 24) #change to fix output/api error 30*
        platform = format(platform)
        sourceFile.write(platform)
        policyname = api.pop((x * 38) + 30)
        policyname = format(policyname)
        sourceFile.write(policyname)
        status = api.pop((x * 38) + 1)
        status = format(status)
        sourceFile.write(status)
        sourceFile.write('\n')
        # NOTE(review): ``flag`` is overwritten with True on every
        # iteration, so the check below is always true (True == 1) and the
        # counter increment above is dead -- confirm intended behavior.
        flag = True
        if(flag == 1):
            lists.append(name)
            lists.append(platform)
            lists.append(policyname)
            lists.append(status)
    sourceFile.close()
def list():
    """Fetch the computer list from the Deep Security API.

    Uses the module-level ``apikey``. Returns the API response, or None
    when the call fails (the exception is printed).

    NOTE: the function name shadows the builtin ``list``; kept for
    backward compatibility with existing callers.
    """
    # Setup: silence warnings unless the user asked for them.
    if not sys.warnoptions:
        warnings.simplefilter("ignore")
    configuration = deepsecurity.Configuration()
    configuration.host = 'https://cloudone.trendmicro.com/api'
    # Authentication
    configuration.api_key['api-secret-key'] = apikey
    # Build the client and request options.
    computers_api = deepsecurity.ComputersApi(deepsecurity.ApiClient(configuration))
    expand_options = deepsecurity.Expand()
    expand_options.add(expand_options.none)
    try:
        return computers_api.list_computers(
            'v1', expand=expand_options.list(), overrides=False)
    except ApiException as e:
        print("An exception occurred when calling ComputersApi.list_computers: %s\n" % e)
def format(string):
    """Normalize one raw line of stringified API output into a display value.

    Checks marker substrings in a fixed priority order; lines that match
    none of the known markers fall through to the agent-version rewrite
    (a no-op for unrelated lines).

    NOTE: the function name shadows the builtin ``format``; kept for
    backward compatibility with existing callers.
    """
    print(string)
    if "display_name" in string:
        cleaned = string.replace("'", '')
        return cleaned.replace("display_name: ", '')
    if "last_appliance_communication" in string:
        # return string.replace("'platform': ", 'AWS Linux')
        # error in the api so hardcoded
        return string.replace("'last_appliance_communication': None", 'AWS Linux')
    if "policy_id" in string:
        return string.replace("'policy_id': ", '')
    if "0.0.0.0" in string:
        return string.replace("'agent_version': '0.0.0.0'", 'Activation Needed.')
    return string.replace("'agent_version': ", 'Activated.')
# Main polling loop: snapshot the computer list twice, 5 seconds apart; if
# the two responses differ, play an alert sound, text the collected record
# fields, and stop.
noerror = True
apikey = input("Enter API key: ")
while(noerror):
    api_response = list()  # NOTE: ``list`` is the local API helper, not the builtin
    write(api_response)
    time.sleep(5)
    lists = []  # reset the record accumulator before the second snapshot
    newapi = list()
    write(newapi)
    if(api_response != newapi):
        playsound('startup.mp3')
        print("\n" + str(lists) + "\n")
        text()
        noerror = False
|
16,530 | 2df81b4d87df73d0742bc93f368938b60d14e0fc |
##
## MySolrDbParse.py - primitive where clause converter
## for MySolrDb.py
##
import cStringIO, tokenize
def atom(token):
    """Convert a tokenizer ``(type, text, ...)`` tuple into a Python value.

    NAME and OP tokens return their text, NUMBER tokens are parsed as int
    (base auto-detected) falling back to float, STRING tokens are
    re-quoted after unescaping (Python 2 ``string-escape`` idiom), and any
    other token type yields None.
    """
    kind = token[0]
    text = token[1]
    if kind is tokenize.NAME or kind is tokenize.OP:
        return text
    if kind is tokenize.STRING:
        #return text[1:-1].decode("string-escape")
        return '"' + text[1:-1].decode("string-escape") + '"'
    if kind is tokenize.NUMBER:
        try:
            return int(text, 0)
        except ValueError:
            return float(text)
    return None
def parseWhereClause(database_name, table_name, source):
    """Tokenize a SQL-ish WHERE clause and emit an equivalent Solr query.

    Implemented as a small four-state scanner: state 0 gathers the left
    operand, state 1 the comparison operator, state 2 the right operand,
    and state 3 either finishes on ENDMARKER or consumes AND/OR and loops
    back to state 0.

    NOTE(review): Python 2 only (cStringIO, ``raise Exception, msg``,
    print statements).
    """
    src = cStringIO.StringIO(source).readline
    src = tokenize.generate_tokens(src)
    operator = ""
    state = 0  # 0 = gathering first parameter
    solr_statement = ""
    for s in src:
        if state == 0:
            # state = 0 = gathering first parameter
            state += 1
            sub_statement = ""
            if s[0] is tokenize.NAME:
                sub_statement = "(column_val_string:"
                p1_type = 'column'
                column_name = s[1]
            elif s[0] is tokenize.STRING or s[0] is tokenize.NUMBER:
                sub_statement = ":"+str(atom(s))
                p1_type = 'constant'
            else:
                raise Exception, 'Ill formed statement'
        elif state == 1:
            state += 1
            # state = 1 = gathering operator
            if s[0] is tokenize.OP:
                operator = s[1]
            else:
                raise Exception, 'Invalid operator'
        elif state == 2:
            state += 1
            # state = 2 = gathering second parameter
            if s[0] is tokenize.NAME:
                if p1_type == 'column':
                    sub_statement += s[1]
                    column_name = s[1]
                else:
                    raise Exception, 'Column to Column compare currently unsupported!'
            elif s[0] is tokenize.STRING or s[0] is tokenize.NUMBER:
                if p1_type == 'column':
                    sub_statement += str(atom(s))
                else:
                    raise Exception, 'Error - constant to constant compare detected!'
            else:
                raise Exception, 'Ill formed statement'
            # Qualify the match with the db_table_column naming convention.
            sub_statement += " AND column_name:"+database_name+"_"+table_name+"_"+column_name + ")"
        elif state == 3:
            # state = 3 = end of one comparison: finish, or join with AND/OR.
            state += 1
            if s[0] is tokenize.ENDMARKER:
                return solr_statement + produceSolrOutput(sub_statement, operator)
            else:
                if s[0] is tokenize.NAME and s[1].lower() == 'and':
                    #print "Disjunction!"
                    state = 0
                    solr_statement = solr_statement + produceSolrOutput(sub_statement, operator) + " AND "
                elif s[0] is tokenize.NAME and s[1].lower() == 'or':
                    #print "Conjunction!"
                    state = 0
                    solr_statement = solr_statement + produceSolrOutput(sub_statement, operator) + " OR "
                else:
                    raise Exception, 'Missing EndMarker'
        else:
            print "Creepy Internal Error 1001", state, s
def produceSolrOutput(sub_statement, operator):
    """Translate one "field:value" comparison into Solr query syntax.

    Fixes: ``>`` and ``<`` previously produced the same inclusive ranges
    as ``>=`` and ``<=``; they now use Solr's exclusive curly brackets
    (``{value TO *]`` / ``[* TO value}``). The ``raise`` statement also
    uses the call form, which is valid in both Python 2 and 3 (the old
    comma form is Python 2 only).

    :param sub_statement: "field:value" string built by parseWhereClause
    :param operator: one of =, !=, >, <, >=, <=
    :raises Exception: for any unsupported operator
    """
    if operator == '=':
        return sub_statement
    elif operator == '!=':
        return "-"+sub_statement
    elif operator == '>':
        a = sub_statement.split(":")
        # Exclusive lower bound: value itself must not match.
        return a[0] + ":{" + a[1] + " TO *]"
    elif operator == '<':
        a = sub_statement.split(":")
        # Exclusive upper bound.
        return a[0] + ":[* TO " + a[1] + "}"
    elif operator == '>=':
        a = sub_statement.split(":")
        return a[0] + ":[" + a[1] + " TO *]"
    elif operator == '<=':
        a = sub_statement.split(":")
        return a[0] + ":[* TO " + a[1] + "]"
    else:
        raise Exception('Error - Unsupported operator - ' + operator)
################
## unit tests ##
################
'''
statement = "x > 75"
res = parseWhereClause(statement)
print "statement:", statement, "returned --->", res
statement = "x > 75 and x < 80"
res = parseWhereClause(statement)
print "statement:", statement, "returned --->", res
statement = "field_name = '75'"
res = parseWhereClause(statement)
print "statement:", statement, "returned --->", res
statement = "field_name >= 'xYz'"
res = parseWhereClause(statement)
print "statement:", statement, "returned --->", res
statement = "user_name = 'Joe' AND user_age > 75"
res = parseWhereClause(statement)
print "statement:", statement, "returned --->", res
'''
|
16,531 | 9671815ab10343d099bf3bd223901379a1d403ff | from torch.utils.data import DataLoader
import torchvision.transforms as transforms
from torchvision.datasets import ImageFolder
from torchvision import utils
import os
import numpy as np
# Augmentation pipeline: random rotation (+/- 30 degrees) followed by a
# padded 32x32 random crop, applied when each image is loaded.
augment_transform = transforms.Compose([
    # transforms.Resize((32,32)),
    transforms.RandomRotation(degrees=30),
    transforms.RandomCrop(32, padding=4),
    transforms.ToTensor()
])

trainset = ImageFolder('./data/1of4', augment_transform)
print(len(trainset))
# shuffle=False keeps the output file naming tied to a stable batch order.
trainloader = DataLoader(trainset, batch_size=128, shuffle=False)

# Write every augmented image to ./data/affine/<label>/<batch>_<i>.png,
# creating one directory per class label on demand.
minibatch = 0
for data in trainloader:
    minibatch += 1
    img, labels = data
    for i in range(len(labels)):
        if os.path.exists(os.path.join('./data/affine/', str(labels[i].numpy()))) == False:
            os.mkdir(os.path.join('./data/affine/', str(labels[i].numpy())))
        utils.save_image(img[i], os.path.join('./data/affine/', str(labels[i].numpy()), str(minibatch)+'_'+str(i)+'.png'))
    print("Batch OK")
print('Done')
|
16,532 | ad161cbfa54a73a08f5599e6c4ba1df2e4d68c7c | # coding=utf-8
import tensorflow as tf
# Placeholder scratch values; XXX stands in for real tensors/weights.
labels = ''
XXX = ''
biases = ''
# Alternative: explicit softmax + hand-rolled cross-entropy (numerically
# less stable than the fused op below).
# logits = tf.nn.softmax(XXX)
# loss = tf.reduce_mean(-tf.reduce_sum(labels * tf.log(logits)))
# NOTE(review): tf.matmul on the '' placeholders above will fail at run
# time; this snippet only illustrates the fused sigmoid cross-entropy form.
logits = tf.matmul(XXX, XXX) + biases
loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits, name='losses')
|
16,533 | 34b2f9b500c7e7a6231245fb5ecf4fbcb0f9a97f | import os, sys
import shutil
def check_name(name):
    """Normalize a directory name: trim whitespace, turn spaces into
    underscores, and collapse any run of underscores to a single one.

    Fix: the previous version applied ``replace("__", "_")`` exactly
    twice, which still left double underscores behind for runs of five or
    more spaces; the loop below collapses runs of any length.

    :param str name: raw directory name
    :return str: normalized name
    """
    new_name = name.strip().replace(" ", "_")
    while "__" in new_name:
        new_name = new_name.replace("__", "_")
    return new_name
def copy_files(src, des):
    """Copy every ``.jpg`` file from *src* into *des*, deleting each
    original once a non-empty copy exists at the destination.

    :param str src: source directory
    :param str des: destination directory (created if missing)
    """
    if not os.path.exists(des):
        os.mkdir(des)
    for entry in os.listdir(src):
        source = os.path.join(src, entry)
        target = os.path.join(des, entry)
        if not (os.path.isfile(source) and source[-4:] == ".jpg"):
            continue
        if not os.path.exists(target):
            shutil.copy(source, target)
        # Only remove the source after verifying the copy is non-empty.
        if os.path.isfile(target) and os.path.getsize(target) > 0:
            os.remove(source)
def check_dir(path):
    """Walk make/model directories under *path*, merging each badly named
    model directory into its normalized counterpart and removing it once
    emptied."""
    for make in os.listdir(path):
        # makes
        make_dir = os.path.join(path, make)
        print(make)
        if not (os.path.exists(make_dir) and os.path.isdir(make_dir)):
            continue
        # models
        for model in os.listdir(make_dir):
            fixed = check_name(model)
            if fixed == model:
                continue  # already normalized
            src_path = os.path.join(make_dir, model)
            print(src_path)
            des_path = os.path.join(make_dir, fixed)
            copy_files(src_path, des_path)
            if not os.listdir(src_path):
                shutil.rmtree(src_path)
if __name__ == '__main__':
    # Validate the single required argument: an existing directory that
    # lives under a /datasets/ tree.
    if len(sys.argv) < 2:
        print("Need param: python gen_path.py '/vmmr/datasets/source'")
        exit(1)
    folder = str(sys.argv[1])
    if not os.path.isdir(folder):
        print("Folder '{}' not found.".format(folder))
        exit(1)
    if "/datasets/" not in folder:
        print("Folder '{}' must be in /datasets/ directory.".format(folder))
        exit(1)
    check_dir(folder)
|
16,534 | 35a82979c6c124391276ef2dd62bac22cf3e253c | # -*- coding: utf-8 -*-
# @Time : 2018/9/28 9:13
# @Author : xuyun
import smtplib,os,HTMLTestRunner,unittest,time,datetime
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.header import Header
def sendmain(file) :
    """Send a notification e-mail through the QQ SMTP server.

    NOTE(review): the *file* argument (the newest report path) is accepted
    but never attached or referenced -- confirm whether the report should
    be attached to the message.
    """
    # sender mailbox
    sender = '819077607@qq.com'
    # recipient mailbox
    # receiver = '1035612906@qq.com'
    receiver = '2565701045@qq.com'
    # mail subject
    subget = 'python email test'
    # SMTP server for the sender account
    smtpserver = 'smtp.qq.com'
    # sender account username / app password
    username = '819077607@qq.com'
    password = 'rqlyswejgsqkbcbj'
    # msg = MIMEText('HELLO ่ฟไธชไธๅฐๆต่ฏไฟกๆฏ','text','utf-8')
    # msg = MIMEText('<html><h1>this is python smtp test</h1></html>','html','utf-8')
    msg = MIMEText('<html><h1>็ช็ชๅๅๅๅๅๅๅๅๅๅthis is python SMTP test</h1></html>','html','utf-8')
    msg['Subject'] = Header(subget, 'utf-8')
    smtp = smtplib.SMTP()
    smtp.connect('smtp.qq.com')
    smtp.login(username,password)
    smtp.sendmail(sender,receiver, msg.as_string())
    smtp.quit()
    print('email sent')
def sendreport() :
    """Find the newest test report in D:// and pass it to sendmain."""
    result_dir = 'D://'
    lists = os.listdir(result_dir)
    # Sort by access time so the last element is the newest file.
    # NOTE(review): entries whose joined path is absolute get key 0, and
    # os.path.isabs('D://...') is platform dependent (absolute on
    # Windows) -- verify the "newest file" selection actually works there.
    lists.sort(key=lambda fn: os.path.getatime(result_dir+'//'+fn)
               if not os.path.isabs(result_dir + '//' + fn) else 0)
    print(u'ๆๆฐ็ๆต่ฏๆฅๅไธบ: ' + lists[-1])
    file = os.path.join(result_dir, lists[-1])
    print(file)
    sendmain(file)
|
16,535 | 2b5e59ed5def95368480464549cb01ea74632050 | import pyglet
import pyglet.gl
import random
import math
from random import randint
def get_triangle(radius, xcenter, ycenter, numberOfVertices):
    """
    Build a pyglet vertex list for a regular polygon centred on
    (xcenter, ycenter).

    Fix/generalization: the angles were hard-coded for exactly 3 vertices
    while the vertex count passed to pyglet came from numberOfVertices, so
    any value other than 3 produced an inconsistent vertex list. The
    angles are now derived from numberOfVertices; for 3 vertices the
    output is identical to before (angles 0, 2pi/3, 4pi/3).

    :param radius: circumradius of the polygon
    :param xcenter: x center of the polygon
    :param ycenter: y center of the polygon
    :param numberOfVertices: number of vertices
    :return: pyglet vertex list of 'v2f' coordinates of the vertices
    """
    vertices = []
    for i in range(numberOfVertices):
        angle = 2.0 * math.pi * i / numberOfVertices
        vertices.append(radius * math.cos(angle) + xcenter)  # x value
        vertices.append(radius * math.sin(angle) + ycenter)  # y value
    # convert the vertices list to pyGlet vertices format
    return pyglet.graphics.vertex_list(numberOfVertices, ('v2f', vertices))
16,536 | 4ae7102e13b5ab15d59a7e1ce675122204bfa9ad | import pandas as pd
import boto3
import s3fs
import json
import random
import os
class MessageDatabaseCSV:
    """Shared message-selection logic over a CSV-backed message table.

    Subclasses provide ``get_dataframe``/``put_dataframe`` for a concrete
    storage backend.
    """

    def next_message(self):
        """Pick a random least-used message, bump its usage counter and
        timestamp, persist the table, and return the message dict."""
        df = self.get_dataframe()
        print('Successfully retrieved message database')
        print('Values for number of times used: {}'.format(df['num_times_used'].unique()))
        least_used = df['num_times_used'].min()
        print('Minimum number of times used: {}'.format(least_used))
        candidates = df.index[df['num_times_used'] == least_used].tolist()
        print('Selectable indices: {}'.format(candidates))
        chosen = random.choice(candidates)
        print('Selected index: {}'.format(chosen))
        message = {
            'body': df.loc[chosen, 'body'],
            'contributor': df.loc[chosen, 'contributor']}
        print('Selected message: {}'.format(message))
        df.loc[chosen, 'num_times_used'] += 1
        df.loc[chosen, 'last_used'] = pd.Timestamp.now()
        print('Created new message database')
        self.put_dataframe(df)
        return message
class MessageDatabaseCSVS3(MessageDatabaseCSV):
    """Message database stored as a CSV object in S3.

    Bucket/object names default to the MESSAGE_DATABASE_S3_* environment
    variables when not passed explicitly.
    """

    def __init__(
            self,
            message_database_bucket_name=None,
            message_database_object_name=None):
        self.message_database_bucket_name = (
            message_database_bucket_name
            if message_database_bucket_name is not None
            else os.environ['MESSAGE_DATABASE_S3_BUCKET_NAME'])
        self.message_database_object_name = (
            message_database_object_name
            if message_database_object_name is not None
            else os.environ['MESSAGE_DATABASE_S3_OBJECT_NAME'])

    def get_dataframe(self):
        """Read the message table from its S3 location."""
        s3_location = 's3://' + self.message_database_bucket_name +'/' + self.message_database_object_name
        print('S3 location: {}'.format(s3_location))
        return pd.read_csv(
            s3_location,
            index_col=0,
            parse_dates=['last_used'],
            encoding='utf-8')

    def put_dataframe(self, message_list_df):
        """Write the message table back to S3 as UTF-8 CSV bytes."""
        s3 = s3fs.S3FileSystem(anon=False)
        s3_location = self.message_database_bucket_name + '/' + self.message_database_object_name
        payload = message_list_df.to_csv(None).encode('utf-8')
        with s3.open(s3_location, 'wb') as f:
            f.write(payload)
class MessageDatabaseCSVLocal(MessageDatabaseCSV):
    """Message database stored as a CSV file on the local filesystem.

    The path defaults to the MESSAGE_DATABASE_LOCAL_PATH environment
    variable when not passed explicitly.
    """

    def __init__(self, message_database_local_path=None):
        self.message_database_local_path = (
            message_database_local_path
            if message_database_local_path is not None
            else os.environ['MESSAGE_DATABASE_LOCAL_PATH'])

    def get_dataframe(self):
        """Read the message table from the local CSV file."""
        return pd.read_csv(
            self.message_database_local_path,
            index_col=0,
            parse_dates=['last_used'],
            encoding='utf-8')

    def put_dataframe(self, message_list_df):
        """Write the message table back to the local CSV file."""
        message_list_df.to_csv(
            self.message_database_local_path,
            encoding='utf-8')
class MessageStoreS3:
    """Single-message store backed by one JSON object in S3.

    Bucket/object names default to the MESSAGE_STORE_S3_* environment
    variables when not passed explicitly.
    """

    def __init__(
            self,
            message_store_bucket_name=None,
            message_store_object_name=None):
        self.message_store_bucket_name = (
            message_store_bucket_name
            if message_store_bucket_name is not None
            else os.environ['MESSAGE_STORE_S3_BUCKET_NAME'])
        self.message_store_object_name = (
            message_store_object_name
            if message_store_object_name is not None
            else os.environ['MESSAGE_STORE_S3_OBJECT_NAME'])

    def _object(self):
        """Return the boto3 S3 object handle for the store."""
        s3 = boto3.resource('s3')
        return s3.Object(
            self.message_store_bucket_name,
            self.message_store_object_name)

    def get_message(self):
        """Fetch and decode the stored message."""
        raw = self._object().get()['Body'].read().decode('utf-8')
        return json.loads(raw)

    def put_message(self, message):
        """JSON-encode *message* and upload it to the store object."""
        self._object().put(
            Body=json.dumps(message).encode('utf-8'),
            ContentType='string')
class MessageStoreLocal:
    """Single-message store backed by one JSON file on the local
    filesystem.

    The path defaults to the MESSAGE_STORE_LOCAL_PATH environment variable
    when not passed explicitly.
    """

    def __init__(self, message_store_local_path=None):
        if message_store_local_path is None:
            message_store_local_path = os.environ['MESSAGE_STORE_LOCAL_PATH']
        self.message_store_local_path = message_store_local_path

    def get_message(self):
        """Read and decode the stored message."""
        with open(self.message_store_local_path, 'r', encoding='utf-8') as store:
            return json.loads(store.read())

    def put_message(self, message):
        """JSON-encode *message* and write it to the store file."""
        with open(self.message_store_local_path, 'w', encoding='utf-8') as store:
            store.write(json.dumps(message))
|
16,537 | ce8abd70549029c8335e582733e66c2ddc54b0e0 | import logging
from logging.handlers import SMTPHandler, RotatingFileHandler
import os
# Import the Flask class from the flask package.
from flask import Flask, request
from config import Config
# The Flask instance assigned to the ``app`` variable becomes a member of
# the app package.
from flask_sqlalchemy import SQLAlchemy  # import the SQLAlchemy extension class
from flask_migrate import Migrate
import pymysql
from flask_login import LoginManager #ๅขๅ ็ป้
from flask_mail import Mail
from flask_bootstrap import Bootstrap
from datetime import timedelta#ๆต่ฏ
from flask_moment import Moment
from config import Config
from flask import current_app
# Flask extensions are created unbound here and attached to a concrete app
# inside create_app() (application-factory pattern).
moment = Moment()
db = SQLAlchemy()  # database object
migrate = Migrate()  # migration engine object
pymysql.install_as_MySQLdb()  # let pymysql stand in for MySQLdb
login = LoginManager()
login.login_view = 'auth.login'  # endpoint unauthenticated users are sent to
login.login_message = u'่ฏท็ป้'
mail = Mail()
bootstrap = Bootstrap()
def create_app(config_class=Config):
    """Application factory: build and configure a Flask app instance.

    Fix: the ``config_class`` parameter was previously ignored -- the
    module-level ``Config`` was always loaded -- which made it impossible
    to create an app with an alternate (e.g. testing) configuration.

    :param config_class: configuration class to load into app.config
    :return: the configured Flask application
    """
    app = Flask(__name__)
    app.config.from_object(config_class)

    # Bind the module-level, lazily-created extensions to this app.
    db.init_app(app)
    migrate.init_app(app, db)
    login.init_app(app)
    mail.init_app(app)
    bootstrap.init_app(app)
    moment.init_app(app)

    # Register blueprints: error handlers, authentication, main pages.
    from app.errors import bp as errors_bp
    app.register_blueprint(errors_bp)

    from app.auth import bp as auth_bp
    app.register_blueprint(auth_bp, url_prefix='/auth')

    from app.main import bp as main_bp
    app.register_blueprint(main_bp)

    return app
# Import application modules from the app package.
from app import models  # placed at the bottom to avoid a circular import
|
16,538 | a2739f6be3e9c7022ed04b637439e3cd68a61a47 | """add posts table
Revision ID: bff64a27bc9b
Revises: 4783cd3d66c8
Create Date: 2020-09-27 14:35:46.031402
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = 'bff64a27bc9b'
down_revision = '4783cd3d66c8'
branch_labels = None
depends_on = None
def upgrade():
    """Create the plural ``users`` table (with the new profile columns) and
    the ``posts`` table, then drop the old singular ``user`` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('users',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('username', sa.String(length=64), nullable=True),
    sa.Column('email', sa.String(length=120), nullable=True),
    sa.Column('password_hash', sa.String(length=128), nullable=True),
    sa.Column('name', sa.String(length=64), nullable=True),
    sa.Column('location', sa.String(length=64), nullable=True),
    sa.Column('about_me', sa.Text(), nullable=True),
    sa.Column('member_since', sa.DateTime(), nullable=True),
    sa.Column('last_seen', sa.DateTime(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    # Unique lookups by e-mail and username.
    op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)
    op.create_index(op.f('ix_users_username'), 'users', ['username'], unique=True)
    op.create_table('posts',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('title', sa.String(length=255), nullable=True),
    sa.Column('summary', sa.Text(), nullable=True),
    sa.Column('body', sa.Text(), nullable=True),
    sa.Column('timestamp', sa.DateTime(), nullable=True),
    sa.Column('views', sa.Integer(), nullable=True),
    sa.Column('author_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['author_id'], ['users.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_posts_timestamp'), 'posts', ['timestamp'], unique=False)
    # Drop the old singular table last (indexes first, then the table).
    op.drop_index('ix_user_email', table_name='user')
    op.drop_index('ix_user_username', table_name='user')
    op.drop_table('user')
    # ### end Alembic commands ###
def downgrade():
    """Reverse of upgrade(): recreate the singular ``user`` table and drop
    the ``posts`` and ``users`` tables."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('user',
    sa.Column('id', mysql.INTEGER(display_width=11), autoincrement=True, nullable=False),
    sa.Column('username', mysql.VARCHAR(length=64), nullable=True),
    sa.Column('email', mysql.VARCHAR(length=120), nullable=True),
    sa.Column('password_hash', mysql.VARCHAR(length=128), nullable=True),
    sa.Column('about_me', mysql.TEXT(), nullable=True),
    sa.Column('last_seen', mysql.DATETIME(), nullable=True),
    sa.Column('location', mysql.VARCHAR(length=64), nullable=True),
    sa.Column('member_since', mysql.DATETIME(), nullable=True),
    sa.Column('name', mysql.VARCHAR(length=64), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    mysql_default_charset='utf8',
    mysql_engine='InnoDB'
    )
    op.create_index('ix_user_username', 'user', ['username'], unique=True)
    op.create_index('ix_user_email', 'user', ['email'], unique=True)
    # Drop the new tables (indexes first, then the tables themselves).
    op.drop_index(op.f('ix_posts_timestamp'), table_name='posts')
    op.drop_table('posts')
    op.drop_index(op.f('ix_users_username'), table_name='users')
    op.drop_index(op.f('ix_users_email'), table_name='users')
    op.drop_table('users')
    # ### end Alembic commands ###
|
16,539 | 01092704b9af5d64f4778189c2e5309b1e7d2be1 | def pearson(data, labels, dumped=False):
import numpy as np
import util.dump as dump
import math
import warnings
import scipy.stats as stats
warnings.filterwarnings('ignore')
def get_features(data_set):
n = len(data[0])
return [[i[j] for i in data_set] for j in range(n)]
def feature_correlation(x, y):
n = range(len(x))
x_avg = sum(x) / len(x)
y_avg = sum(y) / len(y)
cov = sum([(x[i] - x_avg) * (y[i] - y_avg) for i in n])
x_dev = math.sqrt(sum([(x[i] - x_avg) ** 2 for i in n]))
y_dev = math.sqrt(sum([(y[i] - y_avg) ** 2 for i in n]))
return cov / (x_dev * y_dev)
def correlation(x, y):
from util.frame import progress
print('Pearson: computing corellation coefficients:')
feat_len = len(x)
result = []
for i in range(feat_len):
result.append(feature_correlation(x[i], y))
if i % 10 == 0:
progress((i + 1) / feat_len)
progress(1)
return np.asarray(result)
features = get_features(data)
ro = []
if not dumped:
ro = correlation(features, labels)
dump.dump_object(ro, 'pearson/ro.dump')
else:
ro = dump.load_object('pearson/ro.dump')
v = len(labels) - 2
p = []
for i in range(len(ro)):
t = ro[i] * math.sqrt(v) / math.sqrt(1 - ro[i] ** 2)
p.append((stats.t.sf(np.abs(t), v) * 2, i))
return p
|
16,540 | d1d66729bece2e2cb12200900854e9651cf05965 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : Tom.Lee
import platform
import time
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.forms import forms
from .file_tools import copy_file
from .file_tools import rename_file
from .http_funcs import json_response
from ..settings import UPLOAD_DIR
class UploadObjectForm(forms.Form):
    """
    docs: https://github.com/amlyj/horizon/blob/master/openstack_dashboard/api/rest/swift.py
    Usage:
        form = UploadObjectForm(request.POST, request.FILES)
        if not form.is_valid():
            print 'data must not be empty'
        data = form.clean()
        object_file = data['file']
        print 'file_name: %s' % object_file.name
        print 'file_size: %s' % object_file.size
    """
    # The uploaded file; optional so that validation errors can be reported
    # by save_file with a friendly message.
    file = forms.FileField(required=False)
@json_response
def save_file(request):
    """Persist an uploaded file under UPLOAD_DIR and return a JSON status
    dict describing the outcome."""
    form = UploadObjectForm(request.POST, request.FILES)
    if not form.is_valid():
        # Reject empty uploads with a structured error payload.
        return {
            'status': False,
            'timestamp': int(round(time.time() * 1000)),
            'data': None,
            'path': request.path,
            'method': request.method,
            'message': 'ไธไผ ๅคฑ่ดฅ๏ผ ๆไปถไธ่ฝไธบ็ฉบ.'
        }
    data = form.clean()
    object_file = data['file']
    # print('file_name: %s' % object_file.name)
    # print('file_size: %s' % object_file.size)
    save_path = '{upload_dir}/{name}'.format(upload_dir=UPLOAD_DIR, name=object_file.name)
    if isinstance(object_file, InMemoryUploadedFile):
        # Small uploads are held in memory, so write the chunks out directly.
        with open(save_path, 'wb') as desc:
            for chunk in object_file.chunks():
                desc.write(chunk)
    else:
        # Large uploads already sit in a temp file: rename by default
        # (updates the inode on Linux) ...
        _func = rename_file
        # ... but copy on Windows, where rename can hit permission problems.
        if platform.system() == 'Windows':
            _func = copy_file
        _func(object_file.file.name, save_path)
    return {
        'status': True,
        'timestamp': int(round(time.time() * 1000)),
        'data': {
            'path': save_path,
            'size': object_file.size
        },
        'path': request.path,
        'method': request.method,
        'message': 'ๆไปถ [%s] ไธไผ ๆๅ๏ผ' % object_file.name
    }
def paths(request):
    """
    Print every URL pattern registered on the project and return an empty
    JSON payload.

    :param request:
    :return:
    """
    from ..urls import urlpatterns
    # django 2.x+ resolver classes
    # (django 1.x used django.urls.resolvers.RegexURLPattern)
    from django.urls import URLPattern, URLResolver
    from django.http import JsonResponse

    for entry in urlpatterns:
        print()
        if isinstance(entry, URLResolver):
            print(entry, entry.pattern, entry.url_patterns)
        if isinstance(entry, URLPattern):
            print(entry, entry.pattern)
    return JsonResponse({'paths': ""})
def page_not_found(request, e):
    """
    Render the custom 404 page, printing the triggering exception if any.

    :param request:
    :param e: exception
    :return:
    """
    from django.shortcuts import render
    if e:
        print("404 ๅผๅธธ๏ผ", e)
    return render(request, 'error/404.html')
|
16,541 | 50eebbd7f324117fd6dd9e519eb4c89d6d6bf458 | # Scrapes Tripadvisor restaurant page for data
'''
Code in this file scrapes all of the desired information (name, rating, price, address,
city, state, zipcode, country, phone #) on Tripadvisor. We did not use this in our final
implementation because of difficulties building a crawler, and thus this code was discontinued.
'''
import requests
import re
import bs4
def go():
    '''
    Return restaurant info scraped from the hard-coded Tripadvisor page.
    '''
    page_url = "https://www.tripadvisor.com/Restaurant_Review-g35805-d7200288-Reviews-Shake_Shack-Chicago_Illinois.html"
    return get_info(cook_soup(page_url))
def cook_soup(url):
    '''
    Fetch *url* and return a BeautifulSoup (html5lib) parse of the page.
    '''
    response = requests.get(url)
    # Re-encode with the response's declared encoding before parsing.
    html = response.text.encode(response.encoding)
    return bs4.BeautifulSoup(html, "html5lib")
def get_info(soup):
    '''
    Pulls name, rating, price, cuisine tags, address, city, state, country,
    zipcode, and phone number from the page. Returns a dictionary.

    Fix: the returned dict literal was missing commas after the "price"
    and "cuisine" entries, which was a syntax error that prevented the
    module from being imported at all.
    '''
    name_tag = soup.find_all('h1', property = "name")
    name = name_tag[0].text
    name = name.replace("\n", "")
    rating_tag = soup.find_all('img', property = "ratingValue")
    rating = rating_tag[0]["content"]
    price_tag = soup.find_all('div', class_ = "detail first price_rating separator")
    price = price_tag[0].text
    price = price.replace("\n", "")
    # Cuisine tags are the anchor texts inside the first detail block.
    cuisine_all = soup.find_all('div', class_ = "detail separator")
    cuisine_tags = cuisine_all[0].find_all('a')
    cuisine = []
    for cuisine_tag in cuisine_tags:
        cuisine.append(cuisine_tag.text)
    address_tag = soup.find_all('span', property = "streetAddress")
    address = address_tag[0].text
    city_tag = soup.find_all('span', property = "addressLocality")
    city = city_tag[0].text
    state_tag = soup.find_all('span', property = "addressRegion")
    state = state_tag[0].text
    country_tag = soup.find_all('span', property = "addressCountry")
    country = country_tag[0]["content"]
    zipcode_tag = soup.find_all('span', property = "postalCode")
    zipcode = zipcode_tag[0].text
    phone_tag = soup.find_all('div', class_ = "fl phoneNumber")
    phone = phone_tag[0].text
    phone = re.findall("[0-9-]{12}", phone)
    phone = phone[0].replace("-", "")
    # returns in form 3126671701
    restaurant_info = {
        "name": name,
        "rating": rating,
        "price": price,
        "cuisine": cuisine,
        "address": address,
        "city": city,
        "state": state,
        "zipcode": zipcode,
        "country": country,
        "phone": phone
    }
    return restaurant_info
16,542 | f14a8eb14ff4885b8d875497c11b10abbaad78ca | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from linha_do_tempo import Event
def invencoes(limites = None):
    """Register invention events, optionally clipped to *limites*."""
    eventos = [
        (1774, "Barco a vapor"),
        (1811, "Prensa a vapor"),
        (1825, "Linha Fรฉrrea"),
        (1885, "Automรณvel"),
        (1903, "Aviรฃo"),
        (1913, "Linha de Montagem Ford T"),
        (1876, "Telefone"),
    ]
    create_events(eventos, limites)
def guerras(limites=None):
    """Register war events, optionally clipped to *limites*."""
    eventos = [
        (1861, "Guerra Civil Americana"),
        (1914, "I Guerra Mundial"),
        (1939, "II Guerra Mundial"),
    ]
    create_events(eventos, limites)
def acontecimentos(limites=None):
    """Register miscellaneous historical happenings as timeline events."""
    eventos = [
        (1969, "Apolo 11 pousa na Lua"),
    ]
    create_events(eventos, limites)
def create_events(lista, limites):
    """Create an Event for each (year, description) pair that falls in range.

    When *limites* is falsy, the range is derived from the publication dates
    of every known author's works.
    """
    if not limites:
        from linha_do_tempo import Autor
        datas = [obra.data
                 for autor in Autor.todos
                 for obra in autor.obras]
        limites = (min(datas), max(datas))
    for ano, descricao in lista:
        if not limites or limites[0] <= ano <= limites[1]:
            Event(descricao, ano)
|
16,543 | f02e832e8c60bc3dcd26938003e0bbe03aac74cb | from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
sa = SentimentIntensityAnalyzer()
reddit_quotes = [
'I had such low expectations and I still managed to fail my expectations.',
"big facts, 170 was decently cool (coming from someone who really enjoyed 70... I thought I'd like 170 more than I actually did... it got a little too abstract for my liking near the end but the coding final project is definitely super fun)",
"I can tell you right now that if you hate theory and proofs then you'll hate CS170 and it will feel like a slog. ",
"This class is the worst fucking thing I've ever seen. The amount of mismanagement amongst staff is unheard of and completely unacceptable, especially for a class of its size. ",
"It wasnโt as bad as last time I feel",
"I'm worried because it was a less brutal than the first one but I'm pretty sure I didn't do as well on it so I'm gonna be on the really low end of the curve :/"
]
rmp_quotes = [
"DeNero is hilarious and a great professor. Lectures and expectations are super clear",
" His lectures are very clear, the exams are straightforward, and he gives the best curves.",
" Super annoying. I'm not bitter reviewing because of a bad gradeI did well in the class, I just hated every second of it.",
"This guy very clearly cares about teaching his students, and he's a very good lecturer. However, he's a genius, and he has an extremely hard time understanding when students don't \"get\" what he's saying."
]
def rate_list(lst):
    """Print each quote in *lst* together with its VADER compound score."""
    for quote in lst:
        score = sa.polarity_scores(quote)['compound']
        print(f'Quote: {quote}')
        print(f'Rating: {score}')
        print('\n')
    print('\n\n')
def overall_rating(lst, kw):
    """Print the mean VADER compound score of all quotes in *lst*.

    BUG FIX: an empty *lst* previously raised ZeroDivisionError; it now
    prints an explicit "n/a" summary instead. Also uses sum()/len() instead
    of a manual counter.

    :param lst: iterable of quote strings
    :param kw: label used in the printed summary line (e.g. a course name)
    """
    if not lst:
        print(f'OVERALL RATING FOR {kw}: n/a (no quotes)')
        return
    total = sum(sa.polarity_scores(q)['compound'] for q in lst)
    print(f'OVERALL RATING FOR {kw}: {total / len(lst)}')
def highest_lowest(lst):
    """Print the most positive and most negative quote in *lst*."""
    scored = [(quote, sa.polarity_scores(quote)['compound']) for quote in lst]
    best = max(scored, key=lambda pair: pair[1])
    worst = min(scored, key=lambda pair: pair[1])
    print(f'Most positive comment: {best[0]}')
    print(f'Rating: {best[1]}')
    print(f'Most negative comment: {worst[0]}')
    print(f'Rating: {worst[1]}')
# rate_list(reddit_quotes)
# rate_list(rmp_quotes)
# Show the extreme comments, then the average score across both sources
highest_lowest(reddit_quotes+rmp_quotes)
overall_rating(reddit_quotes+rmp_quotes, 'CS170')
16,544 | cc670e7b9c6683a8c1dd4f6ccf5c9b72d250a3fc | # Copyright (c) 2020, Fabio Muratore, Honda Research Institute Europe GmbH, and
# Technical University of Darmstadt.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of Fabio Muratore, Honda Research Institute Europe GmbH,
# or Technical University of Darmstadt, nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL FABIO MURATORE, HONDA RESEARCH INSTITUTE EUROPE GMBH,
# OR TECHNICAL UNIVERSITY OF DARMSTADT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import contextlib
import select
import sys
from typing import Callable, Iterable, Sequence, TypeVar
import numpy as np
from colorama import Fore, Style
import pyrado
from pyrado.utils import run_once
T = TypeVar("T")
def input_timeout(prompt: str, t_timeout: [float, int] = 30, default: str = None) -> str:
    """
    Ask the user for an input and quit with a runtime error if no input was given

    :param prompt: text to be printed before recording the input
    :param t_timeout: time limit to enter an input [s]
    :param default: default output that is returned if the timeout was raised, pass `None` for no default
    :return: return the input as a string captured by the system
    """
    print(prompt, end=" ")
    # Wait until stdin becomes readable or the timeout elapses.
    # NOTE(review): select() on sys.stdin only works on POSIX terminals — confirm
    # this is never called on Windows or with a redirected stdin.
    rlist, _, _ = select.select([sys.stdin], [], [], t_timeout)
    if not rlist:
        # Nothing was typed within the time limit
        if default is None:
            raise RuntimeError(f"No input received within {t_timeout}s!")
        else:
            return default
    return sys.stdin.readline().strip()
def print_cbt(msg: str, color: str = "", bright: bool = False, tag: str = "", end="\n"):
    """
    Print a colored (and bright) message with a tag in the beginning.

    :param msg: string to print
    :param color: color to print in, default `''` is the IDE's/system's default
    :param bright: flag if the message should be printed bright
    :param tag: tag to be printed in brackets in front of the message
    :param end: endline symbol forwarded to `print()`
    """
    brgt = Style.BRIGHT if bright else ""
    if not isinstance(tag, str):
        raise pyrado.TypeErr(given=tag, expected_type=str)
    if tag != "":
        tag = f"[{tag}] "
    color = color.lower()
    # Dispatch table instead of the long if/elif chain; white/default adds no Fore code
    color_codes = {
        "": "", "w": "", "white": "",
        "y": Fore.YELLOW, "yellow": Fore.YELLOW,
        "b": Fore.BLUE, "blue": Fore.BLUE,
        "g": Fore.GREEN, "green": Fore.GREEN,
        "r": Fore.RED, "red": Fore.RED,
        "c": Fore.CYAN, "cyan": Fore.CYAN,
    }
    if color not in color_codes:
        raise pyrado.ValueErr(given=color, eq_constraint="'y', 'b', 'g', 'r', or 'c'")
    print(color_codes[color] + brgt + tag + msg + Style.RESET_ALL, end=end)
@run_once
def print_cbt_once(msg: str, color: str = "", bright: bool = False, tag: str = "", end="\n"):
    """Variant of :func:`print_cbt` that emits its output only on the first call."""
    return print_cbt(msg=msg, color=color, bright=bright, tag=tag, end=end)
def select_query(
    items: Sequence,
    max_display: int = 10,
    fallback: Callable[[], T] = None,
    item_formatter: Callable[[T], str] = str,
    header: str = "Available options:",
    footer: str = "Please enter the number of the option to use.",
) -> T:
    """
    Ask the user to select an item out of the given list.

    :param items: items for the query
    :param max_display: maximum number of items to display; the rest is truncated
    :param fallback: callable used to resolve the raw input string when it is not
        a number; should return the selected item or `None` on failure
    :param item_formatter: callable to select the parts of an `Experiment` instance which should be printed, by default
                           `str` turns the complete `Experiment` instance into a string.
    :param header: string to print above the query
    :param footer: string to print below the list of options
    :return: the item the user selected
    """
    # Truncate if needed
    print(header)
    if max_display is not None and len(items) > max_display:
        items = items[:max_display]
        print(f"(showing the latest {max_display})")
    # Display list
    for i, exp in enumerate(items):
        print(" ", i, ": ", item_formatter(exp))
    print(footer)
    # Repeat query on errors
    while True:
        sel = input()
        # Check if sel is a number, if so use it.
        if sel == "":
            # first item is default
            return items[0]
        elif sel.isdigit():
            # Parse index
            sel_idx = int(sel)
            if sel_idx < len(items):
                return items[sel_idx]
            # Error: index out of range, re-prompt
            print("Please enter a number between 0 and ", len(items) - 1, ".")
        elif fallback is not None:
            # Use fallback if any
            fres = fallback(sel)
            if fres is not None:
                return fres
            # The fallback should report it's own errors
        else:
            print("Please enter a number.")
def insert_newlines(string: str, every: int) -> str:
    """
    Break *string* into chunks of *every* characters, joined by line breaks.

    :param string: input string to be broken into multiple lines
    :param every: gap in number of characters between every line break
    :return: the input string with line breaks
    """
    chunks = (string[pos:pos + every] for pos in range(0, len(string), every))
    return "\n".join(chunks)
def ensure_math_mode(inp: [str, Sequence[str]]) -> [str, list]:
    """
    Naive way to ensure that a string is compatible with LaTeX math mode for printing.

    BUG FIX: the original indexed `inp[0]` / `inp[-1]` inside the "no $ yet"
    branch, which raised IndexError for the empty string; since `count("$") == 0`
    already guarantees neither end carries a '$', the string is simply wrapped.

    :param inp: input string, iterable of strings, or `None`
    :return: string in math mode (or a list thereof); `None` entries pass through
    """
    if isinstance(inp, str):
        count = inp.count("$")
        if count == 0:
            # No $ symbols yet: wrap the whole string (safe for "" as well)
            inp = f"${inp}$"
        elif count % 2 != 0:
            raise pyrado.ValueErr(msg=f"The string {inp} must contain an even number of '$' symbols!")
        # An even, positive number of $ symbols is assumed to be correct already
    elif inp is None:
        return None  # in case a Space has 1 dimension but no labels
    elif isinstance(inp, Iterable):
        # Do it recursively, skipping None entries
        return [ensure_math_mode(s) if s is not None else None for s in inp]
    else:
        raise pyrado.TypeErr(given=inp, expected_type=[str, list])
    return inp
def color_validity(data: np.ndarray, valids: np.ndarray) -> list:
    """
    Color the entries of a data array red or green depending on whether the entries are valid.

    :param data: ndarray containing the data to print
    :param valids: ndarray containing boolean integer values deciding the color (1 --> green, 0 --> red)
    :return: list of strings
    """

    def _color_for(v: int) -> str:
        # BUG FIX: the inner helper previously shadowed the enclosing
        # function's name, which made the recursion-looking call confusing.
        return Fore.GREEN if v == 1 else Fore.RED

    return [_color_for(v) + str(ele) + Style.RESET_ALL for (ele, v) in zip(data, valids)]
@contextlib.contextmanager
def completion_context(msg: str, **kwargs):
    """
    Context manager that prints a message, executes the code, and then prints a symbol on success or failure (when an
    exception is raised).

    :param msg: message to print at the beginning, e.g. 'Calibrating'
    :param kwargs: keyword arguments forwarded to `print_cbt()`
    """
    if not isinstance(msg, str):
        raise pyrado.TypeErr(given=msg, expected_type=str)
    try:
        # Execute the code
        print_cbt(msg, **kwargs, end=" ")
        yield
    except Exception as e:
        # In case of any error print the failure symbol (default red), then re-raise
        print_cbt(pyrado.sym_failure, color=kwargs.get("color", "r"), bright=kwargs.get("bright", False))
        raise e
    else:
        # No error: print the success symbol (default green)
        print_cbt(pyrado.sym_success, color=kwargs.get("color", "g"), bright=kwargs.get("bright", False))
    # CLEANUP: removed a dead `finally: pass` clause that did nothing.
|
16,545 | edcb2f6e998089446593e24664af551f52a7f6ce | from .home.views import bp as home_blueprint
from .auth.views import jwt_views |
16,546 | b6fd38cee5609e1d4e56864333c2383c8a959226 | # Author: Mark Harmon
# Purpose: Make labels and final trading data for input into recurrent model for tick data
# This is to make my training set and labels for the cnn sequence. I'm going to treat it similarly to a video problem
# by having one sequence == one image. This model should inherently be better than my current recurrent model...
import numpy as np
import pickle as pkl
import sys
def label_make(diff, sigma, seqsize, num_seq, step, window):
    """Build windowed input sequences and 5-class per-stock labels from tick diffs.

    Each sample consists of `num_seq` overlapping sequences (shifted by
    `changelen` ticks) and a label tensor of shape (window, 5 stocks, 5 classes),
    where the class is chosen by comparing each diff against +/- sigma.

    :param diff: 2D array of per-tick price differences, shape (ticks, 5)
    :param sigma: per-stock threshold values (length 5); re-estimated weekly below
    :param seqsize: length of each input sequence
    :param num_seq: number of shifted sequences per sample
    :param step: stride between consecutive samples
    :param window: number of future ticks labeled per sample
    :return: (labels, totaldata) — label array and nested list of sequences
    """
    # 5 stocks and 5 labels...
    changelen = 4
    lablenint = int((len(diff)-(seqsize+ (num_seq-1)*changelen))/step)
    labels = np.zeros((lablenint,window,5,5))
    beg = seqsize
    totaldata = []
    # Labels start after the last (shifted) input sequence ends
    labbeg = seqsize + (num_seq-1)*changelen
    factor = 2
    week_len = 2016
    # For when I do the day type label creation
    day_len = int(week_len/7)
    stocks = 5
    week_count = 1
    for i in range(len(labels)):
        # Future diffs this sample must predict
        testtemp = diff[labbeg + i * step:labbeg + i * step + window, :]
        dattemp = []
        for k in range(num_seq):
            # Transposed so each sequence is (stocks, seqsize)
            dattemp += [diff[i*step + k*changelen:beg+i*step+k*changelen,:].T.tolist()]
        totaldata += [dattemp]
        if beg+i*step+window>factor*day_len:
            week_count += 1
            factor += 1
            # Need to calculate a new sigma value after each week
            # NOTE(review): np.std is taken over ALL columns of the slice, so every
            # sigma[m] gets the same value — possibly should be [..., m]; confirm intent.
            sigma = np.zeros(stocks)
            for m in range(stocks):
                sigma[m] = np.std(diff[(week_count-1)*day_len:week_count*day_len])
        # Classify each future tick per stock into one of 5 buckets:
        # (< -sigma), [-sigma, 0), == 0, (0, sigma], (> sigma)
        for k in range(window):
            for j in range(stocks):
                sigtemp = sigma[j]
                if testtemp[k,j]<-sigtemp:
                    labels[i,k,j,0]=1
                elif testtemp[k,j]<0.:
                    labels[i,k,j,1]=1
                elif testtemp[k,j]==0:
                    labels[i,k,j,2]=1
                elif testtemp[k,j] <=sigtemp:
                    labels[i,k,j,3]=1
                else:
                    labels[i,k,j,4]=1
    return labels,totaldata
def main(seqsize, step, window):
    """Load tick data, normalize it, and dump 4 chunked (data, labels, dates) pickles.

    :param seqsize: length of each input sequence (also in output file name)
    :param step: stride between samples, forwarded to label_make
    :param window: label window length, forwarded to label_make
    """
    address = '/home/mharmon/FinanceProject/Data/tickdata/traindata.pkl'
    num_seq = 4
    changelen = 4
    # NOTE(review): open() handles are never closed here or below — consider `with`.
    data,dates = pkl.load(open(address,'rb'))
    # First differences per stock
    diff = np.zeros((len(data)-1,5))
    for i in range(len(data)-1):
        diff[i,:] = data[i+1,:]-data[i,:]
    # Z-score normalization per stock...
    for i in range(5):
        diff[:,i] = (diff[:,i]-np.mean(diff[:,i]))/np.std(diff[:,i])
    # Calculate my sigma values
    # ...then min-max scaling to [0, 1] on top of the z-scores
    for i in range(5):
        diff[:,i] = (diff[:,i]-np.min(diff[:,i]))/(np.max(diff[:,i])-np.min(diff[:,i]))
    # NOTE(review): sigma is computed AFTER min-max scaling — confirm that is intended.
    sigma = np.zeros(5)
    for i in range(5):
        sigma[i] = np.std(diff[:,i])
    # Now make the actual labels
    myrange = int(len(data) / 4.)
    beg = 0
    end = myrange
    sigsave = '/home/mharmon/FinanceProject/Data/tickdata/sigma.pkl'
    pkl.dump(sigma,open(sigsave,'wb'))
    for i in range(4):
        # Instead of using data, I should be using diff as my actual data..
        labels,totaldata = label_make(diff[beg:end],sigma,seqsize,num_seq,step,window)
        finaldates = dates[beg+seqsize+(num_seq-1)*changelen+1:end+1]
        totaldata = np.array(totaldata,'float32')
        totaldata = np.reshape(totaldata,(len(totaldata),num_seq,1,5,seqsize))
        labels = np.array(labels,'uint8')
        datasave = '/home/mharmon/FinanceProject/Data/tickdata/trainday' + str(seqsize)+'win' + str(window) +'cnn'+str(i)+ '.pkl'
        pkl.dump([totaldata,labels,finaldates],open(datasave,'wb'))
        # Overlap the next chunk so no label window is lost at the seam
        beg = end-(seqsize +changelen*(num_seq-1))
        end = beg + myrange
    return
if __name__=='__main__':
    # CLI usage: python script.py <seqsize> <step> <window>
    seqsize = int(sys.argv[1])
    step = int(sys.argv[2])
    window = int(sys.argv[3])
    main(seqsize,step,window)
16,547 | c1ece249b4a36b3220bb75e1781c867190d25dc1 | #!/usr/bin/python
import sys
# We assume we know the dimensions of the final matrix output (m row, n columns)
# Here we will set m and n for our matrix example where A is (2 x 3) and B is (3 x 2) so C is (2 x 2)
# Since python indexing starts at 0 it has column indexes 0 and 1 (same for rows)
# IMPORTANT : - If you are using our matrix generator please input the same m and n you used in that script!
# - If you aren't don't mind that, just change the split to "," instead of "\t"!
m = 2
n = 2
for line in sys.stdin:
# remove leading and trailing whitespace
line = line.strip()
# split the line corresponding to one element of either matrix
element = line.split("\t")
# Retrieve the 4 attributes of each line respectively
provenance = element[0]
row = int(element[1])
col = int(element[2])
value = float(element[3])
# Matrix A
## If line corresponds to an element of matrix A, we will need to replicate each element of this matrix n times
if provenance == "A":
for k in range(0, n + 1): # We add 1 so Python will replicate n times
print '%s\t%s' % ((row, k),(provenance, col, value))
# Matrix B
# If line corresponds to an element of matrix B, we will need to replicate each element of this matrix m times
else:
for i in range(0, m + 1): # We add 1 so Python will replicate m times
print '%s\t%s' % ((i, col),(provenance, row, value))
# Printing last object!
#print '%s\t%s' % ((i, col),(provenance, row, value))
# Each element then goes through a sort&shuffle phase where key-value pairs are grouped by composite key (k1, k2)
#### NOTES : We could try two implementations :
#### - this one
#### - using a dictionnary where we append every element which should have the same composite key
####
#### Main difference is that dictionnaries may be faster but are memory bound, which implies it may crash if the
#### matrix is too big
|
16,548 | 2d15e89d0e5a058452ebc11a78b0d75134c554f7 | # If a number c can be writen in a^2 + b^2 = c^2 or not, not yet solved
# Problem link: https://leetcode.com/contest/leetcode-weekly-contest-39/problems/sum-of-square-numbers/
import math
class Solution(object):
    """LeetCode 633: decide whether c can be written as a^2 + b^2 (a, b >= 0).

    BUG FIX: the original used `dict.keys()[0]` (a TypeError on Python 3),
    handled only 1 or 2 distinct prime factors, and was marked "not yet
    solved". Replaced with the sum-of-two-squares theorem: c is expressible
    iff every prime factor p with p % 4 == 3 occurs to an even power.
    """

    def primes(self, n):
        """Return the prime factorization of n as a list with multiplicity.

        e.g. primes(12) == [2, 2, 3]; primes(0) == primes(1) == [].
        """
        primfac = []
        d = 2
        while d * d <= n:
            while (n % d) == 0:
                primfac.append(d)  # factors repeated per multiplicity
                n //= d
            d += 1
        if n > 1:
            primfac.append(n)
        return primfac

    def judgeSquareSum(self, c):
        """Return True iff c == a*a + b*b for some non-negative integers a, b."""
        exponents = {}
        for p in self.primes(c):
            exponents[p] = exponents.get(p, 0) + 1
        # Fermat's theorem on sums of two squares: only primes congruent to
        # 3 mod 4 can block representability, and only at odd exponents.
        for p, exp in exponents.items():
            if p % 4 == 3 and exp % 2 == 1:
                return False
        return True
s = Solution()
print(s.judgeSquareSum(3)) # 3 is not a sum of two squares -> expect False
print(s.judgeSquareSum(4)) # 4 = 0^2 + 2^2 -> expect True (original comment was wrong)
print(s.judgeSquareSum(5)) # 5 = 1^2 + 2^2 -> expect True
print(s.judgeSquareSum(6)) # 6 is not a sum of two squares -> expect False (original comment was wrong)
print(s.judgeSquareSum(13)) # 13 = 2^2 + 3^2 -> expect True
print(s.judgeSquareSum(10000000)) # 10^7 -> expect True; previously hit a memory limit
|
16,549 | 04b7ddbc13d35765845de194783b24132209bb2d | from graphics import *
import time
from numpy import *
import math
win=GraphWin("circle polar 2",640,480)
def drawsympoints(a, b, x, y):
    """Plot the eight symmetric images of (x, y) around the center (a, b)."""
    time.sleep(0.3)  # slow the drawing down so it is visible as an animation
    pixels = [
        (x + a, y + b, "yellow"),
        (y + a, x + b, "green"),
        (y + a, -x + b, "red"),
        (x + a, -y + b, "blue"),
        (-x + a, -y + b, "black"),
        (-y + a, -x + b, "green"),
        (-y + a, x + b, "black"),
        (-x + a, y + b, "green"),
    ]
    for px, py, colour in pixels:
        win.plotPixel(px, py, colour)
def circle(a, b, r):
    """Draw a circle of radius r centered at (a, b) using 8-way symmetry.

    Only the first octant (angles 0..pi/4) is computed; drawsympoints mirrors
    each point into the other seven octants.
    """
    step = 1 / r
    num_steps = int(math.ceil((math.pi / 4) / step))
    for k in range(num_steps):
        angle = k * step
        drawsympoints(a, b, r * math.cos(angle), r * math.sin(angle))
# Draw a radius-100 circle centered at (100, 100)
circle(100,100,100)
|
16,550 | ef2cd38eb0f374379288ab7b449df654ab39f3ed | from django.apps import AppConfig
class Object2BookConfig(AppConfig):
    """Django application configuration for the ``object2book`` app."""

    # Dotted path Django uses to register this application
    name = 'object2book'
|
16,551 | 2682400549f788a1602bba67d0b1bb14160e7319 | #
def main():
    """Interactively read a price and payment, then print the change breakdown.

    All denomination math is done in integer cents to avoid float error.
    """
    print("Change Counter, by Khalid Hussain", "\n")
    price = float(input("Price?: "))
    amount_tendered = float(input("Amount tendered?: "))
    change_due = round(amount_tendered - price, 2)
    # BUG FIX: int() truncates toward zero, so e.g. 4.35 * 100 == 434.999...
    # became 434 cents; round to the nearest cent before converting.
    price_for_me = int(round(price * 100))
    amount_tendered_for_me = int(round(amount_tendered * 100))
    change_for_me = amount_tendered_for_me - price_for_me
    print("\n", "Report", "\n", "==========")
    print("Purchase Price: ", price)
    print("Amount Tendered: ", amount_tendered, "\n")
    print("Change: ", change_due)
    # Peel off denominations from largest to smallest
    one_dollar_bills = int(change_for_me / 100)
    print(one_dollar_bills, "one-dollar bills")
    change_for_me = change_for_me % 100
    print(change_for_me // 25, "quarters")
    change_for_me = change_for_me % 25
    print(change_for_me // 10, "dimes")
    change_for_me = change_for_me % 10
    print(change_for_me // 5, "nickles")
    change_for_me = change_for_me % 5
    print(change_for_me // 1, "pennies")
main()
|
16,552 | 8863d97aba36cd73027363c00db39666be8f5acc | from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from django.shortcuts import render
from .models import Product
from .forms import ProductForm, LoginForm
def home(request):
    """Redirect the site root to the product listing."""
    return HttpResponseRedirect('/products/')
def index(request):
    """Render the product listing page together with an empty creation form."""
    context = {'products': Product.objects.all(), 'form': ProductForm()}
    return render(request, 'index.html', context)
def detail(request, product_id):
    """Render the detail page for one product.

    BUG FIX: `Product.objects.get` raised an unhandled DoesNotExist (HTTP 500)
    for unknown ids; `get_object_or_404` returns a proper 404 instead.
    """
    product = get_object_or_404(Product, id=product_id)
    return render(request, 'detail.html', {'product': product})
# def post_product(request):
# form = ProductForm(request.POST)
# if form.is_valid():
# product = Product( name = form.cleaned_data['name'],
# price = form.cleaned_data['price'],
# stack = form.cleaned_data['stack'],
# image_url = form.cleaned_data['image_url'])
# product.save(commit=True)
#
# return HttpResponseRedirect('/products')
def post_product(request):
    """Create a product owned by the current user from POSTed form data.

    BUG FIX: an invalid form previously fell through and returned ``None``,
    which makes Django raise; the listing page is now re-rendered with the
    bound form so validation errors can be displayed.
    """
    form = ProductForm(request.POST)
    if form.is_valid():
        product = form.save(commit=False)
        product.user = request.user
        product.save()
        return HttpResponseRedirect('/products/')
    return render(request, 'index.html', {'products': Product.objects.all(), 'form': form})
def profile(request, username):
    """Render a user's profile page listing the products they created."""
    owner = User.objects.get(username=username)
    owned_products = Product.objects.filter(user=owner)
    return render(request, 'profile.html', {'username': username, 'products': owned_products})
def login_view(request):
    """Log a user in from POSTed credentials; redirect to /products/ on success.

    NOTE(review): authentication failures are only printed to the server
    console — the re-rendered form gives the user no feedback.
    """
    if request.method == 'POST':
        form = LoginForm(request.POST)
        if form.is_valid():
            u = form.cleaned_data['username']
            p = form.cleaned_data['password']
            user = authenticate(username=u, password=p)
            if user is not None:
                if user.is_active:
                    login(request, user)
                    return HttpResponseRedirect('/products/')
                else:
                    print("The account has been disabled!")
            else:
                print("The username and password were incorrect.")
    else:
        form = LoginForm()
    # Fall through: GET request, invalid form, or failed authentication
    return render(request, 'login.html', {'form': form})
def logout_view(request):
    """Log the current user out and send them to the login page."""
    logout(request)
    return HttpResponseRedirect('/login/')
def new(request):
    """Render the standalone "add new product" form page."""
    return render(request, 'addNew.html', {'form': ProductForm()})
def buy_product(request):
    """Decrement a product's stock by one and return the remaining count.

    BUG FIX: stock is no longer decremented below zero.

    NOTE(review): this mutates state on a GET request and the read-modify-write
    is racy under concurrent purchases; consider POST plus an F() expression.
    """
    product_id = request.GET.get('product_id', None)
    stack = 0
    if product_id:
        product = Product.objects.get(id=int(product_id))
        if product is not None:
            if product.stack > 0:
                product.stack -= 1
                product.save()
            stack = product.stack
    return HttpResponse(stack)
|
16,553 | c25ac83432640c8cd0758804f9afb50dca899036 | N = int(input())
E = int(input())
known = [0] * N
next_song = 0
for e in range(E):
K = [int(i) for i in input().split()][1:]
if 1 in K:
for k in K:
known[k - 1] |= 1 << next_song
next_song += 1
else:
share = 0
for k in K:
share |= known[k - 1]
for k in K:
known[k - 1] = share
for n, k in enumerate(known):
if k == (1 << next_song) - 1:
print(n + 1)
|
16,554 | a5704dc2e5dc1951b0827920c56b7e6294a92646 | class Solution:
def DFS(self, candidates, target, start, valuelist):
length = len(candidates)
if target == 0 and valuelist not in Solution.ret: return Solution.ret.append(valuelist)
for i in range(start, length):
if target < candidates[i]:
return
self.DFS(candidates, target - candidates[i], i + 1, valuelist + [candidates[i]])
def combinationSum2(self, candidates, target):
candidates.sort()
Solution.ret = []
self.DFS(candidates, target, 0, [])
return Solution.ret |
16,555 | 8cf4954e683437ad16a518b0590c68a8de09f9d6 | # -*- coding: utf-8 -*-
from watchdog.events import PatternMatchingEventHandler
from .functions import *
class BaseFileEventHandler(PatternMatchingEventHandler):
    '''Watches filesystem events for the base file and refreshes the UI table.'''

    def __init__(self, patterns, mainWindow):
        super().__init__(patterns)
        self.mainWindow = mainWindow  # window holding the table and status bar

    def on_modified(self, event):
        '''Reload the base file and refresh the table if its contents changed.'''
        try:
            from_base = get_data_xls(self.mainWindow.base_file.get())
            from_table = self.mainWindow.base_frame.getAllEntriesAsList()
            # Compare without the spreadsheet's header row
            if from_base[1:] != from_table:
                self.mainWindow.base_frame.loadBase()
                self.mainWindow.status_bar['text'] = u'База обновлена'
        except Exception:
            # Best effort: a half-written or locked file is expected while it
            # is being saved, so wait for the next event.
            # BUG FIX: narrowed from a bare ``except:`` which also swallowed
            # SystemExit and KeyboardInterrupt.
            pass
16,556 | 38435ce82f0e8dd9e4282966f9ecee30160f2dfe | """A lightweight wrapper around PyMySQL for easy to use
Only for python 3
"""
import time
import traceback
import pymysql
import pymysql.cursors
class ConnectionSync:
    """Synchronous wrapper around a single PyMySQL connection.

    Transparently reconnects when the connection has been idle for longer
    than ``max_idle_time`` (MySQL drops idle clients, by default after 8h).
    """

    def __init__(self, host, database, user, password,
                 port=0,
                 max_idle_time=7*3600,
                 connect_timeout=10,
                 autocommit=True,
                 return_dict=True,
                 charset="utf8mb4"):
        self.max_idle_time = max_idle_time
        self._db_args = {
            'host': host,
            'database': database,
            'user': user,
            'password': password,
            'charset': charset,
            'autocommit': autocommit,
            'connect_timeout': connect_timeout,
        }
        if return_dict:
            # Rows come back as dicts keyed by column name instead of tuples
            self._db_args['cursorclass'] = pymysql.cursors.DictCursor
        if port:
            self._db_args['port'] = port
        self._db = None
        self._last_use_time = time.time()
        self.reconnect()

    def _ensure_connected(self):
        # Mysql by default closes client connections that are idle for
        # 8 hours, but the client library does not report this fact until
        # you try to perform a query and it fails. Protect against this
        # case by preemptively closing and reopening the connection
        # if it has been idle for too long (7 hours by default).
        if (self._db is None or
                (time.time() - self._last_use_time > self.max_idle_time)):
            self.reconnect()
        self._last_use_time = time.time()

    def _cursor(self):
        """Return a fresh cursor, reconnecting first if needed."""
        self._ensure_connected()
        return self._db.cursor()

    def __del__(self):
        self.close()

    def close(self):
        """Closes this database connection."""
        if getattr(self, "_db", None) is not None:
            if not self._db_args['autocommit']:
                # Flush any pending transaction before closing
                self._db.commit()
            self._db.close()
            self._db = None

    def reconnect(self):
        """Closes the existing database connection and re-opens it."""
        self.close()
        self._db = pymysql.connect(**self._db_args)

    def query_many(self, queries):
        """query many SQLs, Returns all result.

        A failing query contributes an empty list (error printed, not raised).
        """
        assert isinstance(queries, list)
        cursor = self._cursor()
        results = []
        for query in queries:
            try:
                cursor.execute(query)
                result = cursor.fetchall()
            except Exception as e:
                print(e)
                result = []
            results.append(result)
        return results

    def query(self, query, *parameters, **kwparameters):
        """Returns a row list for the given query and parameters."""
        cursor = self._cursor()
        try:
            cursor.execute(query, kwparameters or parameters)
            result = cursor.fetchall()
            return result
        finally:
            cursor.close()

    def get(self, query, *parameters, **kwparameters):
        """Returns the (singular) row returned by the given query.
        """
        cursor = self._cursor()
        try:
            cursor.execute(query, kwparameters or parameters)
            return cursor.fetchone()
        finally:
            cursor.close()

    def execute(self, query, *parameters, **kwparameters):
        """Executes the given query, returning the lastrowid from the query.

        Duplicate-key errors (MySQL errno 1062) are silently ignored; any
        other error is re-raised after printing a traceback.
        """
        cursor = self._cursor()
        try:
            cursor.execute(query, kwparameters or parameters)
            return cursor.lastrowid
        except Exception as e:
            if e.args[0] == 1062:
                # just skip duplicated item error
                pass
            else:
                traceback.print_exc()
                raise e
        finally:
            cursor.close()

    # Alias: inserting is just executing an INSERT statement
    insert = execute

    # =============== high level method for table ===================
    def table_has(self, table_name, field, value):
        """Return the row where `field` equals `value`, or None.

        NOTE(review): `value` is interpolated directly into the SQL string —
        SQL-injection risk; should use a parameterized query.
        """
        sql = 'SELECT {} FROM {} WHERE {}="{}"'.format(
            field,
            table_name,
            field,
            value)
        d = self.get(sql)
        return d

    def table_insert(self, table_name, item):
        '''item is a dict : key is mysql table field'''
        fields = list(item.keys())
        values = list(item.values())
        fieldstr = ','.join(fields)
        valstr = ','.join(['%s'] * len(item))
        sql = 'INSERT INTO {} ({}) VALUES({})'.format(
            table_name, fieldstr, valstr)
        try:
            last_id = self.execute(sql, *values)
            return last_id
        except Exception as e:
            print(e)
            if e.args[0] == 1062:
                # just skip duplicated item error
                pass
            else:
                traceback.print_exc()
                print('sql:', sql)
                print('item:')
                # Dump each field/value pair for debugging (truncate long values)
                for i in range(len(fields)):
                    vs = str(values[i])
                    if len(vs) > 300:
                        print(fields[i], ' : ', len(vs), type(values[i]))
                    else:
                        print(fields[i], ' : ', vs, type(values[i]))
                raise e

    def table_insert_many(self, table_name, items):
        ''' items: list of item

        NOTE(review): executemany() returns the affected-row count, not a
        lastrowid, despite the variable name. All items must share the same
        keys as items[0].
        '''
        assert isinstance(items, list)
        item = items[0]
        fields = list(item.keys())
        values = [list(item.values()) for item in items]
        fieldstr = ','.join(fields)
        valstr = ','.join(['%s'] * len(item))
        sql = 'INSERT INTO {} ({}) VALUES({})'.format(
            table_name, fieldstr, valstr)
        cursor = self._cursor()
        try:
            last_id = cursor.executemany(sql, values)
            return last_id
        except Exception as e:
            print('\t', e)
            if e.args[0] == 1062:
                # just skip duplicated item error
                pass
            else:
                traceback.print_exc()
                print('sql:', sql)
                raise e

    def table_update(self, table_name, updates,
                     field_where, value_where):
        '''updates is a dict of {field_update:value_update}

        NOTE(review): `value_where` is interpolated directly into the SQL
        string — SQL-injection risk; should be passed as a parameter.
        '''
        upsets = []
        values = []
        for k, v in updates.items():
            s = '{}=%s'.format(k)
            upsets.append(s)
            values.append(v)
        upsets = ','.join(upsets)
        sql = "UPDATE {} SET {} WHERE {}='{}'".format(
            table_name,
            upsets,
            field_where, value_where,
        )
        self.execute(sql, *(values))
|
16,557 | 74510d90a000ad56910bc3321bc7702f0a120ba2 | import json
from j2v.generation.generator import Generator
from j2v.generation.result_writer import SQLWriter, LookerWriter
from j2v.utils.config import generator_config, supported_dialects
from j2v.utils.helpers import get_formatted_var_name
TABLE_WITH_JSON_COLUMN_DEFAULT = generator_config['TABLE_WITH_JSON_COLUMN_DEFAULT']
OUTPUT_VIEW_ML_OUT_DEFAULT = generator_config['OUTPUT_VIEW_ML_OUT_DEFAULT']
COLUMN_WITH_JSONS_DEFAULT = generator_config['COLUMN_WITH_JSONS_DEFAULT']
EXPLORE_LKML_OUT_DEFAULT = generator_config['EXPLORE_LKML_OUT_DEFAULT']
SQL_DIALECT_DEFAULT = generator_config['SQL_DIALECT_DEFAULT']
TABLE_ALIAS_DEFAULT = generator_config['TABLE_ALIAS_DEFAULT']
HANDLE_NULL_VALUES_IN_SQL_DEFAULT = generator_config['HANDLE_NULL_VALUES_IN_SQL_DEFAULT']
class MainProcessor:
    """Facade driving JSON-to-LookML/SQL generation and result writing."""

    def __init__(self, column_name=COLUMN_WITH_JSONS_DEFAULT, output_explore_file_name=EXPLORE_LKML_OUT_DEFAULT,
                 output_view_file_name=OUTPUT_VIEW_ML_OUT_DEFAULT, sql_table_name=TABLE_WITH_JSON_COLUMN_DEFAULT,
                 table_alias=TABLE_ALIAS_DEFAULT, handle_null_values_in_sql=HANDLE_NULL_VALUES_IN_SQL_DEFAULT,
                 primary_key=None, sql_dialect=SQL_DIALECT_DEFAULT):
        """
        Init empty lists and ops counter.

        Every argument falls back to its module-level default when falsy;
        raises ValueError for an unsupported SQL dialect.
        """
        self.output_explore_file_name = output_explore_file_name or EXPLORE_LKML_OUT_DEFAULT
        self.output_view_file_name = output_view_file_name or OUTPUT_VIEW_ML_OUT_DEFAULT
        self.column_name = column_name or COLUMN_WITH_JSONS_DEFAULT
        self.sql_table_name = sql_table_name or TABLE_WITH_JSON_COLUMN_DEFAULT
        self.table_alias = get_formatted_var_name(table_alias or TABLE_ALIAS_DEFAULT)
        self.handle_null_values_in_sql = handle_null_values_in_sql or HANDLE_NULL_VALUES_IN_SQL_DEFAULT
        self.sql_dialect = sql_dialect or SQL_DIALECT_DEFAULT
        if self.sql_dialect.lower() not in supported_dialects:
            raise ValueError("SQL Dialect {} not supported. Dialects available: {}".format(self.sql_dialect, ", ".join(supported_dialects) ))
        self.generator = Generator(column_name=self.column_name,
                                   table_alias=self.table_alias,
                                   handle_null_values_in_sql=self.handle_null_values_in_sql,
                                   sql_dialect=self.sql_dialect,
                                   primary_key=primary_key)
        self.sql_writer = SQLWriter(self.sql_table_name, self.table_alias, self.sql_dialect)
        self.looker_writer = LookerWriter(self.output_explore_file_name, self.output_view_file_name,
                                          self.sql_table_name, self.table_alias)

    def process_json_files(self, json_file_list):
        """
        Process JSON files from disk and write view/explore files plus SQL.

        :param json_file_list: list of paths to JSON files
        :return:
        """
        for json_file in json_file_list:
            with open(json_file) as f_in:
                json_obj = json.load(f_in)
                self.process_single_object(json_obj)
        self.looker_writer.create_view_file(self.generator.dim_definitions)
        self.looker_writer.create_explore_file(self.generator.explore_joins)
        self.sql_writer.print_sql(self.generator.dim_sql_definitions, self.generator.all_joins,
                                  self.handle_null_values_in_sql)

    def transform(self, data_object):
        """Transform one in-memory object; return dict with sql/model/views strings."""
        self.pre_process()
        self.process_single_object(data_object)
        model, sql, views = self.post_process()
        return {"sql": sql, "model": model, "views": views}

    def transform_rich(self, data_object_list):
        """Like :meth:`transform` but merges paths from a list of objects."""
        self.pre_process()
        for data_object in data_object_list:
            self.process_single_object(data_object)
        model, sql, views = self.post_process()
        return {"sql": sql, "model": model, "views": views}

    def pre_process(self):
        # Reset generator state so previous runs do not leak into this one
        self.generator.clean()

    def post_process(self):
        """Render the accumulated generator state into (model, sql, views) strings."""
        views = self.looker_writer.get_view_str(self.generator.dim_definitions)
        model = self.looker_writer.get_explore_str(self.generator.explore_joins)
        sql = self.sql_writer.get_sql_str(self.generator.dim_sql_definitions, self.generator.all_joins)
        return model, sql, views

    def process_single_dict(self, python_dict):
        # Backwards-compatible alias for process_single_object
        self.process_single_object(data_object=python_dict)

    def process_single_object(self, data_object):
        # Feed one object's paths into the generator's accumulated state
        self.generator.collect_all_paths(data_object=data_object)
|
16,558 | 8e0f0010c8975f8b9403618bb87cff2e12611e68 | import time, sys
from tracks.models import Album
from .main import connect_to_broker
from .get_artist_albums import sleepWithHeartbeats
from .requeue_all_artists import publish
def run() :
    """Re-publish every stored album to the 'album_tracks' and 'album_genres'
    queues.

    Loads all Album deezer ids, opens a RabbitMQ connection, then publishes
    each id to both queues, pausing periodically (with heartbeats) so the
    broker connection stays alive during the long loop. Ctrl-C closes the
    connection and exits.
    """
    connection = None  # so the KeyboardInterrupt handler can't hit a NameError
    try :
        time_start = time.time()
        albums_ids = [str(album.deezer_id) for album in Album.objects.all()]
        # NOTE(review): message says "artist" but these are album ids.
        print(f'[*] fetched {len(albums_ids)} artist in {round(time.time() - time_start, 2)} s')
        connection = connect_to_broker()
        channel = connection.channel()
        print('[*] Connection with rabbitmq opened')
        channel.confirm_delivery()
        for count, album in enumerate(albums_ids):
            # Coarser milestones pause longer, throttling publishing while the
            # heartbeat loop runs.
            if count % 5000 == 0 :
                print(f'[*] delivered {count} in {round(time.time() - time_start, 2)} s')
                sleepWithHeartbeats(connection, 300)
            elif count % 500 == 0 :
                print(f'[*] delivered {count} in {round(time.time() - time_start, 2)} s')
                sleepWithHeartbeats(connection, 30)
            elif count % 50 == 0 :
                print(f'[*] delivered {count} in {round(time.time() - time_start, 2)} s')
                sleepWithHeartbeats(connection, 3)
            publish(connection, channel, 'album_tracks', album)
            publish(connection, channel, 'album_genres', album)
        # BUG FIX: this was a plain string literal, so the {...} placeholders
        # were printed verbatim; it needs the f-prefix to interpolate.
        print(f'[*] Requeueing done {len(albums_ids)} in {round(time.time() - time_start, 2)} s')
    except KeyboardInterrupt :
        if connection is not None:  # interrupt may arrive before connecting
            connection.close()
        sys.exit(0)
|
16,559 | fd2b8e17c0b129309d137629264b360c6e8a385d | # Generated by Django 3.2 on 2021-05-30 00:00
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: moves the per-reading SMART attribute columns off
    ``smartctl`` into a new ``Attribute`` model (one row per attribute
    reading, FK back to ``smartctl``).

    NOTE(review): RemoveField drops the old columns' data; nothing here
    backfills Attribute rows from existing readings.
    """

    dependencies = [
        ('web', '0005_smartctl_date'),
    ]

    operations = [
        # Strip the attribute fields that previously lived directly on smartctl.
        migrations.RemoveField(
            model_name='smartctl',
            name='Current',
        ),
        migrations.RemoveField(
            model_name='smartctl',
            name='Date',
        ),
        migrations.RemoveField(
            model_name='smartctl',
            name='RawValue',
        ),
        migrations.RemoveField(
            model_name='smartctl',
            name='Type',
        ),
        # Re-home them on a dedicated Attribute model.
        migrations.CreateModel(
            name='Attribute',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Date', models.DateField(default=datetime.date.today, verbose_name='Date')),
                ('Current', models.CharField(help_text='Current', max_length=30, null=True)),
                ('Type', models.CharField(help_text='Type', max_length=30, null=True)),
                ('RawValue', models.CharField(help_text='RawValue', max_length=30, null=True)),
                ('smartctl', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='web.smartctl')),
            ],
        ),
    ]
|
16,560 | b44425d125cb0ee7ceb295eea01fd2cb5c618152 | from scrapy import cmdline
if __name__ == '__main__':
    # Equivalent to running `scrapy crawl jdSpider` from the shell.
    cmdline.execute('scrapy crawl jdSpider'.split())
|
16,561 | 667b30a763b29a5a32a6a7e8d28725fc8cc94cb9 | import numpy as np
## from qiskit_textbook.tools import array_to_latex
class Tr:
    """Compute the trace or partial traces of a 4x4 (two-qubit) matrix.

    Example:
        ans = Tr(M).p("b")

    The argument to ``p`` selects the reduction: "a" keeps the second
    index pair and traces the quadrant index, "b" keeps the quadrant index
    and traces the inner one; with no argument the plain trace is returned.
    """

    def __init__(self, M):
        self.M = M
        self.div_mat = self._division_mat(self.M)

    def p(self, flag=None):
        """Return np.trace(M) when *flag* is None, else the 2x2 partial
        trace selected by flag "a" or "b"."""
        if flag is None:  # idiom fix: compare to None with `is`
            return np.trace(self.M)
        self.flag = flag
        # BUG FIX: the deprecated alias np.complex was removed in NumPy 1.24
        # and raised AttributeError; the builtin complex is the replacement.
        self.partial_tr = np.zeros((2, 2), dtype=complex)
        c = 0
        # k walks the four quadrants in row-major order, matching div_mat.
        for k in [(0, 0), (0, 1), (1, 0), (1, 1)]:
            a = self.div_mat[c]
            c += 1
            for i in [0, 1]:
                for j in [0, 1]:
                    if self.flag == "a": self.partial_tr += a[i, j] * np.trace(
                        np.dot(self._ket(k[0]), self._bra(k[1]))) * np.dot(self._ket(i), self._bra(j))
                    if self.flag == "b": self.partial_tr += a[i, j] * np.dot(self._ket(k[0]),
                                                                             self._bra(k[1])) * np.trace(
                        np.dot(self._ket(i), self._bra(j)))
        ## return array_to_latex(self.partial_tr) ## If you want to use the tex format, uncomment it!
        return self.partial_tr

    def _bra(self, i):
        """Row basis vector <i| for i in {0, 1}."""
        if i == 0:
            return np.array([[1, 0]])
        elif i == 1:
            return np.array([[0, 1]])
        raise ValueError("basis index must be 0 or 1")  # was an UnboundLocalError

    def _ket(self, j):
        """Column basis vector |j> for j in {0, 1}."""
        if j == 0:
            return np.array([[1], [0]])
        elif j == 1:
            return np.array([[0], [1]])
        raise ValueError("basis index must be 0 or 1")

    def _division_mat(self, M):
        """Split M into its four 2x2 quadrants, returned in row-major order.

        BUG FIX: the quadrants were bound to misleading attribute names
        (M[0:2, 2:4] — the upper-right block — was called ``lower_right``,
        etc.); the tuple order ``p`` relies on is unchanged.
        """
        self.upper_left = M[0:2, 0:2]
        self.upper_right = M[0:2, 2:4]
        self.lower_left = M[2:4, 0:2]
        self.lower_right = M[2:4, 2:4]
        return (self.upper_left, self.upper_right, self.lower_left, self.lower_right)
if __name__ == '__main__':
    # Small demo: trace and both partial traces of 0..15 as a 4x4 matrix.
    demo = np.array(np.arange(16).reshape((4, 4)))
    print("Matrix: ")
    print(demo)
    print("Trace: ", Tr(demo).p())
    print("Trace_A: ")
    print(Tr(demo).p("a"))
    print("Trace_B: ")
    print(Tr(demo).p("b"))
16,562 | b98218bd0f579369ede39c2375b7b7753c96f405 | def errors(err_code):
err_dict = {
0: '์ ์์ฒ๋ฆฌ',
-10: '์คํจ',
-100: '์ฌ์ฉ์์ ๋ณด๊ตํ์คํจ',
-102: '๋ฒ์ ์ฒ๋ฆฌ์คํจ',
-103: '๊ฐ์ธ๋ฐฉํ๋ฒฝ์คํจ',
-104: '๋ฉ๋ชจ๋ฆฌ๋ณดํธ์คํจ',
-105: 'ํจ์์
๋ ฅ๊ฐ์ค๋ฅ',
-106: 'ํต์ ์ฐ๊ฒฐ์ข
๋ฃ',
-200: '์์ธ์กฐํ๊ณผ๋ถํ',
-201: '์ ๋ฌธ์์ฑ์ด๊ธฐํ์คํจ',
-202: '์ ๋ฌธ์์ฑ์
๋ ฅ๊ฐ์ค๋ฅ',
-203: '๋ฐ์ดํฐ์์',
-204: '์กฐํ๊ฐ๋ฅํ์ข
๋ชฉ์์ด๊ณผ',
-205: '๋ฐ์ดํฐ์์ ์คํจ',
-206: '์กฐํ๊ฐ๋ฅํFID์์ด๊ณผ',
-207: '์ค์๊ฐํด์ ์ค๋ฅ',
-300: '์
๋ ฅ๊ฐ์ค๋ฅ',
-301: '๊ณ์ข๋น๋ฐ๋ฒํธ์์',
-302: 'ํ์ธ๊ณ์ข์ฌ์ฉ์ค๋ฅ',
-303: '์ฃผ๋ฌธ๊ฐ๊ฒฉ์ด20์ต์์์ด๊ณผ',
-304: '์ฃผ๋ฌธ๊ฐ๊ฒฉ์ด50์ต์์์ด๊ณผ',
-305: '์ฃผ๋ฌธ์๋์ด์ด๋ฐํ์ฃผ์์1%์ด๊ณผ์ค๋ฅ',
-306: '์ฃผ๋ฌธ์๋์ด์ด๋ฐํ์ฃผ์์3%์ด๊ณผ์ค๋ฅ',
-307: '์ฃผ๋ฌธ์ ์ก์คํจ',
-308: '์ฃผ๋ฌธ์ ์ก๊ณผ๋ถํ',
-309: '์ฃผ๋ฌธ์๋300๊ณ์ฝ์ด๊ณผ',
-310: '์ฃผ๋ฌธ์๋500๊ณ์ฝ์ด๊ณผ',
-340: '๊ณ์ข์ ๋ณด์์',
-500: '์ข
๋ชฉ์ฝ๋์์'
}
return err_dict(err_code) |
# Create (or overwrite) a file named "list" containing three numbered lines.
with open("list", mode="w") as f:  # "w" truncates/creates the file on every run
    f.write("1\n")
    f.write("2\n")  # an explicit "\n" is required to start a new line
    f.write("3\n")
|
16,564 | 20f39a17500620af161261a8c153f8c13fc13f7c | from django.apps import AppConfig
class AnnaConfig(AppConfig):
    """Django application configuration for the ``anna`` app."""
    name = 'anna'
|
16,565 | bfe968b656286f6dc098098acb83d9f212ef7702 | from copy import deepcopy
from random import randrange, random
from typing import List, Dict
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics.classification import accuracy_score, log_loss
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF, RationalQuadratic, ExpSineSquared, ConstantKernel, DotProduct, Matern
from sklearn import decomposition
from sklearn import manifold
from dataset import DataLoader, FILE_NAME
from useful import timeit
"""
References for GPC
http://scikit-learn.org/stable/modules/generated/sklearn.gaussian_process.GaussianProcessClassifier.html
http://scikit-learn.org/stable/auto_examples/gaussian_process/plot_gpc.html
http://scikit-learn.org/stable/auto_examples/gaussian_process/plot_gpc_iris.html
For kernels,
http://scikit-learn.org/stable/modules/generated/sklearn.gaussian_process.kernels.Matern.html
"""
class GPC:
    """K-fold Gaussian-process classification wrapper.

    Keeps one GaussianProcessClassifier per fold, each constructed with an
    independent deep copy of *kernel* so per-fold hyperparameter
    optimization cannot interfere across folds.
    """

    def __init__(self, kernel, loader: DataLoader = None, n_splits: int = 5, **kwargs):
        # Data
        self.loader = loader or DataLoader('{}.txt'.format(FILE_NAME), n_splits=n_splits)
        if not loader:
            # Only embed when we built the loader ourselves.
            self.loader.transform_x(manifold.TSNE, n_components=3)

        # Model
        # BUG FIX: this previously read the *loader* argument, which is None
        # when the caller relies on the default loader and crashed with
        # AttributeError; self.loader is always set by this point.
        self.gpc_dict: Dict[int, GaussianProcessClassifier] = dict()
        for k in range(self.loader.get_n_splits()):
            self.gpc_dict[k] = GaussianProcessClassifier(kernel=deepcopy(kernel), **kwargs)

        print('initialized with {}, n_splits: {}'.format(kernel, self.loader.get_n_splits()))

    @timeit
    def fit(self, fold):
        """Fit the fold's classifier on that fold's training split."""
        assert fold < self.loader.get_n_splits(), "fold >= {}".format(self.loader.get_n_splits())
        X_train, y_train, X_test, y_test = self.loader.get_train_test_xy(fold)
        self.gpc_dict[fold].fit(X_train, y_train)
        print('fit: {}'.format(self.gpc_dict[fold].kernel))

    @timeit
    def eval(self, fold, print_result=True):
        """Return (train_accuracy, test_accuracy) for an already-fitted fold."""
        assert fold < self.loader.get_n_splits(), "fold >= {}".format(self.loader.get_n_splits())
        X_train, y_train, X_test, y_test = self.loader.get_train_test_xy(fold)
        gpc = self.gpc_dict[fold]
        train_acc = accuracy_score(y_train, gpc.predict(X_train))
        test_acc = accuracy_score(y_test, gpc.predict(X_test))
        if print_result:
            print("Fold: {}, Kernel: {}".format(fold, gpc.kernel))
            print("Train Acc: {}".format(train_acc))
            print("Test Acc: {}".format(test_acc))
            print("=" * 10)
        return train_acc, test_acc

    def run(self, fold, print_result=True):
        """Fit then evaluate a single fold."""
        self.fit(fold)
        return self.eval(fold, print_result=print_result)

    def run_all(self, print_result=True):
        """Fit and evaluate every fold; return the per-fold accuracy lists."""
        train_acc_list, test_acc_list = [], []
        for fold in range(self.loader.get_n_splits()):
            train_acc, test_acc = self.run(fold, print_result=False)
            train_acc_list.append(train_acc)
            test_acc_list.append(test_acc)

        train_acc_avg = np.average(train_acc_list)
        train_acc_stdev = np.std(train_acc_list)
        test_acc_avg = np.average(test_acc_list)
        test_acc_stdev = np.std(test_acc_list)
        if print_result:
            print("K-fold: {}, Kernel: {}".format(self.loader.get_n_splits(), self.gpc_dict[0].kernel))
            print("Train Acc: {} (+/- {})".format(train_acc_avg, train_acc_stdev))
            print("Test Acc: {} (+/- {})".format(test_acc_avg, test_acc_stdev))
            print("=" * 10)
        return train_acc_list, test_acc_list
if __name__ == '__main__':
now_fold = 0
data_loader = DataLoader('{}.tsv'.format(FILE_NAME))
data_loader.transform_x(decomposition.PCA, n_components=80)
kernel_list = [
Matern(0.5) + Matern(1.5) + Matern(nu=2.5),
random() * Matern(0.5) + random() * Matern(1.5) + random() * Matern(nu=2.5),
random() * Matern(0.5) + random() * Matern(1.5) + random() * Matern(nu=2.5),
random() * Matern(0.5) + random() * Matern(1.5) + random() * Matern(nu=2.5),
]
results = []
for now_kernel in kernel_list:
try:
gp_classifier = GPC(
kernel=now_kernel,
loader=data_loader,
n_restarts_optimizer=2,
)
train_acc_result, test_acc_result = gp_classifier.run_all()
results.append((now_kernel, train_acc_result, test_acc_result))
except Exception as e:
print(e)
for k, tr, te in results:
print("\t".join(str(x) for x in [k, np.mean(tr), np.std(tr), np.mean(te), np.std(te)]))
|
def aniversariantes_de_setembro(dic_aniversariantes):
    """Return the name -> date entries whose month field (characters 3-4 of
    a dd/mm/... date string) equals "09" (September)."""
    return {nome: data
            for nome, data in dic_aniversariantes.items()
            if data[3:5] == "09"}
class Solution:
    def replaceSpaces(self, S: str, length: int) -> str:
        """URLify: return the first *length* characters of S with every
        space replaced by '%20'; characters beyond *length* (the trailing
        padding) are ignored."""
        encoded = ['%20' if ch == ' ' else ch for ch in S[:length]]
        return ''.join(encoded)
|
16,568 | 8e814f0beec992b4e86e5c54cb23ae619549eea8 | #!/usr/bin/python
# Create MCP patches between releases of Forge from git history
import subprocess, os
srcRoot = "../MinecraftForge"
outDir = "../jars/upstream-patches/forge" # relative to srcRoot
startCommit = "feca047114562c2ec2ec6be42e3ffd7c09a9a94d" # build 528, Update FML to 556..
#startCommit = "6673844c54b8de0ebe4cba601b6505ec0e3dda3f" # build 524, Fix ServerBrand retriever..
#startCommit = "0f3bd780e17baf3fcccc8f594337556e2368fe35" # build 518, Merge branch 'master' into TESRculling.. 2013-02-29
#startCommit = "fb87773c3ab77522a27651dcf20066277bb5e88d" # Added input getters for..
#startCommit = "f06e0be5e59723808305f4c4aeb89c9108c79230" # We try and log a message.. - last commit of Forge 516
#startCommit = "f20ea649c6fbf4e49ccb857e6ea9d3333cf6d6a9" # Attempt to fix a possible NPE in the...
#startCommit = "3a9c7b4532240b70dac5f72082cbcedc0dd41335" # build 497, released 2013-01-01
patchBranch = "mcppatch"
masterBranch = "master"
shouldPullLatestChanges = True
shouldCheckoutMaster = True
shouldBuildInitial = True
shouldBuildPatches = True
shouldRewritePaths = True
def build():
print "Starting build..."
print "Rerun setup first?"
if True or raw_input().startswith("y"):
print "Rerunning setup..." # TODO: automatic, if commit changes FML.. or always?
run("py setup.py") # installs MCP, decompiles
print "Continue?"
#raw_input()
run("py release.py") # patches MCP source
# TODO: pass --force to mcp cleanup to avoid confirmation prompt
print "Complete"
def run(cmd):
    """Echo *cmd*, run it through the shell, and abort via assert on a
    non-zero exit status.

    NOTE(review): Python 2 syntax (print statement); os.system goes through
    the shell, so *cmd* must be trusted.
    """
    print ">",cmd
    #raw_input()
    status = os.system(cmd)
    assert status == 0, "Failed to run %s: status %s" % (cmd, status)
def runOutput(cmd):
    """Run *cmd* (an argv sequence, no shell) and return its stdout as one string.

    NOTE(review): stderr is piped but never read — a command writing lots of
    stderr output could fill the pipe and deadlock.
    """
    return subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE).stdout.read()
def buildMCPPatch(commit):
# Get the header from the original commit
header = runOutput(("git", "show", commit,
"--format=email", # email format is for git-format-diff/git-am
"--stat")) # omit the actual diff (since it isn't remapped), only show stats
print header
run("git add -f mcp/src") # the meat
# Compare against rename of previous commit
cmd = ("git", "diff", patchBranch)
diff = runOutput(cmd)
#print "Waiting"
#raw_input()
return header + "\n" + diff
def clean():
# Clean out even non-repository or moved files
pass
# TODO: needed?
def readCommitLog():
# Get commit IDs and messages after the starting commit, in reverse chronological order
commits = []
for line in runOutput(("git", "log", "--format=oneline")).split("\n"):
assert len(line) != 0, "Reached end of commit log without finding starting commit "+startCommit
commit, message = line.split(" ", 1)
if commit == startCommit: break
print commit, message
assert not (message.startswith("Automated") and message.endswith("build")), "Unclean starting point - already has automated commits! Reset and retry."
commits.append((commit, message))
commits.reverse()
return commits
def saveBranch():
# Save our remapped changes to the branch for comparison reference
try:
run("git branch -D "+patchBranch)
except:
pass
run("git checkout -b "+patchBranch)
run("git add -f mcp/src") # the meat
run("git commit mcp/src -m 'Automated build'")
def main():
if os.path.basename(os.getcwd()) != os.path.basename(srcRoot): os.chdir(srcRoot)
if shouldCheckoutMaster:
clean()
run("git checkout "+masterBranch)
if shouldPullLatestChanges:
# Get all the latest changes
run("git pull origin "+masterBranch)
if shouldCheckoutMaster:
run("git checkout "+masterBranch)
commits = readCommitLog()
if shouldBuildInitial:
# Start from here, creating an initial remap on a branch for the basis of comparison
run("git checkout "+startCommit)
build()
saveBranch()
if shouldBuildPatches:
# Build Forge, generating patches from MCP source per commit
n = 0
for commitInfo in commits:
commit, message = commitInfo
n += 1
safeMessage = "".join(x if x.isalnum() else "_" for x in message)
filename = "%s/%.4d-%s-%s" % (outDir, n, commit, safeMessage)
filename = filename[0:200]
print "\n\n*** %s %s" % (commit, message)
clean()
run("git checkout "+commit)
build()
patch = buildMCPPatch(commit)
file(filename, "w").write(patch)
# Save for comparison to next commit
saveBranch()
if shouldRewritePaths:
for filename in sorted(os.listdir(outDir)):
if filename[0] == ".": continue
print filename
path = os.path.join(outDir, filename)
if not os.path.isfile(path): continue
lines = file(path).readlines()
# Clean up patch, removing stat output
# TODO: find out how to stop git show from outputting it in the first place
statLine = None
for i, line in enumerate(lines):
if "files changed, " in line:
statLine = i
break
if statLine is None:
print "Skipping",path # probably already processed
continue
i = statLine - 1
while True:
assert i > 0, "Could not find patch description in %s" % (path,)
if len(lines[i].strip()) == 0: break # blank line separator
i -= 1
lines = lines[:i] + lines[statLine + 1:]
# Fix paths, Forge to MCPC+
for i, line in enumerate(lines):
lines[i] = line.replace("mcp/src/minecraft", "src/minecraft")
# TODO: remove extraneous files (patches of patches)..but requires more
# intelligent diff parsing :(
file(path, "w").write("".join(lines))
if __name__ == "__main__":
main()
|
16,569 | 4f38800a41bb89aeb094729097ddda42360c6dc4 | import testlib
fee = 20
initialsend = 200000
capacity = 1000000
def run_test(env):
bc = env.bitcoind
lit1 = env.lits[0]
lit2 = env.lits[1]
# Connect the nodes.
lit1.connect_to_peer(lit2)
# First figure out where we should send the money.
addr1 = lit1.make_new_addr()
print('Got lit1 address:', addr1)
# Send a bitcoin.
bc.rpc.sendtoaddress(addr1, 1)
env.generate_block()
# Log it to make sure we got it.
bal1 = lit1.get_balance_info()['TxoTotal']
print('initial lit1 balance:', bal1)
# Set the fee so we know what's going on.
lit1.rpc.SetFee(Fee=fee, CoinType=testlib.REGTEST_COINTYPE)
lit2.rpc.SetFee(Fee=fee, CoinType=testlib.REGTEST_COINTYPE)
print('fees set to', fee, '(per byte)')
# Now actually do the funding.
cid = lit1.open_channel(lit2, capacity, initialsend)
print('Created channel:', cid)
# Now we confirm the block.
env.generate_block()
print('Mined new block to confirm channel')
# Figure out if it's actually open now.
res = lit1.rpc.ChannelList(ChanIdx=cid)
cinfo = res['Channels'][0]
assert cinfo['Height'] == env.get_height(), "Channel height doesn't match new block."
# Make sure balances make sense
bals2 = lit1.get_balance_info()
print('new lit1 balance:', bals2['TxoTotal'], 'in txos,', bals2['ChanTotal'], 'in chans')
bal2sum = bals2['TxoTotal'] + bals2['ChanTotal']
print(' = sum ', bal2sum)
print(' -> diff', bal1 - bal2sum)
print(' -> fee ', bal1 - bal2sum - initialsend)
assert bals2['ChanTotal'] > 0, "channel balance isn't nonzero!"
|
16,570 | 81477d0fd93a7b847e1d1e1c1e91e248ad65c3d7 | from django.shortcuts import render, redirect
from django.http import HttpResponse
from .models import Food
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
# Create your views here.
def home(request):
    """Render the main page with all foods; authenticated users also get
    the foods currently in their cart."""
    ctx = {'foods': Food.objects.all()}
    user = request.user
    if user.is_authenticated:
        ctx['cart'] = Food.objects.filter(cart=user.cart)
    return render(request, 'food/mainpage.html', ctx)
@login_required
def handle_cart(request):
    """Add or remove a Food from the logged-in user's cart (via POST keys
    'add_to_cart' / 'remove_from_cart' holding the food index) and keep the
    cart's running total in sync, then redirect home."""
    if request.method == 'POST':
        cart = request.user.cart
        if request.POST.get('add_to_cart'):
            food = Food.objects.get(index = request.POST.get('add_to_cart') )
            cart.food_set.add(food)
            cart.total_price = cart.total_price + food.price
        elif request.POST.get('remove_from_cart'):
            food = Food.objects.get(index = request.POST.get('remove_from_cart') )
            cart.food_set.remove(food)
            cart.total_price = cart.total_price - food.price
        # BUG FIX: total_price lives on the Cart model, but only the *User*
        # was being saved (request.user.save()), so the updated total was
        # never persisted. Save the cart itself.
        cart.save()
    return redirect('home')
16,571 | 8d21ae539025bee059bfcf5b4b2ef8a656b1c31a | #!/usr/bin/env python
"""
Secret Santa script.
"""
import logging
import os
import random
from argparse import ArgumentParser
from email.mime.text import MIMEText
import yaml_utils
from log_utils import log_setup
from smtp_conn import SmtpConn
def opt_setup():
parser = ArgumentParser()
parser.add_argument(
"friends_file",
type=str,
help="List of friends' names and e-mail addresses (yaml format)"
)
parser.add_argument(
"--log-level",
type=str,
choices=["DEBUG", "INFO", "ERROR"],
default="INFO",
help="Set log level"
)
parser.add_argument(
"--really-send",
action="store_true",
help="Send e-mail for real"
)
parser.add_argument(
"--smtp-host",
type=str,
default="smtp.gmail.com:465",
help="SSL SMTP host"
)
parser.add_argument(
"--smtp-user",
type=str,
help="SMTP account name. Takes precedence over SMTP_USER"
)
parser.add_argument(
"--smtp-pass",
type=str,
help="SMTP password. Takes precedence over SMTP_PASS"
)
return parser
def send_santa(smtp, friend, lucky_one, really_send=False):
    """
    Notify a friend who he/she'll have to give a present to
    :param smtp: SmtpConn object (may be None when really_send is False)
    :param friend: the person who'll receive the Secret Santa email
    :param lucky_one: recipient of the friend's present
    :param really_send: if True, actually send the email message, defaults to False
    """
    logger = logging.getLogger("send_santa")
    # BUG FIX: the debug message was missing the closing parenthesis after
    # the e-mail address ("{} ({} will be...").
    logger.debug("{} ({}) will be notified that his/her lucky one is {}".format(
        friend["name"], friend["email"], lucky_one["name"]))
    msg = MIMEText(
        "Ho ho ho!\nThis is a Secret (Soup) Santa announcement!\n\n{friend}, your lucky one is {lucky_one}!\n\nHo ho ho!\nSanta".format(
            friend=friend['name'],
            lucky_one=lucky_one['name'],
        )
    )
    from_email = "noreply@santaclaus.pole.north"
    to_email = friend["email"]
    body = msg.as_string()
    if not really_send:
        # Dry run: log what would have been sent, touch no network.
        logger.info("Dry run:")
        logger.info("From: {}".format(from_email))
        logger.info("To: {}".format(to_email))
        logger.info("Body: {}".format(body))
    else:
        logger.info("Sending annoucement to {} ({})".format(friend["name"], to_email))
        result = smtp.sendmail(
            from_addr=from_email,
            to_addrs=to_email,
            msg=msg.as_string()
        )
        logger.debug(f"Sendmail result: {result}")
def get_option(value, env_var):
    """Return *value* stripped of whitespace if non-empty; otherwise fall
    back to the environment variable *env_var* (also stripped). Returns
    None when neither source provides a value."""
    candidate = value.strip() if value is not None else None
    if candidate:
        return candidate
    fallback = os.getenv(env_var)
    if fallback is not None:
        fallback = fallback.strip()
    return fallback
def main():
parser = opt_setup()
args = parser.parse_args()
log_setup(logging.getLevelName(args.log_level))
really_send = args.really_send
logger = logging.getLogger("main")
logger.info("Randomising dice...")
random.seed()
# load list of friendsWhe
friends_file = args.friends_file
logger.info(f"Loading friends list from: {friends_file}")
try:
friends = yaml_utils.loadfile(friends_file)
except Exception as e:
logger.error(f"Couldn't load list of friends: {e}")
return
logger.debug(f"Friends:\n{friends}")
# match friends
logger.info("Matching friends...")
random.shuffle(friends)
for k in range(0, len(friends) - 1):
friends[k]['send_to'] = friends[k + 1]
friends[len(friends) - 1]['send_to'] = friends[0]
# log matches
for friend in friends:
logger.debug("{0} ==> {1}".format(friend['name'], friend['send_to']['name']))
# initialise SMTP connection if requested
smtp = None
if really_send:
logger.info("Initialising SMTP connection...")
smtp = SmtpConn(args.smtp_host)
smtp_user = get_option(args.smtp_user, "SMTP_USER")
if not smtp_user:
logger.error("No SMTP username specified. Use --smtp-user or SMTP_USER env var")
smtp_pass = get_option(args.smtp_pass, "SMTP_PASS")
if not smtp_pass:
logger.error("No SMTP password specified. Use --smtp-pass or SMTP_PASS env var")
if not smtp_user or not smtp_pass:
exit(1)
smtp.login(smtp_user, smtp_pass)
# send or simulate the emails
for friend in friends:
send_santa(smtp, friend, friend['send_to'], really_send)
if __name__ == "__main__":
main()
|
16,572 | 7482081591821e13be2fa36d2795c8d9f28a8c64 | import numpy as np
from wafo.spectrum.models import (Bretschneider, Jonswap, OchiHubble, Tmaspec,
Torsethaugen, McCormick, Wallop)
def test_bretschneider():
S = Bretschneider(Hm0=6.5,Tp=10)
vals = S((0,1,2,3))
true_vals = np.array([ 0. , 1.69350993, 0.06352698, 0.00844783])
assert((np.abs(vals-true_vals)<1e-7).all())
def test_if_jonswap_with_gamma_one_equals_bretschneider():
S = Jonswap(Hm0=7, Tp=11,gamma=1)
vals = S((0,1,2,3))
true_vals = np.array([ 0. , 1.42694133, 0.05051648, 0.00669692])
assert((np.abs(vals-true_vals)<1e-7).all())
w = np.linspace(0,5)
S2 = Bretschneider(Hm0=7, Tp=11)
#JONSWAP with gamma=1 should be equal to Bretscneider:
assert(np.all(np.abs(S(w)-S2(w))<1.e-7))
def test_tmaspec():
S = Tmaspec(Hm0=7, Tp=11,gamma=1,h=10)
vals = S((0,1,2,3))
true_vals = np.array([ 0. , 0.70106233, 0.05022433, 0.00669692])
assert((np.abs(vals-true_vals)<1e-7).all())
def test_torsethaugen():
S = Torsethaugen(Hm0=7, Tp=11,gamma=1,h=10)
vals = S((0,1,2,3))
true_vals = np.array([ 0. , 1.19989709, 0.05819794, 0.0093541 ])
assert((np.abs(vals-true_vals)<1e-7).all())
vals = S.wind(range(4))
true_vals = np.array([ 0. , 1.13560528, 0.05529849, 0.00888989])
assert((np.abs(vals-true_vals)<1e-7).all())
vals = S.swell(range(4))
true_vals = np.array([ 0. , 0.0642918 , 0.00289946, 0.00046421])
assert((np.abs(vals-true_vals)<1e-7).all())
def test_ochihubble():
S = OchiHubble(par=2)
vals = S(range(4))
true_vals = np.array([ 0. , 0.90155636, 0.04185445, 0.00583207])
assert((np.abs(vals-true_vals)<1e-7).all())
def test_mccormick():
S = McCormick(Hm0=6.5,Tp=10)
vals = S(range(4))
true_vals = np.array([ 0. , 1.87865908, 0.15050447, 0.02994663])
assert((np.abs(vals-true_vals)<1e-7).all())
def test_wallop():
S = Wallop(Hm0=6.5, Tp=10)
vals = S(range(4))
true_vals = np.array([ 0.00000000e+00, 9.36921871e-01, 2.76991078e-03,
7.72996150e-05])
assert((np.abs(vals-true_vals)<1e-7).all())
if __name__ == '__main__':
#main()
import nose
# nose.run()
test_tmaspec() |
16,573 | 297ef9d9fac46572f77faab35c5a3bb9038021f5 | import os
import shutil
import time
train_folder_name = 'train'
validation_folder_name = 'val'
test_folder_name = 'test'
CURRENT_FOLDER = os.path.dirname(os.path.abspath(__file__))
def prepare_folders_and_data(path_src, path_dest, make_test_folder=False):
print("DATA FOLDER: {}".format(path_dest))
for (dirpath, dirnames, filenames) in os.walk(path_src):
if filenames:
dir_name = os.path.basename(dirpath)
prepare_dir("{}/{}".format(path_dest, dir_name))
number_of_files = len(filenames) - 10
leaf_type = dir_name.split("___")[0]
condition = ''
if "healthy" in dir_name:
condition = "healthy"
else:
condition = "unhealthy"
if make_test_folder:
path_to_test_folder = os.path.join(CURRENT_FOLDER, "data_set", test_folder_name, dir_name)
prepare_dir(os.path.join(CURRENT_FOLDER, "data_set", test_folder_name, dir_name))
for cnt, file in enumerate(filenames):
path_fo_file = os.path.join(dirpath, file)
print("COPY FILE FROM {}".format(path_fo_file))
renamed_file_name = "{}_{}_{}.JPG".format(leaf_type, condition, cnt)
if cnt < number_of_files:
pth = os.path.join(path_dest, dir_name, renamed_file_name)
shutil.copyfile(path_fo_file, pth)
else:
if make_test_folder:
pth = os.path.join(path_to_test_folder, renamed_file_name)
shutil.copyfile(path_fo_file, pth)
else:
pth = os.path.join(path_dest, dir_name, renamed_file_name)
shutil.copyfile(path_fo_file, pth)
def prepare_dir(path):
    """Create directory *path*, logging success or failure instead of raising.

    NOTE(review): the 1-second sleep before mkdir looks like a workaround for
    slow filesystem propagation — confirm whether it is still needed; it adds
    a second per created directory.
    """
    print("MKDIR IN {}".format(path))
    try:
        time.sleep(1)
        os.mkdir(path)
    except OSError:
        # Most commonly: directory already exists or parent is missing.
        print("Creation of the directory %s failed" % path)
    else:
        print("Successfully created the directory %s " % path)
prepare_folders_and_data("PlantVillage/train", os.path.join(CURRENT_FOLDER, "data_set", train_folder_name), make_test_folder=True)
prepare_folders_and_data("PlantVillage/val", os.path.join(CURRENT_FOLDER, "data_set", validation_folder_name), make_test_folder=False)
prepare_folders_and_data("PlantVillage/val", os.path.join(CURRENT_FOLDER, "data_set", test_folder_name), make_test_folder=False)
|
def locate_all(string, sub):
    """Return the start indices of non-overlapping occurrences of *sub* in
    *string*, scanning left to right."""
    found = []
    pos = 0
    step = len(sub)
    while pos < len(string):
        if string.startswith(sub, pos):
            found.append(pos)
            pos += step
        else:
            pos += 1
    return found
# Here are a couple function calls to test with.
print(locate_all('cookbook', 'ook'))
# [1, 5]
print(locate_all('yesyesyes', 'yes'))
# [0, 3, 6]
print(locate_all('the upside down', 'barb'))
# []
|
16,575 | 27f58e378a83911cbea1cea85697486293d3bc7e | #!/usr/bin/env python
# coding: utf-8
# In[1]:
from __future__ import print_function
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
import tensorflow as tf
#from keras.layers import Input, Dense, Lambda
#from keras.models import Model
#from keras import backend as K
#from keras import metrics
#from keras.datasets import mnist
#from keras.layers import Dense, Dropout, Flatten
#from keras.layers import Conv2D, MaxPooling2D
#from keras.layers import Conv2DTranspose,Reshape
#import keras.layers as layers
from PIL import Image, ImageOps
import os
from sklearn.metrics import accuracy_score, precision_score, recall_score
from sklearn.model_selection import train_test_split
from tensorflow.keras import layers, losses
from tensorflow.keras.models import Model
from tensorflow.keras import backend as K
from tensorflow.keras import metrics
# ## Load the dataset
# In[2]:
PATH = "./mergedConstellationsEasyStars2Outline_Large/"
checkpoint_path = "./training_ckpt/cp-{epoch:04d}.ckpt"
imageSaveDir = "./runVAE_Easy2Outline"
if not os.path.exists(imageSaveDir):
os.makedirs(imageSaveDir)
# In[3]:
#get_ipython().run_line_magic('matplotlib', 'inline')
IMG_WIDTH = 256
IMG_HEIGHT = 256
batch_size = 256
original_dim = IMG_WIDTH*IMG_HEIGHT
latent_dim = 512
EPOCHS = epochs = 50
epsilon_std = 1.0
BUFFER_SIZE = 400
# In[4]:
def load(image_file):
    """Load a side-by-side training jpg and split it into its two halves.

    The right half is returned as the input image and the left half as the
    target ("real") image, both cast to float32 tensors.
    """
    image = tf.io.read_file(image_file)
    image = tf.image.decode_jpeg(image)

    w = tf.shape(image)[1]
    w = w // 2
    real_image = image[:, :w, :]
    input_image = image[:, w:, :]

    # BUG FIX: removed a leftover debug print(type(real_image)); inside a
    # tf.data pipeline it only ever printed the traced tensor type once.
    input_image = tf.cast(input_image, np.float32)
    real_image = tf.cast(real_image, np.float32)

    return input_image, real_image
# In[5]:
inp, re = load(PATH+'train/object_0_0.jpg')
# casting to int for matplotlib to show the image
plt.figure()
plt.imshow(np.asarray(inp)/255.0)
plt.figure()
plt.imshow(np.asarray(re)/255.0)
# In[6]:
def resize(input_image, real_image, height, width):
input_image = tf.image.resize(input_image, [height, width],
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
real_image = tf.image.resize(real_image, [height, width],
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
return input_image, real_image
# In[7]:
def random_crop(input_image, real_image):
stacked_image = tf.stack([input_image, real_image], axis=0)
cropped_image = tf.image.random_crop(
stacked_image, size=[2, IMG_HEIGHT, IMG_WIDTH, 3])
return cropped_image[0], cropped_image[1]
# In[8]:
@tf.function()
def random_jitter(input_image, real_image):
# resizing to 286 x 286 x 3
input_image, real_image = resize(input_image, real_image, 286, 286)
# randomly cropping to 256 x 256 x 3
input_image, real_image = random_crop(input_image, real_image)
if tf.random.uniform(()) > 0.5:
# random mirroring
input_image = tf.image.flip_left_right(input_image)
real_image = tf.image.flip_left_right(real_image)
return input_image, real_image
# In[9]:
# normalizing the images to [0, 1]
def normalize(input_image, real_image):
    """Rescale both images from the [0, 255] pixel range to [0, 1]."""
    scale = 255.0
    return input_image / scale, real_image / scale
# In[10]:
def load_image_train(image_file):
input_image, real_image = load(image_file)
input_image, real_image = random_jitter(input_image, real_image)
input_image, real_image = normalize(input_image, real_image)
return input_image, real_image
# In[11]:
def load_image_test(image_file):
input_image, real_image = load(image_file)
input_image, real_image = resize(input_image, real_image,
IMG_HEIGHT, IMG_WIDTH)
input_image, real_image = normalize(input_image, real_image)
return input_image, real_image
# ## Input Pipeline
# In[12]:
train_dataset = tf.data.Dataset.list_files(PATH+'train/*.jpg')
train_dataset = train_dataset.map(load_image_train)
train_dataset = train_dataset.shuffle(BUFFER_SIZE)
train_dataset = train_dataset.batch(1)
# In[13]:
test_dataset = tf.data.Dataset.list_files(PATH+'test/*.jpg')
test_dataset = test_dataset.map(load_image_test)
test_dataset = test_dataset.batch(1)
# ## Convert to numpy
# In[14]:
train_np = np.stack(list(train_dataset))
test_np = np.stack(list(test_dataset))
# In[15]:
noise_train = np.squeeze(train_np[:,0,:,:,:])
x_train = np.squeeze(train_np[:,1,:,:,:])
# In[16]:
noise_test = np.squeeze(test_np[:,0,:,:,:])
x_test = np.squeeze(test_np[:,1,:,:,:])
# In[17]:
from tensorflow.python.framework.ops import disable_eager_execution
disable_eager_execution()
# In[18]:
#encoder part
# 256x256x3 image in; five stride-2 conv blocks halve the spatial size
# each time (256 -> 8) before projecting to the latent statistics.
x_noise = layers.Input(shape=(256, 256, 3,), name='input_layer')
# Block-1
x = layers.Conv2D(32, kernel_size=3, strides= 2, padding='same', name='conv_1')(x_noise)
x = layers.BatchNormalization(name='bn_1')(x)
x = layers.LeakyReLU(name='lrelu_1')(x)
# Block-2
x = layers.Conv2D(64, kernel_size=3, strides= 2, padding='same', name='conv_2')(x)
x = layers.BatchNormalization(name='bn_2')(x)
x = layers.LeakyReLU(name='lrelu_2')(x)
# Block-3
x = layers.Conv2D(64, 3, 2, padding='same', name='conv_3')(x)
x = layers.BatchNormalization(name='bn_3')(x)
x = layers.LeakyReLU(name='lrelu_3')(x)
# Block-4
x = layers.Conv2D(64, 3, 2, padding='same', name='conv_4')(x)
x = layers.BatchNormalization(name='bn_4')(x)
x = layers.LeakyReLU(name='lrelu_4')(x)
# Block-5
x = layers.Conv2D(64, 3, 2, padding='same', name='conv_5')(x)
x = layers.BatchNormalization(name='bn_5')(x)
x = layers.LeakyReLU(name='lrelu_5')(x)
# Final Block
flatten = layers.Flatten()(x)
z_mean = layers.Dense(512, name='mean')(flatten)
z_log_var = layers.Dense(512, name='log_var')(flatten)
#reparameterization trick
def sampling(args):
    """Draw z ~ N(z_mean, exp(z_log_var)) via the reparameterization trick.

    NOTE(review): relies on module-level `latent_dim` and `epsilon_std`
    defined earlier in the file — confirm they match Dense(512) above.
    """
    z_mean, z_log_var = args
    epsilon = K.random_normal(shape=(K.shape(z_mean)[0], latent_dim), mean=0.,
                              stddev=epsilon_std)
    return z_mean + K.exp(z_log_var / 2) * epsilon
# note that "output_shape" isn't necessary with the TensorFlow backend
z = layers.Lambda(sampling, output_shape=(latent_dim,))([z_mean, z_log_var])
# In[19]:
#decoder part
# we instantiate these layers separately so as to reuse them later
# Mirror of the encoder: latent vector -> 8x8x64 feature map
# (Dense(4096) == 8*8*64), then five stride-2 transposed-conv blocks back
# up to a 256x256x3 output in [0, 1] (sigmoid).
z= layers.Reshape([1,1,latent_dim])(z)
x = layers.Dense(4096)(z)
x = layers.Reshape((8,8,64))(x)
# Block-1
x = layers.Conv2DTranspose(64, 3, strides= 2, padding='same')(x)
x = layers.BatchNormalization()(x)
x = layers.LeakyReLU()(x)
# Block-2
x = layers.Conv2DTranspose(64, 3, strides= 2, padding='same')(x)
x = layers.BatchNormalization()(x)
x = layers.LeakyReLU()(x)
# Block-3
x = layers.Conv2DTranspose(64, 3, 2, padding='same')(x)
x = layers.BatchNormalization()(x)
x = layers.LeakyReLU()(x)
# Block-4
x = layers.Conv2DTranspose(32, 3, 2, padding='same')(x)
x = layers.BatchNormalization()(x)
x = layers.LeakyReLU()(x)
# Block-5
x_out = layers.Conv2DTranspose(3, 3, 2,padding='same', activation='sigmoid')(x)
# instantiate VAE model
vae = Model(x_noise, x_out)
vae.summary()
# In[20]:
#from keras.utils.vis_utils import plot_model
#plot_model(vae, to_file='model1.png',show_shapes=True)
# In[21]:
# Compute VAE loss
def VAE_loss(x_origin,x_out):
    """Reconstruction loss (binary cross-entropy scaled by original_dim)
    plus the KL divergence of q(z|x) from the unit Gaussian prior.

    NOTE(review): closes over the graph tensors z_mean / z_log_var built
    above and the module-level `original_dim` defined earlier — this is
    why eager execution is disabled.
    """
    x_origin=K.flatten(x_origin)
    x_out=K.flatten(x_out)
    xent_loss = original_dim * metrics.binary_crossentropy(x_origin, x_out)
    kl_loss = - 0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
    vae_loss = K.mean(xent_loss + kl_loss)
    return vae_loss
vae.compile(optimizer='adam', loss=VAE_loss)
# In[22]:
# Train to reconstruct the clean image from its noisy counterpart.
vae.fit(noise_train,x_train,
        shuffle=True,
        epochs=epochs,
        batch_size=batch_size,
        validation_data=(noise_test, x_test))
# In[30]:
def generate_images_test(x_testImg, noise_testImg, x_outImg, saveFolder, index):
inp = Image.fromarray((noise_testImg*255).astype(np.uint8))
inp.save(f"{saveFolder}/input_{index}.jpg")
tar = Image.fromarray((x_testImg*255).astype(np.uint8))
tar.save(f"{saveFolder}/target_{index}.jpg")
pred = Image.fromarray((x_outImg*255).astype(np.uint8))
pred.save(f"{saveFolder}/predictions_{index}.jpg")
# In[31]:
# Run the trained VAE over the test set and dump the first 100
# (input, target, prediction) triplets for visual inspection.
saveDir = PATH+"predictions_vae"
x_out=vae.predict(x_test)
for i in range(100):
    generate_images_test(x_test[i], noise_test[i], x_out[i], saveDir, i)
|
16,576 | ffb07b87e7a044943ba909306d4cd0fde08f18a9 | from App.UI import create_app
from config import TestConfig
import unittest
import flask_unittest
from App.Data.Models.users import User
class TestRegistration(flask_unittest.ClientTestCase):
    """End-to-end tests for the /signup flow.

    flask_unittest.ClientTestCase injects a fresh test `client` into every
    test method; assertStatus/assertLocationHeader are its HTTP helpers.
    """
    # One shared application instance for the whole class, built with the
    # test configuration.
    app = create_app(config_class=TestConfig)
    def setUp(self, client) -> None:
        pass
    @staticmethod
    def assert_flashes(client, expected_message, expected_category='message'):
        """Assert the first flashed message contains *expected_message* with
        *expected_category*; fail if nothing was flashed at all."""
        with client.session_transaction() as session:
            try:
                # Flask stores flashes as (category, message) in '_flashes'.
                category, message = session['_flashes'][0]
            except KeyError:
                raise AssertionError('nothing flashed')
            assert expected_message in message
            assert expected_category == category
    @staticmethod
    def create_user():
        """Return a fresh signup form payload used by every test."""
        return {
            "full_name": "test testsson",
            "user_name": "wack_a_tree",
            "email": "please_be_unique@mail.com",
            "password": "secret",
            "confirm_password": "secret"}
    def test_signup(self, client):
        # A successful signup redirects (302) to the index and persists the user.
        user = self.create_user()
        response = client.post('/signup', data=user)
        self.assertStatus(response, 302)
        self.assertLocationHeader(response, 'http://localhost/')
        self.assertEqual(user['user_name'], User.find(user_name=user['user_name']).first_or_none().user_name)
    def test_signup_not_same_password(self, client):
        # Mismatched password/confirmation must flash an error.
        user = self.create_user()
        user['password'] = '123'
        client.post('/signup', data=user)
        self.assert_flashes(client, 'password are not the same')
    def test_email_already_exists(self, client):
        # NOTE(review): assumes this email pre-exists in the test DB fixture.
        user = self.create_user()
        user['email'] = 'destroyer@discgolf.com'
        client.post('/signup', data=user)
        self.assert_flashes(client, 'Email already exists')
    def test_username_already_exists(self, client):
        # NOTE(review): assumes this username pre-exists in the test DB fixture.
        user = self.create_user()
        user['user_name'] = 'Mcbeast'
        client.post('/signup', data=user)
        self.assert_flashes(client, 'Username already exists')
    def tearDown(self, client) -> None:
        # Remove the user created by test_signup so repeated runs stay clean.
        user = self.create_user()
        User.delete_one(user_name=user['user_name'])
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
|
def normalize(name):
    """Return *name* with its first character upper-cased and the rest
    lower-cased (e.g. 'barT' -> 'Bart').

    Fixed: the previous index-based implementation raised IndexError on the
    empty string; str.capitalize() performs the identical transformation and
    returns '' unchanged.
    """
    return name.capitalize()
# Demonstration: normalize a list of inconsistently-cased names and print it.
L1 = ['adam', 'LISA', 'barT']
L2 = [normalize(name) for name in L1]
print(L2)
16,578 | df6727ec863b50123e7ebc96cb418df317cac2a1 | # -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2017, Gokturk Gok & Nurefsan Sarikaya.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Adds Wikipedia search support to the auto-tagger. Requires the
BeautifulSoup library.
"""
from __future__ import division, absolute_import, print_function
from beets.autotag.hooks import Distance
from beets.plugins import BeetsPlugin
from requests.exceptions import ConnectionError
import time
import urllib.request
from bs4 import BeautifulSoup
# Infobox row labels we extract album metadata from.
info_boxList = ['Released', 'Genre', 'Length', 'Label']


# -----------------------------------------------------------------------
# is Info Box check
def is_in_info(liste):
    """Return True if *liste* is non-empty and its first element is one of
    the known infobox labels (Released/Genre/Length/Label)."""
    return bool(liste) and liste[0] in info_boxList
def get_track_length(duration):
    """
    Returns the track length in seconds for a wiki duration.

    *duration* must look like "MM:SS"; any other format yields None.
    """
    try:
        parsed = time.strptime(duration, '%M:%S')
    except ValueError:
        return None
    return parsed.tm_min * 60 + parsed.tm_sec
# ----------------------------------------------------------------------
"""
WikiAlbum class is being served like AlbumInfo object which keeps all album
meta data in itself.
"""
class WikiAlbum(object):
    """AlbumInfo-like container populated by scraping a Wikipedia album page.

    Tries several URL patterns in order — "<album>_(<artist>_album)",
    "<album>_(album)", plain "<album>", and a variant with the last 9
    characters stripped (for names ending in "(Deluxe)") — then parses the
    infobox (release date, label, length, genre) and the tracklist tables
    into Track objects.

    Raises urllib.error.HTTPError when no URL variant resolves and
    ConnectionError on network failure.
    """
    def __init__(self, artist, album_name):
        self.album = album_name
        self.artist = artist
        self.tracks = []
        self.album_length = ""
        self.label = None
        self.year = None
        self.data_source = "Wikipedia"
        self.data_url = ""
        # The remaining attributes mirror beets' AlbumInfo interface and are
        # mostly placeholders this source cannot supply.
        self.album_id = 1
        self.va = False
        self.artist_id = 1
        self.asin = None
        self.albumtype = None
        self.year = None
        self.month = None
        self.day = None
        self.mediums = 1
        self.artist_sort = None
        self.releasegroup_id = None
        self.catalognum = None
        self.script = None
        self.language = None
        self.country = None
        self.albumstatus = None
        self.media = None
        self.albumdisambig = None
        self.artist_credit = None
        self.original_year = None
        self.original_month = None
        self.original_day = None
        try:
            url = 'https://en.wikipedia.org/wiki/' + album_name + \
                  '_(' + artist + '_album)'
            html = urllib.request.urlopen(url).read()
        except urllib.error.HTTPError:
            try:
                url = 'https://en.wikipedia.org/wiki/' + album_name + \
                      '_(album)'
                html = urllib.request.urlopen(url).read()
            except urllib.error.HTTPError:
                try:
                    url = 'https://en.wikipedia.org/wiki/' + album_name
                    html = urllib.request.urlopen(url).read()
                except urllib.error.HTTPError:
                    try:
                        # in case of album name has (Deluxe) extension
                        url = 'https://en.wikipedia.org/wiki/' + \
                            album_name[:-9] + '_(' + artist + '_album)'
                        html = urllib.request.urlopen(url).read()
                    except urllib.error.HTTPError:
                        # FIX: `raise urllib.error.HTTPError` instantiated the
                        # exception class with no arguments (itself a
                        # TypeError); re-raise the caught exception instead.
                        raise
        except ConnectionError:
            # FIX: `raise ConnectionError` replaced the caught error with an
            # empty one; re-raise to keep the original details.
            raise
        self.data_url = url
        soup = BeautifulSoup(html, "lxml")
        # ------------------ INFOBOX PARSING ----------------------#
        info_box = soup.findAll("table", {"class": "infobox"})
        info_counter = 1
        for info in info_box:
            for row in info.findAll("tr"):
                # NOTE(review): assumes the artist name is the last word of
                # the third infobox when none was supplied — confirm.
                if (self.artist == "" and info_counter == 3):
                    self.artist = row.getText().split()[-1]
                data = (row.getText()).split('\n')
                data = list(filter(None, data))
                if (is_in_info(data)):
                    if(data[0] == 'Label'):
                        self.label = str(data[1:])
                    elif(data[0] == 'Released'):
                        # Dates either end in "(YYYY-MM-DD)" or in the year.
                        if (data[1][-1] == ")"):
                            self.year = int(data[1][-11:-7])
                            self.month = int(data[1][-6:-4])
                            self.day = int(data[1][-3:-1])
                        else:
                            self.year = int(data[1][-4:])
                    # Album length which is converted into beets length format
                    elif(data[0] == "Length"):
                        self.album_length = get_track_length(data[1])
                    # getting Genre
                    elif(data[0] == "Genre"):
                        # Keep the genre text up to the first footnote or
                        # bracket character.
                        fixed_genre = ""
                        for character in data[1]:
                            # FIX: the original condition compared against
                            # ("[" or "{" or "("), which short-circuits to
                            # just "[", so "{" and "(" never ended the scan.
                            if character not in "[{(":
                                fixed_genre += character
                            else:
                                break
                        self.genre = fixed_genre
            info_counter += 1
        track_tables = soup.findAll("table", {"class": "tracklist"})
        # set the MediumTotal,total number of tracks in an album is required
        track_counter = 0
        for table in track_tables:
            for row in table.findAll("tr"):
                row_data = (row.getText()).split('\n')
                row_data = list(filter(None, row_data))
                # pick only the tracks not irrelevant parts of the tables.
                # len(row_data) check is used for getting the correct
                # table data & checks track numbers whether it is exist or not
                if (row_data[0][:-1].isdigit() and len(row_data) > 3):
                    track_counter += 1
        for table in track_tables:
            for row in table.findAll("tr"):
                row_data = (row.getText()).split('\n')
                row_data = list(filter(None, row_data))
                # Same filter as above; now actually build the Track objects.
                if (row_data[0][:-1].isdigit() and len(row_data) > 3):
                    one_track = Track(row_data)
                    one_track.set_data_url(self.data_url)
                    one_track.set_medium_total(track_counter)
                    self.tracks.append(one_track)
    def get_album_len(self):
        """Return the infobox album length in seconds ("" when absent)."""
        return self.album_length
    def get_tracks(self):
        """Return the list of parsed Track objects."""
        return self.tracks
# keeps the metadata of tracks which are gathered from wikipedia
# like TrackInfo object in beets
class Track(object):
    """One track scraped from a Wikipedia tracklist table row.

    *row* holds the non-empty cell texts of the row:
    [track_number, title, writers, producer(s)..., duration].
    The attribute set mirrors beets' TrackInfo so instances can stand in
    for it during matching.
    """
    def __init__(self, row):
        #####
        self.medium = 1          # single-disc assumption
        self.disctitle = "CD"    # fixed placeholder disc title
        #####
        # row[0] looks like "3." — drop the trailing dot before parsing.
        self.medium_index = int(row[0][:-1])
        self.track_id = int(row[0][:-1])
        self.index = int(row[0][:-1])
        # wiping out the character (") from track name
        temp_name = ""
        for i in row[1]:
            if(i != '"'):
                temp_name += i
        self.title = str(temp_name)
        self.writer = list(row[2].split(','))
        self.producers = row[3:-1]
        self.length = get_track_length(row[-1])  # seconds, or None when unparsable
        # Placeholders mirroring beets' TrackInfo interface.
        self.artist = None
        self.artist_id = None
        self.media = None
        self.medium_total = None
        self.artist_sort = None
        self.artist_credit = None
        self.data_source = "Wikipedia"
        self.data_url = None
        self.lyricist = None
        self.composer = None
        self.composer_sort = None
        self.arranger = None
        self.track_alt = None
        self.track = self.index
        self.disc = self.medium
        self.disctotal = 2  # NOTE(review): hard-coded 2 while medium is 1 — confirm intent
        self.mb_trackid = self.track_id
        self.mb_albumid = None
        self.mb_album_artistid = None
        self.mb_artist_id = None
        self.mb_releasegroupid = None
        self.comp = 0
        self.tracktotal = None
        self.albumartist_sort = None
        self.albumartist_credit = None
    def set_medium_total(self, num):
        # Total number of tracks on the album (mirrored as track_total).
        self.medium_total = num
        self.track_total = num
    def set_data_url(self, url):
        # Wikipedia page this track was scraped from.
        self.data_url = url
    def get_name(self):
        return self.title
    def get_writer(self):
        return self.writer
    def get_producers(self):
        return self.producers
    def get_length(self):
        return self.length
class Wikipedia(BeetsPlugin):
    """Beets autotagger source plugin that proposes albums scraped from
    Wikipedia (see WikiAlbum above)."""
    def __init__(self):
        super(Wikipedia, self).__init__()
        self.config.add({
            # Distance penalty applied to candidates from this source.
            'source_weight': 0.50
        })
    # ----------------------------------------------
    """ Track_distance
    item --> track to be matched(Item Object)
    info is the TrackInfo object that proposed as a match
    should return a (dist,dist_max) pair of floats indicating the distance
    """
    def track_distance(self, item, info):
        """Per-track distance; always empty (no track-level penalty)."""
        dist = Distance()
        return dist
    # ----------------------------------------------
    """
    album_info --> AlbumInfo Object reflecting the album to be compared.
    items --> sequence of all Item objects that will be matched
    mapping --> dictionary mapping Items to TrackInfo objects
    """
    def album_distance(self, items, album_info, mapping):
        """
        Returns the album distance.
        """
        dist = Distance()
        # Only penalize candidates that actually came from this plugin.
        if (album_info.data_source == 'Wikipedia'):
            dist.add('source', self.config['source_weight'].as_number())
        return dist
    # ----------------------------------------------
    def candidates(self, items, artist, album, va_likely):
        """Returns a list of AlbumInfo objects for Wikipedia search results
        matching an album and artist (if not various).
        """
        candidate_list = []
        candidate_list.append(WikiAlbum(artist, album))
        return candidate_list
|
import smtplib
import os

# SECURITY FIX: the Gmail address and app password were hard-coded here and
# therefore leaked with the source.  They are now read from the environment
# (GMAIL_USER / GMAIL_PASSWORD); the previously committed credentials must be
# rotated.
content = "Bu bir maildir"

sender = os.environ["GMAIL_USER"]
mail = smtplib.SMTP("smtp.gmail.com", 587)
mail.ehlo()
mail.starttls()  # upgrade to TLS before transmitting credentials
mail.login(sender, os.environ["GMAIL_PASSWORD"])
mail.sendmail(sender, "mehmetemin@tastaban.net", content)
mail.quit()  # close the SMTP session cleanly
16,580 | d8ad3d77a02bb73a28d16b761727d6bceb9346b9 | __author__ = 'bill'
class Location(object):
    """A geographic coordinate pair (latitude, longitude)."""

    def __init__(self, latitude, longitude):
        self.latitude = latitude
        self.longitude = longitude

    def __str__(self):
        # "lat,lon" — identical output to the original concatenation form.
        return "{0},{1}".format(self.latitude, self.longitude)
16,581 | 223e69ee41294604a2c7fb11ab53d50f6ee12e22 | from bot import Telegram_Chatbot
from chat_controller import Chat_Controller
# Instantiate bot with token specified in the config
my_bot = Telegram_Chatbot("config.cfg")
# Controller that tracks conversation state and builds replies.
chat_controller = Chat_Controller()
def make_reply(message):
    """Return the bot's canned reply; the incoming *message* is ignored."""
    canned_reply = "Okay cool"
    return canned_reply
# Long-polling loop: fetch updates newer than update_id, wake the controller
# on first contact, and answer the latest message.
update_id = None
while True:
    updates = my_bot.get_updates(offset=update_id)
    updates = updates['result']
    if updates:
        # First activity re-activates a dormant controller.
        if chat_controller.state == "Deactivated":
            chat_controller.state = "Activated"
        update_id, msg, sender_id = chat_controller.process_input(updates)
        # NOTE(review): uses the controller's make_reply, not the module-level
        # make_reply defined above — confirm which one is intended.
        reply = chat_controller.make_reply(msg)
        my_bot.send_message(reply, sender_id)
16,582 | 968258ff4a1b8997ec5ea507617a092011f534a9 | from struct import pack, unpack
import time
import re
import os
import _thread

# Config: if True, load the pre-generated protocol dictionaries from
# neurons_protocol.neurons_dicts instead of parsing the CSV files at runtime.
USE_DICT_CREATED_PRIVIOUSLY = True
POLLING_TIME_FOR_ASSIGNMENT_ID = 0.5 # unit: second
HEART_PACKAGE_THREAD_STACK_SIZE = 8 * 1024
HEART_PACKAGE_THREAD_PRIORITY = 1
# Frame-parser states.
NO_VALID_FRAME = 0
HEAD_START = 1
CHECKSUM_SUCCESS = 2
CHECKSUM_FAILURE = 3
FRAME_DATA_END = 4
lock = _thread.allocate_lock()
current_time = 0
previous_polling_time = 0
frame_status = NO_VALID_FRAME
default_link = None
# Tables describing the modules currently online (filled by the
# fill_element_of_* helpers below).
online_neurons_module_request_dict = dict()
online_neurons_module_response_dict = dict()
if USE_DICT_CREATED_PRIVIOUSLY:
    from neurons_protocol.neurons_dicts import general_command_request_dict
    from neurons_protocol.neurons_dicts import general_command_response_dict
    from neurons_protocol.neurons_dicts import common_neurons_command_request_dict
    from neurons_protocol.neurons_dicts import common_neurons_command_response_dict
    from neurons_protocol.neurons_dicts import common_neurons_command_default_result_dict
else:
    general_command_request_dict = dict()
    general_command_response_dict = dict()
    common_neurons_command_request_dict = dict()
    common_neurons_command_response_dict = dict()
    common_neurons_command_default_result_dict = dict()
online_neurons_module_inactive_block_dict = dict()
online_neurons_module_temporary_result_dict = dict()
def send_BYTE(data):
    """Encode *data* as a single 7-bit payload byte (high bit stripped).

    Floats are truncated to int first, matching the rest of the send_* family.
    """
    if type(data) == float:
        data = int(data)
    return bytearray([data & 0x7f])
def send_byte(data):
    """Encode an 8-bit value as two 7-bit payload bytes, low byte first."""
    if type(data) == float:
        data = int(data)
    raw = data.to_bytes(1, "big")
    low = raw[0] & 0x7f
    high = (raw[0] >> 7) & 0x7f
    return bytearray((low, high))
def send_SHORT(data):
    """Encode an unsigned 16-bit value as two 7-bit payload bytes."""
    if type(data) == float:
        data = int(data)
    raw = data.to_bytes(2, "big")
    low = raw[1] & 0x7f
    high = ((raw[0] << 1) + (raw[1] >> 7)) & 0x7f
    return bytearray((low, high))
def send_short(data):
    """Encode a 16-bit value as three 7-bit payload bytes."""
    if type(data) == float:
        data = int(data)
    raw = data.to_bytes(2, "big")
    b0 = raw[1] & 0x7f
    b1 = ((raw[0] << 1) + (raw[1] >> 7)) & 0x7f
    b2 = (raw[0] >> 6) & 0x7f
    return bytearray((b0, b1, b2))
def send_float(data):
    """Encode an IEEE-754 float32 as five 7-bit payload bytes."""
    raw = pack('f', data)
    out = bytearray()
    out.append(raw[0] & 0x7f)
    out.append(((raw[1] << 1) + (raw[0] >> 7)) & 0x7f)
    out.append(((raw[2] << 2) + (raw[1] >> 6)) & 0x7f)
    out.append(((raw[3] << 3) + (raw[2] >> 5)) & 0x7f)
    out.append((raw[3] >> 4) & 0x7f)
    return out
def send_long(data):
    """Encode an unsigned 32-bit value as five 7-bit payload bytes."""
    if type(data) == float:
        data = int(data)
    raw = data.to_bytes(4, "big")
    b0 = raw[3] & 0x7f
    b1 = ((raw[2] << 1) + (raw[3] >> 7)) & 0x7f
    b2 = ((raw[1] << 2) + (raw[2] >> 6)) & 0x7f
    b3 = ((raw[0] << 3) + (raw[1] >> 5)) & 0x7f
    b4 = (raw[0] >> 4) & 0x7f
    return bytearray((b0, b1, b2, b3, b4))
def send_STR1(data):
    """Encode a short string (or sequence of ints/chars) as a 1-byte length
    prefix followed by the raw character bytes.

    Elements that are neither str nor int are silently skipped, matching the
    original wire format.  Fixed: removed a stray debug
    ``print(type(data[i]))`` that spammed the console on every element.
    """
    data_bytes = bytearray()
    # BYTE-encoded length prefix (high bit stripped), as send_BYTE produces.
    data_bytes.append(len(data) & 0x7f)
    for element in data:
        if type(element) == str:
            data_bytes.append(ord(element))
        elif type(element) == int:
            data_bytes.append(element)
    return data_bytes
def send_STR2(data):
    """Encode a long string (or sequence of ints/chars) as a SHORT-encoded
    2-byte length prefix followed by the raw character bytes.

    Elements that are neither str nor int are silently skipped.
    """
    data_bytes = bytearray(send_SHORT(len(data)))
    for element in data:
        if type(element) == str:
            data_bytes.append(ord(element))
        elif type(element) == int:
            data_bytes.append(element)
    return data_bytes
def read_BYTE(data):
    """Decode one 7-bit payload byte into an unsigned value (high bit dropped)."""
    return data & 0x7f
def read_byte(data):
    """Decode two 7-bit payload bytes into a signed 8-bit integer."""
    raw = (data[0] & 0x7f) | ((data[1] << 7) & 0x80)
    # Reinterpret the unsigned byte as signed via struct round-trip.
    return unpack('b', pack('B', raw))[0]
def read_SHORT(data):
    """Decode two 7-bit payload bytes into an unsigned 16-bit integer."""
    low = (data[0] & 0x7f) | ((data[1] << 7) & 0x80)
    high = (data[1] >> 1) & 0x7f
    return (high << 8 | low) & 0xffff
def read_short(data):
    """Decode three 7-bit payload bytes into a signed 16-bit integer."""
    low = (data[0] & 0x7f) | ((data[1] << 7) & 0x80)
    high = ((data[1] >> 1) & 0x7f) | (data[2] << 6)
    value = (high << 8 | low) & 0xffff
    # Reinterpret the unsigned 16-bit value as signed via struct round-trip.
    return unpack('h', pack('H', value))[0]
def read_long(data):
    """Decode five 7-bit payload bytes into a signed 32-bit integer.

    Fixed: the original used struct code 'L' (native unsigned long), which is
    8 bytes on LP64 platforms, so the 'L' -> 'l' reinterpretation never
    produced negative values there.  '<I'/'<i' pin the round-trip to exactly
    4 bytes on every platform.
    """
    b0 = (data[0] & 0x7f) | (data[1] << 7)
    b1 = ((data[1] >> 1) & 0x7f) | (data[2] << 6)
    b2 = ((data[2] >> 2) & 0x7f) | (data[3] << 5)
    b3 = ((data[3] >> 3) & 0x7f) | (data[4] << 4)
    # The partial bytes overlap when OR-ed together; the final mask keeps
    # exactly 32 bits, as in the original.
    value = (b3 << 24 | b2 << 16 | b1 << 8 | b0) & 0xffffffff
    return unpack('<i', pack('<I', value))[0]
def read_float(data):
    """Decode five 7-bit payload bytes into an IEEE-754 float32.

    Fixed: the original packed with 'L' (native unsigned long — 8 bytes on
    most 64-bit platforms) and unpacked with 'f' (4 bytes), which raises
    struct.error there.  '<I'/'<f' use an exact 4-byte layout everywhere.
    """
    b0 = (data[0] & 0x7f) | (data[1] << 7)
    b1 = ((data[1] >> 1) & 0x7f) | (data[2] << 6)
    b2 = ((data[2] >> 2) & 0x7f) | (data[3] << 5)
    b3 = ((data[3] >> 3) & 0x7f) | (data[4] << 4)
    value = (b3 << 24 | b2 << 16 | b1 << 8 | b0) & 0xffffffff
    return unpack('<f', pack('<I', value))[0]
def request_data_conversion(data_type, data):
    """Serialize *data* according to the protocol type name in *data_type*.

    Returns a bytearray, or None for an unknown type name (as before).
    """
    encoders = {
        "BYTE": send_BYTE,
        "byte": send_byte,
        "SHORT": send_SHORT,
        "short": send_short,
        "float": send_float,
        "long": send_long,
        "STR1": send_STR1,
        "STR2": send_STR2,
    }
    encoder = encoders.get(data_type)
    if encoder is not None:
        return encoder(data)
def response_data_conversion(data_type, para_stream_index, para_stream):
    """Deserialize one value of *data_type* from *para_stream* starting at
    *para_stream_index*.

    Returns (new_index, value) on success, or None for an unknown type name
    (matching the original's implicit fall-through).
    """
    # (decoder, encoded width in bytes, takes a single byte rather than a slice)
    decoders = {
        "BYTE": (read_BYTE, 1, True),
        "byte": (read_byte, 2, False),
        "SHORT": (read_SHORT, 2, False),
        "short": (read_short, 3, False),
        "long": (read_long, 5, False),
        "float": (read_float, 5, False),
    }
    entry = decoders.get(data_type)
    if entry is None:
        return None
    decoder, width, single_byte = entry
    if single_byte:
        value = decoder(para_stream[para_stream_index])
    else:
        value = decoder(para_stream[para_stream_index:])
    return para_stream_index + width, value
def conversion_str_to_data(data_type, str_data):
    """Parse *str_data* into the Python value for protocol type *data_type*.

    Fixed: the 'long' branch called the Python 2 builtin ``long``, which
    raises NameError on Python 3; ints are unbounded, so ``int`` is correct.
    Unknown type names return None, as before.
    """
    if data_type in ("BYTE", "byte", "SHORT", "short", "long"):
        return int(str_data)
    if data_type == "float":
        return float(str_data)
    return None
def read_general_command_request_to_dict():
    """Load request_general_command.csv into the module-level
    general_command_request_dict, keyed by command name.

    Each row becomes {"type", "subtype", 1..n: parameter type name,
    "para_num"}.  NOTE(review): chdir("/lib") assumes a MicroPython-style
    filesystem root — confirm on the target device.
    """
    global general_command_request_dict
    os.chdir("/lib")
    with open("request_general_command.csv", "r") as f:
        line = f.readline() #read the title line
        line = f.readline()
        while line:
            general_command_request_block_dict = dict()
            line_elements = line.split(',')
            elements_length = len(line_elements)
            general_command_request_block_dict["type"] = int(line_elements[1],16);
            if(line_elements[2] != "None"):
                general_command_request_block_dict["subtype"] = int(line_elements[2],16);
            else:
                general_command_request_block_dict["subtype"] = None;
            # Parameter-type columns run until an empty / end-of-line cell.
            para_num = 1
            while line_elements[para_num+2] != "" and \
                  line_elements[para_num+2] != "\r\n" and \
                  (para_num + 2) < elements_length:
                general_command_request_block_dict[para_num] = line_elements[para_num+2];
                para_num = para_num + 1
            general_command_request_block_dict["para_num"] = para_num - 1
            general_command_request_dict[line_elements[0]] = general_command_request_block_dict
            line = f.readline()
    f.close( )  # redundant: the with-statement already closed the file
    #print(general_command_request_dict)
def read_general_command_response_to_dict():
    """Load response_general_command.csv into general_command_response_dict,
    keyed by (type & 0x7f) * 256 + subtype (subtype 0 when "None")."""
    global general_command_response_dict
    os.chdir("/lib")
    with open("response_general_command.csv", "r") as f:
        line = f.readline() #read the title line
        line = f.readline()
        while line:
            general_command_response_block_dict = dict()
            line_elements = line.split(',')
            elements_length = len(line_elements)
            type_str = (int(line_elements[2],16)) & 0x7f
            if(line_elements[3] != "None"):
                type_str = type_str * 256 + int(line_elements[3],16)
            else:
                type_str = type_str * 256
            general_command_response_block_dict["name"] = line_elements[0]
            # Parameter-type columns run until an empty / end-of-line cell.
            para_num = 1
            while line_elements[para_num+3] != "" and \
                  line_elements[para_num+3] != "\r\n" and \
                  (para_num + 3) < elements_length:
                general_command_response_block_dict[para_num] = line_elements[para_num+3];
                para_num = para_num + 1
            general_command_response_block_dict["para_num"] = para_num - 1
            general_command_response_dict[type_str] = general_command_response_block_dict
            line = f.readline()
        #print(general_command_response_dict)
    f.close( )  # redundant: the with-statement already closed the file
def read_common_neurons_command_request_to_dict():
    """Load request_common_neurons_command.csv into
    common_neurons_command_request_dict, keyed by
    (type & 0x7f) * 256 + subtype.

    NOTE(review): only the FIRST row of each block type creates an entry —
    later rows for the same type hit the `pass` branch and are dropped;
    confirm that additional sub-commands per type are not expected.
    """
    global common_neurons_command_request_dict
    os.chdir("/lib")
    with open("request_common_neurons_command.csv", "r") as f:
        line = f.readline() #read the title line
        line = f.readline()
        common_neurons_command_request_block_dict = dict()
        while line:
            common_neurons_command_request_function_dict = dict()
            line_elements = line.split(',')
            elements_length = len(line_elements)
            type_str = (int(line_elements[1],16)) & 0x7f
            if(line_elements[2] != "None"):
                type_str = type_str * 256 + int(line_elements[2],16);
            else:
                type_str = type_str * 256 + 0;
            if type_str in common_neurons_command_request_dict:
                pass
            else:
                common_neurons_command_request_function_dict_list = dict()
                common_neurons_command_request_block_dict = dict()
                common_neurons_command_request_block_dict["name"] = line_elements[0]
                common_neurons_command_request_function_dict["command_id"] = int(line_elements[4],16);
                # Parameter-type columns run until an empty / end-of-line cell.
                para_num = 1
                while line_elements[para_num+4] != "" and \
                      line_elements[para_num+4] != "\r\n" and \
                      (para_num + 4) < elements_length:
                    common_neurons_command_request_function_dict[para_num] = line_elements[para_num+4];
                    para_num = para_num + 1
                common_neurons_command_request_function_dict["para_num"] = para_num - 1
                common_neurons_command_request_function_dict_list[line_elements[3]] = common_neurons_command_request_function_dict
                common_neurons_command_request_block_dict["function"] = common_neurons_command_request_function_dict_list
                common_neurons_command_request_dict[type_str] = common_neurons_command_request_block_dict
            line = f.readline()
    f.close( )  # redundant: the with-statement already closed the file
    #print(common_neurons_command_request_dict)
def read_common_neurons_command_response_to_dict():
    """Load response_common_neurons_command.csv into
    common_neurons_command_response_dict and build the per-block default
    result table (common_neurons_command_default_result_dict).

    Parameter cells may carry "type/default" (e.g. "short/0"); the default
    part seeds the default-result lists via conversion_str_to_data.
    NOTE(review): like the request reader, only the first row per block type
    populates the tables — confirm that is intended.
    """
    global common_neurons_command_response_dict
    global common_neurons_command_default_result_dict
    os.chdir("/lib")
    with open("response_common_neurons_command.csv", "r") as f:
        line = f.readline() #read the title line
        line = f.readline()
        while line:
            common_neurons_command_response_function_dict = dict()
            line_elements = line.split(',')
            elements_length = len(line_elements)
            type_str = (int(line_elements[2],16)) & 0x7f
            if(line_elements[3] != "None"):
                type_str = type_str * 256 + int(line_elements[3],16);
            else:
                type_str = type_str * 256 + 0;
            if type_str in common_neurons_command_response_dict:
                common_neurons_command_default_function_result_list = []
                pass
            else:
                common_neurons_command_response_function_dict_list = dict()
                common_neurons_command_response_block_dict = dict()
                common_neurons_command_response_block_dict["name"] = line_elements[0]
                common_neurons_command_default_function_result_list = []
                common_neurons_command_default_block_result_dict = dict()
                common_neurons_command_response_function_dict["command_name"] = line_elements[4];
                para_num = 1
                while line_elements[para_num+5] != "" and \
                      line_elements[para_num+5] != "\r\n" and \
                      (para_num + 5) < elements_length:
                    str_para = line_elements[para_num+5];
                    # Split "type/default"; a bare type name defaults to 0.
                    pos = str_para.rfind('/')
                    if pos == -1:
                        para_type = str_para
                        para_default_value = 0
                    else:
                        para_type = str_para[0:pos]
                        para_default_value_str = str_para.split('/')[-1]
                        para_default_value = conversion_str_to_data(para_type, para_default_value_str)
                    common_neurons_command_response_function_dict[para_num] = line_elements[para_num+5];
                    common_neurons_command_default_function_result_list.append(para_default_value)
                    para_num = para_num + 1
                common_neurons_command_response_function_dict["para_num"] = para_num - 1
                common_neurons_command_response_function_dict_list[int(line_elements[5],16)] = common_neurons_command_response_function_dict
                common_neurons_command_response_block_dict["function"] = common_neurons_command_response_function_dict_list
                common_neurons_command_default_block_result_dict[line_elements[4]] = common_neurons_command_default_function_result_list
                common_neurons_command_response_dict[type_str] = common_neurons_command_response_block_dict
                common_neurons_command_default_result_dict[line_elements[0]] = common_neurons_command_default_block_result_dict
            line = f.readline()
    f.close( )  # redundant: the with-statement already closed the file
    # print(common_neurons_command_response_dict)
def calculation_block_id_and_subcommand_id(command_name, subcommand, block_index):
    """Resolve a block index and sub-command name to on-wire ids.

    block_index 0xff is the broadcast address; otherwise it is a 1-based
    index into the device-id list registered for *command_name*.  Returns
    (block_id, subcommand_id); block_id is None for an out-of-range index
    and subcommand_id is 0 for an unknown sub-command or broadcast.
    """
    global online_neurons_module_request_dict
    subcommand_id = 0
    if block_index == 0xff:
        block_id = 0xff  # broadcast to every block
    else:
        entry = online_neurons_module_request_dict[command_name]
        device_ids = entry["device_id"]
        if len(device_ids) >= block_index:
            block_id = device_ids[block_index - 1]
        else:
            block_id = None
        functions = entry["function"]
        if subcommand in functions:
            subcommand_id = functions[subcommand]["command_id"]
    return block_id, subcommand_id
def fill_element_of_online_neurons_module_request_dict(device_id, type, subtype):
    """Register a newly assigned block (device_id, type, subtype) in the
    per-command request table and (re)arm its inactivity-tracker entry.

    No effect when type*256+subtype is not a known common-neurons command.
    NOTE: the parameter name `type` shadows the builtin; kept for interface
    compatibility.
    """
    global online_neurons_module_request_dict
    global online_neurons_module_inactive_block_dict
    str_block_type = type * 256 + subtype
    if str_block_type in common_neurons_command_request_dict:
        block_dict = common_neurons_command_request_dict[str_block_type]
        block_name = block_dict["name"]
        # (Re)arm the inactivity entry for this device.
        if device_id in online_neurons_module_inactive_block_dict:
            online_neurons_module_inactive_block_dict[device_id]["name"] = block_name
            online_neurons_module_inactive_block_dict[device_id]["inactive"] = 0
            online_neurons_module_inactive_block_dict[device_id]["type"] = str_block_type
        else:
            online_neurons_module_inactive_block_info_dict = dict()
            online_neurons_module_inactive_block_info_dict["name"] = block_name
            online_neurons_module_inactive_block_info_dict["inactive"] = 0
            online_neurons_module_inactive_block_info_dict["type"] = str_block_type
            online_neurons_module_inactive_block_dict[device_id] = online_neurons_module_inactive_block_info_dict
        if block_name in online_neurons_module_request_dict:
            # Append the device id (kept sorted) unless already present.
            if device_id in online_neurons_module_request_dict[block_name]["device_id"]:
                pass
            else:
                online_neurons_module_request_dict[block_name]["device_id"].append(device_id)
                online_neurons_module_request_dict[block_name]["device_id"].sort()
        else:
            # First device of this block type: create the table entry.
            online_neurons_module_request_block_dict = dict()
            online_neurons_module_request_block_dict["device_id"] = []
            online_neurons_module_request_block_dict["type"] = type
            online_neurons_module_request_block_dict["subtype"] = subtype
            online_neurons_module_request_block_dict["device_id"].append(device_id)
            if "function" in block_dict:
                function_dict_list = block_dict["function"]
                online_neurons_module_request_block_dict["function"] = function_dict_list
            online_neurons_module_request_dict[block_name] = online_neurons_module_request_block_dict
def fill_element_of_online_neurons_module_response_dict(device_id, type, subtype):
    """Register (device_id, type, subtype) in the response table, keyed by
    type*256+subtype; each device gets an empty per-device result list.

    No effect when the type is not a known common-neurons response command.
    """
    global online_neurons_module_response_dict
    global common_neurons_command_response_dict
    str_block_type = type * 256 + subtype
    if str_block_type in online_neurons_module_response_dict:
        # Known block type: add the device (kept sorted) and reset its results.
        if device_id in online_neurons_module_response_dict[str_block_type]["device_id"]:
            pass
        else:
            online_neurons_module_response_dict[str_block_type]["device_id"].append(device_id)
            online_neurons_module_response_dict[str_block_type]["device_id"].sort()
            online_neurons_module_response_dict[str_block_type][device_id] = []
    elif str_block_type in common_neurons_command_response_dict:
        # First device of this block type: create the table entry.
        online_neurons_module_response_block_dict = dict()
        online_neurons_module_response_block_dict["device_id"] = []
        block_dict = common_neurons_command_response_dict[str_block_type]
        block_name = block_dict["name"]
        online_neurons_module_response_block_dict["name"] = block_name
        online_neurons_module_response_block_dict["device_id"].append(device_id)
        online_neurons_module_response_block_dict[device_id] = []
        if "function" in block_dict:
            function_dict_list = block_dict["function"]
            online_neurons_module_response_block_dict["function"] = function_dict_list
        online_neurons_module_response_dict[str_block_type] = online_neurons_module_response_block_dict
def delete_online_neurons_module_request_dict(block_name, device_id):
    """Unregister *device_id* from *block_name* in the request table and
    drop the whole entry once no devices remain."""
    global online_neurons_module_request_dict
    devices = online_neurons_module_request_dict[block_name]["device_id"]
    if device_id in devices:
        devices.remove(device_id)
    if not devices:
        del online_neurons_module_request_dict[block_name]
def delete_online_neurons_module_response_dict(block_type, device_id):
    """Unregister *device_id* from *block_type* in the response table and
    drop the whole entry once no devices remain; unknown types are ignored."""
    global online_neurons_module_response_dict
    if block_type not in online_neurons_module_response_dict:
        return
    devices = online_neurons_module_response_dict[block_type]["device_id"]
    if device_id in devices:
        devices.remove(device_id)
    if not devices:
        del online_neurons_module_response_dict[block_type]
def general_command_request(link, command_name, block_index, data_segment = []):
    """Build and send a 'general' (management) neurons command frame.

    Args:
        link: callable that transmits a bytearray over the physical link.
        command_name: key into general_command_request_dict (e.g. "assign_id").
        block_index: block id byte placed at the head of the frame
            (callers use 0xff as a broadcast id).
        data_segment: single parameter value when para_num == 1, otherwise a
            sequence of values; encoded via request_data_conversion.

    Returns:
        (block_id, subcommand_id); subcommand_id is always 0x00 because
        general commands carry no sub-command byte.
    """
    global general_command_request_dict
    block_dict = general_command_request_dict[command_name]
    general_command_request_data = bytearray()
    block_id = block_index
    # General commands have no sub-command; 0x00 is reported for uniformity
    # with common_neurons_command_request's return shape.
    subcommand_id = 0x00
    general_command_request_data.append(block_id)
    # type data is added
    general_command_request_data.append(block_dict["type"])
    # subtype data is added
    if block_dict["subtype"] != None:
        general_command_request_data.append(block_dict["subtype"])
    arg_num = block_dict["para_num"]
    for i in range(arg_num):
        data_type = block_dict[i+1]
        # A single-parameter command receives data_segment as a scalar.
        if arg_num == 1:
            data_bytes = request_data_conversion(data_type, data_segment)
        else:
            data_bytes = request_data_conversion(data_type, data_segment[i])
        for data_element in data_bytes:
            general_command_request_data.append(data_element)
    # link.send(general_command_request_data)
    link(general_command_request_data)
    return block_id, subcommand_id
def general_command_response(data_stream):
    """Decode a 'general' (management) response frame.

    Frame layout: [device_id, type, parameters...].  For an "assign_id"
    response the decoded parameters are registered in the request/response
    routing tables via the fill_element_of_* helpers.

    Returns:
        (device_id, decoded_parameter_list)
    """
    result = None
    data_stream_temp = data_stream
    device_id = data_stream_temp[0]
    block_type = data_stream_temp[1]
    # General response tables are keyed by type * 256 (zero subtype byte).
    str_block_type = block_type * 256
    result = []
    if str_block_type in general_command_response_dict:
        block_dict = general_command_response_dict[str_block_type]
        if block_dict["name"] == "assign_id":
            arg_num = block_dict["para_num"]
            para_stream = data_stream_temp[2:]
            para_stream_start = 0
            for i in range(arg_num):
                data_type = block_dict[i+1]
                try:
                    para_stream_start, data = response_data_conversion(data_type, para_stream_start, para_stream)
                    result.append(data)
                except:
                    # NOTE(review): swallowing the decode error leaves `result`
                    # short, and result[0]/result[1] below would then raise
                    # IndexError -- confirm frames are length-checked upstream.
                    pass
            fill_element_of_online_neurons_module_request_dict(device_id,result[0],result[1])
            fill_element_of_online_neurons_module_response_dict(device_id,result[0],result[1])
        elif block_dict["name"] == "query_version":
            pass
        else:
            pass
    return device_id, result
def common_neurons_command_request(link, command_name, subcommand, data_segment = [], block_index = 0x01):
    """Build and send a command frame addressed to an on-line neurons module.

    Args:
        link: callable that transmits a bytearray over the physical link.
        command_name: block name registered in online_neurons_module_request_dict.
        subcommand: function name within the block (selects the command id).
        data_segment: scalar (para_num == 1) or sequence of parameter values.
        block_index: 1-based index among attached modules of the same type.

    Returns:
        (block_id, subcommand_id) on success, or None (not a tuple!) when the
        addressed module is not currently on-line.
    """
    global online_neurons_module_request_dict
    block_dict = online_neurons_module_request_dict[command_name]
    online_neurons_module_request_data = bytearray()
    block_id, subcommand_id = calculation_block_id_and_subcommand_id(command_name, subcommand, block_index)
    if block_id == None:
        # Module not attached; callers must handle the bare None return.
        return block_id
    online_neurons_module_request_data.append(block_id)
    # type data is added
    online_neurons_module_request_data.append(block_dict["type"])
    # subtype data is added
    if block_dict["subtype"] != None:
        online_neurons_module_request_data.append(block_dict["subtype"])
    if command_name == "gyro_sensor":
        # NOTE(review): gyro frames carry two extra header bytes here --
        # presumably a firmware-specific sub-address; confirm against the
        # neurons protocol spec.
        online_neurons_module_request_data.append(0x01)
        online_neurons_module_request_data.append(0x00)
    function_dict_list = block_dict["function"]
    function_dict = function_dict_list[subcommand]
    online_neurons_module_request_data.append(function_dict["command_id"])
    arg_num = function_dict["para_num"]
    for i in range(arg_num):
        data_type = function_dict[i+1]
        # A single-parameter command receives data_segment as a scalar.
        if arg_num == 1:
            data_bytes = request_data_conversion(data_type, data_segment)
        else:
            data_bytes = request_data_conversion(data_type, data_segment[i])
        for data_element in data_bytes:
            online_neurons_module_request_data.append(data_element)
    # link.send(online_neurons_module_request_data)
    link(online_neurons_module_request_data)
    return block_id, subcommand_id
def common_neurons_command_response(data_stream):
    """Parse a data frame from an on-line module and cache its decoded values.

    Frame layout: [block_id, type, subtype, command_id, parameters...].
    The decoded parameter list is stored in
    online_neurons_module_temporary_result_dict under block_id*256+command_id
    (an empty list when the block type is unknown), then the reader lock is
    released so a thread blocked in neurons_blocking_read() can pick it up.

    Changes vs. original: removed the unused `global read_count` /
    `global last_ticks` declarations and the unused `name` local; narrowed
    the bare excepts to `except Exception`.
    """
    global online_neurons_module_temporary_result_dict
    global online_neurons_module_response_dict
    data_stream_temp = data_stream
    block_id = data_stream_temp[0]
    str_block_type = data_stream_temp[1] * 256 + data_stream_temp[2]
    command_id = data_stream_temp[3]
    result_id = block_id * 256 + command_id
    result = []
    if str_block_type in online_neurons_module_response_dict:
        block_dict = online_neurons_module_response_dict[str_block_type]
        function_dict = block_dict["function"][command_id]
        para_stream = data_stream_temp[4:]
        para_stream_start = 0
        arg_num = function_dict["para_num"]
        for i in range(arg_num):
            data_type = function_dict[i+1]
            try:
                para_stream_start, data = response_data_conversion(data_type, para_stream_start, para_stream)
                result.append(data)
            except Exception:
                # Truncated frame: keep whatever parameters decoded so far.
                pass
    if result_id in online_neurons_module_temporary_result_dict:
        online_neurons_module_temporary_result_dict[result_id]["result"] = result
    else:
        online_neurons_module_temporary_result = dict()
        online_neurons_module_temporary_result["result"] = result
        online_neurons_module_temporary_result_dict[result_id] = online_neurons_module_temporary_result
    # Wake up a blocking reader, if any; release can still race with the
    # reader, hence the guard.
    if lock.locked():
        try:
            lock.release()
        except Exception:
            pass
def activation_block_update():
    """Age every tracked module and purge those silent for >2 heartbeat cycles.

    Called once per heartbeat period.  A purged module is removed from the
    request/response routing tables and all of its cached results are dropped.

    Both loops iterate over list(...) snapshots because entries are deleted
    during the walk -- deleting from a dict while iterating it raises
    RuntimeError ("dictionary changed size during iteration") in the
    original code.
    """
    global online_neurons_module_inactive_block_dict
    global online_neurons_module_temporary_result_dict
    for device_id in list(online_neurons_module_inactive_block_dict):
        online_neurons_module_inactive_block_dict[device_id]["inactive"] += 1
        if online_neurons_module_inactive_block_dict[device_id]["inactive"] > 2:
            block_name = online_neurons_module_inactive_block_dict[device_id]["name"]
            block_type = online_neurons_module_inactive_block_dict[device_id]["type"]
            delete_online_neurons_module_request_dict(block_name, device_id)
            delete_online_neurons_module_response_dict(block_type, device_id)
            del online_neurons_module_inactive_block_dict[device_id]
            # Drop cached results belonging to this device (result_id is
            # block_id*256+command_id, and block_id is the device id here).
            for result_id in list(online_neurons_module_temporary_result_dict):
                if ((result_id >> 8) & 0xff) == device_id:
                    del online_neurons_module_temporary_result_dict[result_id]
def get_default_result(block_name, subcommand):
    """Fallback value for (block_name, subcommand), or None if unregistered."""
    global common_neurons_command_default_result_dict
    block_defaults = common_neurons_command_default_result_dict.get(block_name, [])
    # Membership test works for both the dict case and the [] fallback.
    if subcommand in block_defaults:
        return block_defaults[subcommand]
    return None
#this is subcommand not subtype
def request_distributor(link, block_name, subcommand, data_segment = [], block_index = 0x01):
    """Route a request to the general or the on-line-module command builder.

    NOTE(review): for general commands the positional order is deliberately
    shuffled -- general_command_request takes (link, name, block_index,
    data_segment), so here `data_segment` carries the block id (e.g. the
    0xff broadcast id) and `block_index` carries the payload.  Callers such
    as neurons_heartbeat_thread rely on this; do not "fix" the order.
    """
    global online_neurons_module_request_dict
    if block_name in general_command_request_dict:
        return general_command_request(link, block_name, data_segment, block_index)
    elif block_name in online_neurons_module_request_dict:
        return common_neurons_command_request(link, block_name, subcommand, data_segment, block_index)
def response_distributor(frame):
    """Dispatch one received frame by its type byte.

    Frames whose type byte has bit 4 set (0x10) are 'general' management
    responses; everything else is on-line module data.
    """
    # global default_link
    # while True:
    #     frame = default_link.recv()
    #     if frame:
    if (frame[1] & 0x10) == 0x10:
        general_command_response(frame)
    else:
        common_neurons_command_response(frame)
def neurons_request(block_name, subcommand, data_segment = [], block_index = 0x01):
    """Send a request over the globally bound link (see neuron_request_bind_phy)."""
    global default_link
    return request_distributor(default_link, block_name, subcommand, data_segment, block_index)
def neurons_response(frame=None):
    """Dispatch one received frame to the response parsers.

    Bug fix: the original called response_distributor() with no arguments,
    which always raised TypeError because that function requires a frame.
    The frame is now accepted here (optional, so the old zero-argument call
    remains a harmless no-op instead of crashing) and forwarded.
    """
    if frame is None:
        return None
    return response_distributor(frame)
def neurons_del_online_module_temporary_result(block_name, subcommand, block_index):
    """Drop any cached result for (block, subcommand) and return its result id.

    Returns:
        0 when the block is unknown or currently off-line, otherwise
        block_id * 256 + subcommand_id.
    """
    global online_neurons_module_request_dict
    global online_neurons_module_temporary_result_dict
    block_id = None
    result_id = 0
    if block_name in online_neurons_module_request_dict:
        block_id, subcommand_id = calculation_block_id_and_subcommand_id(block_name, subcommand, block_index)
    if block_id == None:
        result_id = 0
    else:
        result_id = block_id * 256 + subcommand_id
    # Stale cached value must be discarded before a fresh blocking read.
    if result_id in online_neurons_module_temporary_result_dict:
        del online_neurons_module_temporary_result_dict[result_id]
    return result_id
def neurons_blocking_read(block_name, subcommand, data_segment = [], block_index = 0x01):
    """Send a request and wait (up to ~1 s) for the matching response.

    The response thread stores decoded values in
    online_neurons_module_temporary_result_dict and releases `lock`; this
    function blocks on the lock and polls for its result id.  Falls back to
    the registered default value on timeout or when the module is off-line.
    """
    global online_neurons_module_temporary_result_dict
    global online_neurons_module_response_dict
    result = None
    #delete online result
    result_id = neurons_del_online_module_temporary_result(block_name, subcommand, block_index)
    # time.ticks_ms is MicroPython's wrap-around millisecond counter.
    last_ticks = time.ticks_ms()
    block_id = neurons_request(block_name, subcommand, data_segment, block_index)
    if block_id == None:
        result = get_default_result(block_name,subcommand)
        return result
    while True:
        # Non-blocking acquire drains a stale release, then the blocking
        # acquire parks this thread until the response thread (or the
        # heartbeat) releases the lock.
        lock.acquire(0)
        lock.acquire(1)
        if result_id in online_neurons_module_temporary_result_dict:
            result = online_neurons_module_temporary_result_dict[result_id]["result"]
            return result
        elif time.ticks_ms() - last_ticks > 1000:
            # NOTE(review): plain subtraction ignores tick wrap-around;
            # MicroPython recommends time.ticks_diff() for this comparison.
            result = get_default_result(block_name,subcommand)
            return result
def neurons_async_read(block_name, subcommand, data_segment = [], block_index = 0x01):
    """Return the most recent cached reading for (block, subcommand).

    Never blocks: falls back to the registered default value whenever the
    block is unknown, off-line, or has not produced a cached result yet.
    """
    global online_neurons_module_temporary_result_dict
    global online_neurons_module_request_dict
    if block_name in online_neurons_module_request_dict:
        block_id, subcommand_id = calculation_block_id_and_subcommand_id(block_name, subcommand, block_index)
        if block_id is not None:
            result_id = block_id * 256 + subcommand_id
            cached = online_neurons_module_temporary_result_dict.get(result_id)
            if cached is not None:
                return cached["result"]
    # Every miss path converges on the same fallback lookup.
    return get_default_result(block_name, subcommand)
def neurons_async_read_test():
    """Send a fixed probe frame (block 0x01, type 0x64, subtype 0x02, cmd 0x01)."""
    global default_link
    probe_frame = bytearray((0x01, 0x64, 0x02, 0x01))
    default_link(probe_frame)
def neurons_heartbeat_thread():
    """Background thread: periodically broadcast assign_id and age stale modules.

    Runs forever; started by neurons_heartbeat_start().
    """
    from makeblock import sleep_special
    global online_neurons_module_response_dict
    global online_neurons_module_request_dict
    global online_neurons_module_temporary_result_dict
    global default_link
    # default_link = link
    # _thread.start_new_thread(neurons_response, (), 3)
    # Broadcast (block id 0xff) an id-assignment request; each reply
    # re-registers an attached module (see general_command_response).
    neurons_request("assign_id", None, 0xff, (0x00))
    while True:
        activation_block_update()
        neurons_request("assign_id", None, 0xff, (0x00))
        # Kick any reader parked in neurons_blocking_read so it can time out.
        if lock.locked():
            try:
                lock.release()
            except:
                pass
        # print(online_neurons_module_request_dict)
        # print(online_neurons_module_response_dict)
        # print(online_neurons_module_temporary_result_dict)
        # sleep_special use vTaskDelay() instead of mp_hal_delay_ms()
        sleep_special(POLLING_TIME_FOR_ASSIGNMENT_ID)
def neurons_heartbeat_start():
    """Load the protocol tables (unless prebuilt) and start the heartbeat thread."""
    if not USE_DICT_CREATED_PRIVIOUSLY:
        try:
            read_general_command_request_to_dict()
            read_general_command_response_to_dict()
            read_common_neurons_command_request_to_dict()
            read_common_neurons_command_response_to_dict()
        except Exception as e:
            # Best effort: a missing/corrupt CSV leaves the tables empty but
            # must not prevent the firmware from booting.
            print("neurons read csv error")
            print(e)
    _thread.stack_size(HEART_PACKAGE_THREAD_STACK_SIZE)
    _thread.start_new_thread(neurons_heartbeat_thread, (), HEART_PACKAGE_THREAD_PRIORITY)
def neuron_request_bind_phy(link):
    """Register *link* as the transport callable used by neurons_request()."""
    global default_link
    default_link = link
|
16,583 | f3aeb0262a95983567b83841073498a2eddc0fac | class InvalidCommandException(Exception):
pass
class Block():
    """A coloured unit block whose position is stored as the complex x + y*1j."""
    def __init__(self, x, y, data):
        self.block = x+y*1j
        self.data = data
    def __getattr__(self, name):
        # Delegate unknown attribute reads (real, imag, ...) to the position.
        return getattr(self.block, name)
    def __repr__(self):
        return repr(self.block)
class BlockStructure():
    """Blocks on an integer grid with explicit adjacency connections.

    `blocks` maps each Block to the set of Blocks it is connected to;
    `obj_dict` maps each occupied position (complex) to its Block.
    Invalid operations raise InvalidCommandException (defined alongside
    this class).

    Bug fix vs. original: move() and rotate() now update obj_dict in two
    phases (delete every old key, then insert every new key).  The original
    interleaved del/insert per block, which -- depending on set iteration
    order -- could delete an entry that another block of the same structure
    had just been moved onto, silently corrupting obj_dict.
    """
    def __init__(self):
        self.blocks = {} # blocks : {connected neighbours}
        self.obj_dict = {} # complex position : block
    def place(self, x, y, colour):
        """Place a new, unconnected block; the cell must be free."""
        block_c = x+y*1j
        if block_c in self.obj_dict:
            raise InvalidCommandException
        new_block = Block(x, y, colour)
        self.obj_dict[block_c] = new_block
        self.blocks[new_block] = set()
    def remove(self, x, y):
        """Delete the block at (x, y) and detach it from its neighbours."""
        block_c = x+y*1j
        if block_c not in self.obj_dict:
            raise InvalidCommandException
        block = self.obj_dict[block_c]
        for neighbour in self.blocks[block]:
            self.blocks[neighbour].remove(block)
        del self.blocks[block]
        del self.obj_dict[block_c]
    def connect(self, x1, y1, x2, y2):
        """Connect two existing, orthogonally adjacent, not-yet-connected blocks."""
        block_c1 = x1+y1*1j
        block_c2 = x2+y2*1j
        if block_c1 not in self.obj_dict or block_c2 not in self.obj_dict:
            raise InvalidCommandException
        if abs(block_c1 - block_c2) != 1:
            raise InvalidCommandException
        block1 = self.obj_dict[block_c1]
        block2 = self.obj_dict[block_c2]
        if block1 in self.blocks[block2]:
            raise InvalidCommandException
        self.blocks[block1].add(block2)
        self.blocks[block2].add(block1)
    def disconnect(self, x1, y1, x2, y2):
        """Remove an existing connection between two adjacent blocks."""
        block_c1 = x1+y1*1j
        block_c2 = x2+y2*1j
        if block_c1 not in self.obj_dict or block_c2 not in self.obj_dict:
            raise InvalidCommandException
        if abs(block_c1 - block_c2) != 1:
            raise InvalidCommandException
        block1 = self.obj_dict[block_c1]
        block2 = self.obj_dict[block_c2]
        if block1 not in self.blocks[block2]:
            raise InvalidCommandException
        self.blocks[block1].remove(block2)
        self.blocks[block2].remove(block1)
    def count(self):
        """Number of connected structures (an isolated block counts as one)."""
        all_blocks = set(self.blocks.keys())
        n_structures = 0
        while all_blocks:
            start_block = all_blocks.pop()
            n_structures += 1
            all_blocks -= self._flood_fill(start_block)
        return n_structures
    def move(self, x, y, dx, dy):
        """Translate the whole structure containing (x, y) by (dx, dy).

        Raises InvalidCommandException if (x, y) is empty or a destination
        cell is occupied by a block outside the moving structure.
        """
        block_c = x+y*1j
        if block_c not in self.obj_dict:
            raise InvalidCommandException
        structure = self._flood_fill(self.obj_dict[block_c])
        delta = dx+dy*1j
        curr_positions = {block.block for block in structure}
        # Only blocks outside the moving structure can block a destination.
        # (Membership test on obj_dict directly -- the original rebuilt
        # set(self.obj_dict.keys()) for every element, O(n^2).)
        if any(block.block + delta not in curr_positions and
               block.block + delta in self.obj_dict
               for block in structure):
            raise InvalidCommandException
        # Two-phase update; see class docstring for why.
        for block in structure:
            del self.obj_dict[block.block]
        for block in structure:
            block.block += delta
            self.obj_dict[block.block] = block
    def rotate(self, x, y, ccw_times):
        """Rotate the structure containing (x, y) by ccw_times * 90 degrees
        counter-clockwise about (x, y)."""
        block_c = x+y*1j
        if block_c not in self.obj_dict:
            raise InvalidCommandException
        structure = self._flood_fill(self.obj_dict[block_c])
        # Exact unit rotation factor; handles negative/large ccw_times and
        # avoids any reliance on complex-power round-off behaviour.
        unit = (1, 1j, -1, -1j)[ccw_times % 4]
        transform = lambda pos: (pos - block_c)*unit + block_c
        curr_positions = {block.block for block in structure}
        if any(transform(block.block) not in curr_positions and
               transform(block.block) in self.obj_dict
               for block in structure):
            raise InvalidCommandException
        # Two-phase update; see class docstring for why.
        for block in structure:
            del self.obj_dict[block.block]
        for block in structure:
            block.block = transform(block.block)
            self.obj_dict[block.block] = block
    def connected(self, x1, y1, x2, y2):
        """True when both cells are occupied and belong to the same structure."""
        block_c1 = x1+y1*1j
        block_c2 = x2+y2*1j
        if block_c1 not in self.obj_dict or block_c2 not in self.obj_dict:
            return False
        block1 = self.obj_dict[block_c1]
        structure = self._flood_fill(block1)
        return (self.obj_dict[block_c2] in structure)
    def nearest(self, x, y):
        """All blocks at minimum Manhattan distance from (x, y) as
        (x, y, data) tuples; empty list when the grid is empty."""
        if not self.blocks:
            return []
        min_ = min(abs(block.block.real - x) + abs(block.block.imag - y) for block in self.blocks.keys())
        return [(int(block.block.real), int(block.block.imag), block.data) for block in self.blocks.keys()
                if abs(block.block.real - x) + abs(block.block.imag - y) == min_]
    def colour(self, colour):
        """All Block objects with the given colour/data value."""
        return [block for block in self.blocks if block.data == colour]
    def _flood_fill(self, block):
        """Set of blocks reachable from *block* via recorded connections."""
        connected = {block}
        to_search = [block]
        while to_search:
            curr_block = to_search.pop()
            for neighbour in self.blocks[curr_block]:
                if neighbour not in connected:
                    connected.add(neighbour)
                    to_search.append(neighbour)
        return connected
    def __repr__(self):
        return repr(self.blocks)
|
16,584 | 9286e59ee98f7ca094d11e73285409f99fe45960 | from flask import Flask, request, jsonify, send_from_directory
from flask_cors import CORS
from uuid import uuid4
import glob, os
import pypdftk
from zipfile import ZipFile
app = Flask(__name__)
CORS(app)
# Toggles host binding and the advertised download URL (see fillform()).
production = False
pdf_path = "./files" # directory containing the blank PDF templates
filled_pdfs_path = "/tmp/files_filled" # directory the filled PDFs are written to
# --- PDF field mappings ---------------------------------------------------
# One dict per template.  Entries are either
#   input-key -> pdf-field-name   (value comes from the submitted form), or
#   pdf-field-name -> constant    (fixed value; key absent from the input).
# Several input keys may target the same pdf field; form_filler() joins
# their values with ", ".
kfw_kredite_1 = {
    "unternehmen_name": 'antragsteller_unternehmen',
    "investitionsort": 'investitionsort',
    'Straßengüterverkehr': "Nein",
    "beihilfen_trigger": "Beihilfen"
}
kfw_kredite_2 = {
    'KUDefinition': "Ja",
    "unternehmen_name": "1",
    "anz_mitarbeiter": 'undefined',
    "umsatz_jahr": 'undefined_2',
    "bilanzsumme": 'undefined_3',
    # Both ort and datum land in the same pdf field and get joined.
    "ort": 'Ort und Datum',
    "datum": 'Ort und Datum'
}
soforthilfe_bayern_1 = {
    "regierung": "Regierung",
    "unternehmen_name": 'Firma Name Vorname',
    "rechtsform": 'Rechtsform Handelsregisternummer',
    "handelsregisternummer": 'Rechtsform Handelsregisternummer',
    "strasse_nr": 'Straße',
    "ort": 'PLZ Ort',
    "plz": 'PLZ Ort',
    "telefon": 'Telefon Telefax',
    "email": 'EMailAdresse',
    "iban": 'IBAN',
    "bic": 'BIC',
    "kreditinstitut": 'Kreditinstitut',
    'Branche Art der gewerblichen oder freiberuflichen Tätigkeit': "Handwerk",
    "anz_mitarbeiter": 'Anzahl der Beschäftigten Teilzeitkräfte bitte in Vollzeitkräfte umrechnen',
}
akkutkredit_universalkredit_1 = {
    "0": "Universalkredit, Akutkredit",
    "unternehmen_name": "1",
    "50": "Nein",
}
akkutkredit_universalkredit_2 = {
    '1.1a.0': "Universalkredit",
    "1.2a": "Akutkredit",
    "betrag_1": '1.1a.1.1.0.0',
    "betrag_2": '1.1a.1.1.0.1',
    "laufzeit_1": '1.1a.1.1.1.0.0',
    "laufzeit_2": '1.1a.1.1.1.0.1',
    "3.2": "Ja",
    "unternehmen_name": "3.3.1",
    "strasse_nr": "3.4.1",
    "country_code_nichtD": '3.4.2',
    "plz": "3.4.3",
    "ort": "3.4.4",
    "3.5.1": "Ja",
    "gruendungsdatum": "3.7",
    "3.8.1": "5",
    "registernummer": "3.8.2",
    "name_registergericht": "3.8.3",
    "3.8.5": "Handwerk",
    "4.1.1.1": "Ja",
    '4.2.2.0.0.0': "Betriebsmittel",
}
akkutkredit_steuererleichterungen = {
    "steuer_nr": "Steuernummer",
    "telefon_nr": "Telefonnummer",
    "name_vorname": "Name Vorname",
    "anschrift": "Anschrift",
    "finanzamt_1": "Finanzamt 1",
    "finanzamt_2": "Straße Hausnummer",
    "finanzamt_3": "Postleitzahl Ort",
    "datum_tag": "TT1",
    "datum_monat": "MM1",
    "datum_jahr": "JJJJ1",
    "Antrag auf Herabsetzung von Vorauszahlungendes Steuermessbetrages für": "Ja",
}
# template name (without .pdf) -> its field mapping
mapping_dict = {"kfw_kredite_1": kfw_kredite_1,
                "kfw_kredite_2": kfw_kredite_2,
                "soforthilfe_bayern_1": soforthilfe_bayern_1,
                "akkutkredit_universalkredit_1": akkutkredit_universalkredit_1,
                "akkutkredit_universalkredit_2": akkutkredit_universalkredit_2,
                "akkutkredit_steuererleichterungen": akkutkredit_steuererleichterungen}
def form_filler(input_tim):
    """Fill every mapped PDF template with values from *input_tim*.

    Each mapping entry is either input-key -> pdf-field (value taken from
    input_tim) or pdf-field -> constant (key absent from input_tim).
    Multiple input keys feeding the same pdf field are joined with ", ".

    Returns:
        List of paths of the filled, flattened PDF files.
    """
    filled_pdfs = []
    for pdf_name, mapping in mapping_dict.items():
        final_mapping = {}
        # Hoisted: the original rebuilt this list for every mapping entry.
        value_list = list(mapping.values())
        for key, value in mapping.items():
            try:
                if value_list.count(value) > 1:
                    # Several input keys target the same pdf field.
                    sub_list = [key for key in list(mapping.keys()) if mapping[key] == value]
                    sub_results = [input_tim[key] for key in sub_list]
                    final_mapping[value] = ", ".join(sub_results)
                else:
                    final_mapping[value] = input_tim[key]
            except (KeyError, TypeError):
                # key is not an input field (or its value is missing/None):
                # treat the entry as a constant pdf field value.  Narrowed
                # from the original bare except.
                final_mapping[key] = value
        filled_pdf = pypdftk.fill_form(pdf_path=pdf_path + "/" + pdf_name + ".pdf",
                                       out_file=filled_pdfs_path + "/" + pdf_name + "_filled" + ".pdf",
                                       datas=final_mapping,
                                       flatten=True)
        filled_pdfs.append(filled_pdf)
    return filled_pdfs
@app.route('/fillform', methods=["POST"])
def fillform():
    """Fill all mapped PDF forms from the posted data and return a zip URL.

    Returns:
        JSON {"download_url": ...} pointing at the /download endpoint.
    """
    # Form input from the client.  request.form.get() yields None for
    # missing fields, which form_filler treats as "use the constant value".
    input_tim = {
        "vorname": request.form.get("vorname"),
        "nachname": request.form.get("nachname"),
        "ort": request.form.get("ort"),
        "plz": request.form.get("plz"),
        "unternehmen_name": request.form.get("unternehmen_name"),
        "rechtsform": request.form.get("rechtsform"),
        "handelsregisternummer": request.form.get("handelsregisternummer"),
        "strasse_nr": request.form.get("strasse_nr"),
        "telefon": request.form.get("telefon"),
        "email": request.form.get("email"),
        "iban": request.form.get("iban"),
        "bic": request.form.get("bic"),
        "kreditinstitut": request.form.get("kreditinstitut"),
        "Branche Art der gewerblichen oder freiberuflichen Tätigkeit": request.form.get("branche"),
        "anz_mitarbeiter": request.form.get("anz_mitarbeiter"),
    }
    form_filler(input_tim)
    request_id = str(uuid4())
    outputfilename = request_id + ".zip"
    outputfilepath = "/tmp/" + outputfilename
    # Context manager guarantees the archive is finalised even on error
    # (the original left the ZipFile open if a write raised).
    with ZipFile(outputfilepath, "w") as zip_archive:
        for f in glob.glob("/tmp/files_filled/*"):
            zip_archive.write(f)
    if production:
        request_url = "http://134.122.86.217:5555/download/" + outputfilename
    else:
        request_url = "http://localhost:5555/download/" + outputfilename
    # Returns id to trigger the download based on download endpoint
    return jsonify({"download_url": request_url})
@app.route("/download/<string:request_id>", methods=["GET"])
def get_files(request_id):
return send_from_directory("/tmp/", filename=request_id, as_attachment=True)
@app.route("/foerderantrage", methods=["POST"])
def availableAntraege():
print(request.form.items)
return jsonify({"availableForms": [os.path.basename(file) for file in glob.glob("./formulare/*")]})
@app.route("/downloadblankform/<string:formId>", methods=["GET"])
def downloadForm(formId):
return send_from_directory("./formulare", filename=formId, as_attachment=True)
if __name__ == '__main__':
    # Bind to all interfaces in production; local debug server otherwise.
    if production == True:
        app.run(host="0.0.0.0", port=5555)
    else:
        app.run(debug=True, port=5555)
16,585 | 4917be0405d1ea6155f90ffa29ba71eed43e9426 | """
The backend of the configurable-ranking-system
Uses Flask
__init__.py initializes the app
db.py stores basic database connection related methods
tables.py stores all api routes for interacting with tables (details included there)
General API model (not necessarily representative of the actual backend implementation):
- Data table
- tableName
- viewName
- tableDescription
- fields
- fieldName
- fieldDescription
- isData
- fieldIsAscending
- entryCount
- entries
- values
- ^for
- ^each
- ^field
"""
import os
from flask import Flask
from flask_cors import CORS # CORS stuff is development only
def create_app(test_config=None):
    """Application factory for the ranking-system backend.

    :param test_config: optional mapping overriding the default config
        (used by the test suite instead of the instance config file)
    :return: the configured Flask app
    """
    app = Flask(__name__, instance_relative_config=True)
    app.debug = True
    CORS(app)
    app.config.from_mapping(
        SECRET_KEY='test',
        DATABASE=os.path.join(app.instance_path, 'api.sqlite')
    )
    # Either apply the explicit test overrides or fall back to the
    # (optional) instance config file.
    if test_config is not None:
        app.config.from_mapping(test_config)
    else:
        app.config.from_pyfile('config.py', silent=True)
    # Ensure the instance folder exists before the database is opened.
    try:
        os.makedirs(app.instance_path)
    except OSError:
        pass
    from . import db
    db.init_app(app)
    from . import tables
    app.register_blueprint(tables.bp)
    return app
|
16,586 | 9d0f7ac2f06ef0c16bdc04910b8bfe55deabbd09 | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
from pathlib import Path
import testslide
from ... import error
from .. import incremental, subscription
class SubscriptionTest(testslide.TestCase):
    """Unit tests for parsing server subscription responses.

    Each test defines local assert_parsed / assert_not_parsed helpers and
    feeds raw JSON strings through the corresponding subscription.Response
    parser; malformed payloads must raise incremental.InvalidServerResponse.
    """
    def test_parse_response(self) -> None:
        """Parse classic {"name": ..., "body": [...]} subscription payloads."""
        def assert_parsed(response: str, expected: subscription.Response) -> None:
            self.assertEqual(
                subscription.Response.parse(response),
                expected,
            )
        def assert_not_parsed(response: str) -> None:
            with self.assertRaises(incremental.InvalidServerResponse):
                subscription.Response.parse(response)
        # Structurally invalid payloads.
        assert_not_parsed("derp")
        assert_not_parsed("{}")
        assert_not_parsed("[]")
        assert_not_parsed('["Error"]')
        assert_not_parsed('{"name": "foo", "no_body": []}')
        assert_not_parsed('{"body": [], "no_name": "foo"}')
        assert_not_parsed('{"name": "foo", "body": ["Malformed"]}')
        assert_not_parsed('{"name": "foo", "body": ["TypeErrors", {}]}')
        assert_not_parsed('{"name": "foo", "body": ["StatusUpdate", 42]}')
        assert_not_parsed('{"name": "foo", "body": ["StatusUpdate", []]}')
        # Valid payloads: empty and non-empty TypeErrors, StatusUpdate, Error.
        assert_parsed(
            json.dumps({"name": "foo", "body": ["TypeErrors", []]}),
            expected=subscription.Response(body=subscription.TypeErrors()),
        )
        assert_parsed(
            json.dumps(
                {
                    "name": "foo",
                    "body": [
                        "TypeErrors",
                        [
                            {
                                "line": 1,
                                "column": 1,
                                "stop_line": 2,
                                "stop_column": 2,
                                "path": "test.py",
                                "code": 42,
                                "name": "Fake name",
                                "description": "Fake description",
                            },
                        ],
                    ],
                }
            ),
            expected=subscription.Response(
                body=subscription.TypeErrors(
                    [
                        error.Error(
                            line=1,
                            column=1,
                            stop_line=2,
                            stop_column=2,
                            path=Path("test.py"),
                            code=42,
                            name="Fake name",
                            description="Fake description",
                        ),
                    ]
                ),
            ),
        )
        assert_parsed(
            json.dumps(
                {
                    "name": "foo",
                    "body": ["StatusUpdate", ["derp"]],
                }
            ),
            expected=subscription.Response(
                body=subscription.StatusUpdate(kind="derp"),
            ),
        )
        assert_parsed(
            json.dumps(
                {
                    "name": "foo",
                    "body": ["Error", "rip and tear!"],
                }
            ),
            expected=subscription.Response(
                body=subscription.Error(message="rip and tear!"),
            ),
        )
    def test_parse_code_navigation_response(self) -> None:
        """Parse code-navigation payloads of the form [tag, body]."""
        def assert_parsed(response: str, expected: subscription.Response) -> None:
            self.assertEqual(
                subscription.Response.parse_code_navigation_response(response),
                expected,
            )
        def assert_not_parsed(response: str) -> None:
            with self.assertRaises(incremental.InvalidServerResponse):
                subscription.Response.parse_code_navigation_response(response)
        # Structurally invalid payloads.
        assert_not_parsed("derp")
        assert_not_parsed("{}")
        assert_not_parsed("[]")
        assert_not_parsed('["Error"]')
        assert_not_parsed('["ServerStatus", {}, "Extra"]')
        assert_not_parsed('["ServerStatus", 42]')
        # ServerStatus with and without an attached message.
        assert_parsed(
            json.dumps(["ServerStatus", ["BusyChecking"]]),
            expected=subscription.Response(
                body=subscription.StatusUpdate(kind="BusyChecking")
            ),
        )
        assert_parsed(
            json.dumps(
                [
                    "ServerStatus",
                    [
                        "Stop",
                        {
                            "message": "Pyre server stopped because one client explicitly sent a `stop` request"
                        },
                    ],
                ]
            ),
            expected=subscription.Response(
                body=subscription.StatusUpdate(
                    kind="Stop",
                    message="Pyre server stopped because one client explicitly sent a `stop` request",
                )
            ),
        )
        assert_parsed(
            json.dumps(
                [
                    "TypeErrors",
                    [
                        {
                            "line": 1,
                            "column": 1,
                            "stop_line": 2,
                            "stop_column": 2,
                            "path": "test.py",
                            "code": 42,
                            "name": "Fake name",
                            "description": "Fake description",
                        },
                    ],
                ]
            ),
            expected=subscription.Response(
                body=subscription.TypeErrors(
                    [
                        error.Error(
                            line=1,
                            column=1,
                            stop_line=2,
                            stop_column=2,
                            path=Path("test.py"),
                            code=42,
                            name="Fake name",
                            description="Fake description",
                        ),
                    ]
                ),
            ),
        )
        # Error payloads must be tagged lists, not bare strings.
        assert_not_parsed(json.dumps(["Error", "Needs more cowbell"]))
        assert_parsed(
            json.dumps(["Error", ["InvalidRequest", "some request string"]]),
            expected=subscription.Response(
                body=subscription.Error(message='InvalidRequest: "some request string"')
            ),
        )
        assert_parsed(
            json.dumps(["Error", ["ModuleNotTracked", {"path": "a/b.py"}]]),
            expected=subscription.Response(
                body=subscription.Error(message='ModuleNotTracked: {"path": "a/b.py"}')
            ),
        )
        assert_parsed(
            json.dumps(["Error", ["OverlayNotFound", {"overlay_id": "A"}]]),
            expected=subscription.Response(
                body=subscription.Error(message='OverlayNotFound: {"overlay_id": "A"}')
            ),
        )
16,587 | b9952bd8da221625f5ddf7036463ce1f406f9069 | #!/usr/bin/env python
# coding: utf-8
# In[45]:
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import statsmodels.api as sm
from numpy import set_printoptions
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import chi2
from sklearn.feature_selection import RFE
from sklearn.feature_selection import SelectKBest
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, roc_auc_score
from sklearn.pipeline import FeatureUnion
from sklearn.pipeline import Pipeline
# from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import Normalizer
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.utils import shuffle
from pickle import dump
from pickle import load
import time, datetime
from xgboost import XGBClassifier
from xgboost import plot_importance
import xgboost as xgb
# # Get dummy variables if needed
# In[46]:
def make_dummy_columns(dfr, column):
    """One-hot encode a single column.

    Args:
        dfr: source dataframe.
        column: name of the column to expand into indicator columns.

    Returns:
        A new dataframe with *column* replaced by its dummy columns.
    """
    return pd.get_dummies(dfr, columns=[column])
# # Move predictor column to end
# In[47]:
# Move Y Column to End
def move_class_col(dfr, column_to_move):
"""moves class column to end.
Args:
dfr: A dataframe
column_to_move: column you want to move to the end
Returns:
rearranged dataframe with column at end."""
cols = list(dfr.columns.values)
cols.pop(cols.index(column_to_move))
dfr = dfr[cols + [column_to_move]]
return dfr
# # Train test split and model run
# In[48]:
def run_model(model, train_x, train_y, test_x, test_y, X):
    """Fit *model*, predict on the test split and print standard metrics.

    Prints the model repr, the confusion matrix, then precision, recall,
    ROC-AUC, accuracy and F1 for the rounded predictions.  *X* is unused;
    it is kept only for signature compatibility with existing callers.
    """
    print(model)
    model.fit(train_x, train_y)
    predictions = [round(score) for score in model.predict(test_x)]
    print(confusion_matrix(test_y, predictions))
    print('precision: ', precision_score(test_y, predictions))
    print('recall: ', recall_score(test_y, predictions))
    print('roc_auc: ', roc_auc_score(test_y, predictions))
    print('accuracy: ', accuracy_score(test_y, predictions))
    print('f1_score:', f1_score(test_y, predictions))
# # Run model and save it if needed
# In[86]:
# Load the iris data and reduce it to a binary classification problem.
df = pd.read_csv('iris.csv')
# df['Class-M/F']= df['Class-M/F'].map({Iris-virginica:0, Iris-setosa:1, Iris-versicolor:2})
df = df.loc[df['species'] != 'Iris-versicolor']
df['species'] = df['species'].map({'Iris-virginica': 0, 'Iris-setosa': 1})
df.species.value_counts()
# In[87]:
# NOTE(review): the following bare expressions are notebook-cell residue
# with no effect outside an interactive session.
df.iloc[0:4]
{'sepal_length': 3, 'sepal_width': 2, 'petal_length': 3, 'petal_width': 6}
34
4.9
3.1
1.5
0.1
# In[88]:
df.shape
# df= pd.read_csv('/Users/jermainemarshall/Documents/intenders_conversion_prediction_no_w2v.csv')
# df= pd.read_csv('/Users/jermainemarshall/Documents/intenders_conversion_prediction_exclude_inside_pass_salesgrp.csv')
# In[89]:
if __name__ == '__main__':
    # Shuffle, split features/target, and fit the classifier on ALL rows
    # (the train/test split below is commented out).
    df = shuffle(df)
    # NOTE(review): `dataset` and `seed` are unused leftovers from earlier
    # notebook iterations.
    dataset = df.values
    seed = 7
    # X = dataset[:,0:4]
    x = df[df.columns.difference(['species'])]
    y = df['species']
    # scaler = Normalizer().fit(X)
    # X = scaler.transform(X)
    # Y = dataset[:,4]
    # Y= Y.astype(int)
    # X = X.astype('float32')
    # split data into train and test sets
    # model will output the confusion matrix, precision, recall, roc_auc, and f1_curve. Will also print feature
    # importances
    # X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.50, random_state=seed)
    # The below parameters were the best so far though it can vary 2-4 depending on seed%.
    xgb_model = RandomForestClassifier() #LogisticRegression() #XGBClassifier(learning_rate=0.01, n_estimators=9, max_depth=5, subsample=0.99, colsample_bylevel=1.0,
    # gamma=3, scale_pos_weight=3, min_child_weight=5, seed=3)
    # run_model(xgb_model,X_train, Y_train, X_test, Y_test,df)
    xgb_model.fit(x, y)
# In[90]:
# Correlation heatmap of the four feature columns (notebook-exported cell;
# seaborn is imported mid-script rather than at the top).
import seaborn as sns
corr = df.iloc[:, 0:4].corr()
# corr= corr.fillna(0)
# df_slice1= df_slice1.fillna(0)
fig, ax = plt.subplots(figsize=(10, 10))
sns.heatmap(corr, vmin=-1, vmax=1, linewidths=.5, center=0, ax=ax)
# In[91]:
# Persist the fitted model with pickle and with joblib.
filename = 'test_finalized_random_forest_iris_model.sav'
# NOTE(review): the open() handle here is never closed explicitly.
dump(xgb_model, open(filename, 'wb'))
# some time later...
# load the model from disk
# loaded_model = load(open(filename, 'rb'))
# In[92]:
# NOTE(review): sklearn.externals.joblib was removed in scikit-learn 0.23+;
# modern code imports joblib directly.
from sklearn.externals import joblib
joblib.dump(xgb_model, 'test_finalized_random_forest_iris_model.pkl')
# In[93]:
# Persist the feature column order the model was built with.
cols_when_model_builds = list(df.columns[0:4])
joblib.dump(cols_when_model_builds, 'model_columns.pkl')
print("Models columns dumped!")
print(cols_when_model_builds)
# In[94]:
df.columns
# In[85]:
# Brute-force maximum-subarray scratch: find the best sum over all
# (start, end) windows, print the winning sum, then its elements.
t = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
# -inf instead of the magic -100 sentinel, so all-negative inputs with
# sums below -100 are handled correctly; start/end are initialised so
# they exist even for an empty input list.
maxsum = float('-inf')
start = 0
end = 0
for i in range(len(t)):
    summ = 0
    for j in range(i, len(t)):
        summ += t[j]
        if summ > maxsum:
            maxsum = summ
            start = i
            end = j
print(maxsum)
while start <= end:
    print(t[start])
    start += 1
|
16,588 | 5a2c6b0919236a770389c8fed818f656293413c3 | from flask import Flask, request
from src.measuring.postProcessMeasuring import PostProcessMeasuring
from src.prediction.predictionWrapper import PredictionWrapper
from src.utilities.errorFunctions import imagesMeanSquareError
from src.utilities.errorFunctions import trueSkillStatistic
app = Flask(__name__)
# Print the interpreter version at startup to help debug deployments.
import platform
print(platform.python_version())
@app.route("/last-prediction")
def last_prediction():
with open('last-prediction.json', 'r') as content_file:
content = content_file.read()
return content
@app.route("/predict")
def predict():
print('Prediction Start')
result = PredictionWrapper.predict()
return result
@app.route("/predict-historical")
def predict_historical():
date = request.args['date']
result = PredictionWrapper.predict(date)
return result
@app.route("/accuracy")
def get_accuracy():
files = request.args.getlist('files')
error_fun = request.args['error']
measuring = PostProcessMeasuring().set_files(files)
if error_fun == 'mse':
measuring.set_error_function(imagesMeanSquareError.ImagesMeanSquareError())
if error_fun == 'hk':
measuring.set_error_function(trueSkillStatistic.TrueSkillStatistic())
return measuring.evaluate() |
16,589 | 643bc17a99624e4acf958251485a1bc76a87572d | # Generated by Django 3.1 on 2020-11-18 18:47
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name="Account",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=50)),
(
"user",
models.ForeignKey(
editable=False,
on_delete=django.db.models.deletion.PROTECT,
to=settings.AUTH_USER_MODEL,
),
),
],
options={
"abstract": False,
},
),
migrations.CreateModel(
name="Currency",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=100)),
("symbol", models.CharField(max_length=20)),
],
options={
"verbose_name_plural": "currencies",
},
),
migrations.CreateModel(
name="Exchange",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=80)),
(
"currency",
models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT,
to="portfolio.currency",
),
),
],
),
migrations.CreateModel(
name="Position",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("balance", models.DecimalField(decimal_places=2, max_digits=12)),
(
"account",
models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT,
to="portfolio.account",
),
),
],
options={
"ordering": ("account", "stock", "balance"),
},
),
migrations.CreateModel(
name="Stock",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("symbol", models.CharField(max_length=20)),
("quote_symbol", models.CharField(max_length=20)),
("name", models.CharField(max_length=100)),
(
"exchange",
models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT,
to="portfolio.exchange",
),
),
],
),
migrations.CreateModel(
name="Trade",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("type", models.IntegerField(choices=[(0, "Buy"), (1, "Sell")])),
("date", models.DateField()),
(
"number_of_shares",
models.DecimalField(decimal_places=2, max_digits=12),
),
("price", models.DecimalField(decimal_places=3, max_digits=12)),
(
"exchange_rate",
models.DecimalField(
blank=True,
decimal_places=4,
default=1.0,
max_digits=12,
null=True,
),
),
(
"commission",
models.DecimalField(
blank=True, decimal_places=2, max_digits=12, null=True
),
),
(
"convAmount",
models.DecimalField(decimal_places=2, default=0.0, max_digits=12),
),
(
"amount",
models.DecimalField(decimal_places=2, default=0.0, max_digits=12),
),
("notes", models.CharField(blank=True, max_length=100)),
(
"account",
models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT,
to="portfolio.account",
),
),
(
"stock",
models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT,
to="portfolio.stock",
),
),
(
"user",
models.ForeignKey(
editable=False,
on_delete=django.db.models.deletion.PROTECT,
to=settings.AUTH_USER_MODEL,
),
),
],
options={
"ordering": ("-date", "notes"),
"get_latest_by": "date",
},
),
migrations.CreateModel(
name="Return",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("period", models.IntegerField(choices=[(0, "all time")])),
("irr", models.DecimalField(decimal_places=2, max_digits=12)),
(
"position",
models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT,
to="portfolio.position",
),
),
(
"user",
models.ForeignKey(
editable=False,
on_delete=django.db.models.deletion.PROTECT,
to=settings.AUTH_USER_MODEL,
),
),
],
options={
"verbose_name": "Annualized Return",
"ordering": ("position",),
},
),
migrations.AddField(
model_name="position",
name="stock",
field=models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT, to="portfolio.stock"
),
),
migrations.AddField(
model_name="position",
name="user",
field=models.ForeignKey(
editable=False,
on_delete=django.db.models.deletion.PROTECT,
to=settings.AUTH_USER_MODEL,
),
),
]
|
16,590 | 76ab203ad958270719b6b40f596b358232b87591 | from django.contrib.gis.db import models
# Create your models here.
class Shop(models.Model):
title = models.CharField(max_length=200)
description = models.TextField()
location = models.PointField()
|
16,591 | 05a4cfaaf8412860168028b58abf3ec253bc3bde | import FWCore.ParameterSet.Config as cms
process = cms.Process("LHCX")
process.source = cms.Source("NewEventStreamFileReader",
fileNames = cms.untracked.vstring('file:/tmp/avetisya/run273450/streamPhysicsEGammaCommissioning/data/run273450_ls0065_streamPhysicsEGammaCommissioning_StorageManager.dat')
)
process.output = cms.OutputModule( "PoolOutputModule",
fileName = cms.untracked.string( "Run273450_ls0065_PhysicsEGammaCommissioning_test.root" )
)
process.end = cms.EndPath( process.output )
|
16,592 | feaace838a266ecc4caf04656874d6089eadbed2 | import requests
import os,sys
import json
sys.path.append(os.getcwd())
from common.login import addCookie
from common.environment import env
params={
'appSysNo':None,
'areaCode': None,
'expandIntelliApp': 'true',
'followOrganizationCode': "0001",
'isInternal': None, # null --ๅ
จ็ 1--ๅขๅ
2--ๅขๅค
'onlyImportant': None,
'orderBy': "project.projectLevel DESC, project.SortIndex DESC, project.SysNo DESC",
'overseaCountryCode': None, #
'pageNum': 1,
'pageSize': 10,
'projectStatus': None,# null-- ๆๆ 0-- ็ซฃๅทฅ 1-- ๅจๅปบ
'relationship': 2, # 0-- ๆฌ็บง 2 --ๆฌไธ็บง
'searchKeyword': ""
}
baseUrl=env['baseUrl']
class BaseObject_api:
headers={
'Content-Type':'application/json;charset=UTF-8',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.135 Safari/537.36'
}
url=None
def __init__(self):
if self.headers is None or 'Cookie' not in self.headers.keys():
self.headers=addCookie(self.headers)
def setUrl(self,path):
self.url= (baseUrl+"%s") % path
def get(self,path):
self.setUrl(path)
res=requests.get(self.url,headers=self.headers)
return res
def post(self,path,params):
self.setUrl(path)
self.data=json.dumps(params)
res=requests.post(self.url,data=self.data,headers=self.headers)
return res
class projectMagagement_api(BaseObject_api):
headers={
'Content-Type':'application/json;charset=UTF-8',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.135 Safari/537.36'
}
#
def __init__(self):
super().__init__()
#
# ๆฅ่ฏขๅบ็้กน็ฎๅ
ๅซๆ็้กน็ฎ
def api_getProjects(self,path='api/project/queryProject',args=None):
if not isinstance(args,dict):
raise Exception('invalid type',args)
for key in args.keys():
if key not in params.keys():
raise Exception('invalid input args',args)
params.update(args)
res=self.post(path,params)
print(type(res.text))
#print(json.dumps(res.text))
res=(json.loads(res.text))['data']
return res
def api_queryMyProjects(self,path='api/project/queryMyProject',args=None):
for key in args.keys():
if key not in params.keys():
raise Exception('invalid input args',args)
params.update(args)
res=self.post(path,params)
data=(json.loads(res.text))['data']
return data
# test=projectMagagement_api()
# test.api_getProjects()
|
16,593 | 215d68ce90ec49571bdda0c89b2c5bd8cbea308e | from django.urls import path
from .views import BuyBasketView
urlpatterns = [
path('', BuyBasketView.as_view())
]
|
16,594 | 171f94a69731c170fe1e17fe8188e052a9753842 | def solve():
number_Of_Integers = int(input())
number_Arrays = list(map(int, input().split(' ')))
number_Of_Find_Integers = int(input())
find_Integers = list(map(int, input().split(' ')))
for count in range(number_Of_Integers):
temp = False
for find_Count in range(number_Of_Find_Integers):
if number_Arrays[count] == find_Integers[find_Count]:
print(1)
temp = True
break
if temp is False:
print(0)
solve() |
16,595 | 25deadd038944e95b9f949cea527832e174a37a6 | import pytest
import time
link = "http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/"
def test_add_to_basket_button_is_present(browser):
browser.get(link)
# Uncomment next line to check the page language
#time.sleep(30)
items = browser.find_elements_by_css_selector("form#add_to_basket_form>button.btn.btn-lg.btn-primary.btn-add-to-basket")
assert len(items) > 0 , "'Add to basket' button wasn't found"
|
16,596 | d64d17060894cae2525e42ea99796d4f1b9a5746 | """Test Mapchete default formats."""
import datetime
import json
import pytest
from rasterio.crs import CRS
from tilematrix import TilePyramid
import mapchete
from mapchete import errors
from mapchete.formats import (
available_input_formats,
available_output_formats,
base,
driver_from_extension,
driver_from_file,
dump_metadata,
load_input_reader,
load_metadata,
load_output_reader,
load_output_writer,
read_output_metadata,
)
from mapchete.tile import BufferedTilePyramid
def test_available_input_formats():
"""Check if default input formats can be listed."""
assert set(["Mapchete", "raster_file", "vector_file"]).issubset(
set(available_input_formats())
)
def test_available_output_formats():
"""Check if default output formats can be listed."""
assert set(["GTiff", "PNG", "PNG_hillshade", "GeoJSON"]).issubset(
set(available_output_formats())
)
def test_output_writer_errors():
"""Test errors when loading output writer."""
with pytest.raises(TypeError):
load_output_writer("not_a_dictionary")
with pytest.raises(errors.MapcheteDriverError):
load_output_writer({"format": "invalid_driver"})
def test_output_reader_errors():
"""Test errors when loading output writer."""
with pytest.raises(TypeError):
load_output_reader("not_a_dictionary")
with pytest.raises(errors.MapcheteDriverError):
load_output_reader({"format": "invalid_driver"})
def test_input_reader_errors():
"""Test errors when loading input readers."""
with pytest.raises(TypeError):
load_input_reader("not_a_dictionary")
with pytest.raises(errors.MapcheteDriverError):
load_input_reader({})
with pytest.raises(errors.MapcheteDriverError):
load_input_reader({"abstract": {"format": "invalid_format"}})
def test_driver_from_file_tif():
assert driver_from_file("some.tif") == "raster_file"
def test_driver_from_file_jp2():
assert driver_from_file("some.jp2") == "raster_file"
def test_driver_from_file_geojson():
assert driver_from_file("some.geojson") == "vector_file"
def test_driver_from_file_shp():
assert driver_from_file("some.shp") == "vector_file"
def test_driver_from_file_mapchete():
assert driver_from_file("some.mapchete") == "Mapchete"
def test_driver_from_file_errors(execute_kwargs_py):
"""Test errors when determining input driver from filename."""
with pytest.raises(errors.MapcheteDriverError):
driver_from_file(execute_kwargs_py)
with pytest.raises(FileNotFoundError):
driver_from_file("non_existing_file.tif", quick=False)
def test_mapchete_input(mapchete_input):
"""Mapchete process as input for other process."""
with mapchete.open(mapchete_input.dict) as mp:
config = mp.config.params_at_zoom(5)
input_data = config["input"]["file2"]
assert input_data.bbox()
assert input_data.bbox(CRS.from_epsg(3857))
mp_input = input_data.open(next(mp.get_process_tiles(5)))
assert not mp_input.is_empty()
def test_base_format_classes():
"""Base format classes."""
# InputData
tp = TilePyramid("geodetic")
tmp = base.InputData(dict(pyramid=tp, pixelbuffer=0))
assert tmp.pyramid
assert tmp.pixelbuffer == 0
assert tmp.crs
with pytest.raises(NotImplementedError):
tmp.open(None)
with pytest.raises(NotImplementedError):
tmp.bbox()
with pytest.raises(NotImplementedError):
tmp.exists()
# InputTile
tmp = base.InputTile(None)
with pytest.raises(NotImplementedError):
tmp.read()
with pytest.raises(NotImplementedError):
tmp.is_empty()
# OutputDataWriter
tmp = base.OutputDataWriter(dict(pixelbuffer=0, grid="geodetic", metatiling=1))
assert tmp.pyramid
assert tmp.pixelbuffer == 0
assert tmp.crs
with pytest.raises(NotImplementedError):
tmp.read(None)
with pytest.raises(NotImplementedError):
tmp.write(None, None)
with pytest.raises(NotImplementedError):
tmp.is_valid_with_config(None)
with pytest.raises(NotImplementedError):
tmp.for_web(None)
with pytest.raises(NotImplementedError):
tmp.empty(None)
with pytest.raises(NotImplementedError):
tmp.open(None, None)
@pytest.mark.remote
def test_http_rasters(files_bounds, http_raster):
"""Raster file on remote server with http:// or https:// URLs."""
zoom = 13
config = files_bounds.dict
config.update(input=dict(file1=http_raster), zoom_levels=zoom)
# TODO make tests more performant
with mapchete.open(config) as mp:
assert mp.config.area_at_zoom(zoom).area > 0
tile = next(mp.get_process_tiles(13))
user_process = mapchete.MapcheteProcess(
tile=tile,
params=mp.config.params_at_zoom(tile.zoom),
input=mp.config.get_inputs_for_tile(tile),
)
with user_process.open("file1") as f:
assert f.read().any()
def test_read_from_raster_file(cleantopo_br):
"""Read different bands from source raster."""
with mapchete.open(cleantopo_br.dict) as mp:
tile = mp.config.process_pyramid.tile(5, 0, 0)
user_process = mapchete.MapcheteProcess(
tile=tile,
params=mp.config.params_at_zoom(tile.zoom),
input=mp.config.get_inputs_for_tile(tile),
)
with user_process.open("file1") as f:
assert f.read().shape == f.read([1]).shape == (1, *f.read(1).shape)
def test_invalid_input_type(example_mapchete):
"""Raise MapcheteDriverError."""
# invalid input type
config = example_mapchete.dict
config.update(input=dict(invalid_type=1))
with pytest.raises(errors.MapcheteConfigError):
mapchete.open(config)
def test_old_style_metadata(old_style_metadata_json, old_geodetic_shape_metadata_json):
# deprecated CRS definitions
with pytest.deprecated_call():
assert read_output_metadata(old_style_metadata_json)
# deprecated geodetic shape
with pytest.deprecated_call():
params = read_output_metadata(old_geodetic_shape_metadata_json)
assert params["pyramid"].grid.type == "geodetic"
def test_driver_from_extension_tif():
assert driver_from_extension("tif") == "raster_file"
def test_driver_from_extension_jp2():
assert driver_from_extension("jp2") == "raster_file"
def test_driver_from_extension_geojson():
assert driver_from_extension("geojson") == "vector_file"
def test_driver_from_extension_shp():
assert driver_from_extension("shp") == "vector_file"
def test_driver_from_extension_invalid():
with pytest.raises(ValueError):
driver_from_extension("invalid")
def test_load_metadata_pyramid(driver_metadata_dict):
loaded = load_metadata(driver_metadata_dict)
assert isinstance(loaded["pyramid"], BufferedTilePyramid)
def test_dump_metadata_pyramid(driver_output_params_dict):
dumped = dump_metadata(driver_output_params_dict)
assert isinstance(dumped, dict)
def test_dump_metadata_datetime(driver_output_params_dict):
dumped = dump_metadata(driver_output_params_dict)
assert isinstance(dumped["driver"]["time"]["start"], str)
assert isinstance(dumped["driver"]["time"]["end"], str)
def test_dump_metadata_datetime_list(driver_output_params_dict):
dumped = dump_metadata(driver_output_params_dict)
for t in dumped["driver"]["time"]["steps"]:
assert isinstance(t, str)
def test_load_metadata_datetime(driver_output_params_dict):
loaded = load_metadata(dump_metadata(driver_output_params_dict))
assert isinstance(loaded["driver"]["time"]["start"], datetime.date)
def test_load_metadata_datetime_list(driver_output_params_dict):
loaded = load_metadata(dump_metadata(driver_output_params_dict))
for t in loaded["driver"]["time"]["steps"]:
assert isinstance(t, datetime.date)
def test_tile_path_schema(tile_path_schema):
mp = tile_path_schema.mp()
mp.batch_process()
tile = tile_path_schema.first_process_tile()
control = [str(tile.zoom), str(tile.col), str(tile.row) + ".tif"]
assert mp.config.output_reader.get_path(tile).elements[-3:] == control
def test_tile_path_schema_metadata_json(tile_path_schema):
mp = tile_path_schema.mp()
mp.batch_process()
tile = tile_path_schema.first_process_tile()
output_metadata = read_output_metadata(
mp.config.output_reader.path / "metadata.json"
)
output_params = dict(
output_metadata["driver"],
path=mp.config.output_reader.path,
**output_metadata["pyramid"].to_dict()
)
output_reader = load_output_reader(output_params)
assert mp.config.output_reader.tile_path_schema == output_reader.tile_path_schema
assert mp.config.output_reader.get_path(tile) == output_reader.get_path(tile)
def test_tile_path_schema_stac_json(tile_path_schema):
mp = tile_path_schema.mp()
mp.batch_process()
mp.write_stac()
stac_json = json.loads((mp.config.output_reader.path / "out.json").read_text())
template = stac_json.get("asset_templates")["bands"]["href"]
assert template.split("/")[-2] == "{TileCol}"
assert template.split("/")[-1] == "{TileRow}.tif"
|
16,597 | 899a446e9c9479a2b5ff5889f64a720b628c14ac | import math
def mdc(x,y,z):
return mdcAux(x, mdcAux(y, z))
def mdcAux(x, y):
if (x%y == 0):
return y
else:
return mdcAux(y,x % y)
def a(x,y,z):
s = (y**2) + (z**2)
if(s == x**2):
return True
else:
return False
def b(x,y,z):
s = (x**2) + (z**2)
if(s == y**2):
return True
else:
return False
def c(x,y,z):
s = (x**2) + (y**2)
if(s == z**2):
return True
else:
return False
while True:
try:
p1, p2, p3 = map(int, input().split())
if mdc(p1,p2,p3) == 1 and (a(p1,p2,p3) or b(p1,p2,p3) or c(p1,p2,p3)):
print("tripla pitagorica primitiva")
elif a(p1,p2,p3) or b(p1,p2,p3) or c(p1,p2,p3):
print("tripla pitagorica")
else:
print("tripla")
except EOFError:
break |
16,598 | eaa251901a5f0542bf8350f3ac126bd40c8702f3 | from src.architecture.ImageConverter import BinVec2RGBMatrix, RGBMatrix2BinVec
from src.architecture.Input import importImage, exportImage
from src.util import Function as Function
def simulateNoiseEffect(p, input_file, output_file):
image = importImage(input_file)
bin_image = RGBMatrix2BinVec(image)
rows = image.getRows()
cols = image.getCols()
e = Function.noise(p, 24 * rows * cols)
e.setRows(rows)
e.setCols(cols)
Function.apply_noise(bin_image, e)
result = BinVec2RGBMatrix(bin_image)
exportImage(output_file, result)
|
16,599 | 23c129e93a3ccbb284b941a61640dad694e9883d | import csv
import json
from StringIO import StringIO
content = open('case_studies_aliases.txt', 'r')
jsonfile = open('case_studies_aliases.json', 'w')
reader = csv.DictReader( content, quotechar='"', delimiter=';', escapechar='\\')
out=[]
for row in reader:
row['filename'] = row['filename'].split('/')[-1]
out.append(row)
print "Total rows: %d\n" % len(out)
jsonfile.write(json.dumps( out, sort_keys=True, indent=2))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.