text stringlengths 38 1.54M |
|---|
import csv
import json

# Parse failed-inspection rows, pull the individual violations out of the
# pipe-delimited "Violations" column, and write the de-duplicated
# [number, category] pairs to masterviolations.json.

with open('failed_inspections.csv', 'r') as csvfile:
    all_failed = list(csv.DictReader(csvfile))

masterviolations = []
for row in all_failed:
    # Each entry looks like "NN. CATEGORY - Comments: free text".
    for v in (part.strip() for part in row['Violations'].split('|')):
        if not v:
            continue
        # maxsplit=1 tolerates "- Comments:" appearing inside the comment text.
        txt, comments = v.split('- Comments:', 1)
        pieces = txt.split('.')
        num = pieces[0].strip()
        # Re-join on '.' so categories containing periods survive intact
        # (the original joined with spaces, silently mangling them).
        category = '.'.join(pieces[1:]).strip()
        masterviolations.append({"number": int(num), "category": category})

# De-duplicate via a set of tuples, then emit JSON-friendly lists.
viols = set((m['number'], m['category']) for m in masterviolations)
vfinal = [list(v) for v in viols]

# Context manager guarantees the output file is flushed and closed.
with open("masterviolations.json", "w") as f:
    f.write(json.dumps(vfinal, indent=2))
|
# Generated by Django 2.0.7 on 2018-08-08 13:10
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: rename the `mycourses` model to `mycourse`."""

    dependencies = [
        ('purelms', '0013_auto_20180808_0213'),
        ('dashboard', '0002_mycourses_course'),
    ]

    operations = [
        migrations.RenameModel(
            old_name='mycourses',
            new_name='mycourse',
        ),
    ]
|
from django.conf.urls import url
from project.apps.core.views import (
HomeView, PostListView, OAuthPostListView, LogoutRedirectView)
urlpatterns = [
    # Landing page.
    url(r'^$', HomeView.as_view(), name='home'),
    # Posts belonging to an explicit numeric user id.
    url(
        r'^user/(?P<user_id>[0-9]+)/posts/$',
        PostListView.as_view(), name='post_list'
    ),
    # Posts for the OAuth-authenticated user ("me").
    url(
        r'^user/me/posts/$',
        OAuthPostListView.as_view(),
        name='oauth_post_list'
    ),
    # Post-logout redirect target.
    url(r'^redirect/$', LogoutRedirectView.as_view(), name='logout_redirect'),
]
|
import sys
sys.path.append('build')
import AvTrajectoryPlanner as av
import math

# --- Scenario 1: short hop past one static and one dynamic obstacle. ---
planner = av.Planner(av.AvState(0,0,0,0,0), av.AvState(5,1,0,0,0), av.AvParams(1.0,1.0,0.5,4, 3), av.Boundary([av.Point(0.5,0.5), av.Point(-0.5, 0.5), av.Point(-0.5, -0.5), av.Point(0.5, -0.5)]), av.SolverParams(6, 0.01, 0.1, 3, True, True))

obstacle = av.ObstacleStatic()
obstacle.outline = av.Boundary([av.Point(0.5,0.2), av.Point(-0.5, 0.2), av.Point(-0.5, -0.2), av.Point(0.5, -0.2)])
obstacle.obs_pose = av.Pose(3,0.5, 0.8)
planner.appendObstacleStatic(obstacle)

# Dynamic obstacle sweeping a square path, one pose every 2 time units.
dynamic_traj = av.ObstacleTrajectory()
dynamic_traj.dt = 2
dynamic_traj.outline = av.Boundary([av.Point(0.5,0.5), av.Point(-0.5, 0.5), av.Point(-0.5, -0.5), av.Point(0.5, -0.5)])
dynamic_traj.table = [av.Pose(0,3,0), av.Pose(3,3,0), av.Pose(3,0,0), av.Pose(0,0,0)]
planner.appendObstacleTrajectory(dynamic_traj)

# Context managers ensure the scenario files are flushed and closed
# (the original opened both files and never closed either).
scenario_json = planner.saveToJson()
with open("scenarios/simple_trajectory_1.txt", "w") as output:
    output.write(scenario_json)

# --- Scenario 2: this one is meant to be a really nice trajectory. ---
# Car-shaped footprint shared by the AV and the parked-car obstacles.
car_boundary = av.Boundary([av.Point(2.5,0.7), av.Point(-0.7, 0.7), av.Point(-0.7, -0.7), av.Point(2.5, -0.7)])
planner = av.Planner(av.AvState(0,0,0,0,0), av.AvState(15,2,0.3,0,0), av.AvParams(1.0,1.0,0.5,4, 3), car_boundary , av.SolverParams(6, 0.01, 0.1, 3, False, False))

# Three parked cars along the route.
obstacle = av.ObstacleStatic()
obstacle.outline = car_boundary
obstacle.obs_pose = av.Pose(6,-1.0, 0.0)
planner.appendObstacleStatic(obstacle)

obstacle = av.ObstacleStatic()
obstacle.outline = car_boundary
obstacle.obs_pose = av.Pose(11,-1.0, 0.0)
planner.appendObstacleStatic(obstacle)

obstacle = av.ObstacleStatic()
obstacle.outline = car_boundary
obstacle.obs_pose = av.Pose(16,-0.5, 0.3)
planner.appendObstacleStatic(obstacle)

# One oncoming/moving car travelling left-to-right.
dynamic_traj = av.ObstacleTrajectory()
dynamic_traj.dt = 4
dynamic_traj.outline = car_boundary
dynamic_traj.table = [av.Pose(-4,1,0), av.Pose(10,1,0)]
planner.appendObstacleTrajectory(dynamic_traj)

scenario_json = planner.saveToJson()
with open("simulator/sample_trajectory.txt", "w") as output:
    output.write(scenario_json)
|
#!/usr/bin/python
import networkx as nx
from networkx.readwrite import json_graph
import mkit.inference.ip_to_asn as ip2asn
import mkit.inference.ixp as ixp
import mkit.ripeatlas.parse as parse
import mkit.inference.ippath_to_aspath as asp
import os
import pdb
import settings
import json
import glob
msms = []
def parse_caida_json_streaming(fname):
    """Stream a scamper JSON dump line-by-line and collect the
    (src ASN, dst ASN) pair of every traceroute that ran to completion."""
    as_pairs = []
    with open(fname) as handle:
        for raw_line in handle:
            record = json.loads(raw_line)
            # Skip traceroutes that did not reach their destination.
            if record['stop_reason'] != 'COMPLETED':
                continue
            dest_asn = ip2asn.ip2asn_bgp(record['dst'])
            source_asn = ip2asn.ip2asn_bgp(record['src'])
            # Only keep pairs where both endpoints resolved to an ASN.
            if source_asn and dest_asn:
                as_pairs.append((int(source_asn), int(dest_asn)))
    return as_pairs
overall_path_list = []
files = filter(os.path.isfile, glob.glob(settings.CAIDA_DATA + "*"))
for fname in files:
print "Converting %s to JSON" % fname
convert_to_json_cmd = "sc_warts2json %s > %s" % (fname, fname+".json")
os.system(convert_to_json_cmd)
overall_path_list.extend(parse_caida_json_streaming(fname+'.json'))
print "Removing the JSON file to save space"
os.system("rm %s" % fname+".json")
overall_path_list = list(frozenset(overall_path_list))
with open(settings.MEASURED_CAIDA, "w") as fi:
json.dump(overall_path_list, fi)
|
import pytest
import pandas as pd
import pandas.util.testing as pdt
import os
import sys
import logging
sys.path.append(os.path.abspath('./src'))
from train_model import data_filter
# Append all test logging to a shared logfile so repeated runs accumulate history.
logging.basicConfig(level=logging.DEBUG, filename="test_logfile", filemode="a+",
                    format="%(asctime)-15s %(levelname)-8s %(message)s")
logger = logging.getLogger(__name__)

# The exact feature columns train_model.data_filter is expected to keep,
# in the expected order.
selected_features = ['funding_rounds', 'founded_month', 'founded_quarter', 'founded_year',
    'country_esp', 'country_ind', 'country_other', 'country_usa', 'days_to_fund', 'months_to_fund',
    'days_between_rounds', 'months_between_rounds', 'funding_round_type_debt_financing',
    'funding_round_type_post_ipo_debt', 'funding_round_type_post_ipo_equity',
    'funding_round_type_private_equity', 'funding_round_type_venture', 'unique_investors',
    'median_investor_value', 'no_acquisitions', 'no_ipos', 'market_biotechnology',
    'market_clean technology', 'market_enterprise software', 'market_finance', 'market_health and wellness',
    'market_hospitality', 'market_internet', 'market_mobile', 'market_other', 'raised_amount_usd_mean']

# Test to check if correct features were selected.
def test_filter():
    """data_filter should return a frame whose columns equal selected_features exactly."""
    df = data_filter('data/auxiliary/aggregated_data.csv', selected_features)
    assert(list(df.columns) == selected_features)
|
#!/usr/bin/python
# Python 2 demo of the bitwise operators (&, |, ^, ~, <<, >>) applied to the
# same style of operand expressed as decimal, octal, hex and binary literals.

# --- Decimal operands ---
v1=45
v2=56
res=v1&v2
print "Result of & operation is ",res
res=v1|v2
print "Result of | operation is ",res
res=v1^v2
print "Result of ^ operation is ",res
res=~v1
print "Result of ~v1 operation is ",res
res=~v2
print "Result of ~v2 operation is ",res
res=v1<<1
print "Result of V1<<1 operation is ",res
res=v1>>2
print "Result of V1>>2 operation is ",res
print

# --- Octal operands (034 is Python-2-only octal syntax; 0o34 in Python 3) ---
v1=034
v2=045
res=v1&v2
print "Result of & operation is ",res
res=v1|v2
print "Result of | operation is ",res
res=v1^v2
print "Result of ^ operation is ",res
res=~v1
print "Result of ~v1 operation is ",res
res=~v2
print "Result of ~v2 operation is ",res
res=v1<<2
print "Result of V1<<2 operation is ",res
res=v1>>3
print "Result of V1>>3 operation is ",res
print

# --- Hexadecimal operands ---
v1=0xAB
v2=0x89
res=v1&v2
print "Result of & operation is ",res
res=v1|v2
print "Result of | operation is ",res
res=v1^v2
print "Result of ^ operation is ",res
res=~v1
print "Result of ~v1 operation is ",res
res=~v2
print "Result of ~v2 operation is ",res
res=v1<<2
print "Result of V1<<2 operation is ",res
res=v1>>3
print "Result of V1>>3 operation is ",res
print

# --- Binary operands ---
v1=0b11011100
v2=0b01101010
res=v1&v2
print "Result of & operation is ",res
res=v1|v2
print "Result of | operation is ",res
res=v1^v2
print "Result of ^ operation is ",res
res=~v1
print "Result of ~v1 operation is ",res
res=~v2
print "Result of ~v2 operation is ",res
res=v1<<2
print "Result of V1<<2 operation is ",res
res=v1>>3
print "Result of V1>>3 operation is ",res
|
# -*- coding: utf-8 -*-
import os
import subprocess
from datetime import datetime
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from tools.express import models as express_models
from mall import models as mall_models
class Command(BaseCommand):
    help = "init express for version 2"
    args = ''

    def handle(self, **options):
        """Backfill express push-status counters and link each ExpressDetail
        row to its matching ExpressHasOrderPushStatus record (Python 2)."""
        print self.help
        # Mark every push-status row as having been sent/received once.
        express_models.ExpressHasOrderPushStatus.objects.all().update(send_count=1, receive_count=1)
        details = express_models.ExpressDetail.objects.filter(order_id__gt=0)
        order_ids = [d.order_id for d in details]
        orders = mall_models.Order.objects.filter(id__in=order_ids)
        # Maps order id -> Order object (despite the name reading the other way).
        order2id = dict([(o.id, o) for o in orders])
        for detail in details:
            print '-------------[ExpressDetail_id: {}]'.format(detail.id)
            order = order2id[detail.order_id]
            # Match push-status rows by the order's courier name + tracking number.
            expresses = express_models.ExpressHasOrderPushStatus.objects.filter(
                express_company_name=order.express_company_name,
                express_number=order.express_number)
            if expresses.count() > 0:
                express = expresses[0]
                detail.express_id = express.id
                detail.save()
|
# Make a dictionary of four words and look up the word the user types.
print("Enter The Word")
# Renamed from `dict` so the builtin dict type is not shadowed.
word_meanings = {"Cat": "Pussy", "Bat": "Vampire", "Dog": "Bark", "Hat": "Cap"}
inp = input()
# .get() prints None for unknown words instead of raising KeyError.
print(word_meanings.get(inp))
|
class Solution(object):
    def minimumDeleteSum(self, s1, s2):
        """
        :type s1: str
        :type s2: str
        :rtype: int

        Edit-distance style DP: dp[i][j] is the lowest ASCII delete-sum
        needed to make s1[:i] and s2[:j] equal.
        """
        rows, cols = len(s1) + 1, len(s2) + 1
        dp = [[0] * cols for _ in range(rows)]
        # First column / first row: deleting every character of the prefix.
        for i in range(1, rows):
            dp[i][0] = dp[i - 1][0] + ord(s1[i - 1])
        for j in range(1, cols):
            dp[0][j] = dp[0][j - 1] + ord(s2[j - 1])
        for i in range(1, rows):
            for j in range(1, cols):
                if s1[i - 1] == s2[j - 1]:
                    # Matching characters cost nothing extra.
                    dp[i][j] = dp[i - 1][j - 1]
                else:
                    # Delete from s1 or from s2, whichever is cheaper.
                    dp[i][j] = min(dp[i - 1][j] + ord(s1[i - 1]),
                                   dp[i][j - 1] + ord(s2[j - 1]))
        return dp[-1][-1]
|
from ola.ClientWrapper import ClientWrapper
import array
"""Python 2 script to test operation of PAR lights with the DMX interface."""
def DmxHandler(status):
    """Callback invoked by OLA once the DMX frame send completes;
    reports success or the failure message on stdout."""
    if not status.Succeeded():
        print('Error: ' + status.message)
    else:
        print('Success!')
if __name__ == '__main__':
    # Write to the 1st 8 channels. The array must be 512 bytes long, so all
    # channels have a value. Not doing this can result in undefined behaviour.
    # Set the light to fade mode, blue color, 75% speed.
    arr = array.array('B', [140, 80, 192, 0, 0, 0, 0, 0] + [0] * 504)
    wrapper = ClientWrapper()
    client = wrapper.Client()
    # Send the frame on universe 0; DmxHandler reports the outcome.
    # NOTE(review): wrapper.Run() is commented out, so the OLA event loop
    # never runs — confirm the send/callback actually executes without it.
    client.SendDmx(0, arr, DmxHandler)
    #wrapper.Run()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.21 on 2019-09-18 18:23
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: set bibliography ordering/verbose name and
    add IIIF manifest/canvas links to bibliography and footnote."""

    dependencies = [
        ('djiffy', '0003_extra_data_revisions'),
        ('footnotes', '0002_footnote_is_agree_default_true'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='bibliography',
            options={'ordering': ('bibliographic_note',), 'verbose_name_plural': 'Bibliographies'},
        ),
        # SET_NULL: deleting a manifest must not delete the bibliography.
        migrations.AddField(
            model_name='bibliography',
            name='manifest',
            field=models.ForeignKey(blank=True, help_text='Digitized version of lending card, if locally available', null=True, on_delete=django.db.models.deletion.SET_NULL, to='djiffy.Manifest'),
        ),
        migrations.AddField(
            model_name='footnote',
            name='image',
            field=models.ForeignKey(blank=True, help_text='Image location from an imported manifest, if available.', null=True, on_delete=django.db.models.deletion.CASCADE, to='djiffy.Canvas'),
        ),
    ]
|
import graphene
from sagas.ofbiz.schema_base import ModelBase
from sagas.ofbiz.schema_queries_g import *
from sagas.ofbiz.runtime_context import platform
class TestingTypeInput(graphene.InputObjectType):
    """GraphQL input payload for creating a TestingType entity."""
    testing_type_id = graphene.String()
    description = graphene.String()
class CreateTestingType(graphene.Mutation):
    """Mutation: create a TestingType from a TestingTypeInput payload."""

    class Arguments:
        testing_type_data = TestingTypeInput(required=True)

    testing_type = graphene.Field(lambda: TestingType)
    Output = TestingType

    @staticmethod
    def mutate(root, info, testing_type_data=None):
        # Conversion/persistence is delegated to the OFBiz platform helper.
        testing_type = platform.helper.input_to_dictionary(testing_type_data, "TestingType", TestingType)
        return testing_type
class TestingInput(graphene.InputObjectType):
    """GraphQL input payload describing a Testing record."""
    comments = graphene.String()
    testing_type_id = graphene.String()
    testing_size = graphene.Int()
    testing_id = graphene.String()
    description = graphene.String()
    testing_date = graphene.String()
    testing_name = graphene.String()
class CreateTesting(graphene.Mutation):
    """Mutation: create a Testing entity from a TestingInput payload."""

    class Arguments:
        testing_data = TestingInput(required=True)

    testing = graphene.Field(lambda: Testing)
    Output = Testing

    @staticmethod
    def mutate(root, info, testing_data=None):
        # Conversion/persistence is delegated to the OFBiz platform helper.
        testing = platform.helper.input_to_dictionary(testing_data, "Testing", Testing)
        return testing
class Mutations(graphene.ObjectType):
    """Root mutation type exposing the create_* mutations above."""
    create_testing_type = CreateTestingType.Field()
    create_testing = CreateTesting.Field()
|
class Solution(object):
    def isNumber(self, s):
        """
        :type s: str
        :rtype: bool

        Single left-to-right scan with four flags:
          seen_digit - a digit was read since the start / since the 'e'
          seen_sign  - a sign, dot or digit already occupied the sign slot
          seen_exp   - an 'e'/'E' was consumed (at most one allowed)
          seen_dot   - a '.' was consumed (also set on entering the
                       exponent, since exponents must be integers)
        """
        if not s:
            return False
        seen_digit = seen_sign = seen_exp = seen_dot = False
        for ch in s.strip():
            if '0' <= ch <= '9':
                seen_digit = seen_sign = True
            elif ch == '.' and not seen_dot:
                seen_dot = seen_sign = True
            elif (ch == 'e' or ch == 'E') and not seen_exp and seen_digit:
                # Exponent starts a fresh part: digits and a sign are
                # allowed again, but no further dot or exponent.
                seen_digit = seen_sign = False
                seen_dot = seen_exp = True
            elif (ch == '+' or ch == '-') and not seen_digit and not seen_sign:
                seen_sign = True
            else:
                return False
        # Valid only if the final part actually contained a digit.
        return seen_digit
##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import glob
class CnsNospec(MakefilePackage):
    """A simple, explicit, stencil-based test code for integrating
    the compressible Navier-Stokes equations. The code uses
    8th order finite differences in space and a 3rd order,
    low-storage TVD RK algorithm in time."""

    homepage = "https://ccse.lbl.gov/ExaCT/index.html"
    url = "https://ccse.lbl.gov/ExaCT/CNS_Nospec.tgz"

    tags = ['proxy-app']

    version('master', '14ff5be62539d829b30b17281688ee3f')

    variant('mpi', default=True, description='Build with MPI support')
    variant('debug', default=False, description='Build with debugging')
    variant('omp', default=False, description='Build with OpenMP support')
    variant('prof', default=False, description='Build with profiling')

    depends_on('mpi', when='+mpi')
    depends_on('gmake', type='build')

    # The proxy app lives in a subdirectory of the tarball.
    build_directory = 'MiniApps/CNS_NoSpec'

    def edit(self, spec, prefix):
        """Toggle GNUmakefile switches to match the selected variants."""
        with working_dir(self.build_directory):
            makefile = FileFilter('GNUmakefile')
            if '+mpi' in spec:
                makefile.filter('MPI .*', 'MPI := t')
            if '+debug' in spec:
                # Commenting out NDEBUG enables the debug build.
                makefile.filter('NDEBUG.*', '#')
            if '+omp' in spec:
                makefile.filter('OMP.*', 'OMP := t')
            if '+prof' in spec:
                makefile.filter('PROF.*', 'PROF := t')

    def install(self, spec, prefix):
        """Copy every built *.exe from the build directory into prefix/bin."""
        mkdirp(prefix.bin)
        files = glob.glob(join_path(self.build_directory, '*.exe'))
        for f in files:
            install(f, prefix.bin)
|
#https://www.tutorialspoint.com/python/python_command_line_arguments.htm
#https://www.cyberciti.biz/faq/python-command-line-arguments-argv-example/
#http://www.diveintopython.net/scripts_and_streams/command_line_arguments.html
# An example of sending command line arguments to your python program (Python 2).
import sys
import subprocess
print 'arguments found:', len(sys.argv)
print 'command line arguments', sys.argv
#cmdargs = str(sys.argv)
#try:
# Count of recognized option flags found on the command line.
validflags=0
if len(sys.argv) > 1:
    #if sys.argv[1]=="--debug": #this particulary checks arg1.
    #    print 'Debug arg recevied in position 1'
    #    cmd='echo Debug option selected'
    #    q=subprocess.Popen(cmd,shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    #    out = q.communicate()[0]
    #    print out
    #if "--debug" or "--dbg" in str(sys.argv): #this works but will catch debug even if it is part of another word (i.e --gtkdebug)
    #    print '--debug arg received somewhere within args'
    # Scan every argument; argv[0] is the script's own name and is skipped.
    for arg in sys.argv:
        if arg==sys.argv[0]:
            print "ignoring scripts name"
        elif arg=="--nfr":
            print '--NFR arg received somewhere within args'
            validflags=validflags+1
        elif arg=="--pymouse":
            print "Pymouse arg received somewhere"
            validflags=validflags+1
        else:
            print arg,"is not a valid arg"
# Fall back to defaults when no recognized flag was supplied.
if len(sys.argv)==1 or validflags==0:
    print "No Valid flags sent at all. Default Values will be used"
#except IndexError:
#    pass
|
import pygame
class Board:
    """Toggle-grid board: clicking a cell flips every cell in its row and
    its column (the clicked cell itself is flipped exactly once)."""

    def __init__(self, width, height):
        self.width = width
        self.height = height
        # board[row][col]; 0 = off, 1 = on.
        self.board = [[0] * width for _ in range(height)]
        # Default viewport placement; override with set_view().
        self.left = 60
        self.top = 40
        self.cell_size = 30

    def set_view(self, left, top, cell_size):
        """Reposition/resize the grid on screen."""
        self.left = left
        self.top = top
        self.cell_size = cell_size

    def render(self):
        """Draw the grid onto the module-level `screen` surface.

        Rect width 1 - board[x][y] means: 1 => outline only (cell off),
        0 => filled rectangle (cell on).
        NOTE(review): relies on the global `screen` created at module
        scope — consider passing the surface in explicitly.
        """
        for x in range(self.height):
            for y in range(self.width):
                pygame.draw.rect(screen, (255, 255, 255),
                                 (self.left + y * self.cell_size,
                                  self.top + x * self.cell_size,
                                  self.cell_size, self.cell_size),
                                 1 - self.board[x][y])

    def get_cell(self, mouse_pos):
        """Map a pixel position to (col, row), or None when outside the grid."""
        cell = ((mouse_pos[0] - self.left) // self.cell_size,
                (mouse_pos[1] - self.top) // self.cell_size)
        if 0 <= cell[0] < self.width and 0 <= cell[1] < self.height:
            return cell
        return None

    def get_click(self, mouse_pos):
        """Translate a click position into an on_click() toggle."""
        cell = self.get_cell(mouse_pos)
        self.on_click(cell)

    def on_click(self, cell_coords):
        """Toggle the full row and column of the clicked (col, row) cell.

        Fixed: the original hard-coded range(7)/range(5) and relied on the
        odd 5x7 dimensions for the repeated toggles to cancel out; using
        the board's real dimensions gives the same net effect on 5x7 and
        works for any board size.
        """
        if not cell_coords:
            return
        col, row = cell_coords
        # Flip the column (excluding the clicked row, which the row pass flips).
        for i in range(self.height):
            if i != row:
                self.board[i][col] = 1 - self.board[i][col]
        # Flip the entire clicked row, including the clicked cell.
        for j in range(self.width):
            self.board[row][j] = 1 - self.board[row][j]
pygame.init()
size = width, height = 300, 300
screen = pygame.display.set_mode(size)
# 5 columns x 7 rows toggle board, drawn each frame.
board = Board(5, 7)
running = True
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
        if event.type == pygame.MOUSEBUTTONDOWN:
            # Toggle the clicked cell's row and column.
            board.get_click(event.pos)
    screen.fill((0, 0, 0))
    board.render()
    pygame.display.flip()
pygame.quit()
|
import cv2

# video = cv2.VideoCapture(0)
# Load OpenCV's pretrained frontal-face Haar cascade.
# NOTE(review): hard-coded absolute Windows path — breaks on other machines;
# cv2.data.haarcascades + filename is portable. Confirm before shipping.
faceCascade = cv2.CascadeClassifier("C:\\Python\\Python38\\Lib\\site-packages\\cv2\\data\\haarcascade_frontalface_default.xml")
src_image = cv2.imread("manutd.jpg")
# Haar cascades operate on grayscale input.
gray_image = cv2.cvtColor(src_image, cv2.COLOR_BGR2GRAY)
# Detect faces in the image
faces_rects = faceCascade.detectMultiScale(gray_image, scaleFactor = 1.1, minNeighbors = 2)
print(type(faces_rects))
print(faces_rects[0])
# NOTE(review): raises IndexError when fewer than two faces are detected —
# confirm the test image always contains 2+ faces.
print(faces_rects[1])
(a,b,c,d) = faces_rects[0]  # each rect is (x, y, width, height)
print(b)
# Draw a green box around every detected face, then show the result.
for(x,y,w,h) in faces_rects:
    cv2.rectangle(src_image, (x, y), (x+w, y+h), (0,255,0), 2)
cv2.imshow("Face", src_image)
cv2.waitKey(0)
###
### Copyright (C) 2002-2003 Ximian, Inc.
###
### This program is free software; you can redistribute it and/or modify
### it under the terms of the GNU General Public License, version 2,
### as published by the Free Software Foundation.
###
### This program is distributed in the hope that it will be useful,
### but WITHOUT ANY WARRANTY; without even the implied warranty of
### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
### GNU General Public License for more details.
###
### You should have received a copy of the GNU General Public License
### along with this program; if not, write to the Free Software
### Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
###
import os, string, types, re, gobject, gtk
import red_pixbuf
## The format looks like "<Control>a" or "<Shift><Alt>F1.
## The parser is fairly liberal and allows lower or upper case, and also
## abbreviations such as "<Ctl>" and "<Ctrl>".
class AcceleratorParser:
    """Parse accelerator strings like "<Control>a" or "<Shift><Alt>F1".

    The parser is fairly liberal: modifier names are case-insensitive and
    only the first letter is examined, so abbreviations such as "<Ctl>"
    and "<Ctrl>" work too. Results are retrieved via key() and mods().
    """

    def __init__(self, s=None):
        self.__key = None
        self.__mods = 0
        # One "<word>" per modifier; findall yields the word inside the
        # brackets. (Original pattern "<([a-z]+)+>" had a redundant outer
        # quantifier — a needless backtracking hazard with equal matches.)
        self.pattern = re.compile("<([a-z]+)>", re.IGNORECASE)
        self.parse(s)

    def parse(self, s=None):
        """Parse accelerator string `s`; clears any previous result first."""
        self.__key = None
        self.__mods = 0
        if not s:
            return
        mods = self.pattern.findall(s)
        self.parse_mods(mods)
        # Whatever remains after stripping "<...>" groups is the key name.
        key = self.pattern.sub("", s)
        self.parse_key(key)
        # No key, no joy!
        if not self.key():
            # Fixed: was `self.mods = 0`, which shadowed the mods() method
            # with an int instead of clearing the parsed modifier bits.
            self.__mods = 0

    def key(self):
        return self.__key

    def mods(self):
        return self.__mods

    ## End of public methods

    def parse_mods(self, mods=None):
        """Fold the modifier words into a gtk.gdk modifier bitmask."""
        if not mods:
            return
        for m in mods:
            m = m[0].lower()
            if m == 's':
                self.__mods |= gtk.gdk.SHIFT_MASK
            elif m == 'c':
                self.__mods |= gtk.gdk.CONTROL_MASK
            elif m == 'a':
                self.__mods |= gtk.gdk.MOD1_MASK

    def parse_key(self, key):
        """Resolve the key name to a keyval, or clear it when empty."""
        if key:
            self.__key = gtk.gdk.keyval_from_name(key)
        else:
            # Fixed: was `sel.__key = None` — a NameError on this path.
            self.__key = None
class MenuBar(gtk.MenuBar):
    """Menu bar built from path-addressed item descriptions.

    Items are queued with add("/File/Quit", ...) before the widget is
    realized; on realize, construct() turns the queued descriptions into a
    gtk menu tree, and the custom "refresh_items" signal re-evaluates each
    item's visibility/sensitivity/checked state on demand.
    """

    def __init__(self, accel_group=None):
        gobject.GObject.__init__(self)
        #self.accel_group = accel_group
        #if accel_group:
        #    accel_group.connect("accel-activate",
        #                        lambda g,o,x,y,this:this.refresh_items(),
        #                        self)
        #self.accel_parser = AcceleratorParser()
        self.constructed = 0
        # Descriptions queued by add(), in insertion order, plus a hash
        # keyed by the underscore-stripped path for collision checks.
        self.pending_items = []
        self.pending_items_hash = {}
        self.user_data = None
        self.statusbar = None
        # Automatically construct our menu items, and refresh the items,
        # when we are realized.
        def on_realize(x):
            x.construct()
            x.refresh_items()
        self.connect("realize",
                     on_realize)

    def set_statusbar(self, statusbar):
        # Statusbar used for per-item description flashes on select/deselect.
        self.statusbar = statusbar

    def set_user_data(self, x):
        # Opaque value handed to every item callback.
        self.user_data = x

    def refresh_items(self):
        # Broadcast to every per-item refresh handler hooked up in construct().
        self.emit("refresh_items")

    def add(self, path,
            description=None,
            callback=None,
            with_dropdown_arrow=0,
            is_separator=0,
            visible_fn=None,
            sensitive_fn=None,
            stock=None,
            image=None,
            pixbuf=None,
            pixbuf_name=None,
            checked_get=None, checked_set=None,
            radiogroup=None,
            radiotag=None,
            radio_get=None,
            radio_set=None,
            accelerator=None):
        """Queue one menu item description; must run before construct()."""
        if self.constructed:
            print "Can't add '%s' to an already-constructed menu bar." \
                  % path
            assert 0
        prefix, name = os.path.split(path)
        # Underscores are mnemonic markers; strip them from the lookup key.
        path = string.replace(path, "_", "")
        if self.pending_items_hash.has_key(path):
            print "Collision: there is already a menu item with path '%s'" \
                  % path
            assert 0
        # At most one of pixbuf_name / pixbuf / image may be supplied.
        if pixbuf_name:
            assert not pixbuf and not image
            image = red_pixbuf.get_widget(pixbuf_name)
        if pixbuf:
            assert not pixbuf_name and not image
            image = gtk.Image()
            image.set_from_pixbuf(pixbuf)
        item = {"path":path,
                "name":name,
                "description":description,
                "callback":callback,
                "with_dropdown_arrow":with_dropdown_arrow,
                "is_separator":is_separator,
                "visible_fn":visible_fn,
                "sensitive_fn":sensitive_fn,
                "stock":stock,
                "image":image,
                "checked_get":checked_get,
                "checked_set":checked_set,
                "radiogroup":radiogroup,
                "radiotag":radiotag,
                "radio_get":radio_get,
                "radio_set":radio_set,
                "accelerator":accelerator,
                }
        self.pending_items.append(item)
        self.pending_items_hash[path] = item

    def exercise_menubar(self):
        """Debug helper: fire every non-Debug, non-Quit item callback."""
        for item in self.pending_items:
            if item["path"][:7] != "/Debug/" \
               and item["path"] != "/File/Quit" \
               and item["callback"]:
                print item["path"]
                item["callback"](self.user_data)

    def construct(self):
        """Build the gtk menu tree from the queued item descriptions."""
        # We can only be constructed once.
        if self.constructed:
            return
        self.constructed = 1
        # Map each prefix path to the list of child basenames beneath it.
        tree_structure = {}
        radiogroups = {}
        for item in self.pending_items:
            prefix, base = os.path.split(item["path"])
            if tree_structure.has_key(prefix):
                tree_structure[prefix].append(base)
            else:
                tree_structure[prefix] = [base]

        def walk_tree(prefix, parent_menu):
            # Recursively create widgets for every item under `prefix`.
            for name in tree_structure[prefix]:
                path = os.path.join(prefix, name)
                item = self.pending_items_hash[path]
                needs_refresh = item["visible_fn"] or \
                                item["sensitive_fn"]
                is_leaf = not tree_structure.has_key(path)
                item_name = item["name"] or ""
                ### Flag items that aren't hooked up to callbacks.
                if is_leaf and not item["callback"]:
                    item_name = item_name + " (inactive)"
                if item["is_separator"]:
                    menu_item = gtk.SeparatorMenuItem()
                elif item["stock"]:
                    #menu_item = gtk.ImageMenuItem(item["stock"],
                    #                              self.accel_group)
                    menu_item = gtk.ImageMenuItem(item["stock"])
                elif item["image"]:
                    menu_item = gtk.ImageMenuItem(item["name"])
                    menu_item.set_image(item["image"])
                elif item["radiogroup"] and item["radiotag"]:
                    # First item of a radiogroup registers its get/set
                    # functions; later members inherit them.
                    grp = radiogroups.get(item["radiogroup"])
                    grp_widget = None
                    if grp:
                        grp_widget, grp_item = grp
                        item["radio_get"] = grp_item["radio_get"]
                        item["radio_set"] = grp_item["radio_set"]
                    menu_item = gtk.RadioMenuItem(grp_widget, item["name"])
                    if not grp:
                        #assert item["radio_get"] and item["radio_set"]
                        radiogroups[item["radiogroup"]] = (menu_item,
                                                           item)
                    def radio_activate(mi, get_fn, set_fn, tag):
                        if get_fn() != tag:
                            set_fn(tag)
                    menu_item.connect_after("activate",
                                            radio_activate,
                                            item["radio_get"],
                                            item["radio_set"],
                                            item["radiotag"])
                    needs_refresh = 1
                elif item["checked_get"] and item["checked_set"]:
                    menu_item = gtk.CheckMenuItem(item["name"])
                    menu_item.set_active(item["checked_get"]())
                    needs_refresh = 1
                    def check_activate(mi, get_fn, set_fn):
                        # Only push the new state when it actually changed.
                        state = mi.get_active()
                        x = (get_fn() and 1) or 0
                        if x ^ state:
                            set_fn(state)
                    menu_item.connect_after("activate",
                                            check_activate,
                                            item["checked_get"],
                                            item["checked_set"])
                else:
                    if item["with_dropdown_arrow"]:
                        menu_item = gtk.MenuItem()
                        hbox = gtk.HBox(0, 0)
                        hbox.pack_start(gtk.Label(item_name), 0, 0, 0)
                        hbox.pack_start(gtk.Arrow(gtk.ARROW_DOWN,
                                                  gtk.SHADOW_OUT), 0, 0, 0)
                        menu_item.add(hbox)
                    else:
                        menu_item = gtk.MenuItem(item_name)
                # Flash the item description in the statusbar on hover.
                if self.statusbar and item["description"]:
                    def select_cb(mi, sb, i):
                        sb.push(hash(mi), i["description"])
                    def deselect_cb(mi, sb):
                        sb.pop(hash(mi))
                    menu_item.connect("select", select_cb,
                                      self.statusbar, item)
                    menu_item.connect("deselect", deselect_cb, self.statusbar)
                parent_menu.append(menu_item)
                menu_item.show_all()
                ### If this item is a leaf in our tree,
                ### hook up it's callback
                if is_leaf and item["callback"]:
                    menu_item.connect_after(
                        "activate",
                        lambda x, i:i["callback"](self.user_data),
                        item)
                    if item["accelerator"]:
                        # NOTE(review): self.accel_parser / self.accel_group
                        # are commented out in __init__, so this path would
                        # raise AttributeError if an accelerator is supplied.
                        self.accel_parser.parse(item["accelerator"])
                        key = self.accel_parser.key()
                        if key:
                            mods = self.accel_parser.mods()
                            menu_item.add_accelerator("activate",
                                                      self.accel_group,
                                                      key, mods,
                                                      gtk.ACCEL_VISIBLE)
                ###
                ### If this item has special visibility, sensitivity or checked
                ### functions, hook them up to listen for our refresh_items
                ### signals.
                ###
                def refresh_items(widget, item):
                    visible_fn = item["visible_fn"]
                    if (not visible_fn) or visible_fn():
                        widget.show()
                    else:
                        widget.hide()
                    def eval_fn_or_tuple(fn):
                        # Accepts a callable, or a (callable, args...) tuple/list.
                        if not fn:
                            return 1
                        elif callable(fn):
                            return (fn() and 1) or 0
                        elif type(fn) == types.TupleType \
                             or type(fn) == types.ListType:
                            assert(len(fn) > 0)
                            assert(callable(fn[0]))
                            return (apply(fn[0], fn[1:]) and 1) or 0
                        print "Couldn't eval", fn
                        return 0
                    is_sensitive = eval_fn_or_tuple(item["sensitive_fn"])
                    widget.set_sensitive(is_sensitive)
                    if item["checked_get"]:
                        is_checked = eval_fn_or_tuple(item["checked_get"])
                        widget.set_active(is_checked)
                    radiogroup = item["radiogroup"]
                    radiotag = item["radiotag"]
                    radio_get = item["radio_get"]
                    radio_set = item["radio_set"]
                    if radiogroup and radiotag and radio_get and radio_set:
                        active_tag = radio_get()
                        widget.set_active(radiotag == active_tag)
                if needs_refresh:
                    self.connect("refresh_items",
                                 lambda menu, x, y: refresh_items(x, y),
                                 menu_item, item)
                ###
                ### If this item has subitems, construct the submenu
                ### and continue walking down the tree.
                ###
                if not is_leaf:
                    # Refresh the menu bar every time a top-level
                    # menu item is opened.
                    if prefix == "/":
                        menu_item.connect("activate",
                                          lambda x:self.refresh_items())
                    submenu = gtk.Menu()
                    menu_item.set_submenu(submenu)
                    submenu.show()
                    walk_tree(path, submenu)

        walk_tree("/", self)
# Register the class with the GObject type system and declare the custom
# "refresh_items" signal that construct() hooks per-item handlers onto.
gobject.type_register(MenuBar)
gobject.signal_new("refresh_items",
                   MenuBar,
                   gobject.SIGNAL_RUN_LAST,
                   gobject.TYPE_NONE,
                   ())
|
import pytest
from time import sleep
from typing import List, Tuple, Dict
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.remote.webdriver import WebDriver
from selenium.webdriver.remote.webelement import WebElement
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as expected
# Readable short aliases for the expected_conditions factories used below.
present = expected.presence_of_element_located
visible = expected.visibility_of_element_located
clickable = expected.element_to_be_clickable
all_present = expected.presence_of_all_elements_located
text_present = expected.text_to_be_present_in_element
# NOTE(review): this opens a Chrome window at import time and is shadowed
# by the `driver` fixture below; only the module-level search() helper
# uses this instance — confirm this duplication is intended.
driver = webdriver.Chrome()
@pytest.fixture()
def driver():
    """Yield a fresh Chrome session pointed at the shop; quit on teardown."""
    driver:WebDriver = webdriver.Chrome()
    driver.get("https://shop.one-shore.com")
    yield driver
    # Brief pause so the final page state is visible before closing.
    sleep(2)
    driver.quit()
def search(text:str):
    """Run a storefront search for `text` and wait for the results page.

    NOTE(review): uses the module-level `driver` (the Chrome instance
    created at import time), not the pytest fixture — confirm intended.
    """
    search = driver.find_element("id", "search_widget")
    search_field = search.find_element("name", "s")
    search_field.clear()
    search_field.send_keys(text)
    search_button = search.find_element("xpath", "//button[@type='submit']")
    search_button.click()
    # Wait until the breadcrumb confirms the results page has loaded.
    wait = WebDriverWait(driver, 10)
    breadcrumbs = By.CSS_SELECTOR, ".breadcrumb"
    search_results_present = text_present(breadcrumbs, "Search results")
    wait.until(search_results_present)
def test_search_for_customizable_mug(driver:WebDriver):
    """Search the shop for "mug" and wait for the results breadcrumb.

    NOTE(review): duplicates the search() helper above, and the
    find_element_by_* helpers were removed in Selenium 4 — confirm the
    pinned selenium version.
    """
    search_field = driver.find_element_by_name("s")
    search_field.clear()
    search_field.send_keys("mug")
    search_button = driver.find_element_by_xpath("//button[@type='submit']")
    search_button.click()
    wait = WebDriverWait(driver, 10)
    breadcrumbs = By.CSS_SELECTOR, ".breadcrumb"
    search_results_present = expected.text_to_be_present_in_element(breadcrumbs, "Search results")
    wait.until(search_results_present)
def search_for_item(item:str):
|
empty_list = [] # Create an empty list
empty_list.append(10) # indexing into a list only works once items have been added
ages = [19, 21, 20] # A named list with comma separated values
student1_details = [20, "Michael Brennan", 77.5] # Lists can hold a variety of data types
student2_details = [33, "Mairead Gallagher", 65]
# Lists can nest other lists (and mix in scalars).
class_of_students = [student1_details, student2_details, "module title", [2, 3, 4]]
group_of_students = student1_details + student2_details # creates 1 new list with contents of previous two
# [20, 'Michael Brennan', 77.5, 33, 'Mairead Gallagher', 65]
grades=[1,2,3]
nu_grades=[grades]*2 # note the square brackets around the list name
print("{}".format(nu_grades))
nu_grades[0][0]=6
# Both halves of nu_grades reference the SAME inner list, so the 6 shows
# up in both elements (aliasing, not copying).
print("Repeated list after change {}".format(nu_grades))
|
# stworz pakiet matematyka
# w nim stworz moduly: algebra i geometria
# w module algebra stworz funkcje mnozaca liczbe a przez b
# w module geometria stworz funkcje obliczajaca pole trapezu (1/2 * (a + b) * h)
#
# zaimportuj modul algebra jako algebra i geometria jako geometria (uzyj as)
# przy uzyciu funkcji z tych modulow oblicz iloczyn 111 * 222
# oraz policz pole trapezu o podstawach a=6, b=7 i wysokosci h=4
#
# przetestuj kilka rodzajow importu:
# import pakiet.modul -> wywolanie funkcji przez pakiet.modul.funkcja
# from pakiet import modul -> wywolanie przez modul.funkcja
# from pakiet.modul import funkcja -> wywolanie przez funkcja
# import matematyka.algebra
# import matematyka.geometria
#
# print(matematyka.algebra.pomnoz_a_przez_b(111,222))
# print(matematyka.geometria.pole_trapezu(1,2,3))
# import matematyka.algebra as algebra
# import matematyka.geometria as geometria
#
# print(algebra.pomnoz_a_przez_b(111,222))
# print(geometria.pole_trapezu(1,2,3))
# from matematyka import algebra
# from matematyka import geometria
#
# print(algebra.pomnoz_a_przez_b(111,222))
# print(geometria.pole_trapezu(1,2,3))
# Import the multiply and trapezoid-area helpers directly from the
# matematyka package modules (the "from package.module import function" style).
from matematyka.algebra import pomnoz_a_przez_b
from matematyka.geometria import pole_trapezu
# NOTE(review): the exercise above asks for a trapezoid with a=6, b=7, h=4,
# but pole_trapezu is called with (1, 2, 3) here — confirm intended.
print(pomnoz_a_przez_b(111,222))
print(pole_trapezu(1,2,3))
# import matematyka.algebra as algebra
# import matematyka.geometria as geometria
# from matematyka import algebra
# from matematyka import geometria
# from matematyka.algebra import pomnoz_a_przez_b
# from matematyka.geometria import pole_trapezu
# print(algebra.pomnoz_a_przez_b(111, 222))
# print(geometria.pole_trapezu(6, 7, 4))
# print(pomnoz_a_przez_b(111, 222))
# print(pole_trapezu(6, 7, 4))
|
#!/usr/bin/env python
import rospy
from enum import Enum
from std_msgs.msg import Int64, Header, Byte
from std_srvs.srv import SetBool
import math
from geometry_msgs.msg import PoseStamped, TwistStamped, Vector3, Quaternion
from mavros_msgs.msg import Altitude, ExtendedState, HomePosition, State, \
WaypointList, PositionTarget, AttitudeTarget, Thrust
from mavros_msgs.srv import CommandBool, ParamGet, SetMode, WaypointClear, \
WaypointPush
from pymavlink import mavutil
from sensor_msgs.msg import NavSatFix, Imu
from six.moves import xrange
from threading import Thread
from tf.transformations import euler_from_quaternion, quaternion_from_euler
import numpy as np
class uavTaskType(Enum):
    """High-level task the UAV control loop in __main__ switches on."""
    Idle = 0
    TakeOff = 1
    Mission = 2
    Land = 3
class NumberCounter:
    """Accumulate integers from /number and republish the running total on
    /number_count; the /reset_counter SetBool service zeroes the total."""

    def __init__(self):
        self.counter = 0
        self.pub = rospy.Publisher("/number_count", Int64, queue_size=10)
        self.number_subscriber = rospy.Subscriber(
            "/number", Int64, self.callback_number)
        self.reset_service = rospy.Service(
            "/reset_counter", SetBool, self.callback_reset_counter)

    def callback_number(self, msg):
        """Add the incoming value to the total and publish the new total."""
        self.counter += msg.data
        total_msg = Int64()
        total_msg.data = self.counter
        self.pub.publish(total_msg)

    def callback_reset_counter(self, req):
        """SetBool handler: reset the total only when req.data is True."""
        if not req.data:
            return False, "Counter has not been reset"
        self.counter = 0
        return True, "Counter has been successfully reset"
class TaskManager:
    """Owns the MAVROS interface for one UAV: caches telemetry from
    subscribers, publishes setpoints from a background thread, and wraps the
    arming / set-mode services.  Constructed once from __main__."""
    def __init__(self):
        # Latest telemetry snapshots, overwritten by the callbacks below.
        self.altitude = Altitude()
        self.extened_state = ExtendedState()
        self.global_position = NavSatFix()
        self.imu_data = Imu()
        self.home_position = HomePosition()
        self.local_position = PoseStamped()
        self.attitude_sp = PoseStamped()
        self.state = State()
        self.local_velocity = TwistStamped() # local_velocity initialize
        self.attitude_rate = AttitudeTarget() # use for attitude setpoints pub
        self.thrust = Thrust()
        self.pos = PoseStamped()
        self.position = PositionTarget() # thrust control commands
        self.task_state = uavTaskType.Idle
        self.euler = Vector3() # Euler angles
        self.pos_sp = Vector3() #position setpoint
        # ROS publisher
        self.pos_control_pub = rospy.Publisher(
            'mavros/setpoint_raw/local', PositionTarget, queue_size=10)
        self.position_pub = rospy.Publisher(
            'mavros/setpoint_position/local', PoseStamped, queue_size=1)
        self.attitude_sp_pub = rospy.Publisher(
            'mavros/setpoint_attitude/attitude', PoseStamped, queue_size=1)
        self.attitude_rate_sp_pub = rospy.Publisher(
            'mavros/setpoint_raw/attitude', AttitudeTarget, queue_size=1)
        self.attitude_thrust_pub = rospy.Publisher(
            'mavros/setpoint_attitude/thrust', Thrust, queue_size = 1)
        # ROS subscribers
        self.local_pos_sub = rospy.Subscriber(
            'mavros/local_position/pose', PoseStamped, self.local_position_callback)
        self.state_sub = rospy.Subscriber(
            'mavros/state', State, self.state_callback)
        self.cmd_sub = rospy.Subscriber('user/cmd', Byte, self.cmd_callback)
        self.vel_sub = rospy.Subscriber('mavros/local_position/velocity_local',
            TwistStamped, self.local_velocity_callback) # local_velocity susbcriber
        #self.vel_global_sub = rospy.Subscriber('mavros/local_position/velocity_local', TwistStamped, self.global_velocity_callback)
        # send setpoints in seperate thread to better prevent failsafe
        self.pos_thread = Thread(target=self.send_pos_ctrl, args=())
        self.pos_thread.daemon = True
        self.pos_thread.start()
        # ROS services
        service_timeout = 30
        rospy.loginfo("Waiting for ROS services")
        try:
            rospy.wait_for_service('mavros/param/get', service_timeout)
            rospy.wait_for_service('mavros/cmd/arming', service_timeout)
            rospy.wait_for_service('mavros/mission/push', service_timeout)
            rospy.wait_for_service('mavros/mission/clear', service_timeout)
            rospy.wait_for_service('mavros/set_mode', service_timeout)
            rospy.loginfo("ROS services are up")
        except rospy.ROSException:
            rospy.logerr("failed to connect to services")
        self.get_param_srv = rospy.ServiceProxy('mavros/param/get', ParamGet)
        self.set_arming_srv = rospy.ServiceProxy(
            'mavros/cmd/arming', CommandBool)
        self.set_mode_srv = rospy.ServiceProxy('mavros/set_mode', SetMode)
    def local_velocity_callback(self, data): # local_velocity callback
        self.local_velocity = data
    # Publish self.pos at 10 Hz forever (unused; send_pos_ctrl runs instead).
    def send_pos(self):
        rate = rospy.Rate(10)
        self.pos.header = Header()
        self.pos.header.frame_id = "base_footprint"
        while not rospy.is_shutdown():
            self.pos.header.stamp = rospy.Time.now()
            self.position_pub.publish(self.pos)
            try: # prevent garbage in console output when thread is killed
                rate.sleep()
            except rospy.ROSInterruptException:
                pass
    # Background thread body: stream self.position (raw setpoints) at 10 Hz
    # so PX4's OFFBOARD failsafe never trips for lack of setpoints.
    def send_pos_ctrl(self):
        rate = rospy.Rate(10)
        self.pos.header = Header()
        self.pos.header.frame_id = "base_footprint"
        while not rospy.is_shutdown():
            self.pos.header.stamp = rospy.Time.now()
            self.pos_control_pub.publish(self.position)
            try: # prevent garbage in console output when thread is killed
                rate.sleep()
            except rospy.ROSInterruptException:
                pass
    # user/cmd Byte -> task-state transition (1 TakeOff, 2 Mission, 3 Land).
    # NOTE(review): the loginfo prints the OLD state (logged before assignment).
    def cmd_callback(self, data):
        # self.task_state = data
        cmd = data.data
        rospy.loginfo("Command received: {0}".format(self.task_state))
        rospy.loginfo("Command received: {0}".format(data))
        if cmd == 1:
            rospy.loginfo("Taks state changed to {0}".format(self.task_state))
            self.task_state = uavTaskType.TakeOff
        elif cmd == 2:
            rospy.loginfo("Taks state changed to {0}".format(self.task_state))
            self.task_state = uavTaskType.Mission
        elif cmd == 3:
            rospy.loginfo("Taks state changed to {0}".format(self.task_state))
            self.task_state = uavTaskType.Land
    # Cache the pose and keep self.euler in sync with its orientation.
    def local_position_callback(self, data):
        self.local_position = data
        q = [data.pose.orientation.x, data.pose.orientation.y,
            data.pose.orientation.z, data.pose.orientation.w]
        self.euler = euler_from_quaternion(q)
    # Log every armed/connected/mode/system_status transition, then cache.
    def state_callback(self, data):
        if self.state.armed != data.armed:
            rospy.loginfo("armed state changed from {0} to {1}".format(
                self.state.armed, data.armed))
        if self.state.connected != data.connected:
            rospy.loginfo("connected changed from {0} to {1}".format(
                self.state.connected, data.connected))
        if self.state.mode != data.mode:
            rospy.loginfo("mode changed from {0} to {1}".format(
                self.state.mode, data.mode))
        if self.state.system_status != data.system_status:
            rospy.loginfo("system_status changed from {0} to {1}".format(
                mavutil.mavlink.enums['MAV_STATE'][
                    self.state.system_status].name, mavutil.mavlink.enums[
                        'MAV_STATE'][data.system_status].name))
        self.state = data
    #
    # Helper methods
    #
    def set_arm(self, arm, timeout):
        """arm: True to arm or False to disarm, timeout(int): seconds"""
        # Retries the arming service once per second until the cached state
        # reports the requested value or the timeout elapses.
        rospy.loginfo("setting FCU arm: {0}".format(arm))
        old_arm = self.state.armed
        loop_freq = 1 # Hz
        rate = rospy.Rate(loop_freq)
        arm_set = False
        for i in xrange(timeout * loop_freq):
            if self.state.armed == arm:
                arm_set = True
                rospy.loginfo("set arm success | seconds: {0} of {1}".format(
                    i / loop_freq, timeout))
                break
            else:
                try:
                    res = self.set_arming_srv(arm)
                    if not res.success:
                        rospy.logerr("failed to send arm command")
                except rospy.ServiceException as e:
                    rospy.logerr(e)
            try:
                rate.sleep()
            except rospy.ROSException as e:
                rospy.logerr("fail to arm")
    def set_mode(self, mode, timeout):
        """mode: PX4 mode string, timeout(int): seconds"""
        # Same retry pattern as set_arm, for the flight mode.
        rospy.loginfo("setting FCU mode: {0}".format(mode))
        old_mode = self.state.mode
        loop_freq = 1 # Hz
        rate = rospy.Rate(loop_freq)
        mode_set = False
        for i in xrange(timeout * loop_freq):
            if self.state.mode == mode:
                mode_set = True
                rospy.loginfo("set mode success | seconds: {0} of {1}".format(
                    i / loop_freq, timeout))
                break
            else:
                try:
                    res = self.set_mode_srv(0, mode) # 0 is custom mode
                    if not res.mode_sent:
                        rospy.logerr("failed to send mode command")
                except rospy.ServiceException as e:
                    rospy.logerr(e)
            try:
                rate.sleep()
            except rospy.ROSException as e:
                rospy.logerr("fail to set mode")
if __name__ == '__main__':
    # Node entry point: spin up the counter demo and the task manager, then
    # run the task state machine at (nominally) 200 Hz.
    rospy.init_node('number_counter')
    print("hahaha")
    NumberCounter()
    uavTask = TaskManager()
    uavTask.pos.pose.position.x = 0
    uavTask.pos.pose.position.y = 0
    uavTask.pos.pose.position.z = 0
    uavTask.set_mode("OFFBOARD", 5)
    uavTask.set_arm(True, 5)
    while not rospy.is_shutdown():
        rate = rospy.Rate(200)
        print(uavTask.task_state)
        # uavTask.position_pub.publish(uavTask.pos)
        if uavTask.task_state == uavTaskType.TakeOff:
            # Climb to 1.5 m via the position-setpoint interface.
            rospy.loginfo("Doing Takeoff using attitude setpoint")
            rospy.loginfo("time now is {0}".format(rospy.Time.now()))
            uavTask.pos.pose.position.x = 0
            uavTask.pos.pose.position.y = 0
            uavTask.pos.pose.position.z = 1.5
            uavTask.pos.pose.orientation.x = 0
            uavTask.pos.pose.orientation.y = 0
            uavTask.pos.pose.orientation.z =0
            uavTask.pos.pose.orientation.w = 1
            uavTask.position.position.x = 0
            uavTask.position.position.y = 0
            uavTask.position.position.z = 0.8
            uavTask.position.type_mask = 32768
            uavTask.position_pub.publish(uavTask.pos)
            #uavTask.pos_control_pub.publish(uavTask.position)
        elif uavTask.task_state == uavTaskType.Mission:
            # State-feedback attitude/thrust controller that regulates the
            # vehicle to pos_sp; gains appear hand-tuned (LQR-style).
            rospy.loginfo("Doing Mission")
            uavTask.pos_sp = [0, 0, 0.6]
            # Get position feedback from PX4
            x = uavTask.local_position.pose.position.x
            y = uavTask.local_position.pose.position.y
            z = uavTask.local_position.pose.position.z # ENU used in ROS
            vx_enu = uavTask.local_velocity.twist.linear.x # NWU body frame
            vy_enu = uavTask.local_velocity.twist.linear.y
            vz_enu = uavTask.local_velocity.twist.linear.z
            # LQR-based controller, x-gamma, y-beta, z-alpha
            # gamma = uavTask.euler[0]
            # beta = uavTask.euler[1]
            yaw = 0/57.3 # attitude_rate setpoint body_z (57.3 ~ deg->rad)
            # yaw = 0 #simulation face east
            state_x = np.array([[x, vx_enu]]).T
            # K_x = np.array([[0.1,0.1724]]) heading East!!!
            K_x = np.array([[0.1,0.1744]]) #less aggressive
            beta = -np.matmul(K_x, state_x) # attitude setpoint body_y
            state_y = np.array([[y, vy_enu]]).T
            # K_y = np.array([[-0.1, -0.1724])
            K_y = np.array([[-0.1, -0.1744]])
            gamma = -np.matmul(K_y, state_y) # attitude setpoint body_x
            state_z = np.array([[z-uavTask.pos_sp[2], vz_enu]]).T
            # K_z = np.array([[0.7071, 1.2305]])
            K_z = np.array([[0.7071,1.3836]]) #less aggresive
            a = -np.matmul(K_z, state_z)/(3*9.8)+0.355 #throttle sp
            #a = float(a)
            uavTask.attitude_rate.body_rate = Vector3()
            uavTask.attitude_rate.header = Header()
            uavTask.attitude_rate.header.frame_id = "base_footprint"
            #uavTask.attitude_rate.orientation =
            quat = quaternion_from_euler(gamma, beta, yaw)
            #quat = quaternion_from_euler(0, 0, 0)
            # uavTask.attitude_rate.body_rate.y = 0
            # uavTask.attitude_rate.body_rate.z = 0
            #eu = np.array([[gamma, beta, yaw]]).T
            #quat = quaternion_from_euler(gamma, beta, yaw) # X,Y,Z,W
            #uavTask.attitude_rate.orientation = quat
            uavTask.attitude_rate.orientation.x = quat[0]
            uavTask.attitude_rate.orientation.y = quat[1]
            uavTask.attitude_rate.orientation.z = quat[2]
            uavTask.attitude_rate.orientation.w = quat[3]
            #uavTask.attitude_sp.pose.position.x = 0
            #uavTask.attitude_sp.pose.position.y = 0
            #uavTask.attitude_sp.pose.position.z = 0
            #uavTask.attitude_sp.pose.orientation.x = quat[0]
            #uavTask.attitude_sp.pose.orientation.y = quat[1]
            #uavTask.attitude_sp.pose.orientation.z = quat[2]
            #uavTask.attitude_sp.pose.orientation.w = quat[3]
            #uavTask.thrust.thrust = a
            uavTask.attitude_rate.thrust = a
            # type_mask 7: ignore the body-rate fields, use orientation+thrust.
            uavTask.attitude_rate.type_mask = 7
            uavTask.attitude_rate_sp_pub.publish(uavTask.attitude_rate)
            #uavTask.attitude_thrust_pub.publish(uavTask.thrust)
            ## Controller will be used here ###
            #uavTask.pos_control_pub.publish(uavTask.position)
        elif uavTask.task_state == uavTaskType.Land:
            # Command the origin at zero altitude as the landing setpoint.
            rospy.loginfo("Doing Land")
            uavTask.pos.pose.position.x = 0
            uavTask.pos.pose.position.y = 0
            uavTask.pos.pose.position.z = 0
            uavTask.position_pub.publish(uavTask.pos)
        rate.sleep()
    rospy.spin()
|
#!/usr/bin/env python
import sys, os
import numpy as np
from plotROCutils import addTimestamp, addDirname, addNumEvents, readDescription
#----------------------------------------------------------------------
def findHighestEpoch(outputDir, sample):
    """Return the highest epoch number among roc-data-<sample>-<epoch>.npz
    files found in outputDir, or None when no such file exists.

    Fixes: the pattern is now a raw string (the old "\d" is an invalid
    escape), the literal '.' before 'npz' is escaped, and *sample* is passed
    through re.escape so metacharacters in a sample name cannot break the
    match.
    """
    import glob, re
    fnames = glob.glob(os.path.join(outputDir, "roc-data-%s-*.npz" % sample))
    highest = -1
    pattern = re.compile(r"roc-data-" + re.escape(sample) + r"-(\d+)\.npz$")
    for fname in fnames:
        mo = pattern.match(os.path.basename(fname))
        # note that 'mva' can also appear where otherwise the epoch number
        # appears, hence files without a numeric suffix are skipped
        if mo:
            highest = max(highest, int(mo.group(1), 10))
    if highest == -1:
        return None
    else:
        return highest
#----------------------------------------------------------------------
# main
#----------------------------------------------------------------------
# Command-line driver: plot the NN output distribution (signal vs background)
# for one result directory and epoch.
# NOTE(review): the final `print >> sys.stderr` is Python 2 syntax; this
# script targets Python 2.
from optparse import OptionParser
parser = OptionParser("""
usage: %prog [options] result-directory epoch
use epoch = 0 for highest epoch number found
"""
)
parser.add_option("--save-plots",
    dest = 'savePlots',
    default = False,
    action="store_true",
    help="save plots in input directory",
)
parser.add_option("--sample",
    dest = 'sample',
    default = "test",
    choices = [ "test", "train" ],
    help="sample to use (train or test)",
)
(options, ARGV) = parser.parse_args()
assert len(ARGV) == 2, "usage: plotNNoutput.py result-directory epoch"
outputDir, epoch = ARGV
epoch = int(epoch)
# epoch 0 is a sentinel meaning "use the latest epoch found on disk"
if epoch == 0:
    epoch = findHighestEpoch(outputDir, options.sample)
#----------------------------------------
weightsLabelsFile = os.path.join(outputDir, "weights-labels-" + options.sample + ".npz")
weightsLabels = np.load(weightsLabelsFile)
if options.sample == 'train':
    weightVarName = "trainWeight"
else:
    # test sample
    weightVarName = "weight"
weights = weightsLabels[weightVarName]
labels = weightsLabels['label']
outputsFile = os.path.join(outputDir, "roc-data-%s-%04d.npz" % (options.sample, epoch))
outputsData = np.load(outputsFile)
output = outputsData['output']
import pylab
# Overlaid step histograms of the network output, per-event weighted.
pylab.hist(output[labels == 1], weights = weights[labels == 1], bins = 100, label='signal', histtype = 'step')
pylab.hist(output[labels == 0], weights = weights[labels == 0], bins = 100, label='background', histtype = 'step')
pylab.legend()
pylab.xlabel('NN output')
pylab.title(options.sample + " epoch %d" % epoch)
pylab.grid()
addTimestamp(outputDir)
addDirname(outputDir)
# addNumEvents(numEvents.get('train', None), numEvents.get('test', None))
if options.savePlots:
    outputFname = os.path.join(outputDir, "nn-output-" + options.sample + "-%04d.pdf" % epoch)
    pylab.savefig(outputFname)
    print >> sys.stderr,"wrote plots to",outputFname
else:
    pylab.show()
|
# coding=utf-8
from pytest_bdd import (
scenario
)
# Binds the happy-path scenario from the feature file to pytest.
# NOTE(review): the "to to" typo below must match the scenario title in the
# .feature file verbatim -- correct it there first if it is ever fixed.
@scenario('../features/accidental_delete_usual_case.feature',
          'Execute Digito-SimulateS3ObjectsAccidentalDeleteTest_2020-04-01 to to accidentally delete files in S3 '
          'bucket')
def test_accidental_delete_usual_case():
    """Create AWS resources using CloudFormation template and execute SSM automation document."""
# Binds the failure-path scenario (alarm times out instead of entering ALARM).
@scenario('../features/accidental_delete_alarm_failed.feature',
          'Execute Digito-SimulateS3ObjectsAccidentalDeleteTest_2020-04-01 to accidentally delete files in S3 bucket '
          'and fail because of timed out alarm instead of being in ALARM state')
def test_accidental_delete_alarm_failed():
    """Create AWS resources using CloudFormation template and execute SSM automation document."""
|
from __future__ import print_function
import socket
import datetime
import random
import threading
import unicast as u
import FIFOMulticast as f
class node():
    """One peer in a small UDP mesh described by the `config` file; supports
    plain unicast plus (when MULTITYPE == 1) FIFO-ordered multicast.
    NOTE(review): Python 2 module (raw_input, str-based socket payloads)."""
    def __init__(self, ID = -1, IP = "", PORT = 0, SOCKET = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) ):
        # NOTE(review): the default SOCKET is created once at class-definition
        # time and is shared by every default-constructed node instance.
        self.RECEIVED = []
        self.DESTINATIONS = []
        self.MYID = ID
        self.MYIP = IP
        self.MYPORT = PORT
        self.MYSOCKET = SOCKET
        self.MIN = 0
        self.MAX = 0
        self.MULTITYPE = 0
        self.FIFO = None
    def __str__(self):
        # "id|ip|port" -- also used as repr below.
        return (str(self.MYID) + "|"+ self.MYIP +"|"+ str(self.MYPORT) )
    __repr__ = __str__
    def make_node(self):
        # Opens the configuration file
        # The config file lists each node's characteristics, each on a separate line
        # The three characteristics right now are ID Number, IP Address, and Port Number
        config_file = open("config","r")
        # Number of nodes in config file
        # -2 to account for first min-max line, and two trailing whitespace line
        NUMOFNODES = sum(1 for line in open('config')) - 2
        self.FIFO = f.FIFOMulticast(self,NUMOFNODES)
        # Take min and max delay from config file
        minmaxdelay = config_file.readline().rstrip('\n')
        minandmax = minmaxdelay.split(" ")
        self.MIN = int(minandmax[0])
        self.MAX = int(minandmax[1])
        for a in range(NUMOFNODES):
            # Read in the ID, IP, and PORT from the config file for each of the four connections
            line = config_file.readline().rstrip('\n')
            # Split line into ID, IP, PORT
            linearray = line.split(" ")
            ID = int(linearray[0])
            IP = str(linearray[1])
            PORT = int(linearray[2])
            # Decides which node this process will be
            if self.MYID == -1:
                self.MULTITYPE = int(raw_input("Type in your Multicast ordering type: 1 for FIFO"))
                self.MYID = int(raw_input("Type in your node ID number (0-3)"))
            # Creates sockets between all pairs of nodes and
            # appends nodes to a list as a tuple of ID, IP, PORT
            # and SOCK
            SOCK = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            if a == self.MYID:
                self.MYIP = IP
                self.MYPORT = PORT
                self.MYSOCKET = SOCK
                self.MYSOCKET.bind((self.MYIP, self.MYPORT)) # Port of processNumber
            self.DESTINATIONS.append(node(ID,IP,PORT,SOCK))
        # Bind socket to the process
    def receive(self):
        # Blocking receive loop (run on a background thread by action_loop).
        # Wire format: "<sender-id> <payload>[ <sequence-number>]" -- two
        # fields mean unicast, three mean FIFO multicast.
        while 1:
            data = self.MYSOCKET.recv(1024)
            if data == "close": break
            datasplit = data.split(" ")
            id = int(datasplit[0])
            data = datasplit[1]
            if(len(datasplit) == 2):
                self.RECEIVED.append((id,data))
                u.unicast_receive(self,self.DESTINATIONS[id],data)
            elif(self.MULTITYPE == 1): #FIFO
                Rsequencer = int(self.FIFO.RSEQUENCERS[id])
                Ssequencer = int(datasplit[2])
                if(Ssequencer == Rsequencer + 1):
                    # In-order: deliver, then try to drain queued messages
                    # that this delivery may have unblocked.
                    print("Message accepted")
                    self.FIFO.deliver(self.DESTINATIONS[id],data)
                    for queued in self.FIFO.QUEUE:
                        qid = queued[0]
                        qdata = queued[1]
                        qRsequencer = queued[2]
                        if(Ssequencer== qRsequencer + 1):
                            self.FIFO.deliver(self.DESTINATIONS[qid],qdata)
                elif(Ssequencer < Rsequencer + 1):
                    # Duplicate / stale sequence number.
                    print("Message rejected")
                else:
                    # Arrived early: hold it until the gap is filled.
                    print("Message appended")
                    self.FIFO.QUEUE.append((id,data,Rsequencer))
    def action_loop(self):
        threading.Thread(target= self.receive).start()
        # Send messages to other nodes using the format:
        # send (# of node) (message)
        # type in 'close' to exit to terminal
        while 1:
            decide = raw_input("What do you want to do?\n")
            if(decide[0:4] == "send"):
                sendTo = int(decide[5])
                sendString = decide[7:]
                u.unicast_send(self.DESTINATIONS[sendTo], str(self.MYID) + " " + sendString)
            if(decide[0:5] == "close"):
                u.unicast_send(self.DESTINATIONS[self.MYID], "close")
                break
            if(decide[0:5] == "msend"):
                sendString = decide[6:]
                if(self.MULTITYPE == 1):
                    self.FIFO.multicast(self.DESTINATIONS,str(self.MYID) + " " + sendString)
# Entry point: build this peer from `config`, then loop on user commands
# (send / msend / close).
run = node()
run.make_node()
run.action_loop()
#!/usr/bin/python3
from scapy.all import *
def spoof_reply(pkt):
    # Sniff callback: craft and send an ICMP echo-reply mirroring the sniffed
    # echo-request. Layers: pkt[0]=Ether, pkt[1]=IP, pkt[2]=ICMP.
    if(pkt[2].type == 8):  # ICMP type 8 == echo request
        print("Creating spoof packet...")
        dst = pkt[1].dst
        src = pkt[1].src
        ttl = pkt[1].ttl
        id_IP = pkt[1].id
        seq = pkt[2].seq
        id_ICMP = pkt[2].id
        '''
        If we want to add the load to the ICMP packet
        #load = pkt[3].load
        #reply = Ether(src=pkt[0].dst, dst=pkt[0].src, type=pkt[0].type)/IP(id=id_IP, ttl=ttl,src=dst, dst=src)/ICMP(type=0, code=0, id=id_ICMP, seq=seq)/load
        '''
        # Swap src/dst at both the Ether and IP layers; ICMP type 0 == reply.
        reply = Ether(src=pkt[0].dst, dst=pkt[0].src, type=pkt[0].type)/IP(id=id_IP, ttl=ttl,src=dst, dst=src)/ICMP(type=0, code=0, id=id_ICMP, seq=seq)
        # contruct the packet with a new checksum for the IP header
        del reply[IP].chksum
        # contruct the packet with a new checksum for the ICMP packet
        del reply[ICMP].chksum
        # Rebuild so scapy recomputes both checksums, then copy them back.
        raw_bytes = reply.build()
        reply[IP].chksum = Ether(raw_bytes)[IP].chksum
        reply[ICMP].chksum = Ether(raw_bytes)[ICMP].chksum
        reply.show2()
        sendp(reply, iface="ens18")  # NOTE(review): interface hard-coded here as well as in __main__
if __name__=="__main__":
# define the network interface
iface = "ens18"
# filter for only ICMP trafic
filter = "icmp"
# start sniffing
sniff(iface=iface, prn=spoof_reply, filter=filter) |
#
# functions to process file loading and data manipulation
import datetime
import os
import numpy as np
import pandas as pd
def list_files(path, ext):
    """Return the names (without directory part) of every regular file
    directly inside *path* whose name contains *ext* (e.g. '.txt')."""
    return [
        entry
        for entry in os.listdir(path)
        if os.path.isfile(os.path.join(path, entry)) and ext in entry
    ]
def printDate():
    """Return the current local date/time at minute resolution in a
    filesystem-safe form, e.g. '2016-05-23_16-31'."""
    now = datetime.datetime.now()
    return now.strftime("%Y-%m-%d_%H-%M")
def outputFile(fileName, projectFolder=os.getcwd(), folderSuffix='_output'):
    """Create (if needed) a timestamped output folder under *projectFolder*
    and return the full path for *fileName* inside it.

    NOTE: the default projectFolder is evaluated once, at import time.
    """
    folder = os.path.join(projectFolder, printDate() + folderSuffix)
    if not os.path.exists(folder):
        os.makedirs(folder)
    return os.path.join(folder, fileName)
def intensityThresholding(inputProfile, intensityColumn='intensity', intensityThreshold=0):
    """Drop every row whose *intensityColumn* value is <= *intensityThreshold*
    and return the surviving rows with a fresh 0..n-1 index."""
    above_threshold = inputProfile[intensityColumn] > intensityThreshold
    return inputProfile[above_threshold].reset_index(drop=True)
def genotyping(imageName):
    """Map an image file name to its genotype label ('CKO', 'WT' or 'HTZ')
    by the cre/flox marker substring it contains; None if nothing matches.
    CKO is checked first, then WT, then HTZ."""
    name = str(imageName)
    marker_table = (
        ('CKO', ('f-f_cre-pos',)),
        ('WT', ('f-f_cre-neg', 'f-p_cre-neg', 'p-p_cre-neg', 'p-p_cre-pos')),
        ('HTZ', ('f-p_cre-pos',)),
    )
    for genotype, markers in marker_table:
        if any(marker in name for marker in markers):
            return genotype
def returnID(imageName, list_ID):
    """Return the first entry of *list_ID* that occurs as a substring of
    *imageName*, or None when none of them do."""
    name = str(imageName)
    return next((candidate for candidate in list_ID if candidate in name), None)
import json
class TargetserversSerializer:
    """Render a target-servers HTTP response in one of several formats."""

    def serialize_details(self, targetservers, format, prefix=None):
        """Serialize *targetservers* (an HTTP response object).

        format: "text" -> raw body, "dict" -> parsed JSON list,
        "json" -> JSON string; any other value returns the response object
        unchanged.  prefix, when given, keeps only names starting with it.
        """
        if format == "text":
            return targetservers.text
        original_response = targetservers
        names = targetservers.json()
        if prefix:
            names = [name for name in names if name.startswith(prefix)]
        if format == "dict":
            return names
        if format == "json":
            return json.dumps(names)
        return original_response
|
"""
ID: ten.to.1
TASK: numtri
LANG: PYTHON3
"""
class TriNode:
    """A cell of the number triangle: its value plus links to the two
    children directly below it (wired up after all rows are read)."""

    def __init__(self, val):
        self.value = val
        self.left = None
        self.right = None
f_in = open("numtri.in", "r");
f_out = open("numtri.out", "w")
R = int(f_in.readline())
nodes = []
for i in range(0, R):
nodes.append(list(map(TriNode, map(int, f_in.readline().split()))))
for i in range(0, R - 1):
for j in range(0, i + 1):
nodes[i][j].left = nodes[i + 1][j]
nodes[i][j].right = nodes[i + 1][j + 1]
#Print Nodes
#for n in nodes:
# for j in n:
# print(j.value, end=" ")
# print()
max = 0
def solve(node, sum):
    """Walk every root-to-leaf path, accumulating values in *sum*, and record
    the best total in the module-level `max`.

    Bug fix: the function assigns `max`, which made it a LOCAL name, so the
    comparison `sum > max` raised UnboundLocalError at the first leaf.  The
    `global` declaration restores the intended module-level accumulator.
    (NOTE(review): exponential in the number of rows; `sum`/`max` shadow
    builtins -- kept for interface compatibility.)
    """
    global max
    sum += node.value;
    if(node.left == None):
        # Leaf reached (the triangle is full, so left/right are both None).
        if(sum > max):
            max = sum
        return
    solve(node.left, sum)
    solve(node.right, sum)
# Explore all paths from the apex, then write the best total plus newline.
solve(nodes[0][0], 0)
f_out.write(str(max) + "\n")
|
from app.config import host,port, database, user, password
import psycopg2
# Open the application's PostgreSQL connection.
# SECURITY FIX: real credentials were hard-coded here while the values
# imported from app.config (host, port, database, user, password) went
# unused; use the config values so secrets stay out of source control.
# The leaked password must also be rotated on the server.
connection = psycopg2.connect(user=user,
                              password=password,
                              host=host,
                              port=port,
                              database=database)
|
from GenericElement import GenericElement
from WaveguideJunction import WaveguideJunction
from WaveguideElement import WaveguideElement
from Utils import toSI as SI
import numpy as np
from matplotlib import pyplot as plt
from scipy.constants import c as c0
a = SI("8.636mm")
l = SI("100.0mm")
dd = SI("1mm")
f_c = 0.5*c0/a
fmin = SI("17.5GHz")
fmax = SI("30GHz")
frange = np.linspace(fmin, fmax, num = 1001)
s12 = np.zeros(frange.shape, dtype = complex)
iris_widths = np.array([0.11, 0.21, 0.31, 0.41, 0.51, 0.61, 0.66, 0.71]) * a
impedances = np.zeros(iris_widths.shape)
irisModes = 10
plt.figure()
dd_lengths = np.linspace(0, SI("2.5mm"), num = 11)
for dd_idx, dd in enumerate(dd_lengths):
for idx, iris in enumerate(iris_widths):
waveguideModes = np.round(irisModes * a/iris)
waveguide = WaveguideElement(a, l, waveguideModes, frange[0])
junction = WaveguideJunction(a, iris, waveguideModes, irisModes, frange[0])
waveguide1 = WaveguideElement(iris, 0.5*dd, irisModes, frange[0])
for i, freq in enumerate(frange):
waveguide.update(freq)
junction.update(freq)
waveguide1.update(freq)
r1 = waveguide * junction * waveguide1
r2 = GenericElement(r1.s22, r1.s21, r1.s12, r1.s11)
r3 = r1*r2
s12[i] = r3.s12[0,0]
x = 1.0/(1.0 - (f_c/frange)**2)
y = np.abs(s12)**(-2) - 1.0
p = np.polyfit(x, y, 1)
impedances[idx] = 0.5/np.sqrt(p[0])
plt.plot(iris_widths/a, impedances, label = ("dd = %f" % dd))
plt.xlabel("iris width [waveguide widths]")
plt.ylabel("Impedance [Z0]")
plt.legend()
plt.grid()
plt.title("Iris load inductance vs. iris width")
plt.show()
|
"""
This enables us to call the minions and search for a specific role
Roles are set using grains (described in http://www.saltstat.es/posts/role-infrastructure.html)
and propagated using salt-mine
"""
import logging
# Import salt libs
import salt.utils
import salt.payload
log = logging.getLogger(__name__)
def get_roles(role, *args, **kwargs):
    """
    Send the informer.is_role command to all minions
    """
    # Returns the (realname-mapped) minion ids whose mined grains list the
    # requested role.  NOTE(review): Python 2 module (print statement,
    # iteritems) -- keep it that way unless the whole module is ported.
    ret = []
    # salt-mine snapshot of every minion's grains
    nodes = __salt__['mine.get']('*', 'grains.item')
    print "-------------------------------> NODES {0}".format(nodes)
    for name, node_details in nodes.iteritems():
        name = _realname(name)
        roles = node_details.get('roles', [])
        if role in roles:
            ret.append(name)
    return ret
def get_node_grain_item(name, item):
    """Get the details of a node by the name nodename"""
    # Raises KeyError when the minion or the grain item is absent from the
    # salt-mine cache.
    name = _realname(name)
    node = __salt__['mine.get'](name, 'grains.item')
    print "NODE DETAILS ------> {0}: {1}".format(name, node[name])
    return node[name][item]
def all():
    """Get all the hosts and their ip addresses"""
    # NOTE(review): shadows the builtin all() within this module.
    ret = {}
    nodes = __salt__['mine.get']('*', 'grains.item')
    for name, node_details in nodes.iteritems():
        # Prefer the EC2 private address mined from grains; otherwise fall
        # back to the first address reported by network.ip_addrs.
        if 'ec2_local-ipv4' in node_details:
            ret[_realname(name)] = node_details['ec2_local-ipv4']
        else:
            ip = __salt__['mine.get'](name, 'network.ip_addrs')[name][0]
            print "-----------------------------> {0}".format(ip)
            ret[_realname(name)] = ip
    return ret
def _realname(name):
"""Basically a filter to get the 'real' name of a node"""
if name == 'master':
return 'saltmaster'
else:
return name |
import sys
import SendData
def lireFichier(emplacement):
    """Return the full contents of the file at *emplacement*.

    Fix: use a context manager so the handle is closed even when read()
    raises (the original leaked the descriptor on error).
    """
    with open(emplacement) as fichier:
        return fichier.read()
def recupTemp(contenuFich):
    """Parse a DS18B20 w1_slave dump: the 10th field of the second line is
    't=<milli-degrees-C>'; return the temperature in degrees Celsius."""
    seconde_ligne = contenuFich.split("\n")[1]
    champ_t = seconde_ligne.split(" ")[9]   # e.g. 't=19937'
    return float(champ_t[2:]) / 1000
# Read the three DS18B20 1-wire probes (each addressed by its device id) and
# apply a per-probe calibration factor before publishing via SendData.
contenuFich = lireFichier("/sys/bus/w1/devices/28-0119113a3b60/w1_slave")
temperatureY = recupTemp (contenuFich)*1.035
print ("Temperature_Y: ", temperatureY)
contenuFich = lireFichier("/sys/bus/w1/devices/28-01191ae5edd9/w1_slave")
temperatureG = recupTemp (contenuFich)*1.035
print ("Temperature_G: ", temperatureG)
contenuFich = lireFichier("/sys/bus/w1/devices/28-011921255a5b/w1_slave")
temperatureR = recupTemp (contenuFich)*1.025
print ("Temperature_R: ", temperatureR)
SendData.states('sensors/ds18b20', {'temperatureR': temperatureR, 'temperatureG': temperatureG,'temperatureY': temperatureY})
|
import numpy as np
from gym.envs.mujoco import mujoco_env
from gym import utils
NXO_DOF = 9
def mass_center(model):
    """Return the x coordinate of the model's center of mass, computed as the
    body-mass-weighted mean of the body inertial positions."""
    masses = model.body_mass
    positions = model.data.xipos
    weighted_sum = np.sum(masses * positions, 0)
    return (weighted_sum / np.sum(masses))[0]
class NextageEnv(mujoco_env.MujocoEnv, utils.EzPickle):
    """Gym/MuJoCo environment for the Nextage robot: the first NXO_DOF qpos
    entries are position-controlled; the remainder are held at whatever the
    model provides."""
    def __init__(self):
        mujoco_env.MujocoEnv.__init__(self, 'nxo_basic.xml') #TODO: Frameskip, how does this affect?
        utils.EzPickle.__init__(self)
        # Home pose for the actuated joints; keep MujocoEnv's values for the rest.
        self.init_qpos = np.concatenate(([0, -0.0104, 0, -1.745, 0.265, 0.164, 0.0558, 0, 0], self.init_qpos[NXO_DOF:]) )
    def _get_obs(self):
        # Observation is currently just the flattened joint positions.
        data = self.model.data
        return np.concatenate([data.qpos.flat
        # ,
        # data.qvel.flat,
        # data.site_xpos.flat
        ]) # TODO: what else can I use?
    def step(self, a):
        # self.do_simulation(self.init_qpos, self.frame_skip)
        self._position_control(self._limit_actions(a))
        # Reward is a constant 0 and the episode never self-terminates;
        # shaping is expected to come via set_reward_func (not yet wired in).
        reward = 0
        info = []
        return self._get_obs(), reward, False, info
    def _position_control(self, a):
        # Step the simulation until the actuated joints reach the commanded
        # positions within _compare_arrays' loose tolerance.
        # NOTE(review): loops forever if the target is unreachable.
        i = 0
        while True:
            # compute simulation
            self.do_simulation(np.concatenate((a[:NXO_DOF], self.model.data.qpos[NXO_DOF:].flatten())), self.frame_skip)
            # render for the user
            self.render()
            # check if we achieve the desired position
            qpos = self.model.data.qpos.flatten()
            if self._compare_arrays(a[:NXO_DOF], qpos[:NXO_DOF]):
                break
            if i%500 == 0:
                print("not looking good :(")
                print("action", np.round(a[:NXO_DOF], 3))
                print("qpos", np.around(qpos[:NXO_DOF], 3))
            i+=1
    def _compare_arrays(self, a, b):
        # Coarse equality: compares the SUMS of the rounded arrays, so
        # opposite per-joint errors can cancel each other out.
        a = np.round(a, 3)
        b = np.round(b, 3)
        c = abs(a.sum() - b.sum())
        return c <= 0.4
        # return np.array_equal(a, b)
    def _limit_actions(self, a):
        # Clip each actuated joint command into the model's joint range
        # (mutates and returns `a`).
        jnt_range = self.model.jnt_range
        for jnt in range(NXO_DOF):
            if a[jnt] < jnt_range[jnt][0]:
                a[jnt] = jnt_range[jnt][0]
            elif a[jnt] > jnt_range[jnt][1]:
                a[jnt] = jnt_range[jnt][1]
        return a
    def reset_model(self):
        # Restore the home pose and initial velocities.
        self.set_state(
            self.init_qpos,
            self.init_qvel
        )
        return self._get_obs()
    def set_reward_func(self, func):
        # External hook: store a reward function (not consumed by step() yet).
        self.reward_func = func
    def viewer_setup(self):
        # Fixed camera framing of the robot.
        self.viewer.cam.trackbodyid = 0
        self.viewer.cam.distance = 2.30
        self.viewer.cam.azimuth = -140
        self.viewer.cam.elevation = -32
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Created on Aug 15, 2010
@author: Wang Yuanyi
'''
# Please change the following 2 rows to your own family members' Google accounts.
Admin = '@gmail.com'
Users = ['@gmail.com','@gmail.com']
TEST = False
from wiwikai.faccbk import TransPurposeCategory, TransAccount, Payee, \
    trans_type_expense, trans_type_income, trans_account_type_credit_card, \
    trans_account_type_debit_card
import os
# The GAE dev server reports 'Development/x.y' in SERVER_SOFTWARE; switch to
# test accounts and seed data when running locally.
server_software = os.environ['SERVER_SOFTWARE']
DEVELOPMENT = False
if server_software.startswith('Development'):
    DEVELOPMENT = True
    TEST = True
if DEVELOPMENT == True:
    Admin = 'test@example.com'
    Users = ['test@example.com']
if TEST:
    # Seed the datastore with sample entities whenever each kind is empty.
    def insert_trans_purpose_category(ptitle, ptrans_type):
        transTargetCtg = TransPurposeCategory(title = ptitle, trans_type = ptrans_type )
        transTargetCtg.put()
    def insert_trans_account(plastnumber, ptrans_account_type, pbank_name, pstatement_date, ppayment_due_date):
        creditCard = TransAccount(last4number = plastnumber, type=ptrans_account_type, bank_name = pbank_name, statement_date = pstatement_date, payment_due_date =ppayment_due_date )
        creditCard.put()
    def insert_payee(payee_title):
        payee = Payee(title = payee_title)
        payee.put()
    if TransPurposeCategory.all().count() == 0:
        # Sample categories: "household food expense" / "salary income".
        insert_trans_purpose_category(u"家庭食物支出", trans_type_expense)
        insert_trans_purpose_category(u"工资收入", trans_type_income)
    if TransAccount.all().count() == 0:
        insert_trans_account('8888', trans_account_type_credit_card, 'ICBC', 20, 8)
        insert_trans_account('7777', trans_account_type_debit_card, 'JBC', 25, 15)
    if Payee.all().count() == 0:
        # Sample payees: "child" / "wife" / "self".
        insert_payee(u'孩子')
        insert_payee(u'老婆')
        insert_payee(u'自己')
from unittest import TestCase
# https://github.com/georgezlei/algorithm-training-py
# Author: George Lei
import algorithm_prep as algo
import algorithm_prep.classic.sort as sort
class TestSort(TestCase):
    """Run every sorting implementation in algorithm_prep.classic.sort
    against the module's shared test_cases; algo.test returns True on pass."""
    def test_bubble_sort(self):
        self.assertTrue(algo.test(sort.bubble_sort, sort.test_cases))
    def test_insert_sort(self):
        self.assertTrue(algo.test(sort.insert_sort, sort.test_cases))
    def test_heap_sort(self):
        self.assertTrue(algo.test(sort.heap_sort, sort.test_cases))
    def test_merge_sort(self):
        self.assertTrue(algo.test(sort.merge_sort, sort.test_cases))
    def test_quick_sort(self):
        self.assertTrue(algo.test(sort.quick_sort, sort.test_cases))
    def test_radix_sort(self):
        self.assertTrue(algo.test(sort.radix_sort, sort.test_cases))
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
# Put the parameters used in simulation
nb_robots_choices = range(1,21)
nb_candidates = 4
nb_classes = 80
nb_robots_further_check = 4
semantic_descriptors = []
robots_verified = []
# Average per-query data size (one entry per team size) for each method.
bow_data = np.zeros(len(nb_robots_choices))
netvlad_data = np.zeros(len(nb_robots_choices))
ours_data = np.zeros(len(nb_robots_choices))
ours_data_broadast = np.zeros(len(nb_robots_choices))
nb_robots_checked = 0
# semantic_descriptors_nb_robots_10_nb_cand_1_rob_furth_check_5_min_dist_0.1.npy
for nb_robots in nb_robots_choices:
    nb_robots_checked +=1
    # Load the decentralized run saved for this team size.
    semantic_desc_file_path = '../results/dec_sem_desc_'+str(nb_robots)+'.npy'
    robots_ver_file_path = '../results/dec_rob2ver_'+str(nb_robots)+'.npy'
    semantic_descriptors.append(np.load(semantic_desc_file_path))
    robots_verified.append(np.load(robots_ver_file_path))
    _,nb_frames,_ = semantic_descriptors[-1].shape
    # Split the semantic classes evenly across the robots.
    semantic_splits_idx = np.linspace(0, nb_classes, nb_robots+1,dtype=int)
    nb_labels_received = np.zeros((nb_robots,nb_frames))
    nb_cand_received = np.zeros((nb_robots,nb_frames))
    nb_further_checks = np.zeros((nb_robots, nb_frames))
    for rob_i in range(len(semantic_splits_idx)-1):
        # Count the labels and candidates robot rob_i receives per frame.
        nb_labels_received[rob_i,:] = np.count_nonzero(semantic_descriptors[-1][:,:,semantic_splits_idx[rob_i]:semantic_splits_idx[rob_i+1]],axis=(0,2))
        cand = (robots_verified[-1][rob_i, :, :, :, 0] > -1).reshape(nb_frames, -1)
        nb_cand_received[rob_i, :] = np.count_nonzero(cand, axis=1)
        further_checks_labels =robots_verified[-1][rob_i, :, :, :, 0].reshape(nb_frames, -1)
    nb_objects = np.maximum(np.sum(semantic_descriptors[-1],axis=2),16)
    # 1.5 byte for each label nb and id, 1 byte for the robot id, 2 bytes for the frame id. For each object : 1 byte for the label, 2 bytes per position
    total_data_received_ours = 1.5*nb_labels_received + nb_cand_received * \
        (1+2) + nb_objects*nb_robots_further_check*(1+3*2)
    # Each query is 16kB per robot, to each robot. Return 1 robot id and 2 for frame id
    total_data_received_bow = np.zeros(((nb_robots, nb_frames)))+16000+(1+2)*nb_robots
    # 512 bytes for each query, 1 for robot id and 2 for frame id
    total_data_received_netvlad = np.zeros(
        ((nb_robots, nb_frames)))+512+(1+2)*nb_robots
    # Ours case naive : Send the full constellation to every other robot
    total_data_received_ours_naive = nb_objects*nb_robots*(1+3*2)
    # Divide per nb of frames and nb of robots to get the data for a single query
    bow_data[nb_robots_checked -
             1] = np.sum(total_data_received_bow)/nb_frames/nb_robots
    netvlad_data[nb_robots_checked -
                 1] = np.sum(total_data_received_netvlad)/nb_frames/nb_robots
    ours_data[nb_robots_checked -
              1] = np.sum(total_data_received_ours)/nb_frames/nb_robots
    ours_data_broadast[nb_robots_checked -
                       1] = np.sum(total_data_received_ours_naive)/nb_frames/nb_robots
# Convert to kB
bow_data/=1e3
netvlad_data/=1e3
ours_data/=1e3
ours_data_broadast/=1e3
# --- Figure styling and main curves ---------------------------------------
plt.rc('font', family='serif')
plt.rc('xtick', labelsize='x-small')
plt.rc('ytick', labelsize='x-small')
fig = plt.figure(figsize=(4, 3))
ax = fig.add_subplot(1, 1, 1)
# x = np.linspace(1., 8., 30)
ax.plot(np.arange(1, nb_robots_checked+1), ours_data, color='r', ls='solid')
ax.plot(np.arange(1, nb_robots_checked+1),
        ours_data_broadast, color='k', ls='dashdot')
ax.plot(np.arange(1, nb_robots_checked+1),
        netvlad_data, color='g', ls='dashed')
ax.plot(np.arange(1, nb_robots_checked+1), bow_data, color='b', ls='dotted')
ax.set_xlabel('Number of robots')
ax.set_ylabel('Size of one query [kB]')
plt.gca().legend(('Our solution', 'Broadcast constellation', 'Solution from [42]', 'Solution from [10]'), loc=6, prop={'size': 7})
major_ticks_x = np.arange(0, 20.01, 5)
minor_ticks_x = np.arange(0, 20.01, 1)
ax.set_xticks(major_ticks_x)
ax.set_xticks(minor_ticks_x, minor=True)
ax.grid(which='both')
ax.grid(which='minor', alpha=0.2)
ax.grid(which='major', alpha=0.5)
# --- Zoomed inset over the large-team region ------------------------------
axins_args = {'yticks': np.arange(0, 20.01, 0.4)}
axins = zoomed_inset_axes(ax, 2.5, loc=7, axes_kwargs=axins_args)
axins.plot(np.arange(1, nb_robots_checked+1), ours_data, color='r', ls='solid')
axins.plot(np.arange(1, nb_robots_checked+1),
           ours_data_broadast, color='k', ls='dashdot')
axins.plot(np.arange(1, nb_robots_checked+1),
           netvlad_data, color='g', ls='dashed')
# specify the limits
axins.plot(np.arange(1, nb_robots_checked+1), bow_data, color='b', ls='dotted')
x1, x2, y1, y2 = 16.5, 19.5, 0.0,3.
axins.set_xlim(x1, x2)  # apply the x-limits
axins.set_ylim(y1, y2)  # apply the y-limits
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
mark_inset(ax, axins, loc1=3, loc2=4, fc="none", ec="0.5")
plt.show()
|
'''
Given two numbers, hour and minutes. Return the smaller angle (in degrees) formed between the hour and the minute hand.
Example 1:
Input: hour = 12, minutes = 30
Output: 165
Example 2:
Input: hour = 3, minutes = 30
Output: 75
Example 3:
Input: hour = 3, minutes = 15
Output: 7.5
Example 4:
Input: hour = 4, minutes = 50
Output: 155
Example 5:
Input: hour = 12, minutes = 0
Output: 0
Constraints:
1 <= hour <= 12
0 <= minutes <= 59
Answers within 10^-5 of the actual value will be accepted as correct.
Hint #1
The tricky part is determining how the minute hand affects the position of the hour hand.
Hint #2
Calculate the angles separately then find the difference.
'''
#Solution:
class Solution:
    def angleClock(self, hour: int, minutes: int) -> float:
        """Return the smaller angle (degrees) between the hour and minute hands."""
        # Fraction of a full revolution travelled by each hand; the hour hand
        # advances with the minutes as well (60 min = 1/12 of the dial).
        hour_frac = (hour % 12 * 60.0 + minutes) / 720.0
        minute_frac = minutes / 60.0
        gap = abs(hour_frac - minute_frac)
        # The hands split the dial into two arcs; report the smaller one.
        return min(gap, 1.0 - gap) * 360.0
|
import hashlib
def md5_string(string):
    """Return the hex MD5 digest of the UTF-8 encoding of *string*."""
    digest = hashlib.md5()
    digest.update(string.encode('utf-8'))
    return digest.hexdigest()
def sha256_string(string):
    """Return the hex SHA-256 digest of the UTF-8 encoding of *string*."""
    digest = hashlib.sha256()
    digest.update(string.encode('utf-8'))
    return digest.hexdigest()
# Hash the literal filename: SHA-256 first, then MD5 of that hex digest.
print(md5_string(sha256_string('id0-rsa.pub')))
|
from django.db import models
# Create your models here.
class UserInfo(models.Model):
    """A registered user: unique login name, password, optional contacts."""
    # Login name; unique and required.
    name = models.CharField(max_length=32,unique=True,null=False)
    # Password field (32 chars; presumably a hash — TODO confirm with callers).
    pwd = models.CharField(max_length=32)
    email = models.EmailField(null=True)
    # Phone number, at most 11 characters; optional.
    phone = models.CharField(max_length=11,null=True)
    def __str__(self):
        return self.name
class City(models.Model):
    """A city identified by its (unique, nullable) name."""
    name = models.CharField(max_length=16,null=True,unique=True)
|
import numpy as np
from scipy.sparse import coo_matrix
import pyspark
from pyspark.ml.recommendation import ALS as spark_ALS
from pyspark.sql.types import StructType, StructField, FloatType, IntegerType
class ALS:
    """Alternating Least Squares collaborative-filtering recommender.

    Learns user and item latent-factor matrices from sparse ratings by
    alternately solving ridge-regularized least-squares problems.
    """
    def __init__(self, n_features=10, lam=0.1, n_jobs=1, max_iter=10, n_blocks=1, tol=0.1):
        # n_features: latent dimensionality; lam: L2 regularization weight.
        # n_jobs / n_blocks: parallelism knobs (consumed by the Spark subclass).
        # max_iter: maximum ALS sweeps; tol: early-stop threshold on the loss.
        self.n_features=n_features
        self.lam=lam
        self.n_jobs=n_jobs
        self.n_blocks=n_blocks
        self.max_iter=max_iter
        self.tol=tol
        # Learned factor matrices, filled by fit():
        # users is (n_users, n_features); items is (n_features, n_items).
        self.users=None
        self.items=None
    def fit(self, X, y=None, warm_start=False):
        """Fit the model using ALS given pairs of user-item ratings.
        Parameters
        ----------
        X: np.ndarray, shape=(n, 2), dtype=int
            2d array containing user and item id pairs.
        y: np.ndarray, dtype=(float,int)
            Array containing the ratings corresponding to user-item pairs in X.
        warm_start: bool, default=False
            Whether to use resulting user and item factors of previous fit calls as the
            starting values. Randomizes the factors if False.
        Returns
        -------
        output: self
            The fitted model.
        """
        R = self._convert_to_sparse(X, y)
        # Random init (std 1/sqrt(n_features)) unless warm-starting from a prior fit.
        if not warm_start or self.users is None and self.items is None:
            U,V = [np.random.normal(0, np.sqrt(self.n_features)/self.n_features, (n,self.n_features))
                for n in R.shape]
        else:
            U,V = self.users, self.items.T
        # NOTE(review): L is unused — lst_sq builds its own regularizer.
        L = np.diag([self.lam for _ in range(self.n_features)])
        for _ in range(self.max_iter):
            # Alternate: solve for user factors with items fixed, then vice versa.
            U = self.lst_sq(V, R.T, self.lam).T
            V = self.lst_sq(U, R, self.lam).T
            if self._error(R, U, V.T) < self.tol:
                break
        self.users=U
        self.items=V.T
        return self
    def predict(self, X):
        """Given the user-item matrices found through fit, estimate the ratings of pairs from X.
        Parameters
        ----------
        X: np.ndarray, shape=(n, 2), dtype=int
            2d array containing user and item id pairs to predict.
        Returns
        -------
        output: np.ndarray
            Estimated ratings for user-item pairs in X.
        """
        output = []
        for u,v in X:
            u,v = int(u), int(v)
            # Dot product of the user's and item's latent factor vectors.
            output.append(sum(self.users[u] * self.items.T[v]))
        return output
    def _error(self, x, u=None, v=None, lam=None):
        """Regularized squared reconstruction error over the rated entries of x."""
        if u is None: u = self.users
        if v is None: v = self.items
        if lam is None: lam = self.lam
        if isinstance(x, (tuple,list)):
            x = self._convert_to_sparse(*x)
        if not isinstance(x, np.ndarray): x = x.toarray()
        # Only nonzero (observed) cells contribute to the data term; the
        # regularizer adds the squared norms of all factor rows.
        return np.where(x, (x - u @ v)**2, 0).sum() \
            + (lam) * (sum(a@a.T for a in u)
            + sum(a@a.T for a in v))
    def _convert_to_sparse(self, X, y):
        """Convert a set of user-item pairs and known ratings into a sparse matrix.
        Parameters
        ----------
        X: np.ndarray
        y: np.ndarray
        """
        cols, rows = [X[:, i].astype(int) for i in range(2)]
        return coo_matrix((y, (cols, rows)))
    @staticmethod
    def lst_sq(a, b, reg=0.1):
        """ Least Squares solution for ax=b with regularization
        """
        # Normal equations with ridge term: pinv(a'a + reg*I) a' b.
        L = reg * np.eye(a.shape[1])
        return np.linalg.pinv(a.T@a + L) @ a.T @ b
class SparkALS(ALS):
    """Simple Wrapper class for spark als model. Mimics behavior of base ALS class.
    Intended for comparison purposes"""
    def __init__(self, random_seed=None, **kwargs):
        # A shared SparkSession is created (or reused) at construction time.
        self.spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.random_seed=random_seed
        super().__init__(**kwargs)
    def fit(self, X, y=None):
        """Fit Spark's ALS on (X, y) ratings and store the learned factors.

        NOTE(review): unlike ALS.fit, this returns (U, V) rather than self.
        """
        R = np.append(X, y[:, None], 1).tolist()
        S = self.spark.createDataFrame(R, ['user','item','rating'])
        model = spark_ALS(rank=self.n_features, regParam=self.lam,
                          seed=self.random_seed,
                          numUserBlocks=self.n_blocks,
                          numItemBlocks=self.n_blocks,
                          maxIter=self.max_iter,
                          itemCol='item',
                          userCol='user',
                          ratingCol='rating')
        model = model.fit(S)
        # Collect the factor matrices back into NumPy arrays.
        # NOTE(review): assumes the factor rows come back ordered by id — verify.
        U = model.userFactors.toPandas()
        V = model.itemFactors.toPandas()
        U = np.array([row for row in U['features']])
        V = np.array([row for row in V['features']])
        self.users=U
        self.items=V.T
        return U,V
def random_ratings(n_users, n_items, response_rate=0.1):
    """
    Create an X, y pair of random ratings.

    Each user rates each item with probability `response_rate`; the sweep
    over the items repeats until the user has rated at least one item, so
    every user id appears in X.  (`response_rate` must be > 0 or the loop
    never terminates — same as the original behavior.)

    Returns
    -------
    X: np.ndarray, shape=(n, 2)
        (user, item) id pairs, float dtype.
    y: np.ndarray, shape=(n,)
        Ratings in 1..5 for the pairs in X.
    """
    # Accumulate rows in a Python list: O(n) overall instead of the
    # quadratic repeated np.append, and no per-sweep set rebuild.
    rows = []
    for usr in range(n_users):
        rated = False
        # Keep sweeping the items until this user has responded once.
        while not rated:
            for itm in range(n_items):
                if np.random.rand() <= response_rate:
                    rows.append([usr, itm, np.random.randint(1, 6)])
                    rated = True
    R = np.array(rows, dtype=float).reshape(-1, 3)
    return R[:, :2], R[:, 2]
|
"""
docstring in functions
"""
# docstring in function without argument
def foo():
    """
    Demonstrate a docstring on a zero-argument function.
    :return: None
    """
    message = "Yes, we entered the function of foo()"
    print(message)
# Call foo() once so its message prints, then sign off.
foo()
print("Good bye!")
# docstring in function with arguments
def add(num1, num2):
    """
    Return the sum of the two arguments.
    :param num1: first addend
    :param num2: second addend
    :return: num1 + num2
    """
    return num1 + num2
result = add(3, 5)
# Show each function's docstring via its __doc__ attribute.
for documented in (foo, add):
    print(documented.__doc__)
from .base_cheque_class import BaseCheque
class LeumiParser(BaseCheque):
    """Cheque parser for Bank Leumi cheques (bank type number 10)."""
    TYPE_NUMBER = 10  # Leumi's bank identifier
    TYPE_NAME = 'leumi'
    @classmethod
    def parse(cls, gray_img):
        """Parse a grayscale cheque image through the shared BaseCheque
        pipeline; Leumi cheques do not pair telephone numbers with persons."""
        return super()._parse(
            gray_img,
            match_telephones_with_persons=False
        )
    #
    # {'first_person_id': first_person_id,
    #  'first_person_name': first_person_name,
    #  'second_person_id': second_person_id,
    #  'second_person_name': second_person_name,
    #  'persons_count': persons_count}
    # def def parse_cheque_details_on_numbers(numbers, lang='heb'):
    @classmethod
    def parse_person_info(cls, img):
        """Delegate person-info extraction to BaseCheque unchanged; the
        Leumi-specific name-splitting experiments below stay commented out."""
        person_data = super().parse_person_info(img)
        #
        # first_person_name_list = person_data['first_person_name'] and list(person_data['first_person_name'][::-1].split(" "))
        # seen = set(first_person_name_list)
        # print(first_person_name_list)
        # print(seen)
        # fpnc = first_person_name_list.copy()
        # for elem in seen:
        #     fpnc.remove(elem)
        # print(fpnc)
        #
        # if fpnc and person_data['second_person_name'] is None:
        #     index = first_person_name_list.index(fpnc[0])
        #     print(index)
        #     tmp = first_person_name_list[:index]
        #     person_data['second_person_name'] = first_person_name_list[index+1:]
        #     person_data['first_person_name'] = tmp
        #
        # first_person_name_list = person_data['first_person_name']
        # if first_person_name_list:
        #     person_data['first_person_name'] = ' '.join(first_person_name_list[:-1])
        #     person_data['first_person_lastname'] = first_person_name_list[-1]
        #
        # else:
        #     person_data['first_person_name'] = None
        #     person_data['first_person_lastname'] = None
        # second_person_name_list = person_data['second_person_name']
        # if second_person_name_list:
        #     person_data['second_person_name'] = ' '.join(second_person_name_list[:-1])
        #     person_data['second_person_lastname'] = second_person_name_list[-1]
        return person_data
|
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 17 12:00:45 2020
@class: COMP469
@author: Cristian Aguilar
@Title: Homework 1: 8-puzzle BFS
"""
import timeit
from copy import deepcopy
class Node():
    """Search-tree node: a 3x3 board plus parent/children links."""
    def __init__(self, data):
        self.data = data        # the 3x3 board (list of lists)
        self.children1 = []     # successor nodes generated from this board
        self.parent = None      # parent node, used to reconstruct the path
def outOfboundsCheck(x, y):
    """Return True when (x, y) lies outside the 3x3 board."""
    inside = 0 <= x <= 2 and 0 <= y <= 2
    return not inside
def printPath(pathList):
    """Pretty-print every 3x3 board in pathList, blank line between boards."""
    for board in pathList:
        for row in board:
            for cell in row:
                print(cell, " ", end='')
            print("")
        print("\n")
def matrixToNumber(matrix):
    """Serialize a 3x3 board into its 9-character string key."""
    return ''.join(str(matrix[row][col])
                   for row in range(3)
                   for col in range(3))
def ifGoalState(board, goalState):
    """Return True when the board matches the goal configuration."""
    return goalState == board
def fringeAndvisitedUpdate(fringeList, visitedList, current_board, current_node):
    """Append current_board to the fringe as a child of current_node,
    unless its serialized form was already visited."""
    if matrixToNumber(current_board) not in visitedList:
        child = Node(current_board)
        child.parent = current_node
        current_node.children1.append(child)
        fringeList.append(child)
def successor_fcn(currNode, fringeList, visitedList):
    """Generate every board reachable by sliding one tile into the blank,
    appending the unvisited successors to fringeList as children of currNode."""
    currBoard = currNode.data
    x, y = 0, 0
    # Locate the blank (0) tile.  (The break only exits the inner loop; the
    # outer loop continues harmlessly since 0 appears exactly once.)
    for i in range(0,3):
        for j in range(0, 3):
            if(currBoard[i][j] == 0):
                x = i
                y = j
                break;
    # blank moves right: swap with the tile at (x, y+1)
    if(not outOfboundsCheck(x, y+1)):
        child = deepcopy(currBoard)
        child[x][y] = currBoard[x][y+1]
        child[x][y+1] = currBoard[x][y]
        fringeAndvisitedUpdate(fringeList, visitedList, child, currNode)
        #print(child)
    # blank moves down: swap with the tile at (x+1, y)
    if(not outOfboundsCheck(x+1, y)):
        child = deepcopy(currBoard)
        child[x][y] = currBoard[x+1][y]
        child[x+1][y] = currBoard[x][y]
        fringeAndvisitedUpdate(fringeList, visitedList, child, currNode)
        #print(child)
    # blank moves left: swap with the tile at (x, y-1)
    if(not outOfboundsCheck(x, y-1)):
        child = deepcopy(currBoard)
        child[x][y] = currBoard[x][y-1]
        child[x][y-1] = currBoard[x][y]
        fringeAndvisitedUpdate(fringeList, visitedList, child, currNode)
        #print(child)
    # blank moves up: swap with the tile at (x-1, y)
    if(not outOfboundsCheck(x-1, y)):
        child = deepcopy(currBoard)
        child[x][y] = currBoard[x-1][y]
        child[x-1][y] = currBoard[x][y]
        fringeAndvisitedUpdate(fringeList, visitedList, child, currNode)
        #print(child)
def findPathWithParent(root, endNode):
    """Walk parent links from endNode up to the start, appending each board
    to the module-level `result` list (so the path comes out goal-first).

    NOTE(review): `root` is unused, and the path is accumulated in the
    global `result` rather than returned.
    """
    curr = endNode
    while curr != None:
        result.append(curr.data)
        curr = curr.parent
def BFS():
    """Breadth-first search from the global `root` board to `Goal`.

    Uses the module-level `fringe` (FIFO list) and `visited` dict.  Returns
    the goal Node, or None when the root already equals the goal (in that
    case findPathWithParent has already populated `result`).
    """
    print("finding solution...")
    if(ifGoalState(root.data, Goal)):
        return findPathWithParent(root, root)
    fringe.append(root)
    while(fringe != []):
        # NOTE(review): list.pop(0) is O(n); a collections.deque would be faster.
        node = fringe.pop(0)
        visited[matrixToNumber(node.data)] = 1
        successor_fcn(node, fringe, visited)
        # Goal-test children as soon as they are generated.
        for child in node.children1:
            if(matrixToNumber(child.data) not in visited):
                if(ifGoalState(child.data, Goal)):
                    return child
    print("Error: fringe empty????")
#test matrices
#Goal = [[7, 2, 4], [0, 6, 1], [5, 8, 3]]
#Goal = [[2, 0, 4], [7, 5, 6], [8, 3, 1]]
#Goal = [[2, 0, 4], [5, 7, 3], [1, 8, 6]]
#Goal = [[2, 1, 0], [3, 4, 5], [6, 7, 8]] <--- this one doesnt work IDK why
# --- Driver: time a BFS solve from Board to Goal and print the path -------
start = timeit.default_timer()
Board = [[7, 2, 4], [5, 0, 6], [8, 3, 1]]
Goal = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
fringe = []
visited= {}
result = []
root = Node(Board)
endNode = BFS()
# Sanity check: the goal should never have been marked visited by BFS.
if(matrixToNumber(Goal) in visited):
    print("WTF")
# Reconstruct the path into `result` (goal-first) and print it.
findPathWithParent(root, endNode)
printPath(result)
print("Size of fringe: ", len(fringe))
print("Number of visited nodes: ", len(visited))
stop = timeit.default_timer()
print('Time: ', stop - start)
|
# Guarded module imports.  NOTE(review): a missing module is only reported;
# execution continues and later code will then fail with NameError.
try:
    import os
except ImportError:
    print ("\033[31m[-] You Don't Have os Module")
try:
    import requests
except ImportError:
    print ("\033[31m[-] You Don't Have requests Module")
try:
    import sys
except ImportError:
    print ("\033[31m[-] You Don't Have sys Module")
# Banner Function
def banner():
    """Print the ASCII banner plus the usage hint."""
    print ('#'*49)
    print ('# Create By Mr Submissive in 2018 ' + '#')
    print ('# Token Brute Force Script For Python 3.X.X' + '\t' + '#')
    print ('# Github : https://github.com/MrSubmissive' +'\t'*1+ '#')
    print ('# Thank You For Support us' +'\t'*3+ '#')
    print ('#'*49)
    print ("\n\033[32mUsage: python3 adminfounder.py -u URL")
# Main Function
def exploit(url):
    """Request *url* and report whether it answered with HTTP 200."""
    try:
        response = requests.get(url)
    except Exception:
        # Network failure, bad DNS, refused connection, etc.
        print ("\033[31m[-] Connection Error !")
        return
    if response.status_code == 200:
        print ("\033[32m[+] I Found This: " + url)
    else:
        print ("\033[31m[-] I Can't Found This: " + url)
def main():
    """Parse command-line arguments and probe candidate admin paths.

    Usage: script -u URL (or -h/-H for the banner).  Candidate paths are
    read from Founder.txt, one per line, and probed as URL/<path> via
    exploit().
    """
    if len(sys.argv) == 3:
        if sys.argv[1] == "-u" or sys.argv[1] == "-U":
            # Normalize the target: prepend http:// when no scheme is given.
            if "https://" in sys.argv[2]:
                url = sys.argv[2]
            elif "http://" in sys.argv[2]:
                url = sys.argv[2]
            else:
                url = "http://" + sys.argv[2]
            if not os.path.exists("Founder.txt"):
                print ("\033[31m[-] My File Is Lost Please Install This Script Again")
                sys.exit()
            else:
                # `with` closes the wordlist deterministically (the original
                # leaked the file handle).
                with open("Founder.txt") as read:
                    for lines in read.read().splitlines():
                        line = lines.strip()
                        if line == "":
                            continue
                        # Entries without an extension are treated as directories.
                        if line.find(".") == -1:
                            line = line + "/"
                        url1 = url + "/" +line
                        exploit(url1)
        # BUG FIX: this used `or`, which is always true, so the error message
        # also fired when the flag actually was -h/-H.
        elif sys.argv[1] != "-h" and sys.argv[1] != "-H":
            print ("\033[31m[-] You Command Is Not True !")
    # BUG FIX: parenthesize and short-circuit — the original
    # `a and b or c` matched -H regardless of argc, and with no arguments
    # evaluating sys.argv[1] raised IndexError.
    if len(sys.argv) == 2 and (sys.argv[1] == "-h" or sys.argv[1] == "-H"):
        banner()
    if len(sys.argv) > 3:
        banner()
# Entry point: run main() and swallow any error with a friendly message.
try:
    main()
except Exception:
    print ("\033[31m[-] Some Thing Wrong !\nWe exit soon...!")
    exit()
|
"""
Jhonatan da Silva
Last Updated version :
Sun Feb 5 11:02:55 2017
Number of code lines:
61
"""
import time
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import bokeh.plotting as bp
from matplotlib import style
import numpy as np
import random
#style.use('fivethirtyeight')
class gradientDescent():
    """Animate plain gradient descent on f(x) = x**2 with matplotlib."""
    def __init__(self):
        # Parabola samples used as the static background curve.
        self.x = np.linspace(-10,10,300)
        self.y = self.x**2
        # Random starting abscissa (ydot is set but unused afterwards).
        self.xdot = random.choice(self.x)
        self.ydot = 0
        self.j = 0         # animation frame cursor into self.mins
        self.mins = []     # sequence of iterates recorded by GD()
        self.fig = plt.figure()
        self.ax1 = self.fig.add_subplot(1,1,1)
    def derivative(self,x):
        #test function = x^2, so f'(x) = 2x
        return 2*x
    def GD(self):
        """Run gradient descent from x = -7, recording iterates in self.mins."""
        print('Initializing Gradient Descent')
        oldMin = 0
        currentMin = -7
        #precision
        epsilon = 0.001
        step = 0.01
        # Iterate x <- x - step * f'(x) until successive iterates converge.
        while abs(currentMin - oldMin) > epsilon:
            oldMin = currentMin
            gradient = self.derivative(oldMin)
            move = gradient * step
            currentMin = oldMin - move
            self.mins.append(currentMin)
        print('Local min : {:.2f}'.format(currentMin))
    def livePlot(self,i):
        """FuncAnimation callback: draw the current iterate on the curve."""
        style.use('fivethirtyeight')
        maxValue = len(self.mins) -1
        self.ax1.clear()
        self.ax1.set_ylim([-5,120])
        self.ax1.set_xlim([-20,20])
        #plt.axis('equal')
        # Background parabola plus the current iterate as a red dot.
        self.ax1.plot(self.x,self.y,'c',self.mins[self.j],self.mins[self.j]**2,'ro')
        self.j+=1
        # Loop the animation after the last recorded iterate.
        if self.j == maxValue:
            self.j = 0
    def makeAnimation(self):
        """Show the descent as a looping matplotlib animation (blocks)."""
        a = animation.FuncAnimation(self.fig,self.livePlot,interval=10)
        plt.show()
# Driver: run the descent, then animate it (plt.show() blocks until closed).
gradient = gradientDescent()
gradient.GD()
gradient.makeAnimation()
|
#!/usr/bin/env python3
#
##
# @file fe.py
# @brief Determine the file format given an example .rdi file.
# @author Matthew McCormick (thewtex)
# @version
# @date 2009-05-21
# Public Domain
import sys
from optparse import OptionParser
import os
import logging
# Default to CRITICAL so the tool stays quiet unless -v raises verbosity.
logging.basicConfig(level = logging.CRITICAL)
fe_logger = logging.getLogger('format_evaluator')
# for working from within the source
script_path = os.path.dirname(sys.modules[__name__].__file__)
sys.path.insert(0, script_path)
from format_evaluator.format_evaluator import FormatEvaluator
##
# @brief run the format evaluator
#
# @param rdi_filepath path to the example .rdi file
#
# @return
def main(rdi_filepath):
    """Run the format evaluator on the example .rdi file at rdi_filepath."""
    with open(rdi_filepath, 'r', encoding='latin_1') as rdi_file:
        evaluator = FormatEvaluator(rdi_file, script_path)
        evaluator.run()
usage = "Usage: " + sys.argv[0] + " <sample-file.rdi>"
if __name__ == "__main__":
    parser = OptionParser(usage=usage)
    parser.add_option("-v", "--verbose", action="store_true", dest="verbose",
                      default=False,
                      help = "Print DEBUG message to stdout, default=%default")
    (options, args) = parser.parse_args()
    # -v raises the module logger from CRITICAL to DEBUG.
    if options.verbose:
        fe_logger.setLevel(logging.DEBUG)
    # Exactly one positional argument (the sample .rdi file) is required.
    if len(args) != 1:
        parser.error("Please specify sample rdi file.")
    else:
        main(args[0])
|
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 18 18:31:02 2020
@author: keisuke
"""
if __name__ == '__main__':
    try:# without a message box there is no way to surface later errors
        import tkinter as tk
        from tkinter import messagebox as mbox
    except:
        raise Exception('エラー:tkinterがインポートできません。')
if __name__ == '__main__':
    window_for_error = tk.Tk()# tkinter cannot show a message box without a root window
    window_for_error.withdraw()# hide it; only the error dialogs are wanted
#エラー定義
class FileReadError(Exception):
    """Raised when required data files could not be read; Error() first
    shows a message box listing the missing files."""
    def Error(self,file_not_found):
        # file_not_found: list of file names that failed to load.
        self.file_not_found = file_not_found
        mbox.showerror('エラー','ファイルが読み込めませんでした。\n読み込めなかったファイル:'+'・'.join(self.file_not_found))
        window_for_error.destroy()
        time.sleep(3)
class LibraryReadError(Exception):
    """Raised when a required library failed to import; Error() first
    shows a message box listing the missing libraries."""
    def Error(self,library_not_found):
        # library_not_found: list of library names that failed to import.
        self.library_not_found = library_not_found
        mbox.showerror('エラー','ライブラリが読み込めませんでした。\n読み込めなかったライブラリ:'+'・'.join(self.library_not_found))
        window_for_error.destroy()
        time.sleep(3)
class VariableError(Exception):
    """Raised when a variable holds an unexpected value; Error() first
    shows a message box naming the offending variable."""
    def Error(self,ErrorVariableName):
        # ErrorVariableName: dotted path of the variable that went wrong.
        self.ErrorVariableName = ErrorVariableName
        mbox.showerror('エラー','変数が予期していない値になった、あるいは読み込めませんでした。\nエラーが起きた変数:'+self.ErrorVariableName)
        window_for_error.destroy()
        time.sleep(3)
# Libraries that failed to import.
read_not_library = []
# NOTE(review): 'threading' is also guard-imported below but is missing here.
all_library = ['pygame','sys','time']
# Each guard reports a failed import via a message box, then raises.
try:
    import pygame
except:
    read_not_library.append('pygame')
    library_not_found = read_not_library
    error = LibraryReadError()
    error.Error(library_not_found)
    raise LibraryReadError('エラー:ライブラリが読み込めませんでした。\n読み込めなかったライブラリ:'+'・'.join(library_not_found))
try:
    import sys
except:
    read_not_library.append('sys')
    library_not_found = read_not_library
    error = LibraryReadError()
    error.Error(library_not_found)
    raise LibraryReadError('エラー:ライブラリが読み込めませんでした。\n読み込めなかったライブラリ:'+'・'.join(library_not_found))
try:
    import time
except:
    read_not_library.append('time')
    library_not_found = read_not_library
    error = LibraryReadError()
    error.Error(library_not_found)
    raise LibraryReadError('エラー:ライブラリが読み込めませんでした。\n読み込めなかったライブラリ:'+'・'.join(library_not_found))
# Guard-import threading; on failure report via a message box and abort.
try:
    import threading
except:
    # BUG FIX: this branch previously appended 'time' (copy-paste slip),
    # misreporting which library failed to import.
    read_not_library.append('threading')
    library_not_found = read_not_library
    error = LibraryReadError()
    error.Error(library_not_found)
    raise LibraryReadError('エラー:ライブラリが読み込めませんでした。\n読み込めなかったライブラリ:'+'・'.join(library_not_found))
#
#使用しているフォント
#・http://itouhiro.hatenablog.com/entry/20130602/font(PixelMplus12-Bold)(PATH=Fonts/PixelMplus12-Bold.ttf)
#
#
#メモ
####
#self == (main = Game() のようにインスタンス変数を定義したときの、インスタンス変数(ここではmain)の代名詞)
#例
#main = main()
#main.GameMainLoop == self.GameMainLoop
#
####
#
#
#
# Load the FileCheck helper module.
try:# does the FileCheck module exist?
    import FileCheck
except:
    # Show a warning message box before aborting.
    mbox.showerror('エラー','FileCheckファイルが読み込めません。')
    raise FileReadError('FileCheckファイルが読み込めません。')
#
filecheck = FileCheck.FileCheck('All')
# Were all required asset files found?  FileCheckBool() returns a
# (bool, missing_file_list) pair.
all_file_exists_bool_and_file_not_found = filecheck.FileCheckBool()
all_file_exists_bool = all_file_exists_bool_and_file_not_found[0]
file_not_found = all_file_exists_bool_and_file_not_found[1]
if all_file_exists_bool == True:
    pass
else:
    # Show a warning message box, then raise with the missing files listed.
    error = FileReadError()
    error.Error(file_not_found)
    raise FileReadError('エラー:ファイルが読み込めませんでした。\n読み込めなかったファイル:'+'・'.join(file_not_found))
# Everything loaded, so pull in the game modules.
import Config
import GameDraw
class Main(object):#親クラスはobjectクラスを継承しないとだめらしい....
def __init__(self,):
#pygame初期化
pygame.init()
#設定ファイル読み込み
####
self.clock = pygame.time.Clock()#フレームレート実体化
self.key_repeat = pygame.key.set_repeat(5000,10)#引数はdelay,interbalの順(どちらもミリ秒)delayは何秒長押ししたら?interbalは長押ししていたらキーをどれくらいおしたことにするか?
####
#
Config.game_on = True
Config.game_start_screen_on = True
#
if __name__ == '__main__':#これしないと関数呼び出しをめちゃくちゃしてエラー出る
self.GameDrawRelation()
self.GameMainLoop()
#
def GameDrawRelation(self,):
#GameDrawクラスインスタンス化
self.gamedraw = GameDraw.Main()
#最初の画面
self.gamedraw.GameStartScreen('first_time')
def GameMainLoop(self,):
#矢印点滅タイマー
ONE_TIME_TICK = pygame.USEREVENT + 0#event場所取得
ONE_TIME_TICK_EVENT = pygame.event.Event(ONE_TIME_TICK,attr1='ONE_TIME_TICK_EVENT')#イベント設定
pygame.event.post(ONE_TIME_TICK_EVENT)#イベント実行
pygame.time.set_timer(ONE_TIME_TICK_EVENT,1000)#イベントを使いタイマー実行
while Config.game_on:
#pygame.time.wait(30)
self.clock.tick(Config.flame_rate)
for event in pygame.event.get():
#画面update
pygame.display.update()
#key取得
self.event_key = pygame.key.get_pressed()
#
if event.type == pygame.QUIT:
self.GameExit()
if Config.game_start_screen_on:
#矢印点滅
if event == ONE_TIME_TICK_EVENT:
if Config.arrow_flashing_display == True:#もし矢印が画面上に表示されていたら
self.gamedraw.GameStartScreen()#矢印なしの画面を表示する
Config.arrow_flashing_display = False
else:
Config.arrow_flashing_display = True
self.gamedraw.GameStartScreen()#矢印なしの画面を表示する
self.gamedraw.GameArrow()#矢印を表示
if self.event_key[pygame.K_UP] == 1:
#次の場所を設定
Config.next_position = Config.position - 1#関数は変数を持っておらず、クラスが所持している
self.gamedraw.GameArrow()
elif self.event_key[pygame.K_DOWN] == 1:
#次の場所を設定
Config.next_position = Config.position + 1
self.gamedraw.GameArrow()
if self.event_key[pygame.K_RETURN] == 1:#エンターキーを押したら
if Config.position == 0:
Config.game_start_screen_on = False
Config.game_play_mode_serect_screen_on = True
Config.arrow_flashing_display = False
self.gamedraw.GamePlayModeSerect('first_time')
elif Config.position == 1:
pass
elif Config.position == 2:
pass
else:#エラー
error = VariableError()
error.Error('GameDraw.py/Main/GameArrow.position')
elif Config.game_play_mode_serect_screen_on:
#矢印点滅
if event == ONE_TIME_TICK_EVENT:
if Config.arrow_flashing_display == True:#もし矢印が画面上に表示されていたら
self.gamedraw.GamePlayModeSerect()#矢印なしの画面を表示する
Config.arrow_flashing_display = False
else:
Config.arrow_flashing_display = True
self.gamedraw.GamePlayModeSerect()#矢印なしの画面を表示する
#矢印を表示
self.gamedraw.GameArrow()#矢印を表示
if self.event_key[pygame.K_UP] == 1:
#次の場所を設定
Config.next_position = Config.position - 1#関数は変数を持っておらず、クラスが所持している
self.gamedraw.GameArrow()
elif self.event_key[pygame.K_DOWN] == 1:
#次の場所を設定
Config.next_position = Config.position + 1
self.gamedraw.GameArrow()
if self.event_key[pygame.K_RETURN] == 1:#エンターキーを押したら
if Config.position == 0:
self.gamedraw.GamePlayScreen('120secconds','first_time')
elif Config.position == 1:
self.gamedraw.GamePlayScreen('endless','first_time')
elif Config.position == 2:
Config.game_start_screen_on = True
Config.game_play_mode_serect_screen_on = False
Config.arrow_flashing_display = False
#
#最初の画面
self.gamedraw.GameStartScreen('first_time')
else:#予期しない値が変数に入っているのでエラー
error = VariableError()
error.Error('GameDraw.py/Main/GameArrow.position')
elif Config.game_play_120_mode == True or Config.game_play_endless == True:
if event == ONE_TIME_TICK_EVENT:
pass
elif self.event_key[pygame.K_RIGHT]:
break_command = False
if break_command == True:
break_command = False
return 0;
if Config.game_play_120_mode == True:
self.gamedraw.GamePlayScreen('120secconds')
else:
self.gamedraw.GamePlayScreen('endless')
self.height = -1#リストの順番が0,1,2...と続くため,変数に0を代入すると+1されると値が1になりリスト指定がうまくいかない
self.i = -1#リストの順番が0,1,2...と続くため,変数に0を代入すると+1されると値が1になりリスト指定がうまくいかない
self.j = -1#Jは段数のうち何個参照したか、段数が変わるとまたカウントしなおす。
for tetromino_line in Config.tetromino_postion:
if break_command == True:
break
self.height += 1
self.weight = -1#リストの順番が0,1,2...と続くため,変数に0を代入すると+1されると値が1になりリスト指定がうまくいかない
self.j = -1
for tetromino_block in tetromino_line:
self.i += 1#iは何個参照したか
self.j += 1#書いてあります
if tetromino_block != 9:
self.weight += 1#weightはiと違って何個参照したかでなく壁を含めずに何個参照しているかという変数。座標と一致させやすい
if [self.weight,self.height] == Config.moving_tetromino_postion:
Config.moving_tetromino_data = tetromino_line[self.j]#動かしているテトロミノブロックの情報取得
#何ミノか確認(衝突判定)
if Config.moving_tetromino_data[0] == 0:#Iミノ
if Config.moving_tetromino_data[1] == 0:
if Config.moving_tetromino_postion[0] + 1 == 10:#行くところの座標(weight)が10だったらbreak
for k in range(4):#Iミノはブロックが4つだから
Config.main_window.blit(Config.lb_tetromino,[295+(self.weight)*20,100+(self.height+k)*20])#weight+1しないのはテトロミノを動かさないから
break_command == True
break
elif Config.moving_tetromino_data[0] == 1:#Oミノ
if Config.moving_tetromino_data[1] == 0:
if Config.moving_tetromino_postion[0] + 2 == 10:#行くところの座標(weight)が10だったらbreak
for k in range(2):
Config.main_window.blit(Config.y_tetromino,[295+(self.weight)*20,100+(self.height+k)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
for k in range(2):
Config.main_window.blit(Config.y_tetromino,[295+(self.weight+1)*20,100+(self.height+k)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
break_command == True
break
elif Config.moving_tetromino_data[0] == 2:#Tミノ
if Config.moving_tetromino_data[1] == 0:
if Config.moving_tetromino_postion[0] + 4 == 10:#行くところの座標(weight)が10だったらbreak
Config.main_window.blit(Config.p_tetromino,[295+(self.weight+2)*20,100+(self.height)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
for k in range(3):
Config.main_window.blit(Config.p_tetromino,[295+(self.weight+1+k)*20,100+(self.height+1)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
break_command == True
break
elif Config.moving_tetromino_data[0] == 3:#Jミノ
if Config.moving_tetromino_data[1] == 0:
if Config.moving_tetromino_postion[0] + 4 == 10:#行くところの座標(weight)が10だったらbreak
Config.main_window.blit(Config.b_tetromino,[295+(self.weight+1)*20,100+(self.height)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
for k in range(3):
Config.main_window.blit(Config.b_tetromino,[295+(self.weight+1+k)*20,100+(self.height+1)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
break_command == True
break
elif Config.moving_tetromino_data[0] == 4:#Lミノ
if Config.moving_tetromino_data[1] == 0:
if Config.moving_tetromino_postion[0] + 4 == 10:#行くところの座標(weight)が10だったらbreak
Config.main_window.blit(Config.o_tetromino,[295+(self.weight+3)*20,100+(self.height)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
for k in range(3):
Config.main_window.blit(Config.o_tetromino,[295+(self.weight+1+k)*20,100+(self.height+1)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
break_command == True
break
elif Config.moving_tetromino_data[0] == 5:#Sミノ
if Config.moving_tetromino_data[1] == 0:
if Config.moving_tetromino_postion[0] + 4 == 10:#行くところの座標(weight)が10だったらbreak
for k in range(2):
Config.main_window.blit(Config.g_tetromino,[295+(self.weight+2+k)*20,100+(self.height)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
for k in range(2):
Config.main_window.blit(Config.g_tetromino,[295+(self.weight+1+k)*20,100+(self.height+1)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
break_command = True
break
elif Config.moving_tetromino_data[0] == 6:#Zミノ
if Config.moving_tetromino_data[1] == 0:
if Config.moving_tetromino_postion[0] + 4 == 10:#行くところの座標(weight)が10だったらbreak
for k in range(2):
Config.main_window.blit(Config.r_tetromino,[295+(self.weight+1+k)*20,100+(self.height)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
for k in range(2):
Config.main_window.blit(Config.r_tetromino,[295+(self.weight+2+k)*20,100+(self.height+1)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
break_command = True
break
else:
pass
#Config.moving_tetromino_index = tetromino_line.index(tetromino_line[self.i])#動かしているテトロミノブロックの場所を確認。
Config.tetromino_postion[self.height][self.j] = 0#動かすために今いるところを空白にする
Config.tetromino_postion[self.height][self.j+1] = Config.moving_tetromino_data#動かされるところにデータを移行。これで完全に動かされたことになる
Config.moving_tetromino_postion[0] += 1#座標も変更
#動かしたあとの座標はheightそのままweight+1
#何ミノか確認
if Config.moving_tetromino_data[0] == 0:#Iミノ
for k in range(4):#Iミノはブロックが4つだから
Config.main_window.blit(Config.lb_tetromino,[295+(self.weight+1)*20,100+(self.height+k)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
elif Config.moving_tetromino_data[0] == 1:#Oミノ
for k in range(2):
Config.main_window.blit(Config.y_tetromino,[295+(self.weight+1)*20,100+(self.height+k)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
for k in range(2):
Config.main_window.blit(Config.y_tetromino,[295+(self.weight+2)*20,100+(self.height+k)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
elif Config.moving_tetromino_data[0] == 2:#Tミノ
Config.main_window.blit(Config.p_tetromino,[295+(self.weight+2)*20,100+(self.height)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
for k in range(3):
Config.main_window.blit(Config.p_tetromino,[295+(self.weight+1+k)*20,100+(self.height+1)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
elif Config.moving_tetromino_data[0] == 3:#Jミノ
Config.main_window.blit(Config.b_tetromino,[295+(self.weight+1)*20,100+(self.height)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
for k in range(3):
Config.main_window.blit(Config.b_tetromino,[295+(self.weight+1+k)*20,100+(self.height+1)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
elif Config.moving_tetromino_data[0] == 4:#Lミノ
Config.main_window.blit(Config.o_tetromino,[295+(self.weight+3)*20,100+(self.height)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
for k in range(3):
Config.main_window.blit(Config.o_tetromino,[295+(self.weight+1+k)*20,100+(self.height+1)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
elif Config.moving_tetromino_data[0] == 5:#Sミノ
for k in range(2):
Config.main_window.blit(Config.g_tetromino,[295+(self.weight+2+k)*20,100+(self.height)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
for k in range(2):
Config.main_window.blit(Config.g_tetromino,[295+(self.weight+1+k)*20,100+(self.height+1)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
elif Config.moving_tetromino_data[0] == 6:#Zミノ
for k in range(2):
Config.main_window.blit(Config.r_tetromino,[295+(self.weight+1+k)*20,100+(self.height)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
for k in range(2):
Config.main_window.blit(Config.r_tetromino,[295+(self.weight+2+k)*20,100+(self.height+1)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
else:
pass
break_command= True
break
else:
pass
elif self.event_key[pygame.K_LEFT]:
break_command = False
if break_command == True:
break_command = False
return 0;
if Config.game_play_120_mode == True:
self.gamedraw.GamePlayScreen('120secconds')
else:
self.gamedraw.GamePlayScreen('endless')
self.height = -1#リストの順番が0,1,2...と続くため,変数に0を代入すると+1されると値が1になりリスト指定がうまくいかない
self.i = -1#リストの順番が0,1,2...と続くため,変数に0を代入すると+1されると値が1になりリスト指定がうまくいかない
self.j = -1#Jは段数のうち何個参照したか、段数が変わるとまたカウントしなおす。
for tetromino_line in Config.tetromino_postion:
if break_command == True:
break
self.height += 1
self.weight = -1#リストの順番が0,1,2...と続くため,変数に0を代入すると+1されると値が1になりリスト指定がうまくいかない
self.j = -1
for tetromino_block in tetromino_line:
self.i += 1#iは何個参照したか
self.j += 1#書いてあります
if tetromino_block != 9:
self.weight += 1#weightはiと違って何個参照したかでなく壁を含めずに何個参照しているかという変数。座標と一致させやすい
if [self.weight,self.height] == Config.moving_tetromino_postion:
Config.moving_tetromino_data = tetromino_line[self.j]#動かしているテトロミノブロックの情報取得
#何ミノか確認(衝突判定)
if Config.moving_tetromino_data[0] == 0:#Iミノ
if Config.moving_tetromino_data[1] == 0:
if Config.moving_tetromino_postion[0] - 1 == -1:#行くところの座標(weight)が10だったらbreak
for k in range(4):#Iミノはブロックが4つだから
Config.main_window.blit(Config.lb_tetromino,[295+(self.weight)*20,100+(self.height+k)*20])#weight+1しないのはテトロミノを動かさないから
break_command == True
break
elif Config.moving_tetromino_data[0] == 1:#Oミノ
if Config.moving_tetromino_data[1] == 0:
if Config.moving_tetromino_postion[0] - 1 == -1:#行くところの座標(weight)が10だったらbreak
for k in range(2):
Config.main_window.blit(Config.y_tetromino,[295+(self.weight)*20,100+(self.height+k)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
for k in range(2):
Config.main_window.blit(Config.y_tetromino,[295+(self.weight+1)*20,100+(self.height+k)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
break_command == True
break
elif Config.moving_tetromino_data[0] == 2:#Tミノ
if Config.moving_tetromino_data[1] == 0:
if Config.moving_tetromino_postion[0] -1 == -1:#行くところの座標(weight)が10だったらbreak
Config.main_window.blit(Config.p_tetromino,[295+(self.weight+1)*20,100+(self.height)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
for k in range(3):
Config.main_window.blit(Config.p_tetromino,[295+(self.weight+k)*20,100+(self.height+1)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
break_command == True
break
elif Config.moving_tetromino_data[0] == 3:#Jミノ
if Config.moving_tetromino_data[1] == 0:
if Config.moving_tetromino_postion[0] -1 == -1:#行くところの座標(weight)が10だったらbreak
Config.main_window.blit(Config.b_tetromino,[295+(self.weight)*20,100+(self.height)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
for k in range(3):
Config.main_window.blit(Config.b_tetromino,[295+(self.weight+k)*20,100+(self.height+1)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
break_command == True
break
elif Config.moving_tetromino_data[0] == 4:#Lミノ
if Config.moving_tetromino_data[1] == 0:
if Config.moving_tetromino_postion[0] - 1 == -1:#行くところの座標(weight)が10だったらbreak
Config.main_window.blit(Config.o_tetromino,[295+(self.weight+2)*20,100+(self.height)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
for k in range(3):
Config.main_window.blit(Config.o_tetromino,[295+(self.weight+k)*20,100+(self.height+1)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
break_command == True
break
elif Config.moving_tetromino_data[0] == 5:#Sミノ
if Config.moving_tetromino_data[1] == 0:
if Config.moving_tetromino_postion[0] - 1 == -1:#行くところの座標(weight)が10だったらbreak
for k in range(2):
Config.main_window.blit(Config.g_tetromino,[295+(self.weight+1+k)*20,100+(self.height)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
for k in range(2):
Config.main_window.blit(Config.g_tetromino,[295+(self.weight+k)*20,100+(self.height+1)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
break_command = True
break
elif Config.moving_tetromino_data[0] == 6:#Zミノ
if Config.moving_tetromino_data[1] == 0:
if Config.moving_tetromino_postion[0] - 1 == -1:#行くところの座標(weight)が10だったらbreak
for k in range(2):
Config.main_window.blit(Config.r_tetromino,[295+(self.weight+k)*20,100+(self.height)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
for k in range(2):
Config.main_window.blit(Config.r_tetromino,[295+(self.weight+1+k)*20,100+(self.height+1)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
break_command = True
break
else:
pass
#Config.moving_tetromino_index = tetromino_line.index(tetromino_line[self.i])#動かしているテトロミノブロックの場所を確認。
Config.tetromino_postion[self.height][self.j] = 0#動かすために今いるところを空白にする
Config.tetromino_postion[self.height][self.j-1] = Config.moving_tetromino_data#動かされるところにデータを移行。これで完全に動かされたことになる
Config.moving_tetromino_postion[0] -= 1#座標も変更
#動かしたあとの座標はheightそのままweight-1
#何ミノか確認
if Config.moving_tetromino_data[0] == 0:#Iミノ
for k in range(4):#Iミノはブロックが4つだから
Config.main_window.blit(Config.lb_tetromino,[295+(self.weight-1)*20,100+(self.height+k)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
elif Config.moving_tetromino_data[0] == 1:#Oミノ
for k in range(2):
Config.main_window.blit(Config.y_tetromino,[295+(self.weight-1)*20,100+(self.height+k)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
for k in range(2):
Config.main_window.blit(Config.y_tetromino,[295+(self.weight)*20,100+(self.height+k)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
elif Config.moving_tetromino_data[0] == 2:#Tミノ
Config.main_window.blit(Config.p_tetromino,[295+(self.weight)*20,100+(self.height)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
for k in range(3):
Config.main_window.blit(Config.p_tetromino,[295+(self.weight-1+k)*20,100+(self.height+1)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
elif Config.moving_tetromino_data[0] == 3:#Jミノ
Config.main_window.blit(Config.b_tetromino,[295+(self.weight-1)*20,100+(self.height)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
for k in range(3):
Config.main_window.blit(Config.b_tetromino,[295+(self.weight-1+k)*20,100+(self.height+1)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
elif Config.moving_tetromino_data[0] == 4:#Lミノ
Config.main_window.blit(Config.o_tetromino,[295+(self.weight+1)*20,100+(self.height)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
for k in range(3):
Config.main_window.blit(Config.o_tetromino,[295+(self.weight-1+k)*20,100+(self.height+1)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
elif Config.moving_tetromino_data[0] == 5:#Sミノ
for k in range(2):
Config.main_window.blit(Config.g_tetromino,[295+(self.weight+k)*20,100+(self.height)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
for k in range(2):
Config.main_window.blit(Config.g_tetromino,[295+(self.weight-1+k)*20,100+(self.height+1)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
elif Config.moving_tetromino_data[0] == 6:#Zミノ
for k in range(2):
Config.main_window.blit(Config.r_tetromino,[295+(self.weight-1+k)*20,100+(self.height)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
for k in range(2):
Config.main_window.blit(Config.r_tetromino,[295+(self.weight+k)*20,100+(self.height+1)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
else:
pass
break_command= True
break
else:
pass
elif self.event_key[pygame.K_DOWN]:
break_command = False
if break_command == True:
break_command = False
return 0;
if Config.game_play_120_mode == True:
self.gamedraw.GamePlayScreen('120secconds')
else:
self.gamedraw.GamePlayScreen('endless')
self.height = -1#リストの順番が0,1,2...と続くため,変数に0を代入すると+1されると値が1になりリスト指定がうまくいかない
self.i = -1#リストの順番が0,1,2...と続くため,変数に0を代入すると+1されると値が1になりリスト指定がうまくいかない
self.j = -1#Jは段数のうち何個参照したか、段数が変わるとまたカウントしなおす。
for tetromino_line in Config.tetromino_postion:
if break_command == True:
break_command = False
break
self.height += 1
self.weight = -1#リストの順番が0,1,2...と続くため,変数に0を代入すると+1されると値が1になりリスト指定がうまくいかない
self.j = -1
for tetromino_block in tetromino_line:
self.i += 1#iは何個参照したか
self.j += 1#書いてあります
if tetromino_block != 9:
self.weight += 1#weightはiと違って何個参照したかでなく壁を含めずに何個参照しているかという変数。座標と一致させやすい
if [self.weight,self.height] == Config.moving_tetromino_postion:
Config.moving_tetromino_data = tetromino_line[self.j]#動かしているテトロミノブロックの情報取得
#何ミノか確認(衝突判定)
if Config.moving_tetromino_data[0] == 0:#Iミノ
if Config.moving_tetromino_data[1] == 0:#回転
if Config.moving_tetromino_postion[1] + 4 == 20:#行くところの座標(height)が20だったらbreak
for k in range(4):#Iミノはブロックが4つだから
Config.main_window.blit(Config.lb_tetromino,[295+(self.weight)*20,100+(self.height+k)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
break_command == True
break
elif Config.moving_tetromino_data[0] == 1:#Oミノ
if Config.moving_tetromino_data[1] == 0:
if Config.moving_tetromino_postion[1] + 2 == 20:#行くところの座標(height)が20だったらbreak
for k in range(2):
Config.main_window.blit(Config.y_tetromino,[295+(self.weight)*20,100+(self.height+k)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
for k in range(2):
Config.main_window.blit(Config.y_tetromino,[295+(self.weight+1)*20,100+(self.height+k)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
break_command == True
break
elif Config.moving_tetromino_data[0] == 2:#Tミノ
if Config.moving_tetromino_data[1] == 0:
if Config.moving_tetromino_postion[1] + 2 == 20:#行くところの座標(height)が20だったらbreak
Config.main_window.blit(Config.p_tetromino,[295+(self.weight+1)*20,100+(self.height)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
for k in range(3):
Config.main_window.blit(Config.p_tetromino,[295+(self.weight+k)*20,100+(self.height+1)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
break_command == True
break
elif Config.moving_tetromino_data[0] == 3:#Jミノ
if Config.moving_tetromino_data[1] == 0:
if Config.moving_tetromino_postion[1] + 2 == 20:#行くところの座標(height)が20だったらbreak
Config.main_window.blit(Config.b_tetromino,[295+(self.weight)*20,100+(self.height)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
for k in range(3):
Config.main_window.blit(Config.b_tetromino,[295+(self.weight+k)*20,100+(self.height+1)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
break_command == True
break
elif Config.moving_tetromino_data[0] == 4:#Lミノ
if Config.moving_tetromino_data[1] == 0:
if Config.moving_tetromino_postion[1] + 2 == 20:#行くところの座標(height)が20だったらbreak
Config.main_window.blit(Config.o_tetromino,[295+(self.weight+2)*20,100+(self.height)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
for k in range(3):
Config.main_window.blit(Config.o_tetromino,[295+(self.weight+k)*20,100+(self.height+1)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
break_command == True
break
elif Config.moving_tetromino_data[0] == 5:#Sミノ
if Config.moving_tetromino_data[1] == 0:
if Config.moving_tetromino_postion[1] + 2 == 20:#行くところの座標(height)が20だったらbreak
for k in range(2):
Config.main_window.blit(Config.g_tetromino,[295+(self.weight+1+k)*20,100+(self.height)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
for k in range(2):
Config.main_window.blit(Config.g_tetromino,[295+(self.weight+k)*20,100+(self.height+1)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
break_command = True
break
elif Config.moving_tetromino_data[0] == 6:#Zミノ
if Config.moving_tetromino_data[1] == 0:
if Config.moving_tetromino_postion[1] + 2 == 20:#行くところの座標(height)が20だったらbreak
for k in range(2):
Config.main_window.blit(Config.r_tetromino,[295+(self.weight+k)*20,100+(self.height)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
for k in range(2):
Config.main_window.blit(Config.r_tetromino,[295+(self.weight+1+k)*20,100+(self.height+1)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
break_command = True
break
else:
pass
Config.tetromino_postion[self.height][self.j] = 0#動かすために今いるところを空白にする
Config.tetromino_postion[self.height+1][self.j] = Config.moving_tetromino_data#動かされるところにデータを移行。これで完全に動かされたことになる
Config.moving_tetromino_postion[1] += 1#座標も変更
#動かしたあとの座標はheight+1そのままweight
#何ミノか確認
if Config.moving_tetromino_data[0] == 0:#Iミノ
for k in range(4):#Iミノはブロックが4つだから
Config.main_window.blit(Config.lb_tetromino,[295+(self.weight)*20,100+(self.height+1+k)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
elif Config.moving_tetromino_data[0] == 1:#Oミノ
for k in range(2):
Config.main_window.blit(Config.y_tetromino,[295+(self.weight)*20,100+(self.height+1+k)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
for k in range(2):
Config.main_window.blit(Config.y_tetromino,[295+(self.weight+1)*20,100+(self.height+1+k)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
elif Config.moving_tetromino_data[0] == 2:#Tミノ
Config.main_window.blit(Config.p_tetromino,[295+(self.weight+1)*20,100+(self.height+1)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
for k in range(3):
Config.main_window.blit(Config.p_tetromino,[295+(self.weight+k)*20,100+(self.height+2)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
elif Config.moving_tetromino_data[0] == 3:#Jミノ
Config.main_window.blit(Config.b_tetromino,[295+(self.weight)*20,100+(self.height+1)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
for k in range(3):
Config.main_window.blit(Config.b_tetromino,[295+(self.weight+k)*20,100+(self.height+2)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
elif Config.moving_tetromino_data[0] == 4:#Lミノ
Config.main_window.blit(Config.o_tetromino,[295+(self.weight+2)*20,100+(self.height+1)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
for k in range(3):
Config.main_window.blit(Config.o_tetromino,[295+(self.weight+k)*20,100+(self.height+2)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
elif Config.moving_tetromino_data[0] == 5:#Sミノ
for k in range(2):
Config.main_window.blit(Config.g_tetromino,[295+(self.weight+1+k)*20,100+(self.height+1)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
for k in range(2):
Config.main_window.blit(Config.g_tetromino,[295+(self.weight+k)*20,100+(self.height+2)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
elif Config.moving_tetromino_data[0] == 6:#Zミノ
for k in range(2):
Config.main_window.blit(Config.r_tetromino,[295+(self.weight+k)*20,100+(self.height+1)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
for k in range(2):
Config.main_window.blit(Config.r_tetromino,[295+(self.weight+1+k)*20,100+(self.height+2)*20])#weight,heightの順番なのは座標変数が[weight,height]の順番だから
else:
pass
break_command= True
break
else:
pass
elif self.event_key[pygame.K_LSHIFT]:
pass
elif self.event_key[pygame.K_UP]:
break_command = False
if break_command == True:
break_command = False
return 0;
if Config.game_play_120_mode == True:
self.gamedraw.GamePlayScreen('120secconds')
else:
self.gamedraw.GamePlayScreen('endless')
self.height = -1#リストの順番が0,1,2...と続くため,変数に0を代入すると+1されると値が1になりリスト指定がうまくいかない
self.i = -1#リストの順番が0,1,2...と続くため,変数に0を代入すると+1されると値が1になりリスト指定がうまくいかない
self.j = -1#Jは段数のうち何個参照したか、段数が変わるとまたカウントしなおす。
for tetromino_line in Config.tetromino_postion:
if break_command == True:
break_command = False
break
self.height += 1
self.weight = -1#リストの順番が0,1,2...と続くため,変数に0を代入すると+1されると値が1になりリスト指定がうまくいかない
self.j = -1
for tetromino_block in tetromino_line:
self.i += 1#iは何個参照したか
self.j += 1#書いてあります
if tetromino_block != 9:
self.weight += 1#weightはiと違って何個参照したかでなく壁を含めずに何個参照しているかという変数。座標と一致させやすい
if [self.weight,self.height] == Config.moving_tetromino_postion:
Config.moving_tetromino_data = tetromino_line[self.j]#動かしているテトロミノブロックの情報取得
if Config.moving_tetromino_data[0] == 0:
if Config.moving_tetromino_data[1] == 0:#1になる処理を行う
for l in range(-1,3):#移動先に何かあったらbreak
if Config.tetromino_postion[self.height+1][self.j+l] != 0:
break_command = True
break
Config.tetromino_postion[self.height][self.j] = 0
Config.moving_tetromino_data[1] = 1
Config.tetromino_postion[self.height+1][self.j+2] = Config.moving_tetromino_data
Config.moving_tetromino_postion = [self.weight+2,self.height+1]
print(Config.moving_tetromino_postion)
print(Config.tetromino_postion[self.height+1][self.j+2])
#描画
for k in range(4):
Config.main_window.blit(Config.lb_tetromino,[295+(self.weight-1+k)*20,100+(self.height+1)*20])
break_command = True
break
elif Config.moving_tetromino_data[1] == 1:#2になる処理を行う
for l in range(-1,3):
if Config.tetromino_postion[self.height+l][self.j-2] != 0:
break_command = True
break
Config.tetromino_postion[self.height][self.j] = 0
Config.moving_tetromino_data[1] = 2
Config.tetromino_postion[self.height+2][self.j-1] = Config.moving_tetromino_data
Config.moving_tetromino_postion = [self.weight-1,self.height+2]
for k in range(4):
Config.main_window.blit(Config.lb_tetromino,[295+(self.weight-1)*20,100+(self.height-1+k)*20])
break_command = True
break
elif Config.moving_tetromino_data[1] == 2:#3になる処理を行う
for l in range(-1,3):
if Config.tetromino_postion[self.height-2][self.j-1+l] != 0:
break_command = True
break
Config.tetromino_postion[self.height][self.j] = 0
Config.moving_tetromino_data[1] = 3
Config.tetromino_postion[self.height-1][self.j-2] = Config.moving_tetromino_data
Config.moving_tetromino_postion = [self.weight-2,self.height-1]
for k in range(4):
Config.main_window.blit(Config.lb_tetromino,[295+(self.weight-2+k)*20,100+(self.height-1)*20])
break_command = True
break
elif Config.moving_tetromino_data[1] == 3:#0になる処理を行う
for l in range(-1,3):#-1から2まで行う
if Config.tetromino_postion[self.height+l][self.j+1] != 0:
break_command = True
break
Config.tetromino_postion[self.height][self.j] = 0
Config.moving_tetromino_data[1] = 0
Config.tetromino_postion[self.height-2][self.j+1] = Config.moving_tetromino_data
Config.moving_tetromino_postion = [self.weight+1,self.height-2]
#描画
for k in range(4):
Config.main_window.blit(Config.lb_tetromino,[295+(self.weight+1)*20,100+(self.height-2+k)*20])
break_command = True
break
if Config.moving_tetromino_data[0] == 1:
if Config.moving_tetromino_data[1] == 0:
pass
elif Config.moving_tetromino_data[1] == 1:
pass
elif Config.moving_tetromino_data[1] == 2:
pass
elif Config.moving_tetromino_data[1] == 3:
pass
if Config.moving_tetromino_data[0] == 2:
if Config.moving_tetromino_data[1] == 0:
pass
elif Config.moving_tetromino_data[1] == 1:
pass
elif Config.moving_tetromino_data[1] == 2:
pass
elif Config.moving_tetromino_data[1] == 3:
pass
if Config.moving_tetromino_data[0] == 3:
if Config.moving_tetromino_data[1] == 0:
pass
elif Config.moving_tetromino_data[1] == 1:
pass
elif Config.moving_tetromino_data[1] == 2:
pass
elif Config.moving_tetromino_data[1] == 3:
pass
if Config.moving_tetromino_data[0] == 4:
if Config.moving_tetromino_data[1] == 0:
pass
elif Config.moving_tetromino_data[1] == 1:
pass
elif Config.moving_tetromino_data[1] == 2:
pass
elif Config.moving_tetromino_data[1] == 3:
pass
if Config.moving_tetromino_data[0] == 5:
if Config.moving_tetromino_data[1] == 0:
pass
elif Config.moving_tetromino_data[1] == 1:
pass
elif Config.moving_tetromino_data[1] == 2:
pass
elif Config.moving_tetromino_data[1] == 3:
pass
if Config.moving_tetromino_data[0] == 6:
if Config.moving_tetromino_data[1] == 0:
pass
elif Config.moving_tetromino_data[1] == 1:
pass
elif Config.moving_tetromino_data[1] == 2:
pass
elif Config.moving_tetromino_data[1] == 3:
pass
else:
pass
def GameExit(self,):
    """Shut down the game and terminate the process.

    Statement order matters: pygame is torn down first, the game-loop flag
    is cleared, the error window is destroyed, and sys.exit() comes last.
    """
    pygame.quit()
    Config.game_on = False  # signal any running loop that the game is over
    # window_for_error is a module-level window object (presumably Tk) --
    # TODO confirm against its definition earlier in the file.
    window_for_error.destroy()
    sys.exit()
def Settings_save(self,):
    """Placeholder for persisting user settings; not implemented yet."""
    return None
# Script entry point: instantiating Main() presumably starts the game
# loop in its __init__ -- confirm against the Main class definition.
if __name__ == '__main__':
    Game = Main()
from meerkat_abacus.pipeline_worker.process_steps import ProcessingStep
from meerkat_abacus import model
from meerkat_abacus import util
class SendAlerts(ProcessingStep):
    """Pipeline step that sends an alert for records flagged as individual alerts."""

    def __init__(self, param_config, session):
        self.step_name = "send_alerts"
        # Cache every aggregation variable that can trigger an alert, keyed by id.
        alerts = session.query(model.AggregationVariables).filter(
            model.AggregationVariables.alert == 1)
        self.alert_variables = {a.id: a for a in alerts}
        # all_location_data returns a tuple; index 0 is the locations mapping.
        self.locations = util.all_location_data(session)[0]
        self.config = param_config
        self.session = session

    def run(self, form, data):
        """
        Send an alert for this record when it is an individual alert.

        The alert id is the tail of the record uuid, with length taken from
        country_config["alert_id_length"]; it is also stored back into
        data["variables"]["alert_id"]. Returns the (form, data) pair.
        """
        # Use .get() for alert_type: a record may carry "alert" without an
        # "alert_type" key, which previously raised KeyError here.
        if ("alert" in data["variables"] and
                data["variables"].get("alert_type") == "individual"):
            alert_id = data["uuid"][
                -self.config.country_config["alert_id_length"]:]
            data["variables"]["alert_id"] = alert_id
            util.send_alert(alert_id, data,
                            self.alert_variables,
                            self.locations, self.config)
        return [{"form": form,
                 "data": data}]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-14 19:35
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
import scoremanager.models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the ScoreboardEntry table.
    # Do not hand-edit field definitions; generate a new migration instead.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='ScoreboardEntry',
            fields=[
                # identifier: non-editable primary key, default supplied by
                # scoremanager.models.identifier_default (max 32 chars).
                ('identifier', models.CharField(default=scoremanager.models.identifier_default, editable=False, max_length=32, primary_key=True, serialize=False)),
                ('date_created', models.DateTimeField(default=django.utils.timezone.now)),
                ('score', models.PositiveIntegerField()),
                ('balls_dropped', models.PositiveIntegerField()),
            ],
            options={
                # Leaderboard ordering: highest score first; ties broken by
                # fewer balls dropped, then by earliest entry.
                'ordering': ['-score', 'balls_dropped', 'date_created'],
            },
        ),
    ]
|
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, redirect, get_object_or_404
from django.http import HttpResponse
from django.urls import reverse_lazy
from django.views.generic.edit import FormView
from django.views.generic import DetailView, TemplateView
from django.forms import formset_factory
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.messages.views import SuccessMessageMixin
from django.contrib import messages
from django.utils import timezone
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from apps.categorias.models import Categoria
from apps.core.permissions import NoClientePermission
from apps.medicamentos.models import Medicamento
from apps.ventas.models import Venta, VentaProducto, VentaCancelacion
from apps.usuarios.models import Usuario, Cliente, Trabajador
from apps.ventas.forms import VentaProductoForm, SeleccionarClienteForm
@login_required
@api_view(['GET', 'POST'])
@permission_classes((IsAuthenticated, NoClientePermission, ))
def nueva_venta(request, id_producto):
    """Add one unit of a product to the current open sale's cart.

    Creates the VentaProducto line item on first add; otherwise increments
    its quantity if stock allows. The line's ``precio`` accumulates the
    unit price per unit added, and the sale's subtotal is kept in step.
    Redirects to the sales home; returns an error Response when the
    product is out of stock.
    """
    producto = get_object_or_404(Medicamento, pk=id_producto)
    venta = Venta.obtener_venta(request)
    # Existing line item for this product in the open sale, or None.
    productos_carrito = venta.productos_comprados.filter(venta=venta, producto=id_producto).first()
    if producto.cantidad >= 1:
        if productos_carrito is None:  # was `== None`; identity check is the idiom
            # First unit of this product: create the line item.
            vp = VentaProducto.objects.create(venta=venta, producto=producto, cantidad=1, precio=producto.precio, descuento=0)
            vp.venta.subtotal += producto.precio
            vp.venta.save()
            vp.save()
        else:
            vp = VentaProducto.objects.get(venta=venta, producto=producto)
            vp.cantidad += 1
            if vp.producto.cantidad >= vp.cantidad:
                # precio accumulates the line total (unit price per unit).
                vp.precio += producto.precio
                vp.venta.subtotal += producto.precio
                vp.venta.save()
                vp.save()
            else:
                # Not enough stock: the increment above is never saved.
                messages.error(request, 'La cantidad seleccionada no está disponible. ')
    else:
        messages.error(request, 'La cantidad seleccionada no está disponible. ')
        return Response(["Error"])
    return redirect('categorias:inicio_ventas')
@login_required
@api_view(['GET', 'POST'])
@permission_classes((IsAuthenticated, NoClientePermission, ))
def eliminar_producto_carrito(request, id_producto):
    """Remove a line item (VentaProducto) from the current sale's cart.

    Subtracts the line's stored total from the sale subtotal and deletes
    the line, then redirects to the sales home either way.
    """
    venta_activa = Venta.obtener_venta(request)
    producto = get_object_or_404(VentaProducto, pk=id_producto)
    if producto in venta_activa.productos_comprados.all():
        # Subtract the stored line total (``precio`` accumulates one unit
        # price per unit added in nueva_venta) rather than recomputing
        # unit price * quantity, which would drift if the product's price
        # changed after the item was added.
        producto.venta.subtotal -= producto.precio
        producto.venta.save()
        producto.delete()
        messages.success(request, 'Producto eliminado exitosamente.')
    # Both branches of the original returned the same redirect.
    return redirect("categorias:inicio_ventas")
@login_required
@api_view(['GET', 'POST'])
@permission_classes((IsAuthenticated, NoClientePermission, ))
def finalizar_venta(request):
    """Checkout view for the current sale.

    GET: render the checkout page with cart contents and totals.
    POST (valid form): attach the selected client, timestamp ``terminada``
    to close the sale, decrement stock per line item, clear the session
    cart and redirect to the invoice page.
    """
    # Local imports keep the aggregate helpers out of the module namespace.
    from django.db.models import Sum
    from django.db.models.functions import Coalesce
    empresa = request.tenant
    categorias = Categoria.objects.filter(empresa=request.tenant)
    activa = Venta.obtener_venta_activa(request, None, request.user)
    productos_carrito = activa.productos_comprados.all()
    # NOTE(review): a queryset compares unequal to None, so this branch is
    # dead code (an empty cart falls through), and returning 0 would not be
    # a valid HttpResponse anyway -- confirm intent and replace with a
    # redirect if empty carts must be rejected.
    if productos_carrito == None:
        return 0
    cantidad_carrito = productos_carrito.aggregate(cant_carrito=Coalesce(Sum("cantidad"),0))["cant_carrito"]
    productos_carrito = productos_carrito  # no-op self-assignment, left as-is
    subtotal = activa.subtotal
    # NOTE(review): 19% VAT; if ``subtotal`` is a Decimal field this float
    # multiplication raises TypeError -- confirm the model field type.
    iva = subtotal*0.19
    total = subtotal + iva
    if request.method == 'POST':
        form = SeleccionarClienteForm(request.POST)
        if form.is_valid():
            cliente = form.cleaned_data["cliente"]
            venta_activa = Venta.obtener_venta(request)
            venta_activa.cliente = cliente
            subtotal = venta_activa.subtotal
            venta_activa.iva = iva
            venta_activa.total = total
            # Timestamping ``terminada`` marks the sale as closed.
            venta_activa.terminada = timezone.now()
            venta_activa.save()
            for producto in productos_carrito:
                # Decrement stock per line item and refresh availability.
                producto.producto.cantidad -= producto.cantidad
                producto.producto.verificar_disponibilidad()
                producto.producto.save()
            # Drop the session cart so the next request opens a fresh sale.
            del request.session['venta_activa']
            messages.success(request, 'Venta realizada exitosamente.')# TODO: pending resolution (original note: "pendiente por resolver")
            return redirect('ventas:factura', venta_activa.id)
        # Invalid POST falls through and re-renders with the bound form.
    else:
        form = SeleccionarClienteForm()
    venta_activa = Venta.obtener_venta(request)
    return render(request, 'ventas/finalizar_venta.html', {
        'form': form,
        'venta_activa': venta_activa,
        'categorias':categorias,
        'activa': activa,
        'empresa': empresa,
        'productos_carrito': productos_carrito,
        'cantidad_carrito': cantidad_carrito,
        'subtotal': subtotal,
        'total': total,
        'iva':iva,
    })
@login_required
def mis_ventas(request):
    """List the logged-in worker's completed sales (``terminada`` is set)."""
    ventas_terminadas = request.user.ventas_del_trabajador.exclude(terminada__isnull=True)
    context = {'ventas': ventas_terminadas}
    return render(request, 'ventas/listado_ventas.html', context)
#--------------------- Compras ---------------------------
@login_required
def nueva_compra(request, id_producto):
    """Add one unit of a product to the current purchase cart.

    Near-duplicate of ``nueva_venta`` (without the DRF decorators and
    without the error Response), redirecting to the purchases home.
    """
    producto = get_object_or_404(Medicamento, pk=id_producto)
    venta = Venta.obtener_venta(request)
    # Existing line item for this product in the open purchase, or None.
    productos_carrito = venta.productos_comprados.filter(venta=venta, producto=id_producto).first()
    if producto.cantidad >= 1:
        if productos_carrito is None:  # was `== None`; identity check is the idiom
            # First unit of this product: create the line item.
            vp = VentaProducto.objects.create(venta=venta, producto=producto, cantidad=1, precio=producto.precio, descuento=0)
            vp.venta.subtotal += producto.precio
            vp.venta.save()
            vp.save()
        else:
            vp = VentaProducto.objects.get(venta=venta, producto=producto)
            vp.cantidad += 1
            if vp.producto.cantidad >= vp.cantidad:
                # precio accumulates the line total (unit price per unit).
                vp.precio += producto.precio
                vp.venta.subtotal += producto.precio
                vp.venta.save()
                vp.save()
            else:
                # Not enough stock: the increment above is never saved.
                messages.error(request, 'La cantidad seleccionada no está disponible. ')
    else:
        messages.error(request, 'La cantidad seleccionada no está disponible. ')
    return redirect('categorias:inicio_compras')
@login_required
def eliminar_producto_carrito_compra(request, id_producto):
    """Remove a line item (VentaProducto) from the current purchase cart.

    Subtracts the line's stored total from the subtotal and deletes the
    line, then redirects to the purchases home either way.
    """
    venta_activa = Venta.obtener_venta(request)
    producto = get_object_or_404(VentaProducto, pk=id_producto)
    if producto in venta_activa.productos_comprados.all():
        # Subtract the stored line total (``precio`` accumulates one unit
        # price per unit added in nueva_compra) rather than recomputing
        # unit price * quantity, which would drift if the product's price
        # changed after the item was added.
        producto.venta.subtotal -= producto.precio
        producto.venta.save()
        producto.delete()
        messages.success(request, 'Producto eliminado exitosamente.')
    # Both branches of the original returned the same redirect.
    return redirect("categorias:inicio_compras")
@login_required
def finalizar_compra(request):
    """Checkout view for the current purchase (client-facing).

    Near-duplicate of ``finalizar_venta`` without the client-selection
    form: any POST closes the purchase, decrements stock, clears the
    session cart and redirects to the invoice.
    """
    # Local imports keep the aggregate helpers out of the module namespace.
    from django.db.models import Sum
    from django.db.models.functions import Coalesce
    empresa = request.tenant
    categorias = Categoria.objects.filter(empresa=request.tenant)
    # NOTE(review): argument order differs from finalizar_venta (user is the
    # second positional arg here, third there) -- confirm the signature of
    # Venta.obtener_venta_activa.
    activa = Venta.obtener_venta_activa(request, request.user, None)
    productos_carrito = activa.productos_comprados.all()
    # NOTE(review): a queryset compares unequal to None, so this branch is
    # dead code, and returning 0 would not be a valid HttpResponse anyway.
    if productos_carrito == None:
        return 0
    cantidad_carrito = productos_carrito.aggregate(cant_carrito=Coalesce(Sum("cantidad"),0))["cant_carrito"]
    productos_carrito = productos_carrito  # no-op self-assignment, left as-is
    subtotal = activa.subtotal
    # NOTE(review): 19% VAT; if ``subtotal`` is a Decimal field this float
    # multiplication raises TypeError -- confirm the model field type.
    iva = subtotal*0.19
    total = subtotal + iva
    if request.method == 'POST':
        venta_activa = Venta.obtener_venta(request)
        subtotal = venta_activa.subtotal
        venta_activa.iva = iva
        venta_activa.total = total
        # Timestamping ``terminada`` marks the purchase as closed.
        venta_activa.terminada = timezone.now()
        venta_activa.save()
        for producto in productos_carrito:
            # Decrement stock per line item and refresh availability.
            producto.producto.cantidad -= producto.cantidad
            producto.producto.verificar_disponibilidad()
            producto.producto.save()
        # Drop the session cart so the next request opens a fresh purchase.
        del request.session['venta_activa']
        messages.success(request, 'Compra realizada exitosamente.')# TODO: pending resolution (original note: "pendiente por resolver")
        return redirect('ventas:factura', venta_activa.id)
    else:
        pass
    venta_activa = Venta.obtener_venta(request)
    return render(request, 'ventas/finalizar_venta.html', {
        'venta_activa': venta_activa,
        'categorias':categorias,
        'activa': activa,
        'empresa': empresa,
        'productos_carrito': productos_carrito,
        'cantidad_carrito': cantidad_carrito,
        'subtotal': subtotal,
        'total': total,
        'iva':iva,
    })
@login_required
def ver_factura(request, pk):
    """Render the invoice page for a sale.

    ``get_object_or_404`` already raises Http404 when the sale does not
    exist. The original wrapped it in ``try/except Venta.DoesNotExist``,
    which get_object_or_404 never raises, and the handler referenced an
    unimported ``Http404`` name -- dead, broken code, removed.
    """
    venta = get_object_or_404(Venta, pk=pk)
    return render(request, 'ventas/factura.html', context={'venta': venta,})
@login_required
def mis_compras(request):
    """List the logged-in client's completed purchases.

    Bug fix: the original used ``filter(terminada!=None)``, which raises
    NameError at call time (``terminada`` is not a Python name -- field
    conditions must be ORM lookups). Use the same non-null filter as
    ``mis_ventas``.
    """
    compras = request.user.compras_del_cliente.exclude(terminada__isnull=True)
    return render(request, 'ventas/listado_compras.html',{'compras':compras})
def grafico_ventas_trabajadores(request):
    """Render a chart of the number of sales per worker.

    Builds the per-worker sale counts with a comprehension; the original
    appended in a loop and then made a redundant ``list()`` copy.
    """
    # NOTE(review): ``rol=Trabajador`` filters the field against the model
    # class itself, which looks suspicious -- confirm the semantics of
    # Usuario.rol before changing it.
    trabajadores = Usuario.objects.filter(rol=Trabajador)
    datos = [trabajador.ventas_del_trabajador.count() for trabajador in trabajadores]
    return render(request, "ventas/grafico_ventas_trabajadores.html", {
        "datos": datos,
        "titulo": "Ventas por trabajador",
    })
from django.contrib import admin
from .models import Usuario
from .forms import CreateUsuarioForm
# Register your models here.
@admin.register(Usuario)
class UsuarioAdmin(admin.ModelAdmin):
    """Admin configuration for Usuario; default ModelAdmin options."""
    pass
|
# basic08.py
import glob, csv, sys, os
# Count rows and columns of every 'sales_*' CSV file next to this script.
# Renamed ``dir`` -> ``script_dir`` (the original shadowed the builtin) and
# fixed the "Totol" typo in the summary line.
script_dir = os.path.dirname(os.path.realpath(__file__))
input_path = script_dir + '/'
file_counter = 0
print(glob.glob(os.path.join(input_path, 'sales_*')))
for input_file in glob.glob(os.path.join(input_path, 'sales_*')):
    # Start at 1 so the count includes the header row consumed by next().
    total_row = 1
    with open(input_file, 'r', newline='') as csv_in_file:
        filereader = csv.reader(csv_in_file)
        header = next(filereader)
        for row in filereader:
            total_row += 1
    print('{0:30s}: {1:d} rows \t{2:d} columns '.format(os.path.basename(input_file), total_row, len(header)))
    file_counter += 1
print("Total file count: {0:d}".format(file_counter))
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.shortcuts import render
from django.http import HttpResponse, JsonResponse
from django.utils import formats, dateparse, timezone
from .models import Period, Traineeship, Student
from django.core.exceptions import ValidationError
from datetime import datetime, date
from io import BytesIO
from docx import Document
from docx.shared import Pt
def json_access_error(request):
    """Return a 403 JSON-API style error for a period the user may no longer edit."""
    error = {
        "status": "403",
        "source": { "pointer": request.path },
        "detail": "vous n'êtes plus autorisé à utiliser cette période"
    }
    return JsonResponse({"errors": [error]}, status=403)
def time_limit():
    """Return midnight of this week's Thursday as an aware datetime.

    weekday() is 0 for Monday, so 3 - weekday() is the signed day offset
    from today to Thursday (negative once Thursday has passed).
    """
    today = timezone.localdate()
    thursday = today + timezone.timedelta(days=3 - today.weekday())
    return timezone.make_aware(datetime.combine(thursday, datetime.min.time()))
def calendar(request, action, traineeship):
    """AJAX endpoint driving the traineeship calendar.

    Dispatches on *action* ('read', 'create', 'delete', 'update') and
    returns JSON.  A user with a Student relation may only touch periods
    starting after time_limit(); a user without one is unrestricted.
    """
    user = request.user
    traineeship = Traineeship.objects.get(id=int(traineeship))
    # No Student relation => treated as staff for the checks below.
    try:
        student = user.student
    except Student.DoesNotExist:
        student = None
    # calendar read
    if action=='read':
        # Bounds arrive as ISO dates; widen them to aware midnights.
        time_start = timezone.make_aware(datetime.combine(dateparse.parse_date(request.GET['start']), datetime.min.time()))
        time_end = timezone.make_aware(datetime.combine(dateparse.parse_date(request.GET['end']), datetime.min.time()))
        base_criteria = {
            'traineeship' : traineeship
        }
        # 'past'  : between the requested start and the edit cut-off.
        # 'future': between the cut-off and the requested end.
        if request.GET['type']=='past':
            base_criteria['start__gte'] = time_start
            base_criteria['end__lt'] = time_limit()
        if request.GET['type']=='future':
            base_criteria['start__gte'] = time_limit()
            base_criteria['end__lt'] = time_end
        ps = Period.objects.filter(**base_criteria)
        d = []
        for p in ps:
            d.append({
                'id': p.id,
                'start': p.start,
                'end': p.end,
            })
        return JsonResponse(d, safe=False)
    # create period
    if action=='create':
        time_start = dateparse.parse_datetime(request.GET['start'])
        time_end = dateparse.parse_datetime(request.GET['end'])
        # Students cannot create periods before the cut-off.
        if student and time_start<time_limit():
            return json_access_error(request)
        try:
            p = traineeship.periods.create(start=time_start, end=time_end)
            return JsonResponse({"event_id" : p.id}, safe=False)
        except ValidationError as e:
            # Model validation failed; surface the first message as 422.
            return JsonResponse(
                {
                    "errors": [
                        {
                            "status": "422",
                            "source": { "pointer": request.path },
                            "detail": "%s" % e.args[0]
                        },
                    ]
                },
                status=422
            )
    # delete event
    if action=='delete':
        p = traineeship.periods.get(id=int(request.GET['event_id']))
        # Students cannot delete periods that started before the cut-off.
        if student and p.start<time_limit():
            return json_access_error(request)
        p.delete()
        return JsonResponse({"event_id" : 0}, safe=False)
    # update event
    if action=='update':
        try:
            p = traineeship.periods.get(id=int(request.GET['event_id']))
            time_start = dateparse.parse_datetime(request.GET['start'])
            time_end = dateparse.parse_datetime(request.GET['end'])
            # Students cannot move a period to before the cut-off.
            if student and time_start<time_limit():
                return json_access_error(request)
            p.start = time_start
            p.end = time_end
            p.save()
            return JsonResponse({"event_id" : p.id}, safe=False)
        except ValidationError as e:
            return JsonResponse(
                {
                    "errors": [
                        {
                            "status": "422",
                            "source": { "pointer": request.path },
                            "detail": "%s" % e.args[0]
                        },
                    ]
                },
                status=422
            )
    # We should not get here: unknown action.
    return JsonResponse(
        {
            "errors": [
                {
                    "status": "400",
                    "source": { "pointer": request.path },
                    "detail": "action not found"
                },
            ]
        },
        status=400
    )
# DOCX
def download_schedule(request, traineeship):
    """Return a minimal .docx attachment (heading only) for *traineeship*."""
    user = request.user
    ts = Traineeship.objects.get(id=int(traineeship))
    # Kept for parity with the calendar view: resolve the student
    # relation even though it is unused below.
    try:
        student = user.student
    except Student.DoesNotExist:
        student = None
    # HttpResponse with the proper docx content type and filename.
    response = HttpResponse(content_type='application/vnd.openxmlformats-officedocument.wordprocessingml.document')
    response['Content-Disposition'] = 'attachment; filename="horaire.docx"'
    document = Document()
    document.add_heading("%s %s : Stage d'%s" % (ts.student.first_name, ts.student.last_name, ts.category), 0)
    # Serialise the document into memory and stream it to the client.
    out = BytesIO()
    document.save(out)
    response.write(out.getvalue())
    out.close()
    return response
# French day names, indexed by datetime.weekday() (0 = Monday).
JOURS = ['Lundi', 'Mardi', 'Mercredi', 'Jeudi', 'Vendredi', 'Samedi', 'Dimanche']
def download_schedule_for_student(request, student, from_date=None):
    """Download one student's schedule for the week following *from_date*
    (or following today when *from_date* is not given).

    Bug fix: the original default ``from_date=timezone.localdate()`` was
    evaluated once at import time, so a long-running server kept serving
    the week relative to its start-up date.  ``None`` now means "today,
    evaluated per request" — backward compatible for all explicit callers.
    """
    if from_date is None:
        from_date = timezone.localdate()
    next_monday = from_date + timezone.timedelta(days=7-from_date.weekday())
    student = Student.objects.get(id=student)
    #ts = student.traineeships.filter(date_start__lte=from_date, is_closed=False)[0]
    ts = student.traineeships.filter(is_closed=False)[0]
    # TODO: handle no open traineeship, more than one open traineeship,
    # and a non-existing student.
    # Create the HttpResponse object with the appropriate docx headers.
    response = HttpResponse(content_type='application/vnd.openxmlformats-officedocument.wordprocessingml.document')
    response['Content-Disposition'] = 'attachment; filename="horaire %s %s.docx"' % (student.last_name,
                                                                                     student.first_name)
    buffer = BytesIO()
    document = Document()
    document.styles["Title"].font.size = Pt(18)
    document.styles["Subtitle"].font.size = Pt(16)
    document.add_heading("%s %s : du %s au %s" % (
        ts.student.first_name,
        ts.student.last_name,
        next_monday.strftime("%d-%m-%Y"),
        (next_monday + timezone.timedelta(days=6)).strftime("%d-%m-%Y"),
    ), 0)
    document.add_paragraph("Stage d'%s - %s" % (ts.category, ts.place,), style="Subtitle")
    # One header row plus one row per day (extra rows added in the loop).
    table = document.add_table(rows=1, cols=5)
    table.style = 'Light Shading Accent 1'
    hdr_cells = table.rows[0].cells
    hdr_cells[0].text = 'Jour'
    hdr_cells[1].text = 'De'
    hdr_cells[2].text = 'A'
    hdr_cells[3].text = 'Périodes'
    hdr_cells[4].text = 'Heures'
    for x in range(7):
        row_day = next_monday + timezone.timedelta(days=x)
        day_periods = ts.periods.filter(start__date=row_day).order_by('start')
        row_cells = table.add_row().cells
        row_cells[0].text = JOURS[x]
        num_p = 0
        for p in day_periods:
            num_p += 1
            row_cells[1].text = timezone.localtime(p.start).strftime("%H:%M")
            row_cells[2].text = timezone.localtime(p.end).strftime("%H:%M")
            row_cells[3].text = str(p.period_duration())
            row_cells[4].text = str(p.hour_duration())
            # NOTE(review): num_p always equals len(day_periods) after the
            # loop, so this condition never fires — it looks like it was
            # meant to add a row per extra period.  Preserved as-is.
            if not num_p == len(day_periods):
                row_cells = table.add_row().cells
    document.save(buffer)
    # Get the value of the BytesIO buffer and write it to the response.
    doc = buffer.getvalue()
    buffer.close()
    response.write(doc)
    return response
|
import json
import logging
import requests, hashlib
from requests import RequestException
from urllib.parse import urlencode
from dquant.config import cfg
from dquant.constants import Constants
from dquant.markets.market import Market
class OkexFutureRest(Market):
    """OKEX futures REST client: order-book queries and order management.

    Fixes over the original:
    - ``json.loads(..., encoding=...)`` (removed in Python 3.9) replaced
      by an explicit decode;
    - URL dispatch used ``is`` on strings (identity, not equality);
    - ``get_account`` had a mutable default argument;
    - ``long``/``short`` accepted ``lever_rate`` but never forwarded it;
    - ``delete_order`` retried forever on exceptions even with tillOK=False.
    """

    def __init__(self, meta_code):
        """Resolve *meta_code* (e.g. 'btc_usd_this_week') and open an HTTP session."""
        base_currency, market_currency, symbol, contract_type = self.parse_meta(meta_code)
        super().__init__(base_currency, market_currency, meta_code, cfg.get_float_config(Constants.OKEX_FEE))
        self.apikey = cfg.get_config(Constants.OKEX_APIKEY)
        self.apisec = cfg.get_config(Constants.OKEX_APISEC)
        self.contract_type = contract_type
        self.symbol = symbol
        self.base_url = Constants.OKEX_FUTURE_REST_BASE
        self.session = requests.session()
        self.timeout = Constants.OK_HTTP_TIMEOUT

    def get_ticker(self):
        """Return best ask/bid from the book, or None when no depth is available."""
        depth = self.get_depth()
        if not depth:
            return None
        res = {'ask': {'price': 0, 'amount': 0}, 'bid': {'price': 0, 'amount': 0}}
        if len(depth['asks']) > 0:
            res['ask'] = depth['asks'][0]
        if len(depth["bids"]) > 0:
            res['bid'] = depth['bids'][0]
        return res

    # Poll the depth endpoint until it answers successfully.
    def get_depth(self):
        params = {"symbol": self.symbol,
                  "contract_type": self.contract_type}
        while True:
            try:
                res = self.request(Constants.OKEX_FUTURE_DEPTH_RESOURCE_REST, params, "get")
                list_of_ask = self.okex_depth_format(res, "asks")
                list_of_bid = self.okex_depth_format(res, "bids")
                return {"asks": list_of_ask, 'bids': list_of_bid}
            except Exception:
                logging.exception("http error")

    # Normalise one side of the book into [{'price': .., 'amount': ..}, ...].
    def okex_depth_format(self, res, flag):
        result_list = []
        for ticker in res[flag]:
            result_list.append({
                'price': ticker[0],
                'amount': ticker[1]
            })
        return result_list

    def long(self, amount, price='', lever_rate='10'):
        """Open a long position.  Empty *price* means market (counterparty) price."""
        # Bug fix: lever_rate was accepted but never forwarded.
        res = self.okex_request(price=price, amount=amount, type='1', lever_rate=lever_rate,
                                api_url=Constants.OKEX_FUTURE_TRADE_REST)
        return res

    def short(self, amount, price='', lever_rate='10'):
        """Open a short position.  Empty *price* means market (counterparty) price."""
        # Bug fix: lever_rate was accepted but never forwarded.
        res = self.okex_request(price=price, amount=amount, type='2', lever_rate=lever_rate,
                                api_url=Constants.OKEX_FUTURE_TRADE_REST)
        return res

    def close_long(self, amount, price=''):
        """Close a long position."""
        res = self.okex_request(price=price, amount=amount, type='3', api_url=Constants.OKEX_FUTURE_TRADE_REST)
        return res

    def close_short(self, amount, price=''):
        """Close a short position."""
        res = self.okex_request(price=price, amount=amount, type='4', api_url=Constants.OKEX_FUTURE_TRADE_REST)
        return res

    def delete_order(self, order_id, tillOK=True):
        '''
        Cancel an order.

        :param order_id: id of the order to cancel
        :param tillOK: retry until OKEX confirms the cancellation
        :return: e.g. {'result': True, 'order_id': '14435081666'}, or None
        '''
        while True:
            try:
                res = self.okex_request(order_id=order_id, api_url=Constants.OKEX_FUTURE_DELETE_ORDER_REST)
                if 'result' in res:
                    if res['result'] == True:
                        return res
                    if tillOK:
                        continue
                    logging.error(res)
                    break
            except Exception:
                logging.exception("message")
                # Bug fix: without this break, tillOK=False still looped
                # forever because the enclosing loop is `while True`.
                if not tillOK:
                    break
        return None

    def get_account(self, coin=None):
        '''
        Return futures account info, optionally restricted to *coin* symbols.

        :param coin: iterable of currency codes (e.g. ["btc"]); None/empty
            returns the whole "info" mapping.  (Was a mutable default [].)
        :return: e.g. {"btc": {"account_rights": 1, "keep_deposit": 0, ...}, ...}
        '''
        res = self.okex_request(api_url=Constants.OKEX_FUTURE_USERINFO_REST)
        if res['result'] is True:
            if coin:
                ret = {}
                for c in coin:
                    if c.lower() in res["info"]:
                        ret[c.lower()] = res["info"][c.lower()]
                return ret
            else:
                return res["info"]

    # Build, sign and send a request to the trade / cancel / userinfo API.
    def okex_request(self, api_url, **kwargs):
        '''
        :param price: limit price; empty/None means counterparty price (match_price=1)
        :param amount: minimum 1
        :param type: 1 open long, 2 open short, 3 close long, 4 close short
        :param lever_rate: leverage, 10 or 20, default 10
        :param order_id: order to cancel (cancel endpoint only)
        :param contract_type: this_week / next_week / quarter
        :return: e.g. {"order_id":986,"result":true}
        '''
        params = {}
        # Bug fix: compare URLs with ==, not "is" — string identity is an
        # interpreter implementation detail, even for equal strings.
        if api_url == Constants.OKEX_FUTURE_DELETE_ORDER_REST:
            order_id = kwargs.get('order_id', None)
            params = {'api_key': self.apikey, 'symbol': self.symbol, 'contract_type': self.contract_type, 'order_id': order_id}
        elif api_url == Constants.OKEX_FUTURE_TRADE_REST:
            params = {'api_key': self.apikey, 'symbol': self.symbol, 'contract_type': self.contract_type, 'amount': str(kwargs.get('amount')),
                      'type': str(kwargs.get('type')), 'match_price': '1', 'lever_rate': str(kwargs.get('lever_rate', 10))}
            price = kwargs.get('price', None)
            if price:
                params['price'] = str(price)
                params['match_price'] = '0'
        elif api_url == Constants.OKEX_FUTURE_USERINFO_REST:
            params = {'api_key': self.apikey}
        params['sign'] = self.buildMySign(params, self.apisec)
        res = self.request(api_url, params=params, type='post')
        return res

    def request(self, resource, params, type):
        """Issue a GET/POST against *resource*; return decoded JSON or raise."""
        headers = {
            "Content-type": "application/x-www-form-urlencoded",
        }
        if type == "post":
            temp_params = urlencode(params)
            res = self.session.post(url=self.base_url + resource, data=temp_params, timeout=self.timeout, headers=headers)
        elif type == "get":
            res = self.session.get(url=self.base_url + resource, params=params, timeout=self.timeout)
        else:
            # Bug fix: an unknown type previously fell through to an
            # undefined `res` and raised NameError.
            raise ValueError("unsupported request type: %s" % type)
        if res.status_code == 200:
            # Bug fix: json.loads() lost its `encoding` argument in
            # Python 3.9 — decode the body explicitly instead.
            return json.loads(res.content.decode('utf-8'))
        else:
            # logging.exception is only meaningful inside an except block.
            logging.error("request error")
            raise RequestException("status error")

    def buildMySign(self, params, secretKey):
        """MD5 sign: sorted key=value pairs joined by '&', plus the secret key."""
        sign = ''
        for key in sorted(params.keys()):
            sign += key + '=' + str(params[key]) + '&'
        data = sign + 'secret_key=' + secretKey
        return hashlib.md5(data.encode("utf8")).hexdigest().upper()

    def parse_meta(self, meta_code):
        """Map a meta code to (base_currency, market_currency, symbol, contract_type)."""
        meta_table = {"btc_usd_this_week": ("btc", "usd", "btc_usd", "this_week"),
                      "eth_usd_this_week": ("eth", "usd", "eth_usd", "this_week"),}
        return meta_table[meta_code]
|
#!/usr/bin/python
def get_squares_gen(n):
    """Lazily yield the squares 0**2 .. (n-1)**2, one per next() call."""
    x = 0
    while x < n:
        yield x * x
        x += 1
# Demonstrate consuming a generator step by step.
squares = get_squares_gen(4)
print(squares)
print(next(squares))
print(next(squares))
print(next(squares))
print(next(squares))
# Bug fix: the generator only yields 4 values, so a fifth bare next()
# raised an uncaught StopIteration; supply a default instead.
print(next(squares, 'generator exhausted'))
#print(list(get_squares_gen(10)))
|
"""Helper methods to talk with the notifications backend"""
import uuid
import requests
def set_path_prefix(base_path):
    """Point the module-level API prefixes at *base_path*.

    Raises RuntimeError when *base_path* is None.
    """
    if base_path is None:
        raise RuntimeError("No base path passed")
    global __APPLICATION_PREFIX, __BUNDLES_PREFIX, event_types_prefix
    global integrations_prefix, notifications_prefix
    __APPLICATION_PREFIX = f"{base_path}/internal/applications"
    __BUNDLES_PREFIX = f"{base_path}/internal/bundles"
    event_types_prefix = f"{base_path}/internal/eventTypes"
    integrations_prefix = f"{base_path}/api/integrations/v1.0"
    notifications_prefix = f"{base_path}/api/notifications/v1.0"
def find_application(bundle_id, app_name):
    """Find an application by name and return its UUID or return None

    :param bundle_id Id of the bundle under which the app resides
    :param app_name: Name of the application
    """
    resp = requests.get(__BUNDLES_PREFIX + "/" + bundle_id + "/applications")
    if resp.status_code != 200:
        return None
    return next((app["id"] for app in resp.json() if app["name"] == app_name), None)
def add_application(bundle_id, name, display_name):
    """Adds an application if it does not yet exist; returns its id.

    :param bundle_id: id of the bundle we add the application to
    :param name: Name of the application, [a-z0-9-]+
    :param display_name: Display name of the application
    """
    # First try to find it.
    ret = find_application(bundle_id, name)
    if ret is not None:
        return ret
    # The app does not yet exist, so try to create
    app_json = {"name": name,
                "display_name": display_name,
                "bundle_id": bundle_id}
    r = requests.post(__APPLICATION_PREFIX, json=app_json)
    print(r.status_code)
    response_json = r.json()
    print(response_json)
    # Bug fix: the original used true division (status_code / 10 != 20),
    # which treats every 2xx except exactly 200 as failure (201/10 == 20.1).
    if r.status_code // 100 != 2:
        exit(1)
    aid = response_json['id']
    return aid
def delete_application(app_id):
    """Deletes an application by its id"""
    resp = requests.delete(__APPLICATION_PREFIX + "/" + app_id)
    print(resp.status_code)
def delete_bundle(bundle_id):
    """Deletes a bundle by its id"""
    resp = requests.delete(__BUNDLES_PREFIX + "/" + bundle_id)
    print(resp.status_code)
def add_event_type(application_id, name, display_name):
    """Add an EventType by name; returns the new type's id.

    :param application_id: UUID of the application
    :param name: Name of the type
    :param display_name: Display name of the type
    """
    # First try to find it
    # NOTE(review): find_event_type returns the full type dict, not an id,
    # so callers get different shapes depending on prior existence.
    ret = find_event_type(application_id, name)
    if ret is not None:
        return ret
    # It does not exist, so create it
    et_json = {"name": name, "display_name": display_name, "application_id": application_id}
    r = requests.post(event_types_prefix, json=et_json)
    response_json = r.json()
    print(response_json)
    # Bug fix: true division (status_code / 10 != 20) rejected every 2xx
    # except exactly 200; use integer division on the hundreds digit.
    if r.status_code // 100 != 2:
        exit(2)
    return response_json['id']
def add_bundle(name, display_name):
    """Adds a bundle if it does not yet exist; returns its id.

    :param name: Name of the bundle, [a-z0-9-]+
    :param display_name: Display name of the application
    """
    # First try to find it.
    ret = find_bundle(name)
    if ret is not None:
        return ret
    # It does not yet exist, so try to create
    bundle_json = {"name": name,
                   "display_name": display_name}
    r = requests.post(__BUNDLES_PREFIX, json=bundle_json)
    print(r.status_code)
    response_json = r.json()
    print(response_json)
    # Bug fix: true division (status_code / 10 != 20) rejected every 2xx
    # except exactly 200; use integer division on the hundreds digit.
    if r.status_code // 100 != 2:
        exit(1)
    aid = response_json['id']
    return aid
def find_bundle(name):
    """Find a bundle by name and return its UUID or return None

    :param name: Name of the bundle
    """
    resp = requests.get(__BUNDLES_PREFIX)
    if resp.status_code != 200:
        return None
    return next((b["id"] for b in resp.json() if b["name"] == name), None)
def find_event_type(application_id, name):
    """Find an event type by name for an application.

    Returns the full type or None if not found
    """
    resp = requests.get(__APPLICATION_PREFIX + "/" + application_id + "/eventTypes")
    if resp.status_code != 200:
        return None
    return next((et for et in resp.json() if et["name"] == name), None)
def create_endpoint(name, xrhid, properties, ep_type="webhook", ep_subtype=None):
    """Creates an endpoint and returns its id.

    NOTE: mutates the caller's *properties* dict (adds "endpointId").
    Exits the process when the POST does not return a 2xx.
    """
    ep_uuid = uuid.uuid4()
    ep_id = str(ep_uuid)
    properties["endpointId"] = ep_id
    ep_json = {"name": name,
               "description": name,
               "enabled": True,
               "properties": properties,
               "type": ep_type}
    if ep_subtype is not None:
        ep_json["sub_type"] = ep_subtype
    h = {"x-rh-identity": xrhid}
    r = requests.post(integrations_prefix + "/endpoints", json=ep_json, headers=h)
    print(r.status_code)
    # Bug fix: true division (status_code / 100 != 2) rejected every 2xx
    # except exactly 200 (e.g. 201 -> 2.01); use integer division.
    if r.status_code // 100 != 2:
        print(r.reason)
        exit(1)
    response_json = r.json()
    epid = response_json["id"]
    print(epid)
    return epid
def update_endpoint(name, xrhid, properties):
    """Updates an endpoint: copies properties['extras'] onto it and PUTs it back."""
    ep = find_endpoint(name, xrhid)
    eid = ep['id']
    ep['properties']['extras'] = properties['extras']
    h = {"x-rh-identity": xrhid}
    r = requests.put(integrations_prefix + "/endpoints/" + eid, json=ep, headers=h)
    print(r.status_code)
    # Bug fix: integer division so 2xx codes other than 200 count as success.
    if r.status_code // 100 != 2:
        print(r.reason)
        exit(1)
def delete_endpoint(name, xrhid):
    """Removes an endpoint"""
    headers = {"x-rh-identity": xrhid}
    endpoint = find_endpoint(name, xrhid)
    resp = requests.delete(integrations_prefix + "/endpoints/" + endpoint["id"], headers=headers)
    print(resp.status_code)
def find_endpoint(name, xrhid):
    """Find an endpoint by its name; return it (full dict) or None."""
    h = {"x-rh-identity": xrhid}
    r = requests.get(integrations_prefix + "/endpoints", headers=h)
    # Bug fix: true division (status_code / 100 != 2) aborted on any 2xx
    # other than exactly 200 (e.g. 204 -> 2.04); use integer division.
    if r.status_code // 100 != 2:
        print(r.reason)
        exit(1)
    response_json = r.json()
    for ep in response_json["data"]:
        if ep["name"] == name:
            return ep
    return None
def list_endpoints(xrhid):
    """List all endpoints for the passed user"""
    h = {"x-rh-identity": xrhid}
    r = requests.get(integrations_prefix + "/endpoints", headers=h)
    # Bug fix: integer division so any 2xx status counts as success.
    if r.status_code // 100 != 2:
        print(r.reason)
        exit(1)
    response_json = r.json()
    return response_json["data"]
def find_behavior_group(display_name, bundle_id, x_rhid):
    """Find a behavior group by its display name"""
    headers = {"x-rh-identity": x_rhid}
    url = notifications_prefix + "/notifications/bundles/" + bundle_id + "/behaviorGroups"
    resp = requests.get(url, headers=headers)
    if resp.status_code != 200:
        return None
    return next((bg["id"] for bg in resp.json() if bg["display_name"] == display_name), None)
def create_behavior_group(name, bundle_id, x_rhid):
    """Creates a behavior group (or returns the existing one's id)."""
    bg_id = find_behavior_group(name, bundle_id, x_rhid)
    if bg_id is not None:
        return bg_id
    bg_json = {"display_name": name,
               "bundle_id": bundle_id}
    headers = {"x-rh-identity": x_rhid}
    r = requests.post(notifications_prefix + "/notifications/behaviorGroups",
                      json=bg_json,
                      headers=headers)
    print(r.status_code)
    # Bug fix: integer division so any 2xx status counts as success
    # (true division rejected 201, 204, ...).
    if r.status_code // 100 != 2:
        print(r.reason)
        exit(1)
    response_json = r.json()
    bg_id = response_json["id"]
    print(bg_id)
    return bg_id
def link_bg_endpoint(bg_id, ep_id, x_rhid):
    """Link the behavior group to the endpoint"""
    headers = {"x-rh-identity": x_rhid}
    # NOTE(review): the response status is neither checked nor reported here.
    requests.put(notifications_prefix + "/notifications/behaviorGroups/" + bg_id + "/actions",
                 json=[ep_id],
                 headers=headers)
def add_endpoint_to_event_type(event_type_id, endpoint_id, x_rhid):
    """Attach *endpoint_id* directly to *event_type_id*; print the status code."""
    headers = {"x-rh-identity": x_rhid}
    url = notifications_prefix + "/notifications/eventTypes/" + event_type_id + "/" + endpoint_id
    resp = requests.put(url, headers=headers)
    print(resp.status_code)
def shorten_path(path):
    """Shorten an incoming domain name like path to
    only have the first char of each segment except the last
    e.g. foo.bar.baz -> f.b.baz

    Idiom fix: the manual index/while loop with string concatenation is
    replaced by a single join over the split segments.
    """
    segments = path.split(".")
    return ".".join([seg[0] for seg in segments[:-1]] + [segments[-1]])
def print_history_for_event_type(bundle_id, app_id, event_type_name, x_rhid):
    """Print the notification event history for one event type.

    Exits the process when the history endpoint does not answer 200.
    """
    headers = {"x-rh-identity": x_rhid}
    params={"bundleIds": bundle_id,
            "appIds": app_id,
            "includeDetails": True,
            "eventTypeDisplayName": event_type_name}
    r = requests.get(notifications_prefix + "/notifications/events/",
                     params=params,
                     headers=headers)
    if r.status_code != 200:
        print (r.reason)
        exit(1)
    response_json = r.json()
    data = response_json['data']
    for entry in data:
        print("Entry created at " + entry["created"] )
        for action in entry["actions"]:
            print(f"  Type {action['endpoint_type']}, success= {action['invocation_result']}")
            # Camel integrations carry extra routing details worth printing.
            if action['endpoint_type'] == 'camel':
                details = action['details']
                if details is None:
                    print("    No details provided")
                else:
                    print("    sub_type   " + shorten_path(details['type']))
                    print("    target url " + details['target'])
                    print("    outcome    " + details['outcome'])
def add_event_type_to_behavior_group(et_id, bg_id, x_rh_id):
    """Attach behavior group *bg_id* to event type *et_id*; print the status."""
    headers = {"x-rh-identity": x_rh_id}
    payload = [bg_id]
    resp = requests.put(notifications_prefix + "/notifications/eventTypes/" + et_id + "/behaviorGroups",
                        json=payload,
                        headers=headers)
    print(resp.status_code)
    return None
|
from Robot import Robot
import argparse
# Parse the CLI flag that selects Experiment (1) vs Demo (0) mode,
# then boot the robot accordingly.
parser = argparse.ArgumentParser(description='Specify the mode of the computer -> Experiment(1), Demo(0)')
parser.add_argument('-e',
                    '--experiment',
                    action='store_true',
                    help='Experiment mode')
cli_args = parser.parse_args()

robot = Robot(cli_args.experiment)
robot.stateSetup()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from layer import NeuralLayer
import theano.tensor as T
class Softmax(NeuralLayer):
    """Softmax activation layer: delegates to theano's softmax op."""
    def __init__(self):
        # Register under the layer name "softmax" via the NeuralLayer base.
        super(Softmax, self).__init__("softmax")
    def compute_tensor(self, x):
        """Apply softmax to symbolic tensor *x* and return the result tensor."""
        return T.nnet.softmax(x)
import cv2
import numpy as np
# Load a serialized Torch model (ENet) into OpenCV's DNN module.
net_torch=cv2.dnn.readNetFromTorch("./data/torch_enet_model.net")
# Load a frozen TensorFlow graph (Inception) the same way.
net_tensorflow = cv2.dnn.readNetFromTensorflow("./data/tensorflow_inception_graph.pb")
|
def is_isogram(string):
    """Return True if *string* contains no repeated characters.

    Hyphens and spaces are ignored and the comparison is
    case-insensitive.  Idiom fix: the flag-variable pattern
    (result = False; if ...: result = True) is replaced by returning
    the boolean expression directly.
    """
    cleaned = string.replace("-", "").replace(" ", "").lower()
    return len(cleaned) == len(set(cleaned))
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'TypeVideoFeatured'
db.create_table('video_typevideofeatured', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=25)),
))
db.send_create_signal('video', ['TypeVideoFeatured'])
# Adding model 'VideoFeatured'
db.create_table('video_videofeatured', (
('category', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['video.Category'], null=True, blank=True)),
('typevideofeatured', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['video.TypeVideoFeatured'], null=True, blank=True)),
('theme', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='themefeature_set', null=True, to=orm['video.Category'])),
('video', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['video.Video'], null=True, blank=True)),
('date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('channel', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['video.Channel'], null=True, blank=True)),
))
db.send_create_signal('video', ['VideoFeatured'])
# Adding model 'Publicity'
db.create_table('video_publicity', (
('scale', self.gf('django.db.models.fields.FloatField')(default=1)),
('name', self.gf('django.db.models.fields.TextField')()),
('title', self.gf('django.db.models.fields.TextField')()),
('posy', self.gf('django.db.models.fields.FloatField')(default=0)),
('posx', self.gf('django.db.models.fields.FloatField')(default=0)),
('video', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['video.Video'])),
('timeout', self.gf('django.db.models.fields.FloatField')(default=0)),
('time', self.gf('django.db.models.fields.FloatField')(default=0)),
('date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('link', self.gf('django.db.models.fields.TextField')()),
('rotation', self.gf('django.db.models.fields.FloatField')(default=0)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('published', self.gf('django.db.models.fields.IntegerField')(default=False, null=True, blank=True)),
))
db.send_create_signal('video', ['Publicity'])
# Adding model 'Question'
db.create_table('video_question', (
('date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('question_text', self.gf('django.db.models.fields.TextField')()),
('video', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['video.Video'])),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('published', self.gf('django.db.models.fields.IntegerField')(default=False, null=True, blank=True)),
))
db.send_create_signal('video', ['Question'])
# Adding M2M table for field response on 'Question'
db.create_table('video_question_response', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('question', models.ForeignKey(orm['video.question'], null=False)),
('responseintoquestion', models.ForeignKey(orm['video.responseintoquestion'], null=False))
))
# Adding model 'ResponseIntoQuestion'
db.create_table('video_responseintoquestion', (
('response_text', self.gf('django.db.models.fields.TextField')()),
('video', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['video.Video'])),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('correct', self.gf('django.db.models.fields.IntegerField')(default=False, null=True, blank=True)),
))
db.send_create_signal('video', ['ResponseIntoQuestion'])
# Adding model 'VideoRoll'
db.create_table('video_videoroll', (
('title', self.gf('django.db.models.fields.TextField')()),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('video', self.gf('django.db.models.fields.related.ForeignKey')(related_name='rolls', to=orm['video.Video'])),
('published', self.gf('django.db.models.fields.IntegerField')(default=False, null=True, blank=True)),
('position', self.gf('django.db.models.fields.FloatField')(default=0)),
('roll', self.gf('django.db.models.fields.related.ForeignKey')(related_name='videos_roll_from', to=orm['video.Video'])),
))
db.send_create_signal('video', ['VideoRoll'])
# Adding model 'Category'
db.create_table('video_category', (
('name', self.gf('django.db.models.fields.TextField')()),
('parent', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['video.Category'], null=True, blank=True)),
('sequence', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
('image', self.gf('django.db.models.fields.files.ImageField')(max_length=1000, null=True, blank=True)),
('published', self.gf('django.db.models.fields.IntegerField')(default=False, null=True, blank=True)),
('date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('channel', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['video.Channel'])),
('description', self.gf('django.db.models.fields.TextField')(default='')),
))
db.send_create_signal('video', ['Category'])
# Adding model 'UserProfile'
db.create_table('video_userprofile', (
('image', self.gf('django.db.models.fields.files.ImageField')(max_length=1000, null=True, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], unique=True)),
))
db.send_create_signal('video', ['UserProfile'])
# Adding model 'UserChannel'
db.create_table('video_userchannel', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('channel', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['video.Channel'])),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
))
db.send_create_signal('video', ['UserChannel'])
# Adding model 'Tv'
db.create_table('video_tv', (
('description', self.gf('django.db.models.fields.TextField')()),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal('video', ['Tv'])
# Adding model 'Channel'
db.create_table('video_channel', (
('description', self.gf('django.db.models.fields.TextField')()),
('title', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('tv', self.gf('django.db.models.fields.related.ForeignKey')(default=1, to=orm['video.Tv'])),
('image', self.gf('django.db.models.fields.files.ImageField')(max_length=1000, null=True, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal('video', ['Channel'])
# Adding M2M table for field video on 'Channel'
db.create_table('video_channel_video', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('channel', models.ForeignKey(orm['video.channel'], null=False)),
('video', models.ForeignKey(orm['video.video'], null=False))
))
# Adding model 'Metaclass'
db.create_table('video_metaclass', (
('validate', self.gf('django.db.models.fields.TextField')()),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal('video', ['Metaclass'])
# Adding model 'Transcode'
db.create_table('video_transcode', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal('video', ['Transcode'])
# Adding model 'VideoTag'
db.create_table('video_videotag', (
('endtime', self.gf('django.db.models.fields.FloatField')(default=0)),
('video', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['video.Video'])),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('initime', self.gf('django.db.models.fields.FloatField')(default=0)),
('tags', self.gf('tagging.fields.TagField')()),
))
db.send_create_signal('video', ['VideoTag'])
# Adding model 'ContentFile'
db.create_table('video_contentfile', (
('name', self.gf('django.db.models.fields.TextField')()),
('original_name', self.gf('django.db.models.fields.TextField')()),
('server', self.gf('django.db.models.fields.CharField')(default='localhost', max_length=256)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('dir', self.gf('django.db.models.fields.TextField')()),
('size', self.gf('django.db.models.fields.IntegerField')(default=0)),
))
db.send_create_signal('video', ['ContentFile'])
# Adding model 'ContentPart'
db.create_table('video_contentpart', (
('content_file', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['video.ContentFile'])),
('part', self.gf('django.db.models.fields.IntegerField')(default=1)),
('video', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['video.Video'])),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('offset', self.gf('django.db.models.fields.IntegerField')(default=0)),
))
db.send_create_signal('video', ['ContentPart'])
# Adding model 'Video'
db.create_table('video_video', (
('status', self.gf('django.db.models.fields.CharField')(max_length=24)),
('displayname', self.gf('django.db.models.fields.TextField')()),
('name', self.gf('django.db.models.fields.TextField')()),
('width', self.gf('django.db.models.fields.IntegerField')()),
('total_size', self.gf('django.db.models.fields.IntegerField')(default=0)),
('title', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('views', self.gf('django.db.models.fields.IntegerField')()),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('duration', self.gf('django.db.models.fields.FloatField')()),
('user', self.gf('django.db.models.fields.related.ForeignKey')(default=1, to=orm['auth.User'], null=True, blank=True)),
('ratesum', self.gf('django.db.models.fields.IntegerField')()),
('published', self.gf('django.db.models.fields.IntegerField')()),
('date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('ratenum', self.gf('django.db.models.fields.IntegerField')()),
('height', self.gf('django.db.models.fields.IntegerField')()),
('videoclass', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['video.Videoclass'])),
('dir', self.gf('django.db.models.fields.TextField')()),
('size', self.gf('django.db.models.fields.IntegerField')(default=0)),
))
db.send_create_signal('video', ['Video'])
# Adding model 'Videocategory'
db.create_table('video_videocategory', (
('category', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['video.Category'])),
('video', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['video.Video'])),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('sequence', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
))
db.send_create_signal('video', ['Videocategory'])
# Adding model 'Videoclass'
db.create_table('video_videoclass', (
('metatitle', self.gf('django.db.models.fields.TextField')()),
('displayname', self.gf('django.db.models.fields.TextField')()),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal('video', ['Videoclass'])
# Adding model 'Videocomment'
db.create_table('video_videocomment', (
('name', self.gf('django.db.models.fields.TextField')()),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('content', self.gf('django.db.models.fields.TextField')()),
('video', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['video.Video'])),
('published', self.gf('django.db.models.fields.IntegerField')(default=True, null=True, blank=True)),
('date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('email', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal('video', ['Videocomment'])
# Adding model 'Videometa'
db.create_table('video_videometa', (
('metaclass', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['video.Metaclass'])),
('video', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['video.Video'])),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('value', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal('video', ['Videometa'])
# Adding model 'Videometaclass'
db.create_table('video_videometaclass', (
('displayname', self.gf('django.db.models.fields.TextField')()),
('sequence', self.gf('django.db.models.fields.IntegerField')()),
('metaclass', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['video.Metaclass'])),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('videoclass', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['video.Videoclass'])),
))
db.send_create_signal('video', ['Videometaclass'])
# Adding model 'Videothumb'
db.create_table('video_videothumb', (
('name', self.gf('django.db.models.fields.TextField')()),
('height', self.gf('django.db.models.fields.IntegerField')()),
('width', self.gf('django.db.models.fields.IntegerField')()),
('video', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['video.Video'])),
('position', self.gf('django.db.models.fields.FloatField')()),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('size', self.gf('django.db.models.fields.CharField')(default='M', max_length=3)),
))
db.send_create_signal('video', ['Videothumb'])
# Adding model 'Videotranscode'
db.create_table('video_videotranscode', (
('transcode', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['video.Transcode'])),
('size', self.gf('django.db.models.fields.IntegerField')(default=0)),
('video', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['video.Video'])),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal('video', ['Videotranscode'])
# Adding model 'Job'
db.create_table('video_job', (
('status', self.gf('django.db.models.fields.CharField')(max_length=4)),
('original_name', self.gf('django.db.models.fields.CharField')(default='no_name', max_length=256)),
('ip', self.gf('django.db.models.fields.CharField')(max_length=128)),
('pid', self.gf('django.db.models.fields.IntegerField')(default=-1)),
('video', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['video.Video'], null=True)),
('date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('message', self.gf('django.db.models.fields.TextField')()),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('channel', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['video.Channel'], null=True)),
))
db.send_create_signal('video', ['Job'])
# Adding model 'JobLog'
db.create_table('video_joblog', (
('vars', self.gf('django.db.models.fields.TextField')(default='')),
('level', self.gf('django.db.models.fields.CharField')(default='D', max_length=3)),
('job', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['video.Job'])),
('date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('message', self.gf('django.db.models.fields.TextField')()),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal('video', ['JobLog'])
# Adding model 'VideoVote'
db.create_table('video_videovote', (
('vote', self.gf('django.db.models.fields.IntegerField')(default=0)),
('ip', self.gf('django.db.models.fields.CharField')(max_length=128)),
('video', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['video.Video'])),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('agent', self.gf('django.db.models.fields.CharField')(max_length=128)),
))
db.send_create_signal('video', ['VideoVote'])
# Adding model 'DocumentClass'
db.create_table('video_documentclass', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=32)),
))
db.send_create_signal('video', ['DocumentClass'])
# Adding model 'Document'
db.create_table('video_document', (
('video', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['video.Video'])),
('documentclass', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['video.DocumentClass'])),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('filename', self.gf('django.db.models.fields.files.FileField')(max_length=1000, null=True, blank=True)),
))
db.send_create_signal('video', ['Document'])
# Adding model 'JobDispatch'
db.create_table('video_jobdispatch', (
('commands_serialized', self.gf('django.db.models.fields.TextField')(default='')),
('dispatch_path', self.gf('django.db.models.fields.TextField')(default='')),
('start', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('job', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['video.Job'])),
('user', self.gf('django.db.models.fields.TextField')(default='www-data')),
('tvname', self.gf('django.db.models.fields.TextField')(default='')),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal('video', ['JobDispatch'])
# Adding model 'SearchRate'
db.create_table('video_searchrate', (
('date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('value', self.gf('django.db.models.fields.CharField')(default='', max_length=255)),
('rate', self.gf('django.db.models.fields.IntegerField')(default=0)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('channel', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['video.Channel'], null=True, blank=True)),
))
db.send_create_signal('video', ['SearchRate'])
def backwards(self, orm):
    """Reverse this migration: drop every table created by forwards().

    Auto-generated by South. The `orm` argument is the frozen ORM built
    from the `models` dict; it is unused here because raw table drops
    need no model access. Order is fixed by the generator — reordering
    these drops by hand risks FK-dependency failures.
    """
    # Deleting model 'TypeVideoFeatured'
    db.delete_table('video_typevideofeatured')
    # Deleting model 'VideoFeatured'
    db.delete_table('video_videofeatured')
    # Deleting model 'Publicity'
    db.delete_table('video_publicity')
    # Deleting model 'Question'
    db.delete_table('video_question')
    # Removing M2M table for field response on 'Question'
    db.delete_table('video_question_response')
    # Deleting model 'ResponseIntoQuestion'
    db.delete_table('video_responseintoquestion')
    # Deleting model 'VideoRoll'
    db.delete_table('video_videoroll')
    # Deleting model 'Category'
    db.delete_table('video_category')
    # Deleting model 'UserProfile'
    db.delete_table('video_userprofile')
    # Deleting model 'UserChannel'
    db.delete_table('video_userchannel')
    # Deleting model 'Tv'
    db.delete_table('video_tv')
    # Deleting model 'Channel'
    db.delete_table('video_channel')
    # Removing M2M table for field video on 'Channel'
    db.delete_table('video_channel_video')
    # Deleting model 'Metaclass'
    db.delete_table('video_metaclass')
    # Deleting model 'Transcode'
    db.delete_table('video_transcode')
    # Deleting model 'VideoTag'
    db.delete_table('video_videotag')
    # Deleting model 'ContentFile'
    db.delete_table('video_contentfile')
    # Deleting model 'ContentPart'
    db.delete_table('video_contentpart')
    # Deleting model 'Video'
    db.delete_table('video_video')
    # Deleting model 'Videocategory'
    db.delete_table('video_videocategory')
    # Deleting model 'Videoclass'
    db.delete_table('video_videoclass')
    # Deleting model 'Videocomment'
    db.delete_table('video_videocomment')
    # Deleting model 'Videometa'
    db.delete_table('video_videometa')
    # Deleting model 'Videometaclass'
    db.delete_table('video_videometaclass')
    # Deleting model 'Videothumb'
    db.delete_table('video_videothumb')
    # Deleting model 'Videotranscode'
    db.delete_table('video_videotranscode')
    # Deleting model 'Job'
    db.delete_table('video_job')
    # Deleting model 'JobLog'
    db.delete_table('video_joblog')
    # Deleting model 'VideoVote'
    db.delete_table('video_videovote')
    # Deleting model 'DocumentClass'
    db.delete_table('video_documentclass')
    # Deleting model 'Document'
    db.delete_table('video_document')
    # Deleting model 'JobDispatch'
    db.delete_table('video_jobdispatch')
    # Deleting model 'SearchRate'
    db.delete_table('video_searchrate')
# Frozen ORM snapshot used by South to reconstruct the model state for
# forwards()/backwards(). Auto-generated ('./manage.py schemamigration');
# do not edit by hand — regenerate instead.
models = {
    'auth.group': {
        'Meta': {'object_name': 'Group'},
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
        'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
    },
    'auth.permission': {
        'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
        'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
        'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
    },
    'auth.user': {
        'Meta': {'object_name': 'User'},
        'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
        'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
        'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
        'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
        'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
        'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
        'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
        'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
        'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
        'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
        'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
    },
    'contenttypes.contenttype': {
        'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
        'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
    },
    'video.category': {
        'Meta': {'object_name': 'Category'},
        'channel': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['video.Channel']"}),
        'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
        'description': ('django.db.models.fields.TextField', [], {'default': "''"}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
        'name': ('django.db.models.fields.TextField', [], {}),
        'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['video.Category']", 'null': 'True', 'blank': 'True'}),
        'published': ('django.db.models.fields.IntegerField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
        'sequence': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'})
    },
    'video.channel': {
        'Meta': {'object_name': 'Channel'},
        'description': ('django.db.models.fields.TextField', [], {}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
        'name': ('django.db.models.fields.TextField', [], {}),
        'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
        'tv': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': "orm['video.Tv']"}),
        'video': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['video.Video']", 'symmetrical': 'False'})
    },
    'video.contentfile': {
        'Meta': {'object_name': 'ContentFile'},
        'dir': ('django.db.models.fields.TextField', [], {}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.TextField', [], {}),
        'original_name': ('django.db.models.fields.TextField', [], {}),
        'server': ('django.db.models.fields.CharField', [], {'default': "'localhost'", 'max_length': '256'}),
        'size': ('django.db.models.fields.IntegerField', [], {'default': '0'})
    },
    'video.contentpart': {
        'Meta': {'object_name': 'ContentPart'},
        'content_file': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['video.ContentFile']"}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'offset': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
        'part': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
        'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['video.Video']"})
    },
    'video.document': {
        'Meta': {'object_name': 'Document'},
        'documentclass': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['video.DocumentClass']"}),
        'filename': ('django.db.models.fields.files.FileField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['video.Video']"})
    },
    'video.documentclass': {
        'Meta': {'object_name': 'DocumentClass'},
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '32'})
    },
    'video.job': {
        'Meta': {'object_name': 'Job'},
        'channel': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['video.Channel']", 'null': 'True'}),
        'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'ip': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
        'message': ('django.db.models.fields.TextField', [], {}),
        'original_name': ('django.db.models.fields.CharField', [], {'default': "'no_name'", 'max_length': '256'}),
        'pid': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
        'status': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
        'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['video.Video']", 'null': 'True'})
    },
    'video.jobdispatch': {
        'Meta': {'object_name': 'JobDispatch'},
        'commands_serialized': ('django.db.models.fields.TextField', [], {'default': "''"}),
        'dispatch_path': ('django.db.models.fields.TextField', [], {'default': "''"}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'job': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['video.Job']"}),
        'start': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
        'tvname': ('django.db.models.fields.TextField', [], {'default': "''"}),
        'user': ('django.db.models.fields.TextField', [], {'default': "'www-data'"})
    },
    'video.joblog': {
        'Meta': {'object_name': 'JobLog'},
        'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'job': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['video.Job']"}),
        'level': ('django.db.models.fields.CharField', [], {'default': "'D'", 'max_length': '3'}),
        'message': ('django.db.models.fields.TextField', [], {}),
        'vars': ('django.db.models.fields.TextField', [], {'default': "''"})
    },
    'video.metaclass': {
        'Meta': {'object_name': 'Metaclass'},
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.TextField', [], {}),
        'validate': ('django.db.models.fields.TextField', [], {})
    },
    'video.publicity': {
        'Meta': {'object_name': 'Publicity'},
        'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'link': ('django.db.models.fields.TextField', [], {}),
        'name': ('django.db.models.fields.TextField', [], {}),
        'posx': ('django.db.models.fields.FloatField', [], {'default': '0'}),
        'posy': ('django.db.models.fields.FloatField', [], {'default': '0'}),
        'published': ('django.db.models.fields.IntegerField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
        'rotation': ('django.db.models.fields.FloatField', [], {'default': '0'}),
        'scale': ('django.db.models.fields.FloatField', [], {'default': '1'}),
        'time': ('django.db.models.fields.FloatField', [], {'default': '0'}),
        'timeout': ('django.db.models.fields.FloatField', [], {'default': '0'}),
        'title': ('django.db.models.fields.TextField', [], {}),
        'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['video.Video']"})
    },
    'video.question': {
        'Meta': {'object_name': 'Question'},
        'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'published': ('django.db.models.fields.IntegerField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
        'question_text': ('django.db.models.fields.TextField', [], {}),
        'response': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['video.ResponseIntoQuestion']", 'symmetrical': 'False'}),
        'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['video.Video']"})
    },
    'video.responseintoquestion': {
        'Meta': {'object_name': 'ResponseIntoQuestion'},
        'correct': ('django.db.models.fields.IntegerField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'response_text': ('django.db.models.fields.TextField', [], {}),
        'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['video.Video']"})
    },
    'video.searchrate': {
        'Meta': {'object_name': 'SearchRate'},
        'channel': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['video.Channel']", 'null': 'True', 'blank': 'True'}),
        'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'rate': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
        'value': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'})
    },
    'video.transcode': {
        'Meta': {'object_name': 'Transcode'},
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.TextField', [], {})
    },
    'video.tv': {
        'Meta': {'object_name': 'Tv'},
        'description': ('django.db.models.fields.TextField', [], {}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.TextField', [], {})
    },
    'video.typevideofeatured': {
        'Meta': {'object_name': 'TypeVideoFeatured'},
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '25'})
    },
    'video.userchannel': {
        'Meta': {'object_name': 'UserChannel'},
        'channel': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['video.Channel']"}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
    },
    'video.userprofile': {
        'Meta': {'object_name': 'UserProfile'},
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
        'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
    },
    'video.video': {
        'Meta': {'object_name': 'Video'},
        'category': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['video.Category']", 'through': "orm['video.Videocategory']", 'symmetrical': 'False'}),
        'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
        'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
        'dir': ('django.db.models.fields.TextField', [], {}),
        'displayname': ('django.db.models.fields.TextField', [], {}),
        'duration': ('django.db.models.fields.FloatField', [], {}),
        'height': ('django.db.models.fields.IntegerField', [], {}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.TextField', [], {}),
        'published': ('django.db.models.fields.IntegerField', [], {}),
        'ratenum': ('django.db.models.fields.IntegerField', [], {}),
        'ratesum': ('django.db.models.fields.IntegerField', [], {}),
        'size': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
        'status': ('django.db.models.fields.CharField', [], {'max_length': '24'}),
        'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
        'total_size': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
        'user': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
        'videoclass': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['video.Videoclass']"}),
        'views': ('django.db.models.fields.IntegerField', [], {}),
        'width': ('django.db.models.fields.IntegerField', [], {})
    },
    'video.videocategory': {
        'Meta': {'object_name': 'Videocategory'},
        'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['video.Category']"}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'sequence': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
        'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['video.Video']"})
    },
    'video.videoclass': {
        'Meta': {'object_name': 'Videoclass'},
        'displayname': ('django.db.models.fields.TextField', [], {}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'metatitle': ('django.db.models.fields.TextField', [], {}),
        'name': ('django.db.models.fields.TextField', [], {})
    },
    'video.videocomment': {
        'Meta': {'object_name': 'Videocomment'},
        'content': ('django.db.models.fields.TextField', [], {}),
        'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
        'email': ('django.db.models.fields.TextField', [], {}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.TextField', [], {}),
        'published': ('django.db.models.fields.IntegerField', [], {'default': 'True', 'null': 'True', 'blank': 'True'}),
        'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['video.Video']"})
    },
    'video.videofeatured': {
        'Meta': {'object_name': 'VideoFeatured'},
        'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['video.Category']", 'null': 'True', 'blank': 'True'}),
        'channel': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['video.Channel']", 'null': 'True', 'blank': 'True'}),
        'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'theme': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'themefeature_set'", 'null': 'True', 'to': "orm['video.Category']"}),
        'typevideofeatured': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['video.TypeVideoFeatured']", 'null': 'True', 'blank': 'True'}),
        'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['video.Video']", 'null': 'True', 'blank': 'True'})
    },
    'video.videometa': {
        'Meta': {'object_name': 'Videometa'},
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'metaclass': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['video.Metaclass']"}),
        'value': ('django.db.models.fields.TextField', [], {}),
        'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['video.Video']"})
    },
    'video.videometaclass': {
        'Meta': {'object_name': 'Videometaclass'},
        'displayname': ('django.db.models.fields.TextField', [], {}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'metaclass': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['video.Metaclass']"}),
        'sequence': ('django.db.models.fields.IntegerField', [], {}),
        'videoclass': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['video.Videoclass']"})
    },
    'video.videoroll': {
        'Meta': {'object_name': 'VideoRoll'},
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'position': ('django.db.models.fields.FloatField', [], {'default': '0'}),
        'published': ('django.db.models.fields.IntegerField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
        'roll': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'videos_roll_from'", 'to': "orm['video.Video']"}),
        'title': ('django.db.models.fields.TextField', [], {}),
        'video': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'rolls'", 'to': "orm['video.Video']"})
    },
    'video.videotag': {
        'Meta': {'object_name': 'VideoTag'},
        'endtime': ('django.db.models.fields.FloatField', [], {'default': '0'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'initime': ('django.db.models.fields.FloatField', [], {'default': '0'}),
        'tags': ('tagging.fields.TagField', [], {}),
        'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['video.Video']"})
    },
    'video.videothumb': {
        'Meta': {'object_name': 'Videothumb'},
        'height': ('django.db.models.fields.IntegerField', [], {}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.TextField', [], {}),
        'position': ('django.db.models.fields.FloatField', [], {}),
        'size': ('django.db.models.fields.CharField', [], {'default': "'M'", 'max_length': '3'}),
        'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['video.Video']"}),
        'width': ('django.db.models.fields.IntegerField', [], {})
    },
    'video.videotranscode': {
        'Meta': {'object_name': 'Videotranscode'},
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.TextField', [], {}),
        'size': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
        'transcode': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['video.Transcode']"}),
        'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['video.Video']"})
    },
    'video.videovote': {
        'Meta': {'object_name': 'VideoVote'},
        'agent': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'ip': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
        'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['video.Video']"}),
        'vote': ('django.db.models.fields.IntegerField', [], {'default': '0'})
    }
}
# Apps whose migration state is frozen above.
complete_apps = ['video']
|
from aiohttp.web import get, post, Response, json_response, HTTPBadRequest, HTTPForbidden
from aiohttp import FormData
from json import loads
from io import BytesIO
class NotAllowed(HTTPForbidden):
    """403 response raised when a non-whitelisted client hits an admin route."""

    def __init__(self, ip):
        message = (
            "Only localhost and whitelisted IP's can access the admin "
            "routes, Your IP: {}".format(ip)
        )
        super().__init__(text=message)
class Routes:
def __init__(self, instance):
self.instance = instance
self.instance.web.add_routes([
post("/admin/add_manga", self.add_manga),
post("/admin/add_chapter", self.add_chapter),
post("/admin/add_scanlator", self.add_scanlator),
post("/admin/rm_manga", self.rm_manga),
post("/admin/rm_chapter", self.rm_chapter),
get("/admin/subscribe", self.subscribe_to_instance),
get("/admin/unsubscribe", self.unsubscribe_from_instance),
get("/admin/get_pending_approvals", self.get_pending_approvals),
get("/admin/approve_sync", self.approve_sync),
get("/admin/reject_sync", self.reject_sync)
])
def _check(self, request):
addresses = ["127.0.0.1"] + self.instance.config.admin_ips
if request.remote not in addresses:
raise NotAllowed(request.remote)
async def post_async(self,form):
data = FormData()
for i in form:
data.add_field("file", i, filename=i.name)
res = await self.instance.client.post("{}/api/v0/add".format(self.instance.config.upload_ipfs_node),
headers={"Accept" : "application/json"},
data=data,
params={
"wrap-with-directory" : "true",
"stream-channels" : "true",
"pin" : "true",
"quieter" : "true"
})
res = (await res.text()).splitlines()
try:
return [loads(i) for i in res]
except JSONDecodeError:
print("IPFS server error: ", res)
return []
async def add_manga(self, request):
self._check(request)
data = (await request.post()).copy()
data["titles"] = [i.strip() for i in data["titles"].split(",")]
data["artists"] = [i.strip() for i in data["artists"].split(",")]
data["authors"] = [i.strip() for i in data["authors"].split(",")]
data["genres"] = [i.strip() for i in data["genres"].split(",")]
manga = await self.instance.db.create_manga(**data)
return json_response({"id" : manga}, status=201)
async def add_chapter(self, request):
self._check(request)
#TODO: Implement image verification
await self.instance.db.get_manga_by_id(request.query.get("manga_id"))
reader = await request.multipart()
form = []
while True:
part = await reader.next()
if not part:
break
name = part.filename.split("/")[-1]
data = BytesIO(await part.read())
data.name = name
form.append(data)
res = await self.post_async(form)
if len(res) > 0:
cid = next(i["Hash"] for i in res if not i["Name"])
chapter = await self.instance.db.create_chapter(ipfs_link=cid, page_count=len(form) , **request.query)
return json_response({"id" : chapter}, status=201)
else:
return Response(status=500)
async def add_scanlator(self, request):
    """Create a scanlator record from POSTed form data; return its id (201)."""
    self._check(request)
    form = await request.post()
    created = await self.instance.db.create_scanlator(**form)
    return json_response({"id" : created}, status=201)
async def rm_manga(self, request):
    """Delete the manga identified by the ``id`` query parameter.

    Bug fix: the handler previously returned None, which aiohttp rejects
    (handlers must return a Response instance); it now returns 204 No Content.
    """
    self._check(request)
    await self.instance.db.remove_manga(request.query.get("id"))
    return Response(status=204)
async def rm_chapter(self, request):
    """Delete the chapter identified by the ``id`` query parameter.

    Bug fix: the handler previously returned None, which aiohttp rejects
    (handlers must return a Response instance); it now returns 204 No Content.
    """
    self._check(request)
    await self.instance.db.remove_chapter(request.query.get("id"))
    return Response(status=204)
async def subscribe_to_instance(self, request):
    """Ask another instance (``address`` query parameter) to accept us as a
    subscriber; on HTTP 200 the address is queued pending confirmation.

    Bug fix: aiohttp ``Response`` arguments are keyword-only, so the previous
    ``Response(body=str)`` / positional-string calls raised TypeError; string
    bodies are now passed via ``text=``.
    """
    self._check(request)
    address = request.query.get("address")
    # NOTE(review): this GET is missing its URL argument (presumably an
    # endpoint on ``address``) and will raise TypeError as written — confirm
    # the intended remote endpoint before relying on this handler.
    res = await self.instance.client.get(params={"address" : self.instance.config.instance_address})
    if res.status == 200:
        self.instance.context["subscribe_confirmations"].append(address)
        return Response(text="Pending confirmation")
    return Response(text="The instance did not accept our request: '{}'".format(await res.text()))
async def unsubscribe_from_instance(self, request):
    """Placeholder: unsubscribing from a remote instance is not implemented.

    Only the admin-IP check runs; the handler currently returns None, which
    aiohttp will reject — implement and return a Response before exposing it.
    """
    self._check(request)
    pass
async def get_pending_approvals(self, request):
    """Return the pending sync-approval objects as a JSON array."""
    self._check(request)
    pending = self.instance.sync_manager.approvals
    return json_response(list(pending.values()))
async def approve_sync(self, request):
    """Approve the pending sync identified by the ``address`` query parameter.

    Bug fix: aiohttp ``Response`` arguments are keyword-only, so the previous
    positional ``Response("OK")`` raised TypeError; the local was also renamed
    from ``id``, which shadowed the builtin.
    """
    self._check(request)
    address = request.query.get("address")
    self.instance.sync_manager.approvals[address].approve()
    return Response(text="OK")
async def reject_sync(self, request):
    """Reject the pending sync identified by the ``address`` query parameter.

    Bug fix: aiohttp ``Response`` arguments are keyword-only, so the previous
    positional ``Response("OK")`` raised TypeError; the local was also renamed
    from ``id``, which shadowed the builtin.
    """
    self._check(request)
    address = request.query.get("address")
    self.instance.sync_manager.approvals[address].reject()
    return Response(text="OK")
import unittest
class Memory:
    """The AoC 2020 day-15 "memory game" (Van Eck-style) sequence.

    ``a`` holds the numbers spoken so far; ``last`` maps each value to the
    index of its most recent occurrence *before* the final element, so the
    next number can be computed in O(1).
    """

    def __init__(self, a):
        """Seed the sequence from a comma-separated string of ints."""
        self.a = list(map(int, a.split(',')))
        self.last = {}
        # Index every value except the last; the final element is indexed
        # lazily by iterate() so the gap to its previous use can be measured.
        for i in range(len(self.a) - 1):
            self.last[self.a[i]] = i

    def iterate(self):
        """Append the next number: 0 if the last value is new, else the gap
        between its last two occurrences."""
        x = self.a[-1]
        i = len(self.a) - 1
        if x in self.last:
            y = i - self.last[x]
        else:
            y = 0
        self.last[x] = i
        self.a.append(y)

    def get(self, i):
        """Return the i-th (1-indexed) number spoken.

        Bug fix: the original returned ``self.a[-1]``, which is wrong whenever
        the sequence has already grown past ``i`` (e.g. ``get(10)`` after
        ``get(2020)``); indexing ``a[i - 1]`` is correct in all cases.
        """
        while len(self.a) < i:
            self.iterate()
        return self.a[i - 1]
class TestMemory(unittest.TestCase):
    """Exercise Memory.get against the published AoC 2020 day-15 examples."""

    def test_get(self):
        """Check the 2020th and 30,000,000th spoken numbers for sample seeds.

        NOTE: each 30M-iteration case takes a long time; this suite only runs
        when unittest.main() is re-enabled below.
        """
        tests = [
            ('1,3,2', 1),
            ('2,1,3', 10),
            ('1,2,3', 27),
            ('2,3,1', 78),
            ('3,2,1', 438),
            ('3,1,2', 1836),
        ]
        for t, r in tests:
            m = Memory(t)
            self.assertEqual(m.get(2020), r)
        tests = [
            ('0,3,6', 175594),
            ('1,3,2', 2578),
            ('2,1,3', 3544142),
            ('1,2,3', 261214),
            ('2,3,1', 6895259),
            ('3,2,1', 18),
            ('3,1,2', 362),
        ]
        for t, r in tests:
            m = Memory(t)
            self.assertEqual(m.get(30000000), r)
#unittest.main()
# Solve the puzzle input: parts 1 (2020th number) and 2 (30,000,000th number).
m = Memory('14,8,16,0,1,17')
print(m.get(2020))
print(m.get(30000000))
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 22 11:17:33 2021
@author: bb339
Raspberry pi
get api key at https://timezonedb.com/api
"""
import requests
import time
import board
import neopixel
import sys
import datetime
sys.path.append(r"/home/pi/.local/lib/python3.7/site-packages/")
import multiprocess
from multiprocess import Process, Manager, Value
from functools import reduce
# install BeautifulSoup4 and google
from bs4 import BeautifulSoup
import googlesearch#install flask
from flask import Flask
from flask import Response, request, jsonify,send_from_directory
# GPIO pin driving the NeoPixel data line.
pixel_pin = board.D21
# The number of NeoPixels
num_pixels = 8
ORDER = neopixel.GRB
# auto_write=False: LEDs only refresh when pixels.show() is called.
pixels = neopixel.NeoPixel(
    pixel_pin, num_pixels, brightness=0.2, auto_write=False, pixel_order=ORDER
)
app = Flask(__name__)
#clock globals
totsec = 86400  # seconds in a day
#ctr = 0
loc_now = True  # NOTE(review): appears unused in this file — confirm before removing
#manager = Manager()
# Shared between the Flask process and the clock process (multiprocess.Value):
# ctr = current time-of-day in seconds; tf = 1 while the LED loop should run
# (a successful /search query clears it so the clock resets and retunes).
ctr = multiprocess.Value('i',0)
tf = multiprocess.Value('i',1)
@app.route("/search/<query>")
def coord_query(query):
    """
    Web API server query.

    Look up a place name ("&&" stands for a space), scrape Google for its
    coordinates, fetch the local time there via timezonedb, and pause the LED
    clock so it restarts on the new timezone.
    """
    global tf
    if query is None: return jsonify(success = False)
    query = query.replace("&&"," ")
    lat,long= google_coords(query)
    if (lat is not None and long is not None):
        res = coords_request(lat,long)
        # NOTE(review): when coords_request returns None this falls through
        # and the view returns None, which Flask rejects — confirm intended.
        if (res is not None):
            # Pause the clock loop; it blanks the LEDs once, then resumes
            # with the freshly written ctr value.
            with tf.get_lock():
                tf.value=0
            return jsonify(success=True,
                curr_time=res["curr_time"],
                country=res["country"],
                city=res["city"],
                prog_secs=datetime.datetime.now().second)
    else:
        return jsonify(success=False)
def current_milli_time():
    """Return the current Unix time, rounded to whole milliseconds."""
    return round(1000 * time.time())
def coords_request(lat,lng):
    """Fetch the local time for a coordinate from timezonedb and retune the clock.

    On success, writes the destination's seconds-since-midnight into the shared
    ``ctr`` counter and returns a dict with that time plus country/zone names;
    returns None when the API reports an error.
    """
    global ctr,tf
    #params
    # NOTE(review): hard-coded API key checked into source — move to config/env.
    API_key = "C7BV3SHDW5LP"
    _format = "json"
    _url = "http://api.timezonedb.com/v2.1/get-time-zone"
    res = requests.get(_url,params=dict(key=API_key,by="position",format=_format,lat=lat,lng=lng)).json()
    if (res['status']=="OK"):
        start = time.time()
        print(res)
        # "formatted" ends with the local wall-clock "HH:MM:SS".
        ts = res["formatted"].split(' ')[-1].split(":")
        #print(ts)
        secs = (3600*int(ts[0]))+(60*int(ts[1]))+int(ts[2])
        end = time.time()
        #ctr = secs+(end-start)
        # Compensate for the parsing time elapsed before publishing the counter.
        with ctr.get_lock():
            ctr.value = int(secs+(end-start))
        return {"curr_time":ctr.value,
            "country":format_none(res["countryName"]),
            "city":format_none(res["zoneName"])
            }
    else:
        return None
    # return secs+(end-start)
def format_none(val):
    """Map None to the empty string; pass any other value through unchanged."""
    if val is None:
        return ""
    return val
def google_coords(query):
    """
    Web-scrape the Google results page for a place's coordinates.

    Returns (latitude, longitude) as signed floats (N/E positive, S/W
    negative), or (None, None) when the coordinates widget is absent.

    Bug fix: the original fell off the end (implicitly returning a bare None)
    when the widget was missing, which crashed the caller's two-value
    unpacking; it now returns (None, None) as the caller expects.
    """
    h = {"User-Agent":"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.75 Safari/537.36"}
    r = requests.get("https://www.google.ie/search?q={}+coordinates"\
        .format(' '.join(query.split()).replace(" ","+")), headers=h).text
    soup = BeautifulSoup(r,"lxml")
    search_res = soup.find("div", {"class": "Z0LcW XcVN5d"})#coordinates tag
    if search_res is None:
        return (None, None)
    # Rewrite hemisphere suffixes as sign multipliers, e.g.
    # "53.35° N, 6.26° W" -> "53.35*1, 6.26*-1", then evaluate each product.
    rep={"° N":"*1",
        "° S":"*-1",
        "° E":"*1",
        "° W":"*-1"
        }
    res = search_res.text
    for k,v in rep.items():
        res = res.replace(k,v)
    #latitude north ° N
    # print(res)
    u_lat, u_long = list(map(lambda x: reduce((lambda y, z: y * z), map(lambda k: float(k), x.split("*"))) ,\
        res.split(",")))
    #print(u_lat, u_long)
    return (u_lat,u_long)
def control_clock():
    """LED clock main loop: light the NeoPixel strip in proportion to the
    time of day held in the shared ``ctr`` counter.

    Red is shown during the first half of the day, blue during the second.
    A successful /search query clears ``tf``; the loop then blanks the LEDs
    once and sets ``tf`` back to 1 to resume with the new timezone's time.
    """
    global ctr, totsec,tf
    #default settings
    lat, lng = google_coords("Washington Dc")
    coords_request(lat,lng)
    print("initialized")
    #print(ctr)
    #print(ctr/86400)
    #print(len(pixels))
    print(abs( ctr.value-(43200))/totsec)
    cols = [(255,0,0),(25,23,225)]  # (first-half colour, second-half colour)
    while(True):
        if (tf.value ):
            # Wrap around at midnight.
            if (ctr.value>totsec):
                with ctr.get_lock():
                    ctr.value = 0
            #if daytime disp red,
            #nighttime display blue
            #abs( ctr.value-(43200))/43200
            sel = cols[0] if (ctr.value<(totsec/2)) else cols[1]
            # Light a fraction of the strip proportional to the day elapsed.
            for i in range(int(num_pixels*( ctr.value/totsec ))):
                pixels[i] = sel
            pixels.show()
            with ctr.get_lock():
                ctr.value+=1
            #pixels.show()
            time.sleep(1)
        else:
            #reset leds
            pixels.fill((0,0,0))
            with tf.get_lock():
                tf.value = 1
#def main():
#    control_clock()
if __name__ == "__main__":
    # app.debug = True
    #print(0)
    # Run the web API and the LED clock loop as separate processes so the
    # Flask server never blocks the once-a-second LED updates.
    Process(target=app.run, kwargs=dict(host="127.0.0.1",port=8025)).start()
    Process(target=control_clock).start()#
import sys
import os
import pandas as pd
import plotly.graph_objs as go
import tarfile
import pickle
import plotly as py
import shutil
from sklearn import svm
from ipywidgets import interactive
from src.models.train_model import genome_svm_selection
from IPython.display import display
from src.data.rfam_db import rfam_session, Genome
from src.data.make_dataset import extract_igrs, annotate_igrs, download_genome
from Bio import SeqIO, SeqRecord
def graph_layout(genome):
    """Build the plotly Layout for a genome's GC-vs-length IGR scatter plot.

    Y axis: GC content, 0-100%.  X axis: IGR length on a log scale spanning
    10-10,000 nt.  Bold tick labels are shown only at decade boundaries; the
    title is the genome's scientific name in bold italics.
    """
    ytickvals = list(range(0, 110, 10))
    yticktext = ['<b>0</b>', '', '<b>20</b>', '', '<b>40</b>', '', '<b>60</b>', '', '<b>80</b>', '', '<b>100</b>']
    # Minor ticks at every 10/100/1000 step; only the decades get labels.
    xtickvals = list(range(10, 100, 10)) + list(range(100, 1000, 100)) + list(range(1000, 11000, 1000))
    xticktext = ['<b>10</b>'] + [''] * 8 + ['<b>100</b>'] + [''] * 8 + ['<b>1000</b>'] + [''] * 8 + ['<b>10000</b>']
    layout = go.Layout(title=dict(text="<b><i>{}</i></b>".format(genome.scientific_name), x=0.45, y=0.925),
                       font=dict(family="Arial", size=18, color='black'),
                       yaxis=dict(title="<b> GC Content (%) </b>",
                                  showline=True, showgrid=False,
                                  linecolor='black',
                                  linewidth=2, tickwidth=2,
                                  tickfont=dict(size=16),
                                  ticks='outside',
                                  tickvals=ytickvals, ticktext=yticktext,
                                  mirror=True,
                                  range=[0, 100]),
                       xaxis=dict(title=dict(text="<b> IGR Length </b>"),
                                  type="log",
                                  tickangle=0,
                                  tickvals=xtickvals, ticktext=xticktext,
                                  showline=True, showgrid=False,
                                  linecolor='black', linewidth=2,
                                  tickfont=dict(size=16), tickwidth=2,
                                  ticks='outside',
                                  mirror=True,
                                  # log-axis range is given in exponents: 10^1..10^4
                                  range=[1, 4],),
                       width=900,
                       hovermode='closest',
                       legend=dict(y=0.5, x=1.05, borderwidth=2),
                       height=600,
                       autosize=False,
                       margin=dict(autoexpand=False,l=75,r=250,b=100,t=100,pad=5),
                       plot_bgcolor="white",paper_bgcolor='white')
    return layout
def graph_genome(annotated_df, selection=None):
    """Build one plotly Scatter trace per IGR category (GC% vs length).

    When ``selection`` (a boolean mask aligned with ``annotated_df``) is
    given, unannotated IGRs are relabelled "Selected IGR"/"No Known RNA"
    according to the mask, per-category selected-point indices are computed,
    and enrichment statistics are printed.
    """
    annotated_df = annotated_df.copy()
    # category -> [marker symbol, marker colour, hoverinfo mode]
    category_style = {
        "No Known RNA": ["triangle-up-open", "rgba(192,192,192,0.9)", 'skip'],
        "Selected IGR": ["triangle-up", "rgba(192,192,192,0.9)", 'skip'],
        "sRNA": ["triangle-down", "rgba(192,192,192,0.9)", 'text'],
        "tRNA": ["diamond", "rgba(55,126,184,0.8)", 'text'],
        "rRNA": ["square", "rgba(55,126,184, 0.8)", 'text'],
        "Intron": ["cross", "rgba(255,255,51, 0.8)", 'text'],
        "RNase P": ["triangle-right", "rgba(255,127,0, 0.8)", 'text'],
        "6S RNA": ["triangle-left", "rgba(55,126,184,0.8)", 'text'],
        "tmRNA": ["diamond", "rgba(255,127,0,0.8)", 'text'],
        "Riboswitch": ["star", "rgba(228,26,28,0.8)", 'text'],
        "Ribozyme": ["x", "rgba(247,129,191,0.8)", 'text'],
        "Miscellaneous": ["pentagon", "rgba(166,86,40,0.8)", 'text'],
        "Multiple": ["star-diamond", "rgba(152,78,163,0.8)", 'text']
    }
    knowns = annotated_df['category'] != 'No Known RNA'
    total_igrs = len(knowns)
    total_knowns = sum(knowns)
    if selection is not None:
        # Set the values of category to "Selected IGR" vs "No Known RNA" for
        # un-annotated rows, according to the selection mask.
        annotated_df.loc[annotated_df["rfam_acc"].isnull() & selection, "category"] = "Selected IGR"
        annotated_df.loc[annotated_df["rfam_acc"].isnull() & ~selection, "category"] = "No Known RNA"
        unique_categories = annotated_df["category"].unique()
        knowns_included = sum(knowns & selection)
        unknowns_included = sum(~knowns & selection)
        print("Number of known IGRs included: {} ({:.1%})".format(knowns_included,
                                                                  knowns_included / total_knowns))
        # NOTE(review): the percentage below divides by ALL IGRs, not by the
        # number of unknown IGRs — confirm that is the intended denominator.
        print("Number of unknown IGRs included: {} ({:.1%})".format(unknowns_included, unknowns_included/total_igrs))
        print("Fold Enrichment: {:5.2f}".format((knowns_included/sum(selection))/(total_knowns/total_igrs) ))
        annotated_df['selection'] = selection
        # Per-category positional indices of selected points (plotly's
        # selectedpoints wants indices within each trace, not global ones).
        point_selection = [
            list(annotated_df[annotated_df['category'] == category].reset_index().query('selection').index) for
            category in unique_categories]
    else:
        unique_categories = annotated_df["category"].unique()
        point_selection = [None] * len(unique_categories)
    scatter_plots = [go.Scatter(y=annotated_df[annotated_df['category'] == category]['gc'],
                                x=annotated_df[annotated_df['category'] == category]['length'],
                                name=category,
                                mode='markers',
                                selectedpoints= point_selection[index],
                                text=annotated_df[annotated_df['category'] == category]['description'],
                                hoverinfo=category_style[category][2],
                                marker=dict(size=10,
                                            symbol=category_style[category][0],
                                            color=category_style[category][1])
                                ) for index, category in enumerate(unique_categories)]
    return scatter_plots
def interactive_selection(annotated_df, layout, gamma=0.001, class_weight_mod=1):
    """Run the SVM selection on the annotated IGRs, render the scatter figure
    with the selection highlighted, display it, and return the FigureWidget."""
    chosen = genome_svm_selection(annotated_df, gamma=gamma, class_weight_mod=class_weight_mod)
    # One scatter trace per IGR category, with the SVM picks overlaid.
    traces = graph_genome(annotated_df, selection=chosen)
    figure = go.FigureWidget(data=traces, layout=layout)
    display(figure)
    return figure
def display_genome(upid):
    """Fetch a genome by its Rfam id, extract and annotate its IGRs, and build
    the base GC-vs-length scatter figure (no selection overlay).

    Returns (annotated_df, figure, layout, genome).
    """
    session = rfam_session()
    genome = session.query(Genome).get(upid)
    session.close()
    # Ensure the sequence files are present locally before extraction.
    download_genome(genome)
    igr_df = extract_igrs(genome, igr_length_cutoff=1)
    annotated_df = annotate_igrs(genome, igr_df)
    scatter_plots = graph_genome(annotated_df)
    layout = graph_layout(genome)
    fig = go.FigureWidget(data=scatter_plots, layout=layout)
    return annotated_df, fig, layout, genome
def prepare_selection(annotated_df):
    """Build the SVM target labels plus summary counts.

    An IGR counts as "known" unless its category is 'No Known RNA' or 'sRNA'.
    Returns (labels, total IGR count, known count, unknown count).
    """
    labels = ~annotated_df['category'].isin(['No Known RNA', 'sRNA'])
    n_total = len(labels)
    n_known = labels.sum()
    return (labels, n_total, n_known, n_total - n_known)
def build_interactive_fn (annotated_df, layout, genome):
    """Return a closure suitable for ipywidgets.interactive that refits the
    selection SVM for the given hyper-parameters and displays the result.

    The ``*_exp`` arguments are applied as powers of ten (C = 10**c_exp,
    gamma = 10**gamma_exp).
    """
    y, total_igrs, total_knowns, total_unknowns = prepare_selection(annotated_df)
    def interactive_fn(class_weight_mod=0.5, c_exp=2, gamma_exp=-2,):
        # Weight classes by the opposite class's prevalence, scaled by
        # class_weight_mod, to counter the known/unknown imbalance.
        class_weight = {False: total_knowns / total_igrs, True: (total_unknowns / total_igrs * class_weight_mod)}
        svm_clf = svm.SVC(C=10**c_exp, class_weight=class_weight, gamma=10**(gamma_exp), random_state=0)
        svm_clf.fit(annotated_df.loc[:, ["gc", "log_length"]], y)
        selection = pd.Series(svm_clf.predict(annotated_df.loc[:, ["gc", "log_length"]]))
        scatter_plots = graph_genome(annotated_df, selection=selection)
        fig = go.FigureWidget(data=scatter_plots, layout=layout)
        display(fig)
    return interactive_fn
def save_selected_IGRs(interactive_plot, annotated_df, genome):
    """Persist an interactive SVM selection as a self-contained BLAST job bundle.

    Recreates the classifier from the hyper-parameters stored on the
    ipywidgets ``interactive_plot``, then writes under
    data/interim/<assembly>/selection_<params>/: the pickled classifier, an
    SVG + plotly-JSON figure, one FASTA per selected unknown IGR, the
    annotated CSV, helper shell scripts, a BLAST jobfile, and finally a
    .tar.gz bundle under data/export/.
    """
    # Hyper-parameters the user settled on in the interactive widget.
    class_weight_mod = interactive_plot.kwargs["class_weight_mod"]
    c_exp = interactive_plot.kwargs["c_exp"]
    gamma_exp = interactive_plot.kwargs["gamma_exp"]
    output_folder="data/interim/{}/selection_{}_{}_{}".format(genome.assembly_acc, class_weight_mod, c_exp, gamma_exp)
    if not os.path.exists(output_folder + '/data_files'):
        os.makedirs(output_folder + '/data_files')
    # Re-create the selection
    y, total_igrs, total_knowns, total_unknowns = prepare_selection(annotated_df)
    class_weight = {False: total_knowns / total_igrs, True: (total_unknowns / total_igrs * class_weight_mod)}
    svm_clf = svm.SVC(C=10**c_exp, class_weight=class_weight, gamma=10**gamma_exp, probability=True, random_state=0)
    svm_clf.fit(annotated_df.loc[:, ["gc", "log_length"]], y)
    # Save the selection classifier to a pickle
    svm_pickle = pickle.dumps(svm_clf)
    with open("{}/data_files/svmclf.pickle".format(output_folder,genome.assembly_acc, class_weight_mod, c_exp, gamma_exp), 'wb') as svm_pickle_file:
        svm_pickle_file.write(svm_pickle)
    selection = pd.Series(svm_clf.predict(annotated_df.loc[:, ["gc", "log_length"]]))
    # Save a graph of the genome.
    scatter_plots = graph_genome(annotated_df, selection=selection)
    layout = graph_layout(genome)
    fig = go.FigureWidget(data=scatter_plots, layout=layout)
    fig.write_image("{}/data_files/{}_plot.svg".format(output_folder,genome.scientific_name.replace(' ','_')))
    py.io.write_json(fig, "{}/data_files/{}_json.plotly".format(output_folder,genome.scientific_name.replace(' ','_')))
    # Only un-annotated IGRs that the SVM picked go out for BLAST.
    selected_unknowns = selection & (annotated_df['category'] == 'No Known RNA')
    # Save a fasta file with all the selected IGRs
    # (ids follow the "accession/start-end" convention, 1-based start).
    selected_igr_list = [SeqRecord.SeqRecord(row.sequence, id=("{}/{}-{}".format(row.accession, row.start +1, row.end)))
                         for row in annotated_df.loc[selected_unknowns, ["accession","start","end","sequence"]].itertuples()]
    if not os.path.exists(output_folder + '/igr_fastas'):
        os.makedirs(output_folder + '/igr_fastas')
    for igr in selected_igr_list:
        outputfilename = "{}/igr_fastas/{}.fasta".format(output_folder, igr.id.replace('/','_'))
        SeqIO.write(igr, outputfilename, "fasta")
    annotated_df.to_csv("{}/data_files/annotated_igrs.csv".format(output_folder), index=False)
    #Block 6
    # Copy the cluster helper scripts alongside the data.
    if not os.path.exists(output_folder + '/scripts'):
        os.makedirs(output_folder + '/scripts')
    shutil.copy('src/shell/cluster.conf', '{}/scripts'.format(output_folder))
    shutil.copy('src/shell/make_tar.sh', '{}/scripts'.format(output_folder))
    shutil.copy('src/shell/blast_source_template.sh', '{}/scripts/blast_source.sh'.format(output_folder))
    shutil.copy('src/shell/blast_run_template.sh', '{}/blast_run.sh'.format(output_folder))
    if not os.path.exists(output_folder + '/blast_xml'):
        os.makedirs(output_folder + '/blast_xml')
    if not os.path.exists(output_folder + '/output'):
        os.makedirs(output_folder + '/output')
    # One BLAST command per selected IGR, output captured as XML.
    with open("{}/scripts/blast_jobfile.sh".format(output_folder), 'w') as jobfile:
        for igr in selected_igr_list:
            fasta_filename = "igr_fastas/{}.fasta".format(igr.id.replace('/','_'))
            xml_filename = "blast_xml/{}.xml".format(igr.id.replace('/','_'))
            jobfile.write("source scripts/blast_source.sh; $BLASTCMD {} > {}\n".format(fasta_filename, xml_filename))
    # Bundle everything for transfer to the cluster.
    with tarfile.open("data/export/{}_{}_selection_{}_{}_{}_blastdata.tar.gz".format('_'.join(genome.scientific_name.split(' ')[0:2]), genome.assembly_acc, class_weight_mod, c_exp, gamma_exp), "w:gz") as tar:
        tar.add(output_folder, arcname="{}_{}_selection_{}_{}_{}_blastdata".format('_'.join(genome.scientific_name.split(' ')[0:2]), genome.assembly_acc, class_weight_mod, c_exp, gamma_exp))
        print("\nTarfile created:",tar.name)
    return
|
from collections import defaultdict
import json
import gzip
import pandas as pd
import numpy as np
import itertools
from utils import *
from sklearn import preprocessing
def create_time_series_data(df):
    """
    Stack the speed and altitude series into a single 3-D input array.

    :param df: dataframe whose time-series cells are length-300 lists
    :return: (inputs shaped (n, 300, 2), heart-rate targets shaped (n, 300, 1))
    """
    df = df.reset_index(drop=True)
    speed = np.array(df["tar_derived_speed"].tolist())
    altitude = np.array(df["altitude"].tolist())
    inputs = np.dstack([speed, altitude])
    targets = np.array(df["tar_heart_rate"].tolist()).reshape(-1, 300, 1)
    return inputs, targets
def create_time_series_1D(df, feature):
    """
    Turn one list-valued time-series column into a (n, 300, 1) array.

    :param df: dataframe whose ``feature`` cells are length-300 lists
    :param feature: column name to extract
    :return: the column as a 3-D numpy array
    """
    df = df.reset_index(drop=True)
    series = df[feature].tolist()
    return np.array(series).reshape(-1, 300, 1)
def process_catData(df, feature):
    """
    Label-encode a categorical column and broadcast it along the time axis.

    :param df: dataframe
    :param feature: (str) categorical feature to be processed
    :return: encoded labels shaped (n, 300, 1), one constant label per row
    """
    df = df.reset_index(drop=True)
    le = preprocessing.LabelEncoder()
    le.fit(df[feature])
    transfrom_data = le.transform(df[feature])
    # Debug output: first two encoded values and their original labels.
    print(f'Feature: {feature}')
    print(transfrom_data.tolist()[:2])
    print(list(le.inverse_transform(transfrom_data.tolist()[:2])))
    print()
    # Repeat each scalar label 300 times so it aligns with the time series.
    return np.tile(transfrom_data, (300, 1)).T.reshape(-1, 300, 1)
def find_user_workouts(wid, df):
    """Return the id of the workout the same user did immediately before ``wid``.

    Workouts are ordered by the first entry of their ``timestamp`` list;
    returns None when ``wid`` is the user's earliest workout.
    """
    workout = df.loc[lambda df: df['id'] == wid]
    user = workout['userId'].tolist()[0]
    start = workout['timestamp'].tolist()[0][0]
    user_df = df.loc[lambda df: df['userId'] == user][:]
    user_df['start'] = user_df['timestamp'].apply(lambda x: x[0])
    ordered = sorted(zip(user_df.start, user_df.id), key=lambda pair: pair[0])
    pos = ordered.index((start, wid))
    return ordered[pos - 1][1] if pos > 0 else None
def time_since_last(wid, df):
    """Seconds between the start of workout ``wid`` and the user's previous one.

    Returns NaN when there is no previous workout (``prevId`` NaN or
    non-positive).

    Fix: replaced ``np.NaN`` with ``np.nan`` — the upper-case alias was
    removed in NumPy 2.0, so the original raises AttributeError there.
    """
    prevWid = df[df["id"] == wid]["prevId"].values[0]
    t = np.nan
    # NaN > 0 is False, so rows without a predecessor keep t = NaN.
    if prevWid > 0:
        timePrev = np.array(df.loc[lambda df: df['id'] == prevWid]['timestamp'])[0][0]
        timeCurr = np.array(df.loc[lambda df: df['id'] == wid]['timestamp'])[0][0]
        t = timeCurr - timePrev
    return t
def prev_wid(df):
    """Series mapping each workout id to the same user's previous workout id
    (None for a user's first workout)."""
    return df['id'].apply(lambda workout_id: find_user_workouts(workout_id, df))
def scaling(row, mean, std, zMultiple=1):
    """Z-score a sequence: (row - mean) / std, scaled by ``zMultiple``.

    :param row: sequence of numbers (any numeric dtype)
    :param mean: value to centre on
    :param std: value to divide by
    :param zMultiple: optional extra multiplier applied after standardizing
    :return: the scaled values as a plain Python list

    Bug fix: the original built an array with the input's dtype and then did
    in-place ``row -= mean``; for integer input and a float mean that raises
    a NumPy casting error.  Converting to float up front makes the function
    work for any numeric input.
    """
    values = np.asarray(row, dtype=float)
    values = (values - mean) / std * zMultiple
    return values.tolist()
def scaleData(df, feature):
    """Z-score every value of a list-valued column using the mean/std computed
    across ALL rows' values, and return the transformed column (of lists)."""
    # Flatten all the per-row lists into one population for the statistics.
    flat_data = list(itertools.chain.from_iterable(df[feature].values.flatten()))
    mean, std = np.mean(flat_data), np.std(flat_data)
    # Debug output: which feature was scaled and with what statistics.
    print("\n")
    print(feature)
    print(mean, std)
    scaled_feat = df[feature].apply(scaling, args=(mean, std))
    return scaled_feat
def clean_time(row):
    """Shift a timestamp sequence so it starts at zero; returns an ndarray."""
    timestamps = np.array(row)
    return timestamps - timestamps[0]
def curr_preprocess(df, load_exist=True, dataset_name=None):
    """Turn the raw workout dataframe into the model's input/target arrays.

    When ``load_exist`` is True and cached .npy files exist under
    data/processed/<dataset_name>/, they are loaded instead of recomputed.
    Otherwise: link each workout to the user's previous one, z-score the
    continuous features, drop each user's first workout, build the per-feature
    (n, 300, 1) arrays plus the previous-workout tensor, cache them if a
    dataset name was given, and return them.

    Returns [input_speed, input_alt, input_gender, input_sport, input_user,
    input_time_last, prevData, targData].
    """
    target_dir = f'./data/processed/{dataset_name}/'
    if load_exist:
        assert dataset_name is not None
        if os.path.exists(os.path.join(target_dir, 'input_speed.npy')):
            print('loading existing data')
            # Load order must match the return order of the recompute path.
            outputs = ['input_speed', 'input_alt', 'input_gender', 'input_sport',
                       'input_user', 'input_time_last', 'prevData', 'targData']
            out_vars = []
            for output in outputs:
                out_var = np.load(f'./data/processed/{dataset_name}/{output}.npy')
                out_vars.append(out_var)
            return out_vars
    df['prevId'] = prev_wid(df)
    df['time_last'] = df['id'].apply(lambda x: time_since_last(x, df))
    # Standardize the scalar gap feature over the whole column.
    df['time_last'] = scaling(df['time_last'], np.mean(df['time_last']), np.std(df['time_last']), zMultiple=1)
    df = prev_dataframe(df)
    for feature in ["tar_derived_speed", "altitude", "tar_heart_rate"]:
        df[feature] = scaleData(df, feature)
    # First workouts have no "previous workout" context, so drop them.
    df = remove_first_workout(df)
    df.reset_index(drop=True, inplace=True)
    # seqs, targData = create_time_series_data(df)
    input_speed = create_time_series_1D(df, 'tar_derived_speed')
    input_alt = create_time_series_1D(df, 'altitude')
    targData = create_time_series_1D(df, 'tar_heart_rate')
    input_gender = process_catData(df, 'gender')
    input_sport = process_catData(df, 'sport')
    input_user = process_catData(df, 'userId')
    # Broadcast the scalar time-gap to every timestep, like the categoricals.
    input_time_last = np.tile(df.time_last, (300, 1)).T.reshape(-1, 300, 1)
    prevData = prev_time_series_data(df)
    if dataset_name is not None:
        mkdir(target_dir)
        np.save(os.path.join(target_dir, 'input_speed.npy'), input_speed)
        np.save(os.path.join(target_dir, 'input_alt.npy'), input_alt)
        np.save(os.path.join(target_dir, 'input_gender.npy'), input_gender)
        np.save(os.path.join(target_dir, 'input_sport.npy'), input_sport)
        np.save(os.path.join(target_dir, 'input_user.npy'), input_user)
        np.save(os.path.join(target_dir, 'input_time_last.npy'), input_time_last)
        np.save(os.path.join(target_dir, 'prevData.npy'), prevData)
        np.save(os.path.join(target_dir, 'targData.npy'), targData)
    return [input_speed, input_alt, input_gender, input_sport, input_user, input_time_last, prevData, targData]
def prev_dataframe(df):
    """Join each workout with its previous workout's speed/altitude/heart-rate
    (columns prefixed ``prev_``).

    Rows whose ``prevId`` has no match — a user's first workout — are dropped
    by the inner merges.
    """
    # df["prevID"] = prev_wid(df)
    df2 = df[["tar_derived_speed", "altitude", "tar_heart_rate", "id"]][:]
    df2.rename(columns={"tar_derived_speed": "prev_tar_speed",
                        "altitude": "prev_altitude",
                        "tar_heart_rate": "prev_tar_heart_rate",
                        "id": "id"}, inplace=True)
    prevDf = pd.DataFrame({"pid": df["prevId"]})
    prevDf = prevDf.merge(df2, left_on="pid", right_on="id")
    mergeDF = df.merge(prevDf, left_on="prevId", right_on="pid")
    # The merge suffixes the duplicated id column; restore the original name.
    mergeDF.rename(columns={"id_x": "id"}, inplace=True)
    return mergeDF
def prev_time_series_data(mergeDF):
    """Stack the previous workout's speed/altitude/heart-rate lists into one
    (n, T, 3) array."""
    channels = [np.array(mergeDF[col].tolist())
                for col in ("prev_tar_speed", "prev_altitude", "prev_tar_heart_rate")]
    return np.dstack(channels)
def remove_first_workout(df):
    """Drop each user's earliest workout (ordered by first timestamp entry).

    Users with a single workout therefore disappear from the result.
    """
    kept = []
    for user in df['userId'].unique():
        user_df = df[df['userId'] == user]
        starts = user_df['timestamp'].apply(lambda x: x[0])
        ordered = sorted(zip(starts, user_df['id']), key=lambda pair: pair[0])
        # Skip the earliest workout, keep everything after it.
        for _, workout_id in ordered[1:]:
            kept.append(df[df['id'] == workout_id][:])
    return pd.concat(kept)
if __name__ == "__main__":
    # Build and cache the processed tensors for every gender/sport split
    # (load_exist=False forces a rebuild even when cached arrays exist).
    set_path("saman")
    df1 = pd.read_json('./data/female_bike.json')
    df2 = pd.read_json('./data/female_run.json')
    df3 = pd.read_json('./data/male_run.json')
    df4 = pd.read_json('./data/male_bike.json')
    print('processing all female')
    curr_preprocess(pd.concat([df1, df2]), load_exist=False, dataset_name='female')
    print('processing all male')
    curr_preprocess(pd.concat([df3, df4]), load_exist=False, dataset_name='male')
    print('processing all run')
    curr_preprocess(pd.concat([df2, df4]), load_exist=False, dataset_name='run')
    print('processing all bike')
    curr_preprocess(pd.concat([df1, df3]), load_exist=False, dataset_name='bike')
    print('processing all data')
    curr_preprocess(pd.concat([df1, df2, df3, df4]), load_exist=False, dataset_name='all')
    # [input_speed, input_alt, input_gender, input_sport, input_user, input_time_last, prevData, targData] = curr_preprocess(df1)
    # print(input_speed.shape)
    # print(input_gender.shape)
    # print(input_sport.shape)
    # print(input_time_last)
    # print(prevData.shape)
|
# coding:utf-8
#!/usr/bin/python
# ========================================================
# Project: project
# Creator: lilyluo
# Create time: 2020-04-25 12:42
# IDE: PyCharm
# =========================================================
# Definition for a binary tree node.
from collections import deque, defaultdict
from Week_03 import buildTree
class TreeNode:
    """A binary tree node holding a value and optional left/right children."""

    def __init__(self, x):
        # Children start empty; callers link nodes together after construction.
        self.val = x
        self.left = None
        self.right = None
class Solution:
    """Binary-tree traversals (pre/in/post/level order), each in recursive and
    iterative variants.

    Bug fix: ``levelOrder_1`` built its queue with ``deque(root)``, which
    tries to iterate the node object itself and raises TypeError; it now
    seeds the queue with ``deque([root])``.  Docstrings translated to English.
    """

    def preorderTraversal_1(self, root: 'TreeNode'):
        """Preorder (root, left, right) via recursion."""
        res = []
        def helper(node):
            if not node:
                return
            res.append(node.val)
            helper(node.left)
            helper(node.right)
        helper(root)
        return res

    def preorderTraversal_2(self, root: 'TreeNode'):
        """Preorder via an explicit stack (right child pushed first so the
        left child is popped first)."""
        if not root:
            return []
        stack, output = [root, ], []
        while stack:
            node = stack.pop()
            if node is not None:
                output.append(node.val)
                if node.right is not None:
                    stack.append(node.right)
                if node.left is not None:
                    stack.append(node.left)
        return output

    def inorderTraversal_1(self, root):
        """Inorder (left, root, right) via recursion."""
        res = []
        if not root:
            return []
        def helper(node):
            if node.left is not None:
                helper(node.left)
            res.append(node.val)
            if node.right is not None:
                helper(node.right)
        helper(root)
        return res

    def inorderTraversal_2(self, root):
        """Inorder via an explicit stack: descend left, pop, then turn right."""
        curr = root
        stack, output = [], []
        while curr or stack:
            while curr:
                stack.append(curr)
                curr = curr.left
            curr = stack.pop()
            output.append(curr.val)
            curr = curr.right
        return output

    def inorderTraversal_3(self, root):
        """Inorder using colour marking: a WHITE (unvisited) node is expanded
        by pushing right child, itself (GRAY), then left child; a GRAY node's
        value is emitted."""
        WHITE, GRAY = 0, 1
        res = []
        stack = [(WHITE, root)]
        while stack:
            color, node = stack.pop()
            if node is None:
                continue
            if color == WHITE:
                stack.append((WHITE, node.right))
                stack.append((GRAY, node))
                stack.append((WHITE, node.left))
            else:
                res.append(node.val)
        return res

    def postorder_1(self, root):
        """Postorder (left, right, root) by producing root-right-left
        recursively and reversing the result."""
        res = []
        if not root:
            return []
        def helper(node):
            res.append(node.val)
            if node.right is not None:
                helper(node.right)
            if node.left is not None:
                helper(node.left)
        helper(root)
        return res[::-1]

    def postorder_2(self, root):
        """Postorder via direct recursion."""
        res = []
        if not root:
            return []
        def helper(node):
            if node.left is not None:
                helper(node.left)
            if node.right is not None:
                helper(node.right)
            res.append(node.val)
        helper(root)
        return res

    def postorder_3(self, root):
        """Postorder iteratively: emit root-right-left with a stack, then
        reverse."""
        res = []
        if not root:
            return []
        stack = [root, ]
        while stack:
            node = stack.pop()
            if node.left is not None:
                stack.append(node.left)
            if node.right is not None:
                stack.append(node.right)
            res.append(node.val)
        return res[::-1]

    def levelOrder_1(self, root):
        """Level order via BFS with a queue; returns a list of per-level
        value lists."""
        output = []
        if root is None:
            return []
        # Fix: deque(root) iterated the node itself; seed with a one-item list.
        que = deque([root])
        level = len(que)
        while que:
            res = []
            for _ in range(level):
                node = que.popleft()
                res.append(node.val)
                if node.left is not None:
                    que.append(node.left)
                if node.right is not None:
                    que.append(node.right)
            level = len(que)
            output.append(res)
        return output

    def levelOrder_2(self, root):
        """Level order via recursion, bucketing values by depth."""
        output = defaultdict(list)
        if root is None:
            return []
        def helper(level, node):
            output[level].append(node.val)
            if node.left is not None:
                helper(level + 1, node.left)
            if node.right is not None:
                helper(level + 1, node.right)
        helper(0, root)
        return list(output.values())
# Demo: rebuild the sample tree from its preorder/inorder sequences using the
# Week_03 buildTree solution, then print its level-order traversal.
preorder = [3,9,20,15,7]
inorder = [9,3,15,20,7]
bt = buildTree.Solution()
binary_tree = bt.buildTree(preorder,inorder)
so = Solution()
out = so.levelOrder_2(binary_tree)
print(out)
|
from django.db import models
class Airport(models.Model):
    """An airport, keyed by its short code (up to 20 characters).

    # NOTE(review): presumably an IATA/ICAO-style identifier — confirm scheme.
    """
    code = models.CharField(max_length=20, primary_key=True)
    name = models.CharField(max_length=200)
    city = models.CharField(max_length=200)
    # Coordinates stored with 6 decimal places of precision.
    latitude = models.DecimalField(max_digits=10, decimal_places=6)
    longitude = models.DecimalField(max_digits=10, decimal_places=6)
    country = models.CharField(max_length=20)
|
import pytest
from eikon.tools import check_for_int, check_for_string, is_list_of_string, is_string_type, tz_replacer
def test_check_for_int():
    """check_for_int accepts an int and raises ValueError for a string."""
    check_for_int(parameter=5, name="Maffay")
    with pytest.raises(ValueError):
        check_for_int(parameter="Peter", name="Maffay")
def test_check_for_string():
    """check_for_string accepts a string and raises ValueError for an int."""
    check_for_string(parameter="Peter", name="Maffay")
    with pytest.raises(ValueError):
        check_for_string(parameter=5, name="Maffay")
def test_is_list_of_string():
    """is_list_of_string is True only when every element is a string."""
    assert is_list_of_string(values=["Peter", "Maffay"])
    assert not is_list_of_string(values=["Peter", 5])
def test_is_string_type():
    """is_string_type distinguishes strings from non-strings."""
    assert is_string_type("Peter")
    assert not is_string_type(5)
def test_tz_replacer():
    """tz_replacer strips trailing timezone designators (Z, -0000, .000Z)."""
    assert tz_replacer(s="2019-05-05 20:00:00Z") == "2019-05-05 20:00:00"
    assert tz_replacer(s="2019-05-05 20:00:00-0000") == "2019-05-05 20:00:00"
    assert tz_replacer(s="2019-05-05 20:00:00.000Z") == "2019-05-05 20:00:00"
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm, Normalize
import sys
import time
from multiprocessing import Pool
import cProfile
import pstats
from rnns import utils, image_utils
from gmm_placing import gaussian, collect_data, heatmap, placing_utils
class GMMSequencePredictions:
def __init__(self, init_obj, plate, ref_plate_dims, gaussians, image,
             obj_image, obj_mask=None, init_loc=None, transform=None):
    """
    Uses cross entropy optimization to get a single placement prediction.

    Inputs:
        init_obj (np.array): shape (4,) bounding box of the object used for
            the initial placement, [x_min, y_min, x_max, y_max] in pixel
            space (values 1-4 of a YOLO label: YOLO_label[1:5]).
        plate (np.array): shape (4,) bounding box of the plate/cutting board
            used for the initial placement, same format as init_obj.
        ref_plate_dims (list or int): real-world plate dimensions in meters —
            either an int (0 = smaller board, 1 = larger board) or a
            [width, height] list in meters.
        gaussians (dict): object to use for scoring data
            (use gaussians.SequenceGMMs.gaussians).
        image (np.array): background image of the scene to place onto.
        obj_image (np.array): image of the initial object being placed.
        obj_mask (np.array): alpha-layer mask of the initial object.
            #TODO remove once the image format's 4th channel carries alpha.
        init_loc (np.array): shape (2,) (y, x) coordinate for the initial
            placement when init_obj only provides dimensions.
        transform: camera transform for pixel->meter conversion (TODO,
            currently unsupported).

    Bug fixes: ``plate_ref_heigsht`` typo corrected, and ``self.gaussians``
    now stores the ``gaussians`` argument instead of the imported ``gaussian``
    module.
    """
    self.seq_idx = 1
    # get dimensions of inital object
    if init_loc is not None:
        init_height, init_width = placing_utils.get_bbox_dims(init_obj)
        init_centerx = init_loc[1]
        init_centery = init_loc[0]
    else:
        init_centerx, init_centery, init_height, init_width = placing_utils.get_bbox_center(init_obj, return_dims=True)
    # array of the positions of the previous objs in the sequence
    self.prev_obj_centers = np.array([init_centery, init_centerx]).reshape(1, 2)
    self.prev_obj_dims = np.array([init_height, init_width]).reshape(1, 2)
    self.prev_objs = np.array([init_obj]).reshape(1, 4)
    # dimensions of the plate or cutting board descriptor that object is on
    self.plate = plate
    # NOTE(review): this unpack order (w, h, cx, cy) differs from the
    # (cx, cy, h, w) order used for init_obj above — confirm which order
    # placing_utils.get_bbox_center actually returns.
    self.plate_width, self.plate_height, self.plate_centerx, self.plate_centery = placing_utils.get_bbox_center(plate, return_dims=True)
    # Make the image of the initial placement
    self.img = image_utils.paste_image(image, obj_image, [init_centerx, init_centery], obj_mask)
    # assign the cutting board dimensions
    assert type(ref_plate_dims) is list or type(ref_plate_dims) is int
    if type(ref_plate_dims) == int:
        assert 0 <= ref_plate_dims < 2
        if ref_plate_dims == 0:
            plate_ref_width = 0.358
            plate_ref_height = 0.280  # fix: was misspelled 'plate_ref_heigsht'
        elif ref_plate_dims == 1:
            plate_ref_width = 0.455
            plate_ref_height = 0.304
    else:
        assert len(ref_plate_dims) == 2
        plate_ref_width = ref_plate_dims[0]
        plate_ref_height = ref_plate_dims[1]
    # ratio to convert pixels to meters (meters/pixels)
    if transform is None:
        self.ratio = plate_ref_width/self.plate_width
    else:
        #TODO get the camera intrinsics/extrinsics to get more accurate conversion
        raise ValueError('Not implemented yet')
    # initialize the variables for next object to be placed
    self.next_obj = None
    self.next_obj_width = None
    self.next_obj_height = None
    # fix: the original assigned the imported ``gaussian`` module here instead
    # of the ``gaussians`` argument, so the scoring GMMs were never stored.
    self.gaussians = gaussians
def update_next_obj(self, next_obj):
    """
    Record the next object to be placed and cache its bounding-box size.

    Inputs:
        next_obj (np.array): shape (4,) bounding box of the next object,
            [x_min, y_min, x_max, y_max] in pixel space
            (values 1-4 of a YOLO label: YOLO_label[1:5]).
    """
    self.next_obj = next_obj
    dims = placing_utils.get_bbox_dims(next_obj)
    self.next_obj_height, self.next_obj_width = dims
def update_seq_idx(self):
    """Advance to the next position in the placement sequence."""
    self.seq_idx = self.seq_idx + 1
def update_prev_objs(self, placement_loc, obj):
"""
Update the arrays containing placements that have already been made
Inputs
placement_loc (np.array): shape (2,) or 1x2 of the (y,x) coordinate
of where the placement that was just made was
obj (np.array): shape (4,) array describing the bounding
box of the object that was just placed.
Format (in pixel space):
- [0],[1] x_min and y_min coordinates
- [2],[3] x_max and y_max coordinates
NOTE: This is values 1-4 of a yolo label so: YOLO_label[1:5]
"""
#TODO change the prev_objs so the bounding box is at actual placement loc
self.prev_objs = np.vstack((self.prev_objs, obj.reshape(1,4)))
self.prev_obj_centers = np.vstack((self.prev_obj_centers, placement_loc.reshape(1,2)))
obj_height, obj_width = placing_utils.get_bbox_dims(obj)
temp_dims = np.array([obj_height, obj_width]).reshape(1,2)
self.prev_obj_dims = np.vstack((self.prev_obj_dims, temp_dims))
def update_image(self, next_obj_img, loc, alpha_mask=None, background_image=None, viz=False):
"""
Paste the object onto self.img at given location
Inputs:
next_obj_img (np.array): the image of the next object to be placed.
it should already be sized to fit on self.img and should have a
4th channel alpha layer specifiying the transparency. Can use
alpha_mask instead of the 4th channel
loc (np.array): the (y,x) coordinates of the placement
alpha_mask (np.array): the alpha layer specifiying the transparency
of each pixel. should be the same (h,w) size as self.img
background_img (np.array): can replace self.img with this argument
and next_obj_img will be pasted onto that instead
viz (bool): if True, the updated image will be shown
"""
# update image and make placement
if background_image is not None:
assert self.img.shape == background_image.shape
self.img = background_image
else:
self.img = image_utils.paste_image(self.img, next_obj_img, loc, alpha_mask)
#TODO put a check here to see if the image is getting pasted outside of image range
if viz:
plt.imshow(self.img)
plt.show()
def rand_samples(self, num_samples):
"""
Randomly generates array of pixel coordinates to be sampled from
Outputs:
samples (np.array): is a Nx2 array, where each row gives
the Y, X coordinates (height/width)
"""
x1 = int(self.plate[1] + self.next_obj_width/2)
y1 = int(self.plate[2] + self.next_obj_height/2)
x2 = int(self.plate[3] - self.next_obj_width/2)
y2 = int(self.plate[4] - self.next_obj_height/2)
#get a coordinate map of the image pixels
imgX = np.arange(self.img.shape[1])
imgY = np.arange(self.img.shape[0])
meshX, meshY = np.meshgrid(imgX, imgY)
#get coordinate map of the plate
sample_areaX = meshX[y1:y2,x1:x2]
sample_areaY = meshY[y1:y2,x1:x2]
#create the random sample points
pattern = np.random.randint(0, sample_areaX.shape[0]*sample_areaX.shape[1], num_samples)
patternX = pattern % sample_areaX.shape[1]
patternY = pattern // sample_areaX.shape[1]
#instantiate array of random sample coordinates
samples = np.zeros((num_samples,2))
samples[:,0] = sample_areaY[patternY, patternX]
samples[:,1] = sample_areaX[patternY, patternX]
return samples
def get_sample_values(self, samples, key, ref_idx):
"""
Helper function to call functions that will get the delta values
"""
assert type(key) == str
if 'dc' in key:
return self.d_centers(samples, ref_idx)
elif 'dp' in key:
return self.d_plate(samples)
elif 'de' in key:
return self.d_edges(samples, ref_idx)
else:
raise ValueError('Invalid key')
def d_centers(self, samples, ref_idx):
"""
Takes the randomly sampled pixles and returns the distance in meters
between the center of an object in the sequence and the samples
Outputs:
dcx (np.array): size N array, where N is the number of samples,
gives distance between centers in horizontal direction
dcy (np.array): size N array, where N is the number of samples,
gives distance between centers in vertical direction
ref_idx (int): the sequence index of the object in the sequence
to take the distance to. Zero indexed, so 0 is initial object.
"""
obj_center = self.prev_obj_centers[ref_idx, :]
dcx = (samples[:,1] - obj_center[1])*self.ratio
dcy = (samples[:,0] - obj_center[0])*self.ratio
return np.hstack((dcx.reshape(-1,1), dcy.reshape(-1,1)))
def d_plate(self, samples):
"""
Takes the randomly sampled pixles and returns the distance in meters
between the plate/cutting board center and the samples' centers
Outputs:
dpx (np.array): size N array, where N is the number of samples,
gives distance between centers in horizontal direction
dpy (np.array): size N array, where N is the number of samples,
gives distance between centers in vertical direction
"""
dpx = (self.plate_centerx - samples[:,1])*self.ratio
dpy = (self.plate_centery - samples[:,0])*self.ratio
return np.hstack((dpx.reshape(-1,1), dpy.reshape(-1,1)))
def d_edges(self, samples, ref_idx):
"""
Takes the randomly sampled pixles and returns the distance in meters
between self.obj's and the samples' bottom right edges (xmax,ymax)
Outputs:
dex (np.array): size N array, where N is the number of samples,
gives distance between centers in horizontal direction
dey (np.array): size N array, where N is the number of samples,
gives distance between centers in vertical direction
ref_idx (int): the sequence index of the object in the sequence
to take the distance to. Zero indexed, so 0 is initial object.
"""
dex = (self.prev_objs[ref_idx, 2] - (samples[:,1] + self.next_obj_width/2))*self.ratio
dey = (self.prev_objs[ref_idx, 3] - (samples[:,0] + self.next_obj_height/2))*self.ratio
return np.hstack((dex.reshape(-1,1), dey.reshape(-1,1)))
def make_2D_predictions(self, next_obj, seq_idx=None, num_neighbors=1, mode=['dc', 'de', 'dp'],
num_samples=None, seq_weights='relative', feat_weights=None, future=False,
viz_figs=False, save_fig_name=None, norm_feats=False, fig_title=None):
"""
Get the position of where to place the next object in the sequence.
Return object placement with highest score, using 2-D gaussians
Inputs:
next_obj (np.array): shape (4,) array describing the bounding
box of the next object to place. This is just for its dimensions.
Format (in pixel space):
- [0],[1] x_min and y_min coordinates
- [2],[3] x_max and y_max coordinates
NOTE: This is values 1-4 of a yolo label so: YOLO_label[1:5]
num_neighbors (int): the number of neighbors in the sequence to
take into account when making the prediction. e.g. for a
seq_idx=3 and num_neighbors=2, the prediction will be based
on the guassians created from the data of the distance between
the seq_idx=3 to seq_idx=2 and seq_idx=3 and seq_idx=1.
mode (string): string specifying how to score. Can be
"dc, "dp", "de", or a combination of the 3.
if passing more than one, put inside a list.
Can also pass in "all" to score based on all 3.
See gaussian.SequenceGMMs.fit_gaussians for more info.
num_samples (int): number of pixels to sample from when calculating
score for cross entropy optimization. Uses all pixels in image if None
seq_weights (array_like): the weights to use for adding the scores from
different sequence indexes. Should be of the shape (num_neighbors,).
Can give the string 'relative' to way indexes that are closer more.
feat_weights (array-like): weights to use when summing the different features.
Should of the shape (num_features,)
future (bool): whether to take into account future placements.
By default, the predictions are made from the gaussians with
a sequence index < given seq_idx. If true then the neighbors
can include indexes > given seq_idx.
viz_figs (bool): whether to visualize the 2d Gaussian
save_fig_name (str): name to save figure of gaussian as, leave as None to not save
norm_feats (bool): whether to average the weights features if they
have more than one gaussian to score.
The maximum number of previous objects you want
to take into account, ie if you provide a n > num_neighbors, then
n = num_neighbors.
n (int): number of previously placed objects to look back
at. (see collect_data.gather_data)
Outputs:
output is a 1-D array of 2 elements, the (x,y) coordinates
of the sample with the highest score, in image coordinates
"""
# Check format of arguments
if type(mode) is not list:
mode = list([mode])
if 'all' in mode:
mode = ['dc', 'dp', 'de']
if viz_figs or save_fig_name is not None:
assert num_samples is None
if feat_weights is not None:
assert len(feat_weights) == len(mode)
else:
feat_weights = np.ones(len(mode))
if seq_idx is None:
seq_idx = self.seq_idx
else:
assert seq_idx > 0
if seq_idx > self.prev_objs.shape[0]+1:
print(f'WARNING: Making placement for sequence index {seq_idx}, but only {self.prev_objs.shape[0]} placements have been made.')
if num_samples is None:
w, h = self.img.shape[1], self.img.shape[0]
y, x = np.mgrid[0:h, 0:w]
samples = np.stack((y.ravel(), x.ravel())).T
num_samples = samples.shape[0]
else:
samples = self.rand_samples(num_samples)
print(f'Making prediction for sequence index {self.seq_idx}...')
self.update_next_obj(next_obj)
feature_scores = []
total_score = np.zeros(num_samples)
for i, feature in enumerate(mode):
sample_values = self.get_sample_values(samples, feature, seq_idx-1)
if 'dp' in feature:
#TODO this weights the 'dp' feature a lot less than the others since the
#'dc' and 'de' features have a set of gaussians for each seq_idx and 'dp' only has one
# this is only if the seq_weights are not normalized though I think
# NOTE the second seq_idx doesn't matter in the below, they are all the same value
feat_scores = np.exp(self.gaussians[feature][seq_idx][0].score_samples(sample_values))
# TODO you aren't using the score samples function here, you're just calling the gaussain directly
assert np.sum(feat_scores) != 0
else:
max_seq_len = len(list(self.gaussians[feature].keys()))
# make predictions on sequence indexes not contained in training data
if seq_idx >= max_seq_len:
print(f'WARNING: Making placement for sequence index value that does not exist in training data. Clipping value, you can make prediction with other sequence indexes.')
#TODO might want to change to mode to ignore dp for predictions > max_seq_length
seq_idx = max_seq_len - 1
if (num_neighbors is None) or (future and num_neighbors > (max_seq_len-1)):
print('WARNING: Training data does not contain sequence lengths large enough for given number of neighbors, clipping value.')
n_neighbors = max_seq_len - 1
elif num_neighbors > (seq_idx):
print('WARNING: Not enough predecessors for given number of neighbors, clipping value.')
n_neighbors = seq_idx
if 'relative' in seq_weights:
s_weights = placing_utils.get_relative_weights(n_neighbors, exponent=2, normalize=True)
elif seq_weights is not None:
assert len(seq_weights) >= n_neighbors
s_weights = seq_weights
else:
s_weights = np.ones(n_neighbors)
if future:
neighbors = list(self.gaussians[feature][seq_idx].keys())
neighbors = placing_utils.get_n_nearest(seq_idx, neighbors, n_neighbors, remove_value=False) # value has already been removed
else:
neighbors = np.arange(1, seq_idx+1)[-n_neighbors:]
feat_scores = np.zeros(num_samples)
for j, ref_idx in enumerate(neighbors):
# Get the samples to score
#TODO you are using the score samples functions, you're just calling the gaussian directly
temp_score = s_weights[j] * np.exp(self.gaussians[feature][seq_idx][ref_idx].score_samples(sample_values))
assert np.sum(temp_score) != 0
feat_scores += temp_score
if norm_feats:
feat_scores /= n_neighbors #TODO
total_score += feat_weights[i]*feat_scores
winner = np.argmax(total_score)
placement_loc = samples[winner, :] # TODO double check that this is right format, (y,x)
# update arrays
self.update_prev_objs(placement_loc, self.next_obj)
self.update_seq_idx()
if viz_figs or save_fig_name is not None:
Z = (-total_score).reshape(self.img.shape[:2])
_ = self.plot_2D_gaussian(Z, mode=mode, viz=viz_figs, save_path=save_fig_name, title=fig_title, convert=False)
return placement_loc
def plot_2D_gaussian(self, scores, mode, viz=True, save_path=None, title=None, convert=True):
"""
Plots the mulivariate, multimodal gaussian
Inputs:
scores (np.array): Array of scores for each pixel in self.img.
It should be the same shape as the image
mode (string): string specifying how to score. Can be
"dc, "dp", "de", or a combination of the 3.
if passing more than one, put inside a list.
Can also pass in "all" to score based on all 3
viz (bool): whether to show the figure
save_path (string): Path to save the plot to, set to None
to just display the figure
"""
#Use base cmap to create transparent
mycmap = heatmap.transparent_cmap(plt.cm.inferno)
img = self.img.copy()
if convert:
img = img[:,:,::-1] #convert BGR to RGB
w, h = img.shape[1], img.shape[0]
y, x = np.mgrid[0:h, 0:w]
#Plot image and overlay colormap
plt.close()
fig, ax = plt.subplots(1, 1)
plt.imshow(img)
# CB = ax.contour(x, y, Z, norm=LogNorm(vmin=0.001, vmax=1000.0),
# levels=np.logspace(0, 3, 10), cmap=mycmap, extend='min')
#TODO fix this log scale for the new predictions (9/29/20)
CB = ax.contour(x, y, scores, norm=Normalize(),#LogNorm(),#vmin=np.min(Z), vmax=np.max(Z)),
levels=100, cmap=mycmap)#, extend='min')
# import ipdb; ipdb.set_trace()
# CB = ax.contour(x, y, Z, norm=LogNorm(vmin=1, vmax=10000.0),
# levels=np.logspace(1, 4, 10), cmap=mycmap, extend='min')
plt.colorbar(CB)
plt.title(title)
if save_path is not None:
plt.savefig(f'{save_path}')
if viz:
plt.show()
return fig
class LocalGMMPredictions(GMMSequencePredictions):
    def __init__(self, init_obj, plate, ref_plate_dims, image,
                 obj_image, obj_mask=None, init_loc=None, transform=None):
        """
        Makes placement predictions by scoring pixel samples against locally
        weighted GMM/KDE models (see make_2D_predictions).
        Inputs:
            init_obj (np.array): shape (4,) array describing the bounding
                box of the object that was used to make the initial placement.
                Format (in pixel space):
                    - [0],[1] x_min and y_min coordinates
                    - [2],[3] x_max and y_max coordinates
                NOTE: This is values 1-4 of a yolo label so: YOLO_label[1:5]
            plate (np.array): shape (4,) array describing the bounding box of the
                plate/cutting board that was used to make the initial placement.
                Same format as init_obj.
            ref_plate_dims (list or int): info that describes the plate's real-world
                dimensions in meters. Either an int (0 or 1) specifying which cutting
                board was used (0 is smaller and 1 is larger) or a list of
                len 2 containing the width and height of the cutting board
                in meters
            image (np.array): the background image of the scene to make placements on
            obj_image (np.array): the image of the initial object being placed
            obj_mask (np.array): the alpha layer mask of the initial object being
                placed. #TODO should remove this at some point since new image format has alpha layer as 4th channel
            init_loc (np.array): shape (2,) giving the (y,x) coordinate of where to
                place the initial object if init_obj doesn't represent the placement
                location and is only for the dimensions
            transform (TODO): not implemented; must be None
        """
        self.seq_idx = 1
        # get dimensions of initial object
        if init_loc is not None:
            init_height, init_width = placing_utils.get_bbox_dims(init_obj)
            init_centerx = init_loc[1]
            init_centery = init_loc[0]
        else:
            init_centerx, init_centery, init_height, init_width = placing_utils.get_bbox_center(init_obj, return_dims=True)
        # array of the positions of the previous objs in the sequence
        self.prev_obj_centers = np.array([init_centery, init_centerx]).reshape(1, 2)
        self.prev_obj_dims = np.array([init_height, init_width]).reshape(1, 2)
        self.prev_objs = np.array([init_obj]).reshape(1, 4)
        # dimensions of the plate or cutting board descriptor that object is on
        # NOTE(review): this unpack order (width, height, centerx, centery) differs
        # from the init_obj unpack above (centerx, centery, height, width) --
        # confirm get_bbox_center's actual return order
        self.plate = plate
        self.plate_width, self.plate_height, self.plate_centerx, self.plate_centery = placing_utils.get_bbox_center(plate, return_dims=True)
        # Make the image of the initial placement
        self.img = image_utils.paste_image(image, obj_image, np.array([init_centery, init_centerx]), obj_mask)
        # assign the cutting board dimensions
        assert type(ref_plate_dims) is list or type(ref_plate_dims) is int
        if type(ref_plate_dims) == int:
            assert 0 <= ref_plate_dims < 2
            if ref_plate_dims == 0:
                plate_ref_width = 0.358
                # BUGFIX: was assigned to a misspelled name ('plate_ref_heigsht'),
                # leaving plate_ref_height undefined on this branch
                plate_ref_height = 0.280
            elif ref_plate_dims == 1:
                plate_ref_width = 0.455
                plate_ref_height = 0.304
        else:
            assert len(ref_plate_dims) == 2
            plate_ref_width = ref_plate_dims[0]
            plate_ref_height = ref_plate_dims[1]
        # ratio to convert pixels to meters (meters/pixels)
        if transform is None:
            self.ratio = plate_ref_width/self.plate_width
        else:
            #TODO get the camera intrinsics/extrinsics to get more accurate conversion
            raise ValueError('Not implemented yet')
        # initialize the variables for next object to be placed
        self.next_obj = None
        self.next_obj_width = None
        self.next_obj_height = None
    #TODO DOES MODE NEED TO HAVE dp in the MIDDLE
    def make_2D_predictions(self, next_obj, gaussians, seq_idx=None, n_time_neighbors=1,
                            n_pos_neighbors=10, mode=['dc', 'de', 'dp'], num_samples=None,
                            time_neighbor_weights='relative', pos_neighbor_weights='relative',
                            feat_weights=None, future=False, viz_figs=False, save_fig_name=None,
                            norm_feats=False, fig_title=None, num_processes=4, bandwidth_samples=50):
        """
        Get the position of where to place the next object in the sequence.
        Return object placement with highest score, using 2-D gaussians.
        Inputs:
            next_obj (np.array): shape (4,) array describing the bounding
                box of the next object to place. This is just for its dimensions.
                Format (in pixel space):
                    - [0],[1] x_min and y_min coordinates
                    - [2],[3] x_max and y_max coordinates
                NOTE: This is values 1-4 of a yolo label so: YOLO_label[1:5]
            gaussians: gaussian.LocallyWeightedGMMs object, containing the training data
            seq_idx (int): the sequence index to make the prediction for. self.seq_idx is
                used by default, so leave this as None unless you wish to override it.
            n_time_neighbors (int): the number of neighbors in the sequence to
                take into account when making the prediction. e.g. for
                seq_idx=3 and 2 neighbors, the prediction will be based
                on the gaussians created from the data of the distance between
                seq_idx=3 to seq_idx=2 and seq_idx=3 and seq_idx=1.
            n_pos_neighbors (int): number of neighbors to include w.r.t. their
                spatial distance (i.e. the dp values)
            mode (string or list): string specifying how to score. Can be
                "dc", "dp", "de", or a combination of the 3; if passing more
                than one, put inside a list. "all" scores based on all 3.
                See gaussian.SequenceGMMs.fit_gaussians for more info.
            num_samples (int): number of pixels to sample from when calculating
                scores. Uses all pixels in image if None.
            time_neighbor_weights (np.array, str, or None): per-sample weights for the
                temporal neighbor data points, applied in the order given. Set to
                ones if None. The string 'relative' weights closer indexes more.
            pos_neighbor_weights (np.array, str, or None): per-sample weights for the
                spatial neighbor data points, applied in the order given. Set to
                ones if None. The string 'relative' weights spatially closer
                neighbors (dp values) more.
            feat_weights (array-like): weights to use when summing the different
                features. Should be of the shape (num_features,); None uses ones.
            future (bool): whether to take into account future placements.
                By default, the predictions are made from the gaussians with
                a sequence index < given seq_idx. If true then the neighbors
                can include indexes > given seq_idx.
            viz_figs (bool): whether to visualize the 2d Gaussian
            save_fig_name (str): name to save figure of gaussian as, leave as None to not save
            norm_feats (bool): whether to average the weighted features if they
                have more than one gaussian to score
            fig_title (str): string to use for the figure title if viz_figs or save_fig_name is used
            num_processes (int): number of CPU processes to use for sample scoring
            bandwidth_samples (int): the number of bandwidth values to use for cross
                validation if an optimal bandwidth value needs to be calculated
        Outputs:
            placement_loc: 1-D array of 2 elements, the (y, x) pixel
                coordinates of the sample with the highest score
        """
        # Check format of arguments
        if type(mode) is not list:
            mode = list([mode])
        if 'all' in mode:
            mode = ['dc', 'dp', 'de']
        if viz_figs or save_fig_name is not None:
            assert num_samples is None
        if feat_weights is not None:
            assert len(feat_weights) == len(mode)
        else:
            feat_weights = np.ones(len(mode))
        if seq_idx is None:
            seq_idx = self.seq_idx
            #TODO need to put checks here to cap the seq_idx
        else:
            assert seq_idx > 0
            if seq_idx > self.prev_objs.shape[0]+1:
                print(f'WARNING: Making placement for sequence index {seq_idx}, but only {self.prev_objs.shape[0]} placements have been made.')
        if num_samples is None:
            # sample across entire image if a range isn't given
            w, h = self.img.shape[1], self.img.shape[0]
            y, x = np.mgrid[0:h, 0:w]
            samples = np.stack((y.ravel(), x.ravel())).T
            num_samples = samples.shape[0]
        else:
            samples = self.rand_samples(num_samples)
        print(f'Making prediction for sequence index {self.seq_idx}...')
        self.update_next_obj(next_obj)
        total_score = np.zeros(num_samples)
        # TODO probably a better way to do the below line
        ref_dp = self.get_sample_values(self.prev_obj_centers[seq_idx-1,:].reshape(1,2), 'dp', seq_idx-1)
        # Get the indices of the relevant data samples
        t_neighbor_idxs = gaussians.get_time_neighbor_data(seq_idx, n_time_neighbors, future=future)
        p_neighbor_idxs, p_neighbor_dist = gaussians.get_pos_neighbor_data(ref_dp, seq_idx, n_pos_neighbors)
        # Get the sample weights for temporal neighbor data points
        # BUGFIX: guard the 'relative' comparisons with isinstance(str) so an
        # ndarray of explicit weights cannot trigger an ambiguous elementwise compare
        if time_neighbor_weights is None:
            time_neighbor_weights = np.ones(t_neighbor_idxs.shape[0])
        elif isinstance(time_neighbor_weights, str) and time_neighbor_weights == 'relative':
            #TODO change this weighting to gaussian
            time_neighbor_weights = placing_utils.get_relative_weights(
                n_time_neighbors,
                exponent=2,
                normalize=True
            )
            # BUGFIX: np.repeat requires an integer repeat count; '/' yields a float
            time_neighbor_weights = np.repeat(time_neighbor_weights, int(t_neighbor_idxs.shape[0] / n_time_neighbors))
            time_neighbor_weights /= time_neighbor_weights.shape[0]
        assert time_neighbor_weights.shape[0] == t_neighbor_idxs.shape[0]
        # Get the sample weights for spatial neighbor data points
        if pos_neighbor_weights is None:
            pos_neighbor_weights = np.ones(p_neighbor_idxs.shape[0])
        elif isinstance(pos_neighbor_weights, str) and pos_neighbor_weights == 'relative':
            pos_neighbor_weights = placing_utils.get_relative_weights(
                n_pos_neighbors,
                delta_values=p_neighbor_dist,
                exponent=2,
                normalize=True
            )
            pos_neighbor_weights /= pos_neighbor_weights.shape[0]
        assert pos_neighbor_weights.shape[0] == p_neighbor_idxs.shape[0]
        for i, feature in enumerate(mode):
            sample_values = self.get_sample_values(samples, feature, seq_idx-1)
            max_seq_len = list(gaussians.data[feature].keys())[-1]
            #TODO double check these if statements
            if seq_idx >= max_seq_len:
                print(f'WARNING: Making placement for sequence index value that does not exist in training data. Clipping value, you can make prediction with other sequence indexes.')
                #TODO might want to change to mode to ignore dp for predictions > max_seq_length
                seq_idx = max_seq_len - 1
            if (n_time_neighbors is None) or (future and n_time_neighbors > (max_seq_len-1)):
                print('WARNING: Training data does not contain sequence lengths large enough for given number of neighbors, clipping value.')
                n_time_neighbors = max_seq_len - 1
            elif n_time_neighbors > (seq_idx):
                print('WARNING: Not enough predecessors for given number of neighbors, clipping value.')
                n_time_neighbors = seq_idx
            # Gaussian regression
            # BUGFIX: pass the documented bandwidth_samples parameter through
            # instead of a hardcoded 50 (default unchanged)
            gmm = gaussians.fit_gaussian(feature=feature,
                                         seq_idx=seq_idx,
                                         time_neighbor_idxs=t_neighbor_idxs,
                                         pos_neighbor_idxs=p_neighbor_idxs,
                                         mode='kde',
                                         future=future,
                                         bandwidths=None,
                                         covariance_type="full",
                                         time_neighbor_weights=time_neighbor_weights,
                                         pos_neighbor_weights=pos_neighbor_weights,
                                         kernel_type='gaussian',
                                         num_samples=bandwidth_samples,
                                         n_jobs=num_processes
                                         )
            # Score all of the sampled placement locations, split code for multiprocessing
            sample_values = np.array_split(sample_values, num_processes, axis=0)
            with Pool(processes=num_processes) as p:
                feat_scores = p.map(gmm.score_samples, sample_values)
            # score_samples returns log-densities; exponentiate before summing
            feat_scores = [np.exp(scores) for scores in feat_scores]
            feat_scores = np.concatenate(feat_scores, axis=0)
            assert np.sum(feat_scores) != 0
            total_score += feat_weights[i]*feat_scores
        winner = np.argmax(total_score)
        placement_loc = samples[winner, :]
        # update arrays
        self.update_prev_objs(placement_loc, self.next_obj)
        self.update_seq_idx()
        if viz_figs or save_fig_name is not None:
            Z = (-total_score).reshape(self.img.shape[:2])
            _ = self.plot_2D_gaussian(Z, mode=mode, viz=viz_figs, save_path=save_fig_name, title=fig_title, convert=False)
        return placement_loc
class Prediction:
def __init__(self, num_samples, last_obj, plate, plate_dims, image,
new_obj, scoring, n_objs):
"""
Uses cross entropy optimization to get a single placement prediction
Inputs:
num_samples (int): number of pixels to sample from when calculating
score for cross entropy optimization
last_obj (np.array): 1-D array with 8 values describing the object
was just placed on the board/plate (ie. obj label)
plate (np.array): 1-D array with 8 values describing the cutting
board or plate detected in image (ie. plate/board label)
plate_dims (list or int): info that describes plates real world
dimensions. either an int (0 or 1) specifying which cutting
board was used (0 is smaller and 1 in larger) or a list of
len 2 containing the width and height of the cutting board
jin meters
image (np.array): the image of the scene
new_obj (np.array): same as last_obj, but is the object to be placed,
only being used for its height and width measurements
scoring: class object to use for scoring. class should have
a 'forward' method that outputs score values
n_obj (list): the list of the previous n objects that were placed
in this sequence. in the format nx2, where each row is (y,x) coordinate
see collect_data.ImageData for more info on n
"""
self.num_samples = int(num_samples) #number of iterations/samples to do
#get dimension of the most recently identified object
self.last_obj = last_obj
self.last_obj_width = last_obj[3] - last_obj[1]
self.last_obj_height = last_obj[4] - last_obj[2]
self.last_obj_centerx = last_obj[1] + self.last_obj_width/2
self.last_obj_centery = last_obj[2] + self.last_obj_height/2
#dimensions of the plate or cutting board descriptor that object is on
self.plate = plate
self.plate_width = plate[3] - plate[1]
self.plate_height = plate[4] - plate[2]
self.plate_centerx = plate[1] + self.plate_width/2
self.plate_centery = plate[2] + self.plate_height/2
#the image of the object that was just placed
self.img = image
#assign the cutting obard dimensions
assert type(plate_dims) is list or type(plate_dims) is int
if type(plate_dims) == int:
assert 0 <= plate_dims < 2
if plate_dims == 0:
self.plate_dims_width = 0.358
self.plate_dims_height = 0.280
elif plate_dims == 1:
self.plate_dims_width = 0.455
self.plate_dims_height = 0.304
else:
assert len(plate_dims) == 2
self.plate_dims_width = plate_dims[0]
self.plate_dims_height = plate_dims[1]
#ratio to convert pixels to meters (meters/pixels)
self.ratio = self.plate_dims_width/self.plate_width
#get dimensions of object to be placed
self.new_obj = new_obj
self.new_obj_width = new_obj[3] - new_obj[1]
self.new_obj_height = new_obj[4] - new_obj[2]
self.score = scoring
#make all of the random samples
self.samples = self.rand_sample()
self.n_objs = n_objs
def rand_sample(self):
"""
Randomly generates array of pixel coordinates to be sampled from
Outputs:
self.samples (np.array): is a Nx2 array, where each row gives
the Y, X coordinates (height/width)
"""
x1 = int(self.plate[1] + self.new_obj_width/2)
y1 = int(self.plate[2] + self.new_obj_height/2)
x2 = int(self.plate[3] - self.new_obj_width/2)
y2 = int(self.plate[4] - self.new_obj_height/2)
#get a coordinate map of the image pixels
imgX = np.arange(self.img.shape[1])
imgY = np.arange(self.img.shape[0])
meshX, meshY = np.meshgrid(imgX, imgY)
#get coordinate map of the plate
sample_areaX = meshX[y1:y2,x1:x2]
sample_areaY = meshY[y1:y2,x1:x2]
#create the random sample points
pattern = np.random.randint(0, sample_areaX.shape[0]*sample_areaX.shape[1], self.num_samples)
patternX = pattern % sample_areaX.shape[1]
patternY = pattern // sample_areaX.shape[1]
#instantiate array of random sample coordinates
samples = np.zeros((self.num_samples,2))
samples[:,0] = sample_areaY[patternY, patternX]
samples[:,1] = sample_areaX[patternY, patternX]
return samples
def delta_centers(self, n=1):
"""
Takes the randomly sampled pixles and returns the distance in meters
between self.obj's center and the samples
Outputs:
dcx (np.array): size N array, where N is the number of samples,
gives distance between centers in horizontal direction
dcy (np.array): size N array, where N is the number of samples,
gives distance between centers in vertical direction
dcn (list): list of length n, where each item in list is a Nx2
array of the dcy and dcx values (y,x pairs)
"""
dcx, dcy = self.d_centers(self.samples[:,1], self.samples[:,0])
if n == 1:
return list([np.hstack((dcy.reshape(-1,1), dcx.reshape(-1,1)))])
if n > 1:
# add all of the previous n's
dcn = []
# assuming n starts at 1
for i in range(n-1):
temp_dcx = (self.samples[:,1] - self.n_objs[i,1])*self.ratio
temp_dcy = (self.samples[:,0] - self.n_objs[i,0])*self.ratio
temp = np.hstack((temp_dcy.reshape(-1,1), temp_dcx.reshape(-1,1)))
dcn.append(temp)
# append the current n
temp = np.hstack((dcy.reshape(-1,1), dcx.reshape(-1,1)))
dcn.append(temp)
return dcn
def delta_plate(self, n=1):
"""
Takes the randomly sampled pixles and returns the distance in meters
between the plate/cutting board center and the samples
Outputs:
dpx (np.array): size N array, where N is the number of samples,
gives distance between centers in horizontal direction
dpy (np.array): size N array, where N is the number of samples,
gives distance between centers in vertical direction
dpn (list): list of length n, where each item in list is a Nx2
array of the dpy and dpx values (y,x pairs)
"""
dpx, dpy = self.d_plate(self.samples[:,1], self.samples[:,0])
if n == 1:
return list([np.hstack((dpy.reshape(-1,1), dpx.reshape(-1,1)))])
if n > 1:
dpn = []
# assuming n starts at 1
for i in range(n-1):
temp_dpx = (self.plate_centerx - self.samples[:,1])*self.ratio
temp_dpy = (self.plate_centery - self.samples[:,0])*self.ratio
temp = np.hstack((temp_dpy.reshape(-1,1), temp_dpx.reshape(-1,1)))
dpn.append(temp)
temp = np.hstack((dpy.reshape(-1,1), dpx.reshape(-1,1)))
dpn.append(temp)
return dpn
def delta_edge(self, n=1):
"""
Takes the randomly sampled pixles and returns the distance in meters
between self.obj's and the samples' bottom right edges (xmax,ymax)
Outputs:
dex (np.array): size N array, where N is the number of samples,
gives distance between centers in horizontal direction
dey (np.array): size N array, where N is the number of samples,
gives distance between centers in vertical direction
den (list): list of length n, where each item in list is a Nx2
array of the dey and dex values (y,x pairs)
"""
dex, dey = self.d_edge(self.samples[:,1], self.samples[:,0])
if n == 1:
return list([np.hstack((dey.reshape(-1,1), dex.reshape(-1,1)))])
if n > 1:
den = []
# assuming n starts at 1
for i in range(n-1):
temp_dex = ((self.n_objs[i,1] + self.new_obj_width/2) -
(self.samples[:,1] + self.new_obj_width/2))*self.ratio
temp_dey = ((self.n_objs[i,0] + self.new_obj_height/2) -
(self.samples[:,0] + self.new_obj_height/2))*self.ratio
temp = np.hstack((temp_dey.reshape(-1,1), temp_dex.reshape(-1,1)))
den.append(temp)
temp = np.hstack((dey.reshape(-1,1), dex.reshape(-1,1)))
den.append(temp)
return den
def winner(self, mode, n=1, logprob=False, epsilon=1e-8):
    """
    **DEPRECATING**
    Return object placement with highest score
    Inputs:
        mode (string): string specifying how to score. Can be "dcx",
            "dcy", "dpx", "dpy", "dex", "dey", or a combination of the 6.
            if passing more than one, put inside a list.
            Can also pass in "all" to score based on all 6
        n (int): number of previously placed objects to look back
            at. (see collect_data.gather_data)
        logprob (bool): if true use sum of logs for scoring
        epsilon (float): added before taking the log to avoid log(0)
    Outputs:
        output is a 1-D array of 2 elements, the (x,y) coordinates
        of the sample with the highest score, in image coordinates
    NOTE if using KDE, should probably set logprob to False, it already returns log
    """
    # Accept a bare string as well as a list of mode strings.
    if type(mode) is not list:
        mode = list([mode])
    # NOTE(review): these are called without n, so each list holds a single
    # element; dc[i]/dp[i]/de[i] below would raise IndexError for n > 1 -- confirm.
    dc = self.delta_centers()
    dp = self.delta_plate()
    de = self.delta_edge()
    total_score = np.zeros(dc[0][:,1].shape)
    # Accumulate the per-measure scores over the last n placed objects.
    for i in range(n):
        dcx_score = 0
        dcy_score = 0
        dpx_score = 0
        dpy_score = 0
        dex_score = 0
        dey_score = 0
        if 'all' in mode:
            mode = list(['dcx', 'dcy', 'dpx', 'dpy', 'dex', 'dey'])
        # Each measure uses its own model index: 6*i + offset.
        if 'dcx' in mode:
            dcx_score = self.score.forward(dc[i][:,1], 6*i)
            assert np.sum(dcx_score) != 0
        if 'dcy' in mode:
            dcy_score = self.score.forward(dc[i][:,0], 6*i + 1)
            assert np.sum(dcy_score) != 0
        if 'dpx' in mode:
            dpx_score = self.score.forward(dp[i][:,1], 6*i + 2)
            assert np.sum(dpx_score) != 0
        if 'dpy' in mode:
            dpy_score = self.score.forward(dp[i][:,0], 6*i + 3)
            assert np.sum(dpy_score) != 0
        if 'dex' in mode:
            dex_score = self.score.forward(de[i][:,1], 6*i + 4)
            assert np.sum(dex_score) != 0
        if 'dey' in mode:
            dey_score = self.score.forward(de[i][:,0], 6*i + 5)
            assert np.sum(dey_score) != 0
        if logprob == True:
            # Sum of log-probabilities instead of raw probabilities.
            dcx_score = np.log(dcx_score + epsilon)
            dcy_score = np.log(dcy_score + epsilon)
            dpx_score = np.log(dpx_score + epsilon)
            dpy_score = np.log(dpy_score + epsilon)
            dex_score = np.log(dex_score + epsilon)
            dey_score = np.log(dey_score + epsilon)
        total_score = total_score + dcx_score + dcy_score + \
            dpx_score + dpy_score + dex_score + dey_score
    # NOTE: might want to normalize the values
    total_winner = np.argmax(total_score)
    return self.samples[total_winner, :]
def winner2D(self, mode, max_n, n=1, epsilon=1e-8):
    """
    **DEPRECATING**
    Return object placement with highest score, using 2-D gaussians
    NOTE: recommend using plot_2D_gaussian to get these outputs, This
    was not implemented ideally.
    Inputs:
        mode (string): string specifying how to score. Can be
            "dc", "dp", "de", or a combination of the 3.
            if passing more than one, put inside a list.
            Can also pass in "all" to score based on all 3
        max_n (int): The maximum number of previous objects you want
            to take into account, ie if you provide a n > max_n, then
            n = max_n.
        n (int): number of previously placed objects to look back
            at. (see collect_data.gather_data)
        epsilon (float): to prevent zero division
    Outputs:
        output is a 1-D array of 2 elements, the (x,y) coordinates
        of the sample with the highest score, in image coordinates
    """
    # Accept a bare string as well as a list of mode strings.
    if type(mode) is not list:
        mode = list([mode])
    assert n >= 0
    # Clamp the look-back window to max_n.
    if n > max_n:
        n = max_n
    dc = self.delta_centers(n=n)
    dp = self.delta_plate(n=n)
    de = self.delta_edge(n=n)
    total_score = np.zeros(self.num_samples)
    for i in range(n):
        dc_score = 0
        dp_score = 0
        de_score = 0
        if 'all' in mode:
            mode = list(['dc', 'dp', 'de'])
        # NOTE(review): dc[-i] with i == 0 is dc[0] (the oldest entry), not
        # the most recent one -- confirm the intended indexing.
        if 'dc' in mode:
            dc_score = self.score.forward(dc[-i], 1, i+1)
            assert np.sum(dc_score) != 0
        if 'dp' in mode:
            dp_score = self.score.forward(dp[-i], 2, i+1)
            assert np.sum(dp_score) != 0
        if 'de' in mode:
            de_score = self.score.forward(de[-i], 3, i+1)
            assert np.sum(de_score) != 0
        total_score = total_score + dc_score + dp_score + de_score
    total_winner = np.argmax(total_score)
    # Center of the most recently placed object, as a (y, x) row.
    last_n_objs = np.array([self.last_obj_centery,
                            self.last_obj_centerx]).reshape(-1,2)
    if n == 1:
        pass
    else:
        # Prepend the earlier object centers for n > 1.
        last_n_objs = np.vstack((self.n_objs, last_n_objs))
    return self.samples[total_winner, :], last_n_objs
def plot_prediction(self, prediction, width, height):
    """
    Display the input image with the predicted placement drawn as a red box.
    Inputs:
        prediction (np.array): 1-D array with 2 elements giving the
            predicted center coordinates
        width (int): width of the object to be placed, in pixels
        height (int): height of the object to be placed, in pixels
    """
    # NOTE(review): the first anchor coordinate subtracts height/2 and the
    # second subtracts width/2; this only matters for non-square objects.
    anchor = (prediction[1] - height / 2, prediction[0] - width / 2)
    outline = plt.Rectangle(anchor, width, height, linewidth=1,
                            edgecolor='r', fill=False)
    plt.close()
    plt.figure()
    rgb = self.img.copy()[:, :, ::-1]  # convert BGR to RGB for matplotlib
    plt.imshow(rgb)
    plt.gca().add_patch(outline)
    plt.show()
    return
def plot_2D_gaussian(self, mode, n, i=None, save_path=None):
    """
    Plots the multivariate, multimodal gaussian
    Inputs:
        mode (string): string specifying how to score. Can be
            "dc", "dp", "de", or a combination of the 3.
            if passing more than one, put inside a list.
            Can also pass in "all" to score based on all 3
        n (int): number of previously placed objects to look back
            at. (see collect_data.gather_data)
        i (int): for figure annotation if providing only one
            item in a sequence. i is the index of that item in the
            sequence
        save_path (string): Path to save the plot to, set to None
            to just display the figure
    Outputs:
        winner: (y, x) pixel coordinates of the highest-scoring pixel
        last_n_objs: array of previously placed object centers (y, x rows)
    """
    #Use base cmap to create transparent
    mycmap = heatmap.transparent_cmap(plt.cm.inferno)
    img = self.img.copy() # ground truth image
    img = img[:,:,::-1] #convert BGR to RGB
    w, h = img.shape[1], img.shape[0]
    # Evaluate the score at every pixel of the image.
    y, x = np.mgrid[0:h, 0:w]
    # y, x = np.mgrid[125:225, 175:275]
    dc_score = 0
    dp_score = 0
    de_score = 0
    measure = ''  # accumulates a label of the measures used, for the title
    if mode == 'all':
        mode = list(['dc', 'dp', 'de'])
    if 'dc' in mode:
        dcx, dcy = self.d_centers(x, y)
        inputs = np.array([dcx.ravel(), dcy.ravel()]).T
        # dc_score = self.score.forward(inputs, 1, n)
        dc_score = self.score.score_samples(inputs, 'dc', n)
        assert np.sum(dc_score) != 0
        measure = measure + '$\Delta$c '
    if 'dp' in mode:
        dpx, dpy = self.d_plate(x, y)
        inputs = np.array([dpx.ravel(), dpy.ravel()]).T
        # dp_score = self.score.forward(inputs, 2, n)
        dp_score = self.score.score_samples(inputs, 'dp', n)
        assert np.sum(dp_score) != 0
        measure = measure + '$\Delta$p '
    if 'de' in mode:
        dex, dey = self.d_edge(x, y)
        inputs = np.array([dex.ravel(), dey.ravel()]).T
        # de_score = self.score.forward(inputs, 3, n)
        de_score = self.score.score_samples(inputs, 'de', n)
        assert np.sum(de_score) != 0
        measure = measure + '$\Delta$e '
    Z = dc_score + dp_score + de_score
    assert np.sum(Z) != 0
    # Negate so Z is a negative log-likelihood surface (lower = better).
    Z = -Z
    Z = Z.reshape(y.shape)
    ######stuff for predictions###############
    # Best placement = pixel with the minimum negative log-likelihood.
    winner = np.argmin(Z)
    winner = utils.num2yx(winner, 416,416)
    last_n_objs = np.array([self.last_obj_centery,
                            self.last_obj_centerx]).reshape(-1,2)
    if n == 1:
        pass
    else:
        last_n_objs = np.vstack((self.n_objs, last_n_objs))
    ############################################
    #Plot image and overlay colormap
    plt.close()
    fig, ax = plt.subplots(1, 1)
    plt.imshow(img)
    # CB = ax.contour(x, y, Z, norm=LogNorm(vmin=0.001, vmax=1000.0),
    # levels=np.logspace(0, 3, 10), cmap=mycmap, extend='min')
    #for sony demo
    #TODO fix this log scale for the new predictions (9/29/20)
    CB = ax.contour(x, y, Z, norm=Normalize(),#LogNorm(),#vmin=np.min(Z), vmax=np.max(Z)),
                    levels=50, cmap=mycmap)#, extend='min')
    # import ipdb; ipdb.set_trace()
    # CB = ax.contour(x, y, Z, norm=LogNorm(vmin=1, vmax=10000.0),
    # levels=np.logspace(1, 4, 10), cmap=mycmap, extend='min')
    plt.colorbar(CB)
    plt.title(f'Normalized negative log-likelihood predicted by GMM \n Based on {measure} and n = {n}')
    if save_path is not None:
        if i is None:
            i = ''
        plt.savefig(f'{save_path}/figure{i}_gaussian.png')
    else:
        plt.show()
    return winner, last_n_objs
def d_centers(self, samplesx, samplesy):
    """
    Takes the randomly sampled pixels and returns the distance in meters
    between self.obj's center and the samples.
    Outputs:
        dcx (np.array): size N array, where N is the number of samples,
            gives distance between centers in horizontal direction
        dcy (np.array): size N array, where N is the number of samples,
            gives distance between centers in vertical direction
    """
    offset_x = samplesx - self.last_obj_centerx
    offset_y = samplesy - self.last_obj_centery
    # self.ratio converts pixel offsets to meters.
    return offset_x * self.ratio, offset_y * self.ratio
def d_plate(self, samplesx, samplesy):
    """
    Takes the randomly sampled pixels and returns the distance in meters
    between the plate/cutting board center and the samples' centers.
    Outputs:
        dpx (np.array): size N array, where N is the number of samples,
            gives distance between centers in horizontal direction
        dpy (np.array): size N array, where N is the number of samples,
            gives distance between centers in vertical direction
    """
    # Note the sign convention: plate center minus sample position.
    return ((self.plate_centerx - samplesx) * self.ratio,
            (self.plate_centery - samplesy) * self.ratio)
def d_edge(self, samplesx, samplesy):
    """
    Takes the randomly sampled pixels and returns the distance in meters
    between self.obj's and the samples' bottom right edges (xmax, ymax).
    Outputs:
        dex (np.array): size N array, where N is the number of samples,
            gives distance between edges in horizontal direction
        dey (np.array): size N array, where N is the number of samples,
            gives distance between edges in vertical direction
    """
    # Sample positions are centers; add half the new object's size to get
    # its bottom-right edge, then compare with the last object's edge
    # (last_obj[3] = xmax, last_obj[4] = ymax).
    half_w = self.new_obj_width / 2
    half_h = self.new_obj_height / 2
    dex = (self.last_obj[3] - (samplesx + half_w)) * self.ratio
    dey = (self.last_obj[4] - (samplesy + half_h)) * self.ratio
    return dex, dey
|
import volar, pprint, ConfigParser, unittest
class TestAdvAccountInfo(unittest.TestCase):
    """
    Validates the site data returned via the volar.sites() function for type and expected value.
    Also tests searching, sorting, and the bounds of pages
    """
    def setUp(self):
        # load settings
        c = ConfigParser.ConfigParser()
        c.read('sample.cfg') #note that self file is only for use with self script. however, you can copy its contents and self code to use in your own scripts
        base_url = c.get('settings','base_url')
        api_key = c.get('settings','api_key')
        secret = c.get('settings','secret')
        self.v = volar.Volar(base_url = base_url, api_key = api_key, secret = secret)

    def test_DefaultDataTypes(self):
        # Verify the type of every top-level field of a default sites() call.
        response = self.v.sites()
        self.assertTrue(response != False, 'Connection To Sites Failed')
        self.assertTrue(isinstance(response['item_count'], basestring), "Incorrect Type Returned for item_count (should be basestring)")
        self.assertTrue(isinstance(response['page'], int), "Incorrect Type Returned for page (should be int)")
        self.assertTrue(isinstance(response['per_page'], int), "Incorrect Type Returned for per_page (should be int)")
        self.assertTrue(isinstance(response['sort_by'], basestring), "Incorrect Type Returned for sort_by (should be basestring)")
        self.assertTrue(isinstance(response['sort_dir'], basestring), "Incorrect Type Returned for sort_dir (should be basestring)")
        self.assertTrue(response['id'] == None, "Incorrect Type Returned for id (should be None)")
        self.assertTrue(response['slug'] == None, "Incorrect Type Returned for slug (should be None)")
        self.assertTrue(response['title'] == None, "Incorrect Type Returned for title (should be None)")
        self.assertTrue(isinstance(response['sites'][0]['id'], int), "Incorrect Type Returned for sites[id] (should be int)")
        self.assertTrue(isinstance(response['sites'][0]['slug'], basestring), "Incorrect Type Returned for sites[slug] (should be basestring)")
        self.assertTrue(isinstance(response['sites'][0]['title'], basestring), "Incorrect Type Returned for sites[title] (should be basestring)")

    def test_ReturnedData(self):
        # Query with explicit parameters and check that they are echoed back.
        params = ({'page': 2, 'per_page': 30, 'sort_by': 'title', 'sort_dir': 'DESC',
                   'id': 1, 'slug': 'volar', 'title': 'Volar Video'})
        response = self.v.sites(params)
        self.assertTrue(response != False, 'Connection To Sites Failed')
        # NOTE(review): page is expected to be 1 even though page=2 was
        # requested -- presumably the filtered result fits on one page.
        self.assertEqual(1, response['page'], 'Incorrect Value Returned for page')
        self.assertEqual(str(params['per_page']), str(response['per_page']), 'Incorrect Value Returned for per_page')
        self.assertEqual(params['sort_by'], response['sort_by'], 'Incorrect Value Returned for sort_by')
        self.assertEqual(params['sort_dir'], response['sort_dir'], 'Incorrect Value Returned for sort_dir')
        self.assertEqual(str(params['id']), str(response['id']), 'Incorrect Value Returned for id')
        self.assertEqual(params['slug'], response['slug'], 'Incorrect Value Returned for slug')
        self.assertEqual(params['title'], response['title'], 'Incorrect Value Returned for title')

    def test_ResponseCorrectness(self):
        response = self.v.sites({'id': 1})
        self.assertTrue(len(response['sites']) <= 1, 'Found multiple sites with one id')
        response = self.v.sites({'site': 'volar', 'sort_by': 'id', 'sort_dir': 'ASC'})
        if len(response['sites']) >= 2:
            self.assertTrue(response['sites'][0]['id'] <= response['sites'][1]['id'], 'Sites Returned Out Of Order: id ASC')
            response = self.v.sites({'site': 'volar', 'sort_by': 'id', 'sort_dir': 'DESC'})
            # BUG FIX: descending order requires the first value to be >= the
            # second; the DESC assertions previously used <= and could never
            # detect a mis-sorted response.
            self.assertTrue(response['sites'][0]['id'] >= response['sites'][1]['id'], 'Sites Returned Out Of Order: id DESC')
            response = self.v.sites({'site': 'volar', 'sort_by': 'title', 'sort_dir': 'ASC'})
            self.assertTrue(response['sites'][0]['title'].lower() <= response['sites'][1]['title'].lower(), 'Sites Returned Out Of Order: title ASC')
            response = self.v.sites({'site': 'volar', 'sort_by': 'title', 'sort_dir': 'DESC'})
            self.assertTrue(response['sites'][0]['title'].lower() >= response['sites'][1]['title'].lower(), 'Sites Returned Out Of Order: title DESC')
            response = self.v.sites({'site': 'volar', 'sort_by': 'status', 'sort_dir': 'ASC'})
            self.assertTrue(response['sites'][0]['status'] <= response['sites'][1]['status'], 'Sites Returned Out Of Order: status ASC')
            response = self.v.sites({'site': 'volar', 'sort_by': 'status', 'sort_dir': 'DESC'})
            self.assertTrue(response['sites'][0]['status'] >= response['sites'][1]['status'], 'Sites Returned Out Of Order: status DESC')
        else:
            print('\nInsufficient results to test response ordering')

    def test_PerPageBounds(self):
        # per_page below, at, and above the documented limits.
        response = self.v.sites({'per_page': -1})
        self.assertTrue(len(response['sites']) >= 0, 'Response array is acting really weird')
        response = self.v.sites({'per_page': 1})
        self.assertTrue(len(response['sites']) <= 1, 'Page is too long, should be no longer than 1')
        response = self.v.sites({'per_page': 61})
        self.assertTrue(len(response['sites']) <= 50, 'Page is too long, should be no longer than 50')

    def test_Searches(self):
        # response = self.v.sites({'slug': 'vol'})
        # self.assertTrue(len(response['sites']) == 1, 'Search by slug failed')
        response = self.v.sites({'title': 'Vid'})
        # BUG FIX: corrected "Seach" typo in the failure message.
        self.assertTrue(len(response['sites']) == 1, 'Search by title failed')
if __name__ == '__main__':
    # Build a suite from this module's single test case and run it verbosely.
    suite = unittest.TestLoader().loadTestsFromTestCase(TestAdvAccountInfo)
    unittest.TextTestRunner(verbosity = 2).run(suite)
|
import grafo_lista
import grafo_matriz
# Interactive console menu for exercising the two graph implementations
# (adjacency list vs. adjacency matrix). User-facing strings are Portuguese.
grafo_orientado = False  # work with undirected graphs

# First menu: choose the underlying graph representation.
while True:
    print('___________________________')
    op = int(input('[1] Lista\n[2] Matriz\nOpção: '))
    print('\n___________________________')
    if op == 1:
        # Adjacency-list graph with 6 vertices.
        grafo = grafo_lista.grafo_lista(6, grafo_orientado)
        break
    elif op == 2:
        # Adjacency-matrix graph with 6 vertices.
        grafo = grafo_matriz.grafo_matriz(6, grafo_orientado)
        break
    else:
        print('\nOpção inválida!\n')

# Main menu loop: operate on the chosen graph until the user quits.
while True:
    print('\n___________________________')
    print('\n[1] CADASTRAR ARESTA\n[2] IMPRIMIR GRAFO\n[3] VERIFICAR GRAU DE VÉRTICE\n[4] VERIFICAR MAIOR GRAU\n[5] VERIFICAR LAÇOS\n[6] VERIFICAR PERCURSO DE EULLER\n[7] SAIR')
    op = int(input('\nInforme sua opção: '))
    print('\n___________________________')
    if op == 1:
        grafo.adicionar_aresta()
    elif op == 2:
        grafo.apresentar_grafo()
    elif op == 3:
        v = int(input('Informe o o vértice que deseja checar: '))
        if v in grafo.vertices:
            print('\nGrau do vértice {} é {}'.format(v,grafo.checar_grau(v)))
        else:
            print('\nVértice inválido!')
    elif op == 4:
        grafo.max_grau()
    elif op == 5:
        print('\nO grafo possui {} laços.'.format(grafo.num_lacos()))
    elif op == 6:
        grafo.graf_euller()
    elif op == 7:
        # NOTE(review): a depth-first search runs on the "exit" option --
        # looks like leftover debugging; confirm whether it is intentional.
        grafo.dfs()
        print('\nSaindo...')
        break
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2020-05-06 13:53
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Drop Room's auto id and promote its `timestamp` to the primary key."""

    dependencies = [
        ('chat_room', '0001_initial'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='room',
            name='id',
        ),
        migrations.AlterField(
            model_name='room',
            name='timestamp',
            # NOTE(review): using an auto_now_add timestamp as the primary key
            # risks collisions for rows created in the same instant -- confirm.
            field=models.DateTimeField(auto_now_add=True, primary_key=True, serialize=False),
        ),
    ]
|
def getGroup(arr):
    """Return a multiset (dict of counts) of the purely-alphabetic
    two-character substrings of *arr*, upper-cased."""
    counts = {}
    for first, second in zip(arr, arr[1:]):
        pair = (first + second).upper()
        if pair.isalpha():
            counts[pair] = counts.get(pair, 0) + 1
    return counts

def solution(str1, str2):
    """Jaccard similarity of the two strings' bigram multisets,
    scaled by 65536 and truncated to an int (65536 if both are empty)."""
    group1 = getGroup(str1)
    group2 = getGroup(str2)
    shared = set(group1) & set(group2)
    combined = set(group1) | set(group2)
    intersection = sum(min(group1[c], group2[c]) for c in shared)
    union = sum(max(group1.get(c, 0), group2.get(c, 0)) for c in combined)
    if union == 0:
        # Both multisets empty: defined as perfect similarity.
        return 65536
    return int(intersection / union * 65536)
import pygame
from src import Config
from src.Game import *
import menu
def main():
    """Create the game window and hand control to the menu.

    NOTE(review): pygame.init() is not called here before
    pygame.display.set_mode -- confirm initialization happens elsewhere
    (e.g. inside src or menu).
    """
    # Screen dimensions taken from the game configuration.
    display = pygame.display.set_mode((
        Config['game']['width'],
        Config['game']['height']
    ))
    # Window title.
    pygame.display.set_caption(Config['game']['caption'])
    # The Menu constructor takes over from here; its return value is unused.
    menus = menu.Menu(display)

if __name__ == '__main__':
    main()
import speech_recognition as speech_recog
import subprocess
def startDecod():  # convert audio to text (speech-to-text)
    """Re-encode 'new_file.ogg' to 16-bit PCM WAV via ffmpeg, then run
    Google speech recognition on the result and return the transcript."""
    # -y overwrites an existing new_file.wav without prompting.
    subprocess.call(['ffmpeg', '-i', 'new_file.ogg', '-c:a', 'pcm_s16le', 'new_file.wav','-y'])
    sample_audio = speech_recog.AudioFile('new_file.wav')
    recog = speech_recog.Recognizer()
    with sample_audio as audio_file:
        audio_content = recog.record(audio_file)
    # NOTE(review): 'en-Us' is unusual casing; the canonical tag is 'en-US' -- confirm.
    a = recog.recognize_google(audio_content, language="en-Us")
    return str(a)
|
#coding: utf-8
import os
template = 'aermod.inp'
def generate_from_template(template):
    """Generate output from the given template file.

    TODO: not implemented yet -- currently a no-op.
    Note: the parameter shadows the module-level `template` constant.
    """
    pass
|
"""
-- Wrapper class for applying a selected regularizer on either
the weight matrix W or the jacobians (coming)
-- All methods expect symbolic or shared variables
"""
import theano.tensor as T
# TODO: Add Jacobian regularizers (contractive autoencoder)
class Regularizers():
    """Maps a regularizer name to a builder of its symbolic expression."""

    def __init__(self, reg_op):
        """
        -- Takes a theano tensor and returns an algebraic expression for the
        regularization
        -- In particular, expects a matrix argument
        -- Assumes rows are n-dimensional vectors
        """
        self._reg_op = reg_op
        # Registry of the supported regularizers, keyed by name.
        self.reg_set = {'weight_decay_L1': self.weight_decay_L1,
                        'jacobian_L1': self.jacobian_L1,
                        'weight_decay_L2': self.weight_decay_L2,
                        'jacobian_L2': self.jacobian_L2}
        self._reg = None

    def weight_decay_L1(self, x):
        """Mean over rows of each row's L1 norm."""
        return T.mean(T.sum(abs(x), axis=1))

    def weight_decay_L2(self, x):
        """Mean over rows of each row's sum of squares."""
        return T.mean(T.sum(x ** 2, axis=1))

    def jacobian_L1(self, x):
        # TODO: contractive-autoencoder style Jacobian penalty.
        raise NotImplementedError('Jacobian regularizers not available yet.')

    def jacobian_L2(self, x):
        # TODO: contractive-autoencoder style Jacobian penalty.
        raise NotImplementedError('Jacobian regularizers not available yet.')

    def regularizer(self, x):
        """Build, cache, and return the expression for the selected regularizer."""
        builder = self.reg_set[self._reg_op]
        self._reg = builder(x)
        return self._reg
|
# Import required modules
import numpy as np
import tensorflow as tf
import torch
import gym
import matplotlib.pyplot as plt
import argparse
import os
from gym.spaces import Discrete, Box
from tf_utils import *
from spg_tf import *
from spg_torch import *
# Log-message prefixes.
E = '[ERROR]'
I = '[INFO]'
# Recognized values for the --framework flag.
TF = 'tensorflow'
PT = 'pytorch'
def train_one_epoch(sess):
    """Collect full episodes until at least `batch_size` observations are
    gathered, then take one simple-policy-gradient step.

    NOTE(review): relies on module-level names (env, batch_size, actions,
    obs_ph, act_ph, weights_ph, loss, train_op) that are not all defined in
    this file's visible code -- presumably they come from the star imports
    (tf_utils / spg_tf); confirm.

    Inputs:
        sess: active TensorFlow session holding the policy graph
    Returns:
        (batch_loss, batch_rews, batch_len): loss after the update, per-episode
        returns, and per-episode lengths for this batch
    """
    # Declaring variables to store epoch details
    batch_acts = []      # actions taken across the batch
    batch_len = []       # episode lengths
    batch_weights = []   # per-step weight = full return of its episode
    batch_rews = []      # episode returns
    batch_obs = []       # observations seen across the batch
    # Reset env
    obs = env.reset()
    done = False
    ep_rews = []
    # Only the first episode of each epoch is rendered.
    rendered_once_in_epoch = False
    while True:
        if not rendered_once_in_epoch:
            env.render()
        batch_obs.append(obs)
        # Sample an action from the policy for the current observation.
        act = sess.run([actions], feed_dict={obs_ph: obs.reshape(1 ,-1)})[0][0]
        # Take the action
        obs, rewards, done, info = env.step(act)
        # save action, reward
        batch_acts.append(act)
        ep_rews.append(rewards)
        if done:
            # Record info, as episode is complete
            ep_ret = sum(ep_rews)
            ep_len = len(ep_rews)
            batch_rews.append(ep_ret)
            batch_len.append(ep_len)
            # Every step of the episode is weighted by the episode's return.
            batch_weights += [ep_ret] * ep_len
            # Reset the environment
            obs, done, ep_rews = env.reset(), False, []
            rendered_once_in_epoch = True
            # Stop collecting once enough observations are in the batch.
            if batch_size < len(batch_obs):
                break
    # Single policy-gradient update over the whole collected batch.
    batch_loss, _ = sess.run([loss, train_op], feed_dict={obs_ph: np.array(batch_obs),
                                                          act_ph: np.array(batch_acts),
                                                          weights_ph: np.array(batch_weights)})
    return batch_loss, batch_rews, batch_len
if '__main__' == __name__:
    parser = argparse.ArgumentParser()
    # BUG FIX: `type=bool` treats any non-empty string (including "False") as
    # True; a store_true flag gives the intended on/off behavior.
    parser.add_argument('-t', '--train',
                        action='store_true',
                        default=False,
                        help='Set this flag if you want to train the model. \
                        Default: False')
    parser.add_argument('-g', '--graph',
                        type=str,
                        default='./graphs/CartPole-v0_graph.pb',
                        help='Path to the graph file')
    parser.add_argument('-il', '--input-layer',
                        type=str,
                        default='input',
                        help='The name of the input layer',)
    parser.add_argument('-ol', '--output-layer',
                        type=str,
                        default='output',
                        help='The name of the output layer',)
    parser.add_argument('-e', '--epochs',
                        type=int,
                        default=50,
                        help='The number of epochs')
    parser.add_argument('-gp', '--graph-path',
                        type=str,
                        default='./graphs/',
                        help='Path where the .pb file is saved!')
    parser.add_argument('-f', '--framework',
                        type=str,
                        default='tensorflow',
                        help='Framework to be used - TensorFlow or PyTorch')
    FLAGS, unparsed = parser.parse_known_args()
    # Arguments
    env_name = 'CartPole-v0'
    render = True
    # Create the env (use env_name so name and creation stay in sync)
    env = gym.make(env_name)
    # Get the action space size and observation space size
    act_size = env.action_space.n
    obs_size = env.observation_space.shape[0]
    print ('Action Space Size: {}'.format(act_size),
           '\nObservation Space Size: {}'.format(obs_size))
    # Choose the framework
    f = FLAGS.framework
    if f != TF and f != PT:
        # BUG FIX: message previously read "either tensorflow as pytorch".
        raise Exception('{}The value of framework can be either \
                        tensorflow or pytorch'.format(E))
    if not FLAGS.train:
        # Inference mode: a previously exported graph file is required.
        if not os.path.exists(FLAGS.graph):
            raise Exception('{}Path to the Graph file does not exists!'.format(E))
        if f == TF:
            test_with_tf(FLAGS)
        elif f == PT:
            test_with_torch(FLAGS)
    else:
        if f == TF:
            train_with_tf(FLAGS)
        elif f == PT:
            train_with_torch(FLAGS, obs_size, act_size)
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    # @param head, a ListNode
    # @param x, an integer
    # @return a ListNode
    def partition(self, head, x):
        """Stable-partition a singly linked list around pivot x.

        Builds two fresh chains -- values < x and values >= x -- by copying
        each node's value into a new ListNode, then splices the second chain
        onto the first. Relative order within each chain is preserved.
        """
        less_head = ListNode(-1)   # dummy head for the "< x" chain
        geq_head = ListNode(x)     # dummy head for the ">= x" chain
        less_tail, geq_tail = less_head, geq_head
        node = head
        while node is not None:
            fresh = ListNode(node.val)
            if node.val < x:
                less_tail.next = fresh
                less_tail = less_tail.next
            else:
                geq_tail.next = fresh
                geq_tail = geq_tail.next
            node = node.next
        # Join the chains, skipping both dummy heads.
        less_tail.next = geq_head.next
        return less_head.next
|
from abc import ABC
from datetime import datetime
from typing import Union, Tuple, Any, Iterable
class FieldException(ABC, Exception):
    """Base class for field errors; prefixes every message with the field key."""
    def __init__(self, key: str, *, error_msg: str = None):
        super().__init__(f"Field (Key: {key}) - {error_msg}")


class FieldReadOnly(FieldException):
    """Raised on an attempt to write a read-only field."""
    def __init__(self, key: str):
        super().__init__(key, error_msg="Readonly.")


class FieldTypeMismatch(FieldException):
    """Raised when a value's type does not match the field's expected type(s)."""
    def __init__(self, key: str, actual_type: type, actual_value: Any,
                 expected_types: Union[type, Iterable[type]] = None, *, extra_message: str = None):
        if expected_types is None:
            expected_name = "(Unknown)"
        elif isinstance(expected_types, type):
            expected_name = expected_types.__name__
        else:
            expected_name = " or ".join([t.__name__ for t in expected_types])
        super().__init__(
            key,
            error_msg=f"Type mismatch. {extra_message or ''} "
                      f"Expected Type: {expected_name} / Actual Type: {actual_type.__name__} / "
                      f"Actual Value: {actual_value}")


class FieldValueTypeMismatch(FieldException):
    """Raised when a contained value's type does not match the expected type(s)."""
    def __init__(self, key: str, actual: type, expected: Union[type, Tuple[type, ...]] = None, *,
                 extra_message: str = None):
        if expected is None:
            expected_name = "(Unknown)"
        elif isinstance(expected, type):
            expected_name = expected.__name__
        else:
            expected_name = " or ".join([t.__name__ for t in expected])
        super().__init__(
            key,
            error_msg=f"Type mismatch. {extra_message or ''} "
                      f"Expected: {expected_name}, Actual: {actual.__name__}")


class FieldValueInvalid(FieldException):
    """Raised when a value fails the field's validation."""
    def __init__(self, key: str, value: Any):
        super().__init__(key, error_msg=f"Invalid value: {value}")


class FieldCastingFailed(FieldException):
    """Raised when automatic casting of a value to the desired type fails."""
    def __init__(self, key: str, value: str, desired_type: type, *, exc: Exception = None):
        super().__init__(
            key,
            error_msg=f"Auto casting failed. Value: ({value}) {type(value)} / Desired type: {desired_type} / "
                      f"Exception: {exc}")


class FieldNoneNotAllowed(FieldException):
    """Raised when `None` is assigned to a field that disallows it."""
    def __init__(self, key: str):
        super().__init__(key, error_msg="`None` not allowed.")


class FieldEmptyValueNotAllowed(FieldException):
    """Raised when an empty value is assigned to a field that disallows it."""
    def __init__(self, key: str):
        super().__init__(key, error_msg="Empty value not allowed.")


class FieldMaxLengthReached(FieldException):
    """Raised when a value exceeds the field's maximum length."""
    def __init__(self, key: str, cur_len: int, max_len: int):
        # BUG FIX: the limit was hard-coded as "72,932"; report the actual max_len.
        super().__init__(key, error_msg=f"Max length reached. {cur_len}/{max_len}")


class FieldInvalidUrl(FieldException):
    """Raised when a URL field receives a malformed URL."""
    def __init__(self, key: str, url: str):
        super().__init__(key, error_msg=f"Invalid URL: {url}")


class FieldFlagNotFound(FieldException):
    """Raised when a value is not a member of the field's flag enum."""
    def __init__(self, key: str, obj: Any, flag):
        super().__init__(key, error_msg=f"Object ({obj}) not found in the flag ({flag}).")


class FieldFlagDefaultUndefined(FieldException):
    """Raised when a flag enum used by the field defines no default member."""
    def __init__(self, key: str, flag):
        super().__init__(key, error_msg=f"Default value of the flag ({flag}) undefined.")


class FieldRegexNotMatch(FieldException):
    """Raised when a value does not match the field's validation regex."""
    def __init__(self, key: str, value: str, regex: str):
        super().__init__(key, error_msg=f"Regex ({regex}) not match with ({value}).")


class FieldInstanceClassInvalid(FieldException):
    """Raised when the field instance class type is invalid."""
    def __init__(self, key: str, inst_cls):
        super().__init__(key, error_msg=f"Invalid field instance class type: {inst_cls}")


class FieldModelClassInvalid(FieldException):
    """Raised when the model class type attached to the field is invalid."""
    def __init__(self, key: str, model_cls):
        super().__init__(key, error_msg=f"Invalid model class type: {model_cls}")


class FieldValueNegative(FieldException):
    """Raised when a non-negative field receives a negative number."""
    def __init__(self, key: str, val: Union[int, float]):
        super().__init__(key, error_msg=f"Field value should not be negative. (Actual: {val})")


class FieldOidDatetimeOutOfRange(FieldException):
    """Raised when a datetime is outside the range an `ObjectId` can encode."""
    def __init__(self, key: str, dt: datetime):
        super().__init__(key, error_msg=f"Datetime to initialize `ObjectId` out of range. (Actual: {dt})")


class FieldOidStringInvalid(FieldException):
    """Raised when a string cannot be parsed into an `ObjectId`."""
    def __init__(self, key: str, val: str):
        super().__init__(key, error_msg=f"Invalid string initialize `ObjectId`. (Actual: {val})")


class FieldInvalidDefaultValue(FieldException):
    """Raised when the configured default value is itself invalid for the field."""
    def __init__(self, key: str, default_value: Any, *, exc: Exception = None):
        super().__init__(key, error_msg=f"Invalid default value. {default_value} - <{exc}>")


class FieldValueRequired(FieldException):
    """Raised when a required field is left without a value."""
    def __init__(self, key: str):
        super().__init__(key, error_msg=f"Field (key: {key}) requires value.")
|
import numpy as np #numpy ライブラリをnpという名前で導入
import cv2 #OpenCV ライブラリを導入
img = np.zeros((500,500,3), np.uint8)  # 500x500 3-channel image, zero-initialized (black)
# Draw a grid of filled circles; centers step by 100 px starting at 50.
for y in range(50,550,100):
    for x in range(50,550,100):
        # BGR color varies with the row via y/2.
        # NOTE(review): y/2 is a float under Python 3 -- confirm cv2 accepts it here.
        img = cv2.circle(img,(x,y),50,(y/2,0,255),-1)
cv2.imshow('imgame',img)  # display the image
cv2.waitKey(0)  # wait for a key press
cv2.destroyAllWindows()  # close all windows
|
# -*- coding: utf-8 -*-
"""Exceptions for the :mod:`pybel.struct.pipeline` module."""
__all__ = [
"MissingPipelineFunctionError",
"MetaValueError",
"MissingUniverseError",
"DeprecationMappingError",
"PipelineNameError",
]
# Each exception subclasses the stdlib type (KeyError/ValueError) that callers
# of the pipeline module would most naturally catch.
class MissingPipelineFunctionError(KeyError):
    """Raised when trying to run the pipeline with a function that isn't registered."""


class MetaValueError(ValueError):
    """Raised when getting an invalid meta value."""


class MissingUniverseError(ValueError):
    """Raised when running a universe function without a universe being present."""


class DeprecationMappingError(ValueError):
    """Raised when applying the deprecation function annotation and the given name already is being used."""


class PipelineNameError(ValueError):
    """Raised when a second function tries to use the same name."""
|
from mfcc_hdf5 import train_gen, val_gen
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.callbacks import EarlyStopping, ModelCheckpoint
import numpy as np
import pickle
import math
def main():
    """Build, compile, and train a fully-connected MFCC regression model.

    Architecture: Flatten(9x20) -> 4 x Dense(1024, relu) -> Dense(30, linear).
    Trains from the generators in mfcc_hdf5 with early stopping and
    best-model checkpointing on validation loss.
    """
    print('Building model...')
    model = Sequential()
    # Each example is 9 frames x 20 MFCC coefficients.
    model.add(Flatten(input_shape=(9,20)))
    model.add(Dense(1024, activation='relu'))
    model.add(Dense(1024, activation='relu'))
    model.add(Dense(1024, activation='relu'))
    model.add(Dense(1024, activation='relu'))
    # 30-dimensional regression output (no activation -> linear).
    model.add(Dense(30))
    print('Compiling model...')
    model.compile(loss='mean_squared_error',
                  optimizer='adam',
                  metrics=['mse'],
                  sample_weight_mode=None)
    print(model.summary())
    print('Training model...')
    batch = 64
    M = 4
    # NOTE(review): 121069559 / 60534779 are presumably the total numbers of
    # training / validation examples backing the generators -- confirm.
    model.fit_generator(generator=train_gen(batch, M),
                        steps_per_epoch=math.ceil(121069559/batch),
                        epochs=15,
                        validation_data=val_gen(batch, M),
                        validation_steps=math.ceil(60534779/batch),
                        class_weight='auto',
                        callbacks=[EarlyStopping(monitor='val_loss', patience=2, verbose=0),
                                   ModelCheckpoint(filepath='jesus_frame_content3_weights.{epoch:02d}.hdf5', monitor='val_loss', save_best_only=True, verbose=0)])

if __name__ == '__main__':
    main()
|
# Greatest common divisor gcd(a, b) via the Euclidean algorithm.
def gcd(a, b):
    """Return the greatest common divisor of a and b (recursive Euclid)."""
    if b == 0:
        return a
    return gcd(b, a % b)
def main():
    """Prompt for two integers and print their greatest common divisor."""
    print('gcd(a,b)')
    a = int(input('a = '))
    b = int(input('b = '))
    G = gcd(a,b)
    print('result = ',G)

main()
# импортируем библиотеку sqlalchemy и некоторые функции из нее
# импортируем пользовательский класс User
# импортируем datetime
import sqlalchemy as sa
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from users import User
from datetime import datetime
# базовый класс моделей таблиц
Base = declarative_base()
class Athletes(Base):
    """
    Describes the structure of the `athelete` table.
    """
    __tablename__ = 'athelete'
    # athlete id, primary key
    id = sa.Column(sa.Integer, primary_key=True, autoincrement=True)
    # athlete's age
    age = sa.Column(sa.Integer)
    # athlete's date of birth (text, '%Y-%m-%d' per research() parsing)
    birthdate = sa.Column(sa.Text)
    # athlete's gender
    gender = sa.Column(sa.Text)
    # athlete's height (presumably meters; research() scales by 100 -- confirm)
    height = sa.Column(sa.REAL)
    # athlete's name
    name = sa.Column(sa.Text)
    # athlete's weight
    weight = sa.Column(sa.Integer)
    # number of gold medals
    gold_medals = sa.Column(sa.Integer)
    # number of silver medals
    silver_medals = sa.Column(sa.Integer)
    # number of bronze medals
    bronze_medals = sa.Column(sa.Integer)
    # total number of medals
    total_medals = sa.Column(sa.Integer)
    # sport
    sport = sa.Column(sa.Text)
    # country
    country = sa.Column(sa.Text)
def research(query, user_birthdate, list_athletes):
    """
    Build a list of dicts for the athletes whose height is closest to (or
    matches) the user's height, and also find the athlete closest in age.

    Parameters:
        query: the user row looked up by name (has .height in user units)
        user_birthdate: the user's birth date as a datetime
        list_athletes: query result containing all athletes
    Returns:
        (athletes_height, athlete_birthdate): list of athlete dicts by
        height proximity, and the single closest-by-birthdate athlete dict.
    """
    delta_height = query.height      # best height difference seen so far
    delta_birthdate = 10000          # best birthdate difference (days) so far
    athletes_height = []
    for athlete in list_athletes:
        # NOTE(review): athletes with height None are skipped entirely, so
        # they also never enter the birthdate comparison -- confirm intended.
        if athlete.height is not None:
            # Athlete heights are stored in meters; *100 converts to the
            # user's units before comparing (see Athletes.height).
            if delta_height >= abs(float(athlete.height) * 100 - query.height):
                delta_height = abs(float(athlete.height) * 100 - query.height)
                athletes_height.append(athlete.__dict__)
            if delta_birthdate > abs(datetime.strptime(athlete.birthdate, '%Y-%m-%d') - user_birthdate).days:
                delta_birthdate = abs(datetime.strptime(athlete.birthdate, '%Y-%m-%d') - user_birthdate).days
                athlete_birthdate = athlete.__dict__
    # Drop the first collected athlete if a strictly closer height was found
    # later. NOTE(review): athlete_birthdate is unbound if every athlete's
    # height is None -- confirm upstream data guarantees at least one row.
    if delta_height > float(athletes_height[0]['height']) * 100 - query.height:
        athletes_height.pop(0)
    return athletes_height, athlete_birthdate
def find_similiar(name, session):
    """
    1) Count the users registered under the entered first name.
    2) If at least one exists, filter and print the athlete closest to this
       user by birth date and the athlete closest by height.
    3) Otherwise print an error message.

    Note: the function name keeps its original (misspelled) form because
    callers may rely on it.
    """
    query = session.query(User).filter(User.first_name == name).count()
    if query >= 1:
        # Reuse `query` for the first matching user row.
        query = session.query(User).filter(User.first_name == name).first()
        user_birthdate = datetime.strptime(query.birthdate, '%Y-%m-%d')
        athletes_height, athlete_birthdate = research(query, user_birthdate, session.query(Athletes).all())
        i = 0
        # Narrow the height candidates by longest common name prefix with the user.
        while len(query.first_name) > i:
            # keep only athletes whose i-th character matches the user's
            result = []
            for athlete in athletes_height:
                if athlete['name'][i] == query.first_name[i]:
                    result.append(athlete)
            if len(result) == 0:
                break
            athletes_height = result
            i += 1
        print("Атлет {} с самым ближайшим ростом {} к данному пользователю ({}, {}). Вид спорта: {}".format(athletes_height[0]['name'], athletes_height[0]['height'], query.first_name, query.height, athletes_height[0]['sport']))
        print("Атлет {} с ближайшей датой рождения {} к данному пользователю ({}, {}). Вид спорта: {}".format(athlete_birthdate['name'], athlete_birthdate['birthdate'], query.first_name, query.birthdate, athlete_birthdate['sport']))
    else:
        print("Ошибка. Такой пользователь не найден.")
def first_multiple_covering_digits(n):
    """Return the first multiple of n (as a string) whose cumulative decimal
    digits cover 0-9, or None when that never happens.

    Code Jam "Counting Sheep": examine n, 2n, 3n, ... and remember every
    digit seen so far.  For n == 0 only the digit 0 ever appears (insomnia),
    so None is returned; every positive n eventually covers all ten digits.
    """
    if n == 0:
        return None
    seen = set()
    multiple = 0
    while True:
        multiple += n
        seen.update(str(multiple))
        if len(seen) == 10:
            return str(multiple)

if __name__ == "__main__":
    # Bug fix / cleanup: the original tracked digits in a list seeded with the
    # characters of "0000000000" while storing ints into it, and capped the
    # search at 100 multiples; the set-based helper above is exact and unbounded.
    t = int(raw_input())
    for case_number in range(1, t + 1):
        n = int(raw_input())
        answer = first_multiple_covering_digits(n)
        if answer is None:
            print("Case #%d: INSOMNIA" % case_number)
        else:
            print("Case #%d: %s" % (case_number, answer))
|
# Generated by Django 3.0.8 on 2020-07-30 16:51
from django.db import migrations
import django_countries.fields
class Migration(migrations.Migration):
    """Add an optional ISO-3166 two-letter country field to the profile model."""

    dependencies = [
        ('users', '0002_profile'),
    ]
    operations = [
        migrations.AddField(
            model_name='profile',
            name='country',
            # blank=True: the field is optional in forms; max_length=2 holds the ISO code.
            field=django_countries.fields.CountryField(blank=True, max_length=2),
        ),
    ]
|
#!/usr/bin/python3
import threading
import time
import baostock as bs
import pandas as pd
result = pd.DataFrame()  # shared accumulator; written by worker threads under threadLock
class myThread (threading.Thread):
    """Worker thread that downloads daily K-line details for one slice of
    stocks and appends them to the module-level ``result`` DataFrame.

    NOTE(review): the global lock is held around the whole download, so the
    workers effectively run one at a time — confirm whether that is intended.
    """
    def __init__(self, threadID, stocks, startDay, endDay, lastTradeDay):
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.stocks = stocks          # DataFrame slice of stock rows to fetch
        self.startDay = startDay      # first trade day (inclusive)
        self.endDay = endDay          # last trade day (inclusive)
        self.lastTradeDay = lastTradeDay
    def run(self):
        print ("开启线程:", self.threadID)
        global result
        # 'with' guarantees the lock is released even if the fetch raises
        # (the original acquire/release pair leaked the lock on exceptions).
        with threadLock:
            detail = getStocksDetail(self.stocks, self.startDay, self.endDay, 'd', '2', self.lastTradeDay)
            # Bug fix: DataFrame.append was deprecated and removed in pandas 2.0;
            # pd.concat is the supported equivalent (same default index behavior).
            result = pd.concat([result, detail])
        print ("关闭线程:", self.threadID)
# Fetch the trading calendar.
def getTradeDays(start_date, end_date) :
    """Return the baostock trading calendar between the two dates as a DataFrame."""
    calendar = bs.query_trade_dates(start_date, end_date)
    collected = []
    # '&' kept deliberately (not 'and'): both operands are evaluated every pass,
    # matching the original cursor-advancing behavior.
    while (calendar.error_code == '0') & calendar.next():
        collected.append(calendar.get_row_data())
    return pd.DataFrame(collected, columns=calendar.fields)
# Fetch the full list of stocks quoted on the given trading day.
def getStocks(tradeDay) :
    """Return every stock listed on tradeDay as a DataFrame."""
    listing = bs.query_all_stock(tradeDay)
    gathered = []
    # '&' kept deliberately so next() is always evaluated, as before.
    while (listing.error_code == '0') & listing.next():
        gathered.append(listing.get_row_data())
    return pd.DataFrame(gathered, columns=listing.fields)
# Fetch daily K-line detail rows for the Shanghai 600xxx stocks.
def getStocksDetail(stocks, startDay, endDay, frequency, adjustflag, lastTradeDay):
    """Return a DataFrame of history K-line rows for tradeable 'sh.600*' stocks."""
    rows = []
    fields = []
    for _, record in stocks.iterrows():
        # Only stocks that are currently trading and on the Shanghai 600 board.
        if record['tradeStatus'] != '1' or 'sh.600' not in record['code']:
            continue
        rs = bs.query_history_k_data(record['code'], "date,code,open,high,low,close,preclose,volume,amount,adjustflag,turn,tradestatus,pctChg,peTTM,pbMRQ,psTTM,pcfNcfTTM,isST", startDay, endDay, frequency, adjustflag)
        print('请求历史数据返回信息:'+rs.error_msg)
        fields = rs.fields
        # '&' kept deliberately: both operands are evaluated, as in the original.
        while (rs.error_code == '0') & rs.next():
            rows.append(rs.get_row_data())
    # TODO: add the stock name and previous day's percentage change.
    return pd.DataFrame(rows, columns=fields)
# --- Main script: log in, pick trading days, fan the stock list out to workers. ---
lg = bs.login()
startDay = "2021-03-01"
endDay = "2021-03-01"
threads = []
lastTradeDay = ""
threadLock = threading.Lock()
tradeDaysResult = getTradeDays("2021-03-01", "2021-03-01")
for index, tradeDay in tradeDaysResult.iterrows():
    if tradeDay['is_trading_day'] == '1':
        stocksResult = getStocks(tradeDay['calendar_date'])
        page = 1
        limit = 50  # stocks handled per worker thread
        threadId = 1
        while (page - 1) * limit < stocksResult.shape[0] :
            # Bug fix: myThread.__init__ takes five arguments after the id, but
            # lastTradeDay was omitted here, so every construction raised TypeError.
            chunk = stocksResult[(page - 1) * limit: page * limit]
            thread = myThread(threadId, chunk, tradeDay['calendar_date'], tradeDay['calendar_date'], lastTradeDay)
            thread.start()
            threads.append(thread)
            page += 1
            threadId += 1
# Wait for every worker, then dump the combined result.
for t in threads:
    t.join()
result.to_csv("~/Desktop/trade2.csv", encoding="gbk", index=False)
print ("退出主线程")
target = 2020  # leftover from the day-1 puzzle; unused here

def parse_entry(row):
    """Parse one policy line 'L-U C: password' into (lower, upper, letter, text)."""
    sections = row.split(' ')
    bounds = sections[0].split('-')
    return int(bounds[0]), int(bounds[1]), sections[1][0], sections[2]

def count_policy_ok(lower, upper, letter, text):
    """Part 1: the letter must occur between lower and upper times (inclusive)."""
    return lower <= text.count(letter) <= upper

def position_policy_ok(lower, upper, letter, text):
    """Part 2: the letter must appear at exactly one of the two 1-based positions."""
    lower_match = lower - 1 < len(text) and text[lower - 1] == letter
    upper_match = upper - 1 < len(text) and text[upper - 1] == letter
    return lower_match != upper_match  # xor: exactly one position matches

def main():
    """Count the passwords valid under each policy and print both totals."""
    # 'with' closes the handle (the original leaked it).
    with open('input.txt') as f:
        rows = list(f)
    valid = 0
    valid2 = 0
    for row in rows:
        lower, upper, letter, text = parse_entry(row)
        if count_policy_ok(lower, upper, letter, text):
            valid += 1
        if position_policy_ok(lower, upper, letter, text):
            valid2 += 1
    print(valid)
    # 564
    print(valid2)
    # 325

if __name__ == '__main__':
    main()
|
from django.contrib import admin
from twits.models import Person
@admin.register(Person)
class AuthorAdmin(admin.ModelAdmin):
    """Register Person in the Django admin with stock ModelAdmin behavior."""
    pass
|
#!/usr/bin/env python
# coding=utf-8
from myBiSeNet import *
import numpy as np
model = create_BiSeNet(2)  # presumably a 2-class BiSeNet segmentation model — defined in myBiSeNet
# Single random RGB-like input, shape (1, 321, 321, 3).
x = np.asarray([np.random.rand(321, 321, 3)])
# NOTE(review): the target is all-ones with 3 channels even though the model
# was created with 2 classes — confirm the label shape create_BiSeNet expects.
y = np.asarray([np.ones((321, 321, 3))])
print(x.shape, y.shape)
model.fit(x, y, epochs = 40, batch_size = 1)  # one smoke-training run
|
import numpy as np
import random
import math
from numpy import linalg as LA
# Course toy dataset: 4 samples in homogeneous coordinates (leading 1 = bias input).
X_Cours = np.array([[1, 2, 1], [1, 0, -1], [1, -2, -1], [1, 0, 2]])
t_Cours = np.array([1, 1, -1, -1])
# Logical AND truth table with targets in {-1, +1}.
X_ET = np.array([[0, 0], [1, 0], [0, 1], [1, 1]])
t_ET = np.array([-1, -1, -1, 1])
# NOTE(review): named XOR but the targets (-1, 1, 1, 1) are logical OR — confirm.
X_XOR = np.array([[0, 0], [1, 0], [0, 1], [1, 1]])
t_XOR = np.array([-1, 1, 1, 1])
bias = 1
# Initial weight vector: bias weight plus two input weights.
W = np.array([1, 1, 1])
alpha = 0.1  # learning rate
# Checks whether every sample is classified as its target label.
def IsClassed(tau, y):
    """Return True when prediction y matches target tau at every position."""
    return all(target == y[position] for position, target in enumerate(tau))
# Perceptron, online (incremental) version.
def PerceptronIncremental(X, W, t, bias, alpha):
    """Train a perceptron by updating after each randomly drawn sample.

    Returns (y, W, cpt): final predictions, final weights and the number of
    iterations performed.  Does not terminate if (X, t) is not linearly
    separable, since the loop runs until every sample is classified.
    NOTE(review): W[0] = bias mutates the caller's W in place until the first
    reassignment below — confirm callers expect that.
    """
    y = np.ones(len(X))
    cpt = 0
    while not IsClassed(t, y):
        # Draw one sample at random (stochastic update).
        val = random.randint(0, len(X) - 1)
        x_prime = X[val]
        # The bias is folded into the first weight component each iteration.
        W[0] = bias
        if np.transpose(W).dot(x_prime) > 0:
            y_prime = 1
        else:
            y_prime = -1
        if y_prime != t[val]:
            # Standard perceptron rule: move W toward the misclassified target.
            e = alpha * (t[val] - y_prime)
            delta_w = x_prime.dot(e)
            W = W + delta_w
            bias = bias + e * 1
        y[val] = y_prime
        cpt = cpt + 1
    return y, W, cpt
# Perceptron, batch (epoch sweep) version.
def PerceptronBatch(X, W, t, bias, alpha):
    """Train a perceptron by sweeping over every sample per epoch.

    Returns (y, W, nb_iter).  Like the online version, it loops forever on
    data that is not linearly separable.
    """
    y = np.ones(len(X))
    nb_iter = 0
    while not IsClassed(t, y):
        cpt = 0
        for val in X:
            x_prime = val
            # The bias is folded into the first weight component each pass.
            W[0] = bias
            if np.transpose(W).dot(x_prime) > 0:
                y_prime = 1
            else:
                y_prime = -1
            if y_prime != t[cpt]:
                # Perceptron rule: correct W toward the true label.
                e = alpha * (t[cpt] - y_prime)
                delta_w = x_prime.dot(e)
                W = W + delta_w
                bias = bias + e * 1
            y[cpt] = y_prime
            cpt = cpt + 1
        # Counts samples seen plus one per epoch (kept as written).
        nb_iter = nb_iter + cpt
        nb_iter = nb_iter + 1
    return y, W, nb_iter
# Generate random data plus the "teacher" weights that label it.
def LSAleatoire(P, N):
    """Return (X, W, t): P samples in [-1, 1]^N with a homogeneous leading 1,
    a random teacher weight list of length N+1, and the labels it induces."""
    # Random-call order is preserved: samples first, then teacher weights.
    X = 2 * np.random.rand(P, N) - 1
    W = 2 * np.random.rand(N + 1, 1) - 1
    X = np.insert(X, 0, 1, axis = 1)
    # Label each sample by the sign of its projection on the teacher vector.
    t = [(-1 if np.dot(sample, W) <= 0 else 1) for sample in X]
    flat_weights = [row[0] for row in W]
    return X, flat_weights, t
def PerceptronEleveIncre(P, N):
    """Teacher-student run with the online perceptron.

    Generates a random linearly-separable set with teacher weights, trains a
    student, and returns (iteration count, R).
    NOTE(review): dot/(|w||w'|) is already the cosine of the teacher-student
    angle; wrapping it in math.cos again is probably meant to be math.acos —
    confirm before relying on R.
    """
    pere = LSAleatoire(P, N)
    t_pere = pere[2]
    W_pere = pere[1]  # teacher weights (plain list)
    X_pere = pere[0]
    # NOTE(review): PerceptronIncremental assigns W[0] = bias on the list it
    # receives, so W_pere is mutated before R is computed — confirm intent.
    eleve = PerceptronIncremental(X_pere, W_pere, t_pere, 1, alpha)
    W_fils = eleve[1]  # student weights after convergence
    R = math.cos(np.dot(W_pere, W_fils) / np.dot(LA.norm(W_pere), LA.norm(W_fils)))
    return eleve[2], R
def PerceptronEleveBatch(P, N):
    """Teacher-student run with the batch perceptron; returns (iterations, R).

    NOTE(review): same caveat as PerceptronEleveIncre — math.cos applied to a
    ratio that is already a cosine is probably meant to be math.acos; and the
    teacher list W_pere is mutated in place by PerceptronBatch (W[0] = bias).
    """
    pere = LSAleatoire(P, N)
    t_pere = pere[2]
    W_pere = pere[1]  # teacher weights (plain list)
    X_pere = pere[0]
    eleve = PerceptronBatch(X_pere, W_pere, t_pere, 1, alpha)
    W_fils = eleve[1]  # student weights after convergence
    R = math.cos(np.dot(W_pere, W_fils) / np.dot(LA.norm(W_pere), LA.norm(W_fils)))
    return eleve[2], R
# Average the convergence statistics of the online teacher-student experiment.
run_count = 50
total_updates = 0
total_overlap = 0
for _ in range(run_count):
    updates, overlap = PerceptronEleveIncre(500, 1000)
    total_updates += updates
    total_overlap += overlap
print(total_updates / run_count)
print(total_overlap / run_count)
|
# Generated by Django 2.1.2 on 2018-11-02 18:59
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Rework the profile model: drop birth_date, id and location, add an
    image field, and make the user OneToOneField the primary key."""

    dependencies = [
        ('pages', '0001_initial'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='profile',
            name='birth_date',
        ),
        # Removing 'id' is valid because 'user' becomes the primary key below.
        migrations.RemoveField(
            model_name='profile',
            name='id',
        ),
        migrations.RemoveField(
            model_name='profile',
            name='location',
        ),
        migrations.AddField(
            model_name='profile',
            name='image',
            field=models.ImageField(blank=True, upload_to=''),
        ),
        migrations.AlterField(
            model_name='profile',
            name='user',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL),
        ),
    ]
|
from msa_sdk.variables import Variables
from msa_sdk.msa_api import MSA_API
from msa_sdk.order import Order
from msa_sdk import util
# Declare the workflow variables exposed to the MSA process editor.
dev_var = Variables()
dev_var.add('name', var_type='String')
dev_var.add('device.0.target', var_type='Device')
dev_var.add('version', var_type='String')
dev_var.add('additional_device', var_type='String')
dev_var.add('additional_version', var_type='String')
context = Variables.task_call(dev_var)
process_id = context['SERVICEINSTANCEID']
devices = context['device']
for device in devices:
    # NOTE(review): assumes the numeric database id is always the last three
    # characters of the device target — confirm against the naming scheme.
    device_db_id = device['target'][-3:]
    order = Order(device_db_id)
    order.command_execute('IMPORT', {"Apache_Version":"0"})
    version = order.command_objects_instances("Apache_Version")
    ver = order.command_objects_instances_by_id("Apache_Version", version)
# Reports only the last device's version (ver holds the final loop value).
ret = MSA_API.process_content('ENDED', f'Version is {ver}', context, True)
print(ret)
import smtplib

from email import encoders
from email.mime.application import MIMEApplication
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
def mailing(bot):
    """Send an email with a text body and one file attachment via Naver SMTP.

    Bug fixes: 'encoders' was never imported (NameError on every call), the
    MIME subtype was misspelled 'octat-stream', the attachment handle was
    leaked, and the prepared body text was never attached to the message.
    """
    sendEmail = "이메일 주소"
    recvEmail = "이메일주소"
    password = "비밀번호"
    smtpName = "smtp.naver.com"  # SMTP server host
    smtpPort = 587  # SMTP port (STARTTLS)

    text = "본문 내용"
    msg = MIMEMultipart()
    msg['Subject'] = "제목"
    msg['From'] = sendEmail
    msg['To'] = recvEmail
    # Attach the body text (it was built but never added before).
    msg.attach(MIMEText(text))

    fileName = '파일경로'
    # 'with' guarantees the file handle is closed.
    with open(fileName, 'rb') as attachment:
        part = MIMEBase('application', 'octet-stream')
        part.set_payload(attachment.read())
    encoders.encode_base64(part)
    # NOTE(review): fileName is a full path; consider os.path.basename here.
    part.add_header('Content-Disposition', "attachment; filename= " + fileName)
    msg.attach(part)

    s = smtplib.SMTP(smtpName, smtpPort)  # connect to the mail server
    s.starttls()                          # upgrade to TLS
    s.login(sendEmail, password)
    s.sendmail(sendEmail, recvEmail, msg.as_string())
    s.close()
|
import math
import struct
import sys
class Vertex:
    """A 3-D point that projects itself onto the SVG viewport.

    Relies on the module-level globals ``camera`` (viewpoint on the z axis),
    ``width`` (output viewport size) and ``data`` (raw STL file contents).
    """
    x = 0
    y = 0
    z = 0
    def Convert(self):
        """Project toward the camera, then map into the file viewport."""
        self.Homograph()
        self.Viewport()
    def Homograph(self):
        # Perspective projection toward a camera sitting on the z axis.
        self.x = (camera.z * self.x) / (camera.z - self.z)
        self.y = (camera.z * self.y) / (camera.z - self.z)
    def Viewport(self):
        # Original viewport width implied by a 45-degree field of view.
        ViewWidth = camera.z * math.tan(math.pi / 8) * 2
        # Scale vertex to the file viewport size.
        self.x = self.x * width / ViewWidth
        self.y = self.y * width / ViewWidth
        # Recenter: SVG origin is the top-left corner, y grows downward.
        self.x = width / 2 + self.x
        self.y = width / 2 - self.y
    def GetFloat(self, s):
        """Read one little-endian float triple at byte offset s of ``data``.

        STL stores z-up, so y and z are swapped, and x/y are mirrored.
        Bug fix: the original concatenated single buffer elements
        (data[s] + data[s+1] + ...), which breaks on Python 3 bytes where
        indexing yields ints; unpack_from reads in place on both versions.
        """
        self.x = -struct.unpack_from('f', data, s)[0]
        self.z = struct.unpack_from('f', data, s + 4)[0]
        self.y = -struct.unpack_from('f', data, s + 8)[0]
#python Vector.py zoom filename
argv = sys.argv
zoom = argv[1]
file_name = str(argv[2])
vert = Vertex()
camera = Vertex()
camera.z = float(zoom)
width = 500
# Bug fix: STL is binary — open in 'rb' so newline translation cannot corrupt
# the data (the original text-mode open breaks on Windows), and close the handle.
infile = open("objects/" + file_name + ".stl", 'rb')
data = infile.read()
infile.close()
out = open("vectordata/" + file_name + ".svg", 'w')  # export file
#write header
out.write('<?xml version="1.0" standalone="no"?>\n<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">\n\n')
out.write("<svg width=\"%s\" height=\"%s\" version=\"1.1\" xmlns=\"http://www.w3.org/2000/svg\">\n" % (width, width))
# Face count: 80-byte STL header followed by a 4-byte little-endian uint32.
faces = struct.unpack_from('I', data, 80)[0]
for x in range(0, faces):
    out.write("<polygon style=\"fill:none;stroke:#000000;stroke-miterlimit:10;\" points=\"")
    for y in range(0, 3):
        # Each 50-byte face record: 12-byte normal, three 12-byte vertices, 2-byte attribute;
        # vertex 1 of face 0 starts at offset 96.
        vert.GetFloat(96 + y * 12 + x * 50)
        vert.Convert()
        out.write(str(round(vert.x, 2)) + ",")
        out.write(str(round(vert.y, 2)) + ",")
    out.write("\" />\n")
out.write("</svg>")
out.close()
print("end")
|
# -*- coding: utf-8 -*-
import os
import sys
from flask import Flask, render_template, redirect, request
from flask_session import Session
from handlers import depends as depends_handler
from handlers import blog as blog_handler
from handlers import mfdf as mfdf_handler
app = Flask(__name__, static_url_path='/static')  # serve static assets under /static
sess = Session()  # flask-session extension; bound to the app via sess.init_app(app) below
@app.before_request
def clear_trailing():
    """Normalize each request before routing: strip a trailing slash and
    redirect raw-IP traffic to the canonical domain.

    Fix: removed the function-local ``from flask import redirect, request``
    that shadowed the identical module-level imports.
    """
    rp = request.path
    # Redirect /foo/ -> /foo (the bare root '/' is left alone).
    if rp != '/' and rp.endswith('/'):
        return redirect(rp[:-1])
    domain = "https://orkohunter.net"
    url_to_domain = domain + request.script_root + request.path
    # Requests that arrived via the server's bare IP bounce to the domain.
    if '139.59.63.73' in request.url_root:
        return redirect(url_to_domain)
@app.route("/")
def main():
return render_template('home/index.html')
@app.route("/mfdf")
def mfdf():
data = mfdf_handler.main()
return render_template('home/mfdf.html', data=data)
@app.route("/blog")
def blog():
data = blog_handler.main()
return render_template('home/blog.html', data=data)
@app.route("/values")
def values():
return render_template('home/values.html')
@app.route("/values/inspirations")
def values_inspirations():
return render_template('home/inspirations.html')
@app.route("/abwid")
def abwid():
return render_template('home/abwid.html')
@app.route("/contact")
def contact():
return render_template('home/contact.html')
@app.route("/projects")
def projects():
return render_template('home/projects.html')
@app.route("/keep")
def keep():
return render_template('keep/index.html')
@app.route("/ping-me")
def ping_me():
return render_template('ping-me/index.html')
@app.route("/ping-me/faqs")
def ping_me_faqs():
return render_template('ping-me/faqs.html')
@app.route("/depends")
def depends():
data = depends_handler.index()
return render_template('depends/index.html', data=data)
@app.route("/depends/<package>")
def depends_package(package):
analysis_exists, data = depends_handler.package_view(package)
return render_template('depends/package.html', data=data, analysis_exists=analysis_exists)
@app.route("/depends/list")
def depends_list():
data = depends_handler.list()
return render_template('depends/list.html', data=data)
@app.route("/depends/<package>/refresh")
def depends_package_refresh(package):
depends_handler.package_refresh(package)
return redirect("/depends/" + package)
app.secret_key = os.environ["APP_SECRET_KEY"]  # raises KeyError if unset (fails fast at startup)
app.config['SESSION_TYPE'] = 'filesystem'  # server-side sessions stored on disk
sess.init_app(app)
app.debug = False
if __name__ == '__main__':
    # Dev entry point; debug=True here overrides the app.debug = False above.
    app.run(host='0.0.0.0', debug=True)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.