text stringlengths 38 1.54M |
|---|
import pygame
"""
Provides a bunch of name references for the custom
pygame events used in this game
Available Name Space = 25 - 31
"""
SURFACE = 25 # Event dict: surf=pygame.Surface, pos=(x, y), z=int
"""Allows the systems to send surfaces to the Renderer to be displayed"""
PRINT_LINE = 26 # Event dict: message="", color=(R, G, B)/(R, G, B, A)
"""Passes messages to the main output console for display"""
FIGHT_EVENT = 27 # Event dict: subtype=(any of the sub types listed), kwargs(as listed below)
"""A collection of events related to controlling and monitoring the current fight going on"""
# FIGHT SUB EVENTS #
FIGHT_BEGIN = 0 # Signals beginning of fight, creation of buttons // kwargs(player=Player, monster=Monster)
FIGHT_END = 1 # Signals end of fight, destruction of buttons
ACTION = 2 # Signals that an action was used, two per fight turn, logging of fight events // kwargs(num=int)
RESET = 3 # Used for control, lets you rewind turns // kwargs(rewind_amount=int)
BANNER = 28 # Event dict: banner=scripts.banners.Banner
"""Similar to PRINT_LINE, only this is displayed directly on the screen, not in the textbox"""
FLSCRN_TOGGLE = 29 # Event dict:
"""Signals GameEngine to switch between fullscreen and windowed mode"""
VAR_CHANGE = 30 # Event dict: key=int, value=object, toggle=bool
"""Allows direct editing of game_vars dict"""
def make_event(type, **kwargs):
    """Build a pygame event of *type*, post it to the event queue, and return it."""
    posted = pygame.event.Event(type, **kwargs)
    pygame.event.post(posted)
    return posted
def make_dummy_event(type, **kwargs):
    """Build an event exactly like make_event would, but without posting it.

    The returned "dummy" event can be passed straight through event_handlers.
    """
    return pygame.event.Event(type, **kwargs)
|
import cx_Oracle

# Quick connectivity smoke test: fetch the DB server time.
# NOTE(review): credentials are hard-coded; move to config/env for real use.
conn = cx_Oracle.connect('system/orcl@127.0.0.1/orcl')
c = conn.cursor()
x = c.execute('select sysdate from dual')
x.fetchone()
c.close()
# Fix: was `conn.colse` -- a misspelled attribute (AttributeError) that also
# lacked the call parentheses, so the connection was never closed.
conn.close()
# Create your views here.
from django.http import HttpResponse
from django.core.urlresolvers import reverse
from django.core import urlresolvers
from django.shortcuts import get_object_or_404
from django.views.generic import DetailView,DeleteView
from django.views.generic.edit import CreateView,UpdateView
from django.http import HttpResponseRedirect
from django.template.loader import get_template
from igame.models import Game,Shop,Producer,gameReview
from forms import gameForm, shopForm,producerForm
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.core.exceptions import PermissionDenied
from serializers import gameSerializer, shopSerializer,producerSerializer, gameReviewSerializer
from rest_framework import generics, permissions
class LoginRequiredMixin(object):
    """Mixin that forces authentication on any class-based view it precedes in the MRO."""
    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        return super(LoginRequiredMixin, self).dispatch(*args, **kwargs)
class CheckIsOwnerMixin(object):
    """Mixin that only hands back objects owned by the requesting user.

    Raises PermissionDenied (HTTP 403) for any other user's object.
    """
    def get_object(self, *args, **kwargs):
        instance = super(CheckIsOwnerMixin, self).get_object(*args, **kwargs)
        if instance.user != self.request.user:
            raise PermissionDenied
        return instance
class gameDetail(DetailView):
    """Detail page for a single Game; exposes review rating choices to the template."""
    model = Game
    template_name = 'gameDetail.html'

    def get_context_data(self, **kwargs):
        ctx = super(gameDetail, self).get_context_data(**kwargs)
        ctx['RATING_CHOICES'] = gameReview.RATING_CHOICES
        return ctx
class shopDetail(DetailView):
    """Detail page for a single Shop; exposes review rating choices to the template."""
    model = Shop
    template_name = 'shopDetail.html'

    def get_context_data(self, **kwargs):
        ctx = super(shopDetail, self).get_context_data(**kwargs)
        ctx['RATING_CHOICES'] = gameReview.RATING_CHOICES
        return ctx
class producerDetail(DetailView):
    """Detail page for a single Producer; exposes review rating choices to the template."""
    model = Producer
    template_name = 'producerDetail.html'

    def get_context_data(self, **kwargs):
        ctx = super(producerDetail, self).get_context_data(**kwargs)
        ctx['RATING_CHOICES'] = gameReview.RATING_CHOICES
        return ctx
class gameSectionDetail(DetailView):
    """Game-section page for a Game object."""
    model = Game
    template_name = 'gamesection.html'

    def get_context_data(self, **kwargs):
        ctx = super(gameSectionDetail, self).get_context_data(**kwargs)
        ctx['RATING_CHOICES'] = gameReview.RATING_CHOICES
        return ctx
class shopSectionDetail(DetailView):
    """Shop-section page (note: backed by the Game model, mirroring the original)."""
    model = Game
    template_name = 'shopsection.html'

    def get_context_data(self, **kwargs):
        ctx = super(shopSectionDetail, self).get_context_data(**kwargs)
        ctx['RATING_CHOICES'] = gameReview.RATING_CHOICES
        return ctx
class producerSectionDetail(DetailView):
    """Producer-section page (backed by the Game model, like the other section views).

    Fix: get_context_data previously called super(shopSectionDetail, self) --
    the wrong class. Since a producerSectionDetail instance is not a
    shopSectionDetail, that super() call raises TypeError at request time.
    """
    model = Game
    template_name = 'producersection.html'

    def get_context_data(self, **kwargs):
        context = super(producerSectionDetail, self).get_context_data(**kwargs)
        context['RATING_CHOICES'] = gameReview.RATING_CHOICES
        return context
class gameCreate(LoginRequiredMixin, CreateView):
    """Authenticated creation form for Game; stamps the current user as owner."""
    model = Game
    template_name = 'form.html'
    form_class = gameForm

    def form_valid(self, form):
        # Record ownership before delegating validation/saving to CreateView.
        form.instance.user = self.request.user
        return super(gameCreate, self).form_valid(form)
class shopCreate(LoginRequiredMixin, CreateView):
    """Authenticated creation form for Shop; stamps the current user as owner."""
    model = Shop
    template_name = 'form.html'
    form_class = shopForm

    def form_valid(self, form):
        # Record ownership before delegating validation/saving to CreateView.
        form.instance.user = self.request.user
        return super(shopCreate, self).form_valid(form)
class producerCreate(LoginRequiredMixin, CreateView):
    """Authenticated creation form for Producer; stamps the current user as owner."""
    model = Producer
    template_name = 'form.html'
    form_class = producerForm

    def form_valid(self, form):
        # Record ownership before delegating validation/saving to CreateView.
        form.instance.user = self.request.user
        return super(producerCreate, self).form_valid(form)
class LoginRequiredCheckIsOwnerUpdateView(LoginRequiredMixin, CheckIsOwnerMixin, UpdateView):
    """Generic edit view: requires login and only lets the owner update the object."""
    template_name = 'form.html'
class gameDelete(DeleteView):
    """Confirmation + delete view for Game.

    NOTE(review): unlike the update view, this has no login/ownership check.
    """
    model = Game
    template_name = 'gameDelete.html'
class shopDelete(DeleteView):
    """Confirmation + delete view for Shop (no login/ownership check -- see review note on gameDelete pattern)."""
    model = Shop
    template_name = 'shopDelete.html'
class producerDelete(DeleteView):
    """Confirmation + delete view for Producer (no login/ownership check)."""
    model = Producer
    template_name = 'producerDelete.html'
@login_required()
def review(request, pk):
    """Attach a POSTed rating/comment to the Game *pk*, then redirect to its detail page."""
    game = get_object_or_404(Game, pk=pk)
    gameReview(
        rating=request.POST['rating'],
        comment=request.POST['comment'],
        user=request.user,
        game=game,
    ).save()
    # `reverse` is the same callable as urlresolvers.reverse (both imported above).
    return HttpResponseRedirect(reverse('igame:game_detail', args=(game.id,)))
### RESTful API views ###
class IsOwnerOrReadOnly(permissions.BasePermission):
    """DRF object-level permission: anyone may read, only the owner may write."""
    def has_object_permission(self, request, view, obj):
        # GET/HEAD/OPTIONS never mutate, so they are always allowed.
        if request.method in permissions.SAFE_METHODS:
            return True
        # Writes require the object's `user` to be the requester.
        return request.user == obj.user
class APIGameList(generics.ListCreateAPIView):
    """REST list/create endpoint for Game; writes restricted to the owner."""
    permission_classes = (IsOwnerOrReadOnly,)
    model = Game
    serializer_class = gameSerializer
class APIGameDetail(generics.RetrieveUpdateDestroyAPIView):
    """REST retrieve/update/delete endpoint for a single Game."""
    permission_classes = (IsOwnerOrReadOnly,)
    model = Game
    serializer_class = gameSerializer
class APIShopList(generics.ListCreateAPIView):
    """REST list/create endpoint for Shop; writes restricted to the owner."""
    permission_classes = (IsOwnerOrReadOnly,)
    model = Shop
    serializer_class = shopSerializer
class APIShopDetail(generics.RetrieveUpdateDestroyAPIView):
    """REST retrieve/update/delete endpoint for a single Shop."""
    permission_classes = (IsOwnerOrReadOnly,)
    model = Shop
    serializer_class = shopSerializer
class APIProducerList(generics.ListCreateAPIView):
    """REST list/create endpoint for Producer; writes restricted to the owner."""
    permission_classes = (IsOwnerOrReadOnly,)
    model = Producer
    serializer_class = producerSerializer
class APIProducerDetail(generics.RetrieveUpdateDestroyAPIView):
    """REST retrieve/update/delete endpoint for a single Producer."""
    permission_classes = (IsOwnerOrReadOnly,)
    model = Producer
    serializer_class = producerSerializer
class APIGameReviewList(generics.ListCreateAPIView):
    """REST list/create endpoint for gameReview; writes restricted to the owner."""
    permission_classes = (IsOwnerOrReadOnly,)
    model = gameReview
    serializer_class = gameReviewSerializer
class APIGameReviewDetail(generics.RetrieveUpdateDestroyAPIView):
    """REST retrieve/update/delete endpoint for a single gameReview."""
    permission_classes = (IsOwnerOrReadOnly,)
    model = gameReview
    serializer_class = gameReviewSerializer
|
#import all required libraries
from datetime import datetime
import re
import sqlite3 as sql
import itertools
# Input chat export to parse.
whatsapp_chat_data = 'WhatsAppChatwithDevOps&CloudBabies.txt'
# Unprocessed lines go to the errorFile default defined on
# insert_data_into_database().
# Running row id (declared `global` in the insert routine but never used).
id = 1
# Matches the "M/D/YY, H:MM AM" prefix of an exported WhatsApp line.
# Fix: the original classes were [1-9]{1,2}, which can never match a 0 digit,
# so any date or hour containing a 0 (the 10th/20th/30th, October-December,
# 10 o'clock, ...) failed to parse and was dumped to the error file.
# Group order is unchanged: 1=month, 2=day, 3=year, 4=hour, 5=minute, 6=AM/PM.
pattern = r'^([0-9]{1,2})\/([0-9]{1,2})\/([12][0-9]),\s([0-9]{1,2}):([0-9]{2})\s(AM|PM)'
# Matches an international phone-number string such as "+91 98765 432-1098".
number_pattern = r'(\+[0-9]{1,2})\s[\(]?([0-9]{1,5})[\)]?\s?([0-9]{3})[-]?[0-9]{1,4}'
####### DATABASE SETUP##########################
def db_connect():
    """Open (creating if needed) the whatsapp_database.db SQLite file.

    Returns the connection, or None (implicitly) if connecting fails.
    """
    try:
        con = sql.connect('whatsapp_database.db')
        return con
    except sql.Error as exc:
        # Fix: the original wrote `except Error:` -- `Error` is not defined in
        # this module, so the handler itself would raise NameError.
        print(exc)
def create_tables_groupchat(con, groupname):
    """Create the per-group message table if missing, commit, and close *con*.

    NOTE(review): groupname is interpolated straight into the DDL, so it must
    come from trusted code, never from user input.
    """
    ddl = ('CREATE TABLE IF NOT EXISTS ' + groupname +
           ' ( DATE TEXT, TIME TEXT, NAME TEXT, GROUP_NAME TEXT, NUMBER TEXT, MESSAGE TEXT);')
    cur = con.cursor()
    cur.execute(ddl)
    con.commit()
    con.close()
def read_file(whatsapp_chat_file):
    """Return the file's lines (newline-stripped) or None (implicitly) if missing."""
    try:
        with open(whatsapp_chat_file, 'r', encoding='utf-8') as handle:
            return handle.read().splitlines()
    except FileNotFoundError:
        print("No such file or directory : " + whatsapp_chat_file)
        print("Please try with correct file")
def filter_date(line):
    """Extract the message date from *line* and return it as a datetime.date.

    Uses the module-level `pattern` (month=group 1, day=group 2, 2-digit
    year=group 3). Raises AttributeError when the line has no date prefix.
    """
    match = re.search(pattern, str(line))
    composed = match.group(2) + '/' + match.group(1) + '/' + match.group(3)
    return datetime.strptime(composed, '%d/%m/%y').date()
def filter_time(line):
    """Extract the message time from *line* and return it as a datetime.time.

    Converts the exported 12-hour AM/PM clock to 24-hour form. Fix: the
    original turned 12 PM (noon) into hour 0 and left 12 AM (midnight) as
    hour 12; both are now handled correctly (12 AM -> 0, 12 PM -> 12).
    Raises AttributeError when the line has no time prefix.
    """
    match = re.search(pattern, str(line))
    hour = int(match.group(4))
    mins = match.group(5)
    meridiem = match.group(6)
    if meridiem == 'PM' and hour != 12:
        hour += 12
    elif meridiem == 'AM' and hour == 12:
        hour = 0
    return datetime.strptime(str(hour) + '::' + mins, '%H::%M').time()
def filter_pi(data):
    """Split the sender out of a chat line; return a (name, number) pair.

    Whichever of the pair is unknown is the literal 'NA': lines whose sender
    matches `number_pattern` yield ('NA', number), others yield (name, 'NA').
    Returns None (implicitly) for lines with no " - " sender section.
    """
    try:
        name_number = data.split(' - ')[1].split(':')[0].split(' joined ')[0]
    except IndexError:
        # Fix: narrowed from a bare `except:` -- only "no ' - ' section"
        # (continuation lines etc.) is an expected, ignorable failure.
        return None
    if re.match(number_pattern, name_number):
        return ('NA', name_number)
    return (name_number, 'NA')
def filter_message(data):
    """Return the message text after the last ':' of a chat line.

    Returns None for lines with no " - " section. Fix: narrowed the bare
    `except:` to IndexError, the only expected failure here.
    """
    try:
        return data.split(' - ')[1].split(':')[-1]
    except IndexError:
        return None
def insert_data_into_database(data, errorFile='unprocessed_data.txt', groupname='DevopsandCloudBabies'):
    # Parse each raw chat line and insert one row per parseable line into the
    # group's table; lines the filter_* helpers cannot parse are appended to
    # *errorFile* instead.
    # NOTE(review): groupname is interpolated into the SQL text -- trusted
    # callers only. Row values themselves do go through `?` placeholders.
    global id  # NOTE(review): declared global but never read or written below.
    con = db_connect()
    cursor = con.cursor()
    insert_query = 'INSERT INTO ' + groupname + ' VALUES (?,?,?,?,?,?)'
    for line in data:
        try:
            date = filter_date(line)
            time = filter_time(line)
            pi_info = filter_pi(line)
            name = pi_info[0]
            number = pi_info[1]
            message = filter_message(line)
            record = (str(date), str(time), name, groupname, number, message)
            print(record)
            cursor.execute(insert_query, record)
            con.commit()
        except AttributeError:
            # A filter helper failed to match this line (e.g. a multi-line
            # message continuation) -- stash the raw line for later review.
            with open(errorFile, 'a+', encoding='utf-8') as errorfile:
                errorfile.writelines(line + '\n')
            pass
    con.close()
def main():
    # Wire the pipeline together: ensure the group table exists, read the
    # chat export, and load it into the database.
    groupname = 'DevopsandCloudBabies'
    con = db_connect()
    create_tables_groupchat(con, groupname)
    data = read_file(whatsapp_chat_data)
    insert_data_into_database(data)
main()  # NOTE(review): runs on import; consider an `if __name__ == '__main__':` guard.
|
import os
import sys
import xml.etree.ElementTree as ET
import subprocess
import shutil
def main():
    """Run Android Lint on every changed Gradle module and summarize results.

    argv[1] is the project root; optional argv[2] is a product flavor.
    Changed file paths are read from stdin, mapped to their top-level Gradle
    module, and `<module>:lint[Flavor]` is run per module. Reports are copied
    into <root>/build/lint and the process exits non-zero when lint fails or
    reports Error/Fatal issues.
    """
    product_flavor = None
    if len(sys.argv) < 2 or not os.path.exists(sys.argv[1]):
        print("Error: There is missing argument.")
        sys.exit(1)
    project_root = sys.argv[1]
    if len(sys.argv) >= 3:
        product_flavor = sys.argv[2]

    # Derive the set of touched top-level modules from the file list on stdin.
    modules = {}
    for line in sys.stdin:
        line = line.strip()
        if line != '' and os.path.exists(line):
            module = line[len(project_root):]
            while module[0] == os.sep:
                module = module[1:]
            folders = module.split(os.sep)
            # Skip files at the repo root and folders without a build.gradle.
            if len(folders) == 1 or not os.path.exists(project_root + "/" + folders[0] + "/build.gradle"):
                continue
            modules[folders[0]] = 1

    lint_results_dir = project_root + "/build/lint"
    if os.path.exists(lint_results_dir):
        shutil.rmtree(lint_results_dir)
    os.makedirs(lint_results_dir)

    failedModules = {}
    issues = {}
    # Fix: the original concatenated `":lint" + PRODUCT_FLAVOR`, which raises
    # TypeError when no flavor argument is supplied (PRODUCT_FLAVOR is None).
    # The report file name lowercases the flavor's first letter, matching the
    # lint-results-<flavor> naming of the Android Gradle plugin.
    task_suffix = ""
    report_filename = "lint-results"
    if product_flavor is not None:
        task_suffix = product_flavor
        report_filename += "-" + product_flavor[0].lower() + product_flavor[1:]

    for module in modules:
        issuesModule = issues[module] = {}
        gradlew = project_root + "/gradlew"
        report_dir = project_root + "/" + module + "/build/reports"
        report_xml = report_dir + "/" + report_filename + ".xml"
        report_html = report_dir + "/" + report_filename + ".html"
        lint_task = module + ":lint" + task_suffix
        print(gradlew + " " + lint_task)
        process = subprocess.Popen([gradlew, lint_task])
        process.wait()
        if process.returncode != 0:
            print("Error: Lint failed on module " + module)
            failedModules[module] = 1
            continue
        print("Parsing: " + report_xml)
        if not os.path.exists(report_xml):
            print("Error: Lint result not found on module " + module)
            failedModules[module] = 1
            continue
        # Tally issues by severity from the XML report.
        root = ET.parse(report_xml).getroot()
        for issue in root:
            severity = issue.attrib['severity']
            issuesModule[severity] = issuesModule.get(severity, 0) + 1
        shutil.copyfile(report_xml, lint_results_dir + "/" + module + "-" + report_filename + ".xml")
        shutil.copyfile(report_html, lint_results_dir + "/" + module + "-" + report_filename + ".html")

    abort = False
    print("Lint Result:")
    if len(issues) == 0:
        print("No changed module.")
    else:
        errors = 0
        for module in issues:
            issuesModule = issues[module]
            print(module)
            if len(issuesModule) != 0:
                print("Found issue(s):")
                issueStr = ""
                for sev in issuesModule:
                    issueStr += str(issuesModule[sev]) + " " + sev + "(s); "
                    if sev == "Error" or sev == "Fatal":
                        errors += issuesModule[sev]
                print(issueStr)
            else:
                print("No issue found.")
            print("")
        if errors != 0:
            print("Error: Lint failed because there are some error or fatal issues found.")
            abort = True
    if len(failedModules) != 0:
        print("Error: Lint failed on several modules:")
        moduleList = ""
        for module in failedModules:
            moduleList += module + "; "
        print(moduleList)
        abort = True
    if abort:
        sys.exit(1)

if __name__ == "__main__":
    main()
import pytest
from polargraph import Polargraph
import env
# Immediate commands are interpreted quickly
# Command templates for the Polargraph serial protocol; placeholders are
# filled with str.format() before sending. Each command is terminated by END.
CMD_SETPENWIDTH = 'C02,{pen_width},END'
CMD_SETMACHINESIZE = 'C24,{width},{height},END'
CMD_GETMACHINEDETAILS = 'C26,END'
# CMD_RESETEEPROM = 'C27,END'
CMD_SETMACHINEMMPERREV = 'C29,{mm_per_rev},END'
CMD_SETMACHINESTEPSPERREV = 'C30,{steps_per_rev},END'
CMD_SETMOTORSPEED = 'C31,{motor_speed},END'
CMD_SETMOTORACCEL = 'C32,{motor_accel},END'
CMD_SETMACHINESTEPMULTIPLIER = 'C37,{step_multiplier},END'
CMD_SETPENLIFTRANGE = 'C45,{down_pos},{up_pos},{write},END'
# CMD_SET_ROVE_AREA = 'C21,{pos_x},{pos_y},{width},{height},END'
# CMD_SET_DEBUGCOMMS = 'C47,END'
# CMD_MODE_STORE_COMMANDS = 'C33,{newfilename},{replace},END'
# CMD_MODE_EXEC_FROM_STORE = 'C34,(unknown),END'
# CMD_MODE_LIVE = 'C35,END'
# Variant of C45 without the persist flag; currently unused by the tests below.
CMD_SETPENLIFTRANGE_TEST_RANGE = 'C45,{down_pos},{up_pos},END'
class TestImmediateTests(object):
    """Integration tests for the Polargraph 'immediate' serial commands.

    Requires real hardware reachable on env.COMM_PORT. Fix applied: the
    original used Python 2-only `print` statements; they are now
    parenthesized calls, valid on both Python 2 and Python 3.
    """
    pg = Polargraph()

    @classmethod
    def setup_class(cls):
        # Open the serial link once for the whole class; bail out hard if the
        # port cannot be opened, since every test needs it.
        print('Setting up')
        opened = cls.pg.start_serial_comms(comm_port=env.COMM_PORT)
        if not opened:
            print('There was a problem opening the communications port (%s). '
                  'It should be entered exactly as you see it in'
                  'your operating system.' % env.COMM_PORT)
            exit()

    @classmethod
    def teardown_class(cls):
        print('tearing down')
        cls.pg.close()

    def get_response_to(self, command):
        """Send *command* and collect response lines until the machine echoes it back."""
        self.pg.write_command(command)
        response_lines = list()
        while 'MSG,C:{}'.format(command) not in response_lines:
            response_lines.append(self.pg.read_line())
        return response_lines

    def get_machine_spec(self):
        """Request and return the machine's full specification lines."""
        command = CMD_GETMACHINEDETAILS
        response = self.get_response_to(command)
        return response

    def get_speed_spec(self):
        """Extract the current speed/acceleration from the PGSPEED spec line."""
        speed_spec = [x for x in self.get_machine_spec() if x.startswith('PGSPEED')]
        assert len(speed_spec) == 1
        splitted_spec = speed_spec[0].split(',')
        current_speed = splitted_spec[1]
        current_accel = splitted_spec[2]
        return {'speed': current_speed, 'accel': current_accel}

    def test_get_machine_details(self):
        spec = self.get_machine_spec()
        expected = \
            ['PGSIZE', 'PGMMPERREV', 'PGSTEPSPERREV', 'PGSTEPMULTIPLIER',
             'PGLIFT', 'PGSPEED', 'PGPENWIDTH']
        assert len(spec) >= len(expected)
        for response in expected:
            match = [x for x in spec if x.startswith(response)]
            assert len(match) == 1

    @pytest.mark.skip()
    def test_set_pen_width(self):
        command = CMD_SETPENWIDTH.format(pen_width=1.0)
        assert 'MSG,I,Changed Pen width to 1.00mm' in self.get_response_to(command)
        assert 'PGPENWIDTH,1.00,END' in self.get_machine_spec()
        command = CMD_SETPENWIDTH.format(pen_width=1.5)
        assert 'MSG,I,Changed Pen width to 1.50mm' in self.get_response_to(command)
        assert 'PGPENWIDTH,1.50,END' in self.get_machine_spec()

    def test_set_motor_speed(self):
        command = CMD_SETMOTORSPEED.format(motor_speed=1000)
        assert 'New max speed: 1000.00' in self.get_response_to(command)
        assert '1000.00' == self.get_speed_spec().get('speed')
        command = CMD_SETMOTORSPEED.format(motor_speed=2000)
        assert 'New max speed: 2000.00' in self.get_response_to(command)
        assert '2000.00' == self.get_speed_spec().get('speed')

    def test_set_motor_accel(self):
        command = CMD_SETMOTORACCEL.format(motor_accel=1000)
        assert 'New acceleration: 1000.00' in self.get_response_to(command)
        assert '1000.00' == self.get_speed_spec().get('accel')
        command = CMD_SETMOTORACCEL.format(motor_accel=2000)
        assert 'New acceleration: 2000.00' in self.get_response_to(command)
        assert '2000.00' == self.get_speed_spec().get('accel')

    def test_set_mm_per_rev(self):
        command = CMD_SETMACHINEMMPERREV.format(mm_per_rev=105)
        assert 'New mm per rev: 105.00' in self.get_response_to(command)
        assert 'PGMMPERREV,105.00,END' in self.get_machine_spec()
        command = CMD_SETMACHINEMMPERREV.format(mm_per_rev=95)
        assert 'New mm per rev: 95.00' in self.get_response_to(command)
        assert 'PGMMPERREV,95.00,END' in self.get_machine_spec()

    def test_set_steps_per_rev(self):
        command = CMD_SETMACHINESTEPSPERREV.format(steps_per_rev=400)
        assert 'New steps per rev: 400' in self.get_response_to(command)
        assert 'PGSTEPSPERREV,400,END' in self.get_machine_spec()
        command = CMD_SETMACHINESTEPSPERREV.format(steps_per_rev=200)
        assert 'New steps per rev: 200' in self.get_response_to(command)
        assert 'PGSTEPSPERREV,200,END' in self.get_machine_spec()

    def test_set_step_multiplier(self):
        command = CMD_SETMACHINESTEPMULTIPLIER.format(step_multiplier=16)
        assert 'New step multiplier: 16' in self.get_response_to(command)
        assert 'PGSTEPMULTIPLIER,16,END' in self.get_machine_spec()
        command = CMD_SETMACHINESTEPMULTIPLIER.format(step_multiplier=8)
        assert 'New step multiplier: 8' in self.get_response_to(command)
        assert 'PGSTEPMULTIPLIER,8,END' in self.get_machine_spec()

    def test_set_size(self):
        command = CMD_SETMACHINESIZE.format(width=500, height=600)
        assert 'New size: 500, 600' in self.get_response_to(command)
        assert 'PGSIZE,500,600,END' in self.get_machine_spec()
        command = CMD_SETMACHINESIZE.format(width=700, height=800)
        assert 'New size: 700, 800' in self.get_response_to(command)
        assert 'PGSIZE,700,800,END' in self.get_machine_spec()

    def test_set_lift_range(self):
        command = CMD_SETPENLIFTRANGE.format(down_pos=20, up_pos=60, write=1)
        responses = self.get_response_to(command)
        assert 'Down: 20' in responses
        assert 'Up: 60' in responses
        assert 'PGLIFT,20,60,END' in self.get_machine_spec()
        command = CMD_SETPENLIFTRANGE.format(down_pos=90, up_pos=180, write=1)
        responses = self.get_response_to(command)
        assert 'Down: 90' in responses
        assert 'Up: 180' in responses
        assert 'PGLIFT,90,180,END' in self.get_machine_spec()
|
# Generated by Django 2.2.4 on 2019-08-19 23:35
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial schema for the stock-trading app: Profile (one-to-one with the
    # auth user, starting cash 10000), Stock (unique symbol + name), and
    # Transaction (quantity/price/time, FK to Stock and to Profile via
    # related_name='transactions'). Generated file -- do not reorder.
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('cash', models.FloatField(default=10000)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Stock',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('symbol', models.CharField(max_length=10, unique=True)),
                ('name', models.CharField(max_length=80)),
            ],
        ),
        migrations.CreateModel(
            name='Transaction',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('quantity', models.FloatField()),
                ('price', models.FloatField()),
                ('time', models.DateTimeField(auto_now_add=True)),
                ('stock', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='stocktrader.Stock')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='transactions', to='stocktrader.Profile')),
            ],
        ),
    ]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-07-30 23:19
from __future__ import unicode_literals
from django.db import migrations
import django_countries.fields
class Migration(migrations.Migration):
    # Alters AppUser.country to a django-countries CountryField (2-letter
    # ISO code). Generated file -- do not reorder.
    dependencies = [
        ('accounts', '0006_appuser_country'),
    ]
    operations = [
        migrations.AlterField(
            model_name='appuser',
            name='country',
            field=django_countries.fields.CountryField(max_length=2),
        ),
    ]
|
import os

def relative_path(*segments):
    """Join *segments* onto the parent directory of this source file."""
    here = os.path.dirname(os.path.realpath(__file__))
    return os.path.join(here, '..', *segments)
|
from sqlwrapper import *
from Fetch_Current_Datetime import *
from collections import defaultdict
import json
def Query_Table_Status(request):
    """Return every table's status, payment type and order timings as JSON,
    plus counts of available / unavailable / payment-pending tables."""
    get_table_details = json.loads(dbget("select login_status.login_status,payment_type.*,table_status.table_status,\
        table_details.*,order_timings.order_time,order_timings.bill_request_time,order_timings.close_time from table_details\
        left join table_status on table_status.table_status_id = table_details.table_status_id\
        left join login_status on login_status.login_status_id = table_details.login_status_id\
        left join order_timings on table_details.table_no = order_timings.table_no\
        left join payment_type on payment_type.payment_type_id = table_details.payment_type_id order by table_details.table_no"))
    # Aggregate counts over the joined rows (table_status / payment_type are
    # textual columns from the lookup tables).
    available_count = len(list(filter(lambda i: i['table_status'] == 'AVAILABLE', get_table_details)))
    unavailable_count = len(list(filter(lambda i: i['table_status'] == 'UN AVAILABLE', get_table_details)))
    payment_count = len(list(filter(lambda i: i['payment_type'] != 'NOPE', get_table_details)))
    return json.dumps({"Return": "Record Retrived Successfully","ReturnCode": "RRS",
        "Available_count":available_count,"unavailable_count":unavailable_count,"payment_count":payment_count,"Returnvalue":get_table_details,"Status": "Success","StatusCode": "200"},indent = 4)
def Update_Food_Order_Status_Item(request):
    """Set one order line's status and raise the waiter/today-order flags.

    NOTE(review): request values are concatenated into the SQL text -- switch
    to parameterized queries if dbput supports placeholders. The bare except
    also hides real errors; narrow it (e.g. to KeyError) when possible.
    """
    d = request.json
    try:
        dbput("update food_order set order_status_id = '"+str(d['order_status_id'])+"' where order_details_id = '"+str(d['order_details_id'])+"'")
        dbput("update users set todayorder_flag=1,fdorderwaiter_flag=1")
        return json.dumps({"Return": "Record Updated Successfully","ReturnCode": "RUS","Status": "Success","StatusCode": "200"},indent = 4)
    except:
        return json.dumps({"Return":"Wrong Value Error","ReturnCode":"WVE"})
def Update_Order_Status(request):
    """Mark the food_order rows matched by the request payload as status 6
    (served, judging by ServeAll_Food_Items below -- confirm)."""
    d = request.json
    s = {'order_status_id' : 6}
    gensql('update','food_order',s,d)
    return json.dumps({"Return": "Record Updated Successfully","ReturnCode": "RUS","Status": "Success","StatusCode": "200"},indent = 4)
def Update_ReadyforPayment_Status(request):
    """If every countable item for the table is served, stamp the bill-request
    time and flip the table to status 3 (ready for payment); otherwise report
    how many items are still pending.

    NOTE(review): request values are concatenated into the SQL text -- use
    placeholders if dbget/dbput support them.
    """
    d = request.json
    try:
        # Count unserved items for this table, ignoring category 62
        # (apparently the "extras" category, see Query_Notification_Food_Items)
        # and rows with notification_status_id 1.
        item_count = json.loads(dbget("select count(*) as item_count from food_order \
            join food_menu on food_order.food_id = food_menu.food_id \
            join food_category on food_menu.item_category_id = food_category.category_id\
            where order_status_id!=7 and table_no='"+str(d['table_no'])+"' and notification_status_id!=1 and \
            food_category.category_id!=62"))[0]['item_count']
        print("item_count", item_count)
        if 0 == item_count:
            get_order_no = json.loads(dbget("select order_no from food_order \
                where order_status_id != 7 and table_no = '"+str(d['table_no'])+"' "))
            dbput("update order_timings set bill_request_time = '"+str(application_datetime())+"' \
                where order_no = '"+str(get_order_no[0]['order_no'])+"'")
            e = {"table_no":d['table_no']}
            s = {'table_status_id' : 3,'payment_type_id':d['payment_type_id']}
            gensql('update','table_details',s,e)
            dbput("update resturants set tablestatus_flag=1")
            return json.dumps({"Return": "Record Updated Successfully","ReturnCode": "RUS",
                "Status": "Success","StatusCode": "200"},indent = 4)
        else:
            return json.dumps({"Return": ""+str(item_count)+" item(s) is not served","ReturnCode": "INS",
                "Status": "Success","item_count":item_count,"StatusCode": "200"},indent = 4)
    except:
        # NOTE(review): bare except hides real failures; narrow when possible.
        return json.dumps({"Return":"Wrong Value Error","ReturnCode":"WVE"})
def Update_Table_Available_Status(request):
    """Close out a table: archive its order timings into history (with the
    bill totals from the payload), free the table (status 1), and mark all of
    its order lines as status 7 (closed).

    NOTE(review): payload values are concatenated into a multi-statement SQL
    string -- parameterize if dbput supports it.
    """
    d = request.json
    dbput("update order_timings set close_time = '"+str(application_datetime())+"' where order_no = '"+str(d['order_no'])+"';\
        INSERT INTO history_order_timings(order_no, order_time, bill_request_time, close_time,table_no) \
        SELECT order_no, order_time, bill_request_time, close_time,table_no FROM order_timings where table_no = '"+str(d['table_no'])+"';\
        update history_order_timings set total_items = '"+str(d['total_items'])+"',\
        sub_total = '"+str(d['sub_total'])+"',total_offers = '"+str(d['total_offers'])+"',\
        total_amount_offers = '"+str(d['total_amount_offers'])+"',cgst_amount = '"+str(d['CGST_Amount'])+"',sgst_amount='"+str(d['SGST_Amount'])+"',grand_total = '"+str(d['grand_total'])+"'\
        where order_no = '"+str(d['order_no'])+"';\
        delete from order_timings where table_no = '"+str(d['table_no'])+"';")
    # Free the table and reset its payment type to the default (3).
    s = {'table_status_id' : 1,'payment_type_id':3}
    z = {'table_no':d['table_no']}
    gensql('update','table_details',s,z)
    dbput("update resturants set tablestatus_flag=1")
    # Close every order line for this table + order number.
    e = {'order_status_id':7}
    z.update({"order_no":d['order_no']})
    gensql('update','food_order',e,z)
    dbput("update users set todayorder_flag=1,fdorderwaiter_flag=1")
    return json.dumps({"Return": "Record Updated Successfully","ReturnCode": "RUS","Status": "Success","StatusCode": "200"},indent = 4)
def Get_Order_Item_Table(request):
    """Build the bill summary for one table as JSON: per-dish totals and
    quantities, offer deductions, 6% CGST + 6% SGST, and the grand total.

    Fix: the aggregation loops reused the name `d`, clobbering the request
    payload that is read again when building the response; the loops now use
    their own variable so `d` always refers to request.json.
    NOTE(review): table_no is concatenated into the SQL text -- parameterize
    if dbget supports placeholders.
    """
    d = request.json
    get_orders = json.loads(dbget("select food_type.*, \
        order_status.order_status_desc,food_menu.food_name,food_menu.offer_value,food_menu.price, \
        food_category.category, food_order.* from food_order\
        left join food_menu on food_menu.food_id = food_order.food_id \
        left join food_category on food_category.category_id =food_menu.item_category_id \
        left join order_status on order_status.order_status_id = food_order.order_status_id \
        left join food_type on food_type.food_type_id = food_menu.food_type_id \
        where food_order.table_no = '"+str(d['table_no'])+"' and food_order.order_status_id != 7 \
        and food_menu.item_category_id!=62 order by datetime"))
    # Total price per dish.
    price_totals = defaultdict(int)
    for order in get_orders:
        price_totals[order['food_name']] += (order['price'] * order['quantity'])
    finals = [{'food_name': food_name, 'total_price': price} for food_name, price in price_totals.items()]
    # Quantity per dish, then merged into the per-dish entries above.
    qty_totals = defaultdict(int)
    for order in get_orders:
        qty_totals[order['food_name']] += order['quantity']
    finals_value = [{'Names': food_name, 'quantity': quantity} for food_name, quantity in qty_totals.items()]
    for final in finals:
        for finals_va in finals_value:
            if final['food_name'] == finals_va['Names']:
                final['quantity'] = finals_va['quantity']
    if len(get_orders) != 0:
        sub_total = sum([x['total_price'] for x in finals])
        offer_value = sum([x['offer_value'] * x['quantity'] for x in get_orders])
        total_value = sub_total - offer_value
        food_menu_details = {"table_no": d['table_no'], "order_no": get_orders[0]['order_no'],
            "items": finals, 'grand_total': "{0:.2f}".format(sub_total + (total_value * 6) / 100 + (total_value * 6) / 100),
            "CGST_Amount": "{0:.2f}".format((total_value * 6) / 100),
            "SGST_Amount": "{0:.2f}".format((total_value * 6) / 100),
            "total_items": len(finals), "sub_total": sub_total,
            "total_amount_offers": total_value,
            "total_offers": offer_value}
    else:
        food_menu_details = {"table_no": d['table_no'], "order_no": 0,
            "items": get_orders, "total_items": 0, "sub_total": 0,
            "CGST_Amount": 0, "SGST_Amount": 0, "total_amount_offers": 0,
            "total_offers": 0, "GST_Amount": 0, "grand_total": 0}
    return (json.dumps({"Return": "Record Retrived Successfully", "ReturnCode": "RRS",
        "Returnvalue": food_menu_details, "Status": "Success", "StatusCode": "200"}, indent=4))
def Update_Notification_Status(request):
    """Set the notification status on a batch of order lines; status 2 also
    stamps the notification time. Raises the kitchen/waiter UI flags after.

    NOTE(review): order_details_id values are joined into an IN (...) clause
    as raw text -- parameterize if dbput supports it.
    """
    d = request.json
    #values = ','.join("'{0}'".format(x) for x in d['order_details_id'])
    values = ', '.join(map(str, d['order_details_id']))
    print(values)
    if d['notification_status_id'] == 2:
        dbput("update food_order set notification_status_id='"+str(d['notification_status_id'])+"',\
            notification_time='"+str(application_datetime().strftime("%Y-%m-%d %H:%M:%S"))+"' where order_details_id in ("+values+")")
    else:
        dbput("update food_order set notification_status_id='"+str(d['notification_status_id'])+"' where order_details_id in ("+values+")")
    dbput("update resturants set tablestatus_flag=1;"
        "update users set todayorder_flag=1,fdorderwaiter_flag=1,notifditem_flag=1")
    return json.dumps({"Return": "Record Updated Successfully","ReturnCode": "RUS","Status": "Success","StatusCode": "200"},indent = 4)
def ServeAll_Food_Items(request):
    """Mark a batch of order lines as served (status 6, notification 2) with
    the current timestamp, then raise the UI refresh flags.

    NOTE(review): order_details_id values are joined into an IN (...) clause
    as raw text -- parameterize if dbput supports it.
    """
    d = request.json
    values = ', '.join(map(str, d['order_details_id']))
    print(values)
    dbput("update food_order set notification_status_id='2',order_status_id = '6',\
        notification_time='"+str(application_datetime().strftime("%Y-%m-%d %H:%M:%S"))+"' where order_details_id in ("+values+")")
    dbput("update users set todayorder_flag=1,fdorderwaiter_flag=1,notifditem_flag=1")
    return json.dumps({"Return": "Record Updated Successfully","ReturnCode": "RUS","Status": "Success","StatusCode": "200"},indent = 4)
def Query_Notification_Food_Items(request):
    """Return pending serve notifications grouped first by notification time,
    then by table, as JSON.

    Two sources are merged: served items awaiting notification (status 6 /
    notification 2) and category-62 "extra" items with notification 3.
    """
    notify_time, final_results, table_no = [], [], []
    get_notifications = json.loads(dbget("select notification_status,notification_time, food_order.*,food_menu.food_name from food_order\
        left join food_menu on food_menu.food_id =food_order.food_id \
        left join notification_status on notification_status.notification_status_id = food_order.notification_status_id\
        where order_status_id =6 and food_order.notification_status_id =2\
        order by food_order.notification_time"))
    get_extra_item = json.loads(dbget("select notification_status,notification_time, food_order.*,food_menu.food_name from food_order\
        left join food_menu on food_menu.food_id =food_order.food_id \
        left join notification_status on notification_status.notification_status_id = food_order.notification_status_id\
        where food_order.notification_status_id =3 and food_menu.item_category_id =62 \
        order by food_order.notification_time"))
    get_notifications = get_notifications + get_extra_item
    # Pass 1: one bucket per distinct notification time.
    for get_notification in get_notifications:
        if get_notification['notification_time'] not in notify_time:
            notify_time.append(get_notification['notification_time'])
            final_results.append({"notification_time":get_notification['notification_time'],"table_records":[]})
    # Pass 2: one table record per table within each time bucket.
    for get_notification in get_notifications:
        for final_result in final_results:
            if get_notification['notification_time'] == final_result['notification_time']:
                if not any(d['table_no'] == get_notification['table_no'] for d in final_result['table_records']):
                    final_result['table_records'].append({"table_no":get_notification['table_no'],"items":[]})
    # Pass 3: attach each notification row to its (time, table) record.
    for get_notification in get_notifications:
        for final_result in final_results:
            if get_notification['notification_time'] == final_result['notification_time']:
                for d in final_result['table_records']:
                    if d['table_no'] == get_notification['table_no']:
                        d['items'].append(get_notification)
    return json.dumps({"Return": "Record Retrived Successfully","ReturnCode": "RRS","Notification_Count":len(final_results),"Returnvalue":final_results,"Status": "Success","StatusCode": "200"},indent = 4)
|
from common import TreeLinkNode
class Solution:
    """Populates `next` pointers: each tree node is linked to the node
    immediately to its right on the same level (None for the rightmost)."""

    def connect(self, root):
        """Wire up `next` level by level via breadth-first traversal; returns None."""
        if not root:
            return None
        level = [root]
        while level:
            next_level = []
            last = len(level) - 1
            for i, node in enumerate(level):
                node.next = None if i == last else level[i + 1]
                if node.left:
                    next_level.append(node.left)
                if node.right:
                    next_level.append(node.right)
            level = next_level
if __name__ == '__main__':
    # Smoke test: build a complete 3-level tree and wire up the next pointers.
    solution = Solution()
    tree = TreeLinkNode.list2Tree([1, 2, 3, 4, 5, 6, 7])
    solution.connect(tree)
    print(tree)
|
from PyQt5.QtWidgets import QTreeWidgetItem
class Ext_Item(QTreeWidgetItem):
    """QTreeWidgetItem that carries an extra application-level identifier."""
    def __init__(self, parent, id_item=None):
        super(Ext_Item, self).__init__(parent)
        # Application-side id for this row; None when not yet assigned.
        self.id_item = id_item
|
import requests

# Smoke-test script: exercise the local test server with each HTTP verb and
# print the status code and body of every response.

# test HTTP GET method
response = requests.get('http://localhost:3000/')
print("response status code: {}".format(response.status_code))
print("response body: {}".format(response.text))
# test HTTP PUT method
response = requests.put('http://localhost:3000/', data={"param": "value"})
print("response status code: {}".format(response.status_code))
print("response body: {}".format(response.text))
# test HTTP POST method
response = requests.post('http://localhost:3000/', data={"param": "value"})
print("response status code: {}".format(response.status_code))
print("response body: {}".format(response.text))
# test HTTP GET method with JSON endpoint
response = requests.get('http://localhost:3000/json')
json_response = response.json()
print("response status code: {}".format(response.status_code))
print("response body: {}".format(json_response))
# test HTTP POST method with JSON endpoint
postJSON = {'key': 'value'}
response = requests.post('http://localhost:3000/json', json=postJSON)
print("response status code: {}".format(response.status_code))
print("response body: {}".format(response.text))
# test HTTP POST method with INVALID JSON data
# this should return an error message, as the JSON data is improperly formatted
response = requests.post('http://localhost:3000/json', json="invalid JSON data")
print("response status code: {}".format(response.status_code))
print("response body: {}".format(response.text))
|
# Report the position(s) of "f" in the input line: nothing when absent,
# one index when it occurs once, first and last index otherwise.
s = input()
first = s.find("f")
if first == -1:
    exit()
last = s.rfind("f")
if first == last:
    print(first)
else:
    print(first, last)
'''
Download web pages sequentially and with one thread per page,
and compare the two execution times
'''
import urllib.request
import time
import os
import threading
# Pages to fetch; the second-level domain of each URL names the saved file.
hosts = ["http://www.ox.ac.uk", "https://www.cam.ac.uk",
         "http://www.canterbury.ac.nz", "http://www.lincoln.ac.nz",
         "http://www.unitec.ac.nz", "http://www.victoria.ac.nz",
         "http://www.nzherald.co.nz", "https://www.ucl.ac.uk",
         "http://www.aut.ac.nz", "http://www.massey.ac.nz"]
def get_page(url, fname):
    """Download *url* and save the response body to the file *fname*."""
    print("Getting page from", url)
    urllib.request.urlretrieve(url, fname) #Get image and save it in a file
def sequentialProg():
    """Fetch every host one after another and report the elapsed time."""
    started = time.time()
    for page in hosts:
        # Second-level domain (e.g. "ox" from www.ox.ac.uk) names the file.
        domain = os.path.split(page)[1].split('.')[1]
        get_page(page, "pages//" + domain + ".html")
    print("Execution time: {} seconds".format(time.time() - started))
def threadedProg():
    """Fetch every host on its own thread, wait for all, report elapsed time."""
    started = time.time()
    workers = []
    for page in hosts:
        # Same naming scheme as the sequential version.
        domain = os.path.split(page)[1].split('.')[1]
        worker = threading.Thread(
            name="gp" + domain,
            target=get_page,
            args=[page, "pages//" + domain + ".html"],
        )
        workers.append(worker)
        worker.start()
    for worker in workers:
        worker.join()
    print("Execution time: {} seconds".format(time.time() - started))
def main():
    """Run the sequential downloader, then the threaded one, for comparison."""
    print("Sequential Program:\n")
    sequentialProg()
    print("\nThreaded Program:\n")
    threadedProg()
if __name__ == "__main__":
    main()
|
from tensorflow_utils import *
from tqdm import tqdm
import numpy as np
class DNN:
    """Fully-connected feed-forward network built on the tensorflow_utils
    ``full_layer`` helper (TF1-style graph + Session API).

    Typical use: build() -> loss_and_optimizer() -> train() -> predict().
    """
    def __init__(self,no_hidden_layer,hidden_node_list,hidden_and_output_layer_activation_list):
        """
        no_hidden_layer -- number of hidden layers.
        hidden_node_list -- units per hidden layer (len == no_hidden_layer).
        hidden_and_output_layer_activation_list -- one activation name per
            hidden layer plus one for the output layer
            (len == no_hidden_layer + 1).
        """
        self.no_hidden_layer = no_hidden_layer
        self.hidden_and_output_layer_activation_list = hidden_and_output_layer_activation_list
        self.hidden_node_list = list(hidden_node_list)
        # BUG FIX: the original wrapped (condition, message) in parentheses,
        # producing an always-truthy tuple, so these checks never fired.
        assert self.no_hidden_layer == len(self.hidden_node_list), \
            "Length of hidden node list is not matching with number of hidden layers"
        assert self.no_hidden_layer + 1 == len(self.hidden_and_output_layer_activation_list), "Mis Match"
        self.model_tesors = {}  # name -> tensor for every node of the graph
    def build(self,input_vector_shape,output_vector_shape):
        """Create the input/output placeholders and the layer stack."""
        self.model_tesors['input'] = tf.placeholder(shape=[None,input_vector_shape],dtype=tf.float32,name="input")
        self.model_tesors['output'] = tf.placeholder(shape=[None,output_vector_shape],dtype=tf.float32,name="output")
        base_name = "hidden_"
        prev_tensor = self.model_tesors['input']
        for cnt,i in enumerate(self.hidden_node_list):
            tesor_name = base_name+str(cnt)
            self.model_tesors[tesor_name] = full_layer(input_vector=prev_tensor,size=i,prefix_name=tesor_name,activation=self.hidden_and_output_layer_activation_list[cnt])
            prev_tensor = self.model_tesors[tesor_name]
        # NOTE(review): the output layer hard-codes "relu" instead of using
        # the last entry of hidden_and_output_layer_activation_list -- confirm.
        self.model_tesors['output_layer'] = full_layer(input_vector=prev_tensor,size=output_vector_shape,prefix_name="output_layer",activation="relu")
    def loss_and_optimizer(self,loss="mse",optimizer="adam",lr=0.001):
        """Attach a loss ("mse", "binary_cross", "one-hot") and an optimizer
        ("adam", "sgd") with learning rate *lr* to the graph."""
        self.lr = lr
        if loss == "mse":
            self.loss = tf.reduce_mean(tf.losses.mean_squared_error(self.model_tesors['output'],self.model_tesors['output_layer']))
        elif loss == "binary_cross":
            self.loss = tf.keras.losses.binary_crossentropy(self.model_tesors['output'],self.model_tesors['output_layer'])
        elif loss == "one-hot":
            self.loss = tf.losses.softmax_cross_entropy(self.model_tesors['output'],self.model_tesors['output_layer'])
        else:
            # Fail here instead of with an opaque AttributeError later on.
            raise ValueError("unsupported loss: " + str(loss))
        if optimizer == "adam":
            self.optimizer = tf.train.AdamOptimizer(self.lr).minimize(self.loss)
        elif optimizer == "sgd":
            self.optimizer = tf.train.GradientDescentOptimizer(self.lr).minimize(self.loss)
        else:
            raise ValueError("unsupported optimizer: " + str(optimizer))
    def train(self,X,Y,batch_size=32,no_epochs=5,save_path_model="/tmp/model.ckpt"):
        """Mini-batch training; checkpoints the session after every epoch."""
        self.saver = tf.train.Saver()
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            for epoch in range(no_epochs):
                # BUG FIX: the original fed the FULL X/Y at every step and
                # hard-coded a stride of 32, ignoring batch_size entirely.
                for start in tqdm(range(0, X.shape[0], batch_size)):
                    batch_x = X[start:start + batch_size]
                    batch_y = Y[start:start + batch_size]
                    temp_loss,_ = sess.run((self.loss,self.optimizer),
                                           feed_dict={self.model_tesors['input']: batch_x,
                                                      self.model_tesors['output']: batch_y})
                print ("loss:",temp_loss)
                save_path = self.saver.save(sess,save_path_model)
    def predict(self,X,save_path_model="/tmp/model.ckpt"):
        """Restore the latest checkpoint and run a forward pass over X."""
        with tf.Session() as sess:
            self.saver.restore(sess,save_path_model)
            temp_Y = sess.run(self.model_tesors['output_layer'],feed_dict={self.model_tesors['input']:X})
            return temp_Y
    def accuracy_mae(self,y_true,y_pred):
        """Mean absolute error per sample; only meaningful for regression
        outputs ("None"/"relu" output activation)."""
        if self.hidden_and_output_layer_activation_list[self.no_hidden_layer] == "None" or self.hidden_and_output_layer_activation_list[self.no_hidden_layer] == "relu":
            mae = np.sum(abs(y_true - y_pred))
            mae = mae / y_true.shape[0]
            return mae
        else:
            print ("Not possible")
# if __name__ == "__main__":
# dnn = DNN(1,[1],["None","None"])
# dnn.build(1,1)
# dnn.loss_and_optimizer(lr=0.1)
# x = np.arange(100)
# delta = np.random.uniform(-2,2, size=(100,))
# y = 4*x + 3 + delta
# dnn.train(x.reshape(100,1),y.reshape(100,1),batch_size=1,no_epochs=30)
# print (dnn.predict(np.array([3]).reshape(1,1)))
|
"""
Django settings for cy project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
from os.path import join, abspath, dirname
import os
# Path helpers: here() resolves paths relative to this settings module,
# root() relative to the project root one directory up.
here = lambda *x: join(abspath(dirname(__file__)), *x)
PROJECT_ROOT = here("..")
root = lambda *x: join(abspath(PROJECT_ROOT), *x)
ADMINS = (
    # ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
# NOTE(review): '*' accepts any Host header -- fine behind a trusted proxy,
# otherwise tighten before deploying.
ALLOWED_HOSTS = ['*']
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # project apps
    'journals',
    'reports',
    'property',
    'people',
    'ranking',
    'search',
    'commontag',
    # third party
    'rest_framework',
    'debug_toolbar',
)
# Django 1.8-era setting (replaced by MIDDLEWARE from Django 1.10 on).
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    # 'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'cy.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(PROJECT_ROOT, 'templates'), ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                # project-specific: exposes the current URL to templates
                'cy.context_processor.current_url',
            ],
        },
    },
]
WSGI_APPLICATION = 'cy.wsgi.application'
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
# Additional locations of static files
STATICFILES_DIRS = (
    root('static'),
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
SITE_DOMAIN = 'http://sunshine.cy.g0v.tw'
REST_FRAMEWORK = {
    'DEFAULT_FILTER_BACKENDS': ('rest_framework.filters.DjangoFilterBackend',),
    'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
    'PAGE_SIZE': 10,
    'DEFAULT_RENDERER_CLASSES': (
        'rest_framework.renderers.JSONRenderer',
        'rest_framework.renderers.BrowsableAPIRenderer',
    )
}
# Kept last so machine-local settings can override anything above.
from local_settings import *
|
#
# Sample Todo module
#
"""
Your license message ...
"""
import os, bottle, json, datetime
from appmodule import AppModule
from .modeldb import setupDB, Todo, MyS
class MyAppModule(AppModule):
    """Todo application module; sets up its database tables on init."""

    def init(self):
        DSN = self.module_config.get('DSN')
        try:
            setupDB(DSN)
        except Exception:
            # Best effort: the tables may already exist (or the DB may be
            # temporarily unavailable); keep loading the module anyway.
            # Narrowed from a bare ``except`` so KeyboardInterrupt /
            # SystemExit still propagate.
            pass
# Single module-level instance served by the host application.
app = MyAppModule()
def getApp():
    """Entry point used by the plugin loader to obtain the module instance."""
    return app
@app.get('/static/<path:path>')
def _(path):
    """Serve the module's static assets (js/css/images)."""
    return bottle.static_file(path, root=app.module_static_folder)
@app.route('/')
@app.auth('access module')
@app.view('index.tpl')
def _():
    """Render the default Todo page, titled after the logged-in user."""
    session_data = app.get_beaker_session()
    user = session_data.get('username')
    title = 'Todo for {}'.format(user) if user else 'Todo for Anonymous'
    return dict(user=user, title=title)
@app.post('/list')
@app.auth('access module')
def _():
    """Return every todo as JSON dicts, optionally filtered by a substring
    match on title or description (form field ``filter``).

    Response: {ok: True, data: [...], userid: caller id} on success,
    {ok: False, data: error string} on failure.  The list is NOT restricted
    to the caller's rows; userid is returned so the client can decide which
    rows are editable.
    """
    try:
        with MyS() as session:
            bs = app.get_beaker_session()
            userid = bs.get('userid', 0)
            # NOTE: 'filter' shadows the builtin; kept for compatibility.
            filter = bottle.request.forms.filter
            if filter:
                q = session.query(Todo) \
                    .filter(Todo.title.like(u'%{}%'.format(filter)) | Todo.description.like(u'%{}%'.format(filter))) \
                    .order_by(Todo.id.asc())
            else:
                q = session.query(Todo).order_by(Todo.id.asc())
            L = []
            for i in q.all():
                d = {'id': i.id, 'userid': i.userid,
                     'userfullname': i.userfullname, 'title': i.title,
                     'dataora': i.dataora.strftime("%d-%b-%Y %H:%M:%S"),
                     'description': i.description,
                     'done': i.done}
                L.append(d)
            ret = dict(ok=True, data=L, userid=userid)
    except Exception as ex:
        ret = dict(ok=False, data=str(ex))
    return ret
@app.post('/add')
@app.auth('access module')
def _():
    """Create a todo from the JSON form field ``data``
    ([title, description, 'yes'|'no']); the owner comes from the session.

    Returns {ok: True, data: new row id} or {ok: False, data: error string}.
    """
    try:
        with MyS() as session:
            bs = app.get_beaker_session()
            userfullname = bs.get('userfullname', u'Anonymous')
            userid = bs.get('userid', 0)
            ob = Todo()
            ob.userid = userid
            ob.userfullname = userfullname
            data = json.loads(bottle.request.forms.data)
            ob.dataora = datetime.datetime.now()
            ob.title = data[0]
            ob.description = data[1]
            ob.done = (data[2] == 'yes')
            session.add(ob)
            session.commit() # ob.id is available after commit
            obid = ob.id
            ret = dict(ok=True, data=obid)
    except Exception as ex:
        ret = dict(ok=False, data=str(ex))
    return ret
@app.post('/delete')
@app.auth('access module')
def _():
    """Delete the todo named by form field ``id`` -- only if the caller
    owns it.

    Returns {ok: True, data: deleted id (0 when not found)} or
    {ok: False, data: 'Access denied.'} when owned by someone else.
    """
    try:
        with MyS() as session:
            bs = app.get_beaker_session()
            userid = bs.get('userid', 0)
            todo_id = int(bottle.request.forms.get('id', 0))
            ob = session.query(Todo).filter(Todo.id == todo_id).first()
            if ob:
                obid = ob.id
                if userid == ob.userid:
                    session.delete(ob)
                else:
                    # Early return leaves the MyS context cleanly (no delete).
                    return dict(ok=False, data='Access denied.')
            else:
                obid = 0  # nothing to delete; still reported as ok
            ret = dict(ok=True, data=obid)
    except Exception as ex:
        ret = dict(ok=False, data=str(ex))
    return ret
@app.post('/update')
@app.auth('access module')
def _():
    """Update the todo named by form field ``id`` from the JSON form field
    ``data`` ([title, description, 'yes'|'no']) -- only if the caller owns
    it.  On success the row also gets the caller's full name and a fresh
    timestamp.

    Returns {ok: True, data: id (0 when not found)} or
    {ok: False, data: 'Access denied.'}.
    """
    try:
        with MyS() as session:
            bs = app.get_beaker_session()
            userfullname = bs.get('userfullname', u'Anonymous')
            userid = bs.get('userid', 0)
            todo_id = int(bottle.request.forms.get('id', 0))
            data = json.loads(bottle.request.forms.data)
            ob = session.query(Todo).filter(Todo.id == todo_id).first()
            if ob:
                obid = ob.id
                if userid == ob.userid:
                    ob.userfullname = userfullname
                    ob.dataora = datetime.datetime.now()
                    ob.title = data[0]
                    ob.description = data[1]
                    ob.done = (data[2] == 'yes')
                else:
                    return dict(ok=False, data='Access denied.')
            else:
                obid = 0  # unknown id; reported as ok with data == 0
            ret = dict(ok=True, data=obid)
    except Exception as ex:
        ret = dict(ok=False, data=str(ex))
    return ret
|
#!/usr/bin/env python
from std_msgs.msg import Float32
from std_msgs.msg import Float32MultiArray
from my_first.msg import Num
import numpy as np
import rospy
import serial
W = 0.5 #distance between wheel bases
R = 0.125 #radius of the wheels
head = 0.1 #head angle received from the other node
vel_flag = 0  # presumably 0/1 drive-enable flag from the head node -- confirm
vl_ref = 45  # reference PWM, left wheel
vr_ref = 46  # reference PWM, right wheel
velocity = []  # unused here; only ever assigned in commented-out code
# Serial link to the motor controller; close/reopen to reset port state.
s = serial.Serial('/dev/ttyUSB1',9600)
if s.isOpen():
    s.close()
s.open()
def callback(data):
    """ROS subscriber callback: cache the latest head angle and enable flag,
    then recompute and send the wheel velocities."""
    global vel_flag
    global head
    head = data.head
    vel_flag = data.vel_flag
    #print(data)
    vel()
#s = serial.Serial('dev/ttyUSB0',9600)
def head_subscriber():
    """Initialise the ROS node and subscribe to the head-tracking topic."""
    rospy.init_node('head_subscribe',anonymous = True)
    rospy.Subscriber('/head_vel',Num,callback)
def vel():
    """Turn the latest head angle into left/right wheel PWM commands and
    send them over serial.

    A head deflection of ``head`` asks the outer wheel for an extra wheel
    rotation of W*|head|/R; K converts that rotation into a PWM offset.
    With no deflection (head == 0) both wheels get their reference PWM,
    scaled by vel_flag (0 stops the chair).
    """
    global head
    global vel_flag
    global vl_ref
    global vr_ref
    K = 0.27  # proportionality constant between the PWM and speed of the chair
    theta_extra = W * abs(head) / R  # extra wheel rotation demanded by the turn
    if head:
        direction = abs(head) / head  # +1.0 steers right, -1.0 steers left
        print(direction)
        # (the original also computed delta_t = const/head here, but the
        # value was never used, so it has been removed)
        vl = vel_flag * (vl_ref + direction * K * theta_extra)
        vr = vel_flag * (vr_ref - direction * K * theta_extra)
    else:
        vl = vel_flag * vl_ref
        vr = vel_flag * vr_ref
    send_vel(vl, vr)
def send_vel(vel_l, vel_r):
    """Write the wheel commands to the serial port as "<left>,<right>\\n"."""
    global s
    print(vel_l, vel_r)
    # BUG FIX: the original did ``str(...).encode() + '\n'``, which raises
    # TypeError (bytes + str) on Python 3.  Append the newline first, then
    # encode the whole message.
    s.write('{},{}\n'.format(vel_l, vel_r).encode())
if __name__ == '__main__':
    head_subscriber()
    rospy.spin()  # block forever; callback() drives the motors from here on
|
import time
import json
import redis
import datetime
import requests
import traceback
import configparser
from . import privilege
from functools import wraps
from flask_cors import cross_origin
from flask import session, redirect, url_for, current_app, flash, Response, request, jsonify, abort
from ..model.login_model import user
from ..model.privilege_model import role, privilege_role
from ..model.privilege_model import privilege as privilege_model
from ..common_func import CommonFunc
from .privilege_control import privilegeFunction
from ..privilege.privilege_control import user_list_get, role_list_get, privilege_list_get, privilege_role_list_get
from ..privilege.privilege_control import permission_required
URL_PREFIX = '/privilege'  # route prefix shared by every permission check below
cf = CommonFunc()
pf = privilegeFunction()
ALLOWED_TIME_SPAN = 100  # window (seconds) after salt expiry during which edits are allowed; otherwise the user must log in again
# Fetch the user list (each user annotated with edit rights / role info)
@privilege.route('/userGet', methods=['POST'])
@permission_required(URL_PREFIX + '/userGet')
@cross_origin()
def userGet():
    """Return all users.  An admin caller may edit everyone and gets each
    user's role_name filled in; a normal caller may edit only themselves.

    NOTE(review): in the non-admin branch role_name is never attached --
    confirm whether the front end needs it there too.
    """
    try:
        user_id = int(request.get_json()['user_id'])
        role_list = role_list_get()
        user_list = user_list_get()
        # Resolve the caller's role; the last argument is a fallback index
        # (presumably ids are dense and 1-based -- confirm).
        current_role_id = cf.dict_list_get_single_element(user_list, 'id', user_id, 'role_id')
        current_user_role = cf.dict_list_get_single_element(role_list, 'id', current_role_id, 'name', current_role_id - 1)
        if current_user_role == '管理员':
            for single_user in user_list:
                single_user['is_edit'] = 1
                single_user['role_name'] = cf.dict_list_get_single_element(role_list, 'id', single_user['role_id'], 'name', single_user['role_id'] - 1)
        else:
            for single_user in user_list:
                if single_user['id'] == user_id: # users may edit their own record
                    single_user['is_edit'] = 1
                else:
                    single_user['is_edit'] = 0
        return jsonify({'code': 200, 'msg': '成功!', 'data': user_list})
    except Exception as e:
        traceback.print_exc()
        response = {'code': 500, 'msg': '失败!错误信息:' + str(e) + ',请联系管理员。', 'data': []}
        return jsonify(response), 500
# Disable a user
@privilege.route('/userDisable', methods=['POST'])
@permission_required(URL_PREFIX + '/userDisable')
@cross_origin()
def userDisable():
    """Mark the given user invalid and evict them from the redis cache."""
    try:
        uid = int(request.get_json()['user_id'])
        now = datetime.datetime.now()
        user.update(is_valid=0, update_time=now).where(user.id == uid).execute()
        pf.del_user_id_to_redis(uid)
        return jsonify({'code': 200, 'msg': '成功!'})
    except Exception as e:
        traceback.print_exc()
        return jsonify({'code': 500, 'msg': '失败!错误信息:' + str(e) + ',请联系管理员。', 'data': []}), 500
# Enable a user
@privilege.route('/userEnable', methods=['POST'])
@permission_required(URL_PREFIX + '/userEnable')
@cross_origin()
def userEnable():
    """Mark the given user valid again (no cache refresh is needed here)."""
    try:
        uid = int(request.get_json()['user_id'])
        now = datetime.datetime.now()
        user.update(is_valid=1, update_time=now).where(user.id == uid).execute()
        return jsonify({'code': 200, 'msg': '成功!'})
    except Exception as e:
        traceback.print_exc()
        return jsonify({'code': 500, 'msg': '失败!错误信息:' + str(e) + ',请联系管理员。', 'data': []}), 500
# Modify a user's role
@privilege.route('/userRoleChange', methods=['POST'])
@permission_required(URL_PREFIX + '/userRoleChange')
@cross_origin()
def userRoleChange():
    """Change a user's role, but only within ALLOWED_TIME_SPAN seconds of
    the last password verification (salt expiry); otherwise ask the client
    to re-authenticate."""
    try:
        login_name = request.get_json()['login_name']
        user_query = user.select().where(user.login_name == login_name).dicts()
        if len(user_query) == 0:
            response = {
                'code': 403,
                'msg': '用户名或密码错误!',
            }
            # BUG FIX: this used to ``return (False, response)`` -- a tuple
            # Flask cannot turn into an HTTP response.  Return JSON instead,
            # consistent with the other branches.
            return jsonify(response)
        else:
            # login_name is unique, so the loop just extracts the only row.
            for row in user_query:
                salt_expire_time = row['salt_expire_time']
            server_timestamp = datetime.datetime.now()
            if server_timestamp < salt_expire_time + datetime.timedelta(seconds=ALLOWED_TIME_SPAN):
                role_id = request.get_json()['role_id']
                user.update(role_id=role_id, update_time=datetime.datetime.now()).where(user.login_name == login_name).execute()
                response = {'code': 200, 'msg': '成功'}
            else:
                response = {'code': 403, 'msg': '登录状态已过期,请返回并重新验证密码'}
            return jsonify(response)
    except Exception as e:
        traceback.print_exc()  # consistent with the other handlers
        # BUG FIX: 'msg' used to carry the exception object itself, which
        # jsonify cannot serialise; stringify it.
        response = {'code': 500, 'msg': str(e), 'data': {}}
        return jsonify(response), 500
# Delete a user (soft delete; the user must already be disabled)
@privilege.route('/userDelete', methods=['POST'])
@permission_required(URL_PREFIX + '/userDelete')
@cross_origin()
def userDelete():
    """Soft-delete a user (is_valid = -1).  Only allowed once the user is
    disabled (is_valid == 0); otherwise the request is rejected."""
    try:
        user_id = int(request.get_json()['user_id'])
        user_status = user.get(user.id == user_id).is_valid
        if user_status == 0:
            user.update(is_valid=-1, update_time=datetime.datetime.now()).where(user.id == user_id).execute()
            pf.del_user_id_to_redis(user_id)
            return jsonify({'code': 200, 'msg': '成功!'})
        else:
            # BUG FIX: the message used to say "disable the ROLE first"
            # (copy-paste from roleDelete); this endpoint deletes a USER.
            return jsonify({'code': 500, 'msg': '失败!删除前请先禁用用户'})
    except Exception as e:
        traceback.print_exc()
        response = {'code': 500, 'msg': '失败!错误信息:' + str(e) + ',请联系管理员。', 'data': []}
        return jsonify(response), 500
# Fetch the role list
@privilege.route('/roleGet', methods=['GET'])
@permission_required(URL_PREFIX + '/roleGet')
@cross_origin()
def roleGet():
    """Return every role known to the system."""
    try:
        return jsonify({'code': 200, 'msg': '成功!', 'data': role_list_get()})
    except Exception as e:
        traceback.print_exc()
        return jsonify({'code': 500, 'msg': '失败!错误信息:' + str(e) + ',请联系管理员。', 'data': []}), 500
# Fetch the privileges held by one role
@privilege.route('/rolePrivilegeGet', methods=['POST'])
@permission_required(URL_PREFIX + '/rolePrivilegeGet')
@cross_origin()
def rolePrivilegeGet():
    """List the valid privileges attached to the given role as
    {privilege_id, privilege_name} pairs, ordered by mapping id."""
    try:
        role_id = request.get_json()['role_id']
        result = []
        privilege_list = privilege_list_get()
        privilege_role_query = privilege_role.select().where((privilege_role.role_id == role_id) & (privilege_role.is_valid == 1)).order_by(privilege_role.id).dicts()
        for row in privilege_role_query:
            result.append({
                'privilege_id': row['privilege_id'],
                # Fallback index privilege_id - 1 is used when the id lookup
                # misses -- presumably ids are dense and 1-based; confirm.
                'privilege_name': cf.dict_list_get_single_element(privilege_list, 'id', row['privilege_id'], 'name', row['privilege_id'] - 1),
            })
        return jsonify({'code': 200, 'msg': '成功!', 'data': result})
    except Exception as e:
        traceback.print_exc()
        response = {'code': 500, 'msg': '失败!错误信息:' + str(e) + ',请联系管理员。', 'data': []}
        return jsonify(response), 500
# Replace the set of privileges attached to a role
@privilege.route('/rolePrivilegeEdit', methods=['POST'])
@permission_required(URL_PREFIX + '/rolePrivilegeEdit')
@cross_origin()
def rolePrivilegeEdit():
    """Replace a role's privilege set: invalidate the old rows, bulk-insert
    the checked ones, then refresh the role's redis cache."""
    try:
        role_id = request.get_json()['role_id']
        checked_privilege_id = request.get_json()['checked_privilege_id']
        privilege_role.update(is_valid=0).where(privilege_role.role_id == role_id).execute()
        data_source = [(single_checked_privilege_id, role_id, 1)
                       for single_checked_privilege_id in checked_privilege_id]
        field = [privilege_role.privilege_id, privilege_role.role_id, privilege_role.is_valid]
        # ROBUSTNESS: skip the insert when nothing is checked -- peewee may
        # reject an empty bulk insert, and there is nothing to write anyway.
        if data_source:
            privilege_role.insert_many(data_source, field).execute()
        pf.flush_role_privilege_to_redis(role_id)
        response = {'code': 200, 'msg': '成功'}
        return jsonify(response)
    except Exception as e:
        traceback.print_exc()  # consistent with the other handlers
        # BUG FIX: 'msg' used to carry the exception object itself, which
        # jsonify cannot serialise; stringify it.
        response = {'code': 500, 'msg': str(e), 'data': {}}
        return jsonify(response), 500
# Create (role_id == 0) or update a role
@privilege.route('/roleEdit', methods=['POST'])
@permission_required(URL_PREFIX + '/roleEdit')
@cross_origin()
def roleEdit():
    """Insert a new role when role_id is 0, otherwise update the existing one."""
    try:
        payload = request.get_json()
        role_id = payload['role_id']
        name = payload['name']
        remark = payload['remark']
        now = datetime.datetime.now()
        if role_id == 0:
            role.create(name=name, remark=remark, is_valid=1, update_time=now)
        else:
            role.update(name=name, remark=remark, update_time=now).where(role.id == role_id).execute()
        return jsonify({'code': 200, 'msg': '成功!'})
    except Exception as e:
        traceback.print_exc()
        return jsonify({'code': 500, 'msg': '失败!错误信息:' + str(e) + ',请联系管理员。', 'data': []}), 500
# Disable a role
@privilege.route('/roleDisable', methods=['POST'])
@permission_required(URL_PREFIX + '/roleDisable')
@cross_origin()
def roleDisable():
    """Mark the role invalid and drop it from the redis cache."""
    try:
        rid = request.get_json()['role_id']
        now = datetime.datetime.now()
        role.update(is_valid=0, update_time=now).where(role.id == rid).execute()
        pf.del_role_to_redis(rid)
        return jsonify({'code': 200, 'msg': '成功!'})
    except Exception as e:
        traceback.print_exc()
        return jsonify({'code': 500, 'msg': '失败!错误信息:' + str(e) + ',请联系管理员。', 'data': []}), 500
# Enable a role
@privilege.route('/roleEnable', methods=['POST'])
@permission_required(URL_PREFIX + '/roleEnable')
@cross_origin()
def roleEnable():
    """Mark the role valid again and rebuild its privilege cache in redis."""
    try:
        rid = request.get_json()['role_id']
        now = datetime.datetime.now()
        role.update(is_valid=1, update_time=now).where(role.id == rid).execute()
        pf.flush_role_privilege_to_redis(rid)
        return jsonify({'code': 200, 'msg': '成功!'})
    except Exception as e:
        traceback.print_exc()
        return jsonify({'code': 500, 'msg': '失败!错误信息:' + str(e) + ',请联系管理员。', 'data': []}), 500
# Delete a role (soft delete; the role must already be disabled)
@privilege.route('/roleDelete', methods=['POST'])
@permission_required(URL_PREFIX + '/roleDelete')
@cross_origin()
def roleDelete():
    """Soft-delete a role (is_valid = -1).  Only allowed once the role is
    disabled (is_valid == 0); otherwise the request is rejected."""
    try:
        role_id = request.get_json()['role_id']
        role_status = role.get(role.id == role_id).is_valid
        if role_status == 0:
            role.update(is_valid=-1, update_time=datetime.datetime.now()).where(role.id == role_id).execute()
            pf.del_role_to_redis(role_id)
            return jsonify({'code': 200, 'msg': '成功!'})
        else:
            return jsonify({'code': 500, 'msg': '失败!删除前请先禁用角色'})
    except Exception as e:
        traceback.print_exc()
        response = {'code': 500, 'msg': '失败!错误信息:' + str(e) + ',请联系管理员。', 'data': []}
        return jsonify(response), 500
# Fetch the privilege list
@privilege.route('/privilegeGet', methods=['GET'])
@permission_required(URL_PREFIX + '/privilegeGet')
@cross_origin()
def privilegeGet():
    """Return every privilege known to the system."""
    try:
        return jsonify({'code': 200, 'msg': '成功!', 'data': privilege_list_get()})
    except Exception as e:
        traceback.print_exc()
        return jsonify({'code': 500, 'msg': '失败!错误信息:' + str(e) + ',请联系管理员。', 'data': []}), 500
# Create (privilege_id == 0) or update a privilege
@privilege.route('/privilegeEdit', methods=['POST'])
@permission_required(URL_PREFIX + '/privilegeEdit')
@cross_origin()
def privilegeEdit():
    """Create a privilege (with duplicate name/mark checks) when
    privilege_id is 0, otherwise update the existing one.

    NOTE(review): the duplicate checks run only on create; an update can
    still collide with an existing name/mark -- confirm intended.
    """
    try:
        privilege_id = request.get_json()['privilege_id']
        name = request.get_json()['name']
        mark = request.get_json()['mark']
        remark = request.get_json()['remark']
        if privilege_id == 0:
            if cf.is_data_existed_in_db(privilege_model, privilege_model.name, name):
                # BUG FIX: these 406 responses used to be unconditionally
                # overwritten by the 200 below, so a duplicate reported
                # success without creating anything.  Return immediately.
                response = {'code': 406, 'msg': '已经存在相同名称的权限'}
                return jsonify(response)
            elif cf.is_data_existed_in_db(privilege_model, privilege_model.mark, mark):
                response = {'code': 406, 'msg': '已经存在相同标识的权限'}
                return jsonify(response)
            else:
                privilege_model.create(name=name, mark=mark, remark=remark, is_valid=1, update_time=datetime.datetime.now())
        else:
            privilege_model.update(name=name, mark=mark, remark=remark, update_time=datetime.datetime.now()).where(privilege_model.id == privilege_id).execute()
        response = {'code': 200, 'msg': '成功!'}
        return jsonify(response)
    except Exception as e:
        traceback.print_exc()
        response = {'code': 500, 'msg': '失败!错误信息:' + str(e) + ',请联系管理员。', 'data': []}
        return jsonify(response), 500
# Disable a privilege
@privilege.route('/privilegeDisable', methods=['POST'])
@permission_required(URL_PREFIX + '/privilegeDisable')
@cross_origin()
def privilegeDisable():
    """Mark the privilege invalid and refresh every role cache that held it."""
    try:
        pid = request.get_json()['privilege_id']
        now = datetime.datetime.now()
        privilege_model.update(is_valid=0, update_time=now).where(privilege_model.id == pid).execute()
        pf.flush_privilege_which_belongs_to_role_with_target_privilege_to_redis(pid)
        return jsonify({'code': 200, 'msg': '成功!'})
    except Exception as e:
        traceback.print_exc()
        return jsonify({'code': 500, 'msg': '失败!错误信息:' + str(e) + ',请联系管理员。', 'data': []}), 500
# Enable a privilege
@privilege.route('/privilegeEnable', methods=['POST'])
@permission_required(URL_PREFIX + '/privilegeEnable')
@cross_origin()
def privilegeEnable():
    """Mark the privilege valid and refresh every role cache that holds it."""
    try:
        pid = request.get_json()['privilege_id']
        now = datetime.datetime.now()
        privilege_model.update(is_valid=1, update_time=now).where(privilege_model.id == pid).execute()
        pf.flush_privilege_which_belongs_to_role_with_target_privilege_to_redis(pid)
        return jsonify({'code': 200, 'msg': '成功!'})
    except Exception as e:
        traceback.print_exc()
        return jsonify({'code': 500, 'msg': '失败!错误信息:' + str(e) + ',请联系管理员。', 'data': []}), 500
# Delete a privilege (soft delete; the privilege must already be disabled)
@privilege.route('/privilegeDelete', methods=['POST'])
@permission_required(URL_PREFIX + '/privilegeDelete')
@cross_origin()
def privilegeDelete():
    """Soft-delete a privilege (is_valid = -1).  Only allowed once it is
    disabled (is_valid == 0); otherwise the request is rejected."""
    try:
        privilege_id = request.get_json()['privilege_id']
        privilege_status = privilege_model.get(privilege_model.id == privilege_id).is_valid
        if privilege_status == 0:
            privilege_model.update(is_valid=-1, update_time=datetime.datetime.now()).where(privilege_model.id == privilege_id).execute()
            pf.flush_privilege_which_belongs_to_role_with_target_privilege_to_redis(privilege_id)
            return jsonify({'code': 200, 'msg': '成功!'})
        else:
            return jsonify({'code': 500, 'msg': '失败!删除前请先禁用权限'})
    except Exception as e:
        traceback.print_exc()
        response = {'code': 500, 'msg': '失败!错误信息:' + str(e) + ',请联系管理员。', 'data': []}
        return jsonify(response), 500
|
from py_helium_console_client import ConsoleClient
## See full documentation for the Console API: https://docs.helium.com/api/console/
API_KEY = 'PASTE_API_KEY_HERE'  # replace with a real Console API key before running
# initialize client
client = ConsoleClient(api_key=API_KEY)
# list devices on account
devices = client.get_devices()
# search for a device by app key, app eui, dev eui
single_device = client.get_device_by_details(devices[0].app_eui, devices[0].app_key, devices[0].dev_eui)
assert devices[0].__dict__ == single_device.__dict__
# search for a device by uuid
uuid_device = client.get_device_by_uuid(devices[0].id)
assert devices[0].__dict__ == uuid_device.__dict__
# get device events
# NOTE(review): devices[5] assumes the account holds at least six devices.
events = client.get_device_events(devices[5].id)
# get integration events
integration_events = client.get_device_integration_events(devices[5].id)
# create device
created_device = client.create_device(name='python-client-test-device',
                                      app_key='850AFDC6F1CF2397D3FEAB8C1850E6E1',
                                      app_eui='B21C36EBBDC0D75F',
                                      dev_eui='ABA47D469E1021AF')
# list labels
labels = client.get_labels()
# create label
created_label = client.create_label('python-client-test-label')
# search for label by id
queried_label = client.search_for_label(created_label.id)
assert created_label.id == queried_label.id
# add label to device
add_label_result = client.add_device_label(created_device.id, created_label.id)
assert add_label_result is True
# remove label from device
remove_label_result = client.remove_device_label(created_device.id, created_label.id)
assert remove_label_result is True
# delete device
deleted_device_result = client.delete_device(created_device.id)
assert deleted_device_result is True
# delete label
# NOTE(review): unlike the calls above, this result is never asserted.
deleted_label_result = client.delete_label(created_label.id)
|
#!/bin/env python
import subprocess
import time
#import numpy as np
#dataWritingHistory=np.genfromtxt('userDefinedLog/dataWritingHistory',skip_header=0,delimiter=' ')
#with open('userDefinedLog/dataWritingHistory') as f:
# content = f.readlines()
def getTimes(fileName):
    """Read the data-writing history file and return (timeList, timeString).

    timeList   -- every line of the file, newline-stripped
    timeString -- all but the last entry joined with commas (the newest
                  time may still be being written, so it is omitted)
    """
    # BUG FIX: the original opened the file inside a comprehension and never
    # closed it; the with-statement closes the handle deterministically.
    # Prints are parenthesised so the code also runs under Python 3.
    with open(fileName) as f:
        timeList = [line.rstrip('\n') for line in f]
    print(timeList)
    timeString = ",".join(timeList[:-1])  # omitting the last one
    print(timeString)
    return timeList, timeString
def reconstruct(times2reconstruct):
    """Run the reconstruct shell script over the comma-separated time string;
    returns the shell's exit status (0 on success)."""
    return_code = subprocess.call("./reconstruct4.sh "+times2reconstruct, shell=True)
    return return_code
def removeTimesInProcessorDirs(flag, Nb_procs, times2remove):
    """Delete the listed time directories from every processor* directory,
    but only when *flag* is falsy (i.e. the reconstruct step succeeded).

    flag         -- exit status of the reconstruct step; nonzero skips removal
    Nb_procs     -- number of processor directories (processor0..processorN-1)
    times2remove -- iterable of time-directory names to delete
    """
    # Prints are parenthesised (single argument) so the file also runs under
    # Python 3 while remaining valid Python 2.
    if not flag:
        for timeI in times2remove:
            print('\n')
            for i in range(Nb_procs):
                rm_command = 'rm -r processor' + str(i) + '/' + str(timeI)
                return_value = subprocess.call(rm_command, shell=True)
                # subprocess.call returns 0 on success
                if not return_value:
                    print(rm_command)
                else:
                    print(rm_command + ' failed!')
def main():
    """Reconstruct every fully-written time, then free the disk space used
    by the per-processor copies (only if reconstruction succeeded)."""
    Nb_procs = 4
    timeList, timeString = getTimes('userDefinedLog/dataWritingHistory')
    print("get times")
    ifNextStep = reconstruct(timeString)
    print("reconstruct complete")
    time.sleep(10)  # give the filesystem a moment before deleting sources
    removeTimesInProcessorDirs(ifNextStep, Nb_procs, timeList)
    print("finish removing times in processor directories")
# Runs on import as well as when executed directly (original behaviour kept).
main()
# to keep the main game seperate
import logging
log = logging.getLogger('run_cfg_game')
from cocos.director import director
from cocos.scene import Scene
import config
import constants
def loadandrun(args = None):
    """Import the configured game module and push its scene onto the director.

    DEBUG builds load the module named by config.gamefile; release builds
    always load the module named "game".  *args*, when given, is forwarded
    to the game's run().
    """
    if constants.DEBUG:
        gamename = config.gamefile
    else:
        gamename = "game"
    log.info("Loading :" + str(gamename))
    try:
        game = __import__(gamename)
        if args is not None:
            director.push(Scene(game.run(args)))
        else:
            # Don't push in the menu because it would consume up/down key events
            director.push(Scene(game.run()))
    except ImportError:
        # Parenthesised (single-argument) print: identical output on
        # Python 2 and keeps the module importable under Python 3.
        print("Import error: " + gamename)
|
import numpy as np
from sklearn.datasets import load_breast_cancer
class SVM(object):
    """Soft-margin SVM trained with a randomised SMO-style procedure.

    Hyper-parameters are fixed in __init__: polynomial kernel of degree 3
    and C = 1; gamma is only consulted by the rbf kernel.
    """
    def __init__(self):
        self.b = 0                     # bias term
        self.kernel = self.polynomial  # kernel function used throughout
        self.gamma = 1                 # rbf width (unused with polynomial)
        self.degree = 3                # polynomial degree
        self.C = 1                     # box constraint on the multipliers
    def _ktt_violations(self, uy, alpha):
        """Per-sample KKT violation scores; uy is prediction * label.

        NOTE(review): the name looks like a typo for "_kkt_violations".
        The uy == 1 mask overwrites the two assignments above it, so points
        exactly on the margin are penalised only when alpha lies outside
        [0, C] -- appears intentional.
        """
        violations = np.zeros(len(uy))
        violations[uy >= 1] = self.C - alpha[uy >= 1]
        violations[uy <= 1] = alpha[uy <= 1]
        violations[uy == 1] = (
            (alpha[uy == 1] >= self.C) + (alpha[uy == 1] <= 0)) * self.C / 2
        return violations
    def _select_pair_by_delta_e(self, u, y, alpha):
        """Sample an index pair with probability ~ |E_i - E_j| restricted to
        violating rows; (-1, -1) when nothing violates.
        (Not used by fit(); kept as an alternative selection strategy.)"""
        violations = self._ktt_violations(u * y, alpha) > 0
        if violations.max() == 0:
            return -1, -1
        e = u - y
        repeat_e = np.repeat(e.reshape(1, -1), e.shape[0], axis=0)
        delta_e = (violations * abs((repeat_e - repeat_e.T))).flatten()
        idx = np.random.choice(
            len(delta_e), 1, p=delta_e / delta_e.sum()).sum()
        return idx % len(e), idx // len(e)
    def _select_pair_by_max_violations(self, u, y, alpha):
        """Pick idx1 with probability ~ its violation score, then idx2 with
        probability ~ |E_idx1 - E_j|; (-1, -1) when nothing violates."""
        n_data = len(y)
        violations = self._ktt_violations(u * y, alpha)
        if violations.max() == 0:
            return -1, -1
        idx1 = np.random.choice(
            n_data, 1, p=violations / violations.sum()).sum()
        delta_e = abs(u - y - u[idx1] + y[idx1])
        idx2 = np.random.choice(n_data, 1, p=delta_e / delta_e.sum()).sum()
        return idx1, idx2
    def loss(self, alpha, x, y):
        """Dual objective.  NOTE(review): uses self.supp_w / self.supp_x
        (set by fit) rather than the alpha/x/y arguments -- confirm intended."""
        w = np.matmul(self.supp_w.reshape(-1, 1), self.supp_w.reshape(1, -1))
        return alpha.sum() - (w * self.kernel(self.supp_x, self.supp_x)).sum() / 2
    def fit(self, x, y):  # SMO
        """Train on x (n_samples, n_features) and labels y in {-1, +1};
        after fitting, only the support vectors are kept for prediction."""
        n_data = x.shape[0]
        self.supp_w = np.zeros(x.shape[0])
        self.supp_x = x
        self.b = 1
        alpha = np.zeros(n_data)
        for i in range(1000):
            # select alpha1, alpha2
            u = np.sign(self.predict(x))
            idx1, idx2 = self._select_pair_by_max_violations(u, y, alpha)
            if(idx1 == -1):
                break
            y1, y2 = y[idx1], y[idx2]
            # update alpha1, alpha2: clip alpha2 to the feasible box [L, H]
            L = max(0, alpha[idx2] - alpha[idx1]) if y1 != y2 else max(0,
                alpha[idx1] + alpha[idx2] - self.C)
            H = min(self.C, self.C + alpha[idx2] - alpha[idx1]
                ) if y1 != y2 else min(self.C, alpha[idx1] + alpha[idx2])
            e1, e2 = u[idx1] - y1, u[idx2] - y2
            k11 = self.kernel(x[[idx1]], x[[idx1]]).sum()
            k12 = self.kernel(x[[idx1]], x[[idx2]]).sum()
            k22 = self.kernel(x[[idx2]], x[[idx2]]).sum()
            # NOTE(review): k11 + k22 - 2*k12 == 0 would divide by zero here.
            alpha2 = min(
                H, max(L, alpha[idx2] + y2 * (e1 - e2) / (k11 + k22 - 2 * k12)))
            alpha1 = alpha[idx1] + y1 * y2 * (alpha[idx2] - alpha2)
            # update b (standard SMO bias update; average when both clipped)
            b1 = self.b - e1 - y1 * \
                (alpha1 - alpha[idx1]) * k11 - \
                y2 * (alpha2 - alpha[idx2]) * k12
            b2 = self.b - e2 - y1 * \
                (alpha1 - alpha[idx1]) * k12 - \
                y2 * (alpha2 - alpha[idx2]) * k22
            if alpha1 > 0 and alpha1 < self.C:
                self.b = b1
            elif alpha2 > 0 and alpha2 < self.C:
                self.b = b2
            else:
                self.b = (b1 + b2) / 2
            # update model: keep only rows with nonzero multipliers
            alpha[[idx1, idx2]] = [alpha1, alpha2]
            sv = (alpha != 0)
            self.supp_w = alpha[sv] * y[sv]
            self.supp_x = x[sv]
            if i % 100 == 0:
                print(self.loss(alpha, x, y))
        print('support vectors:', self.supp_x)
    def predict(self, x):
        """Decision values f(x) = sum_i alpha_i y_i K(x_i, x) + b."""
        return self.supp_w.dot(self.kernel(self.supp_x, x)).flatten() + self.b
    def rbf(self, x1, x2):
        """Gaussian kernel matrix from pairwise squared distances."""
        sub = np.array([[np.square(x1i - x2i).sum()
                         for x2i in x2] for x1i in x1])
        return np.exp(-self.gamma * sub)
    def polynomial(self, x1, x2):
        """Polynomial kernel: (x1 . x2^T + 1) ** degree."""
        return (x1.dot(x2.T) + 1)**self.degree
    def linear(self, x1, x2):
        """Linear kernel: plain dot product."""
        return x1.dot(x2.T)
def main():
    """Train the SVM on the breast-cancer set; print train/test accuracy."""
    data = load_breast_cancer()
    labels = data.target * 2 - 1  # map {0, 1} labels to {-1, +1}
    test_ratio = 0.2
    split = np.random.uniform(0, 1, len(labels))
    is_train = split >= test_ratio
    train_x, test_x = data.data[is_train], data.data[~is_train]
    train_y, test_y = labels[is_train], labels[~is_train]
    svm = SVM()
    svm.fit(train_x, train_y)
    print(sum(np.sign(svm.predict(train_x)) == train_y) / train_x.shape[0])
    print(sum(np.sign(svm.predict(test_x)) == test_y) / test_x.shape[0])


if __name__ == "__main__":
    main()
|
'''Problem
You just made a new friend at an international puzzle conference, and you asked for a way to keep in touch.
You found the following note slipped under your hotel room door the next day:
"Salutations, new friend!
I have replaced every digit of my phone number with its spelled-out uppercase
English representation ("ZERO", "ONE", "TWO", "THREE", "FOUR", "FIVE", "SIX", "SEVEN", "EIGHT", "NINE" for the digits 0 through 9, in that order), and then reordered all of those letters in some way to produce a string S. It's up to you to use S to figure out how many digits are in my phone number and what those digits are, but I will tell you that my phone number consists of those digits in nondecreasing order. Give me a call... if you can!"
"ZEROONETWOTHREEFOURFIVESIXSEVENEIGHTNINE"
You would like to call your friend to tell him that this is an obnoxious way to give someone a phone number, but you need the phone number to do that! What is it?
Input
The first line of the input gives the number of test cases, T. T test cases follow. Each consists of one line with a string S of uppercase English letters.
Output
For each test case, output one line containing Case #x: y, where x is the test case number (starting from 1) and y is a string of digits: the phone number.
Limits
1 ≤ T ≤ 100.
A unique answer is guaranteed to exist.
Small dataset
3 ≤ length of S ≤ 20.
Large dataset
3 ≤ length of S ≤ 2000.
Sample
Input
Output
4
OZONETOWER
WEIGHFOXTOURIST
OURNEONFOE
ETHER
Case #1: 012
Case #2: 2468
Case #3: 114
Case #4: 3
"ZERO", "ONE", "TWO", "THREE", "FOUR", "FIVE", "SIX", "SEVEN", "EIGHT", "NINE"
"ONE", "TWO", "THREE", "FOUR", "FIVE", "SIX", "SEVEN", "EIGHT", "NINE" zERO
"ONE", "TWO", "THREE", "FOUR", "FIVE", "SEVEN", "EIGHT", "NINE" SIx
"ONE", "TWO", "THREE", "FOUR", "FIVE", "EIGHT", "NINE" SEvEN
"ONE", "THREE", "FOUR", "FIVE", "EIGHT", "NINE" TwO
"ONE", "THREE", "FOUR", "EIGHT", "NINE" FIvE
"ONE", "THREE", "FOUR", "NINE" EIgHT
"ONE", "FOUR", "NINE" "ThREE"
"ONE", "NINE" "FOuR"
"NINE" "oNE"
NINE
"ZERO", "ONE", "TWO", "THREE", "FOUR", "FIVE", "SIX", "SEVEN", "EIGHT", "NINE"
"ONE", "TWO", "THREE", "FOUR", "FIVE", "SIX", "SEVEN", "EIGHT", "NINE" zERO
"ONE", "TWO", "THREE", "FOUR", "FIVE", "SEVEN", "EIGHT", "NINE" SIx
"ONE", "THREE", "FOUR", "FIVE", SEvEN, "EIGHT", "NINE" TwO
"ONE", "THREE", "FOUR", "FIVE", SEvEN, "NINE" eiGht
"ONE", "THREE", "FIVE", SEvEN, "NINE" foUr
"ONE", "THREE", "FIVE", "NINE" sEVEN
six
two
eight
foUR
seven
FIVE
Nine
Three
one
FIvE
EIgHT
"ONE", "FOUR", "NINE" "ThREE"
"ONE", "NINE" "FOuR"
"NINE" "oNE"
NINE
'''
import jam
# Each tuple is (uniquely-identifying letter, digit word, digit), ordered so
# that once earlier digits are removed the letter appears in exactly one
# remaining word: Z->ZERO, X->SIX, W->TWO, G->EIGHT, U->FOUR, then S->SEVEN
# (after SIX), V->FIVE (after SEVEN), I->NINE (after SIX/FIVE/EIGHT),
# H->THREE (after EIGHT), O->ONE (after ZERO/TWO/FOUR).
_LETTER_ORDER = [
    ('Z', 'ZERO', 0),
    ('X', 'SIX', 6),
    ('W', 'TWO', 2),
    ('G', 'EIGHT', 8),
    ('U', 'FOUR', 4),
    ('S', 'SEVEN', 7),
    ('V', 'FIVE', 5),
    ('I', 'NINE', 9),
    ('H', 'THREE', 3),
    ('O', 'ONE', 1),
]


def solve(case):
    """Recover the nondecreasing phone number from its scrambled spelling.

    ``case`` must expose ``readLine()`` returning the scrambled uppercase
    string S (Code Jam input wrapper).  Returns the digits as a string.

    Replaces the original ten near-identical while-blocks with one
    data-driven loop; ``.strip()`` makes a trailing newline harmless
    (the original letter-count dict raised KeyError on it).
    """
    from collections import Counter

    counts = Counter(case.readLine().strip())
    digits = []
    for letter, word, digit in _LETTER_ORDER:
        n = counts[letter]
        if n == 0:
            continue
        digits.extend([digit] * n)
        # Remove every letter of the identified word, n times over.
        for ch in word:
            counts[ch] -= n
    return ''.join(str(d) for d in sorted(digits))
jam.run("A-large.in", solve) |
# Minimal GUI YouTube downloader built on tkinter + pytube.
from tkinter import *
from pytube import YouTube

root = Tk()
root.geometry('500x300')
root.resizable(0, 0)
root.title("@MuL's video downloader")

Label(root, text='Youtube Video Downloader', font='arial 20 bold', bg='yellow').pack()

link = StringVar()  # holds the URL pasted by the user
Label(root, text='Paste Link Here:', font='arial 15 bold', bg='Red').place(x=160, y=60)
Label(root, text='@Mul Video Downloader', font='arial 12 bold', bg='blue').place(x=140, y=40)
link_enter = Entry(root, width=70, textvariable=link).place(x=32, y=90)


def Downloader():
    """Download the first available stream of the pasted URL.

    Bug fix: ``Stream.download()`` performs the download and returns the
    saved file's *path* (a str); the original code then called
    ``.download()`` again on that string, raising AttributeError after
    every (successful) download.
    """
    url = YouTube(str(link.get()))
    url.streams.first().download(output_path="D:\\Video's")
    Label(root, text='DOWNLOADED', font='arial 15', bg='purple').place(x=180, y=210)


Button(root, text='DOWNLOAD', font='arial 15 bold', bg='green', padx=2,
       command=Downloader).place(x=180, y=150)

root.mainloop()
from StringIO import StringIO
from unittest import TestCase
from rasmus.sexp import Sym
from rasmus.sexp import dict2sexp
from rasmus.sexp import parse
from rasmus.sexp import prepare
from rasmus.sexp import process
from rasmus.sexp import sexp2dict
from rasmus.sexp import write
from rasmus.sexp import write_pretty
class SexpTests(TestCase):
    """Tests for the rasmus.sexp s-expression parser and writers.

    NOTE(review): this module is Python 2 (``StringIO`` import, bare
    ``print`` statements and the builtin ``reduce`` below).
    """

    def test_parse(self):
        # Atoms parse to Sym/int/float/str; parentheses become nested lists.
        exp = parse('(+ 223 36.6 (aaa) bbb \"ccc\")')
        expected = [Sym('+'), 223, 36.6, [Sym('aaa')], Sym('bbb'), 'ccc']
        self.assertEqual(exp, expected)
        # Escaped quotes inside strings and #f/#t boolean literals.
        exp = parse(r'''(if (> 2 var) (cons a b)
            (display "no \"quoted\" " 22 (#f #t)) )''')
        expected = [
            Sym('if'),
            [Sym('>'), 2, Sym('var')],
            [Sym('cons'), Sym('a'), Sym('b')],
            [Sym('display'), 'no "quoted" ', 22, [False, True]]]
        self.assertEqual(exp, expected)
        # Without handlers, a (dict ...) form stays plain data.
        exp = parse('''(hello there () (dict (a 1) ("bb" 222.0) (8 #f)
            (more (dict (u v))))
            ("dict" (a b) (c d)))''')
        expected = [Sym('hello'), Sym('there'), [], [Sym('dict'), [Sym('a'), 1], ['bb', 222.0], [8, False], [Sym('more'), [Sym('dict'), [Sym('u'), Sym('v')]]]], ['dict', [Sym('a'), Sym('b')], [Sym('c'), Sym('d')]]]  # nopep8
        self.assertEqual(exp, expected)

    def test_write(self):
        exp = parse(r'''(if (> 2 var) (cons a b)
            (display "no \"quoted\" " 22 (#f #t)) )''')
        # Compact writer: single line, string quotes re-escaped.
        t = StringIO()
        write(exp, t)
        expected = '(if (> 2 var) (cons a b) (display "no \\"quoted\\" " 22 (#f #t)))'  # nopep8
        self.assertEqual(t.getvalue(), expected)
        # Pretty writer: one argument per line, aligned under the first arg.
        expected = '''\
(if (> 2
       var)
    (cons a
          b)
    (display "no \\"quoted\\" "
             22
             (#f #t)))'''
        t = StringIO()
        write_pretty(exp, t)
        self.assertEqual(t.getvalue(), expected)

    def test_eval(self):
        # A {"dict": sexp2dict} handler turns (dict (k v) ...) forms into
        # real dicts during parsing; the *string* "dict" head is untouched.
        exp = parse('''(hello there (dict (a 1) ("bb" 222.0) (8 #f)
            (more (dict (u v))))
            ("dict" (a b) (c d)))''',
                    {"dict": sexp2dict})
        expected = [Sym('hello'), Sym('there'),
                    {8: False,
                     Sym('more'):
                     {Sym('u'):
                      Sym('v')},
                     'bb': 222.0,
                     Sym('a'): 1},
                    ['dict', [Sym('a'), Sym('b')], [Sym('c'), Sym('d')]]]
        self.assertEqual(exp, expected)

    def test_dict2sexp(self):
        # Inverse direction: Python dict -> (dict ...) s-expression.
        exp = dict2sexp({"aaa": 111,
                         True: (((22, "abc", "adcd"), 9999),
                                "www",
                                (Sym("hello"), [], ("hi",
                                                    5555,
                                                    "---")
                                 )),
                         78: False})
        expected = [
            Sym('dict'),
            [True, (((22, 'abc', 'adcd'), 9999),
                    'www', (Sym('hello'), [], ('hi', 5555, '---')))],
            ['aaa', 111],
            [78, False]
        ]
        self.assertEqual(exp, expected)

    def test_eval2(self):
        # Handlers can evaluate arithmetic during parsing; `quote`
        # suppresses evaluation of its argument.
        o = parse('''
            (account (usename "raz")
            (started (date August 17 2005))
            (renewed (date October 5 2009))
            (score (+ 2 (* 3 7)))
            (score2 (quote (+ 2 (* 3 7))))
            )
            ''', {"account": sexp2dict,
                  "date": lambda x, e: tuple(x[1:]),
                  "+": lambda x, e: sum(map(lambda i: process(i, e), x[1:])),
                  "*": lambda x, e: reduce(lambda a, b: a*b,
                                           map(lambda i: process(i, e), x[1:])),
                  "quote": lambda x, e: x[1]})
        expected = {Sym('score2'): [Sym('+'), 2, [Sym('*'), 3, 7]], Sym('usename'): 'raz', Sym('score'): 23, Sym('renewed'): (Sym('October'), 5, 2009), Sym('started'): (Sym('August'), 17, 2005)}  # nopep8
        self.assertEqual(o, expected)
        # Render the evaluated structure back out (visual check only).
        write_pretty(
            prepare(o, [[dict, lambda x, e: dict2sexp(x, e, "account")],
                        [tuple, lambda x, e: [Sym("date")] + list(x)]]))
        print
        print
#=====================================================================
# tree
# Module-level demo (Python 2): parse a phylogenetic tree written as
# nested (node ...) forms into (name, data, children) tuples.
def parse_node(sexp, env):
    """Convert a (node name (key val)... (node ...)...) form recursively."""
    name = sexp[1]
    data = {}
    children = []
    for x in sexp[2:]:
        if x[0] == "node":
            children.append(parse_node(x, env))
        else:
            data[x[0]] = x[1]
    return (name, data, children)


o = parse('''
;tree comments
(node "C" (dist .2) (boot 70) (species "root")
(node "A" (dist .1) (boot 100)) ; branch A
(node "B" (dist .11) (boot 98)
(node "Human1" (dist .01) (species "Human"))
(node "Chimp2" (dist .03) (species "Chimp"))))
''')
t = process(o, {"node": parse_node})
from pprint import pprint
pprint(o)
print t
write_pretty(o)
print
|
from environ import Env, Path

env = Env()

# Project root: two directory levels above this settings file.
root = Path(__file__) - 2
BASE_DIR = root()

# Secrets and switches are read from the environment (django-environ).
SECRET_KEY = env("SECRET_KEY")
DEBUG = env.bool("DEBUG", default=False)
ALLOWED_HOSTS = env.list("ALLOWED_HOSTS", default=[])

INSTALLED_APPS = [
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.messages",
    "django.contrib.staticfiles",
    # Local apps.
    "shared",
    "project",
    "dependency",
    # NOTE(review): the app label "platform" shadows the stdlib ``platform``
    # module name — verify imports elsewhere are unaffected.
    "platform",
    "user",
    "user_platform",
]

MIDDLEWARE = [
    "django.middleware.security.SecurityMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
]

ROOT_URLCONF = "config.urls"

TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.debug",
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
            ],
        },
    },
]

WSGI_APPLICATION = "config.wsgi.application"

# Database / cache defaults point at docker-compose service hostnames.
DATABASES = {
    "default": env.db(default="postgres://postgres:postgres@postgres:5432/postgres")
}

CACHES = {
    "default": env.cache(
        default="redis://redis:6379/1?client_class=django_redis.client.DefaultClient"
    )
}

AUTH_PASSWORD_VALIDATORS = [
    {
        "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"
    },
    {"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
    {"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
    {"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]

# Custom user model.
AUTH_USER_MODEL = "user.User"

# Celery: broker and result backend on redis (separate DBs from the cache).
CELERY_BROKER_URL = env("CELERY_BROKER_URL", default="redis://redis:6379/2")
CELERY_RESULT_BACKEND = env("CELERY_RESULT_BACKEND", default="redis://redis:6379/3")
CELERY_DEFAULT_QUEUE = env("CELERY_DEFAULT_QUEUE", default="default")
CELERY_BEAT_SCHEDULE = {}

LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True

STATIC_URL = "/static/"

GITLAB_PERSONAL_ACCESS_TOKEN = env("GITLAB_PERSONAL_ACCESS_TOKEN")
|
'''
Time O(N) | Space O(1)
'''
# Definition for singly-linked list.
class ListNode:
    """One node of a singly linked list."""

    def __init__(self, val=0, next=None):
        # ``next`` mirrors the LeetCode signature even though it shadows
        # the builtin of the same name.
        self.val, self.next = val, next
class Solution:
    def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:
        """Add two numbers whose digits are stored head-first.

        Reads each list as a decimal string, adds the two integers, and
        rebuilds a fresh list from the resulting digit string.
        O(N) time, O(1) auxiliary space besides the output list.
        """
        if not l1 or not l2:
            return l1 or l2

        def digits_of(node):
            # Collect the node values as one decimal string.
            out = []
            while node:
                out.append(str(node.val))
                node = node.next
            return ''.join(out)

        total = str(int(digits_of(l1)) + int(digits_of(l2)))
        dummy = ListNode(-1)
        tail = dummy
        for ch in total:
            tail.next = ListNode(int(ch))
            tail = tail.next
        return dummy.next
import csv
import os
import urllib.request
import requests
import goodreads
import psycopg2
import json

from flask import Flask, session, render_template, request, flash, redirect, url_for
from flask_session import Session
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from functools import wraps

# Fix: the original issued a debug requests.get() against the Goodreads API
# (with a hardcoded key) and printed the response at import time — a network
# side effect on every import. Removed.

app = Flask(__name__)

# Check for environment variable
if not os.getenv("DATABASE_URL"):
    raise RuntimeError("DATABASE_URL is not set")

# Configure session to use filesystem (server-side, non-permanent sessions)
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
Session(app)

# Set up database
engine = create_engine(os.getenv("DATABASE_URL"))
db = scoped_session(sessionmaker(bind=engine))
def login_required(f):
    """View decorator: redirect anonymous users (no ``user_id``) to /login."""
    @wraps(f)
    def wrapper(*args, **kwargs):
        if session.get("user_id") is None:
            return redirect("/login")
        return f(*args, **kwargs)
    return wrapper
@app.route("/login", methods=["GET", "POST"])
def login():
    """Log a user in; on success store their id in the session."""
    session.clear()
    if request.method == "GET":
        return render_template('login.html')
    username = request.form.get("loginusername")
    password = request.form.get("loginpassword")
    # Parameterized query — the original interpolated user input into the
    # SQL string (f-string), which was injectable.
    # NOTE(review): passwords are stored/compared in plaintext — they
    # should be hashed (e.g. werkzeug.security).
    rows = engine.execute(
        "SELECT * FROM users WHERE username=%s AND password=%s",
        (username, password),
    )
    result = rows.fetchall()
    if result:
        session["user_id"] = result[0][0]
        return redirect('/search')
    return render_template('login.html')
@app.route("/logout")
def logout():
    """Drop the session and send the user back to the login page."""
    session.clear()
    return redirect("/login")
@app.route("/register", methods=["GET", "POST"])
def register():
    """Create a new account when the username is not already taken."""
    if request.method != "POST":
        return render_template('register.html')
    username = request.form.get("registerusername")
    password = request.form.get("registerpassword")
    # Parameterized queries replace the injectable f-string SQL.
    existing = engine.execute(
        "SELECT * FROM users WHERE username=%s", (username,)).fetchall()
    if existing:
        return render_template('register.html')
    # NOTE(review): password stored in plaintext — should be hashed.
    engine.execute(
        "INSERT INTO users (username, password) VALUES(%s, %s)",
        (username, password),
    )
    return redirect("/login")
@app.route("/search", methods=["GET", "POST"])
@login_required
def search():
    """Search books by (partial) isbn, title or author."""
    if request.method == "GET":
        return render_template('search.html')
    book = request.form.get("searchtxt")
    # Parameterized LIKE patterns — the original f-string SQL was injectable.
    pattern = f"%{book}%"
    books = engine.execute(
        "SELECT * FROM books WHERE isbn LIKE %s OR title LIKE %s OR author LIKE %s",
        (pattern, pattern, pattern),
    )
    return render_template('sample.html', bookss=books.fetchall())
@app.route("/book", methods=["GET", "POST"])
@login_required
def book():
    """Show one book's details, local reviews and Goodreads statistics."""
    if request.method == "GET":
        return render_template("search.html")
    isbn = request.form.get("result")
    # Parameterized queries — the original f-string SQL was injectable.
    book_rows = engine.execute(
        "SELECT * FROM books WHERE isbn=%s", (isbn,)).fetchall()
    reviews = engine.execute(
        "SELECT * FROM review WHERE isbn=%s", (isbn,)).fetchall()
    # NOTE(review): API key is hardcoded — move it to an env variable.
    gr = requests.get(
        "https://www.goodreads.com/book/review_counts.json",
        params={"key": "JHDKyR1QW0crrofMApghQ", "isbns": isbn},
    ).json()
    ratingamount = gr["books"][0]["ratings_count"]
    average = gr["books"][0]["average_rating"]
    # Has the logged-in user already reviewed this book? (column 4 = user_id)
    user_reviewed = any(session["user_id"] == int(row[4]) for row in reviews)
    return render_template(
        "names.html", book=book_rows, otherrev=reviews,
        userid=user_reviewed, average=average, ratingamount=ratingamount)
@app.route("/review", methods=["GET", "POST"])
@login_required
def review():
    """Store the logged-in user's review and rating for a book."""
    if request.method == "GET":
        return render_template("search.html")
    # Parameterized INSERT — the original f-string SQL was injectable.
    engine.execute(
        "INSERT INTO review (isbn, review, rating, user_id) "
        "VALUES(%s, %s, %s, %s)",
        (request.form.get("isbn"), request.form.get("reviewtext"),
         request.form.get("reviewnumber"), session["user_id"]),
    )
    return render_template("search.html")
@app.route("/api/<isbn>", methods=["GET"])
def api(isbn):
    """JSON endpoint: local book record merged with Goodreads stats."""
    gr = requests.get(
        "https://www.goodreads.com/book/review_counts.json",
        params={"key": "JHDKyR1QW0crrofMApghQ", "isbns": isbn},
    ).json()
    # Parameterized query — the original f-string SQL was injectable.
    book = engine.execute(
        "SELECT * FROM books WHERE isbn =%s", (isbn,)).fetchall()
    # NOTE(review): an unknown isbn makes book[0] raise IndexError (HTTP
    # 500); consider returning a 404 instead.
    payload = {
        "title": book[0][1],
        "author": book[0][2],
        "release_year": book[0][3],
        "isbn": book[0][0],
        "review_count": gr["books"][0]["ratings_count"],
        "average_score": gr["books"][0]["average_rating"],
    }
    return json.dumps(payload)
|
def stable_wall(R, C, rows):
    """Order polyomino letters so the wall can be built bottom-up.

    ``rows`` lists the R×C wall top-to-bottom; ``rows[r][c]`` is the letter
    of the polyomino occupying row r, column c.  A letter must be placed
    after every letter directly beneath any of its cells, so we run a
    topological sort (Kahn's algorithm) over the "sits on top of" graph.

    Returns a string containing each distinct letter exactly once in a
    valid build order, or -1 when the dependencies are cyclic.

    Fixes: removed the dead ``res = []`` immediately overwritten by
    ``res = ''``; uses a deque instead of list.pop() for the frontier.
    """
    from collections import deque

    graph = {}       # letter -> letters resting directly on top of it
    in_degree = {}   # letter -> number of distinct letters it rests on... (incoming edges)
    for i in range(R):
        for j in range(C):
            graph.setdefault(rows[i][j], set())
            in_degree.setdefault(rows[i][j], 0)
    # Scan each column bottom-to-top: a letter change means the lower piece
    # must be placed before the upper one.
    for c in range(C):
        for r in range(R - 1, 0, -1):
            if rows[r][c] != rows[r - 1][c]:
                graph[rows[r][c]].add(rows[r - 1][c])
    for deps in graph.values():
        for above in deps:
            in_degree[above] += 1
    ready = deque(k for k, v in in_degree.items() if v == 0)
    order = []
    while ready:
        curr = ready.popleft()
        order.append(curr)
        for above in graph[curr]:
            in_degree[above] -= 1
            if in_degree[above] == 0:
                ready.append(above)
    res = ''.join(order)
    return res if len(res) == len(graph) else -1
#!/usr/bin/env python3
# Quick Script to convert NVD information to CVE records
# Removed the need to store xml and zip files on the disk
# In[90]:
__author__ = "Quentin Mayo"
__copyright__ = "None"
__credits__ = ["Quentin Mayo"]
__license__ = "None"
__version__ = "1.0.0"
__maintainer__ = "Quentin Mayo"
__email__ = "N/A"
__status__ = "Production"
#Imports
import os
import re
import io
import csv
import ssl
import glob
import time
import zipfile
import requests
import argparse
import urllib.request
from datetime import date
from zipfile import ZipFile
from bs4 import BeautifulSoup
# Command Line Section
class RawTextArgumentDefaultsHelpFormatter(
        argparse.ArgumentDefaultsHelpFormatter,
        argparse.RawTextHelpFormatter
):
    """argparse formatter that both appends option defaults and preserves
    raw newlines in help text (argparse combines formatters only through
    multiple inheritance)."""
    pass
parser = argparse.ArgumentParser(
formatter_class=RawTextArgumentDefaultsHelpFormatter,description='''This program simplifies the process of converting nvd data into an csv. The goal was to make this program
robust enough that anyone could find use out of this script. If you goal is to get all CVE information in an cve or due some filtering based on this month, this program can handle
that task. Though this tool can be used by a layman, to get the most out of this tool, some understanding of xml and the raw nvd data will be very helpful. This script
was built with apis in mind.
'''
)
data_map_string ='CVE|vuln:cve-id,vulnerable-configuration|cpe-lang:fact-ref,vulnerable-software-list|vuln:product,cvss:score|cvss:score,cvss:access-vector|cvss:access-vector,cvss:access-complexity|cvss:access-complexity,cvss:confidentiality-impact|cvss:confidentiality-impact,cvss:integrity-impact|cvss:integrity-impact,cvss:availability-impact|cvss:availability-impact,cvss:source|cvss:source,vuln:summary|vuln:summary'
parser.add_argument('-cve_information_path', default="cve_information.csv", help='''Output csv file. You can also provide a path(example:results/cve_information.csv''')
parser.add_argument('-custom_filter_string', default="", help='''Setting this value will allow you to filter a given column via regex. The default reserve spacer is
[|], you can override this by setting the custom_filter_string_spacer.
Example 'CVE[|]2019' will filter everything in the column CVE defined in -data_map_string by the regex 2019.
''')
parser.add_argument('-custom_filter_string_spacer', default="[|]", help='''View -custom_filter_string''')
parser.add_argument('-date_range_string', default="2002|today", help='''Set the date range for loading the NVD xml files. the NVD files are broken up into years.
"type is keyword that will return that year. If you want just one year, you will need to say that year twice(ex: 2002|2002). pipe (|) is the breaker between the beginning and ending year''')
parser.add_argument('-data_map_string', default=data_map_string, help='''
This an advance setting but it allows any to customize the csv output. This command require some understanding of
the nvd xml format and Python's beautifulsoup library.cve_information is string input that is seperated by pipes(|).
Each item denotes a column in the csv.
index [0]
This column is the csv column name
index =[1]
This column tells the output. it will output the value if available . THe currently supported items are below
cpe-lang:logical-test --> this will return a list seperated by "," of vulnerable-configurations
vuln:vulnerable-software-list --> this will return a list seperated by "," of vulnerable-products
[others] --> if want to something else out, just provide the tag name. the script will automationly pull out
the string(ex: vuln:cve-id will give you back CVE-1999-0002 )
A sample Entry is below:
<entry id="CVE-1999-0002">
<vuln:vulnerable-configuration id="http://nvd.nist.gov/">
<cpe-lang:logical-test operator="OR" negate="false">
<cpe-lang:fact-ref name="cpe:/o:bsdi:bsd_os:1.1"/>
<cpe-lang:fact-ref name="cpe:/o:caldera:openlinux:1.2"/>
</cpe-lang:logical-test>
</vuln:vulnerable-configuration>
<vuln:vulnerable-software-list>
<vuln:product>cpe:/o:bsdi:bsd_os:1.1</vuln:product>
<vuln:product>cpe:/o:caldera:openlinux:1.2</vuln:product>
</vuln:vulnerable-software-list>
<vuln:cve-id>CVE-1999-0002</vuln:cve-id>
<vuln:published-datetime>1998-10-12T00:00:00.000-04:00</vuln:published-datetime>
<vuln:last-modified-datetime>2009-01-26T00:00:00.000-05:00</vuln:last-modified-datetime>
<vuln:cvss>
<cvss:base_metrics>
<cvss:score>10.0</cvss:score>
<cvss:access-vector approximated="true">NETWORK</cvss:access-vector>
<cvss:access-complexity approximated="true">LOW</cvss:access-complexity>
<cvss:authentication approximated="true">NONE</cvss:authentication>
<cvss:confidentiality-impact approximated="true">COMPLETE</cvss:confidentiality-impact>
<cvss:integrity-impact approximated="true">COMPLETE</cvss:integrity-impact>
<cvss:availability-impact approximated="true">COMPLETE</cvss:availability-impact>
<cvss:source>http://nvd.nist.gov</cvss:source>
<cvss:generated-on-datetime>2004-01-01T00:00:00.000-05:00</cvss:generated-on-datetime>
</cvss:base_metrics>
</vuln:cvss>
<vuln:security-protection>ALLOWS_ADMIN_ACCESS</vuln:security-protection>
<vuln:cwe id="CWE-119"/>
<vuln:references xml:lang="en" reference_type="UNKNOWN">
<vuln:source>SGI</vuln:source>
<vuln:reference href="ftp://patches.sgi.com/support/free/security/advisories/19981006-01-I" xml:lang="en">19981006-01-I</vuln:reference>
</vuln:references>
<vuln:references xml:lang="en" reference_type="UNKNOWN">
<vuln:source>CIAC</vuln:source>
<vuln:reference href="http://www.ciac.org/ciac/bulletins/j-006.shtml" xml:lang="en">J-006</vuln:reference>
</vuln:references>
<vuln:references xml:lang="en" reference_type="VENDOR_ADVISORY">
<vuln:source>BID</vuln:source>
<vuln:reference href="http://www.securityfocus.com/bid/121" xml:lang="en">121</vuln:reference>
</vuln:references>
<vuln:summary>Buffer overflow in NFS mountd gives root access to remote attackers, mostly in Linux systems.</vuln:summary>
</entry>
''' )
# Functions
# make dir
def mkdir(path):
    """Create ``path`` (and parents) if missing; best-effort, never raises.

    An empty path is a no-op — callers pass ``os.path.split(p)[0]``, which
    is '' for bare filenames in the current directory.
    """
    if not path:
        return
    try:
        os.makedirs(path, exist_ok=True)
    except OSError:
        # Best-effort by design: the later open() will surface real errors.
        pass
def soup_get_attribute_text(entry, attribute, default_missing_message="None Found"):
    """Text of the first ``attribute`` tag under ``entry`` (BeautifulSoup
    ``find``), or the fallback message when the tag is absent."""
    found = entry.find(attribute)
    return found.text if found else default_missing_message
def extract_zip(input_zip):
    """Return the raw bytes of every member of the given zip archive.

    ``input_zip`` may be a path or a file-like object (e.g. BytesIO).
    """
    archive = ZipFile(input_zip)
    return [archive.read(member) for member in archive.namelist()]
def get_year(year):
    """Map the keyword "today" to the current year; otherwise parse as int."""
    return date.today().year if year == "today" else int(year)
def get_nvd_data(soup, data_map, custom_filter_map):
    """Extract one dict per CVE <entry> from a parsed NVD 2.0 feed.

    soup: BeautifulSoup tree of an nvdcve-2.0 XML feed.
    data_map: list of [csv_column_name, xml_tag] pairs.
    custom_filter_map: [column, regex] pair; when it has exactly two items,
    entries whose column text does not match the regex are dropped.
    """
    # Wall time: 1min 25s
    cve_information = []
    for entry in soup.find_all("entry"):
        # Only entries that actually carry a CVE id are kept.
        if(entry.find("vuln:cve-id")):
            temp_entry = {}
            for item in data_map:
                # The two list-valued tags are comma-joined; everything else
                # falls back to the tag's plain text (or "None Found").
                if(item[1] == "cpe-lang:fact-ref"): temp_entry[item[0]] = ",".join([x["name"] for x in entry.find_all("cpe-lang:fact-ref")])
                elif(item[1] == "vuln:product"):
                    temp_entry[item[0]] = ",".join([x.text for x in entry.find_all("vuln:product")])
                else: temp_entry[item[0]] = soup_get_attribute_text(entry, item[1])
            if(len(custom_filter_map)==2):
                if(re.match(custom_filter_map[1], temp_entry[custom_filter_map[0]])):
                    cve_information += [temp_entry]
            else:
                cve_information += [temp_entry]
    return cve_information
def main(parser):
    """CLI entry point: parse args, echo them, then run the scraper."""
    args = parser.parse_args()
    print("Data from args:")
    for field in ("cve_information_path", "data_map_string",
                  "custom_filter_string", "date_range_string",
                  "custom_filter_string_spacer"):
        print("%s:%s" % (field, getattr(args, field)))
    print("Load Args into Variables")
    get_nvd_data_from_online(
        args.cve_information_path,
        args.data_map_string,
        args.custom_filter_string,
        args.date_range_string,
        args.custom_filter_string_spacer,
        outfile=True,
    )
def get_nvd_data_from_online(cve_information_path, data_map_string, custom_filter_string, date_range_string, custom_filter_string_spacer, outfile=False):
    """Download the yearly NVD 2.0 XML feeds and extract CVE rows.

    Returns the list of extracted row-dicts; when ``outfile`` is true the
    rows are also written as CSV to ``cve_information_path``.
    """
    print("Clean Up user data")
    # "col|tag,col|tag,..." -> [[col, tag], ...] plus the CSV column order.
    data_map = list(map(lambda x: x.split("|"), data_map_string.split(",")))
    keys = list(map(lambda x: x.split("|")[0], data_map_string.split(",")))
    custom_filter_map = custom_filter_string.split(custom_filter_string_spacer)
    date_range = date_range_string.split("|")
    cve_data = []
    # Fix ssl issue with nvd.nist.gov
    # NOTE(review): this disables TLS certificate verification process-wide.
    ssl._create_default_https_context = ssl._create_unverified_context
    for index, year in enumerate(range(get_year(date_range[0]), get_year(date_range[1])+1)):
        start = time.time()
        url = "https://nvd.nist.gov/feeds/xml/cve/nvdcve-2.0-%s.xml.zip" %(year)
        # Stream the year's zip into memory, unzip it in-memory, and hand
        # the first member (the XML) to BeautifulSoup for parsing.
        print("Loading(%s):%s"%(index,url))
        soup_data = BeautifulSoup(extract_zip(io.BytesIO(requests.get(url, stream=True).content))[0], 'lxml')
        print("Extracting Data from Data(%s)"%(index))
        cve_data.extend(get_nvd_data(soup_data, data_map, custom_filter_map))
        print("Total Compute Time(s) forData(%s) = %s"%(index,time.time() - start))
    # Output to file
    if(outfile):
        mkdir(os.path.split(cve_information_path)[0])
        with open(cve_information_path, 'w', newline='') as csvFile:
            writer = csv.writer(csvFile)
            writer.writerow(keys)
            writer.writerows([[x[y] for y in keys] for x in cve_data])
    return cve_data
# Script entry point.
if __name__ == "__main__":
    main(parser)
|
# -*- coding: cp1252 -*-
import time
from scapy.all import *
#Declaration des variables
ipd = str(raw_input("Entrer une adresse IP: "))
portmin = str(raw_input("Entrer un port min: "))
portmax = str(raw_input("Entrer un port max : "))
ports = RandShort()
port = 0
ips = "10.101.200.13"
#Dictionnaire Port / services
services = {21:"FTP", 22:"SSH", 23:"Telnet", 25:"SMTP", 80:"HTTP", 53:"DNS"}
def isPortUp(ips,ipd,portmin, portmax):
# SYN
ip=IP(src=ips,dst=ipd)
SYN=TCP(sport=ports,dport=port,flags='S',seq=1000)
SYNACK=sr1(ip/SYN)
# ACK
ACK=TCP(sport=ports, dport=port, flags='A', seq=SYNACK.ack, ack=SYNACK.seq + 1)
send(ip/ACK)
# Selon le résultat du flags, on affiche "ouvert" ou "fermé"
for port in range(portmin, portmax):
if ACK.flags == 'A':
print "Port : " %port + " opened " + services.get(port)
else:
print "Port : " + str(port) + " closed"
isPortUp(ips,ipd,portmin,portmax)
start_time = time.time()
#boucle test de port + impression à l'écran
print "\n-------------------------------------------------"
print "Temps d'execution : %s secondes ---" % (time.time() - start_time)
print "-------------------------------------------------"
|
# Generated by Django 3.1.3 on 2021-04-11 03:03
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 3.1.3): add a unique constraint to the
    ``eqv_letra`` CharField (max_length=10) of the ``talla`` model."""

    dependencies = [
        ('talla', '0002_auto_20210410_2203'),
    ]

    operations = [
        migrations.AlterField(
            model_name='talla',
            name='eqv_letra',
            field=models.CharField(max_length=10, unique=True),
        ),
    ]
|
# Copyright (c) 2020 Leedehai. All rights reserved.
# Use of this source code is governed under the MIT LICENSE.txt file.
import argparse
import multiprocessing
import os
import sys
from enum import Enum
from typing import Any, Dict, OrderedDict, Tuple
from pylibs.score_utils import error_s
# Types.
Args = argparse.Namespace  # parsed command-line arguments
TaskMetadata = Dict[str, Any]  # one task's metadata dict
TaskResult = OrderedDict[str, Any]  # ordered per-task result record
TaskWorkerArgs = Tuple[str, bool, str, bool, TaskMetadata]  # worker payload
# Constants.
LOG_FILE_BASE = "log.json"  # basename of the run's result log
DELIMITER_STR = "#####"  # delimiter string (presumably handed to ctimer via CTIMER_DELIMITER — confirm)
GOLDEN_NOT_WRITTEN_PREFIX = "golden file not written"  # see TaskExceptions
class TaskExceptions(Enum):
    """Sentinel messages recorded when a golden file cannot be produced
    or is absent."""
    GOLDEN_NOT_WRITTEN_SAME_CONTENT = "%s: content is the same" % GOLDEN_NOT_WRITTEN_PREFIX
    GOLDEN_NOT_WRITTEN_WRONG_EXIT = "%s: the test's exit is not as expected" % GOLDEN_NOT_WRITTEN_PREFIX
    GOLDEN_FILE_MISSING = "golden file missing"
class TaskEnvKeys(Enum):
    """Environment variable names consumed by the ctimer subprocess."""
    CTIMER_DELIMITER_ENVKEY = "CTIMER_DELIMITER"
    CTIMER_TIMEOUT_ENVKEY = "CTIMER_TIMEOUT"
def get_num_workers(env_var: str) -> int:
    """Worker count: a positive integer read from ``env_var`` if set,
    otherwise the machine's CPU count.

    Exits the program (via sys.exit with an error message) when the
    variable is set but is not a positive integer.

    Fix: the "not an integer" message misspelled "variable" ("vairable").
    """
    raw = os.environ.get(env_var, "")
    if not raw:
        return multiprocessing.cpu_count()
    try:
        value = int(raw)
    except ValueError:
        sys.exit(error_s("env variable '%s' is not an integer" % env_var))
    if value <= 0:
        sys.exit(error_s("env variable '%s' is not positive" % env_var))
    return value
# Upper bound on concurrent workers, overridable via $NUM_WORKERS.
NUM_WORKERS_MAX = get_num_workers(
    env_var="NUM_WORKERS")  # not "NUM_WORKERS_MAX", to be consistent
|
import numpy as np
# To reduce the computational cost of the particle filter and the trajectory
# generator, this class avoids re-reading the whole map file (as extracted
# from its .csv) on every query. For a specified position on the map it keeps
# only the points of interest, defined as the region within a k*sensorLength
# radius (k = 1.5 by default) of the vehicle.
class reducedMap:
    """Maintains a cropped window of a grid map around a vehicle position.

    Map is an array whose first two columns are x and y coordinates of grid
    points; assumes a regular grid centred on the origin — TODO confirm.
    Row indices are assumed x-major (len(yValues) consecutive rows per x
    value) — TODO confirm against the map loader.
    self.isin holds the row indices of the kept points; self.cutMap the rows.
    """
    def __init__(self, Map, x0, y0, sensorLength = 3, k = 1.5):
        # Sorted distinct grid coordinates along each axis.
        self.xValues = np.unique(Map[:,0])
        self.yValues = np.unique(Map[:,1])
        self.k = k
        # Grid spacing, taken from the first two distinct coordinates.
        self.dx = self.xValues[1] - self.xValues[0]
        self.dy = self.yValues[1] - self.yValues[0]
        # Number of points a full (unclipped) window would contain.
        self.maxDim = np.ceil(2*self.k*sensorLength/self.dx)*np.ceil(2*self.k*sensorLength/self.dy)
        # Snap the requested position to the nearest grid node.
        newCx = np.argmin(abs(self.xValues - x0))
        newCy = np.argmin(abs(self.yValues - y0))
        self.xCenter = self.xValues[newCx]
        self.yCenter = self.yValues[newCy]
        self.iCenter = newCx
        self.jCenter = newCy
        # Cut the Map in X: window clamped to the map half-extent.
        xrange = [max(self.xCenter - k*sensorLength, -np.ptp(self.xValues)/2), min(self.xCenter + k*sensorLength, np.ptp(self.xValues)/2)]
        isinX = []
        for i,x in enumerate(self.xValues):
            if x >= xrange[0] and x <= xrange[1]:
                # All rows sharing this x value (one per y grid line).
                tempX = [len(self.yValues)*i + j for j in range(len(self.yValues))]
                isinX.extend(tempX)
        # Cut the Map in Y, symmetrically.
        yrange = [max(self.yCenter - k*sensorLength, -np.ptp(self.yValues)/2), min(self.yCenter + k*sensorLength, np.ptp(self.yValues)/2)]
        isinY = []
        for i,y in enumerate(self.yValues):
            if y >= yrange[0] and y <= yrange[1]:
                tempY = [i + j*len(self.yValues) for j in range(len(self.xValues))]
                isinY.extend(tempY)
        # The window is the intersection of the two axis-aligned cuts.
        self.isin = np.intersect1d(isinX, isinY)
        if len(self.isin) == 0:
            # Debug output: the window fell entirely outside the map.
            print(self.xCenter - k*sensorLength, -np.ptp(self.xValues)/2, self.xCenter + k*sensorLength, np.ptp(self.xValues)/2)
            print(self.xValues)
        self.cutMap = Map[self.isin, :]

    def propagateMotion(self, Map, x1, y1):
        """Shift the cached window to follow a move to (x1, y1).

        Falls back to a full re-crop when the current window holds fewer
        points than a full window (i.e. it was clipped at a map border).
        NOTE(review): the fallback calls __init__ with the *default*
        sensorLength/k, dropping any custom values given originally — confirm.
        """
        if len(self.cutMap) < self.maxDim:
            self.__init__(Map, x1, y1)
        newCx = np.argmin(abs(self.xValues - x1))
        newCy = np.argmin(abs(self.yValues - y1))
        self.xCenter = self.xValues[newCx]
        self.yCenter = self.yValues[newCy]
        # Offset (in grid cells) between the new and the old center.
        deltaX = newCx - self.iCenter
        deltaY = newCy - self.jCenter
        self.iCenter = newCx
        self.jCenter = newCy
        # Translation in X: one x step is len(yValues) row indices.
        self.isin = self.isin + deltaX*len(self.yValues)
        # Translation in Y: one y step is one row index.
        self.isin = self.isin + deltaY
        # Clamp indices that fell off the map to the new center's row.
        for i,j in enumerate(self.isin):
            if j >= len(self.yValues)*len(self.xValues) or j < 0:
                self.isin[i] = newCx*len(self.yValues) + newCy
                # self.isin[i] = len(self.yValues)*len(self.xValues) - 1
            # elif j < 0:
            #     self.isin[i] = 0
        self.isin = np.unique(self.isin)
        self.cutMap = Map[self.isin, :]
|
'''
Created on 31 Aug 2017
@author: Mathias Bucher
'''
class FileHandle(object):
    '''
    This class offers some methods for accessing files
    in a sqlite database. It does not access the database
    directly but offers methods to convert files into a
    bytestream which can be stored in sql.

    Framing scheme: streams are joined with the 3-byte syncword
    0xAA 0xBB 0xAA. Before joining, every occurrence of the 2-byte prefix
    0xAA 0xBB inside a payload is escaped to 0xAA 0xBB 0xBB (replaceword),
    so after escaping every 0xAA 0xBB is followed by 0xBB and the syncword
    cannot appear inside payload data.

    NOTE: Python 2 code (uses dict.iteritems and the unicode type).
    '''
    # 3-byte separator appended after each stream.
    syncword = bytearray([0xAA, 0xBB, 0xAA])
    # 2-byte prefix that must not survive unescaped in payload data.
    syncwordshort = bytearray([0xAA, 0xBB])
    # Escape sequence substituted for syncwordshort inside payloads.
    replaceword= bytearray([0xAA, 0xBB, 0xBB])

    def __init__(self, log):
        '''Constructor. log: logger providing add() and an Info level.'''
        self.log = log
        self.log.add(self.log.Info, __file__, "init" )

    def getStreamFromDictFiles(self, files):
        '''Returns a binary stream where each filename (=key of files dict) is
        followed by its content (=value of files dict)'''
        barray = []
        for key, value in files.iteritems():
            # Filenames may be unicode; store them UTF-8 encoded.
            if isinstance(key, unicode):
                key = key.encode("utf-8")
            barray.append(bytearray(key))
            barray.append(value)
        stream = self.insertSyncWords(barray)
        return stream

    def getDictFilesFromStream(self, stream):
        '''Returns the files in a dictionary, where key = filename
        and value = content. This method expects a bytestream where
        filename is followed by content, separated by sync words'''
        blist = self.removeSyncWords(stream)
        files = dict()
        for i, b in enumerate(blist):
            # even elements are keys (=filenames)
            if i%2 == 0:
                key = str(b)
                files[key] = bytearray()
            # odd elements are values (=file contents)
            else:
                key = str(blist[i-1])
                files[key] = b
        return files

    def getFilesFromStream(self, stream):
        '''Converts the stream into files. The files
        must be separated by sync words'''
        return self.removeSyncWords(stream)

    def getStreamFromFiles(self, files):
        '''Converts the files list into a bytestream,
        which can be stored in a database. The stream
        separates the files by sync word'''
        return self.insertSyncWords(files)

    def insertSyncWords(self, bytestreams):
        '''Returns a single bytestream which contains
        all the streams of bytestreams list. The streams
        are separated by sync word (each stream is escaped first,
        see the class docstring for the framing scheme)'''
        stream = bytearray()
        for s in bytestreams:
            if isinstance(s, unicode):
                s = s.encode("utf-8")
            s = bytearray(s)
            # Escape so the syncword cannot occur inside the payload.
            s = s.replace(self.syncwordshort, self.replaceword)
            stream.extend(s)
            stream.extend(self.syncword)
        return stream

    def removeSyncWords(self, stream):
        '''Removes the sync words of stream and returns
        a list of the separated bytestreams (unescaped)'''
        splitted = stream.split(self.syncword)
        # The stream ends with a syncword, so the last split element is empty.
        splitted = splitted[0:splitted.__len__()-1]
        bytestreams = []
        for s in splitted:
            # Undo the escaping applied by insertSyncWords.
            s = s.replace(self.replaceword, self.syncwordshort)
            bytestreams.append(s)
        return bytestreams
|
from __future__ import absolute_import, unicode_literals
# This will make sure the app is always imported when
# Django starts so that shared_task will use this app.
from scrapy_app.tasks import app as scrapy_app
# Public API: only the app object is exported from this module.
__all__ = ('scrapy_app',)
|
"""
A monitor for a Port.
"""
class PortMonitor:
    """ Looks at the number of items in the Port, in service + in the queue,
    and records that info in the sizes[] list. The monitor looks at the port
    at time intervals given by the distribution dist.

    Parameters
    ----------
    env: simpy.Environment
        the simulation environment.
    port: Port
        the switch port object to be monitored.
    dist: function
        a no parameter function that returns the successive inter-arrival
        times of the packets
    pkt_in_service_included: bool
        when True, the packet currently being transmitted is counted too.
    """
    def __init__(self, env, port, dist, pkt_in_service_included=False):
        self.port = port
        self.env = env
        self.dist = dist
        self.sizes = []       # sampled packet counts
        self.sizes_byte = []  # sampled byte counts
        # Fix: assign all state that run() reads *before* registering the
        # process; the original relied on SimPy deferring generator
        # execution past the end of __init__.
        self.pkt_in_service_included = pkt_in_service_included
        self.action = env.process(self.run())

    def run(self):
        """The generator function used in simulations."""
        while True:
            yield self.env.timeout(self.dist())
            if self.pkt_in_service_included:
                total_byte = self.port.byte_size + self.port.busy_packet_size
                total = len(self.port.store.items) + self.port.busy
            else:
                total_byte = self.port.byte_size
                total = len(self.port.store.items)
            self.sizes.append(total)
            self.sizes_byte.append(total_byte)
|
# -*- coding: utf-8 -*-
from openerp import fields, models, api
from dateutil.relativedelta import relativedelta
import json
from openerp.exceptions import UserError
class AccountInvoice(models.Model):
    """Customizations of account.invoice: stored payment date, VAT name,
    a draft-aware display number, extra reference fields, and several core
    fields redefined to relax their readonly behaviour."""
    _inherit = "account.invoice"

    @api.one
    @api.depends('payment_move_line_ids.amount_residual')
    def _get_payment_date(self):
        # Date taken from the first entry of the payments widget JSON.
        # NOTE(review): assumes content[0] is the relevant payment — confirm.
        payment = json.loads(self.payments_widget)
        if payment:
            self.payment_date = payment['content'][0]['date']

    @api.model
    def _default_currency(self):
        # Journal currency, falling back to the journal's company currency,
        # then the current user's company currency.
        journal = self._default_journal()
        return journal.currency_id or journal.company_id.currency_id or self.env.user.company_id.currency_id

    @api.one
    def _get_vat_name(self):
        # Takes the name of the last tax encountered on the invoice lines.
        # NOTE(review): raises NameError when no line carries a tax
        # (vat_name never bound) — confirm this cannot happen in practice.
        for invoice_line_ids in self.invoice_line_ids:
            for invoice_line_tax_ids in invoice_line_ids.invoice_line_tax_ids:
                vat_name = invoice_line_tax_ids.name
        self.vat_name = vat_name

    @api.one
    @api.depends('state')
    def _compute_my_number(self):
        # Display number: 'DRAFT' until the invoice is validated or paid.
        if self.state == 'draft':
            self.my_number = 'DRAFT'
        elif self.state == 'open':
            self.my_number = self.number
        elif self.state == 'paid':
            self.my_number = self.number

    my_number = fields.Char(store=True, compute='_compute_my_number')
    responsible_id = fields.Many2one('res.partner', string='Responsible', change_default=True,
                                     required=False, readonly=False, states={'draft': [('readonly', False)]},
                                     track_visibility='always')
    # Stored so it can be searched/grouped on.
    payment_date = fields.Date('Payment Date', compute='_get_payment_date', store=True)
    purchase_order = fields.Char(string='Purchase Order', readonly=True, states={'draft': [('readonly', False)]})
    supplier_id = fields.Char(string='Supplier ID', readonly=True, states={'draft': [('readonly', False)]})
    # Redefined with readonly=False (core version locks it outside draft).
    invoice_line_ids = fields.One2many('account.invoice.line', 'invoice_id', string='Invoice Lines', oldname='invoice_line',
                                       readonly=False, copy=True)
    partner_id = fields.Many2one('res.partner', string='Partner', change_default=True,
                                 required=True, readonly=False, states={'draft': [('readonly', False)]},
                                 track_visibility='always')
    payment_term_id = fields.Many2one('account.payment.term', string='Payment Term', oldname='payment_term',
                                      readonly=False, states={'draft': [('readonly', False)]},
                                      help="If you use payment terms, the due date will be computed automatically at the generation "
                                           "of accounting entries. If you keep the payment term and the due date empty, it means direct payment. "
                                           "The payment term may compute several due dates, for example 50% now, 50% in one month.")
    date_invoice = fields.Date(string='Invoice Date',
                               readonly=False, states={'draft': [('readonly', False)]}, index=True,
                               help="Keep empty to use the current date", copy=False)
    date_due = fields.Date(string='Due Date',
                           readonly=False, states={'draft': [('readonly', False)]}, index=True, copy=False,
                           help="If you use payment terms, the due date will be computed automatically at the generation "
                                "of accounting entries. The payment term may compute several due dates, for example 50% "
                                "now and 50% in one month, but if you want to force a due date, make sure that the payment "
                                "term is not set on the invoice. If you keep the payment term and the due date empty, it "
                                "means direct payment.")
    tax_line_ids = fields.One2many('account.invoice.tax', 'invoice_id', string='Tax Lines', oldname='tax_line',
                                   readonly=False, states={'draft': [('readonly', False)]}, copy=True)
    state = fields.Selection([
        ('draft','Draft'),
        ('proforma', 'Pro-forma'),
        ('proforma2', 'Pro-forma'),
        ('open', 'Unpaid'),
        ('paid', 'Paid'),
        ('cancel', 'Cancelled'),
    ], string='Status', index=True, readonly=True, default='draft',
        track_visibility='onchange', copy=False,
        help=" * The 'Draft' status is used when a user is encoding a new and unconfirmed Invoice.\n"
             " * The 'Pro-forma' status is used the invoice does not have an invoice number.\n"
             " * The 'Unpaid' status is used when user create invoice, an invoice number is generated. Its in unpaid status till user does not pay invoice.\n"
             " * The 'Paid' status is set automatically when the invoice is paid. Its related journal entries may or may not be reconciled.\n"
             " * The 'Cancelled' status is used when user cancel invoice.")
    name = fields.Char(string='Reference/Description', index=True,
                       readonly=False, states={'draft': [('readonly', False)]}, copy=False, help='The name that will be used on account move lines')
    # currency_id = fields.Many2one('res.currency', string='Currency',
    #     required=True, readonly=False, states={'draft': [('readonly', False)]},
    #     default=_default_currency, track_visibility='always')
    customer_vat = fields.Char('Customer VAT')
    # NOTE(review): compute is passed as a function reference here, unlike the
    # string names used above — confirm this is intentional (old-API style).
    vat_name = fields.Char(string='vat name', invisible='1', compute=_get_vat_name)
    comment = fields.Text('Additional Information', readonly=False, states={'draft': [('readonly', False)]})
    po_number = fields.Char(string="PO Number")

    @api.onchange('payment_term_id', 'date_invoice', 'date_due')
    def _onchange_payment_term_date_invoice(self):
        # Recompute the due date from the payment term whenever the term or
        # either date changes.
        date_invoice = self.date_invoice
        date_due = self.date_due
        if not date_invoice:
            date_invoice = fields.Date.context_today(self)
        if not self.payment_term_id:
            # When no payment term defined
            self.date_due = self.date_due or self.date_invoice
        else:
            pterm = self.payment_term_id
            # NOTE(review): ids 4, 8 and 15 are database-specific magic
            # numbers selecting terms that use date_due as base — confirm
            # they are stable across installations.
            if self.payment_term_id.id in [4, 8, 15, ]:
                pterm_list = pterm.with_context(currency_id=self.currency_id.id).compute(value=1, date_ref=date_invoice, date_ref2=date_due)[0]
            else:
                pterm_list = pterm.with_context(currency_id=self.currency_id.id).compute(value=1, date_ref=date_invoice)[0]
            self.date_due = max(line[0] for line in pterm_list)
class AccountPaymentTerm(models.Model):
    _inherit = "account.payment.term"

    @api.one
    def compute(self, value, date_ref=False, date_ref2=False):
        """Compute the installment (date, amount) pairs for *value*.

        Extends the standard computation with an optional *date_ref2* used
        as the base date instead of *date_ref* when given.

        Bug fix: the option test was
        ``line.option == 'day_after_invoice_date' or 'custom_due_date'``,
        which is always truthy, so every line took the day-offset branch and
        the fix-day / last-day options were unreachable. It now tests
        membership in ('day_after_invoice_date', 'custom_due_date').

        Returns a list of (date_string, amount) tuples; any rounding
        remainder is appended to the last installment.
        """
        date_ref = date_ref or fields.Date.today()
        amount = value
        result = []
        if self.env.context.get('currency_id'):
            currency = self.env['res.currency'].browse(self.env.context['currency_id'])
        else:
            currency = self.env.user.company_id.currency_id
        prec = currency.decimal_places
        for line in self.line_ids:
            if line.value == 'fixed':
                amt = round(line.value_amount, prec)
            elif line.value == 'percent':
                amt = round(value * (line.value_amount / 100.0), prec)
            elif line.value == 'balance':
                amt = round(amount, prec)
            if amt:
                # Base date: date_ref2 (custom due date) when supplied.
                if date_ref2:
                    next_date = fields.Date.from_string(date_ref2)
                else:
                    next_date = fields.Date.from_string(date_ref)
                if line.option in ('day_after_invoice_date', 'custom_due_date'):
                    next_date += relativedelta(days=line.days)
                elif line.option == 'fix_day_following_month':
                    next_first_date = next_date + relativedelta(day=1, months=1)  # 1st of next month
                    next_date = next_first_date + relativedelta(days=line.days - 1)
                elif line.option == 'last_day_following_month':
                    next_date += relativedelta(day=31, months=1)  # last day of next month
                elif line.option == 'last_day_current_month':
                    next_date += relativedelta(day=31, months=0)  # last day of current month
                result.append((fields.Date.to_string(next_date), amt))
                amount -= amt
        amount = reduce(lambda x, y: x + y[1], result, 0.0)
        dist = round(value - amount, prec)
        if dist:
            # Put the rounding remainder on the last installment.
            last_date = result and result[-1][0] or fields.Date.today()
            result.append((last_date, dist))
        return result
class AccountPaymentTermLine(models.Model):
    _inherit = "account.payment.term.line"

    # Extends the base option list with 'custom_due_date', which lets the
    # due date be driven by an externally supplied base date (see the
    # date_ref2 parameter of AccountPaymentTerm.compute in this module).
    option = fields.Selection([
        ('day_after_invoice_date', 'Day(s) after the invoice date'),
        ('fix_day_following_month', 'Fixed day of the following month'),
        ('last_day_following_month', 'Last day of following month'),
        ('last_day_current_month', 'Last day of current month'),
        ('custom_due_date', 'Custom Due Date'),
    ], default='day_after_invoice_date', required=True, string='Options')
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 23 12:41:03 2017
@author: elizabethheld
"""
## Import packages and dependencies
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
from sklearn import linear_model, decomposition, datasets
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.decomposition import PCA
import random
random.seed(14)
# NOTE(review): only the stdlib `random` module is seeded; numpy's RNG
# (np.random.choice below) stays unseeded, so runs are not reproducible.

# Choose the number of components for PCA
# Previous runs have shown that 85% of the
# variance is explained by the top 16 components
encoding_dim = 16  # NOTE(review): unused — n_components below duplicates it
N=1000  # number of resampling simulation runs
# Load toy data
digits = datasets.load_digits()
target_names = digits.target_names
X_digits = digits.data
y_digits = digits.target
# Binarize the 10-class problem: 1 when the digit is > 4, else 0.
y_digits_round = y_digits > 4
y_digits_round = y_digits_round.astype('int')
# Get total number of images and their dims recorded
n_samples, h, w = digits.images.shape
# Split the data into testing and training data
X_train, X_test, Y_train, Y_test = train_test_split(X_digits, y_digits_round, test_size=0.25, random_state=42)
# Get simulation indices: for each run, keep each sample with probability 0.1.
n_train = N*len(X_train)
n_test = N*len(X_test)
ind_train = np.random.choice(np.array([0,1]), size=n_train, p=[0.9,0.1]).reshape(N, len(X_train))
ind_test = np.random.choice(np.array([0,1]), size=n_test, p=[0.9,0.1]).reshape(N, len(X_test))
n_components = 16
# Run the PCA + logistic-regression pipeline once per simulated subsample,
# recording the test AUC of each run.
hold2 = [0]*N
for i in range(N):
    # Subsample this run's train/test rows via the 0/1 indicator masks.
    x_train = X_train[ind_train.astype('bool')[i,],]
    x_test = X_test[ind_test.astype('bool')[i,],]
    y_train = Y_train[ind_train.astype('bool')[i,],]
    y_test = Y_test[ind_test.astype('bool')[i,],]
    # Build PCA
    pca = PCA(n_components=n_components, svd_solver='randomized',
              whiten=True).fit(x_train)
    pca_digits = pca.components_.reshape((n_components, h, w))
    # Transform using PCA
    X_train_pca = pca.transform(x_train)
    X_test_pca = pca.transform(x_test)
    # Train a LR classification model, grid-searching over C.
    grid = {
        'C': np.power(10.0, np.arange(-10, 10))
        , 'solver': ['newton-cg']
    }
    # NOTE(review): tol=10 is extremely loose — the solver will stop almost
    # immediately; confirm this is intentional.
    clf = LogisticRegression(penalty='l2', random_state=777, max_iter=10000, tol=10)
    gs = GridSearchCV(clf, grid)
    # Fit LR
    gs.fit(X_train_pca, y_train)
    # Predict
    ypca_pred = gs.predict(X_test_pca)
    # Calculate FPR and TPR for ROC and AUC
    fpr, tpr, _ = metrics.roc_curve(np.array(y_test), gs.predict_proba(X_test_pca)[:,1])
    roc_auc = metrics.auc(fpr, tpr)
    #print(roc_auc)
    print(i)
    hold2[i] = roc_auc
print(hold2)
|
import mes_from_cpmd.ext_fortran.fortran_io as fio
from mes_from_cpmd.toolbox import CubeFileTools
from mes_from_cpmd.toolbox import transformations
from mes_from_cpmd.toolbox import lib_dme as lime
import ipdb
from mes_from_cpmd.misc import git_control
import mes_from_cpmd.toolbox.cube as cube
import numpy as np
import os
import sys
import argparse
import subprocess
def main():
    """Convert a CPMD .wan volume file into a Gaussian cube file.

    Grid and header metadata are taken from an existing cube file, the
    unformatted Fortran volume data is read into a matching buffer, and
    the combined result is written to the output cube file.
    """
    parser = argparse.ArgumentParser(
        description="convert wan file created by CPMD to cub file",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("fn_cube_in", help="Cubefile filename for header")
    parser.add_argument("fn_wan_in", help="dat filename with volume data")
    parser.add_argument("fn_cube_out", help="Cubefile filename for output")
    parser.add_argument("--verbose", default=False, action='store_true',
                        help="Verbose output")
    args = vars(parser.parse_args())
    if args['verbose']:
        print(args)

    # Header/grid metadata comes from the reference cube file.
    cell_data = CubeFileTools.LoadCellData(args['fn_cube_in'])
    # Fortran-ordered buffer, filled in place by the Fortran reader.
    state_data = np.asfortranarray(np.zeros(cell_data['mesh'], dtype=np.float64))
    n_x, n_y, n_z = cell_data['mesh']
    fio.fortran_read_unformatted(args['fn_wan_in'], state_data, n_x, n_y, n_z)
    cube.WriteCubeFile(args['fn_cube_out'],
                       cell_data['comment1'],
                       cell_data['comment2'],
                       cell_data['numbers'],
                       cell_data['coords_au'],
                       cell_data['cell_au'],
                       state_data,
                       cell_data['origin_au'])
|
if __name__ == "__main__":
    # Demo: split a greeting into words, then walk it word-by-word and
    # character-by-character.
    greeting = 'Hello from Python!'
    tokens = greeting.split(' ')
    print(tokens)
    for token in tokens:
        print(token)
    for char in greeting:
        print(char)
from nltk.util import ngrams
def comment_length(comment, parent_traits):
    """Trait: the character length of the comment's body."""
    return dict(length=len(comment.body))
def trigrams(comment, parent_traits):
    """Traits derived from the comment body's character trigrams.

    Every trigram of ``comment.body`` becomes a True-valued trait; the
    'shared_grams' key counts how many of those trigrams already appear in
    ``parent_traits``.

    Bug fix: the original set the counter to 0 on the *first* shared
    trigram and only incremented on later ones, undercounting by one.
    """
    traits = {}
    for gram in ngrams(comment.body, 3):
        if gram in parent_traits:
            traits['shared_grams'] = traits.get('shared_grams', 0) + 1
        traits[gram] = True
    return traits
# Registry of trait extractors; each maps (comment, parent_traits) -> dict.
trait_functions = [comment_length, trigrams]

def get_all_traits(comment, parent_traits):
    """Merge the trait dicts produced by every registered trait function."""
    merged = {}
    for extract in trait_functions:
        merged.update(extract(comment, parent_traits))
    return merged
|
import csv
import requests
from urllib.parse import urlencode
import re
import os
import json
from hashlib import md5
from multiprocessing.pool import Pool
import time
def read_video(filename):
    """Read the first CSV column of *filename* (gbk-encoded) into a list.

    Returns the list of first-column values, or None when the file cannot
    be read or parsed (same best-effort contract as before, but the bare
    ``except`` is narrowed to the errors this code can actually raise).
    """
    try:
        urls = []
        with open(filename, 'r', encoding='gbk') as csvfile:
            for row in csv.reader(csvfile):
                urls.append(row[0])
        return urls
    except (OSError, csv.Error, IndexError, UnicodeDecodeError):
        print('error')
        return None
def get_url(url):
    """Fetch the Weishi play-page JSON for one video page URL.

    Returns the decoded JSON dict on HTTP 200; returns None (after logging)
    for a falsy *url*, a non-200 response, or any request/decoding error.
    """
    if not url:
        return None
    headers = {
        'Referer': url,
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36',
        'X-requested-with': 'XMLHttpRequest'
    }
    # The trailing 17 characters of the page URL form the feed id.
    params = {
        'feedid': url[-17:],
        'recommendtype': '0',
        'datalvl': 'all',
        'format': 'json',
        'inCharset': 'utf-8',
        'outCharset': 'utf-8'
    }
    fin_url = 'https://h5.weishi.qq.com/webapp/json/weishi/WSH5GetPlayPage?' + urlencode(params)
    try:
        response = requests.get(fin_url, headers=headers)
        if response.status_code == 200:
            return response.json()
    except Exception as e:
        print('get_url Error !!!: ', e)
def get_video(json_):
    """Extract the first video URL from a play-page JSON blob.

    The URL is pulled out of the blob's string repr with a regex (matching
    the original quick-and-dirty approach). Returns None when the blob is
    missing/has no 'data' entry or no video URL can be found.

    Fix: the bare ``except`` is narrowed to the exceptions this code can
    actually raise (non-dict input, no regex match).
    """
    try:
        if json_.get('data'):
            pattern = re.compile("'video_spec_urls'.*?'url': '(.*?)'")
            matches = re.findall(pattern, str(json_))
            return matches[0]
        print("get_video Not Found 'video_spec_urls'")
        return None
    except (AttributeError, TypeError, IndexError):
        return None
def save_to_video(url):
    """Download *url* into Video/<md5-of-content>.mp4.

    Creates the Video directory on first use and skips writing when a file
    with the same content hash already exists; request errors are logged.
    """
    video_path = 'Video'
    if not os.path.exists(video_path):
        os.mkdir(video_path)
    try:
        videos = requests.get(url)
        if videos.status_code != 200:
            return
        digest = md5(videos.content).hexdigest()
        file_path = '{0}{1}{2}.{3}'.format(video_path, os.path.sep, digest, 'mp4')
        if not os.path.exists(file_path):
            with open(file_path, 'wb') as file:
                file.write(videos.content)
            print('Already Download: {}'.format(file_path))
    except Exception as e:
        print('save_to_video Error :', e)
def main(url):
    """Full pipeline for one entry: page URL -> play JSON -> video URL -> file."""
    save_to_video(get_video(get_url(url)))
if __name__ == '__main__':
    # Fan the CSV's URLs out over a process pool (one worker per CPU by default).
    pool = Pool()
    filename = 'movies.csv'
    pool.map(main, read_video(filename))
    # NOTE(review): purpose of this pause before shutdown is unclear — confirm.
    time.sleep(1)
    pool.close()
    pool.join()
# Generated by Django 3.2.5 on 2021-07-09 12:13
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: drops boarding.is_pet_dlv_back and
    # adds pick_date plus a status workflow field.

    dependencies = [
        ('app', '0007_auto_20210709_1738'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='boarding',
            name='is_pet_dlv_back',
        ),
        migrations.AddField(
            model_name='boarding',
            # NOTE(review): auto_now updates the date on *every* save —
            # confirm auto_now_add was not intended for a pick-up date.
            name='pick_date',
            field=models.DateField(auto_now=True),
        ),
        migrations.AddField(
            model_name='boarding',
            # NOTE(review): choice value/label capitalization is inconsistent
            # (('Preparing', 'preparing') vs ('delivered', 'delivered')).
            name='status',
            field=models.CharField(choices=[('Preparing', 'preparing'), ('Pickup', 'pickup'), ('delivered', 'delivered'), ('Boarded', 'boarded'), ('on the way', 'on the way')], default='Preparing', max_length=20),
        ),
    ]
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import numpy as np
import scipy.io
# Load the hand-written-digit data set (X: images, y: labels).
data=scipy.io.loadmat('ex3data1.mat')
Raw_X=data['X']
Raw_Y=data['y']
# Shuffle features and labels together by stacking them column-wise.
A=np.hstack((Raw_X,Raw_Y))
np.random.shuffle(A)
# NOTE(review): A[:,:399] keeps only columns 0..398 while the label is read
# from column 400 — column 399 is silently dropped; A[:,:400] looks intended.
X=A[:,:399]
Y=A[:,400]
# Remap MATLAB's label "10" back to digit 0.
for i in range(5000):
    if Y[i] == 10:
        Y[i]=0
# One-hot encode the labels into a 5000x10 indicator matrix.
new_Y=np.zeros((5000,10))
for i in range(10):
    print(i)
    indices=(Y==i)
    new_Y[:,i]=indices
# NOTE(review): the test split starts at 4001, so row 4000 is in neither
# split — off-by-one?
train_x=X[:4000,:]
train_y=new_Y[:4000,:]
test_x=X[4001:,:]
test_y=new_Y[4001:,:]
# NOTE(review): `classifier` is defined at the *bottom* of this file, so
# running the module top-to-bottom raises NameError here; the class
# definition must be moved above this point.
lr=[]
l=classifier()
l.train(train_x,train_y[:,0])
# Train one one-vs-all logistic classifier per digit.
for i in range(10):
    l=classifier()
    l.train(train_x,train_y[:,i])
    lr.append(l)
train_predict_y=np.zeros((4000,10))
for i in range(10):
    train_predict_y[:,i]=lr[i].predict(train_x)
test_predict_y=np.zeros((999,10))
for i in range(10):
    test_predict_y[:,i]=lr[i].predict(test_x)
# Predicted digit = argmax over the 10 per-class scores; count matches.
y1=np.argmax(test_predict_y,axis=1)
y2=np.argmax(test_y,axis=1)
c=0
for i in range(999):
    if(y1[i]==y2[i]):
        c=c+1
print(c)
"""E=X[9]
im_1=np.resize(E,(20,20))
cv2.imshow('image',im_1)
cv2.resizeWindow('image', 100,100)
cv2.waitKey(0)
cv2.destroyAllWindows()"""
class classifier(object):
    """Binary logistic-regression classifier trained by batch gradient descent.

    The learned weights (bias term in column 0) live in ``self.W`` with
    shape (1, n_features + 1).

    Bug fix / generalization: train() resized the label vector to a
    hard-coded (4000, 1); it now uses the actual number of training rows,
    so any dataset size works.
    """
    def __init__(self):
        self.W=None  # set by train()

    def predict(self,X):
        """Return the sigmoid probability for each row of X, shape (n_rows,)."""
        n_r,n_c=X.shape
        T=np.zeros((n_r,))
        # Augment with a leading column of ones for the bias term.
        L=np.zeros((n_r,n_c+1))
        L[:,0]=1
        L[:,1:]=X
        for c in range(n_r):
            T[c]=self.sigmoid(L.T[:,c])
            print(T[c])
        return T

    def sigmoid(self,X):
        """Logistic function of W·X for one augmented sample vector X."""
        h=1/(1+np.exp(-self.W.dot(X)))
        return h

    def train(self,train_x,train_y,alpha=1.5,n_iters=500):
        """Fit the weights by batch gradient descent.

        alpha: learning rate; n_iters: gradient steps (new parameter,
        defaulting to the original fixed 500).
        """
        n_r,n_c=train_x.shape
        self.W = np.random.randn(1, n_c + 1)
        # Augmented design matrix: bias column of ones + features.
        X = np.hstack((np.ones((n_r, 1)), train_x))
        # Column vector of labels, sized to the actual row count (was 4000).
        Y = np.asarray(train_y).reshape(n_r, 1)
        for i in range(n_iters):
            sigs=np.zeros((n_r,1))
            cost = 0
            for c in range(n_r):
                sigs[c,0]=self.sigmoid(X.T[:,c])
                # Cross-entropy loss accumulated per sample.
                cost = cost - (Y[c] * (np.log(self.sigmoid(X.T[:, c]))) + (1 - Y[c]) * np.log(1 - self.sigmoid(X.T[:, c])))
            cost = cost / (n_r)
            print(cost)
            # Batch gradient step.
            temp=np.dot(np.subtract(sigs,Y).T,X)
            self.W = self.W - (alpha / n_r) * temp
        print(self.W)
|
from module import *
import torch
from torch import nn
from utils import *
EPOCH = 30            # passes over the training data
nIter = 1576          # batches per epoch — presumably dataset_size / BATCH_SIZE; confirm
BATCH_SIZE = 10
LEARNING_RATE = 0.0001
# NOTE(review): "vovab_size" is a typo for "vocab_size"; kept because the
# training entry point below references this exact name.
vovab_size = len(word_counts)
# save training log
def write_txt(epoch, iteration, loss):
    """Append one formatted training-progress line to the training log."""
    log_line = "Epoch:[ %d ]\t Iteration:[ %d ]\t loss:[ %f ]\n" % (epoch, iteration, loss)
    with open("/data/video-captioning/training_log.txt", 'a+') as f:
        f.write(log_line)
if __name__ == "__main__":
    # Set to a checkpoint path to resume training; None trains from scratch.
    pkl_file = None
    s2vt = S2VT(vocab_size=vovab_size, batch_size=BATCH_SIZE)
    if pkl_file:
        s2vt.load_state_dict(torch.load("/data/video-captioning/Data/s2vt_params.pkl"))
    s2vt = s2vt.cuda()
    loss_func = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(s2vt.parameters(), lr=LEARNING_RATE)
    for epoch in range(EPOCH):
        for i in range(nIter):
            video, caption, cap_mask = fetch_train_data(BATCH_SIZE)
            video, caption, cap_mask = torch.FloatTensor(video).cuda(), torch.LongTensor(caption).cuda(), \
                                       torch.FloatTensor(cap_mask).cuda()
            cap_out = s2vt(video, caption)
            # Drop the first caption position and flatten for token-level loss.
            cap_labels = caption[:, 1:].contiguous().view(-1)  # size [batch_size, 79]
            cap_mask = cap_mask[:, 1:].contiguous().view(-1)  # size [batch_size, 79]
            logit_loss = loss_func(cap_out, cap_labels)
            # NOTE(review): nn.CrossEntropyLoss() defaults to mean reduction,
            # which returns a scalar — multiplying by cap_mask then summing
            # may not mask per-token as intended; confirm reduction='none'
            # was not meant here.
            masked_loss = logit_loss*cap_mask
            loss = torch.sum(masked_loss)/torch.sum(cap_mask)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if i%20 == 0:
                # print("Epoch: %d iteration: %d , loss: %f" % (epoch, i, loss))
                write_txt(epoch, i, loss)
            if i%2000 == 0:
                # Periodic checkpoint (also hit at i == 0 each epoch).
                torch.save(s2vt.state_dict(), "/data/video-captioning/Data/s2vt_params.pkl")
                print("Epoch: %d iter: %d save successed!" % (epoch, i))
|
from rest_framework import viewsets, status
from rest_framework.exceptions import NotFound
from rest_framework.response import Response
from authtoken.permissions import HasTokenScope
from foobar import api
from ..serializers.account import AccountQuerySerializer
from ..serializers.purchase import (
PurchaseSerializer,
PurchaseStatusSerializer,
PurchaseRequestSerializer
)
from wallet.exceptions import InsufficientFunds
class PurchaseAPI(viewsets.ViewSet):
    """Token-scoped REST endpoints for listing, creating and updating
    purchases, backed by the foobar.api service layer."""
    permission_classes = (HasTokenScope('purchases'),)

    def list(self, request):
        """List all purchases of the account identified by ?card_id=..."""
        serializer = AccountQuerySerializer(data=request.query_params)
        serializer.is_valid(raise_exception=True)
        card_id = request.query_params.get('card_id')
        account_obj = api.get_account_by_card(card_id=card_id)
        purchases = api.list_purchases(account_obj.pk)
        serializer = PurchaseSerializer(purchases, many=True)
        return Response(serializer.data)

    def create(self, request):
        """Create a purchase; responds 400 with a plain message on
        insufficient funds."""
        serializer = PurchaseRequestSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        try:
            purchase_obj = api.create_purchase(
                **serializer.as_purchase_kwargs()
            )
        except InsufficientFunds:
            return Response(
                'Insufficient funds',
                status=status.HTTP_400_BAD_REQUEST
            )
        serializer = PurchaseSerializer(purchase_obj)
        return Response(serializer.data, status=status.HTTP_200_OK)

    def retrieve(self, request, pk):
        """Retrieves an existing purchase"""
        # NOTE(review): api.get_purchase returns a (purchase, items) tuple
        # (see partial_update below), yet the whole tuple is handed to the
        # serializer here while only element 0 is None-checked — confirm
        # whether this should unpack like partial_update does.
        purchase_obj = api.get_purchase(pk)
        if purchase_obj[0] is None:
            raise NotFound
        serializer = PurchaseSerializer(purchase_obj)
        return Response(serializer.data, status=status.HTTP_200_OK)

    def partial_update(self, request, pk):
        """Updates the status of a purchase(i.e. FINALIZED or CANCELED)"""
        purchase_obj, items = api.get_purchase(pk)
        if purchase_obj is None:
            raise NotFound
        serializer = PurchaseStatusSerializer(
            data=request.data,
            context={'purchase': purchase_obj}
        )
        serializer.is_valid(raise_exception=True)
        api.update_purchase_status(purchase_obj.pk, serializer.validated_data)
        return Response(status=status.HTTP_204_NO_CONTENT)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .musk import MUSK1, MUSK2
from .hastie import Hastie_10_2
|
from info2soft import config
from info2soft import https
class Summary (object):
    """Thin wrapper around the overview ("summary") REST endpoints."""

    def __init__(self, auth):
        self.auth = auth

    def _request(self, path, body):
        # All endpoints share the same authenticated GET call shape.
        url = '{0}{1}'.format(config.get_default('default_api_host'), path)
        return https._get(url, body, self.auth)

    def listSummaryView(self, body):
        """Fetch the overview list view.

        :param dict body: parameters, see the API manual
        :return: list
        """
        return self._request('/active/summary/list_view', body)

    def listSummary(self, body):
        """Fetch the overview page.

        :param dict body: parameters, see the API manual
        :return: list
        """
        return self._request('/active/summary', body)

    def stopView(self, body):
        """Overview operation: stop.

        :return: list
        """
        return self._request('/view/operate', body)

    def resumeView(self, body):
        """Overview operation: resume.

        NOTE(review): identical endpoint to stopView ('/view/operate');
        presumably the operation is selected via *body* — confirm.

        :return: list
        """
        return self._request('/view/operate', body)
|
# Classify the sign of a user-supplied number and print it (1, -1 or 0).
# Fixes: removed the unused `sep` variable and collapsed the three
# near-identical prints into one; the printed output is unchanged.
x = float(input("Enter x value : "))
if x > 0:
    sign = 1
elif x < 0:
    sign = -1
else:
    sign = 0
print("sign(", x, ") =", sign)
|
# Import flask and template operators
from flask import Flask
app = Flask(__name__)
# Imported last, for its side effects: server.controllers registers routes
# on `app`; importing it before `app` exists would create a circular import.
import server.controllers
|
from orm.models import Sensor, ExperimentSensor, Experiment
import datetime
from database_setup import trajectory_loader as tl
from database_setup import route_loader as rl
from database_setup import link_loader as lgl
from database_setup import taz_loader as tzl
import od_matrix_generator as od
import waypoint_od_matrix_generator as waypoints_od
import waypoint_matrix_generator as waypoints
import waypoint_od_separated_matrix_generator as separated
import os
import django_utils.config as config
from orm import load as lw
import all_links_matrix_generator_v2 as sm
import generate_phi as gp
import pickle
import link_sequences as ls
def create_experiment2():
    """Recreate the 'e2' experiment row and attach all sensors to it."""
    experiment_name = 'e2'
    # Drop any previous run of this experiment before recreating it.
    Experiment.objects.filter(description=experiment_name).delete()
    e2 = Experiment(description =experiment_name,
                    name = experiment_name,
                    run_time = datetime.datetime.now())
    e2.save()
    import_experiment2_sensors(experiment_name)
def import_experiment2_sensors(description):
    """Rebuild the sensor links of the experiment matching *description*.

    Sensors are ordered by pems_id and assigned vector_index 0..N-1 in
    that order; any previous links for the experiment are removed first.
    """
    experiment = Experiment.objects.get(description=description)
    ExperimentSensor.objects.filter(experiment=experiment).delete()
    for index, sensor in enumerate(Sensor.objects.order_by('pems_id')):
        link = ExperimentSensor(sensor=sensor, value=0,
                                experiment=experiment, vector_index=index)
        link.save()
def setup_db():
    """Populate the database from scratch: experiment row, links, routes,
    TAZs and waypoints, then build the waypoint views/bins via psql."""
    print("creating experiment 2")
    create_experiment2()
    print("loading links")
    lgl.load_LA_links()
    print("creating routes")
    tl.load()
    print("importing routes into the db")
    rl.load()
    print ("load taz")
    tzl.load()
    print ("load waypoints")
    lw.import_waypoints()
    # SQL post-processing run directly through psql.
    # NOTE(review): the scripts alternate between two DB users (postgres
    # and megacell) — confirm both are intended.
    os.system("psql -U postgres -d geodjango -f waypoints/voronoi_python.sql")
    os.system("psql -U megacell -d geodjango -f waypoints/set_waypoint_voronoi.sql")
    print("create waypoint bins")
    os.system("psql -U postgres -d geodjango -f experiments/experiment2/database_setup/waypoint_sequences.sql")
    os.system("psql -U megacell -d geodjango -f experiments/experiment2/database_setup/create_od_waypoint_view.sql")
def ensure_directory(path):
    """Create *path* (including parents) if it does not already exist."""
    if os.path.exists(path):
        return
    os.makedirs(path)
def waypoint_matrix_file_name(routes, waypoint_density):
    """Path of the waypoint .mat file for a (routes, density) pair;
    creates the per-density directory on first use."""
    directory = "{0}/{1}".format(config.EXPERIMENT_MATRICES_DIR, waypoint_density)
    ensure_directory(directory)
    return "{0}/experiment2_waypoints_matrices_routes_{1}.mat".format(directory, routes)
def matrix_generator(phi, routes, waypoint_density):
    """Select the matrix generator: plain OD when density is 0, otherwise
    the waypoint-separated generator."""
    if waypoint_density == 0:
        return od.ODMatrixGenerator(phi, routes)
    # Alternatives kept for reference:
    #   waypoints.WaypointMatrixGenerator(phi, routes, waypoint_density)
    #   waypoints_od.WaypointODMatrixGenerator(phi, routes, waypoint_density)
    return separated.WaypointMatrixGenerator(phi, routes, waypoint_density)
def get_phi(regenerate=False):
    """Load the cached phi matrix, or (re)generate and cache it.

    Fixes: the original leaked file handles (open() without close) and
    pickled through text-mode files; all file access now uses context
    managers in binary mode.
    """
    directory = "{0}/{1}".format(config.DATA_DIR, config.EXPERIMENT_MATRICES_DIR)
    ensure_directory(directory)
    filename = directory + "/phi.pkl"
    if os.path.isfile(filename) and not regenerate:
        with open(filename, 'rb') as cache:
            return pickle.load(cache)
    # Write an empty placeholder first (as before) — presumably so a crash
    # during generation leaves a regenerable cache rather than a stale one.
    with open(filename, 'wb') as cache:
        pickle.dump([], cache)
    phi = gp.PhiGenerator(2000).phi_generation_sql()
    with open(filename, 'wb') as cache:
        pickle.dump(phi, cache)
    return phi
def generate_experiment_matrices():
    # Build and save one matrix set per (waypoint density, route count)
    # combination. NOTE: this function uses Python 2 print statements.
    phi = get_phi()
    # A single density, or the full sweep, per configuration.
    densities = [config.WAYPOINT_DENSITY] if config.SINGLE_WAYPOINT \
        else config.WAYPOINT_DENSITIES
    for d in densities:
        for r in [50,40,30,20,10,3]:
            print("Generating Matrix Set (waypoints: {0}, routes: {1})".format(d,r))
            print waypoint_matrix_file_name(r, d)
            generator = matrix_generator(phi, r, d)
            matrices = generator.generate_matrices()
            matrices.save_matrices(waypoint_matrix_file_name(r, d))
            print matrices
            #print_matrix_sizes(generator.matrices)
def all_link_matrix_file_name(routes, waypoint_density):
    """Path of the all-link .mat file, under AllLink/<density>/; both
    directory levels are created on first use."""
    base = "{0}/AllLink".format(config.EXPERIMENT_MATRICES_DIR)
    ensure_directory(base)
    density_dir = "{0}/{1}".format(base, waypoint_density)
    ensure_directory(density_dir)
    return "{0}/experiment2_all_link_matrices_routes_{1}.mat".format(density_dir, routes)
def print_matrix_sizes(matrices):
    """Print the shapes of the A, U, x_true and b matrices."""
    for key in ('A', 'U', 'x_true', 'b'):
        print ("{0} shape:".format(key), matrices[key].shape)
def sample_link_matrix_file_name(routes, probability):
    """Path of the sampled-links .mat file for a (routes, probability) pair;
    creates the per-probability directory on first use."""
    directory = "{0}/{1}".format(config.EXPERIMENT_MATRICES_DIR, probability)
    ensure_directory(directory)
    return "{0}/sampled_links_routes_{1}.mat".format(directory, routes)
def generate_sample_link_matrix():
    """Generate and save sampled-link matrices for every route count and
    sampling probability combination."""
    probabilities = [.01, .1, .2, .3, .4, .5, .6, .7, .8, .9]
    for route_count in [50, 40, 30, 20, 10, 3]:
        for p in probabilities:
            generator = ls.make_generator(route_count, p)
            generator.save_matrices(sample_link_matrix_file_name(route_count, p))
def generate_all_link_matrices():
    """Generate and save all-link matrices for every non-zero waypoint
    density and route count, printing the resulting matrix shapes."""
    for density in config.WAYPOINT_DENSITIES:
        if density == 0:
            continue
        for route_count in [50, 40, 30, 20, 10, 3]:
            print("Generating All Link Matrix Set (waypoints: {0}, routes: {1})".format(density, route_count))
            generator = sm.AllLinksMatrixGenerator(route_count, density)
            generator.save_matrices(all_link_matrix_file_name(route_count, density))
            print_matrix_sizes(generator.matrices)
if __name__ == "__main__":
    # Pipeline entry point; stages are toggled by (un)commenting.
    #setup_db()
    get_phi(True)  # force regeneration of the cached phi matrix
    #generate_sample_link_matrix()
    generate_experiment_matrices()
    #generate_all_link_matrices()
|
"""Main entry point for MARKS"""
import sys
if sys.argv[0].endswith("__main__.py"):
sys.argv[0] = "python -m marks"
from . import main
main.main(module=None)
|
from django.conf.urls import url, include
from rest_framework.urlpatterns import format_suffix_patterns
from . import views

# URL routes for the restMap API: per-chain store-update endpoints plus
# read-only county/city/store listings.
app_name = 'restMap'
urlpatterns = [
    url(r'^counties/update/rema/$', views.updateRema, name='update_rema'),
    url(r'^counties/update/kiwi/$', views.updateKiwi, name='update_kiwi'),
    url(r'^counties/update/spar/$', views.updateSpar, name='update_spar'),
    url(r'^counties/update/joker/$', views.updateJoker, name='update_joker'),
    url(r'^counties&cities/$', views.CountyCityList.as_view(), name='get_all_counties&cities'),
    url(r'^counties/cities/$', views.PureCountyCityList.as_view(), name='get_all_c&c_pure'),
    url(r'^counties/get/all/$', views.CountyList.as_view(), name='get_all_counties'),
    url(r'^stores/all/$', views.StoreList.as_view(), name='get_all_stores'),
    url(r'^stores/(?P<pk>[0-9]+)/$', views.CountyStoreList.as_view(), name='get_county_stores'),
]
# Let clients append an explicit format suffix (e.g. .json) to any route.
urlpatterns = format_suffix_patterns(urlpatterns)
# Equality operator: ==
a = (3 == 4)
print(a)

# Inequality operator: !=
a = (3 != 4)
print(a)

# Other comparisons: >, >=, <, <=  (strings compare lexicographically,
# and uppercase letters sort before lowercase in ASCII)
print("wangxiaojing" > "liudana")
print("WangXiaoJing" > "LiuDaNa")

# Augmented assignment
c = 3
c += 3
print(c)
|
# Ageel 9/1/2019
# 100 Days of Python
# Day 14 - List2
# Demonstrates list slicing, membership testing, string repetition and
# list concatenation.

moviesToWatch = ["John Wick 3", "Pets 2", "Shaw and Hobbs", "Crawl"]
print("My movies to watch {0}".format(moviesToWatch[0:3]))
print("Is the Lion King in the list ? {0}".format("Lion King" in moviesToWatch))
print(" Beetlejuice" * 3)

infinityStones1 = ["The Space Stone (blue)", "The Reality Stone (red)", "The Power Stone (purple)"]
infinityStones2 = ["The Mind Stone (yellow)", "The Time Stone (green)", "The Soul Stone (orange)"]
infinityStones = infinityStones1 + infinityStones2
print("I have all the stones {0}".format(infinityStones))
#!/usr/bin/python
"""Drive a four-relay water sampling sequence on a Raspberry Pi."""
import RPi.GPIO as GPIO
import time

GPIO.setmode(GPIO.BCM)

# Relay control pins, BCM numbering.
pinList = [2, 3, 14, 17]

# Loop through pins, set output mode and de-energise every relay.
# NOTE(review): relays here appear to be active-low (LOW = on) — confirm.
for i in pinList:
    GPIO.setup(i, GPIO.OUT)
    GPIO.output(i, GPIO.HIGH)

# Main sequence
try:
    GPIO.output(2, GPIO.LOW)
    print("Adding Clean water to sample")
    GPIO.output(3, GPIO.LOW)
    print("Flushing out old water")
    time.sleep(15)
    GPIO.output(2, GPIO.HIGH)
    GPIO.output(3, GPIO.HIGH)
    time.sleep(5)
    GPIO.output(14, GPIO.LOW)
    print("Adding Phenol RED")
    time.sleep(5)
    GPIO.output(14, GPIO.HIGH)
    time.sleep(5)
    GPIO.output(17, GPIO.LOW)
    print("Adding chlorine reagent")
    time.sleep(2)
    GPIO.output(17, GPIO.HIGH)
    GPIO.output(2, GPIO.LOW)
    print("Flushing out sample test")
    GPIO.output(3, GPIO.LOW)
    time.sleep(15)
    GPIO.output(2, GPIO.HIGH)
    time.sleep(15)
    GPIO.output(3, GPIO.HIGH)
    GPIO.cleanup()
    print("Good bye!")
# End program cleanly on Ctrl-C
except KeyboardInterrupt:
    print(" Quit")
    # Reset GPIO settings
    GPIO.cleanup()
|
#
# Tests for NSArchiver/NSKeyedArchiver interop with pure ObjC code.
# That is, when a Python script uses an archiver to write out
# a data structure with basic python types (list, tuple, unicode,
# str, int, float) a pure ObjC program should be able to read
# that archive as a datastructure with the corresponding Cocoa
# classes.
import os
import datetime
import platform
import subprocess
import tempfile
from plistlib import loads
import objc
from PyObjCTools.TestSupport import (
TestCase,
os_release,
os_level_key,
cast_ulonglong,
min_os_level,
)
# Directory holding this test file (used to locate the ObjC helper sources).
MYDIR = os.path.dirname(os.path.abspath(__file__))

# Cocoa classes resolved at runtime through the PyObjC bridge.
NSArray = objc.lookUpClass("NSArray")
NSArchiver = objc.lookUpClass("NSArchiver")
NSKeyedArchiver = objc.lookUpClass("NSKeyedArchiver")
NSUnarchiver = objc.lookUpClass("NSUnarchiver")
NSKeyedUnarchiver = objc.lookUpClass("NSKeyedUnarchiver")
NSSet = objc.lookUpClass("NSSet")
NSString = objc.lookUpClass("NSString")
class TestNSKeyedArchivingInterop(TestCase):
    """Archive Python values with NSKeyedArchiver and check that a pure ObjC
    helper program (dump-nsarchive) reads them back as the matching Cocoa
    types."""

    @classmethod
    def setUpClass(cls):
        # Compile the ObjC dump helper once for the whole test class.
        src = os.path.join(MYDIR, "dump-nsarchive.m")
        dst = cls.progpath = os.path.join(MYDIR, "dump-nsarchive")
        subprocess.check_call(
            [
                "cc",
                "-o",
                dst,
                src,
                "-framework",
                "Foundation",
                "-DPyObjC_BUILD_RELEASE=%02d%02d"
                % (tuple(map(int, platform.mac_ver()[0].split(".")[:2]))),
            ]
        )

    @classmethod
    def tearDownClass(cls):
        # Remove the compiled helper binary.
        if os.path.exists(cls.progpath):
            os.unlink(cls.progpath)

    def test_interop_date(self):
        testval = datetime.date.today()
        v = NSArray.arrayWithObject_(testval)
        data = NSKeyedArchiver.archivedDataWithRootObject_(v)
        if data is None:
            self.fail("Cannot create archive")
        # Round-trip through the ObjC helper via a temporary archive file.
        with tempfile.NamedTemporaryFile() as fp:
            fp.write(data.bytes())
            fp.flush()
            converted = subprocess.check_output([self.progpath, "keyed", fp.name])
        converted = loads(converted)
        value = converted[0]
        self.assertIsInstance(value, datetime.datetime)
        # XXX: Checking the value itself is problematic because
        # the datetime parser in plistlib is not timezone aware.
        # self.assertEqual(value.year, testval.year)
        # self.assertEqual(value.month, testval.month)
        # self.assertEqual(value.day, testval.day)

    def test_interop_datetime(self):
        testval = datetime.datetime.now()
        v = NSArray.arrayWithObject_(testval)
        data = NSKeyedArchiver.archivedDataWithRootObject_(v)
        if data is None:
            self.fail("Cannot create archive")
        with tempfile.NamedTemporaryFile() as fp:
            fp.write(data.bytes())
            fp.flush()
            converted = subprocess.check_output([self.progpath, "keyed", fp.name])
        converted = loads(converted)
        value = converted[0]
        self.assertIsInstance(value, datetime.datetime)
        # XXX: Checking the value itself is problematic because
        # the datetime parser in plistlib is not timezone aware.
        # self.assertEqual(value, testval)

    def test_interop_float(self):
        for testval in (-4.5, 0, 5.5e10):
            v = NSArray.arrayWithObject_(testval)
            data = NSKeyedArchiver.archivedDataWithRootObject_(v)
            with tempfile.NamedTemporaryFile() as fp:
                fp.write(data.bytes())
                fp.flush()
                converted = subprocess.check_output([self.progpath, "keyed", fp.name])
            converted = loads(converted)
            self.assertEqual(converted, [testval])

    def test_interop_int(self):
        for testval in (-42, 0, 42, -(2**62), 2**62, 2**63 + 10):
            with self.subTest(testval):
                v = NSArray.arrayWithObject_(testval)
                data = NSKeyedArchiver.archivedDataWithRootObject_(v)
                out = NSKeyedUnarchiver.unarchiveObjectWithData_(data)
                # Values above 2**63 are mangled by NSNumber before 10.14;
                # compare through an unsigned cast there.
                if testval > 2**63 and os_level_key(os_release()) < os_level_key(
                    "10.14"
                ):
                    # Bug in NSNumber
                    self.assertEqual(cast_ulonglong(out[0]), testval)
                else:
                    self.assertEqual(out[0], testval)
                with tempfile.NamedTemporaryFile() as fp:
                    fp.write(data.bytes())
                    fp.flush()
                    converted = subprocess.check_output(
                        [self.progpath, "keyed", fp.name]
                    )
                converted = loads(converted)
                if testval > 2**63 and os_level_key(os_release()) < os_level_key(
                    "10.14"
                ):
                    self.assertEqual(cast_ulonglong(converted[0]), testval)
                else:
                    self.assertEqual(converted[0], testval)

    @min_os_level("10.12")
    def test_interop_int_overflow(self):
        # Known error on macOS 10.11
        testval = 2**64
        v = NSArray.arrayWithObject_(testval)
        data = NSKeyedArchiver.archivedDataWithRootObject_(v)
        with tempfile.NamedTemporaryFile() as fp:
            fp.write(data.bytes())
            fp.flush()
            with self.assertRaises(subprocess.CalledProcessError):
                subprocess.check_output([self.progpath, "keyed", fp.name])

    def test_interop_data(self):
        for testval in (b"hello world",):
            v = NSArray.arrayWithObject_(testval)
            data = NSKeyedArchiver.archivedDataWithRootObject_(v)
            with tempfile.NamedTemporaryFile() as fp:
                fp.write(data.bytes())
                fp.flush()
                converted = subprocess.check_output([self.progpath, "keyed", fp.name])
            converted = loads(converted)
            self.assertEqual(converted, [testval])

    def test_interop_seq(self):
        # Both lists and tuples should come back as NSArray -> list.
        for testval in (["a", "b", 3], ("a", "b", 3)):
            data = NSKeyedArchiver.archivedDataWithRootObject_(testval)
            with tempfile.NamedTemporaryFile() as fp:
                fp.write(data.bytes())
                fp.flush()
                converted = subprocess.check_output([self.progpath, "keyed", fp.name])
            converted = loads(converted)
            self.assertIs(type(converted), list)
            self.assertEqual(converted, list(testval))

    def test_interop_set(self):
        for testval in ({"a", "b", 3}, frozenset({"a", "b", 3})):
            data = NSKeyedArchiver.archivedDataWithRootObject_(testval)
            with tempfile.NamedTemporaryFile() as fp:
                fp.write(data.bytes())
                fp.flush()
                converted = subprocess.check_output([self.progpath, "keyed", fp.name])
            # Sets come back in NSSet's "{( ... )}" literal syntax; rewrite it
            # into a Python set literal and evaluate that.
            self.assertTrue(converted.startswith(b"{("))
            self.assertTrue(converted.endswith(b")}\n"))
            converted = b"{" + converted[2:-3] + b"}"
            converted = eval(converted.decode("utf-8"), {"a": "a", "b": "b"})
            self.assertEqual(converted, set(testval))

    def test_interop_dict(self):
        for testval in ({"a": "b", "c": 42},):
            data = NSKeyedArchiver.archivedDataWithRootObject_(testval)
            with tempfile.NamedTemporaryFile() as fp:
                fp.write(data.bytes())
                fp.flush()
                converted = subprocess.check_output([self.progpath, "keyed", fp.name])
            converted = loads(converted)
            self.assertEqual(converted, testval)
class TestNSArchivingInterop(TestCase):
    """Same interop checks as above, but through the legacy (non-keyed)
    NSArchiver, using the helper's "plain" mode."""

    @classmethod
    def setUpClass(cls):
        # Compile the ObjC dump helper once for the whole test class.
        src = os.path.join(MYDIR, "dump-nsarchive.m")
        dst = cls.progpath = os.path.join(MYDIR, "dump-nsarchive")
        subprocess.check_call(
            [
                "cc",
                "-o",
                dst,
                src,
                "-framework",
                "Foundation",
                "-DPyObjC_BUILD_RELEASE=%02d%02d"
                % (tuple(map(int, platform.mac_ver()[0].split(".")[:2]))),
            ]
        )

    @classmethod
    def tearDownClass(cls):
        # Remove the compiled helper binary.
        if os.path.exists(cls.progpath):
            os.unlink(cls.progpath)

    def test_interop_string(self):
        for testval in ("hello world", "goodbye moon"):
            v = NSArray.arrayWithObject_(testval)
            data = NSArchiver.archivedDataWithRootObject_(v)
            with tempfile.NamedTemporaryFile() as fp:
                fp.write(data.bytes())
                fp.flush()
                converted = subprocess.check_output([self.progpath, "plain", fp.name])
            converted = loads(converted)
            self.assertEqual(converted, [testval])

    def test_interop_float(self):
        for testval in (-4.5, 0, 5.5e10):
            v = NSArray.arrayWithObject_(testval)
            data = NSArchiver.archivedDataWithRootObject_(v)
            with tempfile.NamedTemporaryFile() as fp:
                fp.write(data.bytes())
                fp.flush()
                converted = subprocess.check_output([self.progpath, "plain", fp.name])
            converted = loads(converted)
            self.assertEqual(converted, [testval])

    def test_interop_int(self):
        for testval in (-42, 0, 42, -(2**62), 2**62):
            v = NSArray.arrayWithObject_(testval)
            data = NSArchiver.archivedDataWithRootObject_(v)
            with tempfile.NamedTemporaryFile() as fp:
                fp.write(data.bytes())
                fp.flush()
                converted = subprocess.check_output([self.progpath, "plain", fp.name])
            converted = loads(converted)
            self.assertEqual(converted, [testval])
        # Values beyond unsigned 64-bit range must make the helper fail.
        testval = 2**64
        v = NSArray.arrayWithObject_(testval)
        data = NSArchiver.archivedDataWithRootObject_(v)
        with tempfile.NamedTemporaryFile() as fp:
            fp.write(data.bytes())
            fp.flush()
            with self.assertRaises(subprocess.CalledProcessError):
                subprocess.check_output([self.progpath, "plain", fp.name])

    def test_interop_data(self):
        for testval in (b"hello world",):
            v = NSArray.arrayWithObject_(testval)
            data = NSArchiver.archivedDataWithRootObject_(v)
            with tempfile.NamedTemporaryFile() as fp:
                fp.write(data.bytes())
                fp.flush()
                converted = subprocess.check_output([self.progpath, "plain", fp.name])
            converted = loads(converted)
            self.assertEqual(converted, [testval])

    def test_interop_seq(self):
        # Both lists and tuples should come back as NSArray -> list.
        for testval in (["a", "b", 3], ("a", "b", 3)):
            data = NSArchiver.archivedDataWithRootObject_(testval)
            with tempfile.NamedTemporaryFile() as fp:
                fp.write(data.bytes())
                fp.flush()
                converted = subprocess.check_output([self.progpath, "plain", fp.name])
            converted = loads(converted)
            self.assertIs(type(converted), list)
            self.assertEqual(converted, list(testval))

    def test_interop_set(self):
        for testval in ({"a", "b", 3}, frozenset({"a", "b", 3})):
            data = NSArchiver.archivedDataWithRootObject_(testval)
            with tempfile.NamedTemporaryFile() as fp:
                fp.write(data.bytes())
                fp.flush()
                converted = subprocess.check_output([self.progpath, "plain", fp.name])
            # Sets come back in NSSet's "{( ... )}" literal syntax; rewrite it
            # into a Python set literal and evaluate that.
            self.assertTrue(converted.startswith(b"{("))
            self.assertTrue(converted.endswith(b")}\n"))
            converted = b"{" + converted[2:-3] + b"}"
            converted = eval(converted.decode("utf-8"), {"a": "a", "b": "b"})
            self.assertEqual(converted, set(testval))

    def test_interop_dict(self):
        for testval in ({"a": "b", "c": 42},):
            data = NSArchiver.archivedDataWithRootObject_(testval)
            with tempfile.NamedTemporaryFile() as fp:
                fp.write(data.bytes())
                fp.flush()
                converted = subprocess.check_output([self.progpath, "plain", fp.name])
            converted = loads(converted)
            self.assertEqual(converted, testval)
class Class1:
    """Empty placeholder class restored from the stored test archives."""

    pass
class Class2:
    """Empty placeholder class restored from the stored test archives."""

    pass
class Class3:
    """Archivable value object with explicit pickle-style state handling."""

    def __init__(self):
        # Both attributes start unset; archives repopulate them later.
        self.a = self.b = None

    def __getstate__(self):
        """Expose the state as an ``(a, b)`` tuple."""
        return (self.a, self.b)

    def __setstate__(self, state):
        """Restore the attributes from an ``(a, b)`` tuple."""
        self.a, self.b = state
class Class4:
    """Archivable value whose state dict is keyed partly by an NSString."""

    def __init__(self):
        self.a = None
        self.b = None

    def __getstate__(self):
        # NOTE(review): ``self.c`` is never assigned in __init__; presumably
        # the archive fixtures (or callers) set it before archiving — verify.
        return {"a": self.a, "b": self.b, NSString.stringWithString_("c"): self.c}
class TestLoadingOlderVersions(TestCase):
    """Unarchive fixture files written by older PyObjC versions and verify
    the restored object graph."""

    def do_verify(self, path):
        import __main__

        # Ensure that class definitions are present:
        __main__.Class1 = Class1
        __main__.Class2 = Class2
        __main__.Class3 = Class3
        __main__.Class4 = Class4
        # Keyed archives end in "keyed"; everything else is a plain NSArchive.
        if path.endswith("keyed"):
            archiver = NSKeyedUnarchiver
        else:
            archiver = NSUnarchiver
        data = archiver.unarchiveObjectWithFile_(path)
        self.assertIsInstance(data, Class2)
        self.assertEqual(data.lst, [1, 2, 3])
        self.assertEqual(data.string, "hello world")
        self.assertIsInstance(data.obj, Class1)
        o = data.obj
        self.assertEqual(o.a, 42)
        self.assertEqual(o.b, 2.5)
        self.assertIsInstance(data.o3, Class3)
        self.assertIsInstance(data.o4, Class4)
        o = data.o3
        self.assertEqual(o.a, 42)
        self.assertEqual(o.b, 21)
        o = data.o4
        self.assertEqual(o.a, "A")
        self.assertEqual(o.b, "B")

    # Generate one test method per stored archive fixture file.
    for fname in os.listdir(os.path.join(MYDIR, "archives")):

        def test(self, fname=fname):
            self.do_verify(os.path.join(MYDIR, "archives", fname))

        locals()["test_%s" % (fname.replace(".", "_").replace("-", "_"))] = test
        del test
|
from setuptools import setup

# Packaging metadata for the pybem BEM-class-generation helper library.
setup(
    name='pybem',
    version='0.1.2',
    author="Klim Naydenov",
    author_email="knaydenov@gmail.com",
    description='This package provides helpers for BEM classes generation',
    url='https://github.com/knaydenov/pybem',
    install_requires=[],
    packages=['pybem'],
    python_requires='>=3.4',
)
|
import sys
import os
import nibabel as nib
import numpy as np
import bigbadbrain as bbb
import warnings

# Silence noisy library warnings for unattended batch runs.
warnings.filterwarnings("ignore")
# ants lives in a non-standard user install location on this cluster.
sys.path.insert(0, '/home/users/brezovec/.local/lib/python3.6/site-packages/lib/python/')
import ants
def main(args):
    """Motion-correct volumes [vol_start, vol_end) of a brain recording.

    args (all strings): directory, motcorr_directory, master_path,
    slave_path, master_path_mean, vol_start, vol_end.
    """
    (directory, motcorr_directory, master_path,
     slave_path, master_path_mean) = args[:5]
    vol_start = int(args[5])
    vol_end = int(args[6])

    # Load only the requested volume range to keep memory bounded.
    master_brain = load_partial_brain(master_path, vol_start, vol_end)
    slave_brain = load_partial_brain(slave_path, vol_start, vol_end)
    mean_brain = ants.from_numpy(bbb.load_numpy_brain(master_path_mean))
    bbb.motion_correction(master_brain,
                          slave_brain,
                          directory,
                          motcorr_directory,
                          meanbrain=mean_brain,
                          suffix='_' + str(vol_start))
def load_partial_brain(file, start, stop):
    """Load volumes [start, stop) from a NIfTI file as a float64 ANTs image."""
    partial = nib.load(file).dataobj[:, :, :, start:stop]
    brain = ants.from_numpy(np.asarray(np.squeeze(partial), 'float64'))
    # Squeezing drops the time axis when a single volume is requested;
    # restore it so callers always see 4 axes.
    if len(np.shape(brain)) == 3:
        brain = brain[:, :, :, np.newaxis]
    return brain
if __name__ == '__main__':
    # CLI entry point: forward every argument after the script name.
    main(sys.argv[1:])
#!/usr/bin/python
def smallestEvenlyDivisible(divisors):
    """Return the smallest positive integer divisible by every number in *divisors*.

    Computes the LCM by trial division: each candidate factor is divided out
    of every remaining number; a factor that divides at least one number is
    multiplied into the running product. Returns 1 for an empty input.
    """
    product = 1
    currentNum = 2
    numsToFactor = divisors
    while numsToFactor:
        anyDivisible = False
        updatedNumsToFactor = []
        for numToFactor in numsToFactor:
            if numToFactor % currentNum == 0:
                anyDivisible = True
                # Floor division: plain '/' under Python 3 would yield floats
                # and break the modulo tests on later passes.
                numToFactor //= currentNum
            # Fully-factored numbers (reduced to 1) drop out of the list.
            if numToFactor > 1:
                updatedNumsToFactor.append(numToFactor)
        if anyDivisible:
            product *= currentNum
        else:
            currentNum += 1
        numsToFactor = updatedNumsToFactor
    return product
# Project Euler #5: smallest number evenly divisible by 1..20.
# print() as a function so the script also runs under Python 3.
print(smallestEvenlyDivisible(range(1, 21)))
|
# Crie uma classe que modele uma pessoa
# a)Atributos nome, idade, peso e altura
# b)Métodos envelhecer, engordar, emagrecer, crescer
# Por padrão, a cada ano que a pessoa envelhece, sendo a idade dela menor que 21 anos, ela deve crescer 0,5 cm
class Pessoa:
    """Models a person with a name, age (years), weight (kg) and height (cm).

    While younger than 21, each year of ageing adds 0.5 cm of height.
    """

    def __init__(self, nome, idade, peso, altura):
        self.nome = nome
        self.idade = idade
        self.peso = peso
        self.altura = altura

    def envelhecer(self, anos):
        """Age the person; delegates to crescer so under-21 growth applies."""
        return self.crescer(anos)

    def engordar(self, calorias):
        """Gain weight: 1 kg for every 8000 calories ingested."""
        self.peso += calorias * 0.000125

    def emagrecer(self, horasExercicio):
        """Lose 0.1 kg per hour of exercise."""
        self.peso -= horasExercicio * 0.1

    def crescer(self, anos):
        """Advance the age by *anos*, growing 0.5 cm per year spent under 21."""
        if self.idade < 21:
            # Only the years up to the 21st birthday count towards growth.
            anos_crescendo = min(anos, 21 - self.idade)
            self.altura += anos_crescendo * 0.5
        self.idade += anos
# Demo: build a Pessoa and report its state before and after each operation.
pessoa1 = Pessoa('Felipe', 20, 73, 178)
print(f'''RELATÓRIO INICIAL:
Nome: {pessoa1.nome}
Idade: {pessoa1.idade} anos
Peso: {pessoa1.peso:.2f} kg
ALtura: {pessoa1.altura:.2f} cm''')
# Ageing past 21 adds only the growth remaining before the 21st birthday.
pessoa1.envelhecer(50)
print(f'''\nIdade após 50 anos: {pessoa1.idade} anos.
Altura após 50 anos: {pessoa1.altura:.2f} cm.''')
# Already over 21 now, so no further growth is expected here.
pessoa1.crescer(2)
print(f'''\nIdade após +2 anos: {pessoa1.idade} anos.
Altura após +2 anos: {pessoa1.altura:.2f} cm.''')
pessoa1.engordar(8000)
print(f'\nPeso após ingerir 8000 calorias: {pessoa1.peso:.2f} kg.')
pessoa1.emagrecer(18)
print(f'\nPeso após fazer 18h de caminhada: {pessoa1.peso:.2f} kg.')
print(f'''\nRELATÓRIO FINAL:
Nome: {pessoa1.nome}
Idade: {pessoa1.idade} anos
Peso: {pessoa1.peso:.2f} kg
ALtura: {pessoa1.altura:.2f} cm''')
from itertools import product
def first():
    """AoC 2020 day 14 part 1: apply the bitmask to values, sum all memory.

    Reads the program from 14/dd.txt and prints the sum of every value left
    in memory after the program completes.
    """
    # Use a context manager so the input file handle is always closed
    # (the original left it open).
    with open("14/dd.txt", "r") as f:
        lines = f.read().splitlines()
    # Split the program into segments, each starting with its mask line.
    segments = []
    temp = []
    for line in lines:
        if "mask" in line:
            if temp:
                segments.append(temp.copy())
            temp = [line]
        else:
            temp.append(line)
    segments.append(temp.copy())
    memory = {}
    for seg in segments:
        mask = seg[0][7:]
        for op in seg[1:]:
            data = op.split(" = ")
            slot = data[0][4:-1]
            binary = str(bin(int(data[1])))[2:]
            bin_36 = "0" * (36 - len(binary)) + binary
            result = ""
            # 'X' keeps the value's bit; a 0/1 in the mask overwrites it.
            for i, bit in enumerate(mask):
                result += bin_36[i] if bit == "X" else mask[i]
            memory[slot] = result
    print(sum(map(lambda x: int(x, 2), memory.values())))
def second():
    """AoC 2020 day 14 part 2: mask the address bits ('X' floats), sum memory.

    Reads the program from 14/dd.txt and prints the sum of every value left
    in memory after all floating addresses are expanded.
    """
    # Use a context manager so the input file handle is always closed
    # (the original left it open).
    with open("14/dd.txt", "r") as f:
        lines = f.read().splitlines()
    # Split the program into segments, each starting with its mask line.
    segments = []
    temp = []
    for line in lines:
        if "mask" in line:
            if temp:
                segments.append(temp.copy())
            temp = [line]
        else:
            temp.append(line)
    segments.append(temp.copy())
    memory = {}

    def calc_slot(slot, i, slots, slot_36, mask):
        # Recursively expand the masked address: 'X' branches into 0 and 1,
        # '1' forces the bit, '0' keeps the original address bit.
        if len(slot) == 36:
            slots.add(slot)
        elif mask[i] == "X":
            calc_slot(slot + "0", i + 1, slots, slot_36, mask)
            calc_slot(slot + "1", i + 1, slots, slot_36, mask)
        elif mask[i] == "1":
            calc_slot(slot + "1", i + 1, slots, slot_36, mask)
        else:
            calc_slot(slot + slot_36[i], i + 1, slots, slot_36, mask)

    # (An unused x_bits list computed per segment was removed.)
    for seg in segments:
        mask = seg[0][7:]
        for op in seg[1:]:
            data = op.split(" = ")
            slot_bin = str(bin(int(data[0][4:-1])))[2:]
            slot_36 = "0" * (36 - len(slot_bin)) + slot_bin
            decimal = int(data[1])
            slots = set()
            calc_slot("", 0, slots, slot_36, mask)
            for slot in slots:
                memory[slot] = decimal
    print(sum(memory.values()))
if __name__ == "__main__":
first()
second()
|
import os
from config_generation_utils import dump_json_file, fetch_from_env
# Destination of the generated user-identity DB seed file.
TARGET_FILE_PATH = os.path.join("./user_identity_service/db_content.json")
# Environment variable pairs to read: (login var name, password var name).
TO_FETCH_FROM_ENV = [
    ('ROOT_USER_NAME', 'ROOT_PASSWORD'),
]
def create_db_content() -> None:
    """Read root credentials from the environment and write the DB seed JSON."""
    env_values = fetch_from_env(to_fetch=TO_FETCH_FROM_ENV)
    login, password = env_values[0]
    dump_json_file(path=TARGET_FILE_PATH,
                   content={'login': login, 'password': password})
if __name__ == '__main__':
    # Generate the seed file when run directly.
    create_db_content()
|
# Generated by Django 3.2.4 on 2021-07-13 08:03
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: renames Participation.time_tardy to
    # time_total and adds nullable max_page_views / max_participations fields.

    dependencies = [
        ('data_aggregator', '0012_alter_jobtype_type'),
    ]

    operations = [
        migrations.RenameField(
            model_name='participation',
            old_name='time_tardy',
            new_name='time_total',
        ),
        migrations.AddField(
            model_name='participation',
            name='max_page_views',
            field=models.IntegerField(null=True),
        ),
        migrations.AddField(
            model_name='participation',
            name='max_participations',
            field=models.IntegerField(null=True),
        ),
    ]
|
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from scipy.interpolate import interp2d
from scipy.optimize import minimize
df = pd.read_csv("measure.csv")
dv = df.values
norm = np.linalg.norm(dv[:, :2], axis=1)
mask = norm < 20
print(mask[:10])
d = dv[mask]
print(d.shape, norm[mask].shape)
x_interp = interp2d(d[:, 2], d[:, 3], norm[mask])
plt.quiver(d[:, 2], d[:, 3], d[:, 0], d[:, 1], color='blue')
plt.show()
fig = plt.figure()
ax = fig.gca(projection='3d')
x = np.linspace(-15, 15, 100)
y = np.linspace(-15, 15, 100)
Z = np.empty((100, 100))
for idx, i in enumerate(x):
for idy, j in enumerate(y):
Z[idx, idy] = x_interp(j, i)
Y, X = np.meshgrid(y, x)
ax.scatter(d[:, 2], d[:, 3], norm[mask])
# surf = ax.plot_surface(X, Y, Z,
# linewidth=0, antialiased=False)
xa = np.linspace(0, 20, 100)
def fun(x):
    """Cost of a linear fit of field norm vs. distance from candidate origin *x*.

    Intended as an objective for scipy.optimize.minimize (imported above but
    not called in this script). Uses the module-level ``d``, ``norm`` and
    ``mask`` arrays.
    """
    points = np.linalg.norm(d[:, 2:] - x, axis=1)
    print(points.shape)
    p = np.polyfit(points, norm[mask], 1)
    interp = points*p[0] + p[1]
    # NOTE(review): this residual compares the fitted line to the distances
    # themselves; comparing against norm[mask] looks intended — confirm.
    return np.sum((interp - points.T)**2)
plt.show()
plt.figure()
# Scatter the field norm against distance from a fixed candidate origin.
# NOTE(review): (-3, -1.5) appears to be a hand-tuned source position — confirm.
plt.scatter(np.linalg.norm(d[:, 2:] - np.array([-3, -1.5]), axis=1), norm[mask])
# Linear fit of norm vs. distance, then plot the fitted line over [0, 20].
p = np.polyfit(np.linalg.norm(d[:, 2:] - np.array([-3, -1.5]), axis=1), norm[mask], 1)
interp = xa*p[0] + p[1]
plt.scatter(xa, interp)
plt.show()
print("Polyfit output : {}".format(p))
"""
Scrape projections from Hashtag Basketball
"""
import datefinder
import lxml.html
import pandas
import requests
from datetime import datetime
def main():
    """Scrape Hashtag Basketball projections; write current and dated CSVs."""
    page = download_projections_page()
    root = lxml.html.fromstring(page)  # parse HTML
    projections = extract_projections(root)
    updated_at = "{:%Y-%m-%d}".format(extract_updated_at(root))
    projections.to_csv("projections.csv", encoding='utf8', index=False)
    projections.to_csv("historical/projections_{}.csv".format(updated_at), encoding='utf8', index=False)
def download_projections_page():
    """Fetch the raw projections HTML page from Hashtag Basketball."""
    response = requests.get('https://hashtagbasketball.com/fantasy-basketball-projections')
    return response.text
def extract_projections(root):
    """Given parsed HTML of the projections page, extract the projections.

    Returns a pandas DataFrame with one row per player; FG%/FT% cells are
    additionally split into made/attempted columns.
    """
    players = []
    rows = root.cssselect('#ContentPlaceHolder1_GridView1 tr')
    # get column headers for each stat
    columns = []
    for col in rows[0].cssselect('th'):
        columns.append(col.text_content().lower())
    for row in rows:
        player = {}
        for cell, col in zip(row.cssselect('td'), columns):
            contents = cell.text_content().strip().split('\n')
            player[col] = contents[0].strip()
            if player['r#'] == 'R#':
                player = {}
                break  # strip out table rows that are just repeated column headers
            # FG%/FT% cells also carry a "(made/attempted)" fraction on their
            # last line; split it into separate columns.
            fraction_parts = contents[-1].strip().replace('(', '').replace(')', '').split('/')
            if col == 'fg%':
                player['fgm'] = fraction_parts[0]
                player['fga'] = fraction_parts[1]
            elif col == 'ft%':
                player['ftm'] = fraction_parts[0]
                player['fta'] = fraction_parts[1]
        if player:
            players.append(player)
    projections = pandas.DataFrame(players)
    projections = projections.rename(columns={'player': 'name'})  # rename the 'player' column to 'name'
    # Take out any periods in a player name. regex=False is required: with
    # the regex default of older pandas, the pattern '.' matches every
    # character and would blank out the whole name.
    projections['name'] = projections['name'].str.replace('.', '', regex=False)
    return projections
def extract_updated_at(root):
    """Return the datetime the projections page says it was last updated."""
    node = root.cssselect('#form1 > section > div > div.heading-pricing > span > small')
    # Drop the author part after 'by' and the leading label, then let
    # datefinder locate the date in what remains.
    text = node[0].text_content().split('by')[0].strip().replace('Last updated: ', '')
    possible_dates = datefinder.find_dates(text)
    return next(possible_dates)
if __name__ == '__main__':
    # Run the full scrape when executed directly.
    main()
|
import CryptoCompareWebSocketOHLCV as cws
import unittest
from io import StringIO
from unittest import mock
import datetime
import pytz
# TODO:
# Add testing for socketio client + start()
# Maybe add tests for backup_logs() + get_date_filename
# Integration tests for DB ???
# Create 'real' Factory for Trades ???
# Class "Factories"
# Trades
def get_timestamp(ts):
    """Return one of four fixed UNIX timestamps (all on 6/7/2018) by index."""
    fixtures = (
        1528378700,  # 6/7/2018 15:38:20
        1528379718,  # 6/7/2018 15:55:18
        1528379731,  # 6/7/2018 15:55:31
        1528379772,  # 6/7/2018 15:56:12
    )
    return fixtures[ts]
def create_trade(timestamp=1):
    """Factory: build a canned cws.Trade whose time is fixture *timestamp*."""
    return cws.Trade(
        "BitHouse",                # exchange_name
        "BTC",                     # currency_1
        "EUR",                     # currency_2
        0,                         # flag
        get_timestamp(timestamp),  # trade_time
        1.5,                       # quantity
        6500.88,                   # price
        10000,                     # total
    )
# Class Testing
# Queue
class QueueTest(unittest.TestCase):
    """Unit tests for cws.Queue trade accumulation and OHLCV roll-over."""

    def setUp(self):
        self.queue = cws.Queue('BTC', None)

    def test_non_trade(self):
        # Objects that are not Trades must be ignored.
        prev_len = len(self.queue.queue)
        trade = "FOO"
        self.queue.add_trade(trade)
        self.assertEqual(prev_len, len(self.queue.queue))

    def test_add_first_trade(self):
        self.queue.queue = []
        trade = create_trade(1)
        self.queue.add_trade(trade)
        self.assertEqual(self.queue.queue, [trade.get_trade()])

    def test_add_second_trade(self):
        # A later trade within the same minute is appended.
        trade_1 = create_trade(1)
        trade_2 = create_trade(2)
        self.queue.queue = [trade_1.get_trade()]
        self.queue.min = int(trade_1.trade_time.minute)
        self.queue.time = trade_1.trade_time
        self.queue.add_trade(trade_2)
        self.assertEqual(self.queue.queue, [trade_1.get_trade(), trade_2.get_trade()])

    def test_add_previous_trade(self):
        # A trade older than the queue's current minute must be dropped.
        trade_1 = create_trade(1)
        trade_2 = create_trade(0)
        self.queue.queue = [trade_1.get_trade()]
        self.queue.min = int(trade_1.trade_time.minute)
        self.queue.time = trade_1.trade_time
        self.queue.add_trade(trade_2)
        self.assertEqual(self.queue.queue, [trade_1.get_trade()])

    @mock.patch('CryptoCompareWebSocketOHLCV.Queue.calculate_ohlcv')
    def test_add_next_min_trade(self, *args):
        # A trade in the next minute triggers the OHLCV calculation and
        # starts a fresh queue containing only the new trade.
        trade_1 = create_trade(1)
        trade_2 = create_trade(3)
        prev_queue = [trade_1.get_trade(), trade_2.get_trade()]
        self.queue.time = trade_1.trade_time
        self.queue.min = int(trade_1.trade_time.minute)
        self.queue.queue = prev_queue
        self.queue.symbol = 'BTC'
        self.queue.db = None
        next_min_trade = create_trade(3)
        self.queue.add_trade(next_min_trade)
        self.queue.calculate_ohlcv.assert_called_once()
        self.assertEqual(self.queue.queue, [next_min_trade.get_trade()])

    @mock.patch('CryptoCompareWebSocketOHLCV.upload_data_arcticdb')
    def test_empty_calculate_ohlcv(self, *args):
        # An empty queue must not upload anything.
        self.queue.queue = []
        self.queue.calculate_ohlcv()
        cws.upload_data_arcticdb.assert_not_called()

    @mock.patch('CryptoCompareWebSocketOHLCV.upload_data_arcticdb')
    def test_calculate_ohlcv(self, *args):
        trade_1 = create_trade(1)
        trade_2 = create_trade(2)
        self.queue.queue = [trade_1.get_trade(), trade_2.get_trade()]
        self.queue.symbol = "BTC"
        self.queue.db = None
        self.queue.calculate_ohlcv()
        cws.upload_data_arcticdb.assert_called_once()
class convert_message_to_trade_test(unittest.TestCase):
    """Tests for cws.to_trade_message parsing of raw websocket payloads."""

    def test_good_message(self):
        # A well-formed tilde-delimited message becomes an equivalent Trade.
        msg = b'2["m","0~Coinbase~BTC~EUR~1~14742062~'+bytes(str(get_timestamp(1)), 'utf8')+b'~0.01~6451.61~64.5161~1f"]'
        start_time = datetime.datetime.fromtimestamp(get_timestamp(0)).replace(tzinfo=pytz.UTC)
        processed_msg = cws.to_trade_message(start_time, msg)
        self.assertEqual(processed_msg, cws.Trade('Coinbase','BTC','EUR', 1, get_timestamp(1), 0.01, 6451.61, 64.5161))

    def test_bad_message(self):
        # Malformed payloads must yield None instead of raising.
        msg = b"FOO"
        start_time = datetime.datetime.fromtimestamp(get_timestamp(0)).replace(tzinfo=pytz.UTC)
        processed_msg = cws.to_trade_message(start_time, msg)
        self.assertEqual(processed_msg, None)

    def test_prev_start_message(self):
        # Trades timestamped before start_time are filtered out.
        msg = b'2["m","0~Coinbase~BTC~EUR~1~14742062~'+bytes(str(get_timestamp(0)), 'utf8')+b'~0.01~6451.61~64.5161~1f"]'
        start_time = datetime.datetime.fromtimestamp(get_timestamp(3)).replace(tzinfo=pytz.UTC)
        processed_msg = cws.to_trade_message(start_time, msg)
        self.assertEqual(processed_msg, None)
import pygame as pg
import sys
from settings import *
from sprites import *
from os import path
import numpy as np
import math
vec = pg.math.Vector2
class Game:
    """Snake game driven by a Q-learning agent, rendered with pygame.

    Constants (WIDTH, HEIGHT, TILESIZE, FPS, file names, colors) come from
    settings; Player/Wall/Food sprites come from sprites.
    """

    def __init__(self):
        pg.init()
        self.screen = pg.display.set_mode((WIDTH, HEIGHT))
        pg.display.set_caption(TITLE)
        self.clock = pg.time.Clock()
        # NOTE(review): this passes the literal string 'FONT_NAME', not the
        # FONT_NAME constant from settings; match_font then falls back to a
        # default font.  Confirm whether the constant was intended.
        self.font_name = pg.font.match_font('FONT_NAME')
        self.HIGHSCORE = 0
        self.SCORE = 0
        self.GENERATION = 1
        self.game_time = 0
        self.TRAINING = False  # True: learn and persist Q; False: play saved Q
        self.GAMMA = 0.8  # Q-learning discount factor (used by the agent)
        self.dir = path.dirname(__file__)
        self.Q = path.join(self.dir,Q_FILE)
        self.saved_Q = path.join(self.dir,saved_Q)
        self.load_data()

    def load_data(self):
        """Load high score, generation counter and the Q matrix from disk."""
        # load high SCORE
        with open(path.join(self.dir,SCORE_FILE), 'r+') as f:
            try:
                self.HIGHSCORE = int(f.read())
            except:
                # NOTE(review): resets SCORE rather than HIGHSCORE on a bad
                # file — looks like it should be self.HIGHSCORE = 0; confirm.
                self.SCORE = 0
        with open(path.join(self.dir,GENERATION_FILE), 'r+') as f:
            try:
                self.GENERATION = int(f.read())
            except:
                self.GENERATION = 0
        # Training continues from Q_FILE; otherwise play from the snapshot.
        if self.TRAINING:
            self.Q_MATRIX = np.loadtxt(self.Q)
        else:
            self.Q_MATRIX = np.loadtxt(self.saved_Q)

    def new(self):
        # initialize all variables and do all the setup for a new game
        self.all_sprites = pg.sprite.Group()
        self.walls = pg.sprite.Group()
        self.foods = pg.sprite.Group()
        self.body = pg.sprite.Group()
        self.vision_field = np.array([[7,8,9,10,11,12,13,7,8,9,10,11,12,13,7,8,9,10,11,12,13,7,8,9,11,12,13,7,8,9,10,11,12,13,7,8,9,10,11,12,13,7,8,9,10,11,12,13],[7,7,7,7,7,7,7,8,8,8,8,8,8,8,9,9,9,9,9,9,9,10,10,10,10,10,10,11,11,11,11,11,11,11,12,12,12,12,12,12,12,13,13,13,13,13,13,13]])
        self.player = Player(self, 10, 10)
        self.food = Food(self)
        # Border walls around the 32x24-tile playing field.
        for x in range(0, 32):
            Wall(self, x, 0)
        for x in range(0, 32):
            Wall(self, x , 23)
        for y in range(0, 23):
            Wall(self, 0, y)
        for y in range(0, 23):
            Wall(self, 31, y)

    def run(self):
        # game loop - set self.playing = False to end the game
        self.playing = True
        self.running = True
        while self.playing:
            self.dt = self.clock.tick(FPS) / 1000
            self.game_time += self.dt
            # print(self.game_time)
            self.events()
            # NOTE(review): writes the Q matrix to disk on every frame,
            # which is expensive — confirm whether it can move to quit().
            np.savetxt(self.saved_Q,self.Q_MATRIX)
            self.update()
            self.draw()
        self.SCORE = 0

    def quit(self):
        """Persist learning state (and generation count when training), then exit."""
        if self.TRAINING:
            np.savetxt(self.Q,self.Q_MATRIX)
            np.savetxt(self.saved_Q,self.Q_MATRIX)
            self.GENERATION +=1
            with open(path.join(self.dir,GENERATION_FILE), 'w') as f:
                f.write(str(self.GENERATION))
        else:
            np.savetxt(self.saved_Q,self.Q_MATRIX)
        pg.quit()
        sys.exit()

    def update(self):
        # update portion of the game loop
        self.all_sprites.update()
        if self.player.hit_wall:
            self.playing = False
            # Persist a new high score before resetting the round score.
            if self.SCORE > self.HIGHSCORE:
                self.HIGHSCORE = self.SCORE
                with open(path.join(self.dir,SCORE_FILE), 'w') as f:
                    f.write(str(self.SCORE))
            self.SCORE = 0
        # Cap episode length at 600 seconds of game time.
        if self.game_time > 600:
            self.playing = False

    def draw_grid(self):
        """Draw tile grid lines (debug helper, currently unused in draw())."""
        for x in range(0, WIDTH, TILESIZE):
            pg.draw.line(self.screen, LIGHTGREY, (x, 0), (x, HEIGHT))
        for y in range(0, HEIGHT, TILESIZE):
            pg.draw.line(self.screen, LIGHTGREY, (0, y), (WIDTH, y))

    def draw(self):
        """Render the frame: sprites plus the score/generation HUD line."""
        self.screen.fill(BGCOLOR)
        #self.draw_grid()
        self.all_sprites.draw(self.screen)
        self.draw_text(str('Score: ') , 40, WHITE, 2 * TILESIZE, 24 * TILESIZE)
        self.draw_text(str(self.SCORE) , 40, WHITE, 4 * TILESIZE, 24 * TILESIZE)
        self.draw_text(str('Generation: ') , 40, WHITE, 24 * TILESIZE, 24 * TILESIZE)
        self.draw_text(str(self.GENERATION) , 40, WHITE, 27 * TILESIZE, 24 * TILESIZE)
        pg.display.flip()

    def events(self):
        # catch all events here
        for event in pg.event.get():
            if event.type == pg.QUIT:
                self.quit()
            if event.type == pg.KEYDOWN:
                if event.key == pg.K_ESCAPE:
                    self.quit()
                # Arrow keys: ignore turns along the axis already in motion
                # (a snake cannot reverse onto itself).
                if event.key == pg.K_LEFT:
                    if self.player.dx != -1 and self.player.dx != 1:
                        self.player.move(dx=-1)
                if event.key == pg.K_RIGHT:
                    if self.player.dx != -1 and self.player.dx !=1:
                        self.player.move(dx=1)
                if event.key == pg.K_UP:
                    if self.player.dy != -1 and self.player.dy !=1:
                        self.player.move(dy=-1)
                if event.key == pg.K_DOWN:
                    if self.player.dy != -1 and self.player.dy !=1:
                        self.player.move(dy=1)
                #self.player.move_body()

    def show_start_screen(self):
        # game splash/start screen
        self.screen.fill(LIGHTGREY)
        self.draw_text(TITLE, 48, WHITE, WIDTH / 2, HEIGHT / 4)
        self.draw_text("Press any Key to start the A.I.", 22, WHITE, WIDTH / 2, HEIGHT / 2)
        # self.draw_text("In main.py, Set TRAINING to tru", 22, WHITE, WIDTH / 2, HEIGHT * 3 / 4)
        self.draw_text("A.I. HIGH SCORE: " + str(self.HIGHSCORE), 22, WHITE, WIDTH / 2, 15)
        pg.display.flip()
        self.wait_for_key()

    def draw_text(self, text, size, color, x, y):
        """Render *text* centered horizontally at (x, y) on the screen."""
        font = pg.font.Font(self.font_name, size)
        text_surface = font.render(text, True, color)
        text_rect = text_surface.get_rect()
        text_rect.midtop = (x, y)
        self.screen.blit(text_surface, text_rect)

    def wait_for_key(self):
        """Block until any key is released (or quit on window close)."""
        waiting = True
        while waiting:
            self.clock.tick(FPS)
            for event in pg.event.get():
                if event.type == pg.QUIT:
                    # waiting = False
                    # self.running = False
                    #Write Q to file if Quitting
                    self.quit()
                if event.type == pg.KEYUP:
                    waiting = False

    def show_go_screen(self):
        # game over/continue
        #Change This if you want to modify Game Over Screen. Click X to view screen
        if not self.running:
            return
        self.screen.fill(LIGHTGREY)
        self.draw_text("GAME OVER", 48, WHITE, WIDTH / 2, HEIGHT / 4)
        self.draw_text("Score: " + str(self.SCORE), 22, WHITE, WIDTH / 2, HEIGHT / 2)
        self.draw_text("Press a key to play again", 22, WHITE, WIDTH / 2, HEIGHT * 3 / 4)
        if self.SCORE > self.HIGHSCORE:
            self.HIGHSCORE = self.SCORE
            self.draw_text("NEW HIGH SCORE!", 22, WHITE, WIDTH / 2, HEIGHT / 2 + 40)
            with open(path.join(self.dir,SCORE_FILE), 'w') as f:
                f.write(str(self.SCORE))
        else:
            self.draw_text("High Score: " + str(self.HIGHSCORE), 22, WHITE, WIDTH / 2, HEIGHT / 2 + 40)
        pg.display.flip()
        self.wait_for_key()
        self.SCORE = 0
        pass
# create the game object and run the main loop forever
g = Game()
g.show_start_screen()
while True:
    g.new()
    g.run()
    if g.TRAINING:
        # Persist the learned Q-matrix and bump the generation counter after
        # every training episode. (np.savetxt(fname, X): g.Q is the file path.)
        np.savetxt(g.Q, g.Q_MATRIX)
        g.GENERATION += 1
        with open(path.join(g.dir, GENERATION_FILE), 'w') as f:
            f.write(str(g.GENERATION))
    # BUG FIX: the original repeated this save block inside
    # `if not g.TRAINING: if g.TRAINING: ...`, a contradictory guard that
    # could never execute — dead code removed.
|
import unittest
import day_7
challenge = (
"llyhqfe (21)",
"vpbdpfm (74) -> ndegtj, wnwxs",
"dosteiu (262) -> vliyv, rfxmk, nulxd, tckql",
"leqnli (222) -> wuttw, nckca",
"cgztcyz (59) -> zbtmpkc, lleaucw, zxvjkqv, tqjyoj",
"dqfti (67)",
"vsjhe (34) -> zpbbgqh, menyi, ksasli, uahdbi, ccfiz, kdwmlx",
"ntzuhe (98)",
"mpjrzt (53)",
"dnzll (23)",
"ensyb (18) -> usvzfi, uxxtnll, phrkfo, vntjo",
"airqzst (39)",
"hfzvg (26)",
"wpojcme (79)",
"xggisxm (37)",
"jkqcelt (35)",
"apjsu (299) -> rgylin, yrmfcs",
"odoni (18)",
"gzatvf (27)",
"azkpaf (81)",
"dnyaj (76)",
"chfcnsc (70)",
"wjdkcjo (29) -> jdntuc, htaxf, edpqtnn",
"bejkc (194) -> lqjnh, xkfwmh",
"lfapaod (97)",
"eidqfh (24)",
"haeyms (23) -> akxrge, qgqrmeu, nsnhdll, ydyvay",
"ialdd (67)",
"otqufza (116) -> dvasofv, mxdxz",
"jbopt (91)",
"mkxsdn (46)",
"vkcim (63)",
"ypokgio (14)",
"wiihwvv (55) -> mivrqpc, hdqgdm, muulq, tveyfha",
"rvdldy (47)",
"xzsfek (87)",
"shkfwm (26) -> yjpzyzx, vdnvw, nsbyncu, wpafb",
"jdryrup (43)",
"zqxhle (53)",
"xaaqdv (21) -> kxkwc, mpwnd",
"lfmlqs (79)",
"mcctaf (37)",
"qewiy (18)",
"lfzvi (240) -> gxmqlu, sfteyu",
"zbtmpkc (94)",
"jaathmh (33)",
"sjwxyqb (55)",
"wxvsp (187) -> umiohmp, zeauj",
"opghigg (97) -> dletgs, bcgqdc",
"kabjov (239) -> eulcspz, nxttce",
"jivdw (14)",
"nckca (6)",
"nfeok (203) -> apqan, ywtywz, inoyp",
"ejyegf (71)",
"ccmfbok (82) -> jbopt, cteuws, rrsxb, atfjks",
"qizkjh (350) -> mxsacj, liiwwfa",
"tdfirdf (62)",
"jwboky (54)",
"ksnnnc (638) -> iuuzow, eiyvtz, dlxcy, ltfbsgc",
"mhbiyxk (15396) -> ehpfjr, zqgeod, hiccoc",
"bzenp (37)",
"uymhfo (37) -> wiihwvv, jgpsybl, zpxuxph",
"vewathl (261) -> bzbxoa, ntzbt, jsizfuj, ikrlxqw",
"bvqhn (82)",
"jqtxjrm (1199) -> aqkclfk, eipaxu, hzvctd, zpohg",
"kfuwkh (52)",
"aamghal (79)",
"jqywsxa (74)",
"ehpfjr (42) -> sdxwhvp, itdxbrj, cgztcyz, awylric",
"gxmyk (166)",
"beknji (29)",
"ahvdop (50)",
"fpuhllh (8)",
"cadtows (49)",
"shbrz (874) -> hcywj, pkgyjn, hwxxvlb",
"tebvlpn (106) -> kfuwkh, nkuhc",
"jexcm (33)",
"gwplv (33)",
"nsckvp (49)",
"ghaxmrh (10881) -> hhosv, ximzx, ztphu",
"gkwamq (54)",
"vonve (204) -> scxkq, ubsbx",
"hmlil (63)",
"rpmzw (97)",
"fzkqz (75) -> xxyjm, yjqgw, ejyegf",
"dfwamci (36)",
"bzbxoa (42)",
"qlmbqwi (58)",
"ivygtzl (1708) -> vjfsl, xfvhi, sbhfnav",
"vliyv (20)",
"mecsrr (81)",
"uskdpcu (388) -> nktkgz, xcuud",
"vwktc (60)",
"fkpjukc (42)",
"nsnhdll (70)",
"dpgggti (8)",
"nxmxgax (70)",
"vwntogi (64)",
"jshekxk (42)",
"ncxhv (184) -> ddxiiha, hcvuc, tebvlpn, kkjen, wjkalv",
"urpzfa (58)",
"ocrgjl (2738) -> qogmb, qxirdyg, aovhss",
"gtxvgr (58)",
"psqgnhx (20)",
"vkoor (69)",
"mzpeoz (50)",
"sbebrkf (12)",
"zupsoqc (20)",
"eiyvtz (65) -> modakko, mlydcn",
"jdvuj (8)",
"bytizsx (61)",
"dhamym (17)",
"zldebh (76)",
"esmltj (21)",
"tihzzf (701) -> wyeoaxt, hrkhlaq, vyccl, jezmn, nmmrik",
"rmriv (27)",
"byiqom (88)",
"mnkamc (1717) -> idhjov, pyurvrc, ahpitb",
"fvtofr (44) -> jexcm, jokgw, slmnzei",
"ppkpq (41)",
"kxkwc (57)",
"epnvhbn (21)",
"tusmlk (295) -> ialdd, kipiwwk",
"lwqscns (14) -> zgyryw, oiooued",
"zbmsz (35)",
"czmmh (44)",
"fmwid (1567) -> dhbxpw, xkzrkzh, wxvsp, zqyrggw",
"sviwi (15)",
"nodqkan (89)",
"qddbmn (72)",
"kiphte (353) -> kfiggar, rncuf",
"jyajecr (14)",
"clqwflm (17)",
"kkjen (36) -> rftaqhw, hxtejel",
"iokwnq (25)",
"yelgho (38) -> uiagqs, dzrflyr, tdfirdf",
"rgocso (41)",
"mdsywgy (70)",
"knhvwhl (298)",
"zpxuxph (399)",
"dhbxpw (19) -> itfnye, yghucrl, ekvkidl",
"rxeqfsj (24)",
"sjzapjt (85) -> opndzmu, ilexb, tqddro",
"nbybi (7) -> vwntogi, mhvzqc",
"bnkbyp (73) -> hagkc, arfsqdz, wbzmjq, eisjz",
"jezmn (40) -> nsmlghl, lakhmm",
"iwlxpz (18)",
"vjfsl (87) -> gccvp, wkble, ilshxl, jqywsxa",
"ztphu (983) -> picliob, wcblyq, ollvgn",
"euwfw (12)",
"rccvm (363) -> mutyu, kqltwau",
"hifms (54)",
"byldgs (79)",
"forycux (37)",
"xwwjzx (39)",
"qvqzuic (6)",
"uylrp (7179) -> apqwz, nsqaxp, yffumkx",
"jblzpyq (45)",
"peexz (96)",
"ldcaht (98)",
"wwggl (157) -> ssxpawm, brjzpkm, woionr",
"yyhkwha (179) -> yiehfd, jkqcelt, fuvikt",
"rjtdc (44)",
"wnwxs (83)",
"msigvaq (96)",
"ojrggba (69)",
"pvctv (3341) -> gkwamq, sattu",
"ctrdahm (24)",
"xfvhi (283) -> zkphtd, qmncedz, lsdkm, iokwnq",
"gkrtbv (30)",
"aovhss (185) -> wevhizp, lmnews",
"tfpbait (45)",
"mkeen (102) -> jttgtsg, phkcge, zxyrq, telnuq",
"efbrhl (31)",
"wjkalv (54) -> eloku, xwwjzx, etyja, vghvcv",
"ndois (317) -> csuoxe, jwboky",
"vfpwu (712) -> tusmlk, vewathl, jxfbflh, lcefyg, bnkbyp",
"zdkgm (13) -> xpkyf, eaqhut",
"rhpxizt (449) -> lcnqmai, cpjkn, ccfbpoc",
"dmhfz (66) -> ojrui, bkuazfi, yedrd",
"bpbwn (97)",
"cdglv (1515) -> pbimnll, rcyjnsi, sfnsx, hfdoqqt",
"kfcaozk (27)",
"lqjnh (38)",
"qhjui (47)",
"ntzbt (42)",
"atfjks (91)",
"kfiggar (24)",
"qeoyu (42) -> liukun, tdvorom, knhvwhl, ombds",
"tlnuq (76)",
"zpedug (76)",
"arrok (230) -> ypokgio, qonkb",
"wvvmksv (21)",
"scxkq (33)",
"iuuzow (117) -> unlwjj, ayfcyul",
"bkuazfi (55)",
"hrkhlaq (66) -> mwavu, jbtqs",
"nbtsze (80)",
"darmn (96)",
"kaugsh (378) -> fhzkakn, epdzg, ogsxfk, rzoojpm, dabvuui",
"elhxdco (220) -> wmmrhf, tcxkqku",
"mgnux (46)",
"yzhwurz (136) -> pksyw, osrkwa",
"muulq (86)",
"ayfcyul (43)",
"uryery (39)",
"xipivez (33)",
"ubsbx (33)",
"tcxkqku (21)",
"dhqjni (17)",
"nokkziw (73)",
"yzjiby (79) -> numbey, jqtxjrm, ybkdekt, ciwpis, smkqg",
"ybjghed (91)",
"axleb (81)",
"jfoztzy (37)",
"rjoszhu (92)",
"vimazqc (93)",
"svhcnju (24)",
"mwssex (55)",
"wfmmajk (177) -> kfcaozk, pmfbr",
"hxjopp (228) -> nsckvp, cadtows",
"bieswf (51)",
"dabvuui (38) -> nxmxgax, xmtosc, chfcnsc",
"apqan (38)",
"livac (240)",
"kcotwhf (1006) -> jbztwms, pfpmube, bgeec, hhawhzk",
"mxdxz (44)",
"hhosv (40) -> klnemf, vrzsj, bacazl, gzepcax, onqop, afkeosv, zvlafea",
"eytppcy (309) -> ltifq, ehxjsgn",
"kyzjusc (37)",
"gmsmnlz (36)",
"mncyztp (1990) -> fsmzfjp, kndrzyc, svvirl",
"bnkfzle (261) -> bvqhn, edihrrv",
"jakfuqo (317) -> sxfxnp, tdrdp",
"uxqiqg (95) -> xwyggz, oxtvu, zwtaqj, cxvse",
"tftwygl (29)",
"ytaus (47)",
"uxxtnll (81)",
"yghucrl (66)",
"tnxoqxw (93)",
"emkyoy (354) -> rddeecm, dugvnav",
"hmorsv (64)",
"jwidjq (217) -> qfyor, bpsyylv",
"keily (231) -> ueywo, xlpqnhm",
"umiohmp (15)",
"kiuayw (24)",
"enuzo (63)",
"noejr (83)",
"tveyfha (86)",
"ohmvcr (98)",
"xfzxw (67)",
"modakko (69)",
"cwemvgf (85) -> iebsger, mtoqh, ciabx, puzwwgx",
"wmmrhf (21)",
"vxghl (48) -> umgqr, cbvwcv",
"cnvghq (33) -> byiqom, ehljn",
"iemkgdl (79)",
"gpucfv (270) -> bscob, leyohju",
"mlydcn (69)",
"gsgexgb (90) -> cwemvgf, cukbzsw, iaiqz, gveadp, rccvm",
"iybrmf (27)",
"frruz (7530) -> pkrxt, ifwkgxo, abpry, dydso, fjjuj, sgfbfq",
"xxvlxs (37)",
"uqttm (33)",
"eryxwj (90) -> vxtwg, bmtjkw",
"bsdxw (29)",
"ffxkad (91)",
"ciwpis (1215) -> elhxdco, ccmod, etuteik, jaxkva",
"zzxzeuo (62)",
"qycoh (66)",
"suprw (34) -> vpbdpfm, kacamw, dwdczlx, mrqaryt, rjnzfa",
"efpvvp (7686) -> mncyztp, tleviy, uuftjqx, vsjhe, mmutg, wzvjkiu",
"bpsyylv (17)",
"thmnm (80)",
"rvpbx (256) -> zwxlf, ojrggba",
"ndegtj (83)",
"ksasli (331) -> cyxtnfe, xrtkqi",
"vyfbsgv (81) -> rwxfhk, kihifp, ndois, kupmpp, eytppcy, bnkfzle, fycnyn",
"kjyufi (37)",
"ykxkv (299) -> ijzgy, dsbxavd",
"wnfcsap (32)",
"icoti (69)",
"akowch (67)",
"xlpqnhm (59)",
"yzrfzv (73)",
"jmmbca (91)",
"mfacoz (23)",
"bcgqdc (77)",
"ohbfa (79)",
"numbey (1861) -> lwqscns, arlrk, klqvgm",
"kfgyus (42)",
"dlfay (16) -> zksnaz, miocbjk",
"dwdczlx (162) -> cmdcov, pbhsevc",
"liiwwfa (22)",
"ikrlxqw (42)",
"fwbang (99684) -> lvxnl, aspplbw, uylrp, yzjiby",
"ryzfgj (44) -> lrbozkj, mpjrzt",
"zrjtxfa (9314) -> iixjr, vfpwu, ivygtzl, vgwfukr",
"mkxatmb (91)",
"zxmsme (66) -> lgjbhwy, jugycbw, dnzll",
"hxtejel (87)",
"rsblo (385)",
"rqbgxlt (24)",
"rbbhhe (91)",
"kcbag (36)",
"fdorzyx (49)",
"lcnqmai (12)",
"dvpmg (1474) -> uagszs, otqufza, mjmpkq",
"lakhmm (99)",
"zxyrq (18)",
"fthgkl (55) -> ekuibos, zmtszz, peexz",
"onqop (60) -> bnryi, kjyufi, kyzjusc, elukq",
"tismupk (86)",
"hhrqbn (75)",
"cycky (75)",
"xksjes (103)",
"ufhjnc (106) -> rbbhhe, ybjghed",
"aqkclfk (88) -> nodqkan, tahov",
"fuehgn (175) -> dnliq, dhqjni, ruszodn, cjagg",
"afkeosv (181) -> qfwtxzq, aowuj, trkvrk",
"brdkwc (45)",
"yedrd (55)",
"wyeoaxt (104) -> inlzx, akowch",
"owfrl (1516) -> rsblo, hfytix, ggwwhvf, ykxkv",
"gosjs (24)",
"mxsacj (22)",
"osrkwa (32)",
"drffb (15)",
"zsucroj (76)",
"ermgcbt (78)",
"jpjehc (37)",
"vdnvw (58)",
"holcy (66) -> frruz, hbzxaji, mhbiyxk, zrjtxfa, efpvvp, acmrndk, trrkrqa",
"gjbijgl (64)",
"lphqgek (21) -> qdpnoic, qvjiwvb, qxoly",
"uhsdpj (72) -> rslnx, bmaoav",
"akxrge (70)",
"yerckb (255) -> dpgggti, ssysjwe",
"eipaxu (150) -> qlmbqwi, fsuglk",
"pbhsevc (39)",
"pfmordc (36)",
"rdjfp (1393) -> dmhfz, wfmmajk, utoogeb",
"tpbbd (91)",
"dexwo (223) -> fqjdoe, ovpyq",
"hwinqpr (27)",
"mqgmc (150) -> vkoor, hvdwvo",
"qrhweil (99)",
"krdsv (24)",
"dydso (1002) -> evbilqr, glbaxl, yzhwurz, usubx, uxxyr, gremk",
"wowirye (47)",
"eaqhut (70)",
"ilshxl (74)",
"yqsfolo (11) -> vrdrc, utqxez",
"qniem (85) -> qdnuduv, zsseyik, xxardqs, xafip, uhsdpj, xxehapc, pmwosk",
"ojbyg (21)",
"vkwcj (96)",
"rtvpznv (257) -> emxviup, dfwamci, pfmordc, ikcjmxi",
"abpry (1005) -> uxqiqg, adxplm, xlbjv",
"nhtetdw (29)",
"zstbuv (60)",
"zmtszz (96)",
"nfccf (26) -> zpqpd, skbxo, hzkvyoj, fhivbs",
"krkeek (52)",
"owgbqb (30)",
"tekug (77)",
"jaxkva (138) -> zzxzeuo, npxeql",
"hqqxg (43)",
"ungfmbw (61)",
"edpqtnn (96)",
"ydyvay (70)",
"niopwq (42)",
"jgpsybl (317) -> ppkpq, fezoee",
"oyypq (142) -> iwxgwc, qyovvxb",
"inlzx (67)",
"edihrrv (82)",
"qsloy (44)",
"yffumkx (9) -> jakfuqo, ouxsgm, keily, pshyy",
"fhivbs (92)",
"fkprhv (31)",
"rhgyz (38)",
"eiyxgk (76)",
"pzemz (76)",
"lcefyg (353) -> twvjddq, rhgyz",
"vrpyfgm (88)",
"hfytix (81) -> cjctf, fckcu, ztcqm, pzemz",
"voiqnou (147) -> czmmh, rjtdc",
"ozwdh (96)",
"ekhsrgq (25)",
"xmtosc (70)",
"yfrewb (77)",
"qfyor (17)",
"ijuod (93) -> ldcaht, ruuhrmf, dfbabey, bbdfr",
"pknpuej (91) -> livac, vxghl, qcccxc, mxprsl",
"gwvsbo (76)",
"fqjdoe (39)",
"gqahoa (57)",
"vykav (89) -> dfhtf, yopex, ypzxdhs",
"picliob (97) -> bzenp, jfoztzy",
"fhzkakn (107) -> wowirye, aseilg, jmutqq",
"umgqr (96)",
"qmlguo (96)",
"gosak (27)",
"nvatz (63)",
"zsgnve (39)",
"fyvjfxi (58)",
"oxtvu (76)",
"hoewyjx (47)",
"qykdedu (63)",
"tocrk (88) -> nzyls, qlgljuh",
"ssnoqt (24)",
"mhvzqc (64)",
"arfsqdz (89)",
"vrzsj (127) -> tetfdv, ornacig, yrdbx",
"tszune (40)",
"jcuhfsd (80) -> sordz, zmfhyr",
"dqaov (96)",
"jeafpic (32)",
"tdvorom (246) -> hfzvg, dapey",
"dldcoc (149) -> gwvsbo, eadjn",
"ngxtfx (91)",
"oiooued (60)",
"nmhmw (979) -> krdsv, kiuayw, rovftl",
"yqmbbyr (24)",
"zvlafea (140) -> wtjoxu, dxszgsr",
"uiagqs (62)",
"lhpjahj (86)",
"uagszs (36) -> cflribm, dsukkg",
"umqlwls (202)",
"pazby (45)",
"mqayze (55)",
"nzeqmqi (12216) -> nmhmw, pknpuej, rfkvap",
"hfdoqqt (128) -> sevcqp, bieswf",
"ejmfnnu (43)",
"hupmm (76)",
"wnahs (18)",
"oomve (87)",
"hdfsofm (75)",
"qonkb (14)",
"cnlny (278)",
"ogsxfk (140) -> iybrmf, xixiloi, hwinqpr, jdmrbxc",
"nafdo (23)",
"jsizfuj (42)",
"mwavu (86)",
"ciabx (96)",
"ljwcd (16) -> nokkziw, cjgpfb, yzrfzv",
"etyja (39)",
"hcrzxz (78)",
"xwyyfr (256) -> jyajecr, jivdw",
"lrbozkj (53)",
"oibnbf (15)",
"bbdfr (98)",
"zwzgp (14)",
"svvirl (62)",
"tlkrx (23)",
"kndrzyc (62)",
"euenhl (53)",
"ipvrlll (86)",
"ggwwhvf (160) -> hhrqbn, zvazn, hdfsofm",
"sofrg (281)",
"jyovf (29)",
"agobkww (69)",
"gzepcax (50) -> wpojcme, pprspr",
"cxvse (76)",
"rftaqhw (87)",
"yzbmyaw (86)",
"jjbmtij (54)",
"whuozum (35) -> suftfkn, gtxvgr, igxdio",
"jxfbflh (229) -> xkyocjn, ahvdop, ewlsf, jejwwxj",
"woionr (15)",
"pkgyjn (76) -> urpzfa, fyvjfxi",
"pwydnik (63)",
"chhli (46)",
"gdvcou (189) -> jdglmn, mgnux",
"cbvwcv (96)",
"gtiqar (29)",
"txcwm (29)",
"fvojv (116) -> rycpngd, hifms",
"dzxjy (96)",
"niznnko (44)",
"jmutqq (47)",
"sevcqp (51)",
"pmfbr (27)",
"shoxg (7)",
"ltfbsgc (109) -> rvdldy, qhjui",
"ltifq (58)",
"jxzyg (37)",
"jttgtsg (18)",
"zwtaqj (76)",
"mwussz (143) -> etotvx, lbmvl, xlavrvm, rmriv",
"zwlok (143) -> zvtgd, shoxg",
"hcywj (102) -> brdkwc, tfpbait",
"kbuslbp (81)",
"smunvi (172) -> zqxhle, euenhl",
"qlgljuh (26)",
"gwcqtcr (73)",
"kacamw (170) -> bexrple, wetutqh",
"dvkbqm (20)",
"eaerpmi (1342) -> rjoszhu, migwxez, izydgv",
"ealilsq (50)",
"eisjz (89)",
"rihil (12)",
"ysabu (24)",
"ombds (178) -> wlrihpy, vwktc",
"sueftvh (81)",
"mrqaryt (80) -> thmnm, kligtj",
"xkyocjn (50)",
"prywl (43)",
"fonky (47)",
"bhddwe (64)",
"mtoqh (96)",
"pksyw (32)",
"jndnfa (45)",
"guqul (222) -> oonfc, irpjsbf",
"nkuhc (52)",
"apqwz (934) -> gjvcdp, fnuzrye, zwlok",
"ojrui (55)",
"tckql (20)",
"umsilqj (32)",
"xswwe (212) -> ffxkad, ngxtfx",
"hwtztim (187) -> idaqt, jshekxk",
"nuzxo (83)",
"itfnye (66)",
"qcedbm (2484) -> bkipqaq, xmcqygt, fvtofr, zjksxbk",
"jutbah (55)",
"scntyh (5)",
"liukun (70) -> oftcgd, eiyxgk, sqbfin",
"vqxwlkh (8119) -> kcotwhf, ksnnnc, shbrz, jtxdihn, yixpr",
"ywqtog (136) -> epelgzz, lpvwee",
"oftcgd (76)",
"rdzvcb (184) -> clqwflm, dhamym, qkmkjm",
"tqjyoj (94)",
"pprdw (42) -> tpbbd, mkxatmb",
"bgmypwk (22)",
"hznriv (96)",
"tleviy (973) -> rtvpznv, gbpxwcx, kiphte",
"csuoxe (54)",
"pqqcnkr (39)",
"hcqrrju (100) -> xfzxw, zwyhf",
"amccpoz (254) -> zsaen, jfkvg",
"wbzmjq (89)",
"xpkyf (70)",
"hnofc (85)",
"dsukkg (84)",
"qfifp (6)",
"qmncedz (25)",
"dyrik (37)",
"iaiqz (445) -> sgjywom, alwbi",
"xpjzc (86) -> qrhweil, vsgqkho",
"agagr (81)",
"fpkktd (25)",
"mjmpkq (44) -> nbtsze, hwlgay",
"kwhtsv (26)",
"awljibm (5)",
"hssykro (81)",
"vyccl (80) -> aamghal, tkwmbxl",
"nelgvnr (2052) -> jwidjq, mwussz, nkuwwiy, opghigg",
"zbhioc (20)",
"zqgeod (1323) -> sbnod, rtxzoap, zdkgm",
"rgqjtw (243)",
"vaxouij (343)",
"jsrpud (57)",
"wetutqh (35)",
"lsdkm (25)",
"sdttg (27)",
"arlrk (40) -> xlsmzu, hoewyjx",
"ybkdekt (1234) -> vaxouij, fthgkl, kabjov",
"sebno (279) -> qfifp, ghxvqb, qvqzuic, wfazzy",
"keidsd (73) -> hqqxg, ejmfnnu, prywl",
"uevcnw (39)",
"zvazn (75)",
"xdjola (63)",
"miocbjk (75)",
"ghxvqb (6)",
"ywtywz (38)",
"vtpoo (89) -> vskibye, nelgvnr, qcedbm, owfrl, vyfbsgv",
"jlfukd (57) -> tnxoqxw, vimazqc",
"vkxyhk (41)",
"dlxcy (203)",
"kosbvn (19)",
"wfazzy (6)",
"uufonho (46)",
"kdwmlx (357)",
"lokmiua (2132) -> hdrab, eftrvo, hbnnhyi, avnxndg, tihzzf, nbvtfz, qniem",
"dugvnav (46)",
"xxardqs (216) -> llyhqfe, ojbyg",
"wkble (74)",
"lpvwee (67)",
"pkrxt (1524) -> drwpdaj, ojcinc, hqetmky",
"zjksxbk (143)",
"iebsger (96)",
"ofwijoe (60)",
"qyovvxb (41)",
"igxdio (58)",
"xkzrkzh (148) -> nafdo, tlkrx, gclbhxw",
"ruszodn (17)",
"dnliq (17)",
"qcccxc (114) -> ibiuha, byykf",
"bscob (28)",
"mivrqpc (86)",
"yiehfd (35)",
"hbnnhyi (1055) -> ptnjpp, hqcxvkr, aqlvute, yqsfolo",
"jpexkf (86)",
"hagkc (89)",
"hlscl (214) -> jblzpyq, jndnfa, twvfw, pazby",
"irrca (56)",
"sgjywom (12)",
"epdzg (248)",
"yixpr (1141) -> pjlhta, xksjes, icfxlu",
"yjtsmy (85)",
"inwmb (53046) -> ghaxmrh, vqxwlkh, nzeqmqi, lokmiua, znypga, vtpoo",
"etuteik (262)",
"wbtqez (87)",
"lmnews (26)",
"zoovdc (75)",
"smkqg (44) -> vyzukfk, nfeok, apjsu, vbjlbhq, wjdkcjo, ztstgc, olvxzb",
"telnuq (18)",
"jdmrbxc (27)",
"rrsxb (91)",
"tetfdv (27)",
"cpjkn (12)",
"gveadp (429) -> zbhioc, psqgnhx",
"zxvjkqv (94)",
"mmvszx (12)",
"xkfwmh (38)",
"vyzukfk (255) -> efbrhl, tqdfypr",
"gbpxwcx (77) -> azkpaf, axleb, ngwafk, sueftvh",
"blcnplx (32)",
"trkvrk (9)",
"nxttce (52)",
"rzoojpm (234) -> kbguoiu, yvjjec",
"idhjov (49) -> mcctaf, jpjehc",
"ctnucjw (105) -> ohmvcr, ntzuhe",
"tdrdp (16)",
"qkmkjm (17)",
"bacazl (82) -> enuzo, hmlil",
"lgjbhwy (23)",
"etotvx (27)",
"pfkbcg (228) -> xkvtxav, oibnbf",
"fjlqp (30)",
"nsbyncu (58)",
"xnackkp (33)",
"sxfxnp (16)",
"hhawhzk (89) -> dserbhu, ckfagrp",
"dsbxavd (43)",
"mtbszl (12)",
"bkipqaq (71) -> ctrdahm, sptjz, svhcnju",
"sndkiv (73)",
"idaqt (42)",
"sxcfr (98) -> ungfmbw, ynjccf, bytizsx",
"gtutcoq (154) -> zqmizps, vxdcv, nmshjlp",
"efxxl (69)",
"npxeql (62)",
"ljelubc (437) -> gwxgtm, umfqiru",
"hzbtbe (46)",
"bjvncf (49)",
"hwxxvlb (48) -> qddbmn, vomiow",
"sfteyu (19)",
"sbhfnav (255) -> jeafpic, ywecj, wnfcsap, cvgzkp",
"bmaoav (93)",
"fnuzrye (125) -> vlqcuq, jdvuj, mykvbt, fpuhllh",
"vsgqkho (99)",
"vkbgz (63)",
"rcyjnsi (206) -> rihil, usfvqn",
"hdqgdm (86)",
"unaqbx (23)",
"umyrtu (33)",
"sordz (47)",
"byykf (63)",
"qggexrc (33)",
"rovftl (24)",
"qdnuduv (120) -> icoti, efxxl",
"qogmb (81) -> rnyqcj, hcrzxz",
"dfbabey (98)",
"vdmkp (38)",
"kacpu (90) -> hrbfs, vdmkp",
"jcegjy (92) -> xoaxcg, mecsrr",
"xrtkqi (13)",
"cosllh (38) -> hzfwp, gjbijgl",
"jfkvg (15)",
"ifwkgxo (1752) -> pbrcoyl, ryzfgj, luswq",
"grcsr (64)",
"dzrflyr (62)",
"hjtwqe (748) -> mkeen, zowbv, lphqgek, jcuhfsd, qymfgaf",
"yjqgw (71)",
"idiorl (24)",
"usvzfi (81)",
"acmrndk (13437) -> cdglv, gsgexgb, fmwid",
"vijilqr (271)",
"dxszgsr (34)",
"ehljn (88)",
"yutfxcu (72) -> wobno, mwmfw",
"ngwafk (81)",
"eijlg (51) -> dzxjy, msigvaq",
"pkchh (24)",
"izydgv (56) -> odoni, wnahs",
"wobno (93)",
"idfxtqr (96)",
"hqetmky (127) -> gwplv, umyrtu, uqttm",
"fvjrau (166) -> qsloy, niznnko",
"ehxjsgn (58)",
"hqcxvkr (37) -> oeyrk, ipvrlll",
"vaztjkc (75)",
"ynnfzdz (54)",
"vrdrc (99)",
"hdrtnjm (273) -> drffb, sviwi",
"ywecj (32)",
"iixjr (99) -> hlscl, xswwe, gtutcoq, qizkjh, nfccf, rvpbx, extmwcb",
"fuvikt (35)",
"kipiwwk (67)",
"lakzkpk (84) -> rpmzw, lfapaod",
"extmwcb (142) -> vkcim, ihramd, kwkdq, hjwjw",
"pmwosk (118) -> leefit, mdsywgy",
"ruuhrmf (98)",
"tqdfypr (31)",
"itdxbrj (366) -> fjebkm, mfacoz, unaqbx",
"kfgmuj (54) -> ljelubc, jlewu, rhpxizt, pjujpa, ijuod, xauyij, bbrdet",
"jtxdihn (45) -> sofrg, vgdtk, sxcfr, rmivzpg, gdvcou",
"yzulmo (33)",
"uuamcdc (1954) -> qycoh, jhbov",
"ochjr (68) -> xhujxe, fonky, olyohzo",
"nbvtfz (807) -> hwtztim, yerckb, nyszg, vijilqr",
"imtvzmm (40)",
"tshcqcu (33)",
"ovsgve (83)",
"ueywo (59)",
"gxmqlu (19)",
"tahov (89)",
"elukq (37)",
"zadsb (87)",
"bgeec (53) -> tftwygl, txcwm",
"yjpzyzx (58)",
"xlbjv (288) -> jxzyg, dyrik, cdanu",
"zsaen (15)",
"qywtwbt (124) -> esmltj, qsppfv",
"dwbirs (5)",
"oonfc (16)",
"pyurvrc (49) -> xwidhe, xxvlxs",
"zksnaz (75)",
"xhujxe (47)",
"wctphrv (19)",
"otadcxu (2028) -> bsdxw, beknji",
"hzkvyoj (92)",
"zeauj (15)",
"zayxe (2822) -> ochjr, cnvghq, whuozum",
"aqlvute (27) -> absogx, jmmbca",
"ilexb (72)",
"vrdtrmn (69)",
"dfhtf (79)",
"sbnod (153)",
"lwyirb (77)",
"hwlgay (80)",
"sfnapsi (55)",
"cflribm (84)",
"cjgpfb (73)",
"menyi (185) -> lhpjahj, yzbmyaw",
"ubkmjag (78)",
"fycnyn (313) -> lwuvg, irrca",
"rzixiwv (10)",
"qtvcuqk (85)",
"alwbi (12)",
"sqbfin (76)",
"hcvuc (78) -> yzulmo, xnackkp, qggexrc, wjdhaf",
"ccmod (13) -> ovsgve, noejr, nuzxo",
"rslnx (93)",
"opndzmu (72)",
"mxprsl (84) -> uivazm, ennkek, krkeek",
"zowbv (28) -> sndkiv, gwcqtcr",
"pjujpa (433) -> rgyaijv, kwhtsv",
"rnyqcj (78)",
"zwxlf (69)",
"ccfiz (231) -> pwydnik, vkbgz",
"zqyrggw (61) -> qqbgbeo, ubkmjag",
"puzwwgx (96)",
"xwidhe (37)",
"asmikyo (24)",
"hdrab (1083) -> umqlwls, tuldcdj, keidsd, wwggl",
"ximzx (146) -> ywqtog, bejkc, vonve, wmdgia, ggeae",
"hzfwp (64)",
"wmdgia (14) -> bhddwe, zrzgp, hmorsv, grcsr",
"pfpmube (73) -> wctphrv, kosbvn",
"eadjn (76)",
"txplq (24)",
"qsppfv (21)",
"qxoly (51)",
"sardhwu (77) -> iemkgdl, byldgs",
"vlqcuq (8)",
"eloku (39)",
"ypzxdhs (79)",
"fsmzfjp (62)",
"zqmizps (80)",
"qqpnt (1108) -> hxjopp, gpucfv, vykav",
"aowuj (9)",
"xlsmzu (47)",
"jokgw (33)",
"epelgzz (67)",
"cjctf (76)",
"qgqrmeu (70)",
"eulcspz (52)",
"sattu (54)",
"vgdtk (101) -> ggffqux, jlinuge, hvinb",
"brjzpkm (15)",
"yvlwtb (114) -> wbtqez, zadsb",
"ssysjwe (8)",
"ptnjpp (135) -> forycux, xggisxm",
"adxplm (295) -> nfxyjl, lksgoz",
"qvjiwvb (51)",
"lleaucw (94)",
"absogx (91)",
"zflsryn (239) -> fkprhv, blloue",
"inoyp (38)",
"luswq (102) -> yhlyk, mmvszx, euwfw, sbebrkf",
"rncuf (24)",
"jdntuc (96)",
"gremk (86) -> gqahoa, jsrpud",
"mutyu (53)",
"yhlyk (12)",
"ewlsf (50)",
"xixiloi (27)",
"woiwqf (5)",
"fckcu (76)",
"ddxiiha (16) -> bpbwn, uuxhyt",
"nmmrik (73) -> jutbah, sjwxyqb, mqayze",
"ssxpawm (15)",
"rksykyt (77) -> ohbfa, lfmlqs",
"zwyhf (67)",
"jhbov (66)",
"tqddro (72)",
"aafpxpx (30)",
"zsseyik (248) -> awljibm, woiwqf",
"vxdcv (80)",
"ovpyq (39)",
"znypga (14031) -> emkyoy, uskdpcu, ccmfbok",
"gwxgtm (24)",
"ztstgc (125) -> jqwbc, darmn",
"usubx (100) -> mzpeoz, ealilsq",
"klqvgm (56) -> zsgnve, pqqcnkr",
"uuxhyt (97)",
"yjxneui (76)",
"awylric (393) -> wvvmksv, epnvhbn",
"wtjoxu (34)",
"ztcqm (76)",
"wjdhaf (33)",
"hrase (25)",
"llcpsj (178) -> mwssex, sfnapsi",
"hrbfs (38)",
"vomiow (72)",
"uumrue (29)",
"xxyjm (71)",
"yrdbx (27)",
"dserbhu (11)",
"trrkrqa (48) -> awytebt, ocrgjl, zayxe, pvctv, sdovaq, kfgmuj",
"xoaxcg (81)",
"sdxwhvp (192) -> agagr, hssykro, kbuslbp",
"beqez (142) -> mtbszl, dytsvc",
"kihifp (117) -> tekug, lsxwznl, lwyirb, yfrewb",
"fwlyuh (70) -> zbmsz, btldlkh",
"cvgzkp (32)",
"migwxez (72) -> rzixiwv, bwekmvc",
"jqwbc (96)",
"ihramd (63)",
"bwekmvc (10)",
"oeyrk (86)",
"ccfbpoc (12)",
"bexrple (35)",
"rtxzoap (153)",
"bvwnlaw (88)",
"pbimnll (136) -> pkbitw, ytaus",
"zciuy (63)",
"zvtgd (7)",
"uuftjqx (1414) -> guqul, jcegjy, fvjrau",
"qymfgaf (66) -> gzatvf, sdttg, gosak, pcacjm",
"esbnpk (43)",
"ekuibos (96)",
"azyccec (67)",
"vghvcv (39)",
"icfxlu (31) -> asmikyo, gosjs, fafrerl",
"cmdcov (39)",
"rjnzfa (192) -> eidqfh, yqmbbyr",
"qxirdyg (121) -> mwhopi, jyovf, nhtetdw, tmvjt",
"wlrihpy (60)",
"nyszg (95) -> bvwnlaw, vrpyfgm",
"hiccoc (1118) -> osbsdhc, dlfay, fsomlm, cosllh",
"ixiqnn (138) -> vthnh, cycky",
"uahdbi (313) -> bgmypwk, rzxyny",
"ahpitb (25) -> yyoptv, qihhif",
"zrzgp (64)",
"xxehapc (120) -> vrdtrmn, agobkww",
"kwkdq (63)",
"prhgge (49)",
"ornacig (27)",
"wuttw (6)",
"sjbalvv (186) -> uufonho, chhli",
"xauyij (427) -> uumrue, gtiqar",
"thahonu (208) -> eacnma, fpkktd",
"vgwfukr (1921) -> hcqrrju, ckqwb, leqnli, xavfse",
"pshyy (97) -> qykdedu, zciuy, nvatz, xdjola",
"tkwmbxl (79)",
"phkcge (18)",
"szrnpdw (27)",
"fafrerl (24)",
"aryqw (118) -> ysabu, pkchh",
"twvfw (45)",
"yopex (79)",
"sptjz (24)",
"mwmfw (93)",
"suftfkn (58)",
"fjjuj (1782) -> fwlyuh, tocrk, eryxwj",
"jbztwms (111)",
"ollvgn (15) -> ermgcbt, kjikhxm",
"pcacjm (27)",
"nldrlb (55)",
"yyoptv (49)",
"qfwtxzq (9)",
"xavfse (42) -> hznriv, vkwcj",
"pbrcoyl (40) -> nldrlb, wnhseb",
"dytsvc (12)",
"wpafb (58)",
"aspplbw (4922) -> eaerpmi, hjtwqe, fikvmjg, kaugsh",
"mpwnd (57)",
"xmcqygt (65) -> uryery, tlkive",
"wevkksz (49)",
"ikcjmxi (36)",
"ojcinc (106) -> fjlqp, owgbqb, aafpxpx, gkrtbv",
"jbtqs (86)",
"lksgoz (52)",
"tntqpl (202) -> jdryrup, esbnpk",
"zpbbgqh (53) -> zpedug, hupmm, yjxneui, zldebh",
"rgylin (9)",
"jdglmn (46)",
"pkbitw (47)",
"rfxmk (20)",
"mykvbt (8)",
"twvjddq (38)",
"slmnzei (33)",
"zpqpd (92)",
"lvxnl (3990) -> qeoyu, uymhfo, suprw, ncxhv, drrbwlp, kzwamsk",
"fikvmjg (70) -> arrok, thahonu, pfkbcg, yutfxcu, shkfwm, clsve",
"ekvkidl (66)",
"iwxgwc (41)",
"cbwsr (55)",
"kqltwau (53)",
"jlewu (320) -> zphlpeu, kcxfwz, cbwsr",
"bnryi (37)",
"dletgs (77)",
"jlinuge (60)",
"ennkek (52)",
"rzxyny (22)",
"uivazm (52)",
"hgoesez (214) -> umsilqj, blcnplx",
"xkvtxav (15)",
"clsve (138) -> ofwijoe, zstbuv",
"leefit (70)",
"ymwqj (301)",
"klnemf (76) -> xipivez, tshcqcu, jaathmh, vpzylgj",
"qdpnoic (51)",
"fsuglk (58)",
"dvasofv (44)",
"cjagg (17)",
"kcxfwz (55)",
"kjikhxm (78)",
"nulxd (20)",
"zgyryw (60)",
"nktkgz (29)",
"pprspr (79)",
"wzvjkiu (2006) -> yjtsmy, gjpjta",
"usfvqn (12)",
"utoogeb (183) -> idiorl, txplq",
"gclbhxw (23)",
"fsomlm (74) -> hzbtbe, mkxsdn",
"pxjgtg (219) -> uevcnw, airqzst",
"nfxyjl (52)",
"htaxf (96)",
"ouxsgm (241) -> ynnfzdz, jjbmtij",
"cdanu (37)",
"zpohg (230) -> qewiy, iwlxpz",
"nmshjlp (80)",
"uxxyr (26) -> oomve, xzsfek",
"wcblyq (171)",
"qqbgbeo (78)",
"ibiuha (63)",
"ippnuw (342)",
"jusoe (27)",
"eftrvo (919) -> rgqjtw, fuehgn, eijlg, jlfukd",
"rgyaijv (26)",
"fezoee (41)",
"zmfhyr (47)",
"mxltn (14)",
"rfkvap (655) -> zxmsme, nbybi, xaaqdv",
"umfqiru (24)",
"syeyppr (20)",
"tuldcdj (152) -> ekhsrgq, hrase",
"skbxo (92)",
"lwuvg (56)",
"hzvctd (256) -> dwbirs, scntyh",
"cteuws (91)",
"gjpjta (85)",
"ijzgy (43)",
"rddeecm (46)",
"hvinb (60)",
"gsrui (49)",
"vxtwg (25)",
"vthnh (75)",
"olyohzo (47)",
"btldlkh (35)",
"lbmvl (27)",
"gjvcdp (23) -> azyccec, dqfti",
"spnzn (70) -> ixiqnn, llcpsj, ufhjnc, fzkqz, mqgmc, tntqpl, yvlwtb",
"fjebkm (23)",
"kupmpp (41) -> qmlguo, dqaov, ozwdh, idfxtqr",
"awytebt (2553) -> oyypq, pprdw, fvojv, yelgho",
"rycpngd (54)",
"vpzylgj (33)",
"osbsdhc (166)",
"bdplsy (20)",
"wnhseb (55)",
"emxviup (36)",
"azqje (72) -> holcy, fwbang, inwmb",
"blloue (31)",
"gccvp (74)",
"sfnsx (83) -> prhgge, bjvncf, wevkksz",
"nylej (42)",
"zkphtd (25)",
"drwpdaj (58) -> nylej, fkpjukc, niopwq, kfgyus",
"olvxzb (89) -> tlnuq, zsucroj, dnyaj",
"pjlhta (63) -> zupsoqc, dvkbqm",
"xafip (176) -> rgocso, vkxyhk",
"qihhif (49)",
"ggffqux (60)",
"bbrdet (445) -> bdplsy, syeyppr",
"vbjlbhq (219) -> gsrui, fdorzyx",
"mwhopi (29)",
"eacnma (25)",
"drrbwlp (59) -> ljwcd, voiqnou, rdzvcb, sardhwu, rksykyt",
"dapey (26)",
"rmivzpg (227) -> szrnpdw, jusoe",
"ckfagrp (11)",
"nkuwwiy (79) -> jpexkf, tismupk",
"jejwwxj (50)",
"cukbzsw (441) -> mxltn, zwzgp",
"eobbt (229) -> rxeqfsj, rqbgxlt, ssnoqt",
"ggeae (270)",
"xdfnmvr (147) -> vaztjkc, zoovdc",
"fmcwdv (85)",
"yvjjec (7)",
"kzwamsk (325) -> sebno, haeyms, hdrtnjm",
"hvdwvo (69)",
"bmtjkw (25)",
"rwxfhk (353) -> gmsmnlz, kcbag",
"ynjccf (61)",
"zxozp (42) -> hnofc, fmcwdv, qtvcuqk",
"xcuud (29)",
"xwyggz (76)",
"glbaxl (120) -> imtvzmm, tszune",
"unlwjj (43)",
"hjwjw (63)",
"jugycbw (23)",
"cyxtnfe (13)",
"phrkfo (81)",
"mmutg (1040) -> yyhkwha, xwyyfr, xpjzc, amccpoz",
"vntjo (81)",
"tlkive (39)",
"kbguoiu (7)",
"nsqaxp (575) -> kacpu, gxmyk, aryqw, qywtwbt, beqez",
"tmvjt (29)",
"vskibye (1388) -> sjbalvv, hgoesez, lfzvi, lakzkpk, smunvi, cnlny",
"sgfbfq (95) -> eobbt, ymwqj, sjzapjt, dldcoc, dexwo, ctnucjw, zflsryn",
"aseilg (47)",
"hbzxaji (6140) -> otadcxu, dvpmg, qqpnt, mnkamc, spnzn, uuamcdc, rdjfp",
"xlavrvm (27)",
"wevhizp (26)",
"yrmfcs (9)",
"nsmlghl (99)",
"sdovaq (2558) -> zxozp, pxjgtg, xdfnmvr",
"irpjsbf (16)",
"evbilqr (200)",
"avnxndg (865) -> ippnuw, ensyb, dosteiu",
"utqxez (99)",
"leyohju (28)",
"nzyls (26)",
"lsxwznl (77)",
"zphlpeu (55)",
"ckqwb (234)",
"kligtj (80)",
)
class TestPart1(unittest.TestCase):
    """--- Day 7: Recursive Circus, part 1 ---

    Each input line names a program, its weight, and optionally the programs
    standing on its disc, e.g. ``fwft (72) -> ktlj, cntj, xhth``. Exactly one
    program (the bottom of the tower) appears on no other program's disc;
    ``day_7.Tower(...).root()`` must return its name.
    """

    # Worked example from the puzzle statement; its root is "tknk".
    SAMPLE = (
        "pbga (66)",
        "xhth (57)",
        "ebii (61)",
        "havc (66)",
        "ktlj (57)",
        "fwft (72) -> ktlj, cntj, xhth",
        "qoyq (66)",
        "padx (45) -> pbga, havc, qoyq",
        "tknk (41) -> ugml, padx, fwft",
        "jptl (61)",
        "ugml (68) -> gyxo, ebii, jptl",
        "gyxo (61)",
        "cntj (57)",
    )

    def test_sample(self):
        # The example tower is rooted at "tknk".
        self.assertEqual("tknk", day_7.Tower(self.SAMPLE).root())

    def test_challenge(self):
        # Root of the full puzzle input.
        self.assertEqual("azqje", day_7.Tower(challenge).root())
class TestPart2(unittest.TestCase):
    """--- Day 7: Recursive Circus, part 2 ---

    Exactly one program has the wrong weight, unbalancing the tower.
    ``Tower.adjustment(root)`` must return that program's name and the
    weight it would need for the whole tower to balance.
    """

    # Same worked example as part 1; "ugml" must weigh 60 to balance it.
    SAMPLE = (
        "pbga (66)",
        "xhth (57)",
        "ebii (61)",
        "havc (66)",
        "ktlj (57)",
        "fwft (72) -> ktlj, cntj, xhth",
        "qoyq (66)",
        "padx (45) -> pbga, havc, qoyq",
        "tknk (41) -> ugml, padx, fwft",
        "jptl (61)",
        "ugml (68) -> gyxo, ebii, jptl",
        "gyxo (61)",
        "cntj (57)",
    )

    def test_sample(self):
        tower = day_7.Tower(self.SAMPLE)
        self.assertEqual(('ugml', 60), tower.adjustment(tower.root()))

    def test_challenge(self):
        tower = day_7.Tower(challenge)
        self.assertEqual(('rfkvap', 646), tower.adjustment(tower.root()))
|
from collections import Counter
from project.models.song_info import song
from project.models.inverted_index import InvertedIndex, convert_list
from project.models.base import Database
import math
# Corpus size N used in the idf term log10(N / df) of the tf-idf weighting.
num_of_song = 99
# Blended ranking weights a song-name match this many times a lyric match.
name_weight = 2
def get_weight_list(text):
    """Parse a serialized weight vector into a Counter {song/term id: tf-idf}.

    *text* is the stored column of space-separated ``id:weight`` tokens
    (e.g. ``"1:2.34 2:4.113"``); tokenization is delegated to convert_list.
    """
    weights = Counter()
    for token in convert_list(text):
        # Idiom fix: a single .strip() replaces the original .lstrip().rstrip().
        parts = token.strip().split(":")
        weights[int(parts[0])] = float(parts[1])
    return weights
# Sort a Counter by weight and return it in parallel-list form.
def counter_to_list(co):
    """Flatten Counter *co* into {"id_list": [...], "w_list": [...]}.

    Entries are ordered by descending weight (ties keep Counter.most_common's
    insertion order); id_list[i] is the id whose score is w_list[i].
    """
    ids, weights = [], []
    # most_common() already returns a list — the original wrapped it in a
    # redundant list() call.
    for sid, weight in co.most_common():
        ids.append(sid)
        weights.append(weight)
    return {"id_list": ids, "w_list": weights}
# Handles a free-text search query under the vector-space model: the query is
# treated as a pseudo-song, turned into tf-idf vectors (one over lyrics, one
# over song names), and candidate songs are scored by a cosine-style product.
class VectorSearch:
    def __init__(self, text):
        """Wrap query *text* in a song object (id -1) and prepare accumulators."""
        self.text = text
        self.song = song(-1, text)
        # Term frequencies of the query text itself.
        self.tf = Counter()
        # Format: { sid: tf-idf, sid: tf-idf }
        self.lyric_tf_idf = Counter()
        self.name_tf_idf = Counter()
        # Candidate song ids whose lyric/name postings share a term with the query.
        self.lyric_list = []
        self.name_list = []
        # Similarity score per candidate song id, filled by get_cos_list().
        self.lyric_cos = Counter()
        self.name_cos = Counter()
    def get_tf(self):
        """Compute the query's term-frequency Counter."""
        self.tf = self.song.get_tf()
    # Compute each query term's tf-idf value (kind 0 = lyrics, 1 = song names).
    def get_tf_idf(self, kind=0):
        words = self.song.getwords()
        for word in words:
            data = InvertedIndex.get_word_info(word, kind)
            if data['num'] != 0:  # skip terms the index has never seen (df == 0)
                if kind == 0:
                    # Log-weighted tf-idf: (1 + log10(tf)) * log10(N / df).
                    self.lyric_tf_idf[data['id']] = (1+math.log10(self.tf[word]))*math.log10(num_of_song/data['num'])
                    # Union this term's posting list into the candidate set.
                    self.lyric_list = list(set(self.lyric_list).union(set(data['list'])))
                else:
                    self.name_tf_idf[data['id']] = (1 + math.log10(self.tf[word])) * math.log10(num_of_song / data['num'])
                    self.name_list = list(set(self.name_list).union(set(data['list'])))
    def deal(self):
        """Run the query-side pipeline: tf, then tf-idf for both fields."""
        self.get_tf()
        self.get_tf_idf(0)
        self.get_tf_idf(1)
    def get_one_cos(self, sid, kind=0):
        """Score one candidate song: dot(query, song) / song's stored length.
        Only the song side is normalized; the query length is a constant
        factor across candidates so it does not affect the ranking."""
        vector_space = VectorSpace(sid, kind)
        res = 0
        for tf_idf_value in vector_space.w_list.items():
            if kind == 0:
                res += self.lyric_tf_idf[tf_idf_value[0]]*tf_idf_value[1]
            else:
                res += self.name_tf_idf[tf_idf_value[0]]*tf_idf_value[1]
        return res/vector_space.length
    def get_cos_list(self, kind=0):
        """Score every candidate in the chosen field's candidate list."""
        if kind == 0:
            the_list = self.lyric_list
        else:
            the_list = self.name_list
        for sid in the_list:
            if kind == 0:
                self.lyric_cos[sid] = self.get_one_cos(sid, kind)
            else:
                self.name_cos[sid] = self.get_one_cos(sid, kind)
    def get_sort(self, kind=0):
        """Return ranked results in counter_to_list form.
        kind 0: lyric scores only; kind 1: name scores only; any other value
        blends the two, weighting name matches by the module's name_weight."""
        self.deal()
        self.get_cos_list(0)
        self.get_cos_list(1)
        if kind == 0:
            return counter_to_list(self.lyric_cos)
        elif kind == 1:
            return counter_to_list(self.name_cos)
        w_list = Counter()
        keys = set(self.lyric_cos).union(self.name_cos)
        for key in keys:
            w_list[key] = self.name_cos[key]*name_weight + self.lyric_cos[key]
        return counter_to_list(w_list)
# This class converts one song's content into the row stored in the database
# Row shape: song id, "sid:tf-idf" list, tf-idf vector length
# Example:   4       1:2.34 2:4.113 5:1.341   3.1435
class VectorIndex:
    """Builds the tf-idf index entry (weights + vector length) for one song."""
    def __init__(self, text, id, kind=0):
        self.vector_search = VectorSearch(text)
        self.vector_search.deal()
        self.kind = kind
        self.id = id
    def get_index(self):
        """Return (song-id, tf-idf) pairs for the configured index kind."""
        source = (self.vector_search.lyric_tf_idf if self.kind == 0
                  else self.vector_search.name_tf_idf)
        return source.items()
    def get_length(self):
        """Root-mean-square of the tf-idf weights.

        NOTE(review): raises ZeroDivisionError when no term matched -- same
        as the original implementation.
        """
        weights = self.get_index()
        total = sum(w * w for _, w in weights)
        return math.sqrt(total / len(weights))
    def get_json(self):
        """Serialize into the document shape the database layer expects."""
        return {
            'id': self.id,
            'tf-idf-matrix': list(self.get_index()),
            'length': self.get_length(),
        }
# This class loads a vector_index row from the database into the needed format
class VectorSpace:
    """Loads one song's stored tf-idf vector (weights + precomputed length)
    from the lyric/name vector-index table; empty vector when no row exists.
    """
    def __init__(self, sid, kind=0):
        # kind: 0 -> lyric_vector_index table, otherwise -> name_vector_index.
        self.id = sid
        self.kind = kind
        m_d = Database()
        # int() cast keeps the %d interpolation safe against non-numeric sids.
        if kind == 0:
            sql = "SELECT * from lyric_vector_index where sid = %d" % int(sid)
        else:
            sql = "SELECT * from name_vector_index where sid = %d" % int(sid)
        m_d.cursor.execute(sql)
        data = m_d.cursor.fetchall()
        if not data == ():
            # Row layout: (sid, "id:weight ..." string, vector length).
            self.w_list = get_weight_list(data[0][1])
            self.length = data[0][2]
        else:
            self.w_list = Counter()
            self.length = 0
    @staticmethod
    def get_vector_list(kind=0):
        """Return up to 10 vector-index rows joined with their song_list rows."""
        m_d = Database()
        if kind == 0:
            sql = "SELECT * from lyric_vector_index " \
                  "INNER JOIN song_list " \
                  "on song_list.ID = lyric_vector_index.Sid limit 10"
        else:
            sql = "SELECT * from name_vector_index " \
                  "INNER JOIN song_list " \
                  "on song_list.ID = name_vector_index.Sid limit 10"
        m_d.cursor.execute(sql)
        data = m_d.cursor.fetchall()
        return data
|
from pymoo.model.crossover import Crossover
from pymoo.operators.crossover.util import crossover_mask
import numpy as np
from random import random,randrange
def computeConsensus(solution, seqs, l_mer):
    """Build the consensus string for a set of motif positions.

    Args:
        solution: per-sequence start offsets (numeric; cast to int).
        seqs: the sequences being searched.
        l_mer: motif length.

    Returns:
        An l_mer-long string; at each column the most frequent base wins,
        with ties broken in A > C > G > T order (same as the original
        if/elif chain).  Any character other than A/C/G is counted as T,
        matching the original's final else branch.

    The redundant secondary index (j, always equal to i) and the leftover
    debug print of the consensus were removed.
    """
    consensus = []
    for offset in range(l_mer):
        counts = {'A': 0, 'C': 0, 'G': 0, 'T': 0}
        for k, start in enumerate(solution):
            base = seqs[k][int(start) + offset]
            if base in counts:
                counts[base] += 1
            else:
                counts['T'] += 1  # non-ACGT characters lump into T
        best = max(counts.values())
        for base in 'ACGT':  # fixed order preserves the original tie-break
            if counts[base] == best:
                consensus.append(base)
                break
    return ''.join(consensus)
def hammingDistance(str1, str2):
    """Return the fraction of positions at which str1 and str2 AGREE.

    NOTE(review): despite the name, this is a similarity score
    (matches / len(str1)), i.e. 1 - normalized Hamming distance.  The
    orientation is preserved because callers rely on it.  Comparison now
    stops at the shorter string (zip) instead of raising IndexError when
    str2 is shorter; raises ZeroDivisionError for empty str1, as before.
    """
    matches = sum(a == b for a, b in zip(str1, str2))
    return matches / len(str1)
class MyCrossover(Crossover):
    """Crossover that, with probability `prob`, swaps one randomly chosen
    sequence position between the two parents of each mating."""
    def __init__(self, prob=0.5, **kwargs):
        # Two parents in, two offspring out.
        super().__init__(2, 2, **kwargs)
        self.prob = prob
    def _do(self, problem, X, **kwargs):
        # prob=None means "default to 1 / number of matings", resolved lazily.
        if self.prob is None:
            self.prob = 1 / len(X[0])
        offspring = np.copy(X)
        for mating in range(len(offspring[0])):
            draw = random()
            if draw <= self.prob:
                pos = randrange(len(problem.seqs))
                offspring[0][mating][pos], offspring[1][mating][pos] = \
                    offspring[1][mating][pos], offspring[0][mating][pos]
        return offspring
|
from typing import List
class Solution:
    def groupAnagrams(self, strs: List[str]) -> List[List[str]]:
        """Group words that are anagrams of each other.

        Words sharing the same sorted-character key land in the same group;
        group order and in-group order follow first appearance.  The loop
        variable no longer shadows the builtin `str`.
        """
        groups = {}
        for word in strs:
            groups.setdefault(''.join(sorted(word)), []).append(word)
        return list(groups.values())
solution = Solution()
# ans = solution.groupAnagrams(["eat", "tea", "tan", "ate", "nat", "bat"])
# Smoke test: ten words with no anagram pairs -> ten singleton groups.
ans = solution.groupAnagrams(["cab", "tin", "pew", "duh", "may", "ill", "buy", "bar", "max", "doc"])
print(ans)
|
from mongo import db
def get_user(uid, allow_test=False):
    """Fetch one user document by id; test accounts are excluded unless
    allow_test is True."""
    criteria = {'_id': uid} if allow_test else {'_id': uid, 'is_test': False}
    return db.users.find_one(criteria)
def get_all_users(limit=0, allow_test=False):
    """Return [{"name": ..., "id": ...}, ...] for every user.

    limit=0 means no limit (MongoDB convention); test accounts are excluded
    unless allow_test is True.
    """
    query = {}
    if not allow_test:
        query['is_test'] = False
    # List comprehension instead of list(generator) -- same result, clearer.
    return [{"name": u["name"], "id": u["_id"]}
            for u in db.users.find(query, limit=limit)]
def create_user(uid, name):
    """Insert a new user document keyed by uid."""
    # NOTE(review): Collection.insert is deprecated in modern pymongo in
    # favour of insert_one -- confirm the installed driver version.
    db.users.insert({'_id': uid, 'name': name})
|
# -*- coding: utf-8 -*-
def strToNumBase(numStr, base):
    """Interpret the digit string numStr in the given base.

    Jamcoin strings consist only of 0/1 digits, for which int(s, base)
    computes exactly what the original digit-by-digit base**power loop did
    (it summed base**position for each '1').  This also generalizes to any
    valid digit string; unlike the old loop, digits that are invalid for
    the base now raise ValueError instead of being silently ignored.
    """
    return int(numStr, base)
def getPrimes(num):
    """Return all primes <= num, in increasing order, by trial division.

    Fixes: uses range() (xrange does not exist on Python 3; range works on
    both), and stops trial division at sqrt(candidate) instead of testing
    every known prime -- same results, far less work.
    """
    primes = []
    for candidate in range(2, num + 1):
        isPrime = True
        for p in primes:
            if p * p > candidate:
                break  # no prime divisor <= sqrt(candidate) -> prime
            if candidate % p == 0:
                isPrime = False
                break
        if isPrime:
            primes.append(candidate)
    return primes
def getDivisorLimited(num,limit):
    """Look for a nontrivial prime divisor of num, testing primes up to `limit`.

    Returns:
        a prime divisor of num, when one is found;
        -1 when num is proved prime (all primes up to sqrt(num) were tried,
           or there were no primes to try at all);
        -2 inconclusive: no divisor among primes <= limit, but sqrt(num)
           exceeds limit so num might still be composite.

    NOTE(review): the parameter `limit` is reused below as a boolean
    "search was capped" flag -- confusing but behaviorally intentional.
    """
    i=0
    result=True
    if int(num**(0.5))<limit:
        # sqrt(num) is below the cap: exhaustive search is affordable.
        primes=getPrimes(int(num**(0.5)))
        limit=False
    else:
        # Capped search; a "no divisor" outcome is then inconclusive (-2).
        primes=getPrimes(limit)
        limit=True
    numPrimes=len(primes)
    while result and i<numPrimes:
        if num%primes[i]==0:
            result=False
            limit=False
        i+=1
    if limit:
        return -2
    elif result or primes==[]:
        return -1
    else:
        # primes[i-1] is the divisor that terminated the loop.
        return primes[i-1]
def produceJamCoins(numCoins,length):
    """Generate up to numCoins 'jamcoins' of the given digit length.

    A jamcoin is a 0/1 string framed by 1s that is composite when read in
    every base 2..10.  Returns {coin: [divisor for each base 2..10]}, or
    None when length <= 3 (too short to hold the 1...1 frame).
    """
    if length<=3:
        return None
    else:
        jamCoins={}
        num=0
        # The length-2 middle digits enumerate all binary values of num.
        maxNum=2**(length-2)
        while len(jamCoins)!=numCoins and num<maxNum:
            binNum=bin(num)[2:]
            binNumLen=len(binNum)
            if binNumLen!=length-2:
                # Zero-pad the middle section to exactly length-2 digits.
                binNum=(length-2-binNumLen)*'0'+binNum
            coin='1'+binNum+'1'
            coinDivisors=[]
            result=True
            base=2
            while result and base<=10:
                coinToBase=strToNumBase(coin,base)
                # -1 (prime) or -2 (inconclusive) disqualifies the candidate.
                divisor=getDivisorLimited(coinToBase,10**3)
                if divisor==-1 or divisor==-2:
                    result=False
                else:
                    coinDivisors.append(divisor)
                base+=1
            if base==11 and len(coinDivisors)==9:
                # All nine bases (2..10) yielded a divisor: accept the coin.
                jamCoins[coin]=coinDivisors
            num+=1
        return jamCoins
def main(fout):
    """Write the Code Jam answer (500 jamcoins of length 32) to fout."""
    fout.write('Case #'+str(1)+': \n')
    jamCoins = produceJamCoins(500, 32)
    for coin, divisors in jamCoins.items():
        fout.write(coin)
        fout.write(''.join(' ' + str(d) for d in divisors))
        fout.write('\n')
# Raw string fixes the path literal: '\U' in a normal string is an invalid
# (unicode) escape and a SyntaxError on Python 3.  `with` guarantees the
# file is closed even if main() raises.
with open(r'C:\Users\exin1\Google Drive\Study\Google CodeJam\C.out', 'w') as fout:
    main(fout)
import random
from pygame.sprite import Group
from plane.base_plane import BasePlane
from plane.missile_gun import MissileGun
class EnemyPlane(BasePlane):
    """Enemy plane that wanders with per-instance random steering and speed
    and manages its own missile bullets."""
    def __init__(self, ai_settings, screen):
        BasePlane.__init__(self, ai_settings, screen, "images/ship6small.png")
        # Steering range and speed are randomized once per plane.  The three
        # randint calls stay in this order to preserve RNG-seeded behavior.
        self.min = random.randint(0, 30) - 30
        self.max = random.randint(0, 30)
        self.speed = random.randint(1, 3)
        self.counter = 0
        self.bullets = Group()
        self.gun = MissileGun(ai_settings, screen)
    def fire_bullet(self):
        """Fire from the current position and track the new bullets."""
        self.bullets.add(self.gun.fire(self.point))
    def update(self):
        """Advance one frame: turn every 10th frame, drive, move bullets."""
        self.counter += 1
        if self.counter % 10 == 0:
            self.turn(random.randint(self.min, self.max))
        self.drive(self.speed)
        self.bullets.update()
|
# PEP8
# Комментарий комметариевич
# name = input('Ваше имя?\n')
# Keep prompting until the user types digits only, then convert once.
while True:
    age = input('Возвраст, число \n')
    if not age.isdigit():
        print('Введите чЕсло')
        continue
    age = int(age)
    break
print(age)
# this_year = 2020
#
# if age.isdigit():
# age = int(age)
# b_year = this_year - age
# print(b_year)
# while True:
# if not age:
# break
# print(age)
# age -= 1
#
# if age >= 18:
# print('УРА!')
# elif age >= 16:
# print('Есть эротика')
# elif 12 >= age > 8:
# print('Есть новое аниме')
# else:
# print('Есть смешарики!')
#
# hello_str = "Hello,bro!"
# hello_str.isdigit()
# var_int = 33
# var_int2 = 44
#
# var_int3 = var_int + var_int2
#
|
from datetime import datetime
from typing import List, Optional
from pydantic import BaseModel
# Define choices
from enum import Enum
class Species(str, Enum):
    """Closed set of allowed `species` values; the str mixin keeps the
    values JSON/string friendly."""
    human = 'human'
    alien = 'alien'
class User(BaseModel):
    """Demo pydantic model: required, optional, defaulted and enum fields."""
    id: int
    name: str
    # Optional: it's either the specified type or `None` by default
    signup_ts: Optional[datetime]  # = default_value
    friends: List[int] = []
    # Static values
    # NOTE(review): no annotation -- pydantic treats this as a plain class
    # attribute rather than a validated field; confirm that is intended.
    alive = True
    # Validating choices
    species: Species
    # Using custom classes
    # custom_class = CustomClass
# Deliberately mixed input types: pydantic coerces compatible values.
external_data = {
    'id': '123',
    'name': 'John Doe',
    # 'signup_ts': '2019-06-01 12:22',
    'friends': [1, 2, '3'],  # Types are converted if possible
    'species': Species.alien,
}
user = User(**external_data)
print(user.id)
print(repr(user.signup_ts))
print(user.friends)
# Serialize the validated model back to a plain dict.
print(user.dict())
|
import urllib
import bson
from datetime import datetime
import setting
# One timestamped database per crawl run, e.g. PW_2015_01_31_12_00_00.
timestamp = datetime.utcnow().strftime("%Y_%m_%d_%H_%M_%S")
db = setting.db_connection["PW_" + timestamp]
#This method is used to get entries for both APIs and Mashups from Programmableweb
#This method is used to get entries for both APIs and Mashups from Programmableweb
def getEntries(service_type):
    """Crawl every listing page for `service_type` ("apis" or "mashups")
    and insert each entry into the run's MongoDB collection.

    Failures (bad documents, unparseable pages) are appended to
    files/error/error_<service_type>_entries instead of aborting the crawl.
    """
    #the page number for response
    apikey = "?apikey=24aaa22632fe29dd1abee0381f7ae2b6"
    basic_url = "http://api.programmableweb.com/" + service_type + "/-/"
    exception_file = open(setting.working_path + "/files/error/error_"+service_type+"_entries","a")
    page = 1
    url = basic_url + apikey + "&alt=json&page=" + str(page)
    url.encode("UTF-8")  # NOTE(review): return value discarded -- no effect
    content = urllib.urlopen(url).read()
    # SECURITY: eval() executes whatever the remote server returns.  Flagged,
    # not replaced: the feed may not be strict JSON (json.loads could differ).
    metadata = eval("{"+content.rstrip(";").partition("{")[2])
    errornum = 0
    while eval(metadata["startIndex"]) < eval(metadata["totalResults"]):
        print eval(metadata["startIndex"])
        if service_type == "apis":
            document = db.apis
        else: document = db.mashups
        for entry in metadata["entries"]:
            #Pre-processing, make all "." to "_" in mashup's apis's keys for MongoDB
            if service_type == "mashups":
                for key in sorted(entry["apis"]):
                    if key.find(".") != -1:
                        newkey = key.replace(".","_")
                        value = entry["apis"][key]
                        del entry["apis"][key]
                        entry["apis"][newkey] = value
            #exception handling for mongoDB, and record all the error in file
            try:
                document.insert(entry)
            except (bson.errors.InvalidStringData, bson.errors.InvalidDocument), e:
                print e," insert"
                errornum += 1
                exception_file.write(str(datetime.now()) + "\n")
                exception_file.write(str(errornum) + " ")
                exception_file.write(str(entry) + "\n")
                exception_file.write(str(e))
                exception_file.write("\n")
        page +=1
        url = basic_url + apikey + "&alt=json&page="+str(page)
        url = url.encode("UTF-8")
        #pre-processing for replace all the windows' \r
        content = urllib.urlopen(url).read().replace("\r"," ")
        #exception handling for reading from response, and record all the error in file
        try:
            metadata = eval("{"+content.rstrip(";").partition("{")[2])
        except Exception,e:
            print e,"reading"
            errornum += 1
            exception_file.write(str(datetime.now()) + "\n")
            exception_file.write(str(errornum) + " ")
            # NOTE(review): duplicated errornum write -- probably meant once.
            exception_file.write(str(errornum) + " ")
            exception_file.write(str(content) + "\n")
            exception_file.write(str(e) + "\n")
            exception_file.write("\n")
    exception_file.close()
log_file_path = "../log"  # shared append-only log for duplicate warnings
#This method check duplicated apis, and write Warning to log
def get_duplicated_apis():
    """Return ([duplicate api ids], count) and append a warning to the log."""
    duplicated_lst = []
    count = 0
    # MongoDB group(): count documents per "id" field.
    lst = db.apis.group({"id":1},condition = [], initial={'count':0},reduce="function(obj,prev){prev.count++;}")
    for entry in lst:
        if entry["count"] > 1:
            duplicated_lst.append(entry["id"])
    if len(duplicated_lst) > 0:
        log = open(log_file_path, "a")
        log.write(str(len(duplicated_lst)) + " apis duplicated in db:\n")
        for entry in duplicated_lst:
            log.write(str(entry))
    print duplicated_lst, len(duplicated_lst)
    return duplicated_lst, len(duplicated_lst)
#This method is used to find duplicated mashups in ProgrammableWeb
def get_duplicated_mashups():
    """Return ([duplicate mashup ids], count); prints them for visibility."""
    duplicated_lst = []
    count = 0
    lst = db.mashups.group({"id":1},condition = [], initial={'count':0},reduce="function(obj,prev){prev.count++;}")
    for entry in lst:
        if entry["count"] > 1:
            duplicated_lst.append(entry["id"])
    print duplicated_lst, len(duplicated_lst)
    return duplicated_lst, len(duplicated_lst)
#delete the duplicated mushups in db
def delete_duplicates():
    """For each duplicated mashup id, keep the entry ProgrammableWeb itself
    returns and delete the other local copy, repairing the api/mashup pair
    table along the way."""
    duplicated_ids = get_duplicated_mashups()[0]
    for duplicated_id in duplicated_ids:
        url = "http://api.programmableweb.com/mashups/"+duplicated_id.rpartition("/")[2]+"?apikey=24aaa22632fe29dd1abee0381f7ae2b6&alt=json"
        #get Programmable responded one for duplicated mashups, and going to delete the other one
        content = urllib.urlopen(url).read()
        # SECURITY: eval() on remote response content -- see getEntries().
        metadata = eval("{"+content.rstrip(";").partition("{")[2])
        entries = metadata["entries"]
        #currently each duplicated mashup has two entries and only return 1 when querying by id
        #write to log when detecting more than 1 entries in response
        if len(entries) > 1:
            log = open(log_file_path,"a")
            log.write(str(datetime.now()))
            log.write("Warming:the duplicated mashup returned " + str(len(entries)) + " from API response:\n")
            log.write(entries)
        deleted_entry = {}
        print entries[0]['id']
        for entry in db.mashups.find({'id': entries[0]['id']}, {'_id':0}):
            if entry != entries[0]:
                deleted_entry = entry
                db.mashups.remove(entry, True)
        #remove from pair database, but this will remove the shared apis used by duplicated mashups. We should add back one later.
        for api_id in deleted_entry["apis"].values():
            db.pairs.remove({"api":api_id,"mashup":duplicated_id})
            # Re-insert pairs for apis the surviving mashup still uses.
            if api_id in db.mashups.find_one({"id":duplicated_id})["apis"].values():
                db.pairs.insert({"api":api_id,"mashup":duplicated_id})
|
import numpy as np
from caffe2.python import \
core, device_checker, gradient_checker, test_util, workspace
from caffe2.proto import caffe2_pb2, caffe2_legacy_pb2
import collections
import sys
import unittest
core.GlobalInit(["python"])
# Build the device and gradient checkers once at import time.  With GPU
# support we check on both devices; otherwise CPU only.
# NOTE(review): the module name `device_checker` is rebound below to a
# DeviceChecker *instance*; the module itself is unreachable afterwards.
if workspace.has_gpu_support and workspace.NumberOfGPUs() > 0:
    gpu_device_option = caffe2_pb2.DeviceOption()
    gpu_device_option.device_type = caffe2_pb2.CUDA
    cpu_device_option = caffe2_pb2.DeviceOption()
    gpu_device_checker = device_checker.DeviceChecker(
        0.01, [gpu_device_option]
    )
    device_checker = device_checker.DeviceChecker(
        0.01, [gpu_device_option, cpu_device_option]
    )
    gpu_gradient_checkers = [
        gradient_checker.GradientChecker(
            0.005, 0.05, gpu_device_option, "gpu_checker_ws"
        ),
    ]
    gradient_checkers = [
        gradient_checker.GradientChecker(
            0.005, 0.05, gpu_device_option, "gpu_checker_ws"
        ),
        gradient_checker.GradientChecker(
            0.01, 0.05, cpu_device_option, "cpu_checker_ws"
        ),
    ]
else:
    cpu_device_option = caffe2_pb2.DeviceOption()
    gpu_device_option = None
    # No devices: this checker trivially passes, keeping tests importable.
    gpu_device_checker = device_checker.DeviceChecker(
        0.01, []
    )
    device_checker = device_checker.DeviceChecker(0.01, [cpu_device_option])
    gradient_checkers = [
        gradient_checker.GradientChecker(
            0.01, 0.05, cpu_device_option, "cpu_checker_ws"
        )
    ]
    gpu_gradient_checkers = []
class TestConv(test_util.TestCase):
    """Conv operator checks: forward/gradient over many configs, plus
    NCHW/NHWC layout agreement.  Fixes the method-name typo
    testConvolutionnPadding -> testConvolutionPadding (still discovered by
    unittest's test* prefix)."""
    def setUp(self):
        # (stride, kernel, pad, size, order, engine)
        self.test_configs = [
            (1, 1, 0, 7, "NHWC", ""),
            (1, 1, 1, 7, "NHWC", "CUDNN"),
            (1, 3, 0, 7, "NHWC", ""),
            (1, 3, 1, 7, "NHWC", "CUDNN"),
            (1, 3, 2, 7, "NHWC", ""),
            (2, 3, 0, 7, "NHWC", "CUDNN"),
            (2, 3, 1, 7, "NHWC", ""),
            (2, 3, 2, 7, "NHWC", "CUDNN"),
            (1, 5, 0, 10, "NHWC", ""),
            (1, 5, 1, 10, "NHWC", "CUDNN"),
            (1, 5, 2, 10, "NHWC", ""),
            (1, 1, 0, 7, "NCHW", "CUDNN"),
            (1, 1, 1, 7, "NCHW", ""),
            (1, 3, 0, 7, "NCHW", "CUDNN"),
            (1, 3, 1, 7, "NCHW", ""),
            (1, 3, 2, 7, "NCHW", "CUDNN"),
            (2, 3, 0, 7, "NCHW", ""),
            (2, 3, 1, 7, "NCHW", "CUDNN"),
            (2, 3, 2, 7, "NCHW", ""),
            (1, 5, 0, 10, "NCHW", "CUDNN"),
            (1, 5, 1, 10, "NCHW", ""),
            (1, 5, 2, 10, "NCHW", "CUDNN"),
        ]
    def testConvolutionPadding(self):
        """Forward + numeric-gradient check for every config, all 3 inputs."""
        for stride, kernel, pad, size, order, engine in self.test_configs:
            print('conv {} {} {} {} {} {}'.format(
                stride, kernel, pad, size, order, engine)
            )
            op = core.CreateOperator("Conv",
                                     ["X", "w", "b"],
                                     ["Y"],
                                     stride=stride,
                                     kernel=kernel,
                                     pad=pad,
                                     order=order,
                                     engine=engine,
                                     )
            if order == "NHWC":
                X = np.random.rand(2, size, size, 3).astype(np.float32) - 0.5
                w = np.random.rand(4, kernel, kernel,
                                   3).astype(np.float32) - 0.5
            else:
                X = np.random.rand(2, 3, size, size).astype(np.float32) - 0.5
                w = np.random.rand(4, 3, kernel,
                                   kernel).astype(np.float32) - 0.5
            b = np.random.rand(4).astype(np.float32) - 0.5
            res = device_checker.CheckSimple(op, [X, w, b], [0])
            self.assertTrue(res)
            for checker in gradient_checkers:
                for i in range(3):  # gradient wrt X, w, and b
                    res, grad, grad_estimated = checker.CheckSimple(
                        op, [X, w, b], i, [0]
                    )
                    self.assertTrue(res)
    def testConvolutionLayoutCorrespondence(self):
        """NCHW and NHWC runs of the same conv must produce the same output."""
        for stride, kernel, pad, size, _, engine in self.test_configs:
            print('conv {} {} {} {} {}'.format(
                stride, kernel, pad, size, engine)
            )
            for device_option in device_checker._device_options:
                X = np.random.rand(2, size, size, 3).astype(np.float32) - 0.5
                w = np.random.rand(4, kernel, kernel,
                                   3).astype(np.float32) - 0.5
                b = np.random.rand(4).astype(np.float32) - 0.5
                outputs = {}
                for order in ["NCHW", "NHWC"]:
                    op = core.CreateOperator("Conv",
                                             ["X", "w", "b"],
                                             ["Y"],
                                             stride=stride,
                                             kernel=kernel,
                                             pad=pad,
                                             order=order,
                                             engine=engine,
                                             device_option=device_option,
                                             )
                    if order == "NCHW":
                        # Same data, transposed into channel-first layout.
                        X_f = X.transpose((0, 3, 1, 2))
                        w_f = w.transpose((0, 3, 1, 2))
                    else:
                        X_f = X
                        w_f = w
                    workspace.FeedBlob("X", X_f, device_option=device_option)
                    workspace.FeedBlob("w", w_f, device_option=device_option)
                    workspace.FeedBlob("b", b, device_option=device_option)
                    workspace.RunOperatorOnce(op)
                    outputs[order] = workspace.FetchBlob("Y")
                np.testing.assert_allclose(
                    outputs["NCHW"],
                    outputs["NHWC"].transpose((0, 3, 1, 2)),
                    atol=1e-4,
                    rtol=1e-4)
class TestConvLegacyPooling(test_util.TestCase):
    """Conv with legacy_pad (old Caffe-style padding): forward + gradients."""
    def setUp(self):
        # stride, kernel, legacy_pad, size, order
        self.test_configs = [
            (1, 1, 1, 7, "NHWC"),
            (1, 1, 2, 7, "NHWC"),
            (1, 3, 1, 7, "NHWC"),
            (1, 3, 2, 7, "NHWC"),
            (1, 5, 1, 10, "NHWC"),
            (1, 5, 2, 10, "NHWC"),
            (2, 7, 1, 10, "NHWC"),
            (2, 7, 2, 10, "NHWC"),
            (1, 1, 1, 7, "NCHW"),
            (1, 1, 2, 7, "NCHW"),
            (1, 3, 1, 7, "NCHW"),
            (1, 3, 2, 7, "NCHW"),
            (1, 5, 1, 10, "NCHW"),
            (1, 5, 2, 10, "NCHW"),
            (2, 7, 1, 10, "NCHW"),
            (2, 7, 2, 10, "NCHW"),
        ]
    def testConvolutionLegacyPadding(self):
        """Check forward and the gradient wrt each of X, w, b per config."""
        for stride, kernel, legacy_pad, size, order in self.test_configs:
            print('conv legacypad {} {} {} {} {}'.format(
                stride, kernel, legacy_pad, size, order)
            )
            op = core.CreateOperator("Conv",
                                     ["X", "w", "b"],
                                     ["Y"],
                                     stride=stride,
                                     kernel=kernel,
                                     legacy_pad=legacy_pad,
                                     order=order
                                     )
            if order == "NHWC":
                X = np.random.rand(2, size, size, 3).astype(np.float32) - 0.5
                w = np.random.rand(4, kernel, kernel,
                                   3).astype(np.float32) - 0.5
            else:
                X = np.random.rand(2, 3, size, size).astype(np.float32) - 0.5
                w = np.random.rand(4, 3, kernel,
                                   kernel).astype(np.float32) - 0.5
            b = np.random.rand(4).astype(np.float32) - 0.5
            res = device_checker.CheckSimple(op, [X, w, b], [0])
            self.assertTrue(res)
            for checker in gradient_checkers:
                for i in range(3):
                    res, grad, grad_estimated = checker.CheckSimple(
                        op, [X, w, b], i, [0]
                    )
                    self.assertTrue(res)
class TestMaxPoolingLegacyPadding(test_util.TestCase):
    """MaxPool with legacy padding: forward + gradient checks."""
    def setUp(self):
        # (stride, kernel, legacy_pad, size, order)
        self.test_configs = [
            (2, 3, 2, 12, "NHWC"),
            (2, 3, 2, 16, "NHWC"),
            (1, 3, 2, 8, "NHWC"),
            (1, 3, 2, 14, "NHWC"),
            (2, 3, 2, 14, "NHWC"),
            (1, 3, 2, 7, "NHWC"),
            (2, 3, 2, 12, "NCHW"),
            (2, 3, 2, 16, "NCHW"),
            (1, 3, 2, 8, "NCHW"),
            (1, 3, 2, 14, "NCHW"),
            (2, 3, 2, 14, "NCHW"),
            (1, 3, 2, 7, "NCHW"),
        ]
    def testMaxPoolingLegacyPadding(self):
        for stride, kernel, legacy_pad, size, order in self.test_configs:
            print('MaxPool {} {} {} {} {}'.format(stride, kernel, legacy_pad,
                                                  size, order))
            op = core.CreateOperator("MaxPool",
                                     ["X"],
                                     ["Y"],
                                     stride=stride,
                                     kernel=kernel,
                                     legacy_pad=legacy_pad,
                                     order=order
                                     )
            # In order to avoid the problem of race conditions, we will do a
            # randperm so that the values will be apart at least 0.01
            if order == "NHWC":
                X = np.random.permutation(1 * size * size * 3).reshape(
                    1, size, size, 3).astype(np.float32) * 0.01
            else:
                X = np.random.permutation(1 * size * size * 3).reshape(
                    1, 3, size, size).astype(np.float32) * 0.01
            res = device_checker.CheckSimple(op, [X], [0])
            self.assertTrue(res)
            for checker in gradient_checkers:
                res, grad, grad_estimated = checker.CheckSimple(op, [X], 0, [0])
                self.assertTrue(res)
class TestAveragePoolingLegacyPadding(test_util.TestCase):
    """AveragePool with legacy padding: forward + gradient checks."""
    def setUp(self):
        # (stride, kernel, legacy_pad, size, order)
        self.test_configs = [
            (1, 7, 1, 7, "NHWC"),
            (1, 7, 2, 7, "NHWC"),
            (1, 7, 1, 7, "NCHW"),
            (1, 7, 2, 7, "NCHW"),
        ]
    def testAveragePoolingLegacyPadding(self):
        for stride, kernel, legacy_pad, size, order in self.test_configs:
            print('AveragePool {} {} {} {} {}'.format(
                stride, kernel, legacy_pad, size, order))
            op = core.CreateOperator("AveragePool",
                                     ["X"],
                                     ["Y"],
                                     stride=stride,
                                     kernel=kernel,
                                     legacy_pad=legacy_pad,
                                     order=order
                                     )
            if order == "NHWC":
                X = np.random.rand(2, size, size, 3).astype(np.float32)
            else:
                X = np.random.rand(2, 3, size, size).astype(np.float32)
            res = device_checker.CheckSimple(op, [X], [0])
            self.assertTrue(res)
            for checker in gradient_checkers:
                res, grad, grad_estimated = checker.CheckSimple(op, [X], 0, [0])
                self.assertTrue(res)
class TestLRN(test_util.TestCase):
    """Local response normalization: forward + gradient over two shapes."""
    def setUp(self):
        # (input_size, depth)
        self.test_configs = [(6, 10), (3, 13), ]
    def testLRN(self):
        for input_size, depth in self.test_configs:
            op = core.CreateOperator("LRN",
                                     ["X"],
                                     ["Y", "Y_scale"],
                                     size=11,
                                     alpha=0.001,
                                     beta=0.5,
                                     bias=2.0,
                                     order="NHWC"
                                     )
            X = np.random.rand(2, input_size, input_size,
                               depth).astype(np.float32)
            res = device_checker.CheckSimple(op, [X], [0])
            self.assertTrue(res)
            for checker in gradient_checkers:
                res, grad, grad_estimated = checker.CheckSimple(op, [X], 0, [0])
                self.assertTrue(res)
class TestFlatten(test_util.TestCase):
    """Flatten operator: forward + gradient on a fixed 4-D input."""
    def testFlatten(self):
        flatten_op = core.CreateOperator("Flatten", ["X"], ["Y"])
        data = np.random.rand(2, 3, 4, 5).astype(np.float32)
        self.assertTrue(device_checker.CheckSimple(flatten_op, [data], [0]))
        for checker in gradient_checkers:
            ok, _, _ = checker.CheckSimple(flatten_op, [data], 0, [0])
            self.assertTrue(ok)
class TestDepthConcat(test_util.TestCase):
    """DepthConcat of four inputs: forward + gradient wrt each input,
    in both NHWC and NCHW layouts."""
    def setUp(self):
        # input_size, depth1, depth2, depth3, depth4
        self.test_configs = [
            (3, 2, 3, 4, 5),
            (4, 5, 4, 3, 2),
        ]
    def testDepthConcatNHWC(self):
        for input_size, d1, d2, d3, d4 in self.test_configs:
            op = core.CreateOperator("DepthConcat",
                                     ["X1", "X2", "X3", "X4"],
                                     ["Y", "Y_dims"],
                                     order="NHWC"
                                     )
            Xs = [
                np.random.rand(2, input_size, input_size,
                               d1).astype(np.float32),
                np.random.rand(2, input_size, input_size,
                               d2).astype(np.float32),
                np.random.rand(2, input_size, input_size,
                               d3).astype(np.float32),
                np.random.rand(2, input_size, input_size, d4).astype(np.float32)
            ]
            for i in range(4):  # gradient wrt each of the four inputs
                res = device_checker.CheckSimple(op, Xs, [0])
                self.assertTrue(res)
                for checker in gradient_checkers:
                    res, grad, grad_estimated = checker.CheckSimple(op, Xs, i,
                                                                    [0])
                    self.assertTrue(res)
    def testDepthConcatNCHW(self):
        for input_size, d1, d2, d3, d4 in self.test_configs:
            op = core.CreateOperator("DepthConcat",
                                     ["X1", "X2", "X3", "X4"],
                                     ["Y", "Y_dims"],
                                     order="NCHW"
                                     )
            Xs = [
                np.random.rand(2, d1, input_size,
                               input_size).astype(np.float32),
                np.random.rand(2, d2, input_size,
                               input_size).astype(np.float32),
                np.random.rand(2, d3, input_size,
                               input_size).astype(np.float32),
                np.random.rand(2, d4, input_size, input_size).astype(np.float32)
            ]
            for i in range(4):
                res = device_checker.CheckSimple(op, Xs, [0])
                self.assertTrue(res)
                for checker in gradient_checkers:
                    res, grad, grad_estimated = checker.CheckSimple(op, Xs, i,
                                                                    [0])
                    self.assertTrue(res)
class TestRelu(test_util.TestCase):
    """Relu operator: forward + gradient over several input shapes."""
    def setUp(self):
        # input size
        self.test_configs = [
            (1, 1),
            (2, 1),
            (1, 3, 3, 1),
            (2, 3, 3, 1),
            (1, 5, 5, 3),
            (2, 5, 5, 3),
        ]
    def testRelu(self):
        for input_size in self.test_configs:
            op = core.CreateOperator("Relu", ["X"], ["Y"])
            X = np.random.rand(*input_size).astype(np.float32)
            # go away from the origin point to avoid kink problems
            X += 0.01 * np.sign(X)
            X[X == 0] = 0.01
            res = device_checker.CheckSimple(op, [X], [0])
            self.assertTrue(res)
            for checker in gradient_checkers:
                res, grad, grad_estimated = checker.CheckSimple(op, [X], 0, [0])
                self.assertTrue(res)
class TestTanh(test_util.TestCase):
    """Tanh operator: forward + gradient over a few input shapes."""
    def setUp(self):
        self.test_configs = [(1, 1), (2, 1), (1, 2, 3, 4), ]
    def testTanh(self):
        for shape in self.test_configs:
            tanh_op = core.CreateOperator("Tanh", ["X"], ["Y"])
            data = np.random.rand(*shape).astype(np.float32) - 0.5
            self.assertTrue(device_checker.CheckSimple(tanh_op, [data], [0]))
            for checker in gradient_checkers:
                ok, _, _ = checker.CheckSimple(tanh_op, [data], 0, [0])
                self.assertTrue(ok)
class TestSigmoid(test_util.TestCase):
    """Sigmoid operator: forward + gradient over a few input shapes."""
    def setUp(self):
        self.test_configs = [(1, 1), (2, 1), (1, 2, 3, 4), ]
    def testSigmoid(self):
        for input_size in self.test_configs:
            op = core.CreateOperator("Sigmoid", ["X"], ["Y"])
            X = np.random.rand(*input_size).astype(np.float32) - 0.5
            res = device_checker.CheckSimple(op, [X], [0])
            self.assertTrue(res)
            for checker in gradient_checkers:
                res, grad, grad_estimated = checker.CheckSimple(op, [X], 0, [0])
                self.assertTrue(res)
class TestSum(test_util.TestCase):
    """Sum of two inputs, both out-of-place and in-place (output aliases X1)."""
    def setUp(self):
        # (input_size, in_place)
        self.test_configs = [((1, 2, 3, 4), True), ((1, 2, 3, 4), False) ]
    def testSum(self):
        for (input_size, in_place) in self.test_configs:
            op = core.CreateOperator("Sum", ["X1", "X2"],
                                     ["Y" if not in_place else "X1"])
            X1 = np.random.rand(*input_size).astype(np.float32) - 0.5
            X2 = np.random.rand(*input_size).astype(np.float32) - 0.5
            res = device_checker.CheckSimple(op, [X1, X2], [0])
            self.assertTrue(res)
            for checker in gradient_checkers:
                res, grad, grad_estimated = checker.CheckSimple(
                    op, [X1, X2], 0, [0])
                self.assertTrue(res)
class TestMakeTwoClass(test_util.TestCase):
    """MakeTwoClass operator: forward + gradient on probability inputs."""
    def setUp(self):
        # input size
        self.test_configs = [
            (1,),
            (7,),
            (1, 3),
            (2, 5),
        ]
    def testMakeTwoClass(self):
        for input_size in self.test_configs:
            op = core.CreateOperator("MakeTwoClass", ["X"], ["Y"])
            X = np.random.rand(*input_size).astype(np.float32)
            # step a little to avoid gradient problems
            X[X < 0.01] += 0.01
            X[X > 0.99] -= 0.01
            res = device_checker.CheckSimple(op, [X], [0])
            self.assertTrue(res)
            for checker in gradient_checkers:
                res, grad, grad_estimated = checker.CheckSimple(op, [X], 0, [0])
                self.assertTrue(res)
@unittest.skipIf(not workspace.has_gpu_support,
                 "Recurrent only implemented on GPU")
class TestRecurrent(test_util.TestCase):
    """cuDNN Recurrent op (GRU/LSTM): device check + gradients on GPU only."""
    # One RNN configuration: dims, direction, cell type, layers, dropout.
    R = collections.namedtuple('R', [
        'hidden_size',
        'bidirectional',
        'rnn_mode',
        'input_mode',
        'num_layers',
        'T',
        'N',
        'D',
        'dropout',
    ])
    def test_recurrent(self):
        CONFIGS = [
            self.R(
                hidden_size=3,
                bidirectional=False,
                rnn_mode="gru",
                input_mode="linear",
                num_layers=2,
                dropout=0.0,
                T=3,
                N=4,
                D=2
            ),
            self.R(
                hidden_size=5,
                bidirectional=True,
                rnn_mode="gru",
                input_mode="linear",
                num_layers=2,
                dropout=0.0,
                T=3,
                N=4,
                D=2
            ),
            self.R(
                hidden_size=1,
                bidirectional=False,
                rnn_mode="lstm",
                input_mode="linear",
                num_layers=1,
                T=3,
                N=4,
                D=2,
                dropout=0.0,
            ),
            self.R(
                hidden_size=2,
                bidirectional=True,
                rnn_mode="lstm",
                input_mode="linear",
                num_layers=2,
                dropout=0.0,
                T=2,
                N=2,
                D=2
            ),
        ]
        for r in CONFIGS:
            print(r)
            # RecurrentInit sizes the packed WEIGHT blob for this config.
            init_op = core.CreateOperator("RecurrentInit",
                                          ["INPUT"],
                                          ["WEIGHT", "DROPOUT_STATES"],
                                          hidden_size=r.hidden_size,
                                          bidirectional=r.bidirectional,
                                          rnn_mode=r.rnn_mode,
                                          dropout=r.dropout,
                                          input_mode=r.input_mode,
                                          num_layers=r.num_layers,
                                          device_option=gpu_device_option
                                          )
            op = core.CreateOperator("Recurrent",
                                     ["INPUT", "HIDDEN_INPUT", "CELL_INPUT", "WEIGHT"],
                                     ["OUTPUT", "HIDDEN_OUTPUT", "CELL_OUTPUT",
                                      "RNN_SCRATCH", "DROPOUT_STATES"],
                                     hidden_size=r.hidden_size,
                                     bidirectional=r.bidirectional,
                                     rnn_mode=r.rnn_mode,
                                     dropout=r.dropout,
                                     input_mode=r.input_mode,
                                     num_layers=r.num_layers,
                                     )
            num_directions = 2 if r.bidirectional else 1
            X = np.random.randn(r.T, r.N, r.D).astype(np.float32)
            workspace.FeedBlob("INPUT", X, device_option=gpu_device_option)
            workspace.RunOperatorOnce(init_op)
            W = workspace.FetchBlob("WEIGHT")
            H = np.random.randn(
                r.hidden_size, r.N, r.num_layers * num_directions).astype(
                    np.float32)
            C = np.random.randn(
                r.hidden_size, r.N, r.num_layers * num_directions).astype(
                    np.float32) if r.rnn_mode == "lstm" else \
                np.empty((1,)).astype(np.float32)  # unused in GRU
            inputs = [X, H, C, W]
            self.assertTrue(gpu_device_checker.CheckSimple(op, inputs, [0]))
            for checker in gpu_gradient_checkers:
                input_idxs = [i for (i, _) in enumerate(inputs)] \
                    if r.rnn_mode == "lstm" else [0, 1, 3]  # ignore C
                for input_idx in input_idxs:
                    res, grad, grad_estimated = checker.CheckSimple(
                        op, inputs, input_idx, [0, 1, 2])
                    if not res:
                        print(input_idx, grad, grad_estimated)
                    self.assertTrue(res)
if __name__ == '__main__':
    # Run all operator checks when executed directly.
    unittest.main()
|
import shelve
from typing import Any, Dict
from uuid import UUID, uuid4
class Database:
    """Tiny CRUD store backed by the stdlib `shelve` module.

    Bug fix: shelve keys must be strings, but the original indexed the
    shelf with UUID objects, which fails on every operation.  Items are
    now stored under str(uuid) while the public API keeps exchanging
    UUID objects, so callers are unaffected.
    """
    def __init__(self, filename="shelve.db"):
        self.filename = filename

    def create(self, item: Any) -> UUID:
        """Store item under a freshly generated UUID and return that UUID."""
        with shelve.open(self.filename) as db:
            uuid = uuid4()
            db[str(uuid)] = item  # shelve requires str keys
            return uuid

    def read(self, uuid: UUID) -> Any:
        """Return the item stored under uuid; raises KeyError if absent."""
        with shelve.open(self.filename) as db:
            return db[str(uuid)]

    def update(self, uuid: UUID, item: Any) -> None:
        """Replace the item stored under uuid."""
        with shelve.open(self.filename) as db:
            db[str(uuid)] = item

    def delete(self, uuid: UUID) -> None:
        """Remove the item stored under uuid; raises KeyError if absent."""
        with shelve.open(self.filename) as db:
            db.pop(str(uuid))
|
# Car button codes
class CruiseButtons:
    """Cruise-stalk button codes from the SpdCtrlLvr_Stat CAN signal
    (value/name mapping reproduced from the dbc line below)."""
    # VAL_ 69 SpdCtrlLvr_Stat 32 "DN_1ST" 16 "UP_1ST" 8 "DN_2ND" 4 "UP_2ND" 2 "RWD" 1 "FWD" 0 "IDLE" ;
    RES_ACCEL = 16  # UP_1ST
    DECEL_SET = 32  # DN_1ST
    CANCEL = 1      # FWD
    MAIN = 2        # RWD
#car chimes: enumeration from dbc file. Chimes are for alerts and warnings
class CM:
    """Chime codes (alerts and warnings); values from the dbc file."""
    MUTE = 0
    SINGLE = 3
    DOUBLE = 4
    REPEATED = 1
    CONTINUOUS = 2
#car beeps: enumeration from dbc file. Beeps are for activation and deactivation
class BP:
    """Beep codes (activation/deactivation feedback); values from the dbc file."""
    MUTE = 0
    SINGLE = 3
    TRIPLE = 2
    REPEATED = 1
class AH:
    """Alert/HUD codes; each constant is an [alert_idx, value] pair."""
    #[alert_idx, value]
    # See dbc files for info on values
    NONE = [0, 0]
    FCW = [1, 1]
    STEER = [2, 1]
    BRAKE_PRESSED = [3, 10]
    GEAR_NOT_D = [4, 6]
    SEATBELT = [5, 5]
    SPEED_TOO_HIGH = [6, 8]
|
import os
import js2py
import sqlite3
from django.template.loader import render_to_string
import cv2
from django.contrib.auth.models import User
import numpy as np
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import UserCreationForm
from django.http import HttpResponse
from django.shortcuts import redirect, render
from django.views.generic import TemplateView
from PIL import Image
from django.core.mail import send_mail
from child.forms import addmemberform
from django.template import Template,Context
from .forms import UserRegisterForm
from .models import esehi
from .tokens import account_activation_token
from django.core.mail import EmailMessage
from django.contrib.sites.shortcuts import get_current_site
from django.utils.encoding import force_bytes, force_text
from django.utils.http import urlsafe_base64_encode,urlsafe_base64_decode
import requests
def register(request):
    """Sign-up view: create the account on a valid POST, otherwise show the
    (possibly error-annotated) registration form."""
    if request.method != 'POST':
        return render(request, 'child/register.html', {"form": UserRegisterForm()})
    form = UserRegisterForm(request.POST)
    if form.is_valid():
        form.save()
        username = form.cleaned_data.get('username')
        messages.success(request, f'Account Created for {username}!')
        return redirect('/child/login')
    # Invalid POST: re-render with the bound form so errors are displayed.
    return render(request, 'child/register.html', {"form": form})
@login_required
def congrats(request):
    """Capture 21 webcam face samples labeled with the newest member's id,
    then retrain the LBPH recognizer over the whole DataSet directory.

    NOTE(review): blocks the request thread on the webcam and assumes the
    haarcascade file plus DataSet/ and recognizer/ directories exist in cwd.
    """
    faceDetect=cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    cam=cv2.VideoCapture(0)
    # Label the new samples with the highest existing member id.
    members=esehi.objects.all()
    id=0
    for member in members:
        if(id<member.id):
            id=member.id
    sample=0
    while(True):
        ret,img=cam.read()
        gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
        faces=faceDetect.detectMultiScale(gray,1.3,5)
        for(x,y,w,h) in faces:
            sample=sample+1
            # Save the cropped grayscale face as DataSet/User.<id>.<n>.jpg.
            cv2.imwrite('DataSet/User.'+str(id)+"."+str(sample)+'.jpg',gray[y:y+h,x:x+w])
            cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,0),2)
            cv2.waitKey(100)
        cv2.imshow("Face",img);
        if(sample>20):
            break
    cam.release()
    cv2.destroyAllWindows()
    recognizer=cv2.face.LBPHFaceRecognizer_create();
    path='DataSet'
    def getImageWithID(path):
        # Collect (label, grayscale face array) pairs from the sample files;
        # the label is the <id> component of the User.<id>.<n>.jpg filename.
        imagePaths=[os.path.join(path,f) for f in os.listdir(path)]
        faces=[]
        IDs=[]
        for imagePath in imagePaths:
            faceImg=Image.open(imagePath).convert('L')
            facenp=np.array(faceImg,'uint8')
            ID=int(os.path.split(imagePath)[-1].split('.')[1])
            faces.append(facenp)
            IDs.append(ID)
            cv2.waitKey(10)
        return IDs,faces
    Ids,faces=getImageWithID(path)
    recognizer.train(faces,np.array(Ids))
    recognizer.write('recognizer/trainningData.yml')
    return render(request,'child/congrats.html')
@login_required
def laststep(request):
    """Render the final-step page of the add-member flow."""
    return render(request,'child/laststep.html')
def home(request):
    """Render the landing page."""
    return render(request,'child/index.html')
def login(request):
    """Render the login page (authentication handled elsewhere)."""
    return render(request,'child/login.html')
def success(request):
    """Plain-text confirmation after an upload."""
    return HttpResponse('successfuly uploaded')
@login_required
def addmember(request):
    """Add a family member; the logged-in user is attached before saving."""
    if request.method != 'POST':
        return render(request, 'child/addmember.html', {"form": addmemberform()})
    form = addmemberform(request.POST, request.FILES)
    if form.is_valid():
        member = form.save(commit=False)
        member.user = request.user
        member.save()
        return redirect('/child/laststep')
    # Invalid POST: re-render with the bound form so errors are displayed.
    return render(request, 'child/addmember.html', {"form": form})
def aboutus(request):
    """Render the about-us page."""
    context = {}
    return render(request, 'child/aboutus.html', context)
def howitworks(request):
    """Render the how-it-works page."""
    context = {}
    return render(request, 'child/howitworks.html', context)
@login_required
def dashboard(request):
    """Render the dashboard for an authenticated user."""
    context = {}
    return render(request, 'child/dashboard.html', context)
@login_required
def allmembers(request):
    """Render the all-members page.

    Also prints whether any esehi rows exist (debug output kept from the
    original implementation).
    """
    has_members = esehi.objects.all().count() > 0
    print(has_members)
    return render(request, 'child/allmembers.html')
@login_required
def searchmember(request):
    """Render the member-search page for an authenticated user."""
    context = {}
    return render(request, 'child/searchmember.html', context)
@login_required
def addtolost(request, id):
    """Render the add-to-lost page for member *id*.

    Copying the record into a ``lost`` table (``lost(**rows[0]).save()``)
    is currently disabled; the queryset below is built but never evaluated
    (Django querysets are lazy), matching the original behavior.
    """
    member_rows = esehi.objects.filter(id=id).values()
    return render(request, 'child/addtolost.html')
def display_ip():
    """Look up the caller's geolocation via the geojs.io public API.

    Returns:
        list: ``[region, latitude, longitude]`` for the current public IP
        (latitude/longitude come back from the API as strings).

    Raises:
        requests.RequestException: on network failure or timeout.
    """
    # FIX: the original calls had no timeout, so a stalled geojs.io
    # endpoint could hang the request thread indefinitely.
    ip_request = requests.get('https://get.geojs.io/v1/ip.json', timeout=10)
    my_ip = ip_request.json()['ip']
    geo_request = requests.get(
        'https://get.geojs.io/v1/ip/geo/' + my_ip + '.json', timeout=10
    )
    geo_data = geo_request.json()
    return [geo_data['region'], geo_data['latitude'], geo_data['longitude']]
@login_required
def searchresult(request):
    """Identify a child from the webcam via the trained LBPH recognizer,
    then email an access-permission request containing the requester's
    GeoIP location.

    Side effects: opens camera 0, shows a preview window, queries
    db.sqlite3 directly, calls geojs.io, and sends an email.
    """
    faceDetect=cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    def getans(Id):
        # Fetch the child_esehi row for the recognized label.
        # NOTE(review): SQL is built by string concatenation; Id comes from
        # rec.predict (an int) so injection is unlikely here, but a
        # parameterized query ("... WHERE id=?") would be safer.
        conn = sqlite3.connect("db.sqlite3")
        cmd = "SELECT * from child_esehi WHERE id="+str(Id)
        cursor = conn.execute(cmd)
        profile = None
        for row in cursor:
            profile = row
        conn.close()
        return profile
    cam=cv2.VideoCapture(0)
    rec=cv2.face.LBPHFaceRecognizer_create();
    rec.read('recognizer\\trainningData.yml')
    id=0
    flag=0
    font=cv2.FONT_HERSHEY_COMPLEX_SMALL
    # loop frames until a face is recognized (flag) or the user presses 'q'
    while(True):
        ret,img=cam.read()
        gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
        faces=faceDetect.detectMultiScale(gray,1.3,5)
        for(x,y,w,h) in faces:
            cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,0),2)
            # predict the member id for the detected face crop
            id,conf=rec.predict(gray[y:y+h,x:x+w])
            profile = getans(id)
            if profile!=None:
                print(profile)
                cv2.destroyAllWindows()
                flag=1
                break
            #cv2.putText(img,str(id),(x,y+h), font, 4,(255,255,255),2,cv2.LINE_AA)
        cv2.imshow("Face",img);
        if(cv2.waitKey(1)==ord('q') or flag==1):
            break;
    cam.release()
    cv2.destroyAllWindows()
    current_site=get_current_site(request)
    mail_subject='Give Permisssion to access Details of child'
    # resolve the requester's public IP / geolocation for the email body
    ip_request = requests.get('https://get.geojs.io/v1/ip.json')
    my_ip = ip_request.json()['ip'] # ip_request.json() => {ip: 'XXX.XXX.XX.X'}
    geo_request_url = 'https://get.geojs.io/v1/ip/geo/' + my_ip + '.json'
    geo_request = requests.get(geo_request_url)
    geo_data = geo_request.json()
    r=display_ip()
    # NOTE(review): display_ip() returns [region, latitude, longitude] but
    # the template receives r[1] as 'long' and r[2] as 'lat' — the labels
    # look swapped; confirm against the email template.
    message = render_to_string('child/acc_active_email.html',{'user':request.user,'domain':current_site.domain,'uid':urlsafe_base64_encode(force_bytes(id)),'token':account_activation_token.make_token(request.user),'region':r[0],'long':r[1],'lat':r[2]})
    to_email='akeshav53@gmail.com'
    email=EmailMessage(mail_subject,message,to=[to_email])
    email.send()
    messages.success(request,f'We have sent the confirmation mail')
    return redirect('/child')
    # return render(request,'child/searchresult.html',{'profile':profile})
def activate(request,uidb64,token,year):
    """Validate an emailed activation link and grant child-record access.

    ``uidb64`` is the urlsafe-base64-encoded esehi (child) pk; ``token``
    is the account-activation token; ``year`` actually carries the pk of
    the User being granted access (NOTE(review): misleading name —
    confirm and consider renaming).
    """
    try:
        child_id=force_text(urlsafe_base64_decode(uidb64))
        user=User.objects.get(pk=year)
        child1=esehi.objects.get(pk=child_id)
    # NOTE(review): esehi.DoesNotExist is not caught here and would
    # propagate as a server error — confirm whether it should be handled.
    except (TypeError,ValueError,OverflowError,User.DoesNotExist):
        user=None
    if user is not None and account_activation_token.check_token(user,token):
        # mark the child record as accessible by the granted user
        child1.perms=True
        child1.uperms=year
        child1.save()
        return HttpResponse('<h2>Access Granted</h2>')
    else:
        return HttpResponse('activation link is invalid!')
def deletefromlost(request, id):
    """Acknowledge removal of a member from the lost list.

    The actual deletion (``lost.objects.filter(id=id).delete()``) is
    currently disabled.
    """
    message = "Member has been successfully removed from lost list of our database."
    return HttpResponse(message)
def childdetails(request):
    """Show the child record(s) the current user has been granted access to.

    Selects rows from ``child_esehi`` where ``perms`` is set and ``uperms``
    equals this user's pk, and renders the last matching row (or None).
    """
    conn = sqlite3.connect("db.sqlite3")
    try:
        # FIX: use a parameterized query instead of string concatenation
        # (avoids SQL injection and quoting issues with the interpolated pk)
        cursor = conn.execute(
            "SELECT * from child_esehi WHERE perms=1 AND uperms=?",
            (request.user.pk,),
        )
        profile = None
        for row in cursor:
            print(row)  # debug output kept from the original
            profile = row
    finally:
        # FIX: close the connection even if the query raises
        conn.close()
    return render(request, 'child/searchresult.html', {'profile': profile})
|
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
"""Pelican site configuration for kieran-nichols.com (output in docs/)."""
from __future__ import unicode_literals

AUTHOR = 'kieran-nichols'
SITENAME = 'Kieran Nichols'
SITEURL = 'https://www.kieran-nichols.com'

# content locations
PATH = 'content'
OUTPUT_PATH = 'docs/'

TIMEZONE = 'US/Central'
DEFAULT_LANG = 'en'

# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None

DISPLAY_CATEGORIES_ON_MENU = True

PLUGIN_PATHS = ['pelican-plugins']
PLUGINS = ['assets']

# Theme comes from the Bulrush package, which also supplies the Jinja
# environment and filters.
# FIX: removed a dead "THEME = 'themes/Bulrush'" assignment that was
# immediately overridden by "THEME = bulrush.PATH" below.
import bulrush
THEME = bulrush.PATH
JINJA_ENVIRONMENT = bulrush.ENVIRONMENT
JINJA_FILTERS = bulrush.FILTERS

SOCIAL = (
    ('Email', 'knichols4@wisc.edu'),
    ("Github", "https://github.com/kieran-nichols/"),
    ("LinkedIn", "https://www.linkedin.com/in/kieran-nichols-24134479/")
)
#MENUITEMS = (
#('Resume','resume'),
#('Portfolio','portfolio'),
#('About Me','about-me'),
#)
#DEFAULT_PAGINATION = 10
# Uncomment following line if you want document-relative URLs when developing
#RELATIVE_URLS = True
"""TcEx Framework Module"""
# standard library
import logging
from abc import ABC
from collections.abc import Generator
from typing import Any
# third-party
from requests import Response, Session
from requests.exceptions import ProxyError, RetryError
# first-party
from tcex.api.tc.v3.tql.tql import Tql
from tcex.exit.error_code import handle_error
from tcex.logger.trace_logger import TraceLogger
from tcex.pleb.cached_property import cached_property
from tcex.util import Util
# get tcex logger
# Root "tcex" package logger, typed as TraceLogger (handler/level are
# configured elsewhere by the framework).
_logger: TraceLogger = logging.getLogger(__name__.split('.', maxsplit=1)[0])  # type: ignore
class ObjectCollectionABC(ABC):
    """Case Management Collection Abstract Base Class

    This class is a base class for Case Management collections that use
    multi-inheritance with a pydantic BaseModel class. To ensure
    properties are not added to the model both @property and @setter
    methods are used.
    """

    def __init__(
        self,
        session: Session,
        tql_filters: list | None = None,  # This will be removed!
        params: dict | None = None,
    ):
        """Initialize instance properties."""
        self._params = params or {}
        self._tql_filters = tql_filters or []
        # properties
        self._session = session
        self.log = _logger
        self.request: Response  # populated on the first _request() call
        self.tql = Tql()
        self._model = None
        self.type_ = None  # defined in child class
        self.util = Util()

    def __len__(self) -> int:
        """Return the length of the collection."""
        parameters = self._params.copy()
        # request a count only: limit to one result with the count flag set
        parameters['resultLimit'] = 1
        parameters['count'] = True
        tql_string = self.tql.raw_tql
        if not self.tql.raw_tql:
            tql_string = self.tql.as_str
        if tql_string:
            parameters['tql'] = tql_string
        # convert all keys to camel case
        for k, v in list(parameters.items()):
            k = self.util.snake_to_camel(k)
            # if result_limit and resultLimit both show up use the proper cased version
            if k not in parameters:
                parameters[k] = v
        self._request(
            'GET',
            self._api_endpoint,
            body=None,
            params=parameters,
            headers={'content-type': 'application/json'},
        )
        # fall back to the number of returned items when no count is provided
        return self.request.json().get('count', len(self.request.json().get('data', [])))

    @property
    def _api_endpoint(self):  # pragma: no cover
        """Return filter method."""
        raise NotImplementedError('Child class must implement this method.')

    def _request(
        self,
        method: str,
        url: str,
        body: bytes | str | None = None,
        params: dict | None = None,
        headers: dict | None = None,
    ):
        """Handle standard request with error checking."""
        try:
            self.request = self._session.request(
                method, url, data=body, headers=headers, params=params
            )
            self.log.debug(f'feature=api-tc-v3, request-body={self.request.request.body}')
        except (ConnectionError, ProxyError, RetryError):  # pragma: no cover
            # connectivity failures are routed through the framework error handler
            handle_error(
                code=951,
                message_values=[
                    method.upper(),
                    None,
                    '{\"message\": \"Connection/Proxy Error/Retry\"}',
                    url,
                ],
            )
        # non-success responses (HTTP error or status != "Success") also go
        # through the framework error handler
        if not self.success(self.request):
            err = self.request.text or self.request.reason
            handle_error(
                code=950,
                message_values=[
                    self.request.request.method,
                    self.request.status_code,
                    err,
                    self.request.url,
                ],
            )
        # log content for debugging
        self.log_response_text(self.request)

    @property
    def filter(self):  # pragma: no cover
        """Return filter method."""
        raise NotImplementedError('Child class must implement this method.')

    def log_response_text(self, response: Response):
        """Log the response text."""
        response_text = 'response text: (text to large to log)'
        if len(response.content) < 5000:  # check size of content for performance
            response_text = response.text
        self.log.debug(f'feature=api-tc-v3, response-body={response_text}')

    @property
    def model(self):
        """Return the model."""
        return self._model

    @model.setter
    def model(self, data):
        # rebuild a model instance of the same concrete type from raw data
        self._model = type(self.model)(**data)

    def iterate(
        self,
        base_class: Any,
        api_endpoint: str | None = None,
        params: dict | None = None,
    ) -> Generator:
        """Iterate over CM/TI objects."""
        url = api_endpoint or self._api_endpoint
        params = params or self.params
        # special parameter for indicators to enable the return the the indicator fields
        # (value1, value2, value3) on std-custom/custom-custom indicator types.
        if self.type_ == 'Indicators' and api_endpoint is None:
            params.setdefault('fields', []).append('genericCustomIndicatorValues')
        # convert all keys to camel case
        for k, v in list(params.items()):
            k = self.util.snake_to_camel(k)
            params[k] = v
        tql_string = self.tql.raw_tql or self.tql.as_str
        if tql_string:
            params['tql'] = tql_string
        # paginate: follow the "next" URL returned by the API until exhausted
        while True:
            self._request(
                'GET',
                body=None,
                url=url,
                headers={'content-type': 'application/json'},
                params=params,
            )
            # reset some vars (the "next" URL already encodes the query)
            params = {}
            response = self.request.json()
            data = response.get('data', [])
            url = response.pop('next', None)
            for result in data:
                yield base_class(session=self._session, **result)  # type: ignore
            # break out of pagination if no next url present in results
            if not url:
                break

    @property
    def params(self) -> dict:
        """Return the parameters of the case management object collection."""
        return self._params

    @params.setter
    def params(self, params: dict):
        """Set the parameters of the case management object collection."""
        self._params = params

    @staticmethod
    def success(r: Response) -> bool:
        """Validate the response is valid.

        Args:
            r (requests.response): The response object.

        Returns:
            bool: True if status is "ok"
        """
        status = True
        if r.ok:
            try:
                if r.json().get('status') != 'Success':  # pragma: no cover
                    status = False
            except Exception:  # pragma: no cover
                # non-JSON body counts as failure
                status = False
        else:
            status = False
        return status

    @property
    def timeout(self) -> int:
        """Return the timeout of the case management object collection."""
        # NOTE(review): _timeout is never initialized in __init__; reading
        # this property before the setter is used raises AttributeError —
        # confirm whether a default should be set.
        return self._timeout

    @timeout.setter
    def timeout(self, timeout: int):
        """Set the timeout of the case management object collection."""
        self._timeout = timeout

    @cached_property
    def tql_options(self):
        """Return TQL data keywords."""
        _data = []
        # OPTIONS <endpoint>/tql returns the supported TQL metadata
        r = self._session.options(f'{self._api_endpoint}/tql', params={})
        if r.ok:
            _data = r.json()['data']
        return _data

    @property
    def tql_keywords(self):
        """Return supported TQL keywords."""
        return [to.get('keyword') for to in self.tql_options]
|
#Tim Grose - timothy.h.grose@gmail.com
#instructions for running locally:
#download as a .py. save as main.py into lq_app folder.
#open command prompt in folder containing lq_app folder and type "bokeh serve lq_app --show"
#for instructions on how to employ on heroku, see readme file
#coding: utf-8
# Bokeh app: interactive scatter of 2016 MSA employment Location Quotients
# vs employment, filterable by MSA and industry aggregation level.
#import packages
import bokeh, pandas as pd
from bokeh.plotting import *
from bokeh.models import HoverTool, ColumnDataSource, Axis, Span, NumeralTickFormatter, Label, LabelSet
from bokeh.layouts import row, widgetbox, column
from bokeh.models.widgets import Select, Div
from bokeh.io import curdoc, show
from bokeh.palettes import Inferno256
from os.path import dirname, join
# NOTE(review): widgetbox, capital-F Figure and the "logo" kwarg belong to
# the old Bokeh 0.12.x API — this script presumably assumes a pinned old
# Bokeh version; confirm before upgrading.
#read processed location quotient (lq) data file
#for main.py
lqdf=pd.read_csv(join(dirname(__file__),'data','Trimmed_MSA_LQs_2017-08-06.csv'))
#read processed lq data file. the data processing was done using another script.
#for ipynb
#lqdf=pd.read_csv("data/Trimmed_MSA_LQs_2017-08-06.csv")
#get subset "_agg" of lq data for one aggregation level and one location. this will be displayed when the tool is first opened.
lqdf_agg=lqdf[(lqdf['agglvl_title']=='Supersector')&(lqdf['area_title']=='New York-Newark-Jersey City, NY-NJ-PA MSA')]
#set x, y, and desc variables in "_agg" subset with msa lq value, employment level, and industry title respectively
x=lqdf_agg['msa_lq']
y=lqdf_agg['annual_avg_emplvl']
desc=lqdf_agg['industry_title']
#set source data. this is what gets charted.
source=ColumnDataSource(data=dict(
    x=x,
    y=y,
    desc=desc,
))
#set up contents of hover tool with source data. this allows the user to hover over a data point and see stats for that point
hover = HoverTool(
    tooltips="""
    <div style="background:white;">
        <div>
            <span style="font-size: 12px; color: blue;">Industry:</span>
            <span style="font-size: 12px; color: black;">@desc</span>
        </div>
        <div>
            <span style="font-size: 12px; color: blue;">MSA Location Quotient</span>
            <span style="font-size: 12px; color: black;">@x</span>
        </div>
        <div>
            <span style="font-size: 12px; color: blue;">Employment:</span>
            <span style="font-size: 12px; color: black;">@y</span>
        </div>
    </div>
    """
)
#set up figure with titles and hover
p1=Figure(x_axis_label='MSA Employment Location Quotient versus all MSAs',
    y_axis_label='MSA Employment',
    tools=[hover],
    logo=None,
    plot_width=600,
    plot_height=600,
)
#set x and y axis formats
p1.yaxis[0].formatter=NumeralTickFormatter(format="0,0")
p1.xaxis[0].formatter=NumeralTickFormatter(format="0,0.00")
#circle plot on p1 figure. use source, which was created from "_agg" above.
p1.circle('x','y',size=9,fill_color='#ff9000',line_color='firebrick',alpha=.9,source=source)
#create and then render vertical line at x=1
vline=Span(location=1,dimension='height',line_color='yellow',line_width=1)
p1.renderers.extend([vline])
#get unique agg levels
agglvl=lqdf['agglvl_title'].unique()
#set up select agg level widget
agglvl_select=Select(
    title="Industry Aggregation Level:",
    value=agglvl[0],
    options=agglvl.tolist()
)
#get unique MSAs
geo=lqdf['area_title'].unique()
#set up select geo widget
geo_select=Select(
    title="Geography:",
    value='New York-Newark-Jersey City, NY-NJ-PA MSA',
    options=geo.tolist()
)
#define function where "_agg" is subset based on what user selects for geography and aggregation level
def update(attrname,old,new):
    # re-filter the dataframe and swap the plotted columns in place
    lqdf_agg=lqdf[(lqdf['agglvl_title']==agglvl_select.value)&(lqdf['area_title']==geo_select.value)]
    source.data=dict(
        x=lqdf_agg['msa_lq'],
        y=lqdf_agg['annual_avg_emplvl'],
        desc=lqdf_agg['industry_title'],
    )
#apply above function when user changes values of geography and aggregation level
for menu in [agglvl_select,geo_select]:
    menu.on_change('value', update)
#put controls in widget box
controls = widgetbox([agglvl_select,geo_select],width=420)
#add explanatory text in html
desc=Div(text="""
<h1>
How is Your City's Economy Unique?
</h1>
<p>
Location Quotients (LQs) compare the proportion of a particular region's employment in a particular industry
to the proportion of a larger reference area's employment in that same industry. An LQ of greater than 1 indicates
that an industry has a higher concentration of employment in that region than in the overall reference area. An LQ of
less than 1 indicates that an industry has a lower concentration of employment in that region than in the overall
reference area.
<br>
<br>
This chart shows 2016 LQs for each Metropolitan Statistical Area (MSA) in the U.S. with a reference area of
all MSAs in the U.S. It conveys how concentrations in employment vary from city to city. These LQs are recalcualted
from overall LQs provided by the Bureau of Labor Statistics (BLS) Quarterly Census of Employment and Wages.
<br>
<br>
Use the filters below to view LQs for different MSAs and different industry aggregation levels, ranging from
supersector (larger groupings) down to NAICS 4-digit sector (smaller groupings). (NAICS: North American Industry
Classification System)
</p>
""",width=400)
#more explanatory text with citations in html
cit=Div(text="""
<br>
<i>
<font-size:9px;font-style:italic>
These MSA LQs are recalculated from overall LQs provided by the Bureau of Labor Statistics (BLS)
Quarterly Census of Employment and Wages. Government LQs are incorporated into the displayed industry
aggregation levels too; these do not appear directly in the BLS data.
<br>
<br>
U.S. Bureau of Labor Statistics, 'Quarterly Census of Employment and Wages,'
26 June 2017. <a href="https://www.bls.gov/cew/datatoc.htm" style="color:white">www.bls.gov/cew/datatoc.htm</a>.
<br>
<br>
Developed by Tim Grose, <a href="https://github.com/thgrose" style="color:white">github.com/thgrose</a>.
</i>
""",width=1000)
#make layout with controls and figure p1
layout = column(row(column(desc,controls),p1),cit)
#for inline viewing
# NOTE(review): show() renders a one-off document; under "bokeh serve" only
# the curdoc() lines below are needed — confirm the inline call is intended.
show(layout)
#show layout in bokeh document and add title
curdoc().add_root(layout)
curdoc().title="LQ"
|
#!/usr/bin/python3
def knapsack(W, wt, val):
    """Solve the 0/1 knapsack problem with bottom-up dynamic programming.

    Args:
        W: maximum weight capacity (non-negative int).
        wt: list of item weights.
        val: list of item values (same length as wt).

    Returns:
        The maximum total value achievable without exceeding capacity W.
    """
    n = len(wt)
    # table[i][cap] = best value using the first i items with capacity cap
    table = [[0] * (W + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        weight, value = wt[i - 1], val[i - 1]
        for cap in range(1, W + 1):
            if weight > cap:
                # item i cannot fit; inherit the best without it
                table[i][cap] = table[i - 1][cap]
            else:
                # best of skipping item i or taking it
                table[i][cap] = max(table[i - 1][cap],
                                    value + table[i - 1][cap - weight])
    return table[n][W]
def main():
    """Read a knapsack instance from stdin and print the optimal value.

    Input format (one integer per line): item count n, then n values,
    then n weights, then the capacity W.
    """
    count = int(input())
    values = [int(input()) for _ in range(count)]
    weights = [int(input()) for _ in range(count)]
    capacity = int(input())
    print(knapsack(capacity, weights, values))


if __name__ == "__main__":
    main()
|
# Blink an LED wired to physical pin 7 (0.8 s on, 0.8 s off).
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BOARD)  # address pins by physical board numbering
GPIO.setup(7,GPIO.OUT)
# range(0,1) yields a single iteration, so the LED blinks exactly once
# NOTE(review): widen the range to blink repeatedly — confirm intent.
for x in range(0,1):
    GPIO.output(7,True)
    time.sleep(.8)
    GPIO.output(7,False)
    time.sleep(.8)
GPIO.cleanup()  # release the GPIO channels on exit
import GPy
from GPyOpt.methods import BayesianOptimization
def optimizer_func(X, Y, BatchSize):
    """Build a constrained GPyOpt Bayesian optimizer over three unit-interval variables.

    Args:
        X: previously evaluated input points (columns x1..x3, each in [0, 1]).
        Y: observed objective values for the rows of X.
        BatchSize: number of candidate points to suggest per round.

    Returns:
        A configured ``BayesianOptimization`` instance (no objective function
        attached; candidates are obtained externally).
    """
    # search space: three continuous variables on the unit interval
    search_space = [
        {'name': 'x1', 'type': 'continuous', 'domain': (0, 1)},
        {'name': 'x2', 'type': 'continuous', 'domain': (0, 1)},
        {'name': 'x3', 'type': 'continuous', 'domain': (0, 1)},
    ]
    # x1 + x2 + x3 must equal 1 within a +/-0.005 tolerance;
    # GPyOpt treats each constraint expression as "<= 0"
    sum_constraints = [
        {'name': 'constr_1',
         'constraint': 'x[:,0] + x[:,1] + x[:,2] -(1 + 0.005)'},
        {'name': 'constr_2',
         'constraint': '(1- 0.005) - (x[:,0] + x[:,1] + x[:,2]) '},
    ]
    matern_kernel = GPy.kern.Matern52(input_dim=len(search_space), ARD=True)
    return BayesianOptimization(
        f=None,
        domain=search_space,
        constraints=sum_constraints,
        model_type='GP',
        acquisition_type='EI',
        acquisition_jitter=0.1,
        X=X,
        Y=Y,
        evaluator_type='local_penalization',
        batch_size=BatchSize,
        normalize_Y=True,
        kernel=matern_kernel,
    )
import mysql
from configparser import ConfigParser
from mysql.connector import (connection)
from mysql.connector import errorcode
class Conexion:
    """Thin wrapper around a MySQL connection configured from config/config.ini.

    The INI file must provide a [USERINFO] section (user, password) and a
    [SERVERCONFIG] section (host, database).
    """

    def __init__(self):
        # Connection stays None until crear_conexion() succeeds.
        self.Conexion = None

    def crear_conexion(self):
        """Open the MySQL connection using credentials from config/config.ini.

        On failure an error message is printed and ``self.Conexion``
        remains None.
        """
        config_object = ConfigParser()
        config_object.read("config/config.ini")
        userInfo = config_object["USERINFO"]
        serverConfig = config_object["SERVERCONFIG"]
        try:
            self.Conexion = connection.MySQLConnection(user=userInfo['user'],
                                                       password=userInfo['password'],
                                                       host=serverConfig['host'],
                                                       database=serverConfig['database'])
        except mysql.connector.Error as err:
            if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
                print("Something is wrong with your user name or password")
            elif err.errno == errorcode.ER_BAD_DB_ERROR:
                print("Database does not exist")
            else:
                print(err)

    def get_conexion(self):
        """Return the underlying connection (None if not created yet)."""
        return self.Conexion

    def cerrar_conexion(self):
        """Close the connection if one was opened.

        FIX: previously this raised AttributeError when crear_conexion()
        had failed or was never called; closing is now a safe no-op in
        that case.
        """
        if self.Conexion is not None:
            self.Conexion.close()
|
# Prompt twice for a cost entry and show the collected list.
first_cost = input("input costs")
second_cost = input("input costs")
records = [first_cost, second_cost]
print(records)
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 7 12:58:07 2017
@author: wangxj

Demo of matplotlib's built-in style sheets: four subplots rendered with
the "ggplot" style (scatter, shifted lines, grouped bars, colored circles).
"""
#!/usr/bin/python
#coding: utf-8
import numpy as np
import matplotlib.pyplot as plt
# list all built-in style sheets
print (plt.style.available)
# apply a built-in style sheet for nicer defaults
plt.style.use("ggplot")
fig, axes = plt.subplots(ncols = 2, nrows = 2)
# assign the four subplot axes to four named objects
ax1, ax2, ax3, ax4 = axes.ravel()
x, y = np.random.normal(size = (2, 100))
ax1.plot(x, y, "o")
x = np.arange(1, 10)
y = np.arange(1, 10)
# plt.rcParams['axes.prop_cycle'] holds the style's color cycle;
# line colors repeat in this order
ncolors = len(plt.rcParams['axes.prop_cycle'])
# print ncolors
# print plt.rcParams['axes.prop_cycle']
shift = np.linspace(1, 20, ncolors)
for s in shift:
    # print s
    ax2.plot(x, y + s, "-")
x = np.arange(5)
y1, y2, y3 = np.random.randint(1, 25, size = (3, 5))
width = 0.25
# bar charts need their colors specified explicitly
ax3.bar(x, y1, width, color = "r")
ax3.bar(x + width, y2, width, color = "g")
ax3.bar(x + 2 * width, y3, width, color = "y")
# draw one circle per color in the active style's color cycle
for i, color in enumerate(plt.rcParams['axes.prop_cycle']):
    xy = np.random.normal(size= 2)
    for c in color.values():
        ax4.add_patch(plt.Circle(xy, radius = 0.3, color= c))
ax4.axis("equal")
plt.show()
#-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: jescudero
#
# Created: 28/04/2015
# Copyright: (c) jescudero 2015
# Licence: <your licence>
#-------------------------------------------------------------------------------
import win32serviceutil
import win32service
import win32event
import servicemanager
import socket
class AppServerSvc (win32serviceutil.ServiceFramework):
    """Skeleton Windows service registered as "TestService".

    SvcDoRun logs a started event and delegates to main(), which is an
    empty placeholder for the service's real work loop.
    """
    _svc_name_ = "TestService"           # internal service name
    _svc_display_name_ = "Test Service"  # name shown in the services panel

    def __init__(self,args):
        """Create the stop event and set a default socket timeout."""
        win32serviceutil.ServiceFramework.__init__(self,args)
        # event signalled by SvcStop to request shutdown
        self.hWaitStop = win32event.CreateEvent(None,0,0,None)
        socket.setdefaulttimeout(60)

    def SvcStop(self):
        """Handle the SCM stop request: report pending and signal the event."""
        self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
        win32event.SetEvent(self.hWaitStop)

    def SvcDoRun(self):
        """Log service start to the event log and run the work loop."""
        servicemanager.LogMsg(servicemanager.EVENTLOG_INFORMATION_TYPE,
                              servicemanager.PYS_SERVICE_STARTED,
                              (self._svc_name_,''))
        self.main()

    def main(self):
        # placeholder for the actual service work
        pass

if __name__ == '__main__':
    # install/start/stop/remove handling via the pywin32 command-line helper
    win32serviceutil.HandleCommandLine(AppServerSvc)
import numpy as np
import matplotlib.pyplot as plt
# Bayesian classification demo: two 2-D Gaussian classes, their d^2
# contours, discriminant curves for several priors, and error estimates.
M1=np.array([1,1])    # mean vector of class 1
M2=np.array([5,2])    # mean vector of class 2
Sigma1=np.array([[2,1],[1,1]])    # covariance matrix of class 1
Sigma2=np.array([[3,-1],[-1,2]])  # covariance matrix of class 2
np.random.seed(0)
N=700          # samples drawn per class
P1=P2=0.5      # equal prior probabilities
y1, y2=np.random.multivariate_normal(M1,Sigma1,N).T
z1, z2=np.random.multivariate_normal(M2,Sigma2,N).T
from scipy import linalg
invSigma1=linalg.inv(Sigma1)
invSigma2=linalg.inv(Sigma2)
M3=M2.T-M1.T
m1=M3[0]
m2=M3[1]
# unpack means and inverse-covariance entries into scalars used by the
# hand-written discriminant formulas below
m11=M1[0]
m12=M1[1]
m21=M2[0]
m22=M2[1]
z11=invSigma1[0][0]
z12=invSigma1[0][1]
z21=invSigma1[1][0]
z22=invSigma1[1][1]
k11=invSigma2[0][0]
k12=invSigma2[0][1]
k21=invSigma2[1][0]
k22=invSigma2[1][1]
######################## d^2 CONTOUR CURVES FOR BOTH CLASSES ################
# grid over the 2-D feature space; 120 points per axis chosen arbitrarily
s1 = np.linspace(-5, 11, 120)
s2 = np.linspace(-5, 11, 120)
x1pom,x2pom=np.meshgrid(s1,s2)
# squared Mahalanobis distance of each grid point from each class mean
d1=(invSigma1[0,0]*(x1pom-M1[0])+invSigma1[0,1]*(x2pom-M1[1]))*(x1pom-M1[0])+(invSigma1[1,0]*(x1pom-M1[0])+invSigma1[1,1]*(x2pom-M1[1]))*(x2pom-M1[1])
d2=(invSigma2[0,0]*(x1pom-M2[0])+invSigma2[0,1]*(x2pom-M2[1]))*(x1pom-M2[0])+(invSigma2[1,0]*(x1pom-M2[0])+invSigma2[1,1]*(x2pom-M2[1]))*(x2pom-M2[1])
fig = plt.figure()
plt.plot(y1,y2,'m*')
plt.plot(z1,z2,'c.')
# contours where d^2 equals 1, 4 and 9 (one, two and three "sigmas")
plt.contour(x1pom,x2pom,d2,[1,4,9],colors='k',linewidths=3)
plt.contour(x1pom,x2pom,d1,[1,4,9],colors='k',linewidths=3)
plt.title('Dvodimenzioni slucajni vektor i d^2 krive')
plt.xlabel('x1')
plt.ylabel('x2')
plt.grid(True)
plt.show()
fig.savefig('zadatak1-d2linije.png')
################ CLASSES AND DISCRIMINANT LINES FOR SEVERAL PRIORS ##########
X=np.arange(-2,7,0.1)
Y=np.arange(-2,7,0.1)
x1,x2=np.meshgrid(X,Y)
# Bayes discriminant h(x) = d1^2/2 - d2^2/2 + ln(|S1|/|S2|)/2 + ln(P1/P2)
# evaluated on the grid for priors 0.5/0.5 (y), 0.2/0.8 (l), 0.8/0.2 (b)
y=0.5*(((x1-m11)*z11+(x2-m12)*z21)*(x1-m11)+((x1-m11)*z12+(x2-m12)*z22)*(x2-m12))-0.5*(((x1-m21)*k11+(x2-m22)*k21)*(x1-m21)+((x1-m21)*k12+(x2-m22)*k22)*(x2-m22))+0.5*np.log(np.linalg.det(Sigma1)/np.linalg.det(Sigma2))+np.log(P1/P2)
l=0.5*(((x1-m11)*z11+(x2-m12)*z21)*(x1-m11)+((x1-m11)*z12+(x2-m12)*z22)*(x2-m12))-0.5*(((x1-m21)*k11+(x2-m22)*k21)*(x1-m21)+((x1-m21)*k12+(x2-m22)*k22)*(x2-m22))+0.5*np.log(np.linalg.det(Sigma1)/np.linalg.det(Sigma2))+np.log(0.2/0.8)
b=0.5*(((x1-m11)*z11+(x2-m12)*z21)*(x1-m11)+((x1-m11)*z12+(x2-m12)*z22)*(x2-m12))-0.5*(((x1-m21)*k11+(x2-m22)*k21)*(x1-m21)+((x1-m21)*k12+(x2-m22)*k22)*(x2-m22))+0.5*np.log(np.linalg.det(Sigma1)/np.linalg.det(Sigma2))+np.log(0.8/0.2)
fig = plt.figure()
plt.plot(y1, y2, '*m',label='Klasa I')
plt.plot(z1, z2, '.c',label='Klasa II')
# zero-level contour of each discriminant = the decision boundary
plt.contour(x1,x2,y,0)
plt.contour(x1,x2,l,0,colors='red')
plt.contour(x1,x2,b,0,colors='blue')
plt.grid(True)
plt.title('Prikaz dve klase u x1 x2 prostoru')
plt.xlabel('x1')
plt.ylabel('x2')
plt.legend()
plt.show()
fig.savefig('zadatak1-discKrive.png')
####################### TESTING WITH NEWLY DRAWN CLASSES ####################
# fresh test samples drawn from slightly perturbed means, same covariances
M11 =np.array([1.1,1.01])
M21 =np.array([5.04, 2.2])
SigmaM1 =np.array([[2,1],[1,1]])
SigmaM2 =np.array([[3,-1],[-1,2]])
N1=60   # test samples per class
y111, y211 = np.random.multivariate_normal(M11, SigmaM1, N1).T
z111, z211 = np.random.multivariate_normal(M21, SigmaM2, N1).T
x1p1=y111
x2p1=y211
# discriminant evaluated at the class-1 test samples (priors 0.5/0.5)
h1=0.5*(((x1p1-m11)*z11+(x2p1-m12)*z21)*(x1p1-m11)+((x1p1-m11)*z12+(x2p1-m12)*z22)*(x2p1-m12))-0.5*(((x1p1-m21)*k11+(x2p1-m22)*k21)*(x1p1-m21)+((x1p1-m21)*k12+(x2p1-m22)*k22)*(x2p1-m22))+0.5*np.log(np.linalg.det(Sigma1)/np.linalg.det(Sigma2)) + np.log(P1/P2)
greska1=0
for i in h1:
    # class-1 samples should yield h < 0; positive values are misclassified
    if i>0:
        greska1=greska1+1
x1p2=z111
x2p2=z211
# discriminant evaluated at the class-2 test samples
h2=0.5*(((x1p2-m11)*z11+(x2p2-m12)*z21)*(x1p2-m11)+((x1p2-m11)*z12+(x2p2-m12)*z22)*(x2p2-m12))-0.5*(((x1p2-m21)*k11+(x2p2-m22)*k21)*(x1p2-m21)+((x1p2-m21)*k12+(x2p2-m22)*k22)*(x2p2-m22))+0.5*np.log(np.linalg.det(Sigma1)/np.linalg.det(Sigma2)) + np.log(P1/P2)
greska2=0
for i in h2:
    # class-2 samples should yield h > 0; negative values are misclassified
    if i<0:
        greska2=greska2+1
fig = plt.figure()
plt.plot(y111, y211, '*m',label='Klasa I')
plt.plot(z111, z211, '.c',label='Klasa II')
plt.contour(x1,x2,y,0)
plt.grid(True)
plt.title('Prikaz dve klase u x1 x2 prostoru')
plt.xlabel('x1')
plt.ylabel('x2')
plt.legend()
plt.show()
fig.savefig('zadatak1-testKlasa.png')
# type-1 / type-2 error probabilities (misclassified / total samples)
# NOTE(review): divides by 20.0 although N1=60 samples were drawn per
# class — confirm the intended denominator.
Eps1=((greska1)/20.0)
Eps2=((greska2)/20.0)
P=0.5*Eps1+0.5*Eps2   # overall Bayes error with priors 0.5/0.5
T=(1-P)*100           # classifier accuracy in percent
print(T)
print(greska1,greska2)
########################## TESTING WITH PRIORS P1=0.2, P2=0.8 ###############
x1p2=y111
x2p2=y211
# prior-free discriminant at the class-1 test samples; the prior term is
# applied as a threshold in the comparisons below
h28=0.5*(((x1p2-m11)*z11+(x2p2-m12)*z21)*(x1p2-m11)+((x1p2-m11)*z12+(x2p2-m12)*z22)*(x2p2-m12))-0.5*(((x1p2-m21)*k11+(x2p2-m22)*k21)*(x1p2-m21)+((x1p2-m21)*k12+(x2p2-m22)*k22)*(x2p2-m22))+0.5*np.log(np.linalg.det(Sigma1)/np.linalg.det(Sigma2))
greska281=0
for i in h28:
    # class-1 samples above the threshold are misclassified
    if i>np.log(0.8/0.2):
        greska281=greska281+1
x1p2=z111
x2p2=z211
h28=0.5*(((x1p2-m11)*z11+(x2p2-m12)*z21)*(x1p2-m11)+((x1p2-m11)*z12+(x2p2-m12)*z22)*(x2p2-m12))-0.5*(((x1p2-m21)*k11+(x2p2-m22)*k21)*(x1p2-m21)+((x1p2-m21)*k12+(x2p2-m22)*k22)*(x2p2-m22))+0.5*np.log(np.linalg.det(Sigma1)/np.linalg.det(Sigma2))
greska282=0
for i in h28:
    # class-2 samples below the threshold are misclassified
    if i<np.log(0.8/0.2):
        greska282=greska282+1
fig = plt.figure()
#plt.plot(y111, y211, '*m',label='Klasa I')
plt.plot(z111, z211, '+c',label='Klasa II')
plt.contour(x1,x2,l,0,colors='red')
plt.grid(True)
plt.title('Apriorne verovatnoce P1=0.2/ P2=0.8')
plt.xlabel('x1')
plt.ylabel('x2')
plt.legend()
plt.show()
fig.savefig('zadatak1-0208.png')
# type-1 / type-2 error probabilities (misclassified / total samples)
# NOTE(review): divides by 20.0 although N1=60 samples were drawn — confirm.
Eps1=((greska281)/20.0)
Eps2=((greska282)/20.0)
P=0.2*Eps1+0.8*Eps2   # overall Bayes error with priors 0.2/0.8
T=(1-P)*100           # classifier accuracy in percent
print(T)
print('greska 2/8')
print(greska281,greska282)
########################## TESTING WITH PRIORS P1=0.8, P2=0.2 ###############
x1p2=y111
x2p2=y211
# prior-free discriminant at the class-1 test samples; compared against
# the log prior ratio as a threshold
h82=0.5*(((x1p2-m11)*z11+(x2p2-m12)*z21)*(x1p2-m11)+((x1p2-m11)*z12+(x2p2-m12)*z22)*(x2p2-m12))-0.5*(((x1p2-m21)*k11+(x2p2-m22)*k21)*(x1p2-m21)+((x1p2-m21)*k12+(x2p2-m22)*k22)*(x2p2-m22))+0.5*np.log(np.linalg.det(Sigma1)/np.linalg.det(Sigma2))
greska821=0
for i in h82:
    # class-1 samples above the threshold are misclassified
    if i> np.log(0.2/0.8):
        greska821=greska821+1
x1p2=z111
x2p2=z211
h82=0.5*(((x1p2-m11)*z11+(x2p2-m12)*z21)*(x1p2-m11)+((x1p2-m11)*z12+(x2p2-m12)*z22)*(x2p2-m12))-0.5*(((x1p2-m21)*k11+(x2p2-m22)*k21)*(x1p2-m21)+((x1p2-m21)*k12+(x2p2-m22)*k22)*(x2p2-m22))+0.5*np.log(np.linalg.det(Sigma1)/np.linalg.det(Sigma2))
greska822=0
for i in h82:
    # class-2 samples below the threshold are misclassified
    if i<np.log(0.2/0.8):
        greska822=greska822+1
fig = plt.figure()
plt.plot(y111, y211, '*m',label='Klasa I')
plt.plot(z111, z211, '.c',label='Klasa II')
plt.contour(x1,x2,b,0,colors='blue')
plt.grid(True)
plt.title('Apriorne verovatnoce P1=0.8, P2=0.2')
plt.xlabel('x1')
plt.ylabel('x2')
plt.legend()
plt.show()
fig.savefig('zadatak1-0802.png')
# type-1 / type-2 error probabilities (misclassified / total samples)
# NOTE(review): divides by 20.0 although N1=60 samples were drawn — confirm.
Eps1=((greska821)/20.0)
Eps2=((greska822)/20.0)
P=0.8*Eps1+0.2*Eps2   # overall Bayes error with priors 0.8/0.2
T=(1-P)*100           # classifier accuracy in percent
print(T)
print('greska za 8/2')
print(greska821,greska822)
# Package metadata for django_invoice.
__title__ = "django_invoice"
__summary__ = "Django + Stripe Made Easy"
__uri__ = "https://github.com/josephmisiti/django_invoice/"
__version__ = "0.7.0"
__author__ = "Daniel Greenfeld"
__email__ = "josephmisiti@gmail.com"
# FIX: __license__ was assigned twice; the second assignment silently
# overwrote the short license id with a PyPI trove classifier. Keep the
# short id in __license__ and expose the classifier under its own name.
__license__ = "BSD"
__license_classifier__ = "License :: OSI Approved :: BSD License"
__copyright__ = "Copyright 2015 Daniel Greenfeld"
|
# Copyright 2021 The Layout Parser team and Paddle Detection model
# contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
from typing import Any, Optional
from urllib.parse import urlparse
import tarfile
import uuid
from iopath.common.file_io import PathHandler
from iopath.common.file_io import HTTPURLHandler
from iopath.common.file_io import get_cache_dir, file_lock
from iopath.common.download import download
from ..base_catalog import PathManager
# Dataset name -> {model config name -> downloadable tar of model weights}
MODEL_CATALOG = {
    "PubLayNet": {
        "ppyolov2_r50vd_dcn_365e": "https://paddle-model-ecology.bj.bcebos.com/model/layout-parser/ppyolov2_r50vd_dcn_365e_publaynet.tar",
    },
    "TableBank": {
        "ppyolov2_r50vd_dcn_365e": "https://paddle-model-ecology.bj.bcebos.com/model/layout-parser/ppyolov2_r50vd_dcn_365e_tableBank_word.tar",
        # "ppyolov2_r50vd_dcn_365e_tableBank_latex": "https://paddle-model-ecology.bj.bcebos.com/model/layout-parser/ppyolov2_r50vd_dcn_365e_tableBank_latex.tar",
        # TODO: Train a single tablebank model for paddlepaddle
    },
}
# fmt: off
# Per-dataset label id -> human-readable category name
LABEL_MAP_CATALOG = {
    "PubLayNet": {
        0: "Text",
        1: "Title",
        2: "List",
        3: "Table",
        4: "Figure"},
    "TableBank": {
        0: "Table"
    },
}
# fmt: on
# Paddle model package everything in tar files, and each model's tar file should contain
# the following files in the list:
_TAR_FILE_NAME_LIST = [
    "inference.pdiparams",
    "inference.pdiparams.info",
    "inference.pdmodel",
]
def _get_untar_directory(tar_file: str) -> str:
base_path = os.path.dirname(tar_file)
file_name = os.path.splitext(os.path.basename(tar_file))[0]
target_folder = os.path.join(base_path, file_name)
return target_folder
def _untar_model_weights(model_tar):
    """Extract the model files from *model_tar* into a sibling folder.

    Only members whose names contain an entry of ``_TAR_FILE_NAME_LIST``
    are extracted, and each is flattened into the target folder (archive
    sub-paths are discarded). Extraction is skipped when the target
    folder already contains the weight and model-graph files.

    Returns the target folder path.
    """
    model_dir = _get_untar_directory(model_tar)
    # Presence of the first and third entries (.pdiparams / .pdmodel) is
    # treated as "already extracted"; the .info file is not required.
    if not os.path.exists(
        os.path.join(model_dir, _TAR_FILE_NAME_LIST[0])
    ) or not os.path.exists(os.path.join(model_dir, _TAR_FILE_NAME_LIST[2])):
        # the path to save the decompressed file
        os.makedirs(model_dir, exist_ok=True)
        with tarfile.open(model_tar, "r") as tarobj:
            for member in tarobj.getmembers():
                # Directories/links make extractfile() return None, which
                # previously crashed on .read(); skip them explicitly.
                if not member.isfile():
                    continue
                filename = next(
                    (name for name in _TAR_FILE_NAME_LIST if name in member.name),
                    None,
                )
                if filename is None:
                    continue
                # Close the extracted stream deterministically instead of
                # leaking the file handle.
                with tarobj.extractfile(member) as src, open(
                    os.path.join(model_dir, filename), "wb"
                ) as model_file:
                    model_file.write(src.read())
    return model_dir
def is_cached_folder_exists_and_valid(cached):
    """Return True when the extracted folder for *cached* already exists
    and holds every file required by ``_TAR_FILE_NAME_LIST``."""
    extracted_dir = _get_untar_directory(cached)
    if not os.path.exists(extracted_dir):
        return False
    return all(
        os.path.exists(os.path.join(extracted_dir, required_name))
        for required_name in _TAR_FILE_NAME_LIST
    )
class PaddleModelURLHandler(HTTPURLHandler):
    """
    Supports download and file check for Baidu Cloud links
    """

    # Local filenames longer than this are truncated (with a uuid suffix
    # for uniqueness) to stay under common filesystem name limits.
    MAX_FILENAME_LEN = 250

    def _get_supported_prefixes(self):
        # Only the Baidu BCE bucket hosting the paddle models is handled.
        return ["https://paddle-model-ecology.bj.bcebos.com"]

    def _isfile(self, path):
        # A URL counts as an existing "file" once it has been cached locally.
        return path in self.cache_map

    def _get_local_path(
        self,
        path: str,
        force: bool = False,
        cache_dir: Optional[str] = None,
        **kwargs: Any,
    ) -> str:
        """
        As paddle model stores all files in tar files, we need to extract them
        and get the newly extracted folder path. This function rewrites the base
        function to support the following situations:
        1. If the tar file is not downloaded, it will download the tar file,
            extract it to the target folder, delete the downloaded tar file,
            and return the folder path.
        2. If the extracted target folder is present, and all the necessary model
            files are present (specified in _TAR_FILE_NAME_LIST), it will
            return the folder path.
        3. If the tar file is downloaded, but the extracted target folder is not
            present (or it doesn't contain the necessary files in _TAR_FILE_NAME_LIST),
            it will extract the tar file to the target folder, delete the tar file,
            and return the folder path.
        """
        self._check_kwargs(kwargs)
        if (
            force
            or path not in self.cache_map
            or not os.path.exists(self.cache_map[path])
        ):
            logger = logging.getLogger(__name__)
            parsed_url = urlparse(path)
            dirname = os.path.join(
                get_cache_dir(cache_dir), os.path.dirname(parsed_url.path.lstrip("/"))
            )
            filename = path.split("/")[-1]
            if len(filename) > self.MAX_FILENAME_LEN:
                # Keep a recognizable prefix, append a uuid for uniqueness.
                filename = filename[:100] + "_" + uuid.uuid4().hex
            cached = os.path.join(dirname, filename)
            if is_cached_folder_exists_and_valid(cached):
                # When the cached folder exists and valid, we don't need to
                # redownload the tar file.
                self.cache_map[path] = _get_untar_directory(cached)
            else:
                with file_lock(cached):
                    if not os.path.isfile(cached):
                        logger.info("Downloading {} ...".format(path))
                        cached = download(path, dirname, filename=filename)
                    if path.endswith(".tar"):
                        model_dir = _untar_model_weights(cached)
                        try:
                            os.remove(cached)  # remove the redundant tar file
                            # TODO: remove the .lock file .
                        except OSError:
                            # Was a bare `except:`, which also swallowed
                            # SystemExit/KeyboardInterrupt. Cleanup is
                            # best-effort; a leftover tar file is harmless.
                            logger.warning(
                                f"Not able to remove the cached tar file {cached}"
                            )
                    else:
                        # Non-tar payloads are cached as-is. (Previously
                        # `model_dir` was left unbound on this branch,
                        # raising UnboundLocalError for any non-.tar URL.)
                        model_dir = cached
                    logger.info("URL {} cached in {}".format(path, model_dir))
                    self.cache_map[path] = model_dir
        return self.cache_map[path]
class LayoutParserPaddleModelHandler(PathHandler):
    """
    Resolve anything that's in LayoutParser model zoo.
    """

    PREFIX = "lp://paddledetection/"

    def _get_supported_prefixes(self):
        return [self.PREFIX]

    def _get_local_path(self, path, **kwargs):
        # A model-zoo path looks like:
        #   lp://paddledetection/<dataset>/<model.../parts>/<data_type>
        remainder = path[len(self.PREFIX) :]
        dataset_name, *model_parts, data_type = remainder.split("/")
        if data_type != "weight":
            raise ValueError(f"Unknown data_type {data_type}")
        model_url = MODEL_CATALOG[dataset_name]["/".join(model_parts)]
        return PathManager.get_local_path(model_url, **kwargs)

    def _open(self, path, mode="r", **kwargs):
        return PathManager.open(self._get_local_path(path), mode, **kwargs)
# Register both handlers so PathManager can resolve raw BCE tar URLs as well
# as lp://paddledetection/ model-zoo paths.
PathManager.register_handler(PaddleModelURLHandler())
PathManager.register_handler(LayoutParserPaddleModelHandler())
|
def greatest_number(lst):
    """Return the largest element of *lst*.

    Raises IndexError when *lst* is empty (seeding from the first
    element, as the original did).
    """
    # Seed with the first element so the scan works for any values,
    # including all-negative lists. (Removed the unused `size` local and
    # the redundant `else: continue` branch.)
    my_high = lst[0]
    for item in lst:
        if my_high < item:
            my_high = item
    return my_high
# Read four numbers from stdin, report the maximum, then add a fifth
# number and report again.
# Fix: the original used Python 2 `print` statements, which are syntax
# errors under Python 3; converted to print() calls.
myList = []
for i in range(4):
    myList.append(int(input("enter the number")))
a = greatest_number(myList)
print("greatest of 4 numbers is: ", a)
myList.append(int(input("enter the fifth number")))
a = greatest_number(myList)
print("greatest of 5 numbers is: ", a)
|
def letter_combination(digits: str):
    """Return all letter strings spellable from a phone-keypad digit string.

    Uses the standard telephone keypad mapping for digits 2-9. Raises
    KeyError (via the mapping lookup) for any other digit character.
    Returns an empty list for an empty input.
    """
    # Bug fix: key 7 maps to 'pqrs' on a phone keypad, not 'pqs'.
    letter_mapping = {
        2: list('abc'),
        3: list('def'),
        4: list('ghi'),
        5: list('jkl'),
        6: list('mno'),
        7: list('pqrs'),
        8: list('tuv'),
        9: list('wxyz')
    }

    if not digits:
        # Edge case: no digits -> no combinations (previously returned ['']).
        return []

    res = []

    def _gen_comb(cur_comb, cur_idx):
        # Depth-first expansion: one keypad letter per remaining digit.
        if cur_idx == len(digits):
            res.append(cur_comb)
        else:
            for letter in letter_mapping[int(digits[cur_idx])]:
                _gen_comb(cur_comb + letter, cur_idx + 1)

    _gen_comb('', 0)
    return res
# Demo call; the trailing stray "|" (a paste/extraction artifact) made
# this line a syntax error and has been removed.
print(letter_combination('23'))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.