text stringlengths 38 1.54M |
|---|
import time
from messages import Message
class Command(object):
    """Base class for chat commands; holds the Message being processed."""

    def __init__(self, message=None):
        # Message may also be attached later through the `message` property.
        self._message = message

    @property
    def message(self) -> Message:
        """The Message this command operates on."""
        return self._message

    @message.setter
    def message(self, message: Message):
        self._message = message
class BarkCommand(Command):
    """Command that answers its message with a triple bark."""

    # Keyword this command reacts to (used by CommandFactory dispatch).
    REACTS_TO = 'bark'

    def process(self) -> Message:
        """Attach the bark reply plus a UNIX timestamp and return the message."""
        msg = self._message
        msg.reply = 'BARK!!BARK!!BARK!!'
        msg.reply_generated_at = int(time.time())
        return msg
class CommandFactory(object):
    """Builds Command instances from a command keyword."""

    # Snapshot of all Command subclasses known at class-definition time.
    REGISTERED_COMMANDS = Command.__subclasses__()

    @classmethod
    def build(cls, command: str, message: Message) -> Command:
        """Return an instance of the registered Command whose REACTS_TO
        matches `command`, wrapping `message`.

        Raises:
            NameError: if no registered command reacts to `command`.
        """
        # BUG FIX: previously hard-coded to 'bark', ignoring REGISTERED_COMMANDS;
        # now new Command subclasses are dispatched automatically via REACTS_TO.
        for command_cls in cls.REGISTERED_COMMANDS:
            if getattr(command_cls, 'REACTS_TO', None) == command:
                return command_cls(message)
        raise NameError('Unknown command')
|
from os.path import join
import numpy as np
def write_normalized_spectra(shard_dict, normed_bstar_path):
    """Gather per-order spectrum arrays by source file name and write one
    compressed-key .npz file per source file into `normed_bstar_path`.

    Each saved array is keyed "<order>.<field>" for the fields lin_x, log_y,
    z, l_cutoff_wv and r_cutoff_wv.
    """
    per_file = {}
    for order, shard in shard_dict.items():
        for fname, spectrum in shard.spectra.items():
            arrays = per_file.setdefault(fname, {})
            arrays["{}.lin_x".format(order)] = spectrum.lin_x
            arrays["{}.log_y".format(order)] = spectrum.log_y
            arrays["{}.z".format(order)] = spectrum.z
            arrays["{}.l_cutoff_wv".format(order)] = shard.l_cutoff_wv
            arrays["{}.r_cutoff_wv".format(order)] = shard.r_cutoff_wv
    for fname, arrays in per_file.items():
        # Base name: final path component with its 5-char extension stripped.
        base_name = fname.split("/")[-1][:-5]
        np.savez(join(normed_bstar_path, base_name), **arrays)
|
"""Print the following pattern Pattern for N = 4 1 23 345 4567 """
n = int(input())
for row in range(1, n + 1):
    # Row `row` is padded with (n - row) spaces, then shows `row` consecutive
    # numbers starting at `row` (so row 3 of N=4 prints " 345").
    digits = "".join(str(value) for value in range(row, 2 * row))
    print(" " * (n - row) + digits)
|
#encoding=utf-8
# Wallpaper scraper for zhuoku.com: walks every gallery linked in the
# "latest" (zuixin) section of the front page and downloads all of its
# images into a per-gallery folder under D:\zhuoku.
import requests
from bs4 import BeautifulSoup
import os
from random import choice
from time import sleep

url = "http://www.zhuoku.com/"
# User-Agent pool; a random one is picked per gallery request.
header_list = [
    "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.91 Safari/537.36",
    "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
    "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)"]
header1 = {"User-Agent":"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)"}
# Crude retry: on any failure, try the front page exactly once more.
try:
    r = requests.get(url,headers=header1,timeout=30)
except:
    r = requests.get(url,headers=header1,timeout=30)
# The site serves gb2312; re-encode to UTF-8, dropping undecodable bytes.
res = r.content.decode("gb2312","ignore").encode("utf-8","ignore")
soup = BeautifulSoup(res,"html.parser")
all_a = soup.find("div",id="zuixin").find_all('a',attrs={"class":"title"})
for a in all_a:
    header = {"User-Agent": choice(header_list)}
    title = a.get_text().replace("/",'')  # strip '/' so the title is a valid folder name
    href = a.get("href")
    img_url = url + href[1:-4] + "(1).htm"  # full href of the gallery's first image page
    if os.path.isdir(os.path.join("D:\zhuoku",title)):  # gallery folder already exists
        print("exist" + title)
        pass
    else:
        os.makedirs(os.path.join("D:\zhuoku",title))  # create the gallery folder
        print("makedir" + title)
    # NOTE(review): original indentation was lost; chdir is assumed to run for
    # both branches so existing galleries are still refreshed — confirm intent.
    os.chdir("D:\zhuoku\\" + title)  # switch into the gallery folder
    try:
        img_url_get = requests.get(img_url,headers=header,timeout=30)
    except:
        img_url_get = requests.get(img_url,headers=header,timeout=30)
    sleep(0.5)  # be polite between requests
    img_url_soup = BeautifulSoup(img_url_get.text,"html.parser")
    # The last pager link holds the number of image pages in this gallery.
    max_img_page = img_url_soup.find('div',id="yema").find_all("a")[-1].get_text()
    for page in range(1,int(max_img_page)+1):
        jpg_href = url + href[1:-4] + "(" + str(page) + ").htm" + "#turn"
        try:
            jpg_href_get = requests.get(jpg_href,headers=header,timeout=30)
        except:
            jpg_href_get = requests.get(jpg_href,headers=header,timeout=30)
        sleep(0.5)
        jpg_soup = BeautifulSoup(jpg_href_get.text,"html.parser")
        # Indexing a Tag with ["src"] extracts that attribute's value.
        jpg_url = jpg_soup.find("div",id="bizhiimg").find("img")["src"]
        name = jpg_url[-9:]  # last nine characters of the URL serve as the file name
        if os.path.isfile(name):  # a file with this name already exists
            print(name + " exist skip")
            pass  # skip the download below
        else:
            jpg_header = {
                "Referer": jpg_href,
                "User-Agent":choice(header_list)
            }
            try:
                jpg = requests.get(jpg_url,headers=jpg_header,timeout=30)
            except:
                jpg = requests.get(jpg_url,headers=header,timeout=30)
            sleep(0.5)
            with open(name,'wb') as f:
                f.write(jpg.content)
            print(name+" saved")
print("congratulations! all finished!")
|
#!/usr/bin/python
from job import *
from tool import *
# Demo job: a 300 x 300 x 5 work piece, drilled once with a 6 mm drill.
m = Job(300, 300, 5)
t1 = Tool(TYPE_DRILL)
t1.setDiameter(6)
# Drill at (200, 200) to depth 5 using tool t1.
m.drill(200, 200, 5, t1)
# Export the result; SCAD and SVG are active, other formats kept for reference.
m.export("demo.scad")
m.export("demo.svg")
#m.export("demo.dxf")
#m.export("demo.vcarve")
#m.export("demo.nc")
|
import pandas as pd
import numpy as np
import networkx as nx
import pickle
dataset_edges = pd.read_csv('enrondatasetfinal.csv')
dataset_nodes = pd.read_csv('Enron.true', sep = ';')
node_from = dataset_edges.iloc[:,0].values
node_to = dataset_edges.iloc[:,1].values
num_nodes = len(dataset_nodes)
print num_nodes
# Creare graph
G = nx.Graph()
for i, j in zip(node_from, node_to):
G.add_edge(i, j)
'''
d_bet_cen = dict()
d_bet_cen = nx.betweenness_centrality(G)
output = open('bet_cen_dict.pkl', 'wb')
pickle.dump(d_bet_cen, output)
output.close()
'''
pkl_file = open('bet_cen_dict.pkl','rb')
d_bet_cen =pickle.load(pkl_file)
pkl_file2 = open('closeness_dict.pkl', 'rb')
d_closeness = pickle.load(pkl_file2)
# l = filter(lambda x:x[1]>=0.0000001,d_bet_cen.items())
# print len(l)
# print min(d_bet_cen.items(), key = lambda x:x[1])
# print d_bet_cen
cnt = 0
outfile = open('bet_cen_closeness_enron_data.txt', 'w')
for (k1, v1), (k2, v2) in zip(d_bet_cen.iteritems(), d_closeness.iteritems()):
if v1 > 0.0000001 and v2>0.25 and k1 == k2:
cnt += 1
outfile.write(str(i) + " 1\n")
else:
outfile.write(str(i) + " 0\n")
print cnt
outfile.close()
pkl_file.close()
pkl_file2.close()
|
import os
import pickle
from pathlib import Path
from typing import Sequence, Union
import numpy as np
import skimage.io as io
from skimage.util import img_as_float
from constants import *
# Type aliases used throughout this module.
ToImgArray = Union[os.PathLike, np.ndarray]  # anything convertible to an image array
ZeroOneFloatArray = np.ndarray               # float array scaled to [0, 1]
UbyteArray = np.ndarray                      # uint8 image array
ToPoints = Union[os.PathLike, np.ndarray]    # anything convertible to a point array
Triangle = np.ndarray                        # (3, 2) array of triangle vertices
def to_img_arr(x: ToImgArray) -> np.ndarray:
    """Convert `x` (array or .jpg/.jpeg path) to a float64 image array in [0, 1].

    Raises:
        ValueError: for unsupported path suffixes or unsupported types.
    """
    if isinstance(x, np.ndarray):
        return img_as_float(x).clip(0, 1)
    elif isinstance(x, (str, Path, os.PathLike)):
        x = Path(x)
        # BUG FIX: a path with any other suffix previously fell through and
        # silently returned None; now it raises a clear error.
        if x.suffix not in (".jpeg", ".jpg"):
            raise ValueError(f"Unsupported image suffix {x.suffix!r}")
        img = io.imread(x)
        img = img_as_float(img)
        assert_img_type(img)
        return img
    else:
        raise ValueError(f"Didn't expect type {type(x)}")
def _read_asf_points(path: Path) -> np.ndarray:
    """Parse an .asf annotation file into an (N, 2) array of (row, col) points."""
    # BUG FIX: the file handle was previously never closed.
    with open(path, "r") as asf:
        lines_read = asf.readlines()
    num_pts = int(lines_read[9])  # line 10 of the .asf header holds the point count
    points = []
    for line in lines_read[16:16 + num_pts]:  # point records start at line 17
        data = line.split(" \t")
        c = float(data[2])  # x coordinates = cols
        r = float(data[3])  # y coordinates = rows
        points.append((r, c))
    return np.array(points)


def to_points(x: ToPoints) -> np.ndarray:
    """Convert `x` (array, pickle path, or .asf path) to an (N, 2) point array.

    Raises:
        ValueError: for unsupported path suffixes or unsupported types.
    """
    if isinstance(x, np.ndarray):
        return x
    elif isinstance(x, (str, Path, os.PathLike)):
        x = Path(x)
        if x.suffix in (".pkl", ".p"):
            # BUG FIX: file is now closed after loading.
            with open(x, "rb") as f:
                points = pickle.load(f)
        elif x.suffix == ".asf":
            points = _read_asf_points(x)
        else:
            # BUG FIX: unsupported suffixes previously returned None silently.
            raise ValueError(f"Unsupported points suffix {x.suffix!r}")
        assert_points(points)
        return points
    else:
        raise ValueError(f"Didn't expect type {type(x)}")
def assert_img_type(img: np.ndarray) -> bool:
    """Validate that `img` is a 3-D float64 array with values in [0, 1]."""
    assert isinstance(img, np.ndarray), f"expect ndarray but got {type(img)}"
    assert img.dtype == "float64", img.dtype
    low, high = np.min(img), np.max(img)
    assert low >= 0.0 and high <= 1.0, (low, high)
    assert np.ndim(img) == 3
    return True
def assert_is_triangle(triangle: np.ndarray) -> bool:
    """Validate that `triangle` is a (3, 2) array of non-negative vertices."""
    assert triangle.shape == (3, 2), triangle.shape
    assert np.all(triangle >= 0), triangle
    return True
def assert_indices(indices: np.ndarray) -> bool:
    """Validate an integer (N, 2) index array with no negative entries."""
    assert isinstance(indices, np.ndarray)
    assert indices.dtype == "int"
    assert np.all(indices >= 0)
    assert indices.shape[1] == 2
    return True
def assert_points(points: np.ndarray) -> bool:
    """Validate an (N, 2) array of non-negative point coordinates."""
    assert isinstance(points, np.ndarray)
    assert points.shape[1] == 2
    assert np.all(points >= 0)
    return True
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-05-26 18:27
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration: adds profile fields to the `user` model.

    Adds optional, unique battlenet/iccup/shield_battery handles, a required
    unique discord handle, a StarCraft race choice, and an integer rating
    defaulting to 1200. Do not hand-edit applied migrations.
    """

    dependencies = [
        ('users', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='user',
            name='battlenet',
            field=models.CharField(blank=True, max_length=100, null=True, unique=True),
        ),
        migrations.AddField(
            model_name='user',
            name='discord',
            field=models.CharField(default='', max_length=100, unique=True),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='user',
            name='iccup',
            field=models.CharField(blank=True, max_length=100, null=True, unique=True),
        ),
        migrations.AddField(
            model_name='user',
            name='race',
            field=models.CharField(choices=[('Z', 'Zerg'), ('T', 'Terran'), ('P', 'Protoss'), ('R', 'Random')], default='', max_length=1),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='user',
            name='rating',
            field=models.IntegerField(default=1200),
        ),
        migrations.AddField(
            model_name='user',
            name='shield_battery',
            field=models.CharField(blank=True, max_length=100, null=True, unique=True),
        ),
    ]
|
import aiohttp.web
import asyncio
import json
import re
import typing
import uuid
from . import multi_queue
class Task:
    """One unit of work: a type tag, a payload, and a future for its result."""

    def __init__(self, type: str, result_future: asyncio.Future, payload):
        # task_id is assigned by the router when a worker picks the task up.
        self.task_id: typing.Optional[str] = None
        self.type = type
        self.result_future = result_future
        self.payload = payload
class Api:
    """HTTP task-router API.

    Producers POST tasks and block until a result arrives; workers long-poll
    for tasks, send heartbeats while working, and POST results back. Tasks
    whose heartbeat times out are re-queued for another worker.
    """

    def __init__(self, arguments: dict):
        # Configuration; reads arguments['heartbeat_timeout'] (seconds).
        self.arguments = arguments
        # queued tasks from the task producer (not assigned to any worker)
        self.pending_tasks = multi_queue.MultiQueue()
        # tasks assigned to workers, assigned task ID -> dict with keys
        # 'task' (Task) and 'heartbeat_task' (the running timeout coroutine)
        self.running_tasks = {}

    def add_routes(self, app: aiohttp.web.Application):
        """Register all router endpoints on `app`."""
        app.add_routes([
            aiohttp.web.post(
                '/task/run',
                self.handle_task_run,
            ),
            aiohttp.web.get(
                '/task/get',
                self.handle_task_get,
            ),
            aiohttp.web.post(
                '/task/heartbeat',
                self.handle_task_heartbeat,
            ),
            aiohttp.web.post(
                '/result/set',
                self.handle_result_set,
            ),
        ])

    def json_formatter(self, data):
        # Stable, human-readable JSON for every response body.
        return json.dumps(data, sort_keys=True, indent=4)

    async def handle_task_run(self, request: aiohttp.web.Request):
        '''Task Producer -> Router'''
        # create task from request
        try:
            task_type = request.query['taskType']
        except KeyError:
            raise aiohttp.web.HTTPBadRequest(reason='Missing taskType')
        task = Task(
            type=task_type,
            result_future=asyncio.Future(),
            payload=await request.json(),
        )
        try:
            # put task in pending task queue
            self.pending_tasks.push(task.type, task)
            # wait for result and return it
            return aiohttp.web.json_response(
                await task.result_future,
                dumps=self.json_formatter,
            )
        except asyncio.CancelledError:
            # Producer disconnected: withdraw the task from wherever it is.
            try:
                self.pending_tasks.remove(task.type, task)
            except ValueError:
                # Not pending anymore — a worker may already be running it.
                try:
                    running_task: dict = self.running_tasks[task.task_id]
                except KeyError:
                    return
                del self.running_tasks[task.task_id]
                # Stop the heartbeat timeout and wait for its cancellation.
                running_task['heartbeat_task'].cancel()
                try:
                    await running_task['heartbeat_task']
                except asyncio.CancelledError:
                    pass

    async def heartbeat_timeout_trigger(self, task: Task):
        """After the heartbeat timeout elapses, re-queue the task for a new worker."""
        await asyncio.sleep(self.arguments['heartbeat_timeout'])
        del self.running_tasks[task.task_id]
        self.pending_tasks.push(task.type, task)

    async def handle_task_get(self, request: aiohttp.web.Request):
        '''Worker -> Router'''
        # RFC 7240: the worker states how long it is willing to long-poll.
        try:
            prefer_match = re.fullmatch(
                r'wait=(\d+)', request.headers['Prefer'])
            if not prefer_match:
                raise aiohttp.web.HTTPBadRequest(
                    reason='Malformed Prefer header')
        except KeyError:
            raise aiohttp.web.HTTPBadRequest(reason='Missing Prefer header')
        timeout = int(prefer_match.group(1))
        # retrieve task of given types
        try:
            task_types = request.query.getall('taskType')
        except KeyError:
            raise aiohttp.web.HTTPBadRequest(reason='Missing taskType')
        try:
            task: Task = await asyncio.wait_for(self.pending_tasks.pop(task_types), timeout)
        except asyncio.TimeoutError:
            raise aiohttp.web.HTTPNoContent(
                reason='Prefer timeout before task availability')
        # move task into running_tasks and start heartbeat timeout
        task.task_id = str(uuid.uuid4())
        self.running_tasks[task.task_id] = {
            'task': task,
            'heartbeat_task': asyncio.create_task(
                self.heartbeat_timeout_trigger(
                    task,
                ),
            ),
        }
        # return task to worker
        return aiohttp.web.json_response(
            {
                'taskType': task.type,
                'taskId': task.task_id,
                'payload': task.payload,
            },
            dumps=self.json_formatter,
        )

    async def handle_task_heartbeat(self, request: aiohttp.web.Request):
        '''Worker -> Router'''
        # extract task id
        try:
            task_id = request.query['taskId']
        except KeyError:
            raise aiohttp.web.HTTPBadRequest(reason='Missing taskId')
        # get running task
        try:
            running_task: dict = self.running_tasks[task_id]
        except KeyError:
            raise aiohttp.web.HTTPNotFound(reason='Task with taskId not found')
        # cancel and restart heartbeat timeout
        running_task['heartbeat_task'].cancel()
        try:
            await running_task['heartbeat_task']
        except asyncio.CancelledError:
            pass
        running_task['heartbeat_task'] = asyncio.create_task(
            self.heartbeat_timeout_trigger(
                running_task['task'],
            ),
        )
        raise aiohttp.web.HTTPOk()

    async def handle_result_set(self, request: aiohttp.web.Request):
        '''Worker -> Router'''
        # extract task id
        try:
            task_id = request.query['taskId']
        except KeyError:
            raise aiohttp.web.HTTPBadRequest(reason='Missing taskId')
        # get running task
        try:
            running_task: dict = self.running_tasks[task_id]
        except KeyError:
            raise aiohttp.web.HTTPNotFound(reason='Task with taskId not found')
        # set result future of task
        try:
            running_task['task'].result_future.set_result(await request.json())
        except asyncio.InvalidStateError:
            # Future already resolved/cancelled — nothing more to deliver.
            pass
        # re-get running task, because it may be deleted because of a cancellation in the producer request task (context switch)
        try:
            running_task: dict = self.running_tasks[task_id]
        except KeyError:
            # while waiting for this request's JSON body, task got deleted
            raise aiohttp.web.HTTPNotFound(reason='Task with taskId not found')
        # remove task from running, stop heartbeat timeout
        del self.running_tasks[task_id]
        running_task['heartbeat_task'].cancel()
        try:
            await running_task['heartbeat_task']
        except asyncio.CancelledError:
            pass
        raise aiohttp.web.HTTPOk()
|
from tasks import add
from celery import group
# Fan out one million add(i, i) subtasks as a single Celery group and block
# until all results are in.
# NOTE(review): r.get() requires a configured result backend and running
# workers; with 1,000,000 subtasks this is a stress test, not a quick demo.
r = group(add.s(i, i) for i in range(1000000)).apply_async()
print(r.get())
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 30 19:42:55 2017
@author: qxd
"""
import tensorflow as tf
import data_preparation
import model_generation
import numpy as np
N_CLASSES = 2      # binary classification (Cat vs. Dog directories below)
IM_W = 256         # input image width in pixels
IM_H = 256         # input image height in pixels
BATCH_SIZE = 50    # images per evaluation batch
CAPACITY = 10000   # input-queue capacity for batch creation
def evaluation():
    """Restore the latest checkpoint and report mean accuracy over the eval set."""
    cat_dir = 'D:\\machine_learning_python\\tensorflow_classification\\PetImages\\Cat\\'
    dog_dir = 'D:\\machine_learning_python\\tensorflow_classification\\PetImages\\Dog\\'
    logs_train_dir = 'D:\\machine_learning_python\\tensorflow_classification\\records\\'
    train_images, train_labels, eval_images, eval_labels = data_preparation.get_data(cat_dir, dog_dir)
    # Number of full batches in the eval set (renamed: `iter` shadowed a builtin).
    num_batches = int(len(eval_images) / BATCH_SIZE)
    eval_batch, eval_label_batch = data_preparation.create_batch(eval_images,
                                                                 eval_labels,
                                                                 IM_W,
                                                                 IM_H,
                                                                 BATCH_SIZE,
                                                                 CAPACITY)
    logits = model_generation.inference(eval_batch, BATCH_SIZE, N_CLASSES)
    accu_batch = model_generation.evaluation_batch(logits, eval_label_batch)
    # BUG FIX: was tf.strain.Saver(), which raises AttributeError at runtime.
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        ckpt = tf.train.get_checkpoint_state(logs_train_dir)
        if ckpt and ckpt.model_checkpoint_path:
            # The global step is the numeric suffix of the checkpoint file name.
            global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
            saver.restore(sess, ckpt.model_checkpoint_path)
            # BUG FIX: format placeholder was '$s' instead of '%s'.
            print('Loading success, global_step is %s' % global_step)
        else:
            print('No checkpoint file found')  # BUG FIX: typo 'fouund'
        accuracy = 0.0
        for i in range(num_batches):
            accuracy += sess.run(accu_batch)
        accuracy = accuracy / num_batches
        print('Accuracy is about %.3f' % accuracy)
|
import datetime
import unittest
from twisted.python.threadable import registerAsIOThread
import mock
import sys
from mock import Mock
from dispersy.candidate import LoopbackCandidate
from dispersy.dispersy import Dispersy
from dispersy.endpoint import ManualEnpoint
from dispersy.member import DummyMember, Member
from market import Global
from market.api import APIMessage
from market.api.api import MarketAPI, STATUS
from market.community.community import MortgageMarketCommunity
from market.community.conversion import MortgageMarketConversion
from market.community.payload import SignedConfirmPayload
from market.database.backends import MemoryBackend
from market.database.database import MarketDatabase
from market.models import DatabaseModel
from market.models.house import House
from market.models.loans import LoanRequest, Mortgage, Campaign, Investment
from market.models.profiles import BorrowersProfile, Profile
from market.models.user import User
class FakeMessage(object):
    """Minimal stand-in for a dispersy message; carries only a payload."""

    def __init__(self, payload):
        self.payload = payload
class FakePayload(object):
    """Bare payload double exposing the attributes the community handlers read.

    NOTE(review): these are class-level (shared) attributes; every test below
    rebinds them per instance before use, so the mutable defaults are never
    actually shared — but keep that in mind when adding tests.
    """
    request = ''
    fields = []
    models = {}
class CommunityTestSuite(unittest.TestCase):
"""Conversion test cases."""
def setUp(self):
    """Build three full dispersy/community/api stacks: borrower, bank, investor."""
    # Faking IOThread
    registerAsIOThread()

    # Object creation and preparation
    self.dispersy = Dispersy(ManualEnpoint(0), unicode("dispersy_temporary"))
    self.dispersy_bank = Dispersy(ManualEnpoint(0), unicode("dispersy_temporary2"))
    self.dispersy_investor = Dispersy(ManualEnpoint(0), unicode("dispersy_temporary3"))

    # a neutral api to generate the initial id's for loan requests and such to skip
    # having to save the loan request to the (sending) user from each test as that
    # isn't relevant.
    self.neutral_api = MarketAPI(MarketDatabase(MemoryBackend()))
    self.api = MarketAPI(MarketDatabase(MemoryBackend()))
    self.api_bank = MarketAPI(MarketDatabase(MemoryBackend()))
    self.api_investor = MarketAPI(MarketDatabase(MemoryBackend()))
    self.api.db.backend.clear()
    self.api_bank.db.backend.clear()
    self.api_investor.db.backend.clear()

    self.user, _, priv_user = self.api.create_user()
    self.bank, _, priv_bank = self.api.create_user()
    self.investor, _, priv_investor = self.api.create_user()

    # save the user to the bank and investor db
    self.user.post_or_put(self.api_bank.db)
    self.bank.post_or_put(self.api_bank.db)
    self.investor.post_or_put(self.api_bank.db)
    self.user.post_or_put(self.api_investor.db)
    self.bank.post_or_put(self.api_investor.db)
    self.investor.post_or_put(self.api_investor.db)

    self.dispersy._database.open()
    self.dispersy_bank._database.open()
    self.dispersy_investor._database.open()

    self.master_member = DummyMember(self.dispersy, 1, "a" * 20)
    self.member = self.dispersy.get_member(private_key=priv_user.decode("HEX"))
    self.member_bank = self.dispersy.get_member(private_key=priv_bank.decode("HEX"))
    self.member_investor = self.dispersy.get_member(private_key=priv_investor.decode("HEX"))

    self.community = MortgageMarketCommunity.init_community(self.dispersy, self.master_member, self.member)
    self.community_bank = MortgageMarketCommunity.init_community(self.dispersy_bank, self.master_member, self.member_bank)
    self.community_investor = MortgageMarketCommunity.init_community(self.dispersy_investor, self.master_member, self.member_investor)

    self.community.api = self.api
    self.community.user = self.user
    self.api.community = self.community
    self.community_bank.api = self.api_bank
    self.community_bank.user = self.bank
    # BUG FIX: the next two back-references previously re-assigned
    # self.api.community (copy-paste), leaving api_bank and api_investor
    # without a community reference.
    self.api_bank.community = self.community_bank
    self.community_investor.api = self.api_investor
    self.community_investor.user = self.investor
    self.api_investor.community = self.community_investor

    # Add our conversion to the community.
    self.conversion = MortgageMarketConversion(self.community)
    self.dispersy_mock = Mock()
    self.dispersy_mock.store_update_forward.return_value = True

    self.setupModels()
def setupModels(self):
    """Create the shared model fixtures, persisted through the neutral api
    so each test starts with valid ids without polluting the user DBs."""
    self.house = House('2500AA', '34', 'Aa Weg', 1000)
    self.house.post_or_put(self.neutral_api.db)

    self.loan_request = LoanRequest(user_key=self.user.id,
                                    house_id=self.house.id,
                                    house_link='http://www.example.com',
                                    seller_phone_number='06000000',
                                    seller_email='example@email.com',
                                    mortgage_type=1,
                                    banks=[self.bank.id],
                                    description=u'Unicode description',
                                    amount_wanted=10000,
                                    status={}
                                    )
    self.loan_request.post_or_put(self.neutral_api.db)

    self.borrowers_profile = BorrowersProfile(first_name=u'Jebediah', last_name=u'Kerman',
                                              email='exmaple@asdsa.com', iban='sadasdas',
                                              phone_number='213131', current_postal_code='2312AA',
                                              current_house_number='2132', current_address='Damstraat 1',
                                              document_list=[])
    self.borrowers_profile.post_or_put(self.neutral_api.db)

    self.investors_profile = Profile(first_name=u'Jebediah', last_name=u'Kerman', email='exmaple@asdsa.com',
                                     iban='sadasdas', phone_number='213131')
    self.investors_profile.post_or_put(self.neutral_api.db)

    self.mortgage = Mortgage(
        request_id=self.loan_request.id,
        house_id=self.house.id,
        bank=self.bank.id,
        amount=10000,
        mortgage_type=1,
        interest_rate=1.0,
        max_invest_rate=2.0,
        default_rate=3.0,
        duration=60,
        risk='A',
        investors=[],
        status=STATUS.PENDING
    )
    self.mortgage.post_or_put(self.neutral_api.db)

    self.campaign = Campaign(mortgage_id=self.mortgage.id, amount=self.mortgage.amount, end_date=datetime.datetime.now(),
                             completed=False)
    self.campaign.post_or_put(self.neutral_api.db)

    self.investment = Investment(investor_key=self.investor.id, amount=1000, duration=36, interest_rate=2.0,
                                 mortgage_id=self.mortgage.id, status=STATUS.PENDING)
    self.investment.post_or_put(self.neutral_api.db)
def isModelInDB(self, api, model):
    """Return True when `model` is present in `api`'s database."""
    return api.db.get(model.type, model.id) is not None
def remove_from_db(self, model):
    """Delete `model` from this test user's database backend."""
    self.api.db.backend.delete(model)
def remove_payload_models_from_db(self, payload):
    """Delete every model carried in `payload.models` from the user database."""
    for model in payload.models.values():
        self.remove_from_db(model)
def test_init(self):
    """Sanity-check the fixture wiring created in setUp."""
    self.assertIsInstance(self.conversion, MortgageMarketConversion)
    self.assertIsInstance(self.community, MortgageMarketCommunity)
    self.assertIsInstance(self.user, User)
    self.assertIsInstance(self.member, Member)
    # The user's id equals the hex-encoded public key of his dispersy member.
    self.assertEqual(self.user.id, self.member.public_key.encode("HEX"))
def test_master_member(self):
    """The community's master member must carry the global master key."""
    master_member = MortgageMarketCommunity.get_master_members(self.dispersy)[0]
    self.assertEqual(Global.MASTER_KEY, master_member.public_key)
def test_on_loan_request_receive(self):
    """
    Test a user sending a loan request to a bank
    user --> bank
    """
    payload = FakePayload()
    payload.request = APIMessage.LOAN_REQUEST
    payload.models = {self.house.type: self.house, self.loan_request.type: self.loan_request,
                      self.user.type: self.user, self.borrowers_profile.type: self.borrowers_profile}

    # Bank doesn't have them yet
    self.assertFalse(self.isModelInDB(self.api_bank, self.loan_request))
    self.assertFalse(self.isModelInDB(self.api_bank, self.borrowers_profile))
    self.assertFalse(self.isModelInDB(self.api_bank, self.house))

    self.community_bank.on_loan_request_receive(payload)

    # After the handler runs, the bank's database holds all payload models.
    self.assertTrue(self.isModelInDB(self.api_bank, self.loan_request))
    self.assertTrue(self.isModelInDB(self.api_bank, self.borrowers_profile))
    self.assertTrue(self.isModelInDB(self.api_bank, self.house))
def test_on_loan_request_reject(self):
    """
    Test a bank rejecting a users loan_request
    bank --> user
    """
    # Save the user-side initial data which is a pending loan request.
    self.loan_request.status[self.bank.id] = STATUS.PENDING
    self.user.loan_request_ids.append(self.loan_request.id)
    self.user.post_or_put(self.api.db)

    self.assertIn(self.loan_request.id, self.user.loan_request_ids)

    # Deep copy the loan request (encode/decode round-trip) so the bank's
    # rejected copy is a distinct object from the user's pending one.
    loan_request_bank = DatabaseModel.decode(self.loan_request.encode())
    loan_request_bank.status[self.bank.id] = STATUS.REJECTED

    # Make the payload
    payload = FakePayload()
    payload.request = APIMessage.LOAN_REQUEST_REJECT
    payload.models = {self.loan_request.type: loan_request_bank,
                      self.user.type: self.bank}

    self.community.on_loan_request_reject(payload)

    # Now let's pull the loan request from the user database
    self.assertTrue(self.isModelInDB(self.api, loan_request_bank))
    loan_request = self.api.db.get(loan_request_bank.type, loan_request_bank.id)
    self.assertEqual(loan_request.status[self.bank.id], STATUS.REJECTED)
    # The rejected request must be unlinked from the user.
    self.assertNotIn(self.loan_request.id, self.user.loan_request_ids)
def test_on_mortgage_offer(self):
    """
    Test a bank sending a mortgage offer to a user
    bank -> user
    """
    payload = FakePayload()
    payload.request = APIMessage.MORTGAGE_OFFER
    payload.models = {self.loan_request.type: self.loan_request,
                      self.mortgage.type: self.mortgage}

    self.loan_request.status[self.bank.id] = STATUS.ACCEPTED
    self.mortgage.status = STATUS.ACCEPTED

    self.assertFalse(self.isModelInDB(self.api, self.loan_request))
    self.assertFalse(self.isModelInDB(self.api, self.mortgage))

    self.community.on_mortgage_offer(payload)

    # The user now has the models.
    self.assertTrue(self.isModelInDB(self.api, self.loan_request))
    self.assertTrue(self.isModelInDB(self.api, self.mortgage))

    # Check if the mortgage id is in the user
    self.user.update(self.api.db)
    self.assertIn(self.mortgage.id, self.user.mortgage_ids)
    self.assertEqual(self.api.db.get(self.mortgage.type, self.mortgage.id).status, STATUS.ACCEPTED)
def test_on_mortgage_accept(self):
    """
    Test a user accepting a mortgage
    user -> bank
    user -> investor
    """
    payload = FakePayload()

    # Fake the signing time so the incoming models win any freshness check.
    self.loan_request._time_signed = sys.maxint
    self.mortgage._time_signed = sys.maxint
    self.user._time_signed = sys.maxint
    self.campaign._time_signed = sys.maxint
    self.house._time_signed = sys.maxint

    payload.request = APIMessage.MORTGAGE_ACCEPT_UNSIGNED
    payload.models = {self.loan_request.type: self.loan_request,
                      self.mortgage.type: self.mortgage,
                      self.user.type: self.user,
                      self.campaign.type: self.campaign,
                      self.house.type: self.house
                      }

    self.loan_request.status[self.bank.id] = STATUS.ACCEPTED
    self.mortgage.status = STATUS.ACCEPTED
    self.user.campaign_ids.append(self.campaign.id)
    self.user.mortgage_ids.append(self.mortgage.id)
    self.user.loan_request_ids.append(self.loan_request.id)

    self.community_bank.on_mortgage_accept_signed(payload)
    self.community_investor.on_mortgage_accept_unsigned(payload)

    # The bank now has the models.
    self.assertTrue(self.isModelInDB(self.api_bank, self.mortgage))
    self.assertTrue(self.isModelInDB(self.api_bank, self.campaign))
    # The loan request isn't sent to the bank
    self.assertFalse(self.isModelInDB(self.api_bank, self.loan_request))

    # The investor has the models.
    self.assertTrue(self.isModelInDB(self.api_investor, self.loan_request))
    self.assertTrue(self.isModelInDB(self.api_investor, self.mortgage))
    self.assertTrue(self.isModelInDB(self.api_investor, self.campaign))
    # And knowledge of the campaign.
    user_from_inv_db = self.api_investor.db.get(self.user.type, self.user.id)
    self.assertIn(self.campaign.id, user_from_inv_db.campaign_ids)

    # Check if the campaign has been added to the bank
    self.bank.update(self.api_bank.db)
    self.assertIn(self.campaign.id, self.bank.campaign_ids)
def test_on_mortgage_reject(self):
    """
    Test a user rejecting a mortgage
    user -> bank
    """
    # Pre-condition: the bank has the mortgage saved with STATUS.PENDING.
    self.mortgage.post_or_put(self.api_bank.db)
    self.bank.mortgage_ids.append(self.mortgage.id)
    self.bank.post_or_put(self.api_bank.db)

    # Fake the signing time so the rejected copy overrides the stored one.
    self.mortgage._time_signed = sys.maxint
    self.user._time_signed = sys.maxint

    # Create the payload
    payload = FakePayload()
    payload.request = APIMessage.MORTGAGE_REJECT
    payload.models = {
        self.mortgage.type: self.mortgage,
        self.user.type: self.user,
    }
    self.mortgage.status = STATUS.REJECTED

    self.community_bank.on_mortgage_reject(payload)

    self.bank.update(self.api_bank.db)
    mortgage = self.api_bank.db.get(self.mortgage.type, self.mortgage.id)
    self.assertEqual(mortgage.status, STATUS.REJECTED)
    # The rejected mortgage must be unlinked from the bank.
    self.assertNotIn(mortgage.id, self.bank.mortgage_ids)
def test_on_investment_offer(self):
    """
    Test an investor sending an investment offer to a borrower
    investor -> user
    """
    payload = FakePayload()
    payload.request = APIMessage.INVESTMENT_OFFER
    payload.models = {self.investor.type: self.investor,
                      self.investment.type: self.investment,
                      self.investors_profile.type: self.investors_profile}

    # Check if user doesn't have the investment yet
    self.assertFalse(self.isModelInDB(self.api, self.investment))

    # Send investment offer message
    self.community.on_investment_offer(payload)

    # Check if the user has the investment
    self.assertTrue(self.isModelInDB(self.api, self.investment))
def test_on_campaign_bid_with_investment(self):
    """
    Test sending a campaign bid
    user -> user
    user -> bank
    user -> investor
    investor -> user
    investor -> bank
    investor -> investor
    """
    payload = FakePayload()
    payload.request = APIMessage.CAMPAIGN_BID
    payload.fields = [User.type, Investment.type, Campaign.type, LoanRequest.type, Mortgage.type, House.type]
    payload.models = {self.user.type: self.user,
                      self.investment.type: self.investment,
                      self.campaign.type: self.campaign,
                      self.loan_request.type: self.loan_request,
                      self.mortgage.type: self.mortgage,
                      self.house.type: self.house}

    # Check that none of the three parties has any of the models yet.
    self.assertFalse(self.isModelInDB(self.api, self.investment))
    self.assertFalse(self.isModelInDB(self.api_bank, self.investment))
    self.assertFalse(self.isModelInDB(self.api_investor, self.investment))
    self.assertFalse(self.isModelInDB(self.api, self.campaign))
    self.assertFalse(self.isModelInDB(self.api_bank, self.campaign))
    self.assertFalse(self.isModelInDB(self.api_investor, self.campaign))
    self.assertFalse(self.isModelInDB(self.api, self.loan_request))
    self.assertFalse(self.isModelInDB(self.api_bank, self.loan_request))
    self.assertFalse(self.isModelInDB(self.api_investor, self.loan_request))
    self.assertFalse(self.isModelInDB(self.api, self.mortgage))
    self.assertFalse(self.isModelInDB(self.api_bank, self.mortgage))
    self.assertFalse(self.isModelInDB(self.api_investor, self.mortgage))
    self.assertFalse(self.isModelInDB(self.api, self.house))
    self.assertFalse(self.isModelInDB(self.api_bank, self.house))
    self.assertFalse(self.isModelInDB(self.api_investor, self.house))

    # Send campaign bid to all three communities.
    self.community.on_campaign_bid(payload)
    self.community_bank.on_campaign_bid(payload)
    self.community_investor.on_campaign_bid(payload)

    # Every party must now hold every model from the payload.
    self.assertTrue(self.isModelInDB(self.api, self.investment))
    self.assertTrue(self.isModelInDB(self.api_bank, self.investment))
    self.assertTrue(self.isModelInDB(self.api_investor, self.investment))
    self.assertTrue(self.isModelInDB(self.api, self.campaign))
    self.assertTrue(self.isModelInDB(self.api_bank, self.campaign))
    self.assertTrue(self.isModelInDB(self.api_investor, self.campaign))
    self.assertTrue(self.isModelInDB(self.api, self.loan_request))
    self.assertTrue(self.isModelInDB(self.api_bank, self.loan_request))
    self.assertTrue(self.isModelInDB(self.api_investor, self.loan_request))
    self.assertTrue(self.isModelInDB(self.api, self.mortgage))
    self.assertTrue(self.isModelInDB(self.api_bank, self.mortgage))
    self.assertTrue(self.isModelInDB(self.api_investor, self.mortgage))
    self.assertTrue(self.isModelInDB(self.api, self.house))
    self.assertTrue(self.isModelInDB(self.api_bank, self.house))
    self.assertTrue(self.isModelInDB(self.api_investor, self.house))
def test_on_campaign_bid_without_investment(self):
    """
    Test sending a campaign bid
    user -> user
    user -> bank
    user -> investor
    investor -> user
    investor -> bank
    investor -> investor
    """
    payload = FakePayload()
    payload.request = APIMessage.CAMPAIGN_BID
    # Same as the test above, but the investment slot carries None.
    payload.models = {self.user.type: self.user,
                      self.investment.type: None,
                      self.campaign.type: self.campaign,
                      self.loan_request.type: self.loan_request,
                      self.mortgage.type: self.mortgage,
                      self.house.type: self.house}

    # Check that none of the three parties has any of the models yet.
    self.assertFalse(self.isModelInDB(self.api, self.investment))
    self.assertFalse(self.isModelInDB(self.api_bank, self.investment))
    self.assertFalse(self.isModelInDB(self.api_investor, self.investment))
    self.assertFalse(self.isModelInDB(self.api, self.campaign))
    self.assertFalse(self.isModelInDB(self.api_bank, self.campaign))
    self.assertFalse(self.isModelInDB(self.api_investor, self.campaign))
    self.assertFalse(self.isModelInDB(self.api, self.loan_request))
    self.assertFalse(self.isModelInDB(self.api_bank, self.loan_request))
    self.assertFalse(self.isModelInDB(self.api_investor, self.loan_request))
    self.assertFalse(self.isModelInDB(self.api, self.mortgage))
    self.assertFalse(self.isModelInDB(self.api_bank, self.mortgage))
    self.assertFalse(self.isModelInDB(self.api_investor, self.mortgage))
    self.assertFalse(self.isModelInDB(self.api, self.house))
    self.assertFalse(self.isModelInDB(self.api_bank, self.house))
    self.assertFalse(self.isModelInDB(self.api_investor, self.house))

    # Send campaign bid to all three communities.
    self.community.on_campaign_bid(payload)
    self.community_bank.on_campaign_bid(payload)
    self.community_investor.on_campaign_bid(payload)

    # Every party must now hold every model except the absent investment.
    self.assertTrue(self.isModelInDB(self.api, self.campaign))
    self.assertTrue(self.isModelInDB(self.api_bank, self.campaign))
    self.assertTrue(self.isModelInDB(self.api_investor, self.campaign))
    self.assertTrue(self.isModelInDB(self.api, self.loan_request))
    self.assertTrue(self.isModelInDB(self.api_bank, self.loan_request))
    self.assertTrue(self.isModelInDB(self.api_investor, self.loan_request))
    self.assertTrue(self.isModelInDB(self.api, self.mortgage))
    self.assertTrue(self.isModelInDB(self.api_bank, self.mortgage))
    self.assertTrue(self.isModelInDB(self.api_investor, self.mortgage))
    self.assertTrue(self.isModelInDB(self.api, self.house))
    self.assertTrue(self.isModelInDB(self.api_bank, self.house))
    self.assertTrue(self.isModelInDB(self.api_investor, self.house))
    def test_on_investment_accept(self):
        """
        Test a user accepting an investment
        user -> investor
        """
        # Pre-condition. Investor has the investment saved with status.PENDING
        self.investment.post_or_put(self.api_investor.db)
        self.investor.investment_ids.append(self.investment.id)
        self.investor.post_or_put(self.api_investor.db)
        # Fake the signing times so the incoming models count as newest
        self.investment._time_signed = sys.maxint
        self.user._time_signed = sys.maxint
        self.borrowers_profile._time_signed = sys.maxint
        # Create the payload
        payload = FakePayload()
        payload.request = APIMessage.INVESTMENT_ACCEPT
        payload.models = {self.user.type: self.user,
                          self.investment.type: self.investment,
                          self.borrowers_profile.type: self.borrowers_profile}
        self.investment.status = STATUS.ACCEPTED
        self.community_investor.on_investment_accept(payload)
        self.investor.update(self.api_investor.db)
        # The stored investment is now accepted and still linked to the investor
        investment = self.api_investor.db.get(self.investment.type, self.investment.id)
        self.assertEqual(investment.status, STATUS.ACCEPTED)
        self.assertIn(investment.id, self.investor.investment_ids)
    def test_on_investment_reject(self):
        """
        Test a user rejecting an investment
        user -> investor
        """
        # Pre-condition. Investor has the investment saved with status.PENDING
        self.investment.post_or_put(self.api_investor.db)
        self.investor.investment_ids.append(self.investment.id)
        self.investor.post_or_put(self.api_investor.db)
        # Fake the signing time
        self.user._time_signed = sys.maxint
        self.investment._time_signed = sys.maxint
        # Create the payload
        payload = FakePayload()
        payload.request = APIMessage.INVESTMENT_REJECT
        payload.models = {self.user.type: self.user,
                          self.investment.type: self.investment}
        self.investment.status = STATUS.REJECTED
        self.community_investor.on_investment_reject(payload)
        self.investor.update(self.api_investor.db)
        # The stored investment is rejected and unlinked from the investor
        investment = self.api_investor.db.get(self.investment.type, self.investment.id)
        self.assertEqual(investment.status, STATUS.REJECTED)
        self.assertNotIn(investment.id, self.investor.investment_ids)
    @mock.patch('dispersy.dispersy.Dispersy.store_update_forward')
    def test_send_community_message(self, patch):
        """A community-wide API message must reach Dispersy's
        store_update_forward with the request name and the
        store/update/forward flags passed through unchanged."""
        self.assertFalse(patch.called)
        # Set them as false to override the defaults
        store = update = forward = False
        message_name = APIMessage.MORTGAGE_OFFER.value
        self.community.send_api_message_community(message_name, [self.loan_request.type],
                                                  {self.loan_request.type: self.loan_request}, store, update, forward)
        self.assertTrue(patch.called)
        # Inspect the positional arguments Dispersy was called with:
        # args[0] is the list of messages, args[1:4] the three flags.
        args, kwargs = patch.call_args
        self.assertEqual(type(args[0]), list)
        self.assertEqual(args[0][0].payload.request, message_name)
        self.assertEqual(args[1], store)
        self.assertEqual(args[2], update)
        self.assertEqual(args[3], forward)
    @mock.patch('dispersy.dispersy.Dispersy.store_update_forward')
    def test_send_candidate_message(self, patch):
        """A candidate-addressed API message must reach Dispersy's
        store_update_forward with the request name and flags intact."""
        self.assertFalse(patch.called)
        # Set them as false to override the defaults
        store = update = forward = False
        message_name = APIMessage.MORTGAGE_OFFER.value
        candidates = (LoopbackCandidate(),)
        self.community.send_api_message_candidate(message_name, [self.loan_request.type],
                                                  {self.loan_request.type: self.loan_request}, candidates, store, update, forward)
        self.assertTrue(patch.called)
        # args[0] is the list of messages, args[1:4] the three flags.
        args, kwargs = patch.call_args
        self.assertEqual(type(args[0]), list)
        message = args[0][0]
        self.assertEqual(message.payload.request, message_name)
        self.assertEqual(args[1], store)
        self.assertEqual(args[2], update)
        self.assertEqual(args[3], forward)
    @mock.patch('dispersy.dispersy.Dispersy.store_update_forward')
    def test_send_introduce_user(self, patch):
        """An introduce_user message must be handed to Dispersy with the
        store/update/forward flags passed through unchanged."""
        self.assertFalse(patch.called)
        # Set them as false to override the defaults
        store = update = forward = False
        message_name = u"introduce_user"
        candidate = LoopbackCandidate()
        self.community.send_introduce_user([self.user.type], {self.user.type: self.user}, candidate, store, update, forward)
        self.assertTrue(patch.called)
        # args[0] is the list of messages, args[1:4] the three flags.
        args, kwargs = patch.call_args
        self.assertEqual(type(args[0]), list)
        message = args[0][0]
        self.assertEqual(message.name, message_name)
        self.assertEqual(args[1], store)
        self.assertEqual(args[2], update)
        self.assertEqual(args[3], forward)
    @mock.patch('market.database.database.MarketDatabase.post')
    @mock.patch('dispersy.dispersy.Dispersy.store_update_forward')
    def test_on_user_introduction(self, store_patch, api_patch):
        """Receiving an introduce_user message must make the receiving peer
        (the bank) persist the introduced user."""
        # We'll be introducing the user to the bank, so remove the user from the bank first.
        self.api_bank.db.delete(self.user)
        self.assertFalse(store_patch.called)
        # Set them as false to override the defaults
        store = update = forward = False
        message_name = u"introduce_user"
        candidate = LoopbackCandidate()
        self.community.send_introduce_user([self.user.type], {self.user.type: self.user}, candidate)
        self.assertTrue(store_patch.called)
        # Capture the Dispersy message that was produced by the sender.
        args, _= store_patch.call_args
        self.assertEqual(type(args[0]), list)
        message = args[0][0]
        self.assertEqual(message.name, message_name)
        # Receive the user as the bank and check if it is found in the database.
        self.assertIsNone(self.api_bank._get_user(self.user))
        # Send it to the bank
        self.assertFalse(api_patch.called)
        self.community_bank.on_user_introduction([message])
        self.assertTrue(api_patch.called)
        # The bank's database post() was invoked with the introduced user.
        args, _ = api_patch.call_args
        self.assertEqual(self.user.id, args[1].id)
    @mock.patch('market.community.community.MortgageMarketCommunity.create_signature_request')
    @mock.patch('market.community.community.MortgageMarketCommunity._get_latest_hash')
    @mock.patch('market.community.community.MortgageMarketCommunity._get_next_sequence_number')
    @mock.patch('market.community.community.MortgageMarketCommunity.update_signature')
    @mock.patch('market.community.community.MortgageMarketCommunity.persist_signature')
    def test_signature_request_flow(self, persist, update, next_seq, next_hash, create_sig):
        """Publishing a signed confirm request must fail while the
        counter-party has no known candidate and succeed once one is set."""
        persist.return_value = True
        update.return_value = True
        next_seq.return_value = 1
        next_hash.return_value = 'hasdhashdsa'
        create_sig.return_value = True
        # Attempt to sign without having a user candidate
        self.assertFalse(self.community_bank.publish_signed_confirm_request_message(self.user.id, self.mortgage))
        # Set the candidate for the user
        candidate = LoopbackCandidate()
        candidate.associate(self.member)
        self.api_bank.user_candidate[self.user.id] = candidate
        # Retry now that the candidate is known; a signature request is created
        self.assertTrue(self.community_bank.publish_signed_confirm_request_message(self.user.id, self.mortgage))
        self.assertTrue(create_sig.called)
    @mock.patch('market.community.community.MortgageMarketCommunity.create_signature_request')
    @mock.patch('market.community.community.MortgageMarketCommunity._get_latest_hash')
    @mock.patch('market.community.community.MortgageMarketCommunity._get_next_sequence_number')
    @mock.patch('market.community.community.MortgageMarketCommunity.update_signature')
    @mock.patch('market.community.community.MortgageMarketCommunity.persist_signature')
    def test_create_signed_confirm_request_message(self, persist, update, next_seq, next_hash, create_sig):
        """Walk the full signed-confirm flow: request creation (benefactor),
        counter-signing (beneficiary), response allowance and finally the
        signature update on response receipt."""
        persist.return_value = True
        update.return_value = True
        next_seq.return_value = 1
        next_hash.return_value = 'hasdhashdsa'
        create_sig.return_value = True
        # Save the agreement for the user
        self.mortgage.post_or_put(self.api.db)
        self.loan_request.post_or_put(self.api_bank.db)
        # Set the candidate for the user
        candidate = LoopbackCandidate()
        candidate.associate(self.member)
        self.api_bank.user_candidate[self.user.id] = candidate
        # Create the request: it carries the benefactor-side agreement and is
        # persisted (not yet updated).
        message = self.community_bank.create_signed_confirm_request_message(candidate, self.mortgage)
        self.assertEqual(message.name, u"signed_confirm")
        self.assertEqual(message.payload.agreement_benefactor, self.mortgage)
        self.assertEqual(message.payload.benefactor, self.bank.id)
        self.assertTrue(next_hash.called)
        self.assertTrue(next_seq.called)
        self.assertTrue(persist.called)
        self.assertFalse(update.called)
        persist.reset_mock()
        next_hash.reset_mock()
        next_seq.reset_mock()
        # The receiving side counter-signs: same name/benefactor, but its own
        # beneficiary fields differ from the request's.
        message2 = self.community.allow_signed_confirm_request(message)
        self.assertTrue(next_hash.called)
        self.assertTrue(next_seq.called)
        self.assertTrue(persist.called)
        self.assertFalse(update.called)
        self.assertEqual(message.name, message2.name)
        self.assertEqual(message.payload.benefactor, message2.payload.benefactor)
        self.assertNotEqual(message.payload.beneficiary, message2.payload.beneficiary)
        # Finally check if the update call works
        persist.reset_mock()
        next_hash.reset_mock()
        next_seq.reset_mock()
        # Allowing the response must not touch persistence or sequence state.
        self.assertTrue(self.community_bank.allow_signed_confirm_response(message, message2, True))
        self.assertFalse(next_hash.called)
        self.assertFalse(next_seq.called)
        self.assertFalse(persist.called)
        self.assertFalse(update.called)
        # Receiving the response triggers the signature update.
        self.community_bank.received_signed_confirm_response([message2])
        self.assertTrue(update.called)
    def tearDown(self):
        # Close (and thereby unlock) all three dispersy databases so other
        # tests in the suite can open them again.
        self.dispersy._database.close()
        self.dispersy_bank._database.close()
        self.dispersy_investor._database.close()
class IncomingQueueTestCase(unittest.TestCase):
    """Checks that every APIMessage placed on the incoming queue is routed to
    the matching community handler."""

    def setUp(self):
        self.api = MarketAPI(MarketDatabase(MemoryBackend()))
        # Named `community_mock` so it does not shadow the `mock` module used
        # elsewhere in this file.
        community_mock = Mock()
        self.api.community = community_mock
        self.api.incoming_queue.assign_message_handlers(community_mock)
        community_mock.on_loan_request_receive.return_value = True
        community_mock.on_loan_request_reject.return_value = True
        community_mock.on_mortgage_accept_signed.return_value = True
        community_mock.on_mortgage_accept_unsigned.return_value = True
        community_mock.on_investment_accept.return_value = True
        community_mock.on_mortgage_reject.return_value = True
        community_mock.on_investment_reject.return_value = True
        community_mock.on_mortgage_offer.return_value = True
        community_mock.on_investment_offer.return_value = True

    def _push_and_process(self, request):
        """Queue a fake message carrying `request` (empty models) and run the
        incoming queue once."""
        payload = FakePayload()
        payload.request = request
        payload.models = {}
        self.api.incoming_queue._queue.append(FakeMessage(payload))
        self.api.incoming_queue.process()

    def test_incoming_loan_request(self):
        self._push_and_process(APIMessage.LOAN_REQUEST)
        self.assertTrue(self.api.community.on_loan_request_receive.called)

    def test_incoming_loan_request_reject(self):
        self._push_and_process(APIMessage.LOAN_REQUEST_REJECT)
        self.assertTrue(self.api.community.on_loan_request_reject.called)

    def test_incoming_mortgage_accept_signed(self):
        self._push_and_process(APIMessage.MORTGAGE_ACCEPT_SIGNED)
        self.assertTrue(self.api.community.on_mortgage_accept_signed.called)

    def test_incoming_mortgage_accept_unsigned(self):
        self._push_and_process(APIMessage.MORTGAGE_ACCEPT_UNSIGNED)
        self.assertTrue(self.api.community.on_mortgage_accept_unsigned.called)

    def test_incoming_investment_accept(self):
        self._push_and_process(APIMessage.INVESTMENT_ACCEPT)
        self.assertTrue(self.api.community.on_investment_accept.called)

    def test_incoming_investment_offer(self):
        self._push_and_process(APIMessage.INVESTMENT_OFFER)
        self.assertTrue(self.api.community.on_investment_offer.called)

    def test_incoming_investment_reject(self):
        self._push_and_process(APIMessage.INVESTMENT_REJECT)
        self.assertTrue(self.api.community.on_investment_reject.called)

    def test_incoming_mortgage_reject(self):
        self._push_and_process(APIMessage.MORTGAGE_REJECT)
        self.assertTrue(self.api.community.on_mortgage_reject.called)

    def test_incoming_mortgage_offer(self):
        self._push_and_process(APIMessage.MORTGAGE_OFFER)
        self.assertTrue(self.api.community.on_mortgage_offer.called)

    def test_api_message_handlers_in_queue(self):
        """Every defined APIMessage must have a registered queue handler."""
        handler = self.api.incoming_queue.handler
        for message in list(APIMessage):
            self.assertIn(message, handler,
                          "%s has no handler in the queue but can be sent" % message)
class OutgoingQueueTestCase(unittest.TestCase):
    """Checks that queued outgoing messages go out community-wide when no
    receivers are given, or per candidate once all receivers are known."""

    def setUp(self):
        self.api = MarketAPI(MarketDatabase(MemoryBackend()))
        # Named `community_mock` so it does not shadow the `mock` module used
        # elsewhere in this file.
        community_mock = Mock()
        self.api.community = community_mock
        community_mock.send_api_message_candidate.return_value = True
        community_mock.send_api_message_community.return_value = True

    def test_send_community_message(self):
        """A message with an empty receiver list is broadcast on process()."""
        request = APIMessage.MORTGAGE_OFFER
        fields = ['int']
        models = {'int': 4}
        receivers = []
        self.api.outgoing_queue.push((request, fields, models, receivers))
        # Not called yet
        self.assertFalse(self.api.community.send_api_message_community.called)
        self.api.outgoing_queue.process()
        self.assertTrue(self.api.community.send_api_message_community.called)
        self.api.community.send_api_message_community.assert_called_with(request.value, fields, models)

    def test_send_candidate_message(self):
        """A message with receivers is sent only to receivers with a known
        candidate, and stays queued until every receiver has one."""
        fake_user = User('ss', 1)
        fake_user2 = User('ss2', 2)
        fake_candidate = 'bob_candidate'
        self.api.user_candidate[fake_user.id] = fake_candidate
        request = APIMessage.MORTGAGE_OFFER
        fields = ['int']
        models = {'int': 4}
        receivers = [fake_user, fake_user2]
        self.api.outgoing_queue.push((request, fields, models, receivers))
        # Not called yet
        self.assertFalse(self.api.community.send_api_message_candidate.called)
        self.api.outgoing_queue.process()
        self.assertTrue(self.api.community.send_api_message_candidate.called)
        self.api.community.send_api_message_candidate.assert_called_with(request.value, fields, models, tuple([fake_candidate]))
        # Confirm that the message is still in the queue since fake_user2 has no candidate.
        self.assertIn((request, fields, models, receivers), self.api.outgoing_queue._queue)
        # Reset for the next part
        self.api.community.reset_mock()
        self.assertFalse(self.api.community.send_api_message_candidate.called)
        # Add a candidate for fake_user2 and process messages
        fake_candidate2 = 'bob_candidate2'
        self.api.user_candidate[fake_user2.id] = fake_candidate2
        self.api.outgoing_queue.process()
        self.assertTrue(self.api.community.send_api_message_candidate.called)
        self.api.community.send_api_message_candidate.assert_called_with(request.value, fields, models, tuple([fake_candidate2]))
        # Confirm that the message is gone
        self.assertNotIn((request, fields, models, receivers), self.api.outgoing_queue._queue)
class ConversionTestCase(unittest.TestCase):
    """Round-trip tests for MortgageMarketConversion: each payload type must
    decode back to an equal payload after encoding."""

    def setUp(self):
        # Faking IOThread
        registerAsIOThread()
        # Object creation and preparation
        self.dispersy = Dispersy(ManualEnpoint(0), unicode("dispersy_temporary"))
        self.api = MarketAPI(MarketDatabase(MemoryBackend()))
        self.api.db.backend.clear()
        user, _, priv = self.api.create_user()
        self.bank, _, _ = self.api.create_user()
        self.user = user
        self.private_key = priv
        self.dispersy._database.open()
        self.master_member = DummyMember(self.dispersy, 1, "a" * 20)
        self.member = self.dispersy.get_member(private_key=self.private_key.decode("HEX"))
        self.community = MortgageMarketCommunity.init_community(self.dispersy, self.master_member, self.member)
        self.community.api = self.api
        self.community.user = self.user
        self.api.community = self.community
        # Add our conversion to the community, replacing the defaults so the
        # encode/decode calls below hit exactly this conversion instance.
        self.conversion = MortgageMarketConversion(self.community)
        self.community._conversions = []
        self.community.add_conversion(self.conversion)
        self.setupModels()

    def setupModels(self):
        """Persist the house, loan request and borrower profile used as
        payload models in the round-trip tests."""
        self.house = House('2500AA', '34', 'Aa Weg', 1000)
        self.house.post_or_put(self.api.db)
        self.loan_request = LoanRequest(user_key=self.user.id,
                                        house_id=self.house.id,
                                        house_link='http://www.example.com',
                                        seller_phone_number='06000000',
                                        seller_email='example@email.com',
                                        mortgage_type=1,
                                        banks=[self.bank.id],
                                        description=u'Unicode description',
                                        amount_wanted=10000,
                                        status={}
                                        )
        self.loan_request.post_or_put(self.api.db)
        self.profile = BorrowersProfile(first_name=u'Jebediah', last_name=u'Kerman', email='exmaple@asdsa.com', iban='sadasdas',
                                        phone_number='213131', current_postal_code='2312AA',
                                        current_house_number='2132', current_address='Damstraat 1', document_list=[])
        self.profile.post_or_put(self.api.db)

    def test_encode_introduce_user(self):
        """introduce_user payloads survive an encode/decode round trip."""
        meta = self.community.get_meta_message(u"introduce_user")
        message = meta.impl(authentication=(self.member,),
                            distribution=(self.community.claim_global_time(),),
                            payload=([self.user.type], {self.user.type: self.user}),
                            destination=(LoopbackCandidate(),))
        encoded_message = self.conversion._encode_model(message)[0]
        decoded_payload = self.conversion._decode_model(message, 0, encoded_message)[1]
        self.assertEqual(message.payload.fields, decoded_payload.fields)
        self.assertEqual(message.payload.models, decoded_payload.models)

    def test_encode_api_request_community(self):
        """api_message_community payloads survive a round trip."""
        meta = self.community.get_meta_message(u"api_message_community")
        message = meta.impl(authentication=(self.member,),
                            distribution=(self.community.claim_global_time(),),
                            payload=(APIMessage.MORTGAGE_OFFER.value, [self.user.type], {self.user.type: self.user},),
                            destination=(LoopbackCandidate(),))
        encoded_message = self.conversion._encode_api_message(message)[0]
        decoded_payload = self.conversion._decode_api_message(message, 0, encoded_message)[1]
        self.assertEqual(message.payload.models, decoded_payload.models)

    def test_encode_api_request_candidate(self):
        """api_message_candidate payloads survive a round trip."""
        meta = self.community.get_meta_message(u"api_message_candidate")
        message = meta.impl(authentication=(self.member,),
                            distribution=(self.community.claim_global_time(),),
                            payload=(APIMessage.MORTGAGE_OFFER.value, [self.user.type], {self.user.type: self.user},),
                            destination=(LoopbackCandidate(),))
        encoded_message = self.conversion._encode_api_message(message)[0]
        decoded_payload = self.conversion._decode_api_message(message, 0, encoded_message)[1]
        self.assertEqual(message.payload.models, decoded_payload.models)

    def test_encode_signed_confirm(self):
        """Every field of a signed_confirm payload survives a round trip."""
        payload_list = []
        for k in range(1, 12):
            payload_list.append(None)
        payload_list[0] = self.user.id  # benefactor, 0
        payload_list[1] = self.bank.id  # beneficiary, 1
        payload_list[2] = self.loan_request  # agreement benefactor
        payload_list[3] = None  # agreement beneficiary
        payload_list[4] = 0  # sequence number benefactor
        payload_list[5] = 0  # sequence number beneficiary
        payload_list[6] = 'hashsas'  # previous hash benefactor
        payload_list[7] = 'asdasdas'  # previous hash beneficiary
        payload_list[8] = 'sig1'  # Signature benefactor
        payload_list[9] = 'sig2'  # Signature beneficiary
        payload_list[10] = 324325252  # insert time
        meta = self.community.get_meta_message(u"signed_confirm")
        message = meta.impl(authentication=([self.member, self.member],),
                            distribution=(self.community.claim_global_time(),),
                            payload=tuple(payload_list))
        encoded_message = self.conversion._encode_signed_confirm(message)[0]
        decoded_payload = self.conversion._decode_signed_confirm(message, 0, encoded_message)[1]
        p1 = message.payload
        p2 = decoded_payload
        self.assertIsInstance(p1, SignedConfirmPayload.Implementation)
        self.assertIsInstance(p2, SignedConfirmPayload.Implementation)
        self.assertEqual(p1.agreement_benefactor, p2.agreement_benefactor)
        self.assertEqual(p1.agreement_beneficiary, p2.agreement_beneficiary)
        self.assertEqual(p1.benefactor, p2.benefactor)
        self.assertEqual(p1.beneficiary, p2.beneficiary)
        self.assertEqual(p1.previous_hash_benefactor, p2.previous_hash_benefactor)
        self.assertEqual(p1.previous_hash_beneficiary, p2.previous_hash_beneficiary)
        self.assertEqual(p1.sequence_number_benefactor, p2.sequence_number_benefactor)
        self.assertEqual(p1.sequence_number_beneficiary, p2.sequence_number_beneficiary)
        self.assertEqual(p1.signature_beneficiary, p2.signature_beneficiary)
        # fix: the original compared p1.signature_benefactor with itself,
        # which is always true and never exercised the decoded value
        self.assertEqual(p1.signature_benefactor, p2.signature_benefactor)
        self.assertEqual(p1.insert_time, p2.insert_time)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
# -*- coding=utf-8 -*-
import re
import random
import string
import ConfigParser
def load_cfg():
    # NOTE(review): the bare return below makes the two config-loading lines
    # unreachable, so the module-level `cfg` that get_option() relies on is
    # never defined by this function. This looks like a temporary disable —
    # confirm whether it is intentional before relying on config lookups.
    return
    cfg = ConfigParser.ConfigParser()
    cfg.read(r"D:\PycharmProjects\Hellow\venv\cfg.ini")
# Look up a value from the loaded ini configuration file.
def get_option(key):
    """Return option `key` from section 'cfg1' of the module-level config.

    Returns None explicitly when the option (or section) is missing, instead
    of falling through implicitly.
    """
    if cfg.has_option('cfg1', key):
        return cfg.get('cfg1', key)
    return None
Img_path = get_option("Img_path")
# Random-string helpers: string.ascii_letters contains all letters and
# string.digits contains 0-9.
# print ''.join(random.sample(string.ascii_letters + string.digits,62))
def get_randomText(num):
    """Return a random alphanumeric string of length `num`.

    Characters are drawn independently (with repetition) from ASCII letters
    and digits using random.choice.
    """
    alphabet = string.ascii_letters + string.digits
    return ''.join(random.choice(alphabet) for _ in range(num))
# Append random text at the end of a file.
def changFile(filePath):
    """Append 100000 random alphanumeric characters to the file at `filePath`.

    Uses a context manager so the handle is closed even if the write fails
    (the original closed it manually with no exception safety).
    """
    with open(filePath, 'a') as fileT:
        fileT.write(get_randomText(100000))
# NOTE(review): these statements run at import time and append 100000 random
# characters to a hard-coded local file — consider guarding them with
# `if __name__ == '__main__':` so importing this module has no side effects.
changFile(r"D:\PycharmProjects\Hellow\avatar_bg_1.PNG")
text = get_randomText(5)
print text
def fun_fromwork(path):
    """Print every parenthesized span found in the file at `path`.

    The pattern is applied per line (`.` does not match newlines) and is
    greedy, so each line yields at most one match, spanning from its first
    '(' to its last ')'. The file handle is closed via a context manager
    (the original never closed it).
    """
    rule = r"\(.+\)"
    with open(path) as file_txt:
        function_list = re.findall(rule, file_txt.read())
    for item in function_list:
        print(item)
|
# -*- coding: utf-8 -*-
from twisted.protocols import basic
from twisted.internet import protocol, reactor
from binascii import unhexlify
import psycopg2
from socket import inet_aton
from struct import pack
import crc16
# Bit masks for the f_key status byte of a smartbox packet (byte index 2).
# bit mask - smartbox reports that its memory is empty
f_key_empty_memory = 128
# bit mask - smartbox says it is ready to receive the configuration package
f_key_ready_for_configuration = 64
# bit mask - smartbox says this is a report package
f_key_report = 16
# bit mask - smartbox says the server resent a wrong pin
f_key_wrong_pin = 8
# bit mask - smartbox says something draws current
f_key_draw_current = 4
# bit mask - smartbox says something caused an alarm
f_key_alarm = 2
# bit mask - output state of the smartbox
f_key_output_state = 1
class HTTPEchoProtocol(basic.LineReceiver):
def __init__(self):
self.lines = []
def lineReceived(self, line):
self.lines.append(line)
# print line
self.f_key_interpretation(line)
if not line:
self.sendResponse()
def sendResponse(self, send_package):
# self.sendLine("HTTP/1.1 200 OK")
# self.sendLine("")
responseBody = "Return package : %r\r\n" % send_package
self.transport.write(responseBody)
# self.transport.loseConnection()
def f_key_interpretation(self, data):
# unfortunately python replace escape character \x with \\x so below code convert received string
data_replace_backslash = data.replace("\\x", "")
list_of_bytes = [ord(my_byte) for my_byte in data]
print "%r" %data
# Convert the hex string to string of bytes.
# try:
# data_string_of_bytes = unhexlify(data_replace_backslash)
# except TypeError:
# print "Błędny format danych"
# return
#
# list_of_bytes = bytearray(data_string_of_bytes)
# print "%r" %list_of_bytes
f_key = list_of_bytes[2]
list_of_bytes_send = []
# get id of currently communicating smartbox
my_smart_id_hex = ''.join('{:02x}'.format(x) for x in list_of_bytes[:2])
my_smart_id = int(my_smart_id_hex, 16)
# print f_key
# TODO this will work only if one bit will be set in f_key byte
# first we must check sum control
calculate_sum_control = sum(list_of_bytes[:-2])
receive_sum_control_hex = ''.join('{:02x}'.format(x) for x in list_of_bytes[-2:])
receive_sum_control = int(receive_sum_control_hex, 16)
print 'wyliczona suma kontrolna: %d' % calculate_sum_control
print 'otrzymana suma kontrolna %d' % receive_sum_control
# if calculate_sum_control != receive_sum_control:
# print "Błędna suma kontrolna"
# return
if f_key == f_key_empty_memory:
print "Mam pustą pamięć proszę o paczkę konfiguracji"
# TODO this query is hardcoded,it must be changed
query = "Select ip_adress, port from servers where id = 1"
query_result = self.database_operation(query, "select")
ip = query_result[0][0]
port = query_result[0][1]
ip_byte = bytearray(inet_aton(str(ip)))
# port is saved in two bytes, port varaible in example i equal to 8000
# so port port_byte is write in two bytes, for 8000 is equal to '@\x1f'
# port_byte[0] = 64 , port_byte[1] = 31
port_byte = bytearray(pack('h', port))
# TODO don't know how set 3rd byte
# for now 3rd byte will be resend
# TODO for now nothing change in first 3 byte so maybe mask is not necessary?
# TODO in bytes from 11 to 29 we send information about smartboxes working in current network,
# this must be done later beacouse i dont know how to get this information
# 64 because its means that next package will be configuration package
list_of_bytes_send.extend([list_of_bytes[0], list_of_bytes[1], 64,
list_of_bytes[3], ip_byte[0], ip_byte[1], ip_byte[2],
ip_byte[3], port_byte[0], port_byte[1]])
# list_of_bytes_send is bytearray type which is represented by bytearray(b'\xff\xff')
# to send only bytes we must convert this array and we get only '\xff\xff'
self.sendResponse(bytes(list_of_bytes_send))
if f_key == f_key_ready_for_configuration:
# \x01\xc3\x40\xff\x04\xe2
print "Dobra jestem gotowy na przyjęcie nowej paczki z konfiguracją - dawaj ją!"
# TODO this query is hardcoded,it must be changed
query = "Select ip_adress, port from servers where id = 1"
query_result = self.database_operation(query, "select")
ip = query_result[0][0]
port = query_result[0][1]
ip_byte = bytearray(inet_aton(str(ip)))
# port is saved in two bytes, port varaible in example i equal to 8000
# so port port_byte is write in two bytes, for 8000 is equal to '@\x1f'
# port_byte[0] = 64 , port_byte[1] = 31
port_byte = bytearray(pack('h', port))
# TODO don't know how set 3rd byte
# for now 3rd byte will be resend
# TODO for now nothing change in first 3 byte so maybe mask is not necessary?
# TODO in bytes from 11 to 29 we send information about smartboxes working in current network,
# get first two bytes - smartbox id
smart_id_hex = ''.join(chr(bt) for bt in list_of_bytes[:2])
# convert id to decimal
smart_id = int(smart_id_hex.encode('hex'), 16)
query_network_id = "Select network_id from smartbox_settings where smart_id = %d" % smart_id
# print smart_id
network_id = self.database_operation(query_network_id, "select")[0][0]
query = "Select smart_id, smart_password from smartbox_settings where network_id = %d" % network_id
all_smartboxes_ids = self.database_operation(query, "select")
smartboxes_count = len(all_smartboxes_ids)
# -1 beacuse select return also id of master smartbox
# 128 because its means that this package is the configuration package
list_of_bytes_send.extend([list_of_bytes[0], list_of_bytes[1], 128, list_of_bytes[3], ip_byte[0],
ip_byte[1], ip_byte[2], ip_byte[3], port_byte[0], port_byte[1],
smartboxes_count - 1])
if all_smartboxes_ids:
for ids in all_smartboxes_ids:
# send ids of smartboxes which id is different than currently comunicating smartbox
if ids[0] != my_smart_id:
next_smart_id = bytearray(pack('h', ids[0]))
# in this order because first is send high byte
list_of_bytes_send.extend([next_smart_id[1], next_smart_id[0], int(ids[1])])
# TODO send sum control
# generate sum control by crc16 module
sum_of_bytes = sum(list_of_bytes_send)
sum_control = crc16.crc16xmodem(str(sum_of_bytes))
sum_control_array = bytearray(pack('h', sum_control))
list_of_bytes_send.extend([sum_control_array[1], sum_control_array[0]])
print "%r" % list_of_bytes_send
# list_of_bytes_send_hex = [hex(x) for x in list_of_bytes_send]
self.sendResponse(bytes(list_of_bytes_send))
if f_key == f_key_report:
# \x01\xc3\x10\xff\x04\xe2\xe6\x01\xc4\x01\xff\x04\xe2\xe6\x01\xc5\x01\xff\x04\xe2\xe6\xff\xff
print "Ta paczka to raport na temat sieci i ostatniego połaczenia"
if f_key == f_key_wrong_pin:
print "W naszej poprzedniej rozmowie podałeś błędny PIN"
if f_key == f_key_draw_current:
print "Coś pobiera prąd w mniejszym lub większym stopniu prąd ale jednak"
if f_key == f_key_alarm:
print "Zaistniał alarm z przeciążenia/zwarcie gniazdka"
if f_key == f_key_output_state:
# \x01\xc3\x01\xff\x04\xe2\xe6\x01\xc4\x01\xff\x04\xe2\xe6\x01\xc5\x01\xff\x04\xe2\xe6\xff\xff
count_smartboxes = (len(list_of_bytes)-2)/7
print "Stan wyjścia obecny"
for smartbox in range(count_smartboxes):
smart_id_hex = ''.join(chr(bt) for bt in list_of_bytes[smartbox*7:smartbox*7+2])
# convert id to decimal
smart_id = int(smart_id_hex.encode('hex'), 16)
# 5 and 6 bytes represent power consumption of electric socket which master smartbox is connected to
power_consumption_hex = ''.join(chr(bt) for bt in list_of_bytes[smartbox*7+4:smartbox*7+6])
# convert power consumption to decimal
power_consumption = int(power_consumption_hex.encode('hex'), 16)
# 7 bytes represent voltage of electric socket which master smartbox is connected to
# current_voltage_hex = chr(list_of_bytes[smartbox*7+6])
# convert voltage to decimal
# current_voltage = int(current_voltage_hex.encode('hex'), 16)
current_voltage = list_of_bytes[smartbox*7+6]
print "My smart id: %d" % smart_id
print "Power consumption: %d mA/s" % power_consumption
print "Voltage of electrical socket: %d V" % current_voltage
# build package which will be sending
list_of_bytes_send.extend([list_of_bytes[0], list_of_bytes[1], 128, list_of_bytes[3]])
self.sendResponse(bytes(list_of_bytes_send))
def database_operation(self, query, query_type):
# database connection
conn = psycopg2.connect(database="smartbox", user="postgres", password="postgres", host="127.0.0.1",
port="5432")
cur = conn.cursor()
cur.execute(query)
rows = cur.fetchall()
return rows
conn.close()
class HTTPEchoFactory(protocol.ServerFactory):
    """Factory producing one HTTPEchoProtocol per incoming connection."""

    def buildProtocol(self, addr):
        # `addr` (the peer address) is not needed by the protocol.
        return HTTPEchoProtocol()
# class MyClass(HTTPEchoProtocol):
# def __init__(self, line):
# self.f_key_interpretation(int(line))
#
# def f_key_interpretation(self, line):
# # f_key_mask = 111111110000000000000000
# f_key_mask = 16711680
# f_key = int(line) & f_key_mask
# #TODO this will work only if one bit will be set in f_key byte
# if f_key == f_key_empty_memory:
# print "Mam pustą pamięć proszę o paczkę konfiguracji"
# if f_key == f_key_ready_for_configuration:
# print "Dobra jestem gotowy na przyjęcie nowej paczki z konfiguracją - dawaj ją!"
# if f_key == f_key_report:
# print "Ta paczka to raport na temat sieci i ostatniego połaczenia"
# if f_key == f_key_wrong_pin:
# print "W naszej poprzedniej rozmowie podałeś błędny PIN"
# if f_key == f_key_draw_current:
# print "Coś pobiera prąd w mniejszym lub większym stopniu prąd ale jednak"
# if f_key == f_key_alarm:
# print "Zaistniał alarm z przeciążenia/zwarcie gniazdka"
# if f_key == f_key_output_state:
# print "Stan wyjścia obecny"
# Listen for smartbox connections on TCP port 8880 and run the event loop
# (blocks until the reactor is stopped).
reactor.listenTCP(8880, HTTPEchoFactory())
reactor.run()
|
from flask import Flask
from marshmallow import Schema, fields, pre_load, validate
from flask_marshmallow import Marshmallow
from flask_sqlalchemy import SQLAlchemy
# Shared Flask extension instances created without an app — presumably bound
# to the application elsewhere via init_app() (application-factory pattern);
# confirm against the app setup module.
ma = Marshmallow()
db = SQLAlchemy()
from EPICS_serial_CA import Serial
# Talks to hardware: the EPICS serial record 14IDB:serial3 with a loop-back
# connector attached, so whatever is written comes straight back.
port = Serial("14IDB:serial3")  # loop back connector
# NOTE(review): `string` shadows the stdlib module of the same name.
string = "SET:TEMP 4.000\n"
port.query(string)
# generates reply 'SET:TEMP 4.000\nUT'
from CA import caput
# repr() escapes the trailing newline and the [1:-1] slice strips the quotes,
# so the escaped form survives the EPICS string record.
encoded_string = repr(string)[1:-1]
##caput("14IDB:serial3.AOUT",encoded_string,wait=True)
caput('14IDB:serial3.AOUT','SET:TEMP 4.000\\n',wait=True)
|
def prime(n):
    """Return '<n> is prime' if n is prime, otherwise '<n> not prime'.

    Uses trial division up to sqrt(n) instead of counting every divisor of n,
    so the check is O(sqrt(n)) rather than O(n). Values below 2 (including 0
    and negatives) are not prime, matching the original divisor-count logic.
    """
    if n < 2:
        return f'{n} not prime'
    if n % 2 == 0:
        # 2 is the only even prime.
        return f'{n} is prime' if n == 2 else f'{n} not prime'
    divisor = 3
    while divisor * divisor <= n:
        if n % divisor == 0:
            return f'{n} not prime'
        divisor += 2
    return f'{n} is prime'


print(prime(6))
|
import matplotlib.pyplot as plt
from libpysal.weights.contiguity import Queen
import libpysal as lp
from libpysal import examples
import geopandas as gpd
import numpy as np
from nose.tools import assert_raises, assert_warns
from esda.moran import (Moran_Local, Moran, Moran_BV,
Moran_Local_BV, Moran_BV_matrix)
from splot.esda import (moran_scatterplot,
plot_moran_simulation,
plot_moran,
plot_moran_bv_simulation,
plot_moran_bv,
plot_local_autocorrelation,
lisa_cluster,
moran_facet)
from splot._viz_esda_mpl import (_moran_global_scatterplot,
_moran_loc_scatterplot,
_moran_bv_scatterplot,
_moran_loc_bv_scatterplot)
def _test_data():
    """Load the Guerry example shapefile as a GeoDataFrame."""
    example = examples.load_example('Guerry')
    return gpd.read_file(example.get_path('guerry.shp'))
def test_moran_scatterplot():
    """Smoke-test moran_scatterplot with every esda.moran result type."""
    frame = _test_data()
    suicides = frame['Suicids'].values
    donations = frame['Donatns'].values
    weights = Queen.from_dataframe(frame)
    weights.transform = 'r'
    # One statistic of each supported flavour, constructed in the same
    # order as before so any random draws are unchanged.
    moran = Moran(donations, weights)
    moran_bv = Moran_BV(donations, suicides, weights)
    moran_loc = Moran_Local(donations, weights)
    moran_loc_bv = Moran_Local_BV(donations, suicides, weights)
    # Passing a p value colours points (or triggers warnings).
    fig, _ = moran_scatterplot(moran, p=0.05, aspect_equal=False)
    plt.close(fig)
    fig, _ = moran_scatterplot(moran_loc, p=0.05)
    plt.close(fig)
    fig, _ = moran_scatterplot(moran_bv, p=0.05)
    plt.close(fig)
    fig, _ = moran_scatterplot(moran_loc_bv, p=0.05)
    plt.close(fig)
def test_moran_global_scatterplot():
    """Smoke-test the private global Moran scatterplot helper.

    The original rebuilt `w = Queen.from_dataframe(gdf)` right after
    row-standardising it, silently discarding the `transform = 'r'`
    setting; the weights are now built and standardised exactly once.
    """
    gdf = _test_data()
    y = gdf['Donatns'].values
    # Row-standardised queen-contiguity weights.
    w = Queen.from_dataframe(gdf)
    w.transform = 'r'
    # Calc Global Moran
    moran = Moran(y, w)
    # plot
    fig, _ = _moran_global_scatterplot(moran)
    plt.close(fig)
    # customize
    fig, _ = _moran_global_scatterplot(moran, zstandard=False,
                                       aspect_equal=False,
                                       fitline_kwds=dict(color='#4393c3'))
    plt.close(fig)
def test_plot_moran_simulation():
    """Smoke-test plot_moran_simulation.

    Fixes the original's duplicate `Queen.from_dataframe` call, which
    discarded the row-standardisation applied just before it.
    """
    gdf = _test_data()
    y = gdf['Donatns'].values
    # Row-standardised queen-contiguity weights.
    w = Queen.from_dataframe(gdf)
    w.transform = 'r'
    # Calc Global Moran
    moran = Moran(y, w)
    # plot
    fig, _ = plot_moran_simulation(moran)
    plt.close(fig)
    # customize
    fig, _ = plot_moran_simulation(moran,
                                   fitline_kwds=dict(color='#4393c3'))
    plt.close(fig)
def test_plot_moran():
    """Smoke-test plot_moran.

    Fixes the original's duplicate `Queen.from_dataframe` call, which
    discarded the row-standardisation applied just before it.
    """
    gdf = _test_data()
    y = gdf['Donatns'].values
    # Row-standardised queen-contiguity weights.
    w = Queen.from_dataframe(gdf)
    w.transform = 'r'
    # Calc Global Moran
    moran = Moran(y, w)
    # plot
    fig, _ = plot_moran(moran)
    plt.close(fig)
    # customize
    fig, _ = plot_moran(moran, zstandard=False,
                        aspect_equal=False,
                        fitline_kwds=dict(color='#4393c3'))
    plt.close(fig)
def test_moran_bv_scatterplot():
    """Smoke-test the private bivariate Moran scatterplot helper."""
    frame = _test_data()
    suicides = frame['Suicids'].values
    donations = frame['Donatns'].values
    weights = Queen.from_dataframe(frame)
    weights.transform = 'r'
    # Bivariate Moran statistic (suicides vs donations).
    stat = Moran_BV(suicides, donations, weights)
    # default plot
    fig, _ = _moran_bv_scatterplot(stat)
    plt.close(fig)
    # customised plot
    fig, _ = _moran_bv_scatterplot(stat, aspect_equal=False,
                                   fitline_kwds=dict(color='#4393c3'))
    plt.close(fig)
def test_plot_moran_bv_simulation():
    """Smoke-test plot_moran_bv_simulation on the Guerry data."""
    frame = _test_data()
    suicides = frame['Suicids'].values
    donations = frame['Donatns'].values
    weights = Queen.from_dataframe(frame)
    weights.transform = 'r'
    # Bivariate Moran statistic (suicides vs donations).
    stat = Moran_BV(suicides, donations, weights)
    # default plot
    fig, _ = plot_moran_bv_simulation(stat)
    plt.close(fig)
    # customised plot
    fig, _ = plot_moran_bv_simulation(stat, aspect_equal=False,
                                      fitline_kwds=dict(color='#4393c3'))
    plt.close(fig)
def test_plot_moran_bv():
    """Smoke-test plot_moran_bv on the Guerry data."""
    frame = _test_data()
    suicides = frame['Suicids'].values
    donations = frame['Donatns'].values
    weights = Queen.from_dataframe(frame)
    weights.transform = 'r'
    # Bivariate Moran statistic (suicides vs donations).
    stat = Moran_BV(suicides, donations, weights)
    # default plot
    fig, _ = plot_moran_bv(stat)
    plt.close(fig)
    # customised plot
    fig, _ = plot_moran_bv(stat, aspect_equal=False,
                           fitline_kwds=dict(color='#4393c3'))
    plt.close(fig)
def test_moran_loc_scatterplot():
    """Exercise _moran_loc_scatterplot variants and its error/warning paths."""
    columbus = examples.load_example('Columbus')
    link_to_data = columbus.get_path('columbus.shp')
    df = gpd.read_file(link_to_data)
    x = df['INC'].values
    y = df['HOVAL'].values
    w = Queen.from_dataframe(df)
    w.transform = 'r'
    moran_loc = Moran_Local(y, w)
    moran_bv = Moran_BV(x, y, w)
    # try without p value
    fig, _ = _moran_loc_scatterplot(moran_loc)
    plt.close(fig)
    # try with p value and different figure size
    fig, _ = _moran_loc_scatterplot(moran_loc, p=0.05,
                                    aspect_equal=False,
                                    fitline_kwds=dict(color='#4393c3'))
    plt.close(fig)
    # try with p value and zstandard=False
    fig, _ = _moran_loc_scatterplot(moran_loc, p=0.05, zstandard=False,
                                    fitline_kwds=dict(color='#4393c3'))
    plt.close(fig)
    # try without p value and zstandard=False
    fig, _ = _moran_loc_scatterplot(moran_loc, zstandard=False,
                                    fitline_kwds=dict(color='#4393c3'))
    plt.close(fig)
    # a bivariate result is rejected, and a user-supplied colour with p set warns
    assert_raises(ValueError, _moran_loc_scatterplot, moran_bv, p=0.5)
    assert_warns(UserWarning, _moran_loc_scatterplot, moran_loc, p=0.5,
                 scatter_kwds=dict(c='#4393c3'))
def test_lisa_cluster():
    """Smoke-test lisa_cluster on the Columbus example data."""
    columbus = examples.load_example('Columbus')
    frame = gpd.read_file(columbus.get_path('columbus.shp'))
    values = frame['HOVAL'].values
    weights = Queen.from_dataframe(frame)
    weights.transform = 'r'
    local = Moran_Local(values, weights)
    fig, _ = lisa_cluster(local, frame)
    plt.close(fig)
def test_plot_local_autocorrelation():
    """Exercise plot_local_autocorrelation, including region masking errors."""
    columbus = examples.load_example('Columbus')
    link_to_data = columbus.get_path('columbus.shp')
    df = gpd.read_file(link_to_data)
    y = df['HOVAL'].values
    w = Queen.from_dataframe(df)
    w.transform = 'r'
    moran_loc = Moran_Local(y, w)
    fig, _ = plot_local_autocorrelation(moran_loc, df, 'HOVAL', p=0.05)
    plt.close(fig)
    # also test with quadrant and mask
    fig, _ = plot_local_autocorrelation(moran_loc, df, 'HOVAL', p=0.05,
                                        region_column='POLYID',
                                        aspect_equal=False,
                                        mask=['1', '2', '3'], quadrant=1)
    plt.close(fig)
    # masks naming non-existent region ids must raise
    assert_raises(ValueError, plot_local_autocorrelation, moran_loc,
                  df, 'HOVAL', p=0.05, region_column='POLYID',
                  mask=['100', '200', '300'], quadrant=1)
def test_moran_loc_bv_scatterplot():
    """Exercise _moran_loc_bv_scatterplot and its error/warning paths."""
    gdf = _test_data()
    x = gdf['Suicids'].values
    y = gdf['Donatns'].values
    w = Queen.from_dataframe(gdf)
    w.transform = 'r'
    # Calculate Univariate and Bivariate Moran
    moran_loc = Moran_Local(y, w)
    moran_loc_bv = Moran_Local_BV(x, y, w)
    # try with p value so points are colored
    fig, _ = _moran_loc_bv_scatterplot(moran_loc_bv)
    plt.close(fig)
    # try with p value and different figure size
    fig, _ = _moran_loc_bv_scatterplot(moran_loc_bv, p=0.05,
                                       aspect_equal=False)
    plt.close(fig)
    # a univariate result is rejected; user colour with p set warns
    assert_raises(ValueError, _moran_loc_bv_scatterplot, moran_loc, p=0.5)
    assert_warns(UserWarning, _moran_loc_bv_scatterplot, moran_loc_bv, p=0.5,
                 scatter_kwds=dict(c='r'))
def test_moran_facet():
    """Smoke-test moran_facet on the sids2 example dataset.

    The local name `vars` shadowed the built-in of the same name in the
    original; renamed to `arrays`.
    """
    sids2 = examples.load_example('sids2')
    f = lp.io.open(sids2.get_path('sids2.dbf'))
    varnames = ['SIDR74', 'SIDR79', 'NWR74', 'NWR79']
    arrays = [np.array(f.by_col[var]) for var in varnames]
    w = lp.io.open(examples.get_path("sids2.gal")).read()
    # calculate moran matrix
    moran_matrix = Moran_BV_matrix(arrays, w, varnames=varnames)
    # plot
    fig, axarr = moran_facet(moran_matrix)
    plt.close(fig)
    # customize
    fig, axarr = moran_facet(moran_matrix, scatter_glob_kwds=dict(color='r'),
                             fitline_bv_kwds=dict(color='y'))
    plt.close(fig)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import signal
from PyQt5.QtWidgets import QApplication
from .gui.MainWindow import MainWindow
from .storage.Storage import Storage
from .shared import get_storage
TMP_STORAGE = "/home/nia/Development/_Python/_DCat/ExportA/gz"
class AppQT(object):
    """Qt application wrapper: installs a SIGINT handler, preloads the
    storage from TMP_STORAGE, and creates the main window."""
    def __init__(self):
        # Intercept system signals (translated from Russian: handle Ctrl+C).
        signal.signal(signal.SIGINT, self.__signal_handler)
        get_storage().read_dir(TMP_STORAGE)
        self.app = QApplication(sys.argv)
        self.gui = MainWindow(None)
    def start(self):
        # Enter the Qt event loop; exits the process with Qt's return code.
        sys.exit(self.app.exec_())
    def __signal_handler(self, signum, frame):
        """Handler for the system termination signal (SIGINT/Ctrl+C);
        delegates shutdown to the GUI's exit action."""
        print("перехвачен сигнал SIGINT(Ctrl+C)")
        print("запрос на выход из cmd")
        self.gui.act_exit()
if __name__ == "__main__":
app = AppQT()
app.start() |
#!/usr/bin/python
# Stream a prediction file to a remote collection server over TCP.
import socket
import sys

if len(sys.argv) != 4:
    print("Usage: %s server_ip server_port prediction_file" % (sys.argv[0])) # MUST BE A VALID IP AND VALID PORT
    exit(1)

CHUNK_SIZE = 8 * 1024

SERVER_IP = sys.argv[1]         # e.g. "10.10.55.100"
SERVER_PORT = int(sys.argv[2])  # e.g. 1337

sock = socket.socket()
sock.connect((SERVER_IP, SERVER_PORT))
print("[*] Connected to " + SERVER_IP + "\n")

#filename = "checkpoints/predictions.npy"
filename = sys.argv[3]
with open(filename, "rb") as f:
    data = f.read(CHUNK_SIZE)
    while data:
        # send() may transmit only part of a chunk; sendall() guarantees
        # the whole buffer goes out (the original could drop bytes).
        sock.sendall(data)
        data = f.read(CHUNK_SIZE)

print("Sent predictions!\n")
sock.close()
|
from distutils.core import setup, Extension

# C extension bundling the chained SHA-3 candidate implementations used
# by the x12 proof-of-work hash.
x12_hash_module = Extension('x12_hash',
                            sources = ['x12module.c',
                                       'x12.c',
                                       'sha3/blake.c',
                                       'sha3/bmw.c',
                                       'sha3/groestl.c',
                                       'sha3/jh.c',
                                       'sha3/keccak.c',
                                       'sha3/skein.c',
                                       'sha3/cubehash.c',
                                       'sha3/echo.c',
                                       'sha3/luffa.c',
                                       'sha3/simd.c',
                                       'sha3/hamsi.c',
                                       'sha3/hamsi_helper.c',
                                       'sha3/shavite.c'],
                            include_dirs=['.', './sha3'])

setup (name = 'x12_hash',
       version = '1.0',
       description = 'Bindings for proof of work used by x12',
       ext_modules = [x12_hash_module])
|
# Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: step3 --conditions auto:phase1_2017_realistic -n 10 --era Run2_2017 --eventcontent RECOSIM,MINIAODSIM,DQM --runUnscheduled -s RAW2DIGI,L1Reco,RECO,EI,PAT,VALIDATION:@standardValidation+@miniAODValidation,DQM:@standardDQM+@miniAODDQM --datatier GEN-SIM-RECO,MINIAODSIM,DQMIO --geometry DB:Extended --filein file:step2.root --fileout file:step3.root
import FWCore.ParameterSet.Config as cms
from Configuration.StandardSequences.Eras import eras
# Build the RECO process for the 2017 phase-1 detector era.
process = cms.Process('RECO',eras.Run2_2017)
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('SimGeneral.MixingModule.mixNoPU_cfi')
process.load('Configuration.StandardSequences.MagneticField_cff')
process.load('Configuration.StandardSequences.RawToDigi_cff')
process.load('Configuration.StandardSequences.L1Reco_cff')
process.load('Configuration.StandardSequences.Reconstruction_cff')
process.load('CommonTools.ParticleFlow.EITopPAG_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load("Geometry.VeryForwardGeometry.geometryRPFromDB_cfi")
# Process every event in the input file (-1 = no limit).
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(-1)
)
# Input source
process.source = cms.Source("PoolSource",
    fileNames = cms.untracked.vstring('file:GluGlu_DIGI_DIGI2RAW_2017.root'),
    secondaryFileNames = cms.untracked.vstring()
)
process.options = cms.untracked.PSet(
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
    annotation = cms.untracked.string('step3 nevts:10'),
    name = cms.untracked.string('Applications'),
    version = cms.untracked.string('$Revision: 1.19 $')
)
# Output definition: keep only sim hits, CTPPS/RP products and ak4 jets.
process.output = cms.OutputModule("PoolOutputModule",
    fileName = cms.untracked.string('file:GluGlu_RAW2DIGI_L1Reco_RECO_2017.root'),
    outputCommands = cms.untracked.vstring("drop *","keep PSimHits*_*_*_*","keep CTPPS*_*_*_*","keep *_*RP*_*_*",'keep *_LHCTransport_*_*',"keep *_ak4*_*_*")
)
# Additional output definition
# Other statements
# Replay the pre-mixed pileup instead of re-digitising it.
process.mix.playback = True
process.mix.digitizers = cms.PSet()
for a in process.aliases: delattr(process, a)
process.RandomNumberGeneratorService.restoreStateLabel=cms.untracked.string("randomEngineStateProducer")
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:phase1_2017_realistic', '')
# do not make testID for simulation - keeping the frame
from EventFilter.CTPPSRawToDigi.totemRPRawToDigi_cfi import totemRPRawToDigi
totemRPRawToDigi.RawToDigi.testID = cms.uint32(1)
# Path and EndPath definitions
process.raw2digi_step = cms.Path(process.RawToDigi)
process.L1Reco_step = cms.Path(process.L1Reco)
process.reconstruction_step = cms.Path(process.reconstruction)
process.output_step = cms.EndPath(process.output)
# Schedule definition
process.schedule = cms.Schedule(process.raw2digi_step,process.L1Reco_step,process.reconstruction_step,process.output_step)
# customisation of the process.
# Automatic addition of the customisation function from SimGeneral.MixingModule.fullMixCustomize_cff
from SimGeneral.MixingModule.fullMixCustomize_cff import setCrossingFrameOn
#call to customisation function setCrossingFrameOn imported from SimGeneral.MixingModule.fullMixCustomize_cff
process = setCrossingFrameOn(process)
# End of customisation functions
#do not add changes to your config after this point (unless you know what you are doing)
from FWCore.ParameterSet.Utilities import convertToUnscheduled
process=convertToUnscheduled(process)
# customisation of the process.
# End of customisation functions
# Customisation from command line
# Add early deletion of temporary data products to reduce peak memory need
from Configuration.StandardSequences.earlyDeleteSettings_cff import customiseEarlyDelete
process = customiseEarlyDelete(process)
# End adding early deletion
|
# Fill a list with 10 integers read from input, using a while loop
# (translated from Thai: "adding data to a list using while").
list1 = []
i = 0
n = 10
while i < n:
    x = int(input())
    list1.append(x)
    i=i+1
print(list1)
# Same, using for with range instead
# (translated from Thai: "adding data to a list using for range").
lists2 = []
for i in range(0, 10, 1):
    x = int(input())
    lists2.append(x)
print(lists2)
|
from qiniu import QiniuMacAuth, http
import requests, time

# Initialise the API credentials (translated from Chinese: key initialisation).
access_key = 'your_AK'
secret_key = 'your_SK'
q = QiniuMacAuth(access_key, secret_key)

url = 'http://ai.qiniuapi.com/v3/video/censor' # request URL
data = {"data": {"uri": "http://cdn.vcore.hk/1528336770927592712019-02-181550474493.mp4"}, "params": {"scenes": ["pulp", "terror", "politician"]}}
# Submit the asynchronous censor job; the response carries a job id.
ret, info = http._post_with_qiniu_mac(url, data, q)
job = ret['job']
# NOTE(review): fixed 10 s wait — long videos may not be finished yet; confirm.
time.sleep(10)
# Fetch the result for the job id (translated from Chinese comment).
url1 = 'http://ai.qiniuapi.com/v3/jobs/video/{}'.format(job)
token = "Qiniu " + q.token_of_request("GET", "ai.qiniuapi.com", url1, "")
r = requests.get(url1, headers={'Authorization': token})
print(r.text)
|
# Item-quality identifiers (0 is reserved for the error value).
Low = 1
Normal = 2
High = 3
Magic = 4
PartOfSet = 5
Rare = 6
Unique = 7
Crafted = 8

# Name -> numeric id.
byName = {
    "ERR_QUALITY": 0,
    "Low": 1,
    "Normal": 2,
    "High": 3,
    "Magic": 4,
    "PartOfSet": 5,
    "Rare": 6,
    "Unique": 7,
    "Crafted": 8,
}

# Id -> name lookup. A real list (indexable, reusable) instead of the
# one-shot iterator that map() returns on Python 3.
byId = [name for name, _ in sorted(byName.items(), key=lambda item: item[1])]
|
# Richard Xie 915505564
# Kneser-Ney Smoothing
import math, collections
def bigram(sentence, i):
    """Return the space-joined pair of words at positions i and i+1."""
    return '{} {}'.format(sentence[i].word, sentence[i + 1].word)
discount = 1
class CustomModel:
    """Bigram language model smoothed with (a variant of) Kneser-Ney."""

    def __init__(self, corpus):
        """Initial custom language model and structures needed by this mode"""
        # Raw unigram / bigram frequency tables plus the continuation
        # tables needed by Kneser-Ney smoothing.
        self.unigramCounts = collections.defaultdict(lambda: 0)
        self.unigramTotal = 0
        self.bigramCounts = collections.defaultdict(lambda: 0)
        self.previousCounts = collections.defaultdict(lambda: 0)  # distinct predecessors per word
        self.nextCounts = collections.defaultdict(lambda: 0)      # distinct successors per word
        self.train(corpus)

    def train(self, corpus):
        """ Takes a corpus and trains your language model.
        """
        # TODO your code here
        for sentence in corpus.corpus: # iterate over sentences in the corpus
            # Unigram
            for datum in sentence.data: # iterate over datums in the sentence
                token = datum.word # get the word
                self.unigramCounts[token] += 1
                self.unigramTotal += 1
            # Bigram
            for i in range(0, len(sentence.data) - 1):
                token = bigram(sentence.data, i)
                self.bigramCounts[token] += 1
        # Unigram with Add-one smoothing
        # Unknown case
        self.unigramCounts['UNK'] = 0
        # Apply Add-one smoothing
        for token in self.unigramCounts:
            self.unigramCounts[token] += 1
            self.unigramTotal += 1
        # Precompute continuation counts once per vocabulary word.
        for unigram in self.unigramCounts:
            self.previousCounts[unigram] = wordsBeforeCalc(unigram, self.bigramCounts)
            self.nextCounts[unigram] = wordsAfterCalc(unigram, self.bigramCounts)

    def score(self, sentence):
        """ With list of strings, return the log-probability of the sentence with language model. Use
        information generated from train.
        """
        # TODO your code here
        score = 0.0
        for i in range(0, len(sentence) - 1):
            bigramToken = '%s %s' % (sentence[i], sentence[i+1])
            bigramCount = self.bigramCounts[bigramToken]
            # Unknown first words fall back to the 'UNK' pseudo-count.
            word1Unigram = sentence[i] if (self.unigramCounts[sentence[i]] > 0) else 'UNK'
            word2Unigram = sentence[i+1]
            # Calculations
            discountBigram = max(bigramCount - discount, 0) / (self.unigramCounts[word1Unigram] * 1.0)
            interpolationWeight = interpolationWeightCalc(word1Unigram, self.unigramCounts, self.nextCounts)
            continuationProb = continuationProbCal(word2Unigram, self.previousCounts, self.bigramCounts)
            # NOTE(review): textbook Kneser-Ney multiplies the back-off weight
            # by the continuation probability; here the two terms are added —
            # confirm this is intentional.
            KNprob = discountBigram + interpolationWeight + continuationProb
            # Small epsilon guards against log(0).
            score += math.log(KNprob + 0.00000000001)
        return score
def wordsBeforeCalc(word2Unigram, bigramCounts):
    """Count distinct bigram types whose *second* word is word2Unigram.

    The original used str.endswith, which also matched suffixes inside a
    word (e.g. 'the cat' "ends with" 'at'); compare the actual second
    token instead. Bigram keys are 'w1 w2', single-space separated.
    """
    numBefore = 0
    for bigramToken in bigramCounts:
        if bigramToken.split(' ')[-1] == word2Unigram:
            numBefore += 1
    return numBefore
def wordsAfterCalc(word1Unigram, bigramCounts):
    """Count distinct bigram types whose *first* word is word1Unigram.

    The original used str.startswith, which also matched prefixes inside
    a word (e.g. 'catalog x' "starts with" 'cat'); compare the actual
    first token instead. Bigram keys are 'w1 w2', single-space separated.
    """
    numAfter = 0
    for bigramToken in bigramCounts:
        if bigramToken.split(' ')[0] == word1Unigram:
            numAfter += 1
    return numAfter
def interpolationWeightCalc(word1Unigram, unigramCounts, nextCounts):
    """Kneser-Ney back-off weight lambda(w1): the normalised discount
    times the number of distinct continuation types seen after w1."""
    normalizedDiscount = float(discount) / unigramCounts[word1Unigram]
    return normalizedDiscount * nextCounts[word1Unigram]
def continuationProbCal(word2Unigram, previousCounts, bigramCounts):
    """Continuation probability of w2: the fraction of bigram types that
    precede it (denominator doubled, as in the original implementation)."""
    denominator = 2.0 * len(bigramCounts)
    return float(previousCounts[word2Unigram]) / denominator
|
def stringToList(s):
    """Map '+' to True and '-' to False, ignoring any other character."""
    return [c == '+' for c in s if c in '+-']
def removePlusAtTheEnd(l):
    """Drop the trailing run of True (happy-side-up) cookies."""
    end = len(l)
    while end > 0 and l[end - 1]:
        end -= 1
    return l[:end]
def reverseCookies(l, iFinal):
    """Flip-and-reverse the prefix l[0..iFinal]; the rest is unchanged."""
    flippedPrefix = [not v for v in reversed(l[:iFinal + 1])]
    return flippedPrefix + l[iFinal + 1:]
# Pancake-flip driver: for each test case, repeatedly strip the solved
# suffix and flip the leading run of equal cookies until none remain,
# counting the flips.
T = int(input())
for t in range(T):
    s = str(input())
    l = stringToList(s)
    #print(l)
    res = 0
    while len(l) > 0:
        l = removePlusAtTheEnd(l)
        if len(l) > 0:
            # Find the length of the leading run of equal values...
            top = l[0]
            iFinal = -1
            for i in range(len(l)):
                if l[i] == top:
                    iFinal += 1
                else:
                    break
            # debug: print(iFinal)
            # ...and flip exactly that prefix.
            l = reverseCookies(l,iFinal)
            res += 1
    print("Case #{}: {}".format(t+1,res))
|
class Solution(object):
    """Gray-code generators (Python 2 source: uses print statements)."""

    def grayCode(self, n):
        """
        :type n: int
        :rtype: List[int]

        Greedy construction: repeatedly flip the lowest bit of the last
        code that yields an unseen value. O(len^2) due to `in res` scans.
        """
        size = 0
        res = [0]
        while len(res)>size:
            size += 1
            for i in range(0, n):
                ans = res[-1] ^ (1 << i)
                if ans not in res:
                    res.append(ans)
                    break
        return res

    def do2(self, x, k):
        # NOTE(review): self.res, self.n and self.do are not defined on this
        # class — do2 looks like dead experimental code; confirm before use.
        for i in range(k, self.n):
            ans = x ^ (1<<i)
            if ans in self.res: continue
            self.res.append(ans)
            print bin(ans)[2:].zfill(3),x,i
            self.do(ans, k+1)

    def grayCode2(self, n):
        """
        :type n: int
        :rtype: List[int]

        Reflect-and-prefix construction: for each new bit, append the
        mirrored existing codes with that bit set.
        """
        size = 0
        res = [0]
        for i in range(0, n):
            size = len(res)
            v = 1<<i
            while size>0:
                size -= 1
                res.append(res[size] | v)
                print v,res[-1]
            print ''
        return res
import time

# Python 2 driver: time the reflect-and-prefix construction for n = 3.
n = 3
# start = time.time()
# ret = Solution().grayCode(n)
# # print ret
# print time.time()-start
start = time.time()
ret = Solution().grayCode2(n)
print ret
print time.time()-start
|
# An implementation of a graph in matrix format for CMPT435.
__author__ = 'Tim Polizzi'
__email__ = 'Timothy.Polizzi1@marist.edu'
class GraphMatrix(object):
    """An adjacency-matrix graph backed by a list of row lists.

    Rows start as single-cell placeholders; fill_matrix() pads them out
    to a square of 'x' cells. Edges are marked with the string "O".
    """

    def __init__(self):
        self.inner_list = []   # row lists; one per vertex
        self.filled = False    # set once fill_matrix() has run

    def add_vertex(self, vertex_num: int):
        """Append a new one-cell placeholder row for a vertex.

        Args:
            vertex_num: The vertex's number (the row itself is positional).

        Returns:
            True (the append cannot fail).
        """
        self.inner_list.append(['t'])
        return True

    def add_edge(self, vertex_1: int, vertex_2: int):
        """Mark an undirected edge between two 1-based vertices with "O".

        Args:
            vertex_1: First endpoint, 1-based.
            vertex_2: Second endpoint, 1-based.

        Returns:
            True when both indices passed the range check, else False.
        """
        row = vertex_1 - 1
        col = vertex_2 - 1
        in_range = (0 <= row < len(self.inner_list)
                    and 0 <= col < len(self.inner_list[row]))
        if in_range:
            self.inner_list[row][col] = "O"
            self.inner_list[col][row] = "O"
        return in_range

    def fill_matrix(self):
        """Pad every row out to a square matrix of 'x' cells.

        Python lists have no fixed size, so rows are padded (or rewritten)
        here so the rest of the program can treat the matrix as square.

        NOTE(review): existing "O" edge marks are also overwritten with
        'x' — confirm edges are meant to be added only after filling.

        Returns:
            True (the fill cannot fail).
        """
        size = len(self.inner_list)
        for row in self.inner_list:
            for idx in range(size):
                if idx < len(row) and row[idx] is not None:
                    row[idx] = 'x'
                else:
                    row.append('x')
        self.filled = True
        return True

    def print_graph(self):
        """Print the matrix, one space-separated row per line."""
        for row in self.inner_list:
            for cell in row:
                print(cell, end=" ")
            print()
|
# Copyright (c) 2019 Western Digital Corporation or its affiliates.
import logging
import torch.nn as nn
from torch.nn.modules.batchnorm import _BatchNorm
from mmcv.runner import load_checkpoint
from mmcv.cnn import constant_init, kaiming_init
from mmdet.models.utils import YoloConvLayer
from ..registry import BACKBONES
class ResBlock(nn.Module):
    """The basic residual block used in YoloV3.

    Each ResBlock consists of two ConvLayers and the input is added to the final output.
    In YoloV3 paper, the first convLayer has half of the number of the filters as much as the second convLayer.
    The first convLayer has filter size of 1x1 and the second one has the filter size of 3x3.
    """
    def __init__(self, in_channels):
        super(ResBlock, self).__init__()
        assert in_channels % 2 == 0  # ensure the in_channels is an even number.
        half_in_channels = in_channels // 2
        # 1x1 bottleneck down to half the channels, then 3x3 back up.
        self.conv1 = YoloConvLayer(in_channels, half_in_channels, 1)
        self.conv2 = YoloConvLayer(half_in_channels, in_channels, 3)

    def forward(self, x):
        """Return conv2(conv1(x)) + x (identity skip connection)."""
        residual = x
        out = self.conv1(x)
        out = self.conv2(out)
        out += residual
        return out
def make_conv_and_res_block(in_channels, out_channels, res_repeat):
    """Build one Darknet-53 stage: a stride-2 3x3 conv followed by
    `res_repeat` ResBlocks, all with `out_channels` filters."""
    stage = nn.Sequential()
    stage.add_module('conv', YoloConvLayer(in_channels, out_channels, 3, stride=2))
    for block_idx in range(res_repeat):
        stage.add_module('res{}'.format(block_idx), ResBlock(out_channels))
    return stage
@BACKBONES.register_module
class DarkNet53(nn.Module):
    """Darknet-53 backbone for YOLOv3.

    Produces the feature maps of the last three stages, deepest first by
    default (1024/512/256 channels); set ``reverse_output`` for
    shallowest-first ordering.
    """

    def __init__(self,
                 norm_eval=True,
                 reverse_output=False):
        super(DarkNet53, self).__init__()
        self.conv1 = YoloConvLayer(3, 32, 3)
        self.cr_block1 = make_conv_and_res_block(32, 64, 1)
        self.cr_block2 = make_conv_and_res_block(64, 128, 2)
        self.cr_block3 = make_conv_and_res_block(128, 256, 8)
        self.cr_block4 = make_conv_and_res_block(256, 512, 8)
        self.cr_block5 = make_conv_and_res_block(512, 1024, 4)
        self.norm_eval = norm_eval
        self.reverse_output = reverse_output

    def forward(self, x):
        stem = self.cr_block2(self.cr_block1(self.conv1(x)))
        shallow = self.cr_block3(stem)
        mid = self.cr_block4(shallow)
        deep = self.cr_block5(mid)
        if self.reverse_output:
            return shallow, mid, deep
        return deep, mid, shallow

    def init_weights(self, pretrained=None):
        """Load *pretrained* when it is a checkpoint path, otherwise apply
        Kaiming/constant init; any other argument type is rejected."""
        if isinstance(pretrained, str):
            load_checkpoint(self, pretrained, strict=False,
                            logger=logging.getLogger())
            return
        if pretrained is not None:
            raise TypeError('pretrained must be a str or None')
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                kaiming_init(m)
            elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
                constant_init(m, 1)

    def _freeze_stages(self):
        # Freeze every backbone parameter.
        for param in self.parameters():
            param.requires_grad = False

    def train(self, mode=True):
        super(DarkNet53, self).train(mode)
        self._freeze_stages()
        if mode and self.norm_eval:
            # trick: eval have effect on BatchNorm only
            for m in self.modules():
                if isinstance(m, _BatchNorm):
                    m.eval()
from flask import Flask, render_template, redirect
from flask_restful import Api
from modules.search_store_resource import SearchStoreResource
import os
from modules.send_message_resource import SendMessageResource
from modules.config_generator import config_generator
app = Flask(__name__)
api = Api(app)
# REST endpoints: store search and message reporting.
api.add_resource(SearchStoreResource, '/search')
api.add_resource(SendMessageResource, '/report')
# Update feed shown on the landing page.
updates = config_generator.updates
@app.errorhandler(404)
def page_not_found(e):
    """Send unknown URLs back to the landing page."""
    landing = '/'
    return redirect(landing, code=302)
@app.route("/", methods=['GET'])
def root():
    """Render the landing page with the runtime identifier and update feed."""
    runtime_env = os.environ['RUNTIME']
    return render_template("index.html", runtime=runtime_env, updates=updates)
@app.route("/madohomu", methods=['GET'])
def madohomu():
    """Render the static madohomu page."""
    template_name = "madohomu.html"
    return render_template(template_name)
# @app.route('/', defaults={'path': ''})
# @app.route('/<path:path>')
# def catch_all(path):
# return redirect('/', code=302)
|
# app.py
import os
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from app import create_app, db
#config_name = os.getenv('FLASK_CONFIG')
# Hard-coded to the development configuration (the env lookup above is
# left commented for reference).
config_name = "development"
app = create_app(config_name)
manager = Manager(app)
migrate = Migrate(app, db)
# Expose `db <cmd>` migration commands through Flask-Script.
manager.add_command('db', MigrateCommand)
@manager.command
def run():
    '''Run the app'''
    # Start Flask's built-in development server (blocking).
    app.run()
@manager.command
def clean():
    '''Clean the compiled Pycache.

    Removes *.pyc / *.pyo files first, then the (now empty) __pycache__
    directories, and only then reports — the original printed the message
    before anything was deleted, and abused list comprehensions for side
    effects.
    '''
    import pathlib
    for compiled in pathlib.Path('.').rglob('*.py[co]'):
        compiled.unlink()
    for cache_dir in pathlib.Path('.').rglob('__pycache__'):
        cache_dir.rmdir()
    print("Python __pycache__ have been deleted.")
if __name__ == '__main__':
    # Dispatch CLI commands (run, clean, db ...) via Flask-Script.
    manager.run()
|
# Name: globals.py
# Purpose: XRC editor, global variables
# Author: Roman Rolinsky <rolinsky@mema.ucl.ac.be>
# Created: 02.12.2002
# RCS-ID: $Id: globals.py,v 1.31 2007/03/08 15:49:34 ROL Exp $
import wx
# Global constants
progname = 'XRCed'
version = '0.1.8-4-admin4'
# Minimal wxWidgets version
MinWxVersion = (2,6,0)
# Warn (without aborting) when the runtime wxWidgets is older than required.
if wx.VERSION[:3] < MinWxVersion:
    print ('''\
******************************* WARNING **************************************
  This version of XRCed may not work correctly on your version of wxWidgets.
  Please upgrade wxWidgets to %d.%d.%d or higher.
******************************************************************************''' % MinWxVersion)
# Global variables
class Globals:
    """Application-wide shared state for XRCed, plus lazily-built fonts."""
    # Shared UI handles, populated elsewhere at startup.
    panel = None
    tree = None
    frame = None
    tools = None
    undoMan = None
    testWin = None
    testWinPos = wx.DefaultPosition
    currentXXX = None

    def _makeFonts(self):
        # Derive all fonts from the system font's point size, once.
        self._sysFont = wx.SystemSettings.GetFont(wx.SYS_SYSTEM_FONT)
        self._labelFont = wx.Font(self._sysFont.GetPointSize(), wx.DEFAULT, wx.NORMAL, wx.BOLD)
        self._modernFont = wx.Font(self._sysFont.GetPointSize(), wx.MODERN, wx.NORMAL, wx.NORMAL)
        self._smallerFont = wx.Font(self._sysFont.GetPointSize()-2, wx.DEFAULT, wx.NORMAL, wx.NORMAL)

    def sysFont(self):
        # Lazily create the fonts on first access.
        if not hasattr(self, "_sysFont"): self._makeFonts()
        return self._sysFont
    def labelFont(self):
        if not hasattr(self, "_labelFont"): self._makeFonts()
        return self._labelFont
    def modernFont(self):
        if not hasattr(self, "_modernFont"): self._makeFonts()
        return self._modernFont
    def smallerFont(self):
        if not hasattr(self, "_smallerFont"): self._makeFonts()
        return self._smallerFont

# Module-level singleton used throughout XRCed.
g = Globals()
|
"""
Description
-----------
Implements query and status report functionality for C1 use cases.
:Authors:
- Johannes Schlatow
"""
import logging
from mcc.framework import *
from mcc.model import SimplePlatformModel
from mcc import parser as cfgparser
from mcc import lib
from mcc.configurator import GenodeConfigurator
from xml.etree import ElementTree as ET
class StatusGenerator:
    """Collects per-device MCC results and renders them as an XML status report."""

    class DeviceStatus:
        """Mutable status record for a single device (written by the outer class)."""

        def __init__(self, name, device):
            self._name = name
            self._device = device      # ControlParser.DeviceControl handle
            self._cfgfound = 'false'   # XML-style booleans, serialised verbatim
            self._cfgokay = 'false'
            self._cfgerror = None      # human-readable error text, or None
            self._model = None         # cross-layer model from the search
            self._platform = None      # platform model used for the metrics

        def generate_xml(self, root):
            """Append this device's <mcc_status> subtree to *root*."""
            status = ET.SubElement(root, 'mcc_status', name=self._name)
            cfg = ET.SubElement(status, 'configuration', found=self._cfgfound, generated=self._cfgokay)
            cfg.text = self._cfgerror
            ET.SubElement(status, 'operationmode', name=self._device.opmode())
            # TODO fill with status information
            if self._platform is not None:
                metrics = ET.SubElement(status, 'metrics')
                # Only report dynamic (non-static) subsystems.
                for sub in self._platform.platform_graph.nodes():
                    if sub.static():
                        continue
                    node = ET.SubElement(metrics, 'subsystem', name=sub.name())
                    for name in ['ram', 'caps']:
                        provided = sub.quantum(name)
                        remaining = sub.state('%s-remaining' % name)
                        ET.SubElement(node, name, provided=str(provided), requested=str(provided-remaining))
                    # outbound network traffic (currently, we do not distinguish between comms in the
                    # platform model, thus we can only sum up the traffic per processing resource)
                    value = sub.state('out_traffic')
                    ET.SubElement(node, 'out_traffic', byte_s=str(value))
                    # TODO cpu load

    def __init__(self, devices):
        # device name -> DeviceStatus record
        self._devices = dict()
        for name, device in devices.items():
            self._devices[name] = self.DeviceStatus(name, device)

    def mcc_result(self, name, found, platform=None, model=None):
        """Record the outcome of the configuration search for device *name*."""
        if found:
            self._devices[name]._cfgfound = 'true'
            assert platform is not None
            assert model is not None
            self._devices[name]._model = model
            self._devices[name]._platform = platform
        else:
            self._devices[name]._cfgfound = 'false'

    def cfg_result(self, name, generated, error=None):
        """Record whether config generation succeeded for device *name*."""
        if generated:
            self._devices[name]._cfgokay = 'true'
        else:
            self._devices[name]._cfgokay = 'false'
            self._devices[name]._cfgerror = error

    def write_to_file(self, filename):
        """Serialise all device status records into one XML file."""
        root = ET.Element('xml')
        for device in self._devices.values():
            device.generate_xml(root)
        tree = ET.ElementTree(root)
        tree.write(filename)
class ControlParser:
    """Parses the mcc_control XML file and resolves per-device model file names."""

    class DeviceControl:
        """Accessor for one <mcc_control> node; maps device/mode/accel to files."""

        def __init__(self, xml_node, basepath):
            self._xml_node = xml_node
            self._basepath = basepath
            self._repo = 'c1_repo.xml'
            # device name -> mode -> accel
            self._mode_map = { 'doris' : {
                                   'exploration' : {
                                       'normal' : 'c1_object_recognition.xml',
                                       'accel' : 'c1_object_recognition_hw.xml' },
                                   'pose' : {
                                       'normal' : 'c1_pose_estimation.xml',
                                       'accel' : 'c1_pose_estimation_hw.xml' }},
                               'boris' : {
                                   'exploration' : {
                                       'normal' : 'c1_object_recognition.xml',
                                       'accel' : 'c1_object_recognition_hw.xml' },
                                   'pose' : {
                                       'normal' : 'c1_pose_estimation.xml',
                                       'accel' : 'c1_pose_estimation_hw.xml' }}}
            # device name -> platform description file
            self._pf_map = { 'doris' : 'base.xml',
                             'boris' : 'base.xml'}

        def repo_filename(self):
            """Path of the shared component repository file."""
            return self._basepath + self._repo

        def platform_filename(self):
            """Path of this device's platform description file."""
            name = self._xml_node.get('name')
            return self._basepath + self._pf_map[name]

        def query_filename(self):
            """Path of the query file for the current mode/acceleration setting."""
            name = self._xml_node.get('name')
            mode = self.opmode()
            hw = 'accel' if self._xml_node.find('hw_acceleration').get('value') == 'true' else 'normal'
            return self._basepath + self._mode_map[name][mode][hw]

        def flux(self):
            """The configured flux value as an int."""
            return int(self._xml_node.find('flux').get('value'))

        def opmode(self):
            """The configured operation-mode name (e.g. 'exploration', 'pose')."""
            return self._xml_node.find('operationmode').get('name')

    def __init__(self, xml_file, basepath):
        self._file = xml_file
        self._basepath = basepath
        assert(self._file is not None)
        parser = ET.XMLParser()
        self._tree = ET.parse(self._file, parser=parser)
        self._root = self._tree.getroot()

    def find_device(self, name):
        """Return a DeviceControl for <mcc_control name=...>, or None."""
        device = self._root.find("./mcc_control[@name='%s']" % name)
        if device is not None:
            return self.DeviceControl(device, self._basepath)
        return None
class EnvironmentModel:
    """Placeholder environment model wrapping the device control handle."""
    # TODO manage flux and reliability requirements

    def __init__(self, control):
        # Device control accessor (ControlParser.DeviceControl).
        self.control = control

    def accept_properties(self, arg):
        """Accept any property set unconditionally (no constraints yet)."""
        return True
class Mcc:
    """Top-level driver: runs the configuration search per device and
    generates Genode configs plus an XML status report."""

    def __init__(self, filename, basepath='mcc/models/', outpath='mcc/run/'):
        self._filename = filename
        self._basepath = basepath
        self._outpath = outpath
        self._parser = ControlParser(filename, basepath)
        # name -> DeviceControl for every device found in the control file
        self._devices = dict()
        for name in ['doris', 'boris']:
            dev = self._parser.find_device(name)
            if dev is not None:
                self._devices[name] = dev
            else:
                logging.error("Unable to find device %s in %s" % (name, self._filename))
        self._devstate = StatusGenerator(self._devices)

    def execute(self):
        """Search a configuration for every device; generate configs only
        when all searches succeed, and always write the status report."""
        results = dict()
        failed = False
        # find configurations
        for name, device in self._devices.items():
            pffile = device.platform_filename()
            pf = cfgparser.PlatformParser(pffile)
            pf_model = SimplePlatformModel(pf)
            env = EnvironmentModel(device)
            # try to create repositories from given files
            repos = list()
            repos.append(cfgparser.Repository(pffile))
            repos.append(cfgparser.Repository(device.repo_filename()))
            cfg = cfgparser.AggregateRepository(repos)
            mcc = lib.SimpleMcc(repo=cfg, test_backtracking=False)
            base = lib.BaseModelQuery()
            # First solve the base system, then the device query on top of it.
            basesys = cfgparser.SystemParser(pffile)
            query, basemodel = mcc.search_config(pf_model, basesys,
                                                 outpath=self._outpath+name+'-'+basesys.name()+'-',
                                                 with_da=False, envmodel=env)
            # store basemodel in BaseModelQuery
            base.insert(name=basesys.name(),
                        query_graph=query,
                        comp_inst=basemodel.by_name['comp_inst'],
                        filename=pffile)
            sys = cfgparser.SystemParser(device.query_filename())
            try:
                query, model = mcc.search_config(pf_model, sys, base,
                                                 outpath=self._outpath+name+'-',
                                                 with_da=False, envmodel=env)
                results[name] = (pf_model, model)
                self._devstate.mcc_result(name, found=True, platform=pf_model, model=model)
            except Exception as e:
                # A failed search for any device vetoes config generation for all.
                failed = True
                self._devstate.mcc_result(name, found=False)
                import traceback
                traceback.print_exc()
                print(e)
        if failed:
            logging.error("Do not generate configs because of failed devices.")
            for name in self._devices.keys():
                self._devstate.cfg_result(name, generated=False,
                                          error="Configs not generated because search failed for at least one device.")
            return
        # generate configs
        for name, (pf_model, model) in results.items():
            # generate <config> from model
            configurator = GenodeConfigurator(self._outpath+name+'-', pf_model)
            configurator.create_configs(model, layer_name='comp_inst')
            self._devstate.cfg_result(name, generated=True)
        # write status reports
        # NOTE(review): the same status.xml is rewritten once per device —
        # looks like the loop is redundant; confirm whether a single write
        # was intended.
        for name in self._devices.keys():
            self._devstate.write_to_file(self._outpath+'status.xml')
|
# 110. Balanced Binary Tree
class Solution(object):
    def isBalanced(self, root):
        """
        :type root: TreeNode
        :rtype: bool

        A tree is balanced when every node's subtree heights differ by at
        most 1. Computed in a single bottom-up pass (O(n)); the original
        recomputed subtree heights at every node, which is O(n^2) worst case.
        """
        return self._check(root) >= 0

    def _check(self, node):
        """Return the height of *node*, or -1 as soon as imbalance is found."""
        if not node:
            return 0
        left = self._check(node.left)
        if left < 0:
            return -1
        right = self._check(node.right)
        if right < 0:
            return -1
        if abs(left - right) > 1:
            return -1
        return max(left, right) + 1

    def MaxHeight(self, root):
        """Return the height of the tree rooted at *root* (0 for an empty tree)."""
        if not root:
            return 0
        left = self.MaxHeight(root.left)
        right = self.MaxHeight(root.right)
        return max(left, right) + 1
#!/usr/bin/env python
#encoding:utf-8
import os
import signal
from pwn import *
# Public API of this exploit-helper module.
__all__ = [
    'factor', 'gcd', 'ext_euclid',
    'SET_PAD_CHAR', 'NOPS', 'LEFT_PAD', 'RIGHT_PAD',
    'debug'
]
#############################
### utils for calculation ###
#############################
def factor(n):
    """Print the prime factorization of n (Python 2 print statements)."""
    # strip out all factors of two first
    while (2 < n) and (n & 1 == 0):
        n >>= 1
        print '2 * ',
    i = 3
    # trial division by odd numbers; `continue` retries the same i for
    # repeated factors
    while i < n:
        if n % i == 0:
            n /= i
            print '%d *' % i,
            continue
        i += 2
    # whatever remains is the final (largest) prime factor
    print n
def gcd(a, b):
    """Return the greatest common divisor of a and b (Euclidean algorithm).

    Expects a > b on the first call (per the original docstring).
    """
    # Bug fix: the original base case was `b == 1: return a`, which returned
    # a wrong result for gcd(a, 1) and raised ZeroDivisionError whenever the
    # remainder reached 0 (e.g. gcd(6, 3)). The correct base case is b == 0.
    if b == 0:
        return a
    return gcd(b, a % b)
def ext_euclid(a, b):
    """Extended Euclidean algorithm: return (x, y) with a*x + b*y == gcd(a, b).

    Expects a > b (per the original docstring).
    """
    if a % b == 0:
        # gcd is b: a*0 + b*1 == b
        return 0, 1
    x, y = ext_euclid(b, a % b)
    # Floor division: the original used Python-2 `/`, which under Python 3 is
    # true division and breaks the integer identity. `//` is identical in Py2.
    return y, x - a // b * y
#############################
### utils for EXP writing ###
#############################
pad_char = '\x90'  # default padding byte (x86 NOP)

def SET_PAD_CHAR(c):
    """Replace the character used by NOPS/LEFT_PAD/RIGHT_PAD."""
    global pad_char
    pad_char = c

def NOPS(n):
    """Return n copies of the current pad character."""
    return pad_char * n

def LEFT_PAD(s, n):
    """Left-pad s with pad characters to a total length of n."""
    assert len(s) <= n
    return pad_char * (n - len(s)) + s

def RIGHT_PAD(s, n):
    """Right-pad s with pad characters to a total length of n."""
    assert len(s) <= n
    return s + pad_char * (n - len(s))
#######################
### utils for debug ###
#######################
def debug(args, shell=False, executable=None, cwd=None, env=None, timeout=pwnlib.timeout.Timeout.default):
    """Spawn the target under gdb and return the pwntools process tube.

    *args* may be a single program path or an argv list; it is prefixed
    with 'gdb'. The returned tube has debug_mode set so ext_interactive
    suppresses its own prompt in favor of gdb's.
    """
    if type(args) == str:
        args = [args]
    args = ['gdb'] + args
    io = process(args, shell, executable, cwd, env, timeout)
    io.debug_mode = True
    # colorize gdb's prompt so it stands out in the interactive session
    io.sendline('set prompt {0} '.format(term.text.bold_red('gdb$')))
    return io
def gdb_break(self, addr):
    """Send a gdb breakpoint command: numeric addresses use 'b *0x...',
    anything else (e.g. a symbol name) uses 'b <addr>'."""
    if type(addr) == int or type(addr) == long:
        cmd = 'b *0x{0:x}'.format(addr)
    else:
        cmd = 'b {0}'.format(addr)
    self.sendline(cmd)
def gdb_run(self):
    """Start the debugged program (gdb 'r' command)."""
    self.sendline('r')
def gdb_continue(self):
    """Resume the debugged program (gdb 'c' command)."""
    self.sendline('c')
def ext_interactive(self, prompt = term.text.bold_red('$') + ' '):
    """interactive(prompt = pwnlib.term.text.bold_red('$') + ' ')
    Does simultaneous reading and writing to the tube. In principle this just
    connects the tube to standard in and standard out, but in practice this
    is much more usable, since we are using :mod:`pwnlib.term` to print a
    floating prompt.
    Thus it only works in while in :data:`pwnlib.term.term_mode`.

    Extensions over pwntools' stock interactive():
    - Ctrl-C is forwarded to the child process instead of killing the session.
    - typing 'c!' sends gdb's continue and leaves interactive mode.
    - input is unescaped via safeeval so escape sequences like \\x41 work.
    """
    def handler(signum, frame):
        # forward SIGINT to the first child of the tube's process
        os.kill(proc.children(proc.pidof(self)[0])[0], signal.SIGINT)
    old_handler = signal.signal(signal.SIGINT, handler)
    log.info('Switching to extensive interactive mode')
    go = threading.Event()
    def recv_thread():
        # background reader: mirror tube output to stderr until `go` is set
        while not go.isSet():
            try:
                cur = self.recv(timeout = 0.05)
                if cur:
                    sys.stderr.write(cur)
                    sys.stderr.flush()
            except EOFError:
                log.info('Got EOF while reading in interactive')
                break
    t = context.Thread(target = recv_thread)
    t.daemon = True
    t.start()
    try:
        while not go.isSet():
            if term.term_mode:
                if self.debug_mode:
                    # under gdb, gdb prints its own prompt
                    data = term.readline.readline(prompt = '', float = True)
                else:
                    data = term.readline.readline(prompt = prompt, float = True)
            else:
                data = sys.stdin.readline()
            if data:
                # continue and exit interactive mode
                try:
                    if data.strip() == 'c!':
                        data = 'c\n'
                        go.set()
                    # interpret escape sequences typed by the user safely
                    data = safeeval.const('"""{0}"""'.format(data.replace('"', r'\"')))
                    self.send(data)
                except ValueError:
                    log.warning('Illegal input, ignored!')
                except EOFError:
                    go.set()
                    log.info('Got EOF while sending in interactive')
            else:
                go.set()
    except KeyboardInterrupt:
        log.info('Interrupted')
        go.set()
    while t.is_alive():
        t.join(timeout = 0.1)
    # restore the previous SIGINT handler on exit
    signal.signal(signal.SIGINT, old_handler)
# Attach the gdb helpers to every pwntools tube by monkey-patching the class.
pwnlib.tubes.tube.tube.debug_mode = False  # default: tube is not under gdb
pwnlib.tubes.tube.tube.b = gdb_break
pwnlib.tubes.tube.tube.r = gdb_run
pwnlib.tubes.tube.tube.c = gdb_continue
pwnlib.tubes.tube.tube.ext_interactive = ext_interactive
|
import base64
import random
import time
import string
import urllib.parse
import json
def get_payload(signupURL):
    """Assemble the Accertify-style device-fingerprint payload for a signup URL."""
    session_id = generate_uba_sessionID()
    page_id = random.randint(1000000000000000, 9999999999999999)
    events = get_events(signupURL, page_id, session_id)
    # Accertify2.js-style transaction id: "NEWEGG" followed by 21 random digits
    transaction_id = "NEWEGG{}".format(''.join(random.choices(string.digits, k=21)))
    # 36 random lowercase alphanumerics; TODO: scrape the real beacon value
    beacon_id = ''.join(random.choices(string.ascii_lowercase + string.digits, k=36))
    return {
        "eventSource": "web",  # static value
        "deviceTransactionID": transaction_id,
        "uBAID": beacon_id,
        "uBAEvents": events,
        "uBASessionID": session_id,
        "pageID": page_id
    }
def generate_uba_sessionID():
    """Generate a 36-character lowercase-hex UBA session identifier.

    Mirrors the site's JS generator: each hex digit mixes the current time
    (whole seconds scaled to ms) with a random offset in [0, 16).
    """
    now_ms = round(time.time()) * 1000
    digits = []
    for _ in range(36):
        nibble = int((now_ms + 16 * random.uniform(0, 1)) % 16)
        digits.append(hex(nibble)[2:])
    return "".join(digits)
def get_events(singupURL, pageID, sessionID):
    """Build the fake mouse-tracking event blob and return it triple-base64-encoded.

    Note: the parameter is spelled `singupURL` (typo for signupURL) in the
    original; kept for compatibility with existing callers.
    """
    timeMS = round(time.time() * 1000)
    timeMS2 = round(time.time() * 1000)
    # Three canned events: mouse-track samples, a click, and the submit meta event.
    EVENTS = [
        {
            "loc":singupURL,
            "pid":pageID,
            "sid":None,
            "bsid":sessionID,
            "ts":timeMS,
            "type":"mtrk",
            "pay":{
                "t":timeMS,
                "fd":1044.27,
                "sd":1557.69,
                "bb":[
                    random.randint(500,800),
                    random.randint(100,300),
                    random.randint(1000,1400),
                    random.randint(100,300)
                ],
                "s":[
                    {
                        "t":0,
                        "x":1324,
                        "y":274,
                        "fd":945.72,
                        "sd":829.03,
                        "c":15,
                        "a":9139.97,
                        "mx":119066.74,
                        "mn":62.5
                    },
                    {
                        "t":260,
                        "x":771,
                        "y":613,
                        "fd":652.69,
                        "sd":648.64,
                        "c":24,
                        "a":2455.57,
                        "mx":8766.41,
                        "mn":90.91
                    },
                    {
                        "t":551,
                        "x":769,
                        "y":693,
                        "fd":80.32,
                        "sd":80.02,
                        "c":22,
                        "a":330.8,
                        "mx":1004.99,
                        "mn":83.33
                    }
                ],
                "c":50,
                "sc":3
            }
        },
        {
            "loc":singupURL,
            "pid":pageID,
            "sid":None,
            "bsid":sessionID,
            "ts":timeMS2,
            "type":"mclk",
            "pay":{
                "t":timeMS2,
                "m":{
                    "_":[
                        {
                            "t":0,
                            "b":0,
                            "x":random.randint(500,800),
                            "y":random.randint(500,800)
                        }
                    ]
                },
                "c":1
            }
        },
        {
            "loc":singupURL,
            "pid":pageID,
            "sid":None,
            "bsid":sessionID,
            "ts":round(time.time() * 1000),
            "type":"meta",
            "pay":{
                "t":round(time.time() * 1000),
                "m":{
                    "_":{
                        "i":None,
                        "n":None,
                        "t":"submit"
                    }
                }
            }
        }
    ]
    # Start time will be a time before the current time to make it seem more real
    # The ragne will be between 30s-60s
    start = round(time.time()) - random.randint(30,60)
    second = start + random.randint(3,10)
    third = second + random.randint(3,10)
    startTime = start * 1000
    secondTime = second * 1000
    thirdTime = third * 1000
    # Update Location, timestamp, pid & bsid
    EVENTS[0]["ts"] = startTime
    EVENTS[0]["pay"]["t"] = startTime
    EVENTS[1]["ts"] = secondTime
    EVENTS[1]["pay"]["t"] = secondTime
    EVENTS[2]["ts"] = thirdTime
    EVENTS[2]["pay"]["t"] = thirdTime
    # URL encode raw array, then wrap it in two layers of base64-encoded JSON
    # envelopes, matching what the site's client script produces.
    urlEncoded = encode(str(json.dumps(EVENTS))).encode("utf-8")
    b64Encoded = base64.b64encode(urlEncoded).decode("utf-8")
    semi = {"ts": round( time.time() * 1000 ), "pay": b64Encoded}
    b64Encoded2 = base64.urlsafe_b64encode(json.dumps(semi).encode()).decode()
    semiFinal = {
        "ts":round( time.time() * 1000 ),
        "pays":[
            b64Encoded2,
            None,
            None
        ]
    }
    final = base64.urlsafe_b64encode(json.dumps(semiFinal).encode()).decode()
    return final
def encode(text):
    """Percent-encode *text* for safe inclusion in a URL (RFC 3986 quoting)."""
    # Renamed the parameter from `string`, which shadowed the stdlib `string`
    # module imported at the top of this file. Callers invoke it positionally.
    return urllib.parse.quote(text)
|
# -*- coding: utf-8 -*-
import json
import requests
from django.core.management.base import BaseCommand
from products.models import Store, Product
from .url_settings import TOP_URL
class Command(BaseCommand):
    """Management command: scrape per-store product availability."""

    def handle(self, *args, **options):
        """
        scrapes and stores product availability
        """
        # load all stores and products once, up front
        stores = Store.objects.all()
        products = Product.objects.all()
        # loop through products
        for product in products:
            # retrieve availability json
            url = TOP_URL+'/json/stores-with-sku/storelocations.bpid.%s.json' % product.product_id
            s = requests.get(url).content.decode()
            # skip responses that are not JSON (error pages etc.); also guard
            # against an empty body, which would raise IndexError on s[0]
            if not s or s[0] != '{':
                continue
            data = json.loads(s)
            data = data['features']
            # create a list of stores to reduce amount of DB queries
            stores_to_add = []
            # loop through stores
            for d in data:
                store_id = d['properties']['storeid']
                # continue if store can't be found due to outdated data
                # from the beer store site.
                # Bug fix: the original bare `except:` also swallowed
                # unrelated errors (including KeyboardInterrupt); only the
                # queryset-get exceptions are expected here.
                try:
                    store = stores.get(store_id=store_id)
                except (Store.DoesNotExist, Store.MultipleObjectsReturned):
                    continue
                # append to store list
                stores_to_add.append(store)
            # add stores where product is available
            product.stores.add(*stores_to_add)
|
# Interactive script: prints a centered pyramid of letters, one row per level.
n = int(input("enter the number of rows"))
k = ord("A")
for row in range(n):
    # two spaces of leading gap per missing column, exactly as the original
    line = "  " * (n - row - 1)
    # note: k is never advanced, so every printed tile is 'A' (original behavior)
    line += (chr(k) + " ") * (row + 1)
    print(line)
from cffi import FFI
import numpy as np
import pdb
ffi = FFI()
# Hold references to the numpy arrays backing cffi pointers so they are not
# garbage-collected while the Fortran/C library still uses their memory.
keepalive = []
def zero_cffiarray(nz):
    """Allocate a zero-filled double array of length nz; return a C `double*` to it.

    The backing numpy array is appended to the module-level `keepalive` list
    so its memory stays valid for the lifetime of the returned pointer.
    """
    backing = np.zeros((nz))
    keepalive.append(backing)
    return ffi.cast("double*", backing.__array_interface__['data'][0])
def morrison_2momNc_simplewarm(nx, ny, nz, qc_np, qr_np, nc_np, nr_np,
                               temp_np, qv_np, press_np, dz_np, dt_in):
    """Call the Morrison two-moment microphysics scheme through cffi.

    Warm-rain-only driver: cloud/rain mass and number (qc, qr, nc, nr),
    temperature, vapour, pressure and layer depth arrays are passed by
    pointer (modified in place by the Fortran/C library); all ice/snow/
    graupel fields are passed as fresh zero arrays.
    Python 2 source (print statement at the end).
    """
    ffi.cdef("void c_init();", override=True)
    ffi.cdef("void c_morr(double QC3DTEN[], double QI3DTEN[], double QNI3DTEN[], double QR3DTEN[], double NC3DTEN[], double NI3DTEN[], double NS3DTEN[], double NR3DTEN[], double QC3D[], double QI3D[], double QNI3D[], double QR3D[], double NI3D[], double NS3D[], double NR3D[], double NC3D[], double T3DTEN[], double QV3DTEN[], double T3D[], double QV3D[], double PRES[], double DZQ[], double W3D[], double WVAR[], double PRECRT, double SNOWRT, double EFFC[], double EFFI[], double EFFS[], double EFFR[], double DT, int IMS, int IME, int JMS, int JME, int KMS, int KME, int ITS, int ITE, int JTS, int JTE, int KTS, int KTE, double QG3DTEN[], double NG3DTEN[], double QG3D[], double NG3D[], double EFFG[], double qrcu1d[], double qscu1d[], double qicu1d[], double QGSTEN[], double QRSTEN[], double QISTEN[], double QNISTEN[], double QCSTEN[]);", override=True)
    lib = ffi.dlopen('libmorrison_2momNc.so')
    # tendency arrays, zero-initialised
    QC3DTEN, QI3DTEN, QNI3DTEN, QR3DTEN = (zero_cffiarray(nz) for i in range(4))
    NC3DTEN, NI3DTEN, NS3DTEN, NR3DTEN = (zero_cffiarray(nz) for i in range(4))
    # prognostic fields: warm-phase ones point at the caller's numpy buffers
    QC3D = ffi.cast("double*", qc_np.__array_interface__['data'][0])
    QI3D, QNI3D = (zero_cffiarray(nz) for i in range(2))
    QR3D = ffi.cast("double*", qr_np.__array_interface__['data'][0])
    NI3D, NS3D = (zero_cffiarray(nz) for i in range(2))
    NR3D = ffi.cast("double*", nr_np.__array_interface__['data'][0])
    NC3D = ffi.cast("double*", nc_np.__array_interface__['data'][0])
    T3DTEN, QV3DTEN = (zero_cffiarray(nz) for i in range(2))
    T3D = ffi.cast("double*", temp_np.__array_interface__['data'][0])
    QV3D = ffi.cast("double*", qv_np.__array_interface__['data'][0])
    PRES = ffi.cast("double*", press_np.__array_interface__['data'][0])
    DZQ = ffi.cast("double*", dz_np.__array_interface__['data'][0])
    W3D, WVAR = (zero_cffiarray(nz) for i in range(2))
    PRECRT = 0.
    SNOWRT = 0.
    EFFC, EFFI, EFFS, EFFR = (zero_cffiarray(nz) for i in range(4))
    DT = dt_in
    # WRF-style index bounds: memory and tile bounds are identical here
    [IMS, IME, ITS, ITE] = [1, nx] * 2
    [JMS, JME, JTS, JTE] = [1, ny] * 2
    [KMS, KME, KTS, KTE] = [1, nz] * 2
    QG3DTEN, NG3DTEN, QG3D, NG3D, EFFG = (zero_cffiarray(nz) for i in range(5))
    qrcu1d, qscu1d, qicu1d = (zero_cffiarray(nz) for i in range(3))
    QGSTEN, QRSTEN, QISTEN, QNISTEN, QCSTEN = (zero_cffiarray(nz) for i in range(5))
    print "cffi", QGSTEN, QRSTEN, QISTEN, QNISTEN, QCSTEN
    lib.c_init()
    lib.c_morr(QC3DTEN, QI3DTEN, QNI3DTEN, QR3DTEN,
               NC3DTEN, NI3DTEN, NS3DTEN, NR3DTEN,
               QC3D, QI3D, QNI3D, QR3D, NI3D, NS3D, NR3D, NC3D,
               T3DTEN, QV3DTEN, T3D, QV3D,
               PRES, DZQ, W3D, WVAR, PRECRT, SNOWRT,
               EFFC, EFFI, EFFS, EFFR,
               DT,
               IMS, IME, JMS, JME, KMS, KME,
               ITS, ITE, JTS, JTE, KTS, KTE,
               QG3DTEN, NG3DTEN, QG3D, NG3D, EFFG,
               qrcu1d, qscu1d, qicu1d,
               QGSTEN, QRSTEN, QISTEN, QNISTEN, QCSTEN
               )
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.conf import settings
from django.apps import apps
from django.utils.translation import ugettext as _
from django.contrib.postgres.fields import JSONField
from django.core.exceptions import ValidationError
from ordered_model.models import OrderedModelBase
from model_utils.models import TimeStampedModel
from model_utils.choices import Choices
class Notification(TimeStampedModel):
    """A per-user notification, optionally persisted and pushed via SNS."""
    NOTIFICATION_TYPE_CHOICES = Choices(
        (1, 'EVENT1', _('Event 1'),),
        (2, 'EVENT2', _('Event 2'),),
        (3, 'EVENT3', _('Event 3'),),
    )
    # Per-type defaults: message template, whether the notification is saved
    # to the DB, and whether the template accepts positional format params.
    NOTIFICATION_MESSAGE_MAPPING = {
        NOTIFICATION_TYPE_CHOICES.EVENT1: {
            'message': _('Event 1 message'),
            'should_save': True,
            'extra_info': False
        },
        NOTIFICATION_TYPE_CHOICES.EVENT2: {
            'message': _('Event 2 message'),
            'should_save': True,
            'extra_info': False
        },
        NOTIFICATION_TYPE_CHOICES.EVENT3: {
            'message': _('Event 3 message'),
            'should_save': False,
            'extra_info': True
        },
    }
    message = models.TextField(_('Message'), max_length=255)
    extra_data = JSONField(_('External data'), blank=True, null=True)
    is_read = models.BooleanField(_('Is read'), default=False)
    user = models.ForeignKey('user.User', verbose_name=_('user'),
                             related_name='user_notifications')
    notification_type = models.PositiveSmallIntegerField(_('Notification Type'), choices=NOTIFICATION_TYPE_CHOICES,
                                                         null=True)
    class Meta:
        db_table = 'Notification'
        verbose_name = _('Notification')
        verbose_name_plural = _('Notifications')
        ordering = ('-created',)
    def __str__(self):
        return '{} - {}'.format(self.user.display_name, self.get_notification_type_display())
    @classmethod
    def get_unread_count(cls, user):
        """Return the number of unread notifications for *user*."""
        return cls.objects.filter(user=user, is_read=False).count()
    @classmethod
    def prepare_notification(cls, user, notification_type, message=None, extra_data=None, format_params=None):
        """Build a Notification, save it if the type requires, and push it.

        If *message* is None the type's template is used; templates with
        extra_info=True are formatted with *format_params*.
        """
        notification_obj = cls(
            user=user,
            notification_type=notification_type,
        )
        if message is not None:
            notification_obj.message = message
        else:
            message = cls.NOTIFICATION_MESSAGE_MAPPING[notification_type]['message']
            if cls.NOTIFICATION_MESSAGE_MAPPING[notification_type]['extra_info'] and format_params is not None:
                message = message.format(*format_params)
            notification_obj.message = message
        if extra_data is not None:
            notification_obj.extra_data = extra_data
        if cls.NOTIFICATION_MESSAGE_MAPPING[notification_type].get('should_save', False):
            notification_obj.save()
        if settings.USE_AWS_SNS and notification_obj.user.can_receive_notifications:
            notification_obj.send_message()
        return notification_obj
    def send_message(self):
        """Send this notification as an APNS-style push to all user devices."""
        body = {
            'aps': {
                'alert': self.message,
                'badge': self.get_unread_count(self.user),
                'sound': 'default'
            },
            'custom': {
                'notification_type': self.notification_type,
                'extra_data': self.extra_data if self.extra_data else {}
            }
        }
        # only persisted notifications have an id to send along
        if self.id:
            body['custom']['notification_id'] = self.id
        # NOTE(review): DeviceModel and send_push are not imported in this
        # module's visible imports — confirm they are provided elsewhere.
        devices = DeviceModel.objects.filter(user=self.user)
        for device in devices:
            send_push.delay(device.arn, body, True)
class Banner(OrderedModelBase):
    """
    Onboarding Banner Model

    A banner may deep-link to a screen; screens flagged with
    external_info=True additionally require an external object id that is
    validated in clean() and serialized via get_serializer().
    """
    BANNER_SCREEN_CHOICES = Choices(
        (1, 'SCREE1', _('Screen1'),),
        (2, 'SCREE2', _('Screen2'),),
        (3, 'SCREE3', _('Screen3'),),
    )
    # Per-screen metadata: whether an external object is needed, and where to
    # find its model and serializer.
    BANNER_SCREEN_CHOICES_NEED_ID = {
        BANNER_SCREEN_CHOICES.SCREE1: {
            'external_info': False
        },
        BANNER_SCREEN_CHOICES.SCREE2: {
            'external_info': True,
            'app_path': 'application',
            'model_name': 'Model1',
            'serializer_path': 'application.serializers.Serializer1',
        },
        BANNER_SCREEN_CHOICES.SCREE3: {
            'external_info': True,
            'app_path': 'application',
            'model_name': 'Model2',
            'serializer_path': 'application.serializers.Serializer2',
        },
    }
    # NOTE(review): photo_directory_path is not defined in this module's
    # visible imports — confirm it is provided elsewhere.
    image = models.ImageField(_('Image'), upload_to=photo_directory_path)
    is_shown = models.BooleanField(_('is Show'), default=True)
    screen = models.PositiveSmallIntegerField(_('Screen'), choices=BANNER_SCREEN_CHOICES, null=True, blank=True)
    external_id = models.BigIntegerField(_('External object id'), null=True, blank=True)
    ordering = models.PositiveIntegerField(editable=False, db_index=True)
    order_field_name = 'ordering'
    class Meta:
        verbose_name = _('Banner')
        verbose_name_plural = _('Banners')
        ordering = ('ordering',)
    def get_image_url(self, request=None):
        """Return the image URL, absolute when a request is given and S3 is off."""
        if request is None or settings.USE_AWS_S3_FOR_MEDIA:
            return self.image.url
        return request.build_absolute_uri(self.image.url)
    def clean(self):
        """Validate that external_id matches the selected screen's rules."""
        if self.screen and self.BANNER_SCREEN_CHOICES_NEED_ID[self.screen]['external_info']:
            external_model = apps.get_model(
                self.BANNER_SCREEN_CHOICES_NEED_ID[self.screen]['app_path'],
                self.BANNER_SCREEN_CHOICES_NEED_ID[self.screen]['model_name']
            )
            try:
                external_model.objects.get(id=self.external_id)
            except external_model.DoesNotExist:
                raise ValidationError({
                    'external_id': _('Object does not exist')
                })
        if (not self.screen and self.external_id) or \
                (self.screen and self.BANNER_SCREEN_CHOICES_NEED_ID[self.screen]['external_info'] is False and
                 self.external_id):
            raise ValidationError({
                'external_id': _('Please select correct screen')
            })
    def get_serializer(self):
        """Import and return the serializer class configured for this screen.

        Walks the dotted serializer_path one attribute at a time; returns
        None if the path does not have exactly four components.
        """
        path = self.BANNER_SCREEN_CHOICES_NEED_ID[self.screen]['serializer_path']
        path = path.split('.')
        if len(path) != 4:
            return None
        el = __import__(path[0])
        path_index = 1
        while path_index < 4 and hasattr(el, path[path_index]):
            el = getattr(el, path[path_index], None)
            path_index += 1
        return el
import heapq
import sys
#Read given from file
filer = sys.argv[1]  # path to the puzzle file passed on the command line
fp = open(filer, "r")
#Determine the dimensions of the given
lines = fp.read().splitlines()
checkdim = lines[0].split(" ")
truedim = len(checkdim)  # board is truedim x truedim; first truedim lines are the start state
#Determine which heuristic to use
def chooseH(chooser, curr, goal, glist, zind):
    """Return the heuristic value of board *curr* against *goal*.

    chooser selects the heuristic: 1 misplaced tiles, 2 linear conflict,
    3 tiles out of row/column, 4 Gaschnig's n-maxswap (uses the precomputed
    permutation glist and blank index zind), 5 Manhattan distance; anything
    else returns 0 (plain Dijkstra).
    """
    h = 0
    if chooser == 1:
        #Misplaced Tiles
        # NOTE(review): dex is never incremented, so curr[0] is compared
        # against every goal row — looks like a bug; confirm intent.
        dex = 0
        for x in goal:
            if curr[dex] == x and x != "0":
                h+=1
    elif chooser == 2:
        #Linear Conflict
        # horizontal pass: +2 for each pair of tiles in their goal row whose
        # goal order is reversed
        for x in range(truedim):
            for y in range(truedim):
                tk = curr[x][y]
                #remember that tj is always to the right of tk by definition
                ay = y
                if tk == '0':
                    continue
                while ay < truedim:
                    #this is for looking for goal state
                    if goal[x][ay] == tk:
                        gok = ay
                        break
                    ay += 1
                #print ay
                if ay >= truedim:
                    #this means goal state is not here
                    continue
                ay = y+1
                while ay < truedim:
                    #this part searches for tj
                    tj = curr[x][ay]
                    aj = y
                    ay +=1
                    if tj == '0':
                        continue
                    while aj < truedim:
                        #this is for looking for goal state of tj
                        if goal[x][aj] == tj:
                            goj = aj
                            break
                        aj += 1
                    if aj >= truedim:
                        #this means goal state is not here
                        continue
                    #making it this far, it means both goal states are in the same line
                    if goj < gok:
                        #if goal of tj is to the left of goal of tk
                        h += 2
        #checks vertically
        # NOTE(review): this pass indexes goal[ay][y] for tk = curr[y][x];
        # mixing x/y as row/column here looks suspect — verify against tests.
        for x in range(truedim):
            for y in range(truedim):
                tk = curr[y][x]
                #remember that tj is always below tk by definition
                ay = x
                if tk == '0':
                    continue
                while ay < truedim:
                    #this is for looking for goal state
                    if goal[ay][y] == tk:
                        gok = ay
                        break
                    ay += 1
                #print ay
                if ay >= truedim:
                    #this means goal state is not here
                    continue
                ay = x+1
                while ay < truedim:
                    #this part searches for tj
                    tj = curr[ay][y]
                    aj = x
                    ay +=1
                    if tj == '0':
                        continue
                    while aj < truedim:
                        #this is for looking for goal state of tj
                        if goal[aj][y] == tj:
                            goj = aj
                            break
                        aj += 1
                    if aj >= truedim:
                        #this means goal state is not here
                        continue
                    #making it this far, it means both goal states are in the same line
                    if goj < gok:
                        #if goal of tj is to the left of goal of tk
                        h += 2
    elif chooser == 3:
        #Tiles out of row and column
        rowout = 0
        colout = 0
        for x in range(truedim):
            for y in range(truedim):
                bamoo = -1
                bam = -1
                for z in range(truedim):
                    if curr[x][y] == goal[x][z] and (curr[x][y] != "0" and goal[x][z] != "0"):
                        bam = 1
                    if curr[x][y] == goal[z][y] and (curr[x][y] != "0" and goal[z][y] != "0"):
                        bamoo = 1
                    if bam == 1 and bamoo == 1:
                        break
                if bam == -1:
                    rowout +=1
                if bamoo == -1:
                    colout +=1
        #overshoots by 1 but idk why
        # presumably the blank tile always counts as out of row and column
        rowout -= 1
        colout -= 1
        h = rowout + colout
    elif chooser == 4:
        #Gaschnig's (N-maxswap)
        # P maps positions to tiles, B maps tiles to positions; each loop
        # iteration performs one swap of the permutation toward glist.
        h = 0
        Plist = [0 for x in range(truedim*truedim)]
        Blist = [0 for x in range(truedim*truedim)]
        Glist = glist
        indz = (truedim*truedim)-1
        indp = 0
        indb = 0
        indz = zind
        if indz == 0:
            # blank is represented as 0 in the goal permutation
            for x in range(truedim):
                for y in range(truedim):
                    Plist[indp] = int(curr[x][y])
                    Blist[Plist[indp]] = indb
                    indb += 1
                    indp += 1
            run = 0
            while(Plist != Glist):
                if Plist[indz] == 0:
                    # blank already home: swap it with any misplaced tile
                    for x in range(truedim*truedim):
                        if Plist[x] != Glist[x]:
                            gswap = Blist[Plist[x]]
                            Blist[Plist[x]] = Blist[Plist[indz]]
                            Blist[Plist[indz]] = gswap
                            gswap = Plist[x]
                            Plist[x] = Plist[indz]
                            Plist[indz] = gswap
                            break
                else:
                    # move the tile that belongs where the blank currently is
                    n = 0
                    tempP = Plist[Blist[n]]
                    Plist[Blist[n]] = Plist[Blist[Blist[n]]]
                    Plist[Blist[Blist[n]]] = tempP
                    tempB = Blist[n]
                    Blist[n] = Blist[Blist[n]]
                    Blist[tempB] = tempB
                h += 1
        else:
            # blank is represented as (n*n)-1; tiles are shifted down by one
            for x in range(truedim):
                for y in range(truedim):
                    Plist[indp] = int(curr[x][y])
                    if Plist[indp] == 0:
                        Plist[indp] = (truedim*truedim)-1
                    else:
                        Plist[indp] -= 1
                    Blist[Plist[indp]] = indb
                    indb += 1
                    indp += 1
            run = 0
            while(Plist != Glist):
                if Plist[indz] == (truedim*truedim)-1:
                    for x in range(truedim*truedim):
                        if Plist[x] != Glist[x]:
                            gswap = Blist[Plist[x]]
                            Blist[Plist[x]] = Blist[Plist[indz]]
                            Blist[Plist[indz]] = gswap
                            gswap = Plist[x]
                            Plist[x] = Plist[indz]
                            Plist[indz] = gswap
                            break
                else:
                    n = (truedim*truedim)-1
                    tempP = Plist[Blist[n]]
                    Plist[Blist[n]] = Plist[Blist[Blist[n]]]
                    Plist[Blist[Blist[n]]] = tempP
                    tempB = Blist[n]
                    Blist[n] = Blist[Blist[n]]
                    Blist[tempB] = tempB
                h += 1
    elif chooser == 5:
        #Manhattan distance
        h = 0
        # precompute goal coordinates of each tile value
        xlist = [0 for x in range(truedim*truedim)]
        ylist = [0 for x in range(truedim*truedim)]
        for x in range(truedim):
            for y in range(truedim):
                ugh = int(goal[x][y])
                xlist[ugh] = x
                ylist[ugh] = y
        for x in range(truedim):
            for y in range(truedim):
                index = int(curr[x][y])
                if index == 0:
                    continue
                addx = abs(x - xlist[index])
                addy = abs(y - ylist[index])
                h = h + addx + addy
    else:
        h = 0
    return h
#Swap tiles and return a new board to be used to make BoardStates
def tileSwap(board, h, i, j, k):
    """Move the tile at (j, k) into the blank at (h, i).

    Returns (new board, comma-separated id string) without mutating *board*.
    """
    moved_tile = board[j][k]
    swapped = [row[:] for row in board]
    swapped[j][k] = '0'
    swapped[h][i] = moved_tile
    state_id = ""
    for row in swapped:
        for cell in row:
            state_id += cell
            state_id += ","
    return (swapped, state_id)
#Check if goal state has been reached; return 1 if reached
def goalReached(board):
    """Return 1 when *board* equals the module-level goal state, else 0."""
    return 1 if board == goal else 0
#reconstruct path
def recPath(cameFrom, curr, moves, starting):
    """Print the move sequence from the start state to *curr*.

    Walks the cameFrom parent links back to *starting*, collecting the move
    labels, then prints them in forward order (Python 2 print statement).
    """
    total_path = []
    total_path.append(moves[curr])
    parcurr = cameFrom[curr]
    while parcurr != starting:
        total_path.append(moves[parcurr])
        parcurr = cameFrom[parcurr]
    total_path.append("start")
    total_path = total_path[::-1]
    print total_path
#Constructing the board using a 2D array
startboard = [[0 for x in range(truedim)] for y in range(truedim)]
stateidee = ""  # comma-separated string id of the start state
for i in range(truedim):
    checkdim = lines[i].split(" ")
    for j in range(truedim):
        startboard[i][j] = checkdim[j]
        stateidee += startboard[i][j]
        stateidee += ","
        if startboard[i][j] == '0':
            # remember the blank's start coordinates
            startzr = i
            startzc = j
#Constructing goal state (purely just for checking)
# glist is the goal as a flat permutation for Gaschnig's heuristic; turner=1
# means the blank sits at index 0 of the goal (changes tile numbering).
glist = [0 for x in range(truedim*truedim)]
indg = 0
turner = 0
goal = [[0 for x in range(truedim)] for y in range(truedim)]
for i in range(truedim):
    checkdim = lines[i+truedim].split(" ")
    for j in range(truedim):
        goal[i][j] = checkdim[j]
        glist[indg] = int(goal[i][j])
        if indg == 0 and glist[indg] == 0:
            turner = 1
        if turner != 1:
            # shift tiles down by one so the blank becomes (n*n)-1
            if glist[indg] == 0:
                glist[indg] = (truedim*truedim)-1
            else:
                glist[indg] -= 1
        indg+=1
if turner == 1:
    indz = 0
else:
    indz = (truedim*truedim)-1
#Initializing open and closed lists
#oplist is a heapq where the state's f and object are stored
#colist is a dictionary where keys are string forms of states
oplist = []
colist = {}
cameFrom = {}
gScore = {}
fScore = {}
moves = {}
achoo = int(input("Choose your HEUR HEUR!\n0 - No h\n1 - Misplaced Tiles\n2 - Linear Conflict\n3 - Out of row and column\n4 - NMaxSwap\n5 - Manhattan Distance\n"))
gScore[stateidee] = 0
fScore[stateidee] = gScore[stateidee] + chooseH(achoo,startboard,goal,glist,indz)
moves[stateidee] = "start"
heapq.heappush(oplist, (fScore[stateidee], startboard, stateidee))
#Actual algorithm (A* over board states)
while oplist:
    #while the open list is not empty
    q = heapq.heappop(oplist)
    qstate = q[1]
    qid = ""
    for s in range(truedim):
        for w in range(truedim):
            qid += qstate[s][w]
            qid += ","
    # skip stale heap entries for already-closed states
    if qid in colist:
        continue
    goalee = goalReached(qstate)
    if goalee == 1:
        recPath(cameFrom, qid, moves, stateidee)
        print "Number of configurations:"
        print len(colist)
        break
    #generate all possible moves from q (at most 4)
    # locate the blank (break only exits the inner loop, but the found
    # coordinates persist since '0' occurs exactly once)
    for zoo in range(truedim):
        for ooz in range(truedim):
            if qstate[zoo][ooz] == '0':
                zrow = zoo
                zcol = ooz
                break
    possmoves = {}
    tempmoves = {}
    #add q to closed list
    colist[qid] = qstate
    #neighbor generation
    if zrow-1 >= 0:
        #swap with board[zrow-1][zcol]
        q1board, q1id = tileSwap(qstate, zrow, zcol, zrow-1, zcol)
        possmoves[q1id] = q1board
        tempmoves[q1id] = "down"
    if zrow+1 < truedim:
        #swap with board[zrow+1][zcol]
        q2board, q2id = tileSwap(qstate, zrow, zcol, zrow+1, zcol)
        possmoves[q2id] = q2board
        tempmoves[q2id] = "up"
    if zcol-1 >= 0:
        #swap with board[zrow][zcol-1]
        q3board, q3id = tileSwap(qstate, zrow, zcol, zrow, zcol-1)
        possmoves[q3id] = q3board
        tempmoves[q3id] = "right"
    if zcol+1 < truedim:
        #swap with board[zrow][zcol+1]
        q4board, q4id = tileSwap(qstate, zrow, zcol, zrow, zcol+1)
        possmoves[q4id] = q4board
        tempmoves[q4id] = "left"
    #iterate through all possible moves
    for x in possmoves:
        #Check if pattern is in the closed list
        if x in colist:
            continue
        #Check for better values of g per possmove
        tempG = gScore[qid]+1
        if x in gScore:
            if tempG >= gScore[x]:
                continue
        #Store best value for possmove
        cameFrom[x] = qid
        moves[x] = tempmoves[x]
        gScore[x] = tempG
        fScore[x] = gScore[x] + chooseH(achoo, possmoves[x], goal, glist, indz)
        #No need to check for presence of possmove in the open list
        #because it has been previously checked if a better possmove
        #exists, and if so, no state is pushed. Duplicate state will be
        #pushed if possmove exists but is better than the existing one
        #but the duplicate will be handled by code above
        heapq.heappush(oplist, (fScore[x], possmoves[x], x))
#Close file containing given
fp.close()
|
from pylab import *
figure()
title("Graded inhibitory synapse: granule-|mitral")
# adjust activation curve so that all the dynamics happens between -52 and -28mV.
act = [0.0]  # clamp at -52mV
for vm in arange(-50e-3, -30.00001e-3, 2e-3):
    act.append(1 / (1 + math.exp(-(vm + 40.5e-3) / 1.0e-3)))
act.append(1.0)  # clamp at -28mV
plot(arange(-52e-3, -28.00001e-3, 2e-3), act, 'r,-')
show()
|
"""
============================
Author:柠檬班-木森
Time:2020/2/25 21:54
E-mail:3247119728@qq.com
Company:湖南零檬信息技术有限公司
============================
"""
"""
linux命令 os模块中的方法
pwd: os.getcwd()
cd os.chdir()
ls os.listdir()
mkdir os.mkdir()
rmdir os.rmdir()
"""
import os
# 获取当前的工作路径
# print(os.getcwd())
# 切换路径
# os.chdir("..")
# print(os.getcwd())
# 获取当前工作路径下的文件和文件夹信息
# print(os.listdir("."))
# os.mkdir("test01")
# os.rmdir("test01")
# 判断给定的路径是否是文件
# res= os.path.isfile(r"C:\project\py27_class\py27_04day\01序列类型数据的切片操作(进阶).py")
# print(res)
# 判断给定的路径是否是文件夹路径
# res= os.path.isdir(r"C:\project\py27_class\py27_04day")
# print(res)
|
import os
from pathlib import Path
from imblearn.over_sampling import SMOTE
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import KFold, GridSearchCV
from sklearn.preprocessing import StandardScaler
from sklearn.svm import LinearSVC
from tqdm import tqdm
import numpy as np
from config import paths
from common import classification_report_utils
from integration import utils
from sklearn import metrics
from common.classification_metrics import METRICS_skl, top_metric_skl
from imblearn.pipeline import Pipeline
from integration.utils import get_patient_kfold_split
def __get_classifier(method_name, params):
    """
    Description: Get classifier method.
    :param method_name: method name.
    :param params: parameters.
    :returns: classifier, grid.
    """
    if method_name == 'linearsvc':
        estimator = LinearSVC(max_iter=params['linearsvc']['max_iter'],
                              random_state=params['general']['random_state'])
        classifier = ('linearsvc', estimator)
        # grid over the regularization strength C
        grid = [{'linearsvc__C': [0.00001, 0.0001, 0.001, 0.01, 0.1, 1]}]
    else:
        # any other method name falls back to SGDClassifier
        estimator = SGDClassifier(max_iter=params['sgdclassifier']['max_iter'],
                                  random_state=params['general']['random_state'])
        classifier = ('sgdclassifier', estimator)
        # grid over the regularization multiplier alpha
        grid = [{'sgdclassifier__alpha': [1.e-01, 1.e-02, 1.e-03, 1.e-04, 1.e-05, 1.e-06]}]
    return classifier, grid
def shallow_classifier(args, params, data_path):
    """
    Description: Train and test shallow classifier, then show results.
    :param args: arguments.
    :param params: configuration parameters.
    :param data_path: data path.

    Two phases: (1) nested cross validation (patient-wise outer folds) for an
    unbiased generalization estimate; (2) a simple grid-search CV on all of
    X_train to pick the final model, which is evaluated once on X_test and
    reported to disk.
    """
    # classification pipeline with scaler, SMOTE and classifier:
    classifier, param_grid = __get_classifier(args.classification_method, params)
    pipe = Pipeline([('standardscaler', StandardScaler()), ('smote', SMOTE(random_state=params['general']['random_state'])), classifier])
    # get data
    X_train, y_train, X_test, y_test = utils.get_data(data_path)
    test_outer_results = []
    train_outer_results = []
    best_hyperparams = []
    metric = top_metric_skl
    # outer folds are grouped by patient to avoid leakage between folds
    # cv_outer = KFold(n_splits=params['cv']['n_outer_splits'], shuffle=True, random_state=params['general']['random_state'])
    splits, _ = get_patient_kfold_split(
        X_train,
        y_train,
        data_info_path=data_path / 'info_train.csv',
        n_splits=params['cv']['n_outer_splits'])
    # nested cross validation for unbiased error estimation:
    print(">> Nested cross validation for unbiased error estimation...")
    for train_ix, test_ix in tqdm(splits):
        # split data in k_outer folds (one is test, the rest is trainval) for outer loop
        X_train_cv, X_test_cv = X_train[train_ix, :], X_train[test_ix, :]
        y_train_cv, y_test_cv = y_train[train_ix], y_train[test_ix]
        # inner cross validation procedure for grid search of best hyperparameters:
        # trainval will be split in k_inner folds (one is val, the rest is train)
        # use train and val to find best model
        cv_inner = KFold(n_splits=params['cv']['n_inner_splits'], shuffle=True,
                         random_state=params['general']['random_state'])
        search = GridSearchCV(estimator=pipe, param_grid=param_grid, cv=cv_inner, n_jobs=-1,
                              scoring='recall', refit=True, verbose=2)
        # outer cross validation procedure to evaluate the performance of the best estimator:
        # fit the best model on the whole trainval
        search.fit(X_train_cv, y_train_cv)
        best_model = search.best_estimator_
        best_score = search.best_score_
        best_param = search.best_params_
        # evaluate the performance of the model on test
        y_test_pred = best_model.predict(X_test_cv)
        test_score = metric(y_test_cv, y_test_pred)
        y_train_pred = search.predict(X_train_cv)
        train_score = metric(y_train_cv, y_train_pred)
        test_outer_results.append(test_score)
        train_outer_results.append(train_score)
        best_hyperparams.append(search.best_params_)
        print(f"Inner cv: ")
        print(f"   Best {metric.__name__} = {best_score}, best hyperparam = {best_param}")
        print(f"Outer cv: ")
        print(f"   Val {metric.__name__} = {test_score}) - ", end='')
        print(f"Val precision = {metrics.precision_score(y_test_cv, y_test_pred)}) - ", end='')
        print(f"Val accuracy = {metrics.accuracy_score(y_test_cv, y_test_pred)} - ", end='')
        print(f"Val matthews_corrcoef = {metrics.matthews_corrcoef(y_test_cv, y_test_pred)}) - ", end='')
        print(f"Train {metric.__name__} = {train_score}) - ", end='')
        print(f"Train precision = {metrics.precision_score(y_train_cv, y_train_pred)}) - ", end='')
        print(f"Train matthews_corrcoef = {metrics.matthews_corrcoef(y_train_cv, y_train_pred)})")
    # calculate the mean score over all K outer folds, and report as the generalization error
    global_test_score = np.mean(test_outer_results)
    global_test_std = np.std(test_outer_results)
    global_train_score = np.mean(train_outer_results)
    global_train_std = np.std(train_outer_results)
    print()
    print(f"Global validation {metric.__name__} = {str(global_test_score)} ({str(global_test_std)})")
    print(f"Global training {metric.__name__} = {str(global_train_score)} ({str(global_train_std)})")
    print("List of best hyperparameters to check stability: ")
    print(best_hyperparams)
    print()
    # simple cross validation to find the best model:
    print('>> Simple cross validation to find the best model...')
    _, groups = get_patient_kfold_split(
        X_train,
        y_train,
        data_info_path=data_path / 'info_train.csv',
        n_splits=params['cv']['n_inner_splits'])
    cv = KFold(n_splits=params['cv']['n_inner_splits'], shuffle=True, random_state=params['general']['random_state'])
    search = GridSearchCV(estimator=pipe, param_grid=param_grid, cv=cv, n_jobs=-1, scoring='recall',
                          refit=True, verbose=2)
    search.fit(X_train, y_train, groups=groups)
    best_model = search.best_estimator_
    best_hyperparam = search.best_params_
    best_score = search.best_score_
    print('>> Predicting on test dataset...')
    y_pred = best_model.predict(X_test)
    final_test_score = metric(y_test, y_pred)
    print(f"Final test {metric.__name__} = {final_test_score}")
    test_scores = {}
    for metr in METRICS_skl:
        test_scores[metr.__name__] = metr(y_test, y_pred)
    # path to save results: folder name encodes the experiment settings
    experiment_descr = f"{os.path.split(data_path)[1]}"
    experiment_descr += f"_{params['general']['use_features_images_only']}"
    experiment_descr += f"_{args.classification_method}"
    experiment_descr += f"_smote"
    results_path = Path(paths.integration_classification_results_dir) / experiment_descr
    if not os.path.exists(results_path):
        os.makedirs(results_path)
    # generate classification report:
    experiment_info = {}
    experiment_info['Data folder'] = str(data_path)
    experiment_info['Selected features'] = f'Image features, no gene features' if params['general']['use_features_images_only'] else 'All'
    experiment_info['PCA'] = f"n. components={params['general']['num_principal_components']}" if params['general']['num_principal_components'] else 'No'
    experiment_info['Classification method'] = str(args.classification_method)
    experiment_info['Class balancing method'] = 'SMOTE'
    experiment_info['Error estimation:'] = '---------------------------------------'
    experiment_info['Global validation score'] = f"{metric.__name__}={str(global_test_score)} ({str(global_test_std)})"
    experiment_info['Global train score'] = f"{metric.__name__}={str(global_train_score)} ({str(global_train_std)})"
    experiment_info['Test results:'] = '---------------------------------------'
    experiment_info['Best cv hyperparameter'] = f"{'C' if args.classification_method == 'linearsvc' else 'alpha'}={best_hyperparam}"
    experiment_info['Best cv score'] = f"{metric.__name__}={best_score}"
    experiment_info['Final test score'] = f"{metric.__name__}={final_test_score}"
    test_data_info_path = data_path / 'info_test.csv'
    classification_report_utils.generate_classification_report(results_path, y_test, y_pred, test_scores, experiment_info,
                                                               test_data_info_path=test_data_info_path)
    # generate plots:
    classification_report_utils.generate_classification_plots(results_path, best_model, X_test, y_test, X_train, y_train)
    print('>> Done')
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""MSTICPy core pandas accessor methods."""
from typing import Any, Dict, List, Mapping, Union
import pandas as pd
from .._version import VERSION
from ..context.ip_utils import get_whois_df
from ..data.data_obfus import mask_df
from ..transform.base64unpack import unpack_df
from ..transform.iocextract import IoCExtract
from ..transform.network import df_to_networkx
from ..transform.proc_tree_builder import ProcSchema, build_process_tree
__version__ = VERSION
__author__ = "Ian Hellen"
@pd.api.extensions.register_dataframe_accessor("mp")
class MsticpyCoreAccessor:
    """Msticpy pandas accessor for core functions."""

    def __init__(self, pandas_obj):
        """Initialize the extension."""
        self._df = pandas_obj
        self._ioc = IoCExtract()

    def b64extract(self, column: str, **kwargs) -> pd.DataFrame:
        """
        Base64-decode strings taken from the wrapped pandas dataframe.

        Parameters
        ----------
        column : str
            Name of dataframe text column
        trace : bool, optional
            Show additional status (the default is None)
        utf16 : bool, optional
            Attempt to decode UTF16 byte strings

        Returns
        -------
        pd.DataFrame
            Decoded string and additional metadata in dataframe

        Notes
        -----
        Items that decode to utf-8 or utf-16 strings will be returned as decoded
        strings replaced in the original string. If the encoded string is a
        known binary type it will identify the file type and return the hashes
        of the file. If any binary types are known archives (zip, tar, gzip) it
        will unpack the contents of the archive.
        For any binary it will return the decoded file as a byte array, and as a
        printable list of byte values.

        The columns of the output DataFrame are:

        - decoded string: this is the input string with any decoded sections
          replaced by the results of the decoding
        - reference : this is an index that matches an index number in the
          decoded string (e.g. <<encoded binary type=pdf index=1.2').
        - original_string : the string prior to decoding - file_type : the type
          of file if this could be determined
        - file_hashes : a dictionary of hashes (the md5, sha1 and sha256 hashes
          are broken out into separate columns)
        - input_bytes : the binary image as a byte array
        - decoded_string : printable form of the decoded string (either string
          or list of hex byte values)
        - encoding_type : utf-8, utf-16 or binary
        - md5, sha1, sha256 : the respective hashes of the binary file_type,
          file_hashes, input_bytes, md5, sha1, sha256 will be null if this item is
          decoded to a string
        - src_index - the index of the source row in the input
          frame.
        """
        return unpack_df(data=self._df, column=column, **kwargs)

    def ioc_extract(self, columns: List[str], **kwargs) -> pd.DataFrame:
        """
        Extract IoCs from the wrapped pandas DataFrame.

        Parameters
        ----------
        columns : list
            The list of columns to use as source strings,

        Other Parameters
        ----------------
        ioc_types : list, optional
            Restrict matching to just specified types.
            (default is all types)
        include_paths : bool, optional
            Whether to include path matches (which can be noisy)
            (the default is false - excludes 'windows_path'
            and 'linux_path'). If `ioc_types` is specified
            this parameter is ignored.

        Returns
        -------
        pd.DataFrame
            DataFrame of observables

        Notes
        -----
        Extract takes a pandas DataFrame as input.
        The results will be returned as a new
        DataFrame with the following columns:

        - IoCType: the mnemonic used to distinguish different IoC Types
        - Observable: the actual value of the observable
        - SourceIndex: the index of the row in the input DataFrame from
          which the source for the IoC observable was extracted.

        IoCType Pattern selection
        The default list is: ['ipv4', 'ipv6', 'dns', 'url',
        'md5_hash', 'sha1_hash', 'sha256_hash'] plus any
        user-defined types.
        'windows_path', 'linux_path' are excluded unless `include_paths`
        is True or explicitly included in `ioc_paths`.
        """
        return self._ioc.extract_df(data=self._df, columns=columns, **kwargs)

    def build_process_tree(
        self,
        schema: Union[ProcSchema, Dict[str, Any], None] = None,
        show_summary: bool = False,
        debug: bool = False,
    ) -> pd.DataFrame:
        """
        Build process trees from the process events.

        Parameters
        ----------
        schema : Union[ProcSchema, Dict[str, Any], None], optional
            The column schema to use, by default None.
            If supplied as a dict it must include definitions for the
            required fields in the ProcSchema class
            If None, then the schema is inferred
        show_summary : bool
            Shows summary of the built tree, default is False.
        debug : bool
            If True produces extra debugging output,
            by default False

        Returns
        -------
        pd.DataFrame
            Process tree dataframe.

        See Also
        --------
        ProcSchema
        """
        return build_process_tree(
            procs=self._df, schema=schema, show_summary=show_summary, debug=debug
        )

    def to_graph(self, **kwargs):
        """
        Create a networkx graph from a DataFrame.

        Parameters
        ----------
        source_col : str
            Column for source nodes.
        target_col : str
            Column for target nodes.
        source_attrs : Optional[List[str]], optional
            Optional list of columns to use as source node attributes, by default None
        target_attrs : Optional[List[str]], optional
            Optional list of columns to use as target node attributes, by default None
        edge_attrs : Optional[List[str]], optional
            Optional list of columns to use as edge node attributes, by default None
        graph_type : str
            "graph" or "digraph" (for nx.DiGraph)

        Returns
        -------
        nx.Graph
            The networkx graph object
        """
        return df_to_networkx(self._df, **kwargs)

    def mask(
        self, column_map: Mapping[str, Any] = None, use_default: bool = True
    ) -> pd.DataFrame:
        """
        Obfuscate the data in columns of the wrapped pandas dataframe.

        Parameters
        ----------
        column_map : Mapping[str, Any], optional
            Custom column mapping, by default None
        use_default: bool
            If True use the built-in map (adding any custom
            mappings to this dictionary)

        Returns
        -------
        pd.DataFrame
            Obfuscated dataframe
        """
        return mask_df(data=self._df, column_map=column_map, use_default=use_default)

    def whois(self, ip_column: str, **kwargs) -> pd.DataFrame:
        """
        Look up whois data for an IP address column of the wrapped dataframe.

        Parameters
        ----------
        ip_column : str
            Column name of IP Address to look up.

        Other Parameters
        ----------------
        asn_col : str, optional
            Name of the output column for ASN description,
            by default "ASNDescription"
        whois_col : str, optional
            Name of the output column for full whois data,
            by default "WhoIsData"
            (TODO confirm parameter name against ``get_whois_df``)
        show_progress : bool, optional
            Show progress for each query, by default False

        Returns
        -------
        pd.DataFrame
            Output DataFrame with results in added columns.
        """
        return get_whois_df(data=self._df, ip_column=ip_column, **kwargs)
|
#!/usr/bin/env python2
from __future__ import print_function
import enum
import math
import rospy
import threading
import tf
from geometry_msgs.msg import Twist, Vector3
from sensor_msgs.msg import Imu
from std_msgs.msg import UInt8, Empty
class Pose(enum.Enum):
    """Myo gesture codes as delivered on the myo_gest UInt8 topics."""
    REST = 0
    FIST = 1
    WAVE_IN = 2
    WAVE_OUT = 3
    FINGERS_SPREAD = 4
    THUMB_TO_PINKY = 5
    UNKNOWN = 255
# Latest Euler angles from each Myo IMU, each guarded by its own lock:
# written by the subscriber callbacks, read by the control loop in main().
rotlock = threading.Lock()
rotdat = [0.0, 0.0, 0.0]
rotlock2 = threading.Lock()
rotdat2 = [0.0, 0.0, 0.0]
def clamp(n, mn, mx):
    """Clamp *n* into [mn, mx]; equivalent to min(max(n, mn), mx)."""
    lower_bounded = n if n > mn else mn
    return lower_bounded if lower_bounded < mx else mx
def imu_handler(imu):
    """ROS callback: publish Myo #1 orientation into the shared rotdat list.

    Converts the Imu quaternion to (roll, pitch, yaw) Euler angles and
    stores them under rotlock for the control loop to read.
    """
    global rotdat
    quat = imu.orientation
    euler = tf.transformations.euler_from_quaternion(
        (quat.x, quat.y, quat.z, quat.w))
    with rotlock:
        rotdat = list(euler)
def imu_handler2(imu):
    """ROS callback: publish Myo #2 orientation into the shared rotdat2 list.

    Same conversion as imu_handler, but for the second armband / lock.
    """
    global rotdat2
    quat = imu.orientation
    euler = tf.transformations.euler_from_quaternion(
        (quat.x, quat.y, quat.z, quat.w))
    with rotlock2:
        rotdat2 = list(euler)
def main():
    """ROS node: map two Myo armbands' tilt onto AR.Drone velocity commands.

    Armband 1 (myo_imu1) drives the linear x/y components, armband 2
    (myo_imu2) drives vertical speed and yaw (assuming standard Twist
    conventions -- TODO confirm against the drone driver).
    """
    rospy.init_node('myo_ar', anonymous=True)
    # Publish to AR drone cmd_vel
    arPub = rospy.Publisher('cmd_vel', Twist, queue_size=10)
    takeoff = rospy.Publisher('ardrone/takeoff', Empty, queue_size=1)
    land = rospy.Publisher('ardrone/land', Empty, queue_size=1)
    reset = rospy.Publisher('ardrone/reset', Empty, queue_size=1)

    def gest_handler(msg):
        # Gesture callback: spread fingers -> takeoff, fist -> land.
        data = Pose(msg.data)
        print(data)
        if data == Pose.FINGERS_SPREAD:
            takeoff.publish(Empty())
            print('Takeoff')
        elif data == Pose.FIST:
            land.publish(Empty())
            print('Land')
        # Reset gesture not wired up yet:
        #elif data == Pose.:
        #    reset.publish(Empty())
        #    print('Reset')

    rospy.Subscriber('myo_imu1', Imu, imu_handler)
    rospy.Subscriber('myo_imu2', Imu, imu_handler2)
    rospy.Subscriber('myo_gest1', UInt8, gest_handler)
    rate = rospy.Rate(60)
    # NOTE(review): `while True` ignores node shutdown; the usual pattern is
    # `while not rospy.is_shutdown()` -- confirm intended.
    while True:
        # Armband 1: y/z tilt beyond a pi/6 dead-band -> forward/lateral command.
        with rotlock:
            x, y, z = rotdat
        if abs(y) > math.pi / 6.0:
            f = 1 if y > 0 else -1
        else:
            f = 0
        if abs(z) > math.pi / 6.0:
            h = 1 if z > 0 else -1
        else:
            h = 0
        # Armband 2: same dead-band -> vertical (sign inverted) and yaw command.
        with rotlock2:
            x, y, z = rotdat2
        if abs(y) > math.pi / 6.0:
            v = -1 if y > 0 else 1
        else:
            v = 0
        if abs(z) > math.pi / 6.0:
            r = 1 if z > 0 else -1
        else:
            r = 0
        #print(f, h)
        arPub.publish(Twist(Vector3(f, h, v), Vector3(0, 0, r)))
        rate.sleep()
# Entry point when launched as a ROS node script.
if __name__ == '__main__':
    main()
|
import os
# Flask application settings module.

# Session-signing secret, read from the SK environment variable (None if unset).
SECRET_KEY=os.getenv('SK')
# NOTE(review): hard-coded localhost DSN with an empty postgres password --
# fine for local development, confirm this is overridden in production.
SQLALCHEMY_DATABASE_URI="postgresql://postgres:@localhost:5432/postgres"
# Disable SQLAlchemy's modification-tracking event system.
SQLALCHEMY_TRACK_MODIFICATIONS=False
# Flask-DebugToolbar options.
DEBUG_TB_TEMPLATE_EDITOR_ENABLED=False
DEBUG_TB_INTERCEPT_REDIRECTS=False
# Theme name for Flask-Admin (presumably a Bootswatch swatch -- confirm).
FLASK_ADMIN_SWATCH='yeti'
#coding=utf-8
#author=godpgf
from ctypes import *
from pyalphatree.libalphatree import alphatree
import numpy as np
class AlphaPic(object):
    """Wrapper over the native ``alphatree`` picture API.

    Feature names are passed to the native side as one contiguous buffer of
    NUL-terminated UTF-8 strings.
    """

    def __init__(self, sign_name, feature_list, daybefore, sample_size):
        """Create a native picture handle for *sign_name* over *feature_list*."""
        features_cache = self.decode_features(feature_list)
        self.picid = alphatree.useAlphaPic(c_char_p(sign_name.encode('utf-8')), features_cache, len(feature_list), daybefore, sample_size)

    @classmethod
    def decode_features(cls, feature_list):
        """Pack *feature_list* into a ctypes char buffer of NUL-terminated names.

        Each name is UTF-8 encoded and followed by a single NUL byte.

        BUG FIX: the buffer used to be sized by the *character* count of each
        name (``len(feature)``) rather than its encoded byte count; any
        non-ASCII name overflowed the buffer. Sizing by the encoded payload
        fixes that while producing a byte-identical buffer for ASCII input.
        """
        payload = b''.join(feature.encode('utf-8') + b'\0' for feature in feature_list)
        features_cache = (c_char * len(payload))()
        features_cache[:] = payload
        return features_cache

    def get_k_line(self, sign_name, open_elements, high_elements, low_elements, close_elements, daybefore, sample_size, column, max_std_scale):
        """Render K-line (candlestick) pictures for every sign of *sign_name*.

        Returns an array reshaped to (-1, column, len(open_elements) * 3, 1).
        """
        open_cache = self.decode_features(open_elements)
        high_cache = self.decode_features(high_elements)
        low_cache = self.decode_features(low_elements)
        close_cache = self.decode_features(close_elements)
        # One (column x 3-channel) picture per sign returned by the native side.
        pic_size = (column * len(open_elements) * 3 * alphatree.getSignNum(daybefore, sample_size, c_char_p(sign_name.encode('utf-8'))))
        pic_cache = (c_float * pic_size)()
        alphatree.getKLinePic(self.picid, c_char_p(sign_name.encode('utf-8')), open_cache, high_cache, low_cache, close_cache, len(open_elements), daybefore, sample_size, pic_cache, column, c_float(max_std_scale))
        kline = np.array(pic_cache)
        return kline.reshape([-1, column, len(open_elements) * 3, 1])

    def get_trend(self, sign_name, elements, daybefore, sample_size, column, max_std_scale):
        """Render trend pictures for every sign of *sign_name*.

        Returns an array reshaped to (-1, column, len(elements) * 3, 1).
        """
        element_cache = self.decode_features(elements)
        pic_size = (column * len(elements) * 3 * alphatree.getSignNum(daybefore, sample_size, c_char_p(sign_name.encode('utf-8'))))
        pic_cache = (c_float * pic_size)()
        alphatree.getTrendPic(self.picid, c_char_p(sign_name.encode('utf-8')), element_cache, len(elements), daybefore, sample_size, pic_cache, column, c_float(max_std_scale))
        trend = np.array(pic_cache)
        return trend.reshape([-1, column, len(elements) * 3, 1])
#!/usr/bin/env python3
import sys
from PySide import QtGui, QtCore
import os
from os import path
import logging
#class ImageViewer(QtGui.QWidget):
class ImageViewer(QtGui.QMainWindow):
    """Main window that shows one image and lets the user open another.

    Shortcuts: Ctrl+O opens a file, Ctrl+Q quits, Left/Right arrow keys
    invoke fire() (currently only prints the direction).
    """

    def __init__(self):
        super(ImageViewer, self).__init__()
        self.initUI()

    def initUI(self):
        """Build the menu, central layout, shortcuts and the initial image."""
        self.is_image_init = False   # True once the image QLabel exists
        self.is_updating = False     # guards paintEvent while swapping images
        #self.statusBar().showMessage('Ready')
        self.makeMenu()
        widget = QtGui.QWidget()
        self.setCentralWidget(widget)
        self.vbox = QtGui.QVBoxLayout()
        self.vbox.setContentsMargins(5, 5, 5, 5)
        # NOTE(review): hard-coded developer path -- breaks on other machines.
        self.fname = "/home/ben/code/st/shuttle.jpg"
        self.loadImage(self.fname)
        widget.setLayout(self.vbox)
        self.setGeometry(300, 300, 600, 150)
        self.setWindowTitle('Image Classifier')
        self.makeShortcuts()
        self.show()

    def showFileDialog(self):
        """Ask the user for an image file and display it."""
        self.fname, _ = QtGui.QFileDialog.getOpenFileName(self, 'Open Image', path.expanduser("~"))
        self.loadImage(self.fname)
        # Earlier directory-scanning experiment, kept for reference:
        #dirname = QtGui.QFileDialog.getExistingDirectory(self, 'Open Image', path.expanduser("~"))
        #self.files = list(filter(lambda x: fnmatch.fnmatch(x, "*.jpg"), os.listdir(dirname)))

    def loadImage(self, path):
        """Display the image at *path*, creating or replacing the label.

        NOTE(review): the parameter shadows the module-level ``os.path``
        import inside this method.
        """
        print(self.is_image_init)
        print(self.fname)
        if self.is_image_init:
            self.imageUpdate(path)
        else:
            self.imageInit(path)

    def imageUpdate(self, path):
        """Replace the current image label with a fresh one for *path*."""
        #self.image.load(path)
        #self.pixmap = QtGui.QPixmap.fromImage(self.image).scaledToWidth(600)
        # is_updating stops paintEvent touching the label mid-replacement.
        self.is_updating = True
        self.image_label.deleteLater()
        self.imageInit(path)
        self.is_updating = False

    def imageInit(self, path):
        """Create the QImage/QLabel pair for *path* and add it to the layout."""
        self.image = QtGui.QImage(100, 150, QtGui.QImage.Format_ARGB32)
        self.image.load(path)
        self.image_label = QtGui.QLabel("")
        self.image_label.setScaledContents(True)
        self.pixmap = QtGui.QPixmap.fromImage(self.image).scaledToWidth(600)
        self.image_label.setPixmap(self.pixmap)
        self.vbox.addWidget(self.image_label)
        self.is_image_init = True

    def makeMenu(self):
        """Create the File menu with Open and Exit actions."""
        openFile = QtGui.QAction(QtGui.QIcon('open.png'), 'Open', self)
        openFile.setShortcut('Ctrl+O')
        openFile.setStatusTip('Open new File')
        openFile.triggered.connect(self.showFileDialog)
        exitAction = QtGui.QAction(QtGui.QIcon('exit.png'), '&Exit', self)
        exitAction.setShortcut('Ctrl+Q')
        exitAction.setStatusTip('Exit application')
        exitAction.triggered.connect(self.close)
        menubar = self.menuBar()
        fileMenu = menubar.addMenu('&File')
        fileMenu.addAction(openFile)
        fileMenu.addAction(exitAction)

    def makeShortcuts(self):
        """Bind Left/Right arrow keys to fire()."""
        QtGui.QShortcut(QtGui.QKeySequence(QtCore.Qt.Key_Right), self, lambda: self.fire("Right"))
        QtGui.QShortcut(QtGui.QKeySequence(QtCore.Qt.Key_Left), self, lambda: self.fire("Left"))

    def paintEvent(self, e):
        """Rescale the displayed pixmap to the current window size.

        NOTE(review): reloads the pixmap from disk on every repaint --
        potentially expensive; consider caching the QPixmap.
        """
        if not self.is_updating:
            s = e.rect().size()
            self.pixmap = QtGui.QPixmap(self.fname).scaled(s, QtCore.Qt.KeepAspectRatioByExpanding)
            self.image_label.setPixmap(self.pixmap)

    def fire(self, position):
        """Classification hook for arrow-key presses (stub: just prints)."""
        print("Firing %s" % position)
def main():
    """Start the Qt event loop with a single ImageViewer window."""
    app = QtGui.QApplication(sys.argv)
    viewer = ImageViewer()  # keep a reference so the window stays alive
    sys.exit(app.exec_())
# Run the viewer only when executed directly, not when imported.
if __name__ == '__main__':
    main()
|
class Solution:
    def minCostToMoveChips(self, position: List[int]) -> int:
        """Return the smaller of the odd-position and even-position chip counts."""
        odd_count = sum(1 for p in position if p % 2 == 1)
        even_count = len(position) - odd_count
        return odd_count if odd_count < even_count else even_count
|
# coding=utf-8
import numpy as np
from scipy import stats
from scipy import special
from scipy.stats import chi2
class Statistic:
    """Descriptive statistics over a 1-D sample loaded from a text file.

    Data is kept sorted after ``load_data``; most methods assume ``self.data``
    and ``self.n`` are populated.
    """

    def __init__(self):
        self.data = []   # sorted sample (numpy array after load_data)
        self.n = 0       # sample size

    def load_data(self, file):
        """Load whitespace-separated floats from *file* and sort them."""
        with open(file) as f:
            for line in f:
                for x in line.split():
                    self.data.append(float(x))
        self.n = len(self.data)
        self.data = np.sort(self.data)

    def get_data(self):
        return self.data

    def find_average_sample_value(self):
        """Sample mean."""
        return np.sum(self.data) / self.n

    def find_selective_dispersion(self):
        """Unbiased sample variance (divides by n - 1)."""
        asv = self.find_average_sample_value()
        return np.sum((np.array(self.data) - asv) ** 2) / (self.n - 1)

    def find_standard_error(self):
        """Standard error of the mean."""
        sd = self.find_selective_dispersion()
        return np.sqrt(sd / self.n)

    def find_mode(self):
        """Return the list of modal (most frequent) sample values.

        BUG FIX: ``np.where`` returns a 1-tuple of index arrays; the old code
        iterated over the tuple itself, yielding a list containing a single
        array instead of the modal values.
        """
        value, count = np.unique(self.data, return_counts=True)
        return [value[i] for i in np.where(count == count.max())[0]]

    def find_median(self, data=None):
        """Median of *data*; defaults to the full sample when None or empty.

        BUG FIX: the default was a mutable list literal (``data=[]``); using
        None avoids the shared-default pitfall while keeping the old
        behaviour (an explicit empty sequence still falls back to the full
        sample). Assumes *data* is sorted, as ``load_data`` guarantees.
        """
        if data is None or len(data) == 0:
            data = self.data
            n = self.n
        else:
            n = len(data)
        median = data[n // 2]
        if n % 2 == 0:
            median += data[n // 2 - 1]
            median /= 2
        return median

    def find_quartiles(self):
        """Return (Q1, Q2, Q3) from the two halves of the sorted sample."""
        data1 = self.data[:self.n // 2]
        data2 = self.data[self.n // 2:]
        q1 = self.find_median(data1)
        q2 = self.find_median()
        q3 = self.find_median(data2)
        return q1, q2, q3

    def find_box_plot(self):
        """Compute box-plot whisker bounds (plotting not implemented)."""
        q1, _, q3 = self.find_quartiles()
        k = 1.5  # conventional whisker coefficient
        X1 = q1 - k * (q3 - q1)
        X2 = q3 + k * (q3 - q1)
        # NOTE(review): X1/X2 are computed but never returned; the function
        # still returns the original placeholder value.
        return 0

    def find_standard_deviation(self):
        """Sample standard deviation (square root of the variance)."""
        dispersion = self.find_selective_dispersion()
        return np.sqrt(dispersion)

    def find_kurtosis(self):
        """Excess kurtosis: fourth central moment over sigma^4, minus 3."""
        asv = self.find_average_sample_value()
        deviation = self.find_standard_deviation()
        moment4 = np.sum((np.array(self.data) - asv) ** 4) / self.n
        return moment4 / (deviation ** 4) - 3

    def find_skewness(self):
        """Skewness coefficient plus a textual interpretation (in Russian)."""
        asv = self.find_average_sample_value()
        deviation = self.find_standard_deviation()
        moment3 = np.sum((np.array(self.data) - asv) ** 3) / self.n
        skewness = moment3 / (deviation ** 3)
        result = "распределение "
        if np.abs(skewness) < 0.25:
            result += "незначительно"
        elif np.abs(skewness) > 0.5:
            result += "существенно "
        else:
            result += "умеренно "
        if skewness > 0:
            result += " скошено вправо"
        elif skewness < 0:
            result += " скошено влево"
        return skewness, result

    def find_min(self):
        """Smallest sample value."""
        return np.min(self.data)

    def find_max(self):
        """Largest sample value."""
        return np.max(self.data)

    def split_data(self, n_bins=10):
        """Split the sample into *n_bins* equal-width intervals.

        Returns the interval edges and the per-interval element counts.
        """
        eps = 1e-10
        min_d = self.find_min()
        max_d = self.find_max()
        h = (max_d - min_d + eps) / n_bins
        inter, count = [], []
        for i in range(n_bins):
            if i == 0:
                inter.append(min_d)
                inter.append(min_d + h)
            else:
                inter.append(inter[-1] + h)
            greater = np.greater_equal(self.data, inter[-2])
            less = np.less(self.data, inter[-1])
            # greater == less can only be True when both are True (a point
            # below the lower edge cannot also be >= the upper edge), so this
            # counts the points falling in [lower, upper).
            count.append(np.count_nonzero(greater == less))
        return inter, count

    def laplace_function(self, x):
        """Laplace function value at x: erf(x / sqrt(2)) / 2."""
        # helped https://stackoverflow.com/questions/56016484/how-to-calculate-laplace-function-in-python-3
        return special.erf(x / 2 ** 0.5) / 2

    def pearson_criterion(self, alpha=0.025):
        """Test the normality hypothesis H0 with Pearson's chi-squared test.

        Prints the observed and critical statistics and the verdict.
        Note: standardizes the interval edges *in place*.
        """
        inter, count = self.split_data(9)
        asv = self.find_average_sample_value()
        disp = np.sqrt(self.find_selective_dispersion())
        theor_freq = []
        n = len(count)
        # Extend the outer intervals to cover the whole real line.
        inter[0] = -np.inf
        inter[-1] = np.inf
        for i in range(len(inter) - 1):
            inter[i + 1] -= asv
            inter[i + 1] /= disp
            pi = self.laplace_function(inter[i + 1]) - self.laplace_function(inter[i])
            theor_freq.append(len(self.data) * pi)
        count = np.array(count)
        theor_freq = np.array(theor_freq)
        chi2_observed = np.sum((count - theor_freq) ** 2 / theor_freq)
        k = n - 3  # degrees of freedom: bins minus the two estimated parameters minus one
        chi2_critical = chi2.ppf(1 - alpha, k)
        print("Наблюдаемое значение: %.3f" % (chi2_observed))
        print("Критическое значение %.3f" % (chi2_critical))
        if chi2_observed < chi2_critical:
            print("Наблюдаемое < критического => ")
            print("Нет оснований отвергнуть гипотезу H_0 о нормальном "
                  "распределении генеральной совокупности.")
        else:
            print("Наблюдаемое > критического =>")
            print("Гипотезу H_0 отвергаем.")

    def expected_value_interval(self, gamma=0.95):
        """Confidence interval for the mean based on Student's t quantile.

        NOTE(review): uses ``stats.t.ppf(gamma, ...)``; a symmetric two-sided
        interval would usually use (1 + gamma) / 2 -- confirm the intended
        convention before changing.
        """
        asv = self.find_average_sample_value()
        n = len(self.data)
        s = np.sqrt(n / (n - 1) * self.find_selective_dispersion())
        t = stats.t.ppf(gamma, n - 1)
        left = asv - (t * s) / np.sqrt(n)
        right = asv + (t * s) / np.sqrt(n)
        return left, right

    def standard_deviation_interval(self, gamma=0.95):
        """Confidence interval for the standard deviation via chi-squared quantiles."""
        n = len(self.data)
        s = np.sqrt(n / (n - 1) * self.find_selective_dispersion())
        chi_1 = np.sqrt(chi2.ppf((1 - gamma) / 2, n - 1))
        chi_2 = np.sqrt(chi2.ppf(1 - (1 - gamma) / 2, n - 1))
        left = (np.sqrt(n - 1) * s) / chi_2
        right = (np.sqrt(n - 1) * s) / chi_1
        return left, right
import string

# Python 2 script: parties are labelled with capital letters A, B, C, ...
letters = list(string.ascii_uppercase)
print letters
# Read the whole input file; the first line holds the number of test cases.
with open('in.txt') as f:
    lines = f.readlines()
lines = [l.split('\n')[0] for l in lines]
t = int(lines[0])
def find_plan(np):
    """Build an evacuation-style plan string for party sizes *np*.

    Greedy strategy (presumably a Code Jam-style task -- confirm against the
    original problem statement): parties are labelled A, B, C, ...; while
    any two parties share the same remaining count, equal-count pairs leave
    together and all other parties are emptied one member at a time; when
    all counts are distinct, the largest party is reduced to match the
    second largest. NOTE: the parameter name `np` shadows the conventional
    numpy alias (numpy is not imported in this file).
    """
    ans = []
    n = len(np)
    paties = letters[:n]
    # d maps party letter -> remaining member count.
    d = dict(zip(paties, np))
    while d:
        # Invert: pd maps a remaining count -> the parties holding it.
        pd = {}
        for k, v in d.iteritems():
            pd.setdefault(v, []).append(k)
        paired_keys = []
        unpaired_keys = []
        for k, v in pd.iteritems():
            if len(v) > 1:
                paired_keys.append(v)
            else:
                unpaired_keys.append(v)
        if paired_keys:
            # Drain every unpaired party one member at a time.
            for parties in unpaired_keys:
                for p in parties:
                    ans.append((str(p) + ' ') * d[p])
                    del d[p]
            # For each equal-count group: drain all but the last two singly,
            # then evacuate the final pair together.
            for parties in paired_keys:
                for p in parties[:-2]:
                    ans.append((str(p) + ' ') * d[p])
                    del d[p]
                pp1 = parties[-1]
                pp2 = parties[-2]
                ans.append((str(pp1) + str(pp2) + ' ') * d[pp1])
                del d[pp1]
                del d[pp2]
        else:
            # All counts distinct: shrink the largest down to the second largest.
            # NOTE(review): if only ONE party remains, dd has a single element
            # and dd[1] raises IndexError -- presumably the inputs guarantee
            # at least two parties here; verify.
            dd = sorted(pd.keys())[-2:]
            max1 = dd[1]
            max2 = dd[0]
            diff = max1 - max2
            ppp = pd[max1][0]
            ans.append((str(ppp) + ' ') * diff)
            d[ppp] = d[ppp] - diff
    ans = ''.join(ans)
    return ans
f = open('out.txt', 'w')
# Each test case occupies two input lines: the party count, then the sizes.
for j, i in enumerate(xrange(1, 2 * t, 2)):
    print '==========================='
    n = int(lines[i])
    # Local name `np` shadows nothing here (numpy is not imported).
    np = [int(p) for p in lines[i + 1].split(' ')]
    assert len(np) == n
    plan = find_plan(np)
    f.write('Case #%s: %s \n' % (j+1, plan))
    print 'Case #%s: %s' % (j+1, plan)
f.close()
|
# Python3 program to find minimum number
# of swaps required to sort an array
# Function returns the minimum
# number of swaps required to sort the array
def minSwaps(arr):
    """Return the minimum number of swaps needed to sort *arr* ascending.

    Decomposes the permutation that sorts the array into cycles; a cycle of
    length L is resolved with exactly L - 1 swaps, so the answer is the sum
    of (length - 1) over all cycles.
    """
    n = len(arr)
    # order[i] = index in arr of the i-th smallest element (stable for ties).
    order = sorted(range(n), key=arr.__getitem__)
    seen = [False] * n
    swaps = 0
    for start in range(n):
        # Skip elements already placed or already counted in a cycle.
        if seen[start] or order[start] == start:
            continue
        # Walk the cycle containing `start` and measure its length.
        length = 0
        j = start
        while not seen[j]:
            seen[j] = True
            j = order[j]
            length += 1
        swaps += length - 1
    return swaps
# Driver Code: sample array whose sorting permutation has two 2-cycles.
arr = [1, 5, 4, 3, 2]
print(minSwaps(arr))  # expected output: 2
# This code is contributed
# by Dharan Aditya
|
#!/usr/bin/python
from opyscad import *
import bearing, screw, config
import math, copy
# ---- idler bracket dimensions (units as used by opyscad; presumably mm) ----
l = 30.0
t = 4.0
belt_gap = 2
# Overall width: bearing outer diameter plus two walls and belt clearance.
w = bearing.D1 + t * 2 + belt_gap * 2.0
dx = bearing.D1 / 2
lip_t = 1.5
lip_d = bearing.d + 4.0
bearing_gap = 0.0
cap_gap = 0.2
# Body height: wall + bearing width + lip (+ optional bearing gap).
h = t + bearing.B + lip_t + bearing_gap
# ---- bearing fastener ----
bearing_nut = screw.m3
bearing_nut_depth = 2.6
bearing_screw_head_depth = 2.6
dz = -45.0
# ---- adjustment knob geometry ----
knob_d = 40.0
knob_h = 12.0
knob_cut_r = 1
knob_cut_steps = 40
knob_nut_h = 2.0
knob_trigger_dist = 17.0
knob_trigger_d = 2.0
knob_triger_depth = 0.5   # NOTE(review): "triger" spelling kept -- referenced below
knob_trigger_steps = 40
trigger_gap = 0.2
# Knob nut: copy of M6 with custom clearances.
knob_nut = copy.copy(screw.m6)
knob_nut.gap_thread = 0.1
knob_nut.gap_octo = 0.3
screw_head_h = 5.5
screw_h = 50.0
screw_x = 16.0
# ---- construction screws holding the halves together ----
const_screw = screw.m3
const_screw_dist_x = 4.5
const_screw_dist_y = 4.5
const_screw_head_depth = 3.3
const_nut_depth = 4.0
# Python 2 print statement: report footprint (width, doubled height).
print w, h*2
def create(octo_nut = True):
    """Build one idler bracket half as an opyscad CSG solid.

    octo_nut=True cuts nut-trap recesses; False cuts screw-head recesses
    instead. (Operator semantics assumed from usage: `<<` translate,
    `/` rotate, `+`/`-` union/difference -- TODO confirm against opyscad.)
    """
    if octo_nut:
        scr_depth = bearing_nut_depth
    else:
        scr_depth = bearing_screw_head_depth
    # Main body minus the bearing/belt pocket, plus the bearing lip.
    res = cube([l, w, h]) << [-dx, -w/2, 0]
    res -= cube([bearing.D1 + belt_gap*2, bearing.D1 + belt_gap*2, bearing.B + lip_t + bearing_gap + 1]) << [-dx-1, -bearing.D1/2 - belt_gap, t]
    res += cylinder(lip_t, d = lip_d, fn = 24) << [0, 0, t - 0.01]
    # Bearing fastener recess and its support.
    scr = bearing_nut.hole(t + lip_t + 1, scr_depth + 1, octo = octo_nut) / [180, 0, 0]
    res -= scr << [0, 0, scr_depth]
    #res -= +bearing_nut.hole(t + lip_t + 1, scr_depth + 1, octo = octo_nut) << [0, 0, -1]
    res += +bearing_nut.z_sup(scr_depth)
    #res -= cylinder(t + lip_t + 2, d = bearing.d + 0.5, fn = 16) << [0, 0, -1]
    # Horizontal adjustment-bolt channel through the body.
    bolt = +knob_nut.hole(screw_h, screw_head_h, octo = True) / [0, -90, 0]
    bolt /= [90, 0, 0]
    bolt <<= [screw_x, 0, h]
    res -= bolt
    # Construction-screw recesses in both rear corners.
    if octo_nut:
        scr_depth = const_nut_depth
    else:
        scr_depth = const_screw_head_depth
    scr = const_screw.hole(h + 1, scr_depth + 1, octo = octo_nut) / [180, 0, 0]
    res -= scr << [l-dx - const_screw_dist_x, w/2 - const_screw_dist_y, scr_depth]
    res -= scr << [l-dx - const_screw_dist_x, -w/2 + const_screw_dist_y, scr_depth]
    sup = +const_screw.z_sup(scr_depth)
    res += sup << [l-dx - const_screw_dist_x, w/2 - const_screw_dist_y, 0]
    res += sup << [l-dx - const_screw_dist_x, -w/2 + const_screw_dist_y, 0]
    return res
def create_couple():
    """Two mirrored bracket halves positioned back to back around the bearing."""
    # Local dx intentionally shadows the module-level dx.
    dx = t + lip_t + bearing.B
    res = (create() / [0, -90, 0]) << [dx + bearing_gap, 0, 0]
    res += (create() / [180, -90, 0]) << [-dx - bearing_gap, 0, 0]
    return res
def create_knob(printing = False):
    """Knurled adjustment knob; printing=True adds the detent trigger bumps."""
    res = cylinder(knob_h, d = knob_d, fn = 128)
    # Knurling: subtract a chamfered pin every 360/knob_cut_steps degrees.
    cut = cylinder(knob_h + 2, knob_cut_r, fn = 16) << [knob_d/2, 0, -1]
    cut += (cube([2, 10, 4], center = True) / [0, -30, 0]) << [knob_d/2, 0, knob_h]
    cut += (cube([1, 10, 4], center = True) / [0, 30, 0]) << [knob_d/2, 0, 0]
    for i in range(knob_cut_steps):
        fi = 360.0/knob_cut_steps*i
        res -= cut / [0, 0, fi]
    if printing:
        res += create_trigger(False)
    # Nut pocket for the knob's drive nut, plus a support membrane above it.
    res -= knob_nut.hole(knob_nut_h + 1, knob_h, octo = True) << [0, 0, knob_nut_h]
    res += knob_nut.z_sup(knob_h - knob_nut_h + 0.1) << [0, 0, knob_nut_h - 0.1]
    # Python 2 print statement: remaining material above the nut pocket.
    print knob_h - knob_nut_h
    return color(config.knobcol) (res)
def create_trigger(gap):
    """Ring of detent bumps; gap=True enlarges them by trigger_gap for clearance."""
    gap_r = 0
    if gap:
        gap_r = trigger_gap
    res = union()
    # One sphere, replicated around the z axis at knob_trigger_dist radius.
    trigger = sphere(d = knob_trigger_d + gap_r*2, fn = 16)
    trigger <<= [knob_trigger_dist, 0, knob_triger_depth]
    for i in range(knob_trigger_steps):
        fi = 360.0/knob_trigger_steps*i
        res += trigger / [0, 0, fi]
    return res
# NOTE(review): `if 1:` disables the usual __main__ guard, so the SCAD models
# are regenerated on every import of this module -- confirm intended.
if 1: #__name__ == '__main__':
    # Both bracket variants side by side, then the flipped knob for printing.
    res = create(False)
    res += create(True) << [0, w + 10, 0]
    res.save('scad/idler.scad')
    res = (create_knob(True) / [180, 0, 0]) << [0, 0, knob_h]
    res.save('scad/knob.scad')
|
from graphics import *
from random import randint
class Pig:
    """A pig avatar drawn on a graphics window that can slide vertically."""

    def __init__(self, win):
        """Draw the pig (body circle plus text decorations) centred at (0, 0)."""
        self.circ = Circle(Point(0, 0), 10)
        self.pcent = self.circ.getCenter()
        self.cy = self.pcent.getY()
        self.cx = self.pcent.getX()
        self.circ.setFill('pink')
        self.circ.setOutline('indian red')
        self.circ.setWidth(3)
        self.text1 = Text(Point(0, 6), " ^ ^")
        self.text1.setSize(18)
        self.text = Text(Point(0, -2), "ヾ(;゚(OO)゚)ノ")
        self.text.setStyle("bold")
        self.text.setSize(12)
        self.text2 = Text(Point(-14, -2), "~")
        self.text2.setSize(25)
        # Draw order matters for stacking: body first, decorations on top.
        for part in (self.circ, self.text1, self.text, self.text2):
            part.draw(win)
        self.win = win

    def movePig(self, dy):
        """Shift every drawn part vertically by *dy* and track the centre y."""
        for part in (self.circ, self.text1, self.text, self.text2):
            part.move(0, dy)
        self.cy = self.cy + dy

    def getCX(self):
        """Tracked centre x coordinate."""
        return self.cx

    def getCY(self):
        """Tracked centre y coordinate."""
        return self.cy

    def setCX(self, x):
        """Offset the tracked centre x by *x* (adds rather than assigns)."""
        self.cx = self.cx + x

    def setCY(self, y):
        """Offset the tracked centre y by *y* (adds rather than assigns)."""
        self.cy = self.cy + y
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
#Filename: iterator.py
colors = [ 'red', 'green', 'blue', 'yellow' ]
# Index-based iteration: works, but is the non-idiomatic C-style form.
for i in range(len(colors)):
    print (colors[i])
# >>> red
# green
# blue
# yellow
# Idiomatic Python: iterate the values directly.
for color in colors:
    print(color)
|
# Demo: building a set by repeated add() calls.
s = set()
s.add(1)
s.add(2)
s.add(3)
s.add(4)
s.add(5)
print(s)
#each elements in a set are unique if:
# Adding 3 twice has no effect -- sets silently ignore duplicates.
s = set()
s.add(1)
s.add(2)
s.add(3)
s.add(3)
s.add(5)
print(s)
#the set will print s = {1, 2, 3, 5}
|
def Main():
    """Teaching demo of Python basics: printing, loops, lists and dicts.

    Asks the user for a loop count (re-prompting while it exceeds 9), counts
    up to it, then iterates a list and a dictionary.
    """
    # Having brackets is the python 3.X way
    print("test")
    # Loose typing: the runtime infers the type, no declaration needed.
    numOfLoops = askuser()
    # BUG FIX: int(input(...)) can never return None, so the old `is None`
    # branch was dead code -- and the >9 check only re-asked once, letting a
    # second oversized answer through. Loop until the value is acceptable.
    while numOfLoops > 9:
        print("Too big of a number try again")
        numOfLoops = askuser()
    # Whitespace (normally 4 spaces) marks the block structure in Python.
    x = 1
    while x <= numOfLoops:
        print(x)
        x = x + 1
    lists = ["cat", "dog", "rat", "tuna", "tree"]
    dictionary = {"A": 1, "B": 2, "C": 3, "Z": 99}
    # Iterate list values directly.
    for animal in lists:
        print(animal)
    # Iterate key/value pairs via items().
    for key, value in dictionary.items():
        print(key, value)
#functions!
def askuser():
    """Prompt for a loop count and return it as an int.

    input() always yields a string in Python 3, so the answer is converted
    explicitly (a non-numeric reply raises ValueError).
    """
    response = input("how many times should this loop: ")
    return int(response)
#If you use a main function make sure to call it at the end!
# Runs unconditionally (no __main__ guard), including on import.
Main()
|
def removeRed():
    """Red-eye removal for one specific photo (JES media environment).

    Opens a user-picked picture, then recolours pixels that are close to a
    reference red to dark gray -- but only inside two hard-coded eye
    rectangles, so other red areas of the image are untouched.
    NOTE(review): relies on JES builtins (makePicture, getPixels, distance,
    ...); the coordinates only fit one particular photo.
    """
    my_pict = makePicture(pickAFile())
    explore(my_pict)
    for px in getPixels(my_pict):
        x = getX(px)
        y = getY(px)
        # set the boundary for left eye
        if 100 <= x <= 112 and 168 <= y <= 178:
            color_red = makeColor(170,75,120)
            # choose the gray with pickAColor()
            color_gray = makeColor(51,51,51)
            color = getColor(px)
            # Recolour only pixels within colour-distance 90 of the red.
            if distance(color,color_red) < 90:
                setColor(px,color_gray)
        # set the boundary for the right eye
        if 155 <= x <= 165 and 170 <= y <= 180:
            color_red = makeColor(170,75,120)
            color_gray = makeColor(51,51,51)
            color = getColor(px)
            if distance(color,color_red) < 90:
                setColor(px,color_gray)
    # show the final picture
    explore(my_pict)
import pandas as pd
import itertools
import numpy as np
# Read the Excel file into a DataFrame
df = pd.read_excel('example.xlsx')
# Get the column names
column_names = df.columns.tolist()
# Define the suffixes for permute and spread columns
permute_suffix = '_P'
spread_suffix = '_S'
# Get the column names for permute and spread columns
permute_columns = [col for col in column_names if col.endswith(permute_suffix)]
spread_columns = [col for col in column_names if col.endswith(spread_suffix)]
# For each ordinary column collect its unique non-null values and, when there
# is more than one, interpolate 4 extra values between each adjacent pair
# (linspace num=5, endpoint=False, dropping the left endpoint itself).
unique_values = []
for col in column_names:
    if col in permute_columns or col in spread_columns:
        continue
    non_null_values = df[col].dropna().unique()
    if len(non_null_values) > 1:
        spread_values = []
        for i in range(len(non_null_values)-1):
            spread = np.linspace(non_null_values[i], non_null_values[i+1], num=5, endpoint=False)[1:]
            spread_values.extend(spread)
        unique_values.append(sorted(set(list(non_null_values) + spread_values)))
    else:
        unique_values.append(non_null_values)
# Cartesian product of the per-column value lists.
permutations = list(itertools.product(*unique_values))
# Create a new DataFrame with separate columns for each column in the original Excel file
df_permutations = pd.DataFrame(permutations, columns=[col for col in column_names if col not in permute_columns and col not in spread_columns])
# Spread columns: broadcast the first unique value to every generated row.
for col in spread_columns:
    if col not in column_names:
        continue
    spread_values = df[col].unique()
    df_permutations[col] = spread_values[0]
# Permute columns: cross-join every ordering of the column's unique values.
# NOTE(review): `unique_values` and `permutations` are silently reused here,
# and the second concat repeats df_permute by the *already multiplied* length
# of df_permutations -- the resulting row counts look wrong; verify.
for col in permute_columns:
    if col not in column_names:
        continue
    unique_values = df[col].dropna().unique()
    permutations = list(itertools.permutations(unique_values))
    df_permute = pd.DataFrame(permutations, columns=[f"{col}_{i}" for i in range(len(unique_values))])
    df_permutations = pd.concat([df_permutations]*len(df_permute), ignore_index=True)
    df_permute = pd.concat([df_permute]*len(df_permutations), ignore_index=True)
    df_permutations = pd.concat([df_permutations, df_permute], axis=1)
# NOTE(review): df_permutations[col] is indexed below, but the raw permute
# column `col` was never added to df_permutations (only the `col_i` expansion
# columns were) -- this likely raises KeyError at runtime.
for col in permute_columns:
    if col not in column_names:
        continue
    permute_column_names = [f"{col}_{i}" for i in range(len(df[col].dropna().unique()))]
    for i, name in enumerate(permute_column_names):
        df_permutations[name] = df_permutations[col][i::len(permute_column_names)].tolist()
# Print the DataFrame with permutations
print(df_permutations)
|
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot = True)
def add_layers(inputs, in_size, out_size, activation_function=None):
    """Create one fully connected layer: activation(inputs @ W + b).

    W is drawn from a standard normal, b starts at 0.1; when
    activation_function is None the raw linear output is returned.
    """
    weights = tf.Variable(tf.random_normal([in_size, out_size]))
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    preactivation = tf.matmul(inputs, weights) + biases
    if activation_function is None:
        return preactivation
    return activation_function(preactivation)
def compute_accuracy(v_xs,v_ys):
    """Fraction of samples in (v_xs, v_ys) the current model labels correctly.

    Relies on the module-level `sess`, `xs`, `ys` and `predictions` globals.
    """
    global predictions
    y_pre = sess.run(predictions,feed_dict= {xs: v_xs})
    # correct when the argmax class matches the one-hot label
    correct_prediction = tf.equal(tf.argmax(y_pre,1), tf.argmax(v_ys,1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32)) # change the data type
    result = sess.run(accuracy, feed_dict={xs: v_xs, ys: v_ys})
    return result
# define placeholder for inputs to network (TensorFlow 1.x graph-mode API)
xs = tf.placeholder(tf.float32,[None,784]) # 28 * 28
ys = tf.placeholder(tf.float32, [None,10])
#add output layer: softmax over the 10 digit classes
predictions = add_layers(xs,784,10,activation_function = tf.nn.softmax) #in_size = 784, out_size is result
# loss function # use cross_entropy
cross_entropy = tf.reduce_mean(-tf.reduce_sum(ys*tf.log(predictions),reduction_indices = [1]))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# 2000 SGD steps, printing test accuracy every 50 steps
for i in range(2000):
    batch_xs, batch_ys = mnist.train.next_batch(100) # use mini batch to learn, save time
    sess.run(train_step, feed_dict={xs:batch_xs, ys: batch_ys})
    if i % 50 == 0:
        print(compute_accuracy(mnist.test.images, mnist.test.labels)) |
import numpy as np
# toy data: four 2-D points and two initial cluster representatives
x = np.array([[0, -6], [4, 4], [0, 0], [-5, 2]])
z = np.array([[-5, 2], [0, -6]])
assignment = np.zeros(x.shape[0])  # overwritten by assign_points below
def assign_points(x, z):
    """Assign each row of x to the nearest row of z under the L1 distance.

    Returns an array of cluster indices, one per row of x.
    """
    pairwise_l1 = np.abs(x[:, None, :] - z[None, :, :]).sum(axis=2)
    return np.argmin(pairwise_l1, axis=1)
def find_medoids(x, z, assignment):
    """Update each row of z in place to a representative member of its cluster.

    For cluster k it picks the member p_i minimising ||sum_j (p_j - p_i)||_2,
    which (since the sum equals n*(mean - p_i)) is the member closest to the
    cluster mean -- a centroid-like heuristic rather than a true L1 medoid.
    NOTE(review): assumes every cluster is non-empty.
    """
    K = z.shape[0]
    for k in np.arange(K):
        nk = np.count_nonzero(assignment==k)
        selected_points = x[assignment==k]
        tries = np.zeros([nk, x.shape[1]])
        for i in np.arange(nk):
            # sum of difference vectors from member i to all cluster members
            tries[i,:] = np.sum(selected_points - selected_points[i,:], axis=0)
        z[k,:] = x[assignment==k][np.argmin(np.linalg.norm(tries, 2, axis=1)),:]
        #mean = np.sum(x[assignment==k], axis=0)/nk
        #z[k,:] = x[np.argmin(np.linalg.norm(x-mean, 1, axis=1))]
    return z
# three alternating assign/update rounds of the k-medoids-style heuristic
assignment = assign_points(x, z)
#print(assignment)
z = find_medoids(x, z, assignment)
#print(z)
assignment = assign_points(x, z)
#print(assignment)
z = find_medoids(x, z, assignment)
#print(z)
assignment = assign_points(x, z)
#print(assignment)
z = find_medoids(x, z, assignment)
#print(z)
def find_centroids(x, z, assignment):
    """Replace each row of z, in place, with the mean of its assigned points.

    NOTE: a cluster with no members divides by zero, as in a plain k-means step.
    """
    for cluster in np.arange(z.shape[0]):
        members = x[assignment == cluster]
        z[cluster, :] = np.sum(members, axis=0) / np.count_nonzero(assignment == cluster)
    return z
# k-means proper: same data, float arrays so the centroid means stay exact
x = np.array([[0., -6.], [4., 4.], [0., 0.], [-5., 2.]])
z = np.array([[-5., 2.], [0., -6.]])
assignment = np.zeros(x.shape[0])
# three alternating assign/centroid-update rounds, printing each step
assignment = assign_points(x, z)
print(assignment)
z = find_centroids(x, z, assignment)
print(z)
assignment = assign_points(x, z)
print(assignment)
z = find_centroids(x, z, assignment)
print(z)
assignment = assign_points(x, z)
print(assignment)
z = find_centroids(x, z, assignment)
print(z)
|
# -*- python -*-
load(
"@drake//attic:build_macros.bzl",
drake_cc_googletest = "attic_drake_cc_googletest",
drake_cc_library = "attic_drake_cc_library",
)
load("//tools/skylark:test_tags.bzl", "gurobi_test_tags")
load("//tools/lint:lint.bzl", "add_lint_tests")
# Shared helpers for the global-IK test suite (not itself a test target).
drake_cc_library(
    name = "global_inverse_kinematics_test_util",
    testonly = 1,
    srcs = [
        "test/global_inverse_kinematics_test_util.cc",
    ],
    hdrs = [
        "test/global_inverse_kinematics_test_util.h",
    ],
    deps = [
        "//attic/multibody/parsers",
        "//common:find_resource",
        "//common/test_utilities:eigen_matrix_compare",
        "//multibody:global_inverse_kinematics",
        "//multibody:inverse_kinematics",
        "//multibody:rigid_body_tree_construction",
        "@gtest//:without_main",
    ],
)
# Core global-IK test; requires a Gurobi license (see gurobi_test_tags).
drake_cc_googletest(
    name = "global_inverse_kinematics_test",
    timeout = "moderate",
    srcs = ["test/global_inverse_kinematics_test.cc"],
    data = [
        "//manipulation/models/iiwa_description:models",
    ],
    tags = gurobi_test_tags() + [
        # Takes too long to run with Valgrind.
        "no_memcheck",
        # Excluding because an assertion fails in LLVM code. Issue #6179.
        "no_tsan",
    ],
    deps = [
        ":global_inverse_kinematics_test_util",
    ],
)
# Reachability variant of the global-IK test.
drake_cc_googletest(
    name = "global_inverse_kinematics_reachable_test",
    timeout = "moderate",
    srcs = ["test/global_inverse_kinematics_reachable_test.cc"],
    data = [
        "//manipulation/models/iiwa_description:models",
    ],
    tags = gurobi_test_tags() + [
        # Takes too long to run with Valgrind.
        "no_memcheck",
        # Excluding because an assertion fails in LLVM code. Issue #6179.
        "no_tsan",
    ],
    deps = [
        ":global_inverse_kinematics_test_util",
    ],
)
# Collision-avoidance variant of the global-IK test.
drake_cc_googletest(
    name = "global_inverse_kinematics_collision_avoidance_test",
    timeout = "moderate",
    srcs = ["test/global_inverse_kinematics_collision_avoidance_test.cc"],
    data = [
        "//manipulation/models/iiwa_description:models",
    ],
    tags = gurobi_test_tags() + [
        # Takes too long to run with Valgrind.
        "no_memcheck",
        # Excluding because an assertion fails in LLVM code. Issue #6179.
        "no_tsan",
    ],
    deps = [
        ":global_inverse_kinematics_test_util",
    ],
)
# Feasible-posture variant; slowest of the set, also excluded from ASan.
drake_cc_googletest(
    name = "global_inverse_kinematics_feasible_posture_test",
    timeout = "long",
    srcs = ["test/global_inverse_kinematics_feasible_posture_test.cc"],
    data = [
        "//manipulation/models/iiwa_description:models",
    ],
    tags = gurobi_test_tags() + [
        "no_asan",
        # Takes too long to run with Valgrind.
        "no_memcheck",
        "no_tsan",
    ],
    deps = [
        ":global_inverse_kinematics_test_util",
    ],
)
add_lint_tests(
    cpplint_data = ["//attic:CPPLINT.cfg"],
)
|
import tax.tax_calculator as txc
def test_tax_calcation():
    """Spot-check CA_MARRIED bracket math at a few AGI levels.

    NOTE(review): the name has a typo ('calcation'); kept unchanged because
    pytest discovers tests by name.  Expected values assume the tax tables
    shipped with tax.tax_calculator and will drift if those tables change.
    """
    tc = txc.CA_MARRIED
    assert tc.agi_to_tax(0) == 0
    assert tc.agi_to_tax(8472) == 0 # state deduction
    assert round(tc.agi_to_tax(24000)) == 155 # federal deduction (some state)
    assert round(tc.agi_to_tax(40000)) == 2060 # low fed rate
    assert round(tc.agi_to_tax(100000)) == 12026
|
import csv
import requests
import urllib
import json
import time
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
from stem import Signal
from stem.control import Controller
# spoof a real browser as jstor et al don't take kindly to bots
headers = {
    'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:89.0) Gecko/20100101 Firefox/89.0',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
    'Accept-Language': 'en-US,en;q=0.5',
    'Upgrade-Insecure-Requests': '1',
    'Alt-Used': 'dx.doi.org',
    'Connection': 'keep-alive',
    'DNT': '1',
    'Sec-GPC': '1',
}
# results keyed by MMS id; get_url() appends to it and checkpoints the
# whole dict to result.json after every request
mms_id_to_url = {}
def get_url(target_url):
    """Fetch target_url through the local Tor SOCKS proxy and record the result.

    Appends resolution info to the module-level mms_id_to_url under the
    module-level `current_id`, rotating the Tor circuit every 20 ids and
    checkpointing everything to result.json after each request.
    NOTE(review): a new Session is created per call and never closed, and the
    bare `except Exception` swallows all failures (prints only).
    """
    session = requests.session()
    # TO Request URL with SOCKS over TOR
    session.proxies = {}
    session.proxies['http']='socks5h://localhost:9050'
    session.proxies['https']='socks5h://localhost:9050'
    try:
        r = session.get(target_url, headers=headers, timeout=10.0)
        # rotate the exit node every 20 distinct ids to spread requests
        if len(mms_id_to_url) % 20 == 0:
            print("switching ips...")
            switch_ip()
        print(f'{len(mms_id_to_url)}: {r.status_code}: {r.url} -- {r.headers.get("content-type")}')
        mms_id_to_url.setdefault(current_id,[]).append({
            'given_url': target_url,
            'final_url': r.url,
            'type': r.headers.get("content-type"),
            'base_url': urllib.parse.urlsplit(r.url).netloc,
            'status_code': r.status_code
        })
        # checkpoint after every request so a crash loses at most one result
        with open('result.json', 'w') as outfile:
            json.dump(mms_id_to_url, outfile, indent=2)
    except Exception as e:
        print(str(e))
def switch_ip():
    """Ask the local Tor control port for a fresh circuit (new exit IP)."""
    with Controller.from_port(port = 9051) as controller:
        # NOTE(review): control-port password is hard-coded in source; keep
        # credentials out of version control
        controller.authenticate(password = 'PASSWORD_GOES_HERE')
        controller.signal(Signal.NEWNYM)
        time.sleep(controller.get_newnym_wait())
# Drive the crawl from rambi.csv: column 0 is the MMS id (blank means "same
# record as the previous row"), column 1 an optional URL to resolve.
with open('rambi.csv', newline='') as csvfile:
    rambi_data = csv.reader(csvfile)
    current_id = None
    for row in rambi_data:
        # NOTE(review): 'id' shadows the builtin of the same name
        id = current_id if row[0] == "" else row[0]
        current_id = id
        if row[1] != "":
            get_url(row[1])
|
from django.shortcuts import render, get_object_or_404,redirect
from django.views.generic import ListView, DetailView, View
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from .models import Recipe, Ingredient, Like, User
from .forms import RecipeForm, NewUserForm, IngredientForm
from django.contrib.auth.forms import *
from django.contrib.auth import *
from django.contrib.auth.decorators import *
from django.http import *
from django.db.models import Q
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
from django.urls import reverse_lazy
def recipe_list(request):
    """List recipes newest-first with optional text search and pagination.

    GET params:
        q    -- optional term matched (icontains) against name, description
                and difficulty
        page -- optional page number; Paginator.get_page maps an invalid or
                missing value to page 1 and an out-of-range value to the
                last page
    """
    recipes = Recipe.objects.all().order_by('-time')
    query = request.GET.get('q')
    if query:
        recipes = recipes.filter(
            Q(name__icontains=query) |
            Q(description__icontains=query) |
            Q(difficulty__icontains=query)
        )
    paginator = Paginator(recipes, 4)
    # The original called get_page(), discarded the result, then re-fetched
    # the page inside a try/except that re-implemented get_page's fallbacks;
    # one get_page() call has the same behavior.  The unused `context` dict
    # was dead code and is dropped.
    recipes = paginator.get_page(request.GET.get('page'))
    return render(request, 'odev/recipe_list.html', {'recipes' : recipes})
def recipe_detail(request, pk):
    """Render a single Recipe; 404 when pk is unknown."""
    recipe = get_object_or_404(Recipe, pk=pk)
    return render(request, 'odev/recipe_detail.html', {'recipe': recipe})
def recipe_new(request):
    """Create a Recipe owned by the current user.

    On an invalid POST the bound form (with its validation errors) is
    re-rendered; the original replaced it with a blank form, silently
    discarding the error messages.
    """
    if request.method == "POST":
        form = RecipeForm(request.POST, request.FILES)
        if form.is_valid():
            post = form.save(commit=False)
            post.user = request.user
            post.save()
            form.save_m2m()  # needed after commit=False to store M2M fields
            return redirect('/')
        # invalid: fall through with the bound form so errors are shown
    else:
        form = RecipeForm()
    return render(request, 'odev/recipe_edit.html', {'form': form})
# class IngredientList(ListView):
# model = Ingredient
def ingredient_list(request, template_name='odev/ingredient_list.html'):
    """Render every Ingredient using the given template."""
    context = {'object_list': Ingredient.objects.all()}
    return render(request, template_name, context)
# the class-based views were easier to write than the function-based ones
class IngredientNew(CreateView):
    """Create an Ingredient; on success return to the recipe form."""
    model = Ingredient
    fields = ['ingredient']
    success_url = reverse_lazy('recipe_new')
class RecipeUpdate(UpdateView):
    """Edit an existing Recipe; on success return to the list view."""
    model = Recipe
    fields = ['ingredients', 'name', 'description', 'image', 'difficulty']
    success_url = reverse_lazy('recipe_list')
class RecipeDelete(DeleteView):
    """Delete a Recipe (confirmation view); on success return to the list."""
    model = Recipe
    success_url = reverse_lazy('recipe_list')
def logout_request(request):
    """Log the current user out and return to the home page."""
    logout(request)
    return redirect('/')
def login_request(request):
    """Authenticate and log in a user.

    On a failed or invalid POST the bound form is re-rendered with its
    errors; the original unconditionally replaced it with a blank
    AuthenticationForm just before rendering, discarding every error.
    """
    if request.method == 'POST':
        form = AuthenticationForm(request=request, data=request.POST)
        if form.is_valid():
            username = form.cleaned_data.get('username')
            password = form.cleaned_data.get('password')
            user = authenticate(username=username, password=password)
            if user is not None:
                login(request, user)
                return redirect('/')
    else:
        form = AuthenticationForm()
    return render(request = request,
                  template_name = "registration/login.html",
                  context={"form":form})
def register(request):
    """Sign up a new user; on success redirect to the login page.

    On an invalid POST the bound form is re-rendered with its errors;
    the original replaced it with a blank NewUserForm, discarding them.
    """
    if request.method == 'POST':
        form = NewUserForm(request.POST)
        if form.is_valid():
            post = form.save(commit=False)
            post.save()
            return redirect('login')
        # invalid: fall through with the bound form so errors are shown
    else:
        form = NewUserForm()
    return render(request,'registration/register.html',{'form':form})
|
class Rectangle:
    """Axis-aligned rectangle with a length and a width."""

    def __init__(self, length, width):
        self.length = length
        self.width = width

    def perimeter(self):
        """Return the perimeter, 2 * (length + width)."""
        peri = (self.length + self.width) * 2
        return peri

    def area(self):
        """Return the area, length * width."""
        space = self.length * self.width
        return space


class Parallelepipede(Rectangle):
    """Rectangular box: a Rectangle base extruded to a height."""

    def __init__(self, length, width, height):
        super().__init__(length, width)
        self.height = height

    def volume(self):
        """Return the volume, base area * height.

        The original printed the value and returned None, which made
        print(obj.volume()) display 'None'.
        """
        return self.area() * self.height

    def display(self):
        pass  # intentionally empty placeholder
# quick demo of both classes
rec_1 = Rectangle(20,30)
rec_2 = Rectangle(20,50)
para_1 = Parallelepipede(30,3,10)
para_2 = Parallelepipede(10,10,10)
print(para_1.width)
print(para_1.volume())
|
# Basic function-call demo: main() delegates to bulldog().
def main():
    bulldog()
def bulldog():
    print('feed me!')
if __name__ == '__main__': main() # __name__ is a special variable that returns the name of the current module
#####
# Same demo with a single positional argument.
def main():
    bulldog(3)
def bulldog(n):
    print(f'{n}feed me!')
if __name__ == '__main__': main()
# FUNCTION ARGUMENTS
# Demo of a default argument: c falls back to 0 when not supplied.
def main():
    bulldog(5, 6)
def bulldog(a, b, c = 0): # c represents a default argument
    print('feed me!')
    print(a, b, c)
if __name__ == '__main__': main()
# ARGUMENT LISTS
# *args collects extra positional arguments into a tuple.
def main():
    kitten('meow', 'grrr', 'purr')
def kitten(*args): # variable length argument list. It's a tuple
    if len(args):
        for s in args:
            print(s)
    else:
        print('meow')
if __name__ == '__main__': main()
# another example to get the same result:
# a tuple can be unpacked into *args at the call site with the * operator
def main():
    x = ('meow', 'grrr', 'purr')
    kitten(*x)
def kitten(*args): # variable length argument list. It's a tuple
    if len(args):
        for s in args:
            print(s)
    else:
        print('meow')
if __name__ == '__main__': main()
# KEY WORD ARGUMENTS #############
# like list arguments, but are dicts rather than tuples
# allows function to have a variable number of named arguments
def main():
    kitten(Buffy = 'meow', Zilla = 'grr', Angel = 'rawr')
def kitten(**kwargs):
    """Print one line per keyword argument, or 'meow' when none are given."""
    if len(kwargs):
        for k in kwargs:
            # the original string literal was missing its closing quote
            # before .format(), which was a syntax error
            print('Kitten {} says {}'.format(k, kwargs[k]))
    else:
        print('meow')
if __name__ == '__main__': main()
# another example to get the same result:
# a dict can be unpacked into **kwargs at the call site with the ** operator
def main():
    x = dict(Buffy = 'meow', Zilla = 'grr', Angel = 'rawr')
    kitten(**x)
def kitten(**kwargs):
    """Print one line per keyword argument, or 'meow' when none are given."""
    if len(kwargs):
        for k in kwargs:
            # the original string literal was missing its closing quote
            # before .format(), which was a syntax error
            print('Kitten {} says {}'.format(k, kwargs[k]))
    else:
        print('meow')
if __name__ == '__main__': main()
# RETURN VALUES #################
def main():
    x = bulldog()
    print(type(x), x)
def bulldog():
    """Print a message and return a demo dict."""
    print("I'm hungry!")
    return dict(buford = 5, henley = 4, emi = 4)
# the original compared against '__name__' (always False), so the demo
# never ran even when executed as a script
if __name__ == '__main__': main()
# GENERATORS ###################
# instead of returning a single value, it returns a stream of values
# example: build our own version of range() that includes all numbers inclusive
def main():
    # NOTE(review): calls inclusive_range() with no arguments, which that
    # function rejects with TypeError
    for i in inclusive_range():
        print(i, end = ' ')
    print()
def inclusive_range(*args):
    """Generator version of range() whose stop value is included.

    Accepts (stop), (start, stop) or (start, stop, step) like range().
    Raises TypeError for zero or more than three arguments.
    The original assigned stop with '==' (a no-op comparison), had no
    default for step, and never yielded anything.
    """
    numargs = len(args)
    start = 0
    stop = 1
    step = 1
    # initialise parameters from the positional arguments
    if numargs < 1:
        raise TypeError(f'expected at least 1 argument, got {numargs}')
    elif numargs == 1:
        stop = args[0]
    elif numargs == 2:
        (start, stop) = args
    elif numargs == 3:
        (start, stop, step) = args
    else:
        raise TypeError(f'expected at most 3 arguments, got {numargs}')
    i = start
    while i <= stop:
        yield i
        i += step
# DECORATORS #################
# special type of function that returns a wrapper function
def f1(f):
    """Decorator: print a line before and after calling f."""
    def f2():
        print('this is before the function call')
        f()
        print('this is after the function call')
    return f2
@f1
def f3():
    print('this is f3')  # the original string literal was unterminated
f3()
# Example
import time
def elapsed_time(f):
    """Decorator: print how long a call to f takes, in milliseconds."""
    def wrapper():
        t1 = time.time()
        f()
        t2 = time.time()
        print(f'Elapsed time: {(t2 - t1) * 1000} ms')
    return wrapper  # the original never returned wrapper, so big_sum became None
@elapsed_time
def big_sum():
    """Sum the integers 0..9999 and print the result."""
    num_list = []
    for num in range(0, 10000):  # the original had a stray extra ')'
        num_list.append(num)
    print(f'Big sum: {sum(num_list)}')
def main():
    big_sum()
if __name__ == '__main__': main()
|
"""
@Time : 2020-07-07 23:11
@Author : QDY
@FileName: 166. 分数到小数.py
给定两个整数,分别表示分数的分子 numerator 和分母 denominator,以字符串形式返回小数。
如果小数部分为循环小数,则将循环的部分括在括号内。
示例 1:
输入: numerator = 1, denominator = 2
输出: "0.5"
示例 2:
输入: numerator = 2, denominator = 1
输出: "2"
示例 3:
输入: numerator = 2, denominator = 3
输出: "0.(6)"
"""
class Solution:
    def fractionToDecimal(self, numerator, denominator):
        """Return numerator/denominator as a decimal string.

        A repeating fractional part is wrapped in parentheses, e.g.
        2/3 -> "0.(6)".  Long division: once a remainder repeats, the
        digits produced since its first occurrence form the cycle.
        """
        if numerator % denominator == 0:
            return str(numerator // denominator)
        # normalise both operands to be positive, remembering the sign
        sgn = 1
        if numerator < 0:
            sgn *= -1
            numerator = -numerator
        if denominator < 0:
            sgn *= -1
            denominator = -denominator
        if sgn < 0:
            head = '-'
        else:
            head = ''
        head += str(numerator // denominator) + '.' # integer part
        # print(head)
        r = numerator % denominator
        pos = 0
        remainder = {} # remainder -> index in tmp where it first appeared
        tmp = ''
        while True:
            # the first iteration appends a bogus '0' (r < denominator
            # there), which is why tmp[1:] is used below
            tmp += str(r // denominator) # next digit of the fractional part
            # print(tmp)
            r = r % denominator
            if r == 0: # division terminates: not a repeating decimal
                return head + tmp[1:]
            if r in remainder: # this remainder was seen before: cycle found
                return '%s%s(%s)' % (head, tmp[1:remainder[r] + 1], tmp[remainder[r] + 1:pos + 1])
            else:
                remainder[r] = pos
                pos += 1
                r *= 10
|
import unittest
from scpidev.value import SCPIValue, SCPIValueList
# Each vector: the value-definition string to parse, the SCPIValue tuples it
# should parse into, and (candidate, should-match) pairs for containment tests.
test_vectors = [
    {
        "value_string": "{<resolution>|MINimum|MAX|DEF}",
        "expected_values_list": [
            "<resolution>", ("MIN", "imum", ""), ("MAX", "", ""),
            ("DEF", "", "")
        ],
        "match_test": [
            ("MIN", True), ("min", True), ("mini", True), ("minimum", True),
            ("minimumu", False), ("DEf", True), ("+123.37 A", True),
            (" MIN", False),
        ],
    },
    {
        "value_string": "{0|1|OFF|ON}",
        "expected_values_list": [
            ("0", "", ""), ("1", "", ""), ("OFF", "", ""), ("ON", "", "")
        ],
        "match_test": [
            ("mIn", False),
        ],
    }
]
# # Define test strings.
# "<test>",
# "{<resolution>|MIN|MAX|DEF}",
# "{<test_type>|DEF}",
# "{0|1|OFF|ON}",
# None,
# "{REQuired|OPTional}",
# "{\"<date_string>\"|CHANnel<n>}",
# "",
class TestSCPIValue(unittest.TestCase):
    """Drives SCPIValueList through the table-driven cases in test_vectors."""
    def test_value_list(self):
        """For each vector: check parsed tuples, then membership results."""
        for test_vector in test_vectors:
            print("\n++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
            # Get test vector data
            vs = test_vector["value_string"]
            evl = test_vector["expected_values_list"]
            # Create value list from test vector
            vl = SCPIValueList()
            vl.init(vs)
            print("{} => {}".format(repr(vs), str(vl)))
            n = 0
            # Test correct tuple creation
            print("\n--- Test correct tuple creation ------------------------")
            for v in vl:
                print("Testing: {} == {}".format(
                    repr(v.get_value()), repr(evl[n])))
                self.assertEqual(v.get_value(), evl[n])
                n += 1
            # Test correct value matching
            print("\n--- Test correct value matching ------------------------")
            for val in test_vector["match_test"]:
                print("Testing ({}): {} in {}"
                    .format(val[1], repr(val[0]), str(vl)))
                result = val[0] in vl
                self.assertEqual(result, val[1])
# run the suite when executed directly
if __name__ == "__main__":
    unittest.main()
# vl = SCPIValueList("{<resolution>|MINimum|MAX|DEF}")
# print(vl)
# print("mi" in vl)
# print("min" in vl)
# print("MiNimum" in vl)
# print("minimumu" in vl)
|
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 28 17:20:02 2018
@author: mathe
"""
import pygame,sys,time,random
from pygame.locals import *
import Jogo
def menu():
    """Show the Eternal War title menu and dispatch on the player's choice.

    Arrow keys move the highlight between Jogar (play), Highscore and Sair
    (quit); RETURN activates the current item.  Runs until the window is
    closed, ESC is pressed, or Sair is chosen.
    """
    pygame.init()
    pygame.mixer.init()
    fonte=pygame.font.Font("moonhouse.ttf", 40)
    fonte_titulo=pygame.font.Font("moonhouse.ttf", 90)
    # Screen / assets
    cursor = ("cursor.png")
    icon = pygame.image.load("icone.ico")
    pygame.display.set_icon(icon)
    som_select=pygame.mixer.Sound("select.ogg")
    pygame.display.set_caption("Eternal War")
    tela = pygame.display.set_mode((800,600))
    fundo=pygame.image.load('fundo_menu.png')
    frames=30
    relogio=pygame.time.Clock()
    # Menu options: each entry pre-rendered in white and in highlight cyan
    menu=fonte_titulo.render("Eternal War",1,(185, 66, 244))
    jogar=fonte.render("Jogar",1,(254, 254, 254))
    tutorial=fonte.render("Highscore",1,(254, 254, 254))
    sair=fonte.render("Sair",1,(254, 254, 254))
    jogar_selecionado=fonte.render("Jogar",1,(76, 255, 254))
    tutorial_selecionado=fonte.render("Highscore",1,(76, 255, 254))
    sair_selecionado=fonte.render("Sair",1,(76, 255, 254))
    js=jogar_selecionado
    tut=tutorial
    exit=sair
    # Music
    musica1="musica.mp3"
    pygame.mixer.music.load(musica1) # background music
    pygame.mixer.music.play(-1)
    global markerp
    markerp = 1
    # cursor movement counter
    mov=0
    start=[250,250]
    # Main loop: draw the menu, then react to quit/keyboard events
    mainloop = True
    while mainloop:
        tela.blit(fundo, (0,0))
        tempo=relogio.tick(frames)
        tela.blit(menu,[100,80])
        tela.blit(js, [330,250])
        tela.blit(tut, [330,350])
        tela.blit(exit, [330,450])
        tela.blit(pygame.image.load(cursor), start)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.mixer.music.fadeout(2)
                sys.exit()
            elif event.type==pygame.KEYDOWN:
                if event.key==K_ESCAPE:
                    som_select.play()
                    pygame.mixer.music.fadeout(2)
                    sys.exit()
                # each branch moves the highlight (with wrap-around) by
                # swapping the plain/highlighted renders and the cursor pos
                elif js==jogar_selecionado and event.key==K_DOWN:
                    som_select.play()
                    js=jogar
                    tut=tutorial_selecionado
                    exit=sair
                    markerp=2
                    start=[250,350]
                elif js==jogar_selecionado and event.key==K_UP:
                    som_select.play()
                    js=jogar
                    tut=tutorial
                    exit=sair_selecionado
                    markerp=3
                    start=[250,450]
                elif tut==tutorial_selecionado and event.key==K_DOWN:
                    som_select.play()
                    js=jogar
                    tut=tutorial
                    exit=sair_selecionado
                    markerp=3
                    start=[250,450]
                elif tut==tutorial_selecionado and event.key==K_UP:
                    som_select.play()
                    js=jogar_selecionado
                    tut=tutorial
                    exit=sair
                    markerp=1
                    start=[250,250]
                elif exit==sair_selecionado and event.key==K_DOWN:
                    som_select.play()
                    js=jogar_selecionado
                    tut=tutorial
                    exit=sair
                    markerp=1
                    start=[250,250]
                elif exit==sair_selecionado and event.key==K_UP:
                    som_select.play()
                    js=jogar
                    tut=tutorial_selecionado
                    exit=sair
                    markerp=2
                    start=[250,350]
                # RETURN on "Jogar" (markerp 1) starts the game
                elif markerp==1 and event.key==pygame.K_RETURN:
                    som_select.play()
                    jogo = Jogo.loopPrincipal()
                    jogo.dar_load()
                    jogo.roda()
                # RETURN on "Sair" (markerp 3) quits
                elif markerp==3 and event.key==pygame.K_RETURN:
                    som_select.play()
                    pygame.mixer.music.fadeout(2)
                    sys.exit()
        mov=mov+1
        pygame.display.update()
# Run the game automatically when executed as a script
if __name__=="__main__":
    menu() |
from tensorboardX import SummaryWriter
class Plotter():
    """Thin wrapper around a tensorboardX SummaryWriter that refuses to
    rewrite history: a scalar is only logged when its step x is strictly
    greater than the last step written for that series."""

    def __init__(self, folder):
        self.writer = SummaryWriter(folder)
        self.last_written = {}  # series key -> largest x written so far

    def plot(self, tag, name, x, y):
        """Log scalar y at step x under 'tag/name', unless x is stale."""
        series = '{}/{}'.format(tag, name)
        previous = self.last_written.get(series)
        if previous is not None and x <= previous:
            # already have a point at this step or later; skip the write
            return
        self.last_written[series] = x
        self.writer.add_scalar(series, y, x)

    def close(self):
        self.writer.close()
        print("Closed plotter!")
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
def my_insert(indeks, eleman):
    # Python 2 demo: print a fixed list with eleman's characters spliced in
    # at position indeks.  The spliced list is printed, not stored.
    liste = [1, 2, 3, 'a', 'b']
    print liste[:indeks] + list(eleman) + liste[indeks:]
my_insert(3, "2")
|
#! /usr/bin/python
import socket
import math
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# ballistics constants: muzzle velocities (m/s), gravity (m/s^2), pi
RIFLE_V = 853
PISTOL_V = 350
GRAVITY = -9.81
PI = 3.14159
HOST = 'zombies_8977dda19ee030d0ea35e97ad2439319.2014.shallweplayaga.me'
PORT = 20689
s.connect((HOST, PORT));
i = 0;
# consume the banner and pick menu options 1 then 2
s.recv(1024)
s.send('1\n')
s.recv(1024)
s.send('2\n')
def calDist(x, y):
    """Euclidean distance from the origin to the point (x, y)."""
    return (x ** 2 + y ** 2) ** 0.5
def detWeapon(x, y):
    """Pick the pistol ('p') inside 50 units of range, else the rifle ('r')."""
    return 'p' if calDist(x, y) < 50 else 'r'
def detVelocity(x, y):
    """Muzzle velocity of the weapon that detWeapon would pick at this range."""
    return PISTOL_V if calDist(x, y) < 50 else RIFLE_V
def detAngle(x, y, t=0):
    """Firing angle in degrees to hit (x, y); uses time-of-flight t when given.

    With t == 0 it solves the projectile quadratic for tan(theta) instead.
    """
    v = detVelocity(x, y)
    if t != 0:
        # the original referenced an undefined name 'u' here, a guaranteed
        # NameError; the velocity computed above is what was intended
        return math.acos(x / (t * v)) * 180 / PI
    a = 0.5 * GRAVITY * x * x
    b = v * v * x
    c = -y * v * v + a
    D = b * b - 4.0 * a * c
    sol = (-b + D ** 0.5) / (2.0 * a)
    return (math.atan(sol)) * 180 / PI
def getY(x, y, t, theta):
    """Projectile height at time t when firing at angle theta (degrees)."""
    v = detVelocity(x, y)
    # the original computed this value but never returned it (callers got None)
    return v * math.sin(theta * PI / 180) * t + 0.5 * GRAVITY * t * t
def parseInput(buf):
    """Extract (x, y, t) from a zombie-position prompt in buf.

    NOTE(review): the extracted x/y/t are string slices, not numbers, and
    are fed to detAngle/getY unconverted; in the 'frozen in terror' branch
    `prev` is never advanced inside the loop; and y is overwritten with
    getY's return value before returning.  Review before relying on this.
    """
    x = 0
    y = 0
    t = 0
    prev = ''
    lines = buf.split('\n')
    for line in lines:
        if 'above your van' in line:
            parts = line.split(' ')
            for part in parts:
                # the word before 'from'/'above' carries the number, with a
                # trailing punctuation character stripped via [0:-1]
                if part == 'from':
                    x = prev[0:-1]
                elif part == 'above':
                    y = prev[0:-1]
                prev = part
        elif 'frozen in terror' in line:
            parts = line.split(' ')
            for part in parts:
                if part == 'terror':
                    x = prev[0:-1]
                elif part == 'and':
                    y = prev[0:-1]
                elif part == 'have':
                    t = prev[0:-1]
    theta = detAngle(x, y, t)
    y = getY(x,y, t, theta)
    return (x, y, t)
i =0
# main loop: read a prompt, solve the shot, reply "weapon, angle, x, y"
while s:
    data = s.recv(1024)
    print data
    coord = parseInput(data)
    x = int(coord[0])
    y = int(coord[1])
    t = int(coord[2])
    weapon = detWeapon(x, y)
    angle = detAngle(x, y, t)
    resp = weapon + ', ' + str(angle) + ', ' + str(x) + ', ' + str(y) + '\n'
    print resp
    s.send(resp) |
import logging
import pymysql
from pymongo import MongoClient
import pymongo
from bson import ObjectId
def op_db_query(sql, host, user, password, database):
    """
    Run a read-only SQL statement over a direct MySQL connection (no jump host).
    :param sql: statement to execute
    :param host: database host address
    :param user: MySQL login user
    :param password: MySQL login password
    :param database: database (schema) to connect to
    :return: all fetched rows as a tuple of tuples, or None when the query raises
    """
    db_res = pymysql.connect(host=host, user=user, password=password, database=database, charset="utf8")
    # obtain a cursor for executing statements
    cur = db_res.cursor()
    try:
        cur.execute(sql) # execute the SQL statement
        results = cur.fetchall() # fetch every matching record
        logging.info("查询条件==>{}".format(sql))
        logging.info("查询结果==>{}".format(results))
        return results
    except Exception as e:
        # NOTE(review): broad except logs and implicitly returns None
        logging.error("查询异常==>{}".format(e))
    finally:
        db_res.close()
def op_db_modify(sql, host, user, password, database):
    """
    Run a mutating SQL statement over a direct MySQL connection (no jump host),
    committing on success and rolling back on error.
    :param sql: statement to execute
    :param host: database host address
    :param user: MySQL login user
    :param password: MySQL login password
    :param database: database (schema) to connect to
    :return: None
    """
    db_res = pymysql.connect(host=host, user=user, password=password, database=database, charset="utf8")
    # obtain a cursor for executing statements
    cur = db_res.cursor()
    try:
        cur.execute(sql) # execute the SQL statement
        logging.info("执行语句==>{}".format(sql))
        db_res.commit()
    except Exception as e:
        logging.error("sql执行异常==>{}".format(e))
        db_res.rollback()
    finally:
        db_res.close()
class OperateMongo:
    """Small pymongo helper bound to a couple of fixed test environments."""

    def __init__(self, mongo):
        """Connect to the replica set selected by *mongo*.

        :param mongo: environment key, 'te' or 'te_promote_sales'
        NOTE(review): credentials are hard-coded in source -- move them to
        configuration or a secret store.  An unrecognised *mongo* value
        leaves self.mongo unset, so MongoClient() fails with AttributeError.
        """
        password = None
        user = None
        if mongo == 'te':
            self.mongo = ['mgd1app.te.test.srv.mc.dd:27017', 'mgd2app.te.test.srv.mc.dd:27017',
                          'mgd3app.te.test.srv.mc.dd:27017']
        elif mongo == 'te_promote_sales':
            self.mongo = ['10.23.34.29:27017', '10.23.34.30:27017', '10.23.34.39:27017']
            password = '98LbbHSYwE2z'
            user = 'mongouser'
        self.client = MongoClient(self.mongo)  # connect to the cluster
        if user is not None:
            self.adAdmin = self.client.admin
            self.adAdmin.authenticate(user, password)

    def select_mongo_data(self, db_name, collect_name, condition_map, paraName):
        """Return the *paraName* field of every match, newest (_id) first.

        :param db_name: database name
        :param collect_name: collection name
        :param condition_map: query filter
        :param paraName: single field to extract from each document
        :return: list of field values, or None when nothing matched
        """
        db = self.client[db_name]
        collection = db[collect_name]
        results = collection.find(condition_map).sort('_id', pymongo.DESCENDING)
        result_list = []
        print("results:", results)
        for result in results:
            print(result)
            data = result[paraName]
            result_list.append(data)
        if len(result_list) == 0:
            return None
        else:
            return result_list

    def select_mongo_data2(self, db_name, collect_name, query, projection, skip_number=0,
                           limit_number=0, sort_field='_id', order=-1, is_sort=1):
        """General query, optionally sorted (default: _id descending).

        :param db_name: database name
        :param collect_name: collection name
        :param query: query filter
        :param projection: fields to return, e.g. {"_id": 1, "name": 1}
        :param skip_number: number of leading records to skip
        :param limit_number: maximum number of records to return (0 = all)
        :param sort_field: field to sort on
        :param order: sort direction, -1 for descending
        :param is_sort: 1 to sort, 0 to leave the natural order
        :return: list of documents, or None when nothing matched
        """
        collection = self.client[db_name][collect_name]
        result_list = []
        if is_sort == 1:  # sorted
            results = collection.find(query, projection).limit(limit_number).skip(skip_number).sort(sort_field, order)
        elif is_sort == 0:  # unsorted
            results = collection.find(query, projection).limit(limit_number).skip(skip_number)
        for result in results:
            print(result)
            result_list.append(result)
        if len(result_list) == 0:
            return None
        else:
            return result_list

    def select_mongo_by_id(self, db_name, collect_name, condition_map):
        """Return the newest matching document, or [] when there is none."""
        db = self.client[db_name]
        collection = db[collect_name]
        results = collection.find(condition_map).sort('_id', pymongo.DESCENDING)
        result = []
        try:
            # the original tested len(result) -- the still-empty local list --
            # so a found document was never actually returned
            result = results[0]
        except (IndexError, pymongo.errors.PyMongoError):
            print("没查询到数据")
        return result

    def updata_mongo_data(self, db_name, collect_name, myrequireinfo, condition_map):
        """Update one document; *myrequireinfo* is the filter, *condition_map* the update.

        (Method name typo kept for backward compatibility with callers.)
        """
        try:
            collection = self.client[db_name][collect_name]
            # the original called collection.updata_one(), which does not exist
            results = collection.update_one(myrequireinfo, condition_map)
            return results
        except pymongo.errors.PyMongoError as e:
            print("DB Error==>{}".format(e))

    def insert_mongo_data(self, db_name, collect_name, document):
        """Insert a single document and return the InsertOneResult."""
        try:
            collection = self.client[db_name][collect_name]
            results = collection.insert_one(document)
            return results
        except pymongo.errors.PyMongoError as e:
            print("DB Error==>{}".format(e))

    def delete_mongo_data(self, db_name, collect_name, condition_map):
        """Delete every matching document and return the DeleteResult."""
        try:
            collection = self.client[db_name][collect_name]
            results = collection.delete_many(condition_map)
            return results
        except pymongo.errors.PyMongoError as e:
            print("DB Error==>{}".format(e))
if __name__ == "__main__":
    # NOTE(review): 'te_user' matches no branch in __init__, so self.mongo is
    # never set and MongoClient raises AttributeError here; also "6123" is not
    # a valid 24-hex-character ObjectId string.
    db = OperateMongo('te_user')
    db_result = db.select_mongo_data2("area", "user", {"_id": ObjectId("6123")}, {"mobile": 1}) |
import math
import time
import os
from random import shuffle
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.optim as optim
import nni
import logging
import networkx as nx
from loggers import EmptyLogger, CSVLogger, PrintLogger, FileLogger, multi_logger
from model import GCN, GatNet
from pre_peocess import build_2k_vectors
import pickle
CUDA_Device = 1
class ModelRunner:
    def __init__(self, conf, logger, data_logger=None, is_nni=False):
        """Store configuration and loggers and select the compute device.

        :param conf: dict of hyper-parameters and data (lr, labels, X, ...)
        :param logger: main logger
        :param data_logger: per-metric logger (EmptyLogger when None)
        :param is_nni: True when driven by an NNI hyper-parameter search
        """
        self._logger = logger
        self._data_logger = EmptyLogger() if data_logger is None else data_logger
        self._conf = conf
        self.bar = 0.5
        self._lr = conf["lr"]
        self._is_nni = is_nni
        # choosing GPU device
        # NOTE(review): this compares a torch.device to the string "cpu" --
        # confirm the equality behaves as intended on your torch version
        self._device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        if self._device != "cpu":
            with torch.cuda.device("cuda:{}".format(CUDA_Device)):
                torch.cuda.empty_cache()
            if not self._is_nni:
                self._device = torch.device("cuda:{}".format(CUDA_Device))
        self._loss = self._sub_graph_ce_loss
        self._ce_loss = torch.nn.CrossEntropyLoss(reduction="mean").to(self._device)
    @property
    def logger(self):
        """Main logger."""
        return self._logger
    @property
    def data_logger(self):
        """Metric/data logger."""
        return self._data_logger
    def _sub_graph_ce_loss(self, calcs, beta=None, gamma=None):
        """Cross-entropy over the central node plus weighted neighbor terms.

        :param calcs: dict from calculate_labels_outputs (outputs/labels for
            the central node, first and second neighbors)
        :param beta: weight of the first-neighbor term
        :param gamma: weight of the second-neighbor term
        NOTE(review): with the defaults (None) and non-empty neighbor outputs,
        multiplying by beta/gamma raises TypeError -- callers must pass both.
        """
        # if beta is None:
        #     beta = 1 / len(calcs["f_ns_out"]) if len(calcs["f_ns_out"])!=0 else 0
        #     gamma = 1 / len(calcs["s_ns_out"]) if len(calcs["s_ns_out"])!=0 else 0
        #todo check dimensions of central nodes torch
        cn_loss = self._ce_loss(calcs["cn_out"], calcs["cn_label"])
        f_ns_loss = self._ce_loss(calcs["f_ns_out"], calcs["f_ns_labels"]) *(beta) if len(calcs["f_ns_out"])!=0 else 0
        s_ns_loss = self._ce_loss(calcs["s_ns_out"], calcs["s_ns_labels"]) * (gamma) if len(calcs["s_ns_out"])!=0 else 0
        return cn_loss+f_ns_loss+s_ns_loss
    def _get_model(self):
        """Build the GCN and its optimizer from self._conf.

        :return: dict with the model, optimizer, and the configuration values
            (data, labels, split indices, loss weights) that train/test use.
        """
        model = GCN(in_features=self._conf["in_features"],
                    hid_features=self._conf["hid_features"], out_features= self._conf["out_features"],
                    activation=self._conf["activation"], dropout= self._conf["dropout"])
        opt = self._conf["optimizer"](model.parameters(), lr=self._conf["lr"], weight_decay=self._conf["weight_decay"])
        ##checged : added "feature_matrices"
        return {"model": model, "optimizer": opt,
                # "training_mats": self._conf["training_mat"],
                # "training_labels": self._conf["training_labels"],
                # "test_mats": self._conf["test_mat"],
                # "test_labels": self._conf["test_labels"],
                "cut": self._conf["cut"],"beta": self._conf["beta"],"gamma": self._conf["gamma"],
                "labels": self._conf["labels"], "X": self._conf["X"], "ds_name": self._conf["ds_name"],
                "train_ind": self._conf["train_ind"], "test_ind": self._conf["test_ind"],
                "adj_matrices": self._conf["adj_matrices"]
                }
    # verbose = 0 - silent
    # verbose = 1 - print test results
    # verbose = 2 - print train for each epoch and test results
    def run(self, verbose=2):
        """Train for self._conf["epochs"] epochs, run a final test, and
        return the collected losses/accuracies plus the hyper-parameters.
        """
        if self._is_nni:
            verbose = 0
        model = self._get_model()
        ##
        loss_train, acc_train, intermediate_acc_test, losses_train, accs_train, accs_cn_train, accs_f_train, accs_s_train, test_results = self.train(
            self._conf["epochs"],
            model=model,
            verbose=verbose)
        ##
        # Testing . ## result is only the last one! do not use. same as 7 in last
        result = self.test(model=model, verbose=verbose if not self._is_nni else 0, print_to_file=True)
        test_results.append(result)
        if self._is_nni:
            self._logger.debug('Final loss train: %3.4f' % loss_train)
            self._logger.debug('Final accuracy train: %3.4f' % acc_train)
            final_results = result["acc"]
            self._logger.debug('Final accuracy test: %3.4f' % final_results)
            # _nni.report_final_result(test_auc)
        if verbose != 0:
            names = ""
            vals = ()
            for name, val in result.items():
                names = names + name + ": %3.4f "
                vals = vals + tuple([val])
                self._data_logger.info(name, val)
        parameters = {"temporal_pen": self._conf["temporal_pen"], "lr": self._conf["lr"],
                      "weight_decay": self._conf["weight_decay"],
                      "dropout": self._conf["dropout"], "optimizer": self._conf["optim_name"]}
        return loss_train, acc_train, intermediate_acc_test, result, losses_train, accs_train, accs_cn_train, accs_f_train, accs_s_train, test_results, parameters
    def train(self, epochs, model=None, verbose=2):
        """Run the epoch loop, collecting per-epoch train metrics and a test
        evaluation every 5 epochs.

        :return: final train loss/accuracy, intermediate test accuracies
            (NNI mode), the per-epoch metric lists, and the test results.
        """
        loss_train = 0.
        acc_train = 0.
        losses_train = []
        accs_train = []
        accs_cn_train = []
        accs_f_train = []
        accs_s_train = []
        test_results = []
        intermediate_test_acc = []
        for epoch in range(epochs):
            loss_train, acc_train, acc_train_cn , acc_train_f, acc_train_s= self._train(epoch, model, verbose)
            ##
            losses_train.append(loss_train)
            accs_train.append(acc_train)
            accs_cn_train.append(acc_train_cn)
            #if acc_train_f!=0:
            accs_f_train.append(acc_train_f)
            # if acc_train_s!=0:
            accs_s_train.append(acc_train_s)
            ##
            # /---------------------- FOR NNI -------------------------
            if epoch % 5 == 0:
                test_res = self.test(model, verbose=verbose if not self._is_nni else 0)
                test_results.append(test_res)
                if self._is_nni:
                    test_acc = test_res["acc"]
                    intermediate_test_acc.append(test_acc)
        return loss_train, acc_train, intermediate_test_acc, losses_train, \
               accs_train, accs_cn_train, accs_f_train, accs_s_train, test_results
def calculate_labels_outputs(self, node, outputs, labels, indices, ego_graph):
    """Split model outputs/labels of one ego-graph into central node,
    first-hop and second-hop neighbour groups.

    Args:
        node: id of the ego-graph's central node.
        outputs: model output rows, ordered like ``list(ego_graph.nodes)``.
        labels: label array indexed by global node id.
        indices: node ids that are labelled in the current split; neighbours
            outside it are excluded from the returned groups.
        ego_graph: networkx graph of the node's neighbourhood.

    Returns:
        Dict with cn_out / cn_label / f_ns_out / f_ns_labels / s_ns_out /
        s_ns_labels.
    """
    # Precompute node -> row position once; the original called
    # list(ego_graph.nodes).index(...) per neighbour, which is O(n^2).
    position = {n: i for i, n in enumerate(ego_graph.nodes)}
    f_neighbors = list(ego_graph.neighbors(node))
    f_set = set(f_neighbors)
    s_neighbors = []
    seen = set()
    for f_neighbor in f_neighbors:
        for s_neighbor in ego_graph.neighbors(f_neighbor):
            # A second-hop neighbour must not be the centre, a first-hop
            # neighbour, or already collected.
            if s_neighbor not in f_set and s_neighbor != node and s_neighbor not in seen:
                seen.add(s_neighbor)
                s_neighbors.append(s_neighbor)
    cn_out = outputs[[position[node]]]
    cn_label = labels[[node]]
    f_ns_out = outputs[[position[f_n] for f_n in f_neighbors if f_n in indices]]
    f_ns_labels = labels[[f_n for f_n in f_neighbors if f_n in indices]]
    s_ns_out = outputs[[position[s_n] for s_n in s_neighbors if s_n in indices]]
    s_ns_labels = labels[[s_n for s_n in s_neighbors if s_n in indices]]
    return {"cn_out": cn_out, "cn_label": cn_label, "f_ns_out": f_ns_out,
            "f_ns_labels": f_ns_labels, "s_ns_out": s_ns_out, "s_ns_labels": s_ns_labels}
def _train(self, epoch, model, verbose=2):
    """One optimisation step over a `cut` fraction of the training nodes.

    Each sampled node's ego-graph is fed through the model; the per-node
    losses are averaged and a single optimizer step is taken for the epoch.

    Returns:
        (loss, acc, acc_central, acc_first_hop, acc_second_hop); neighbour
        accuracies are np.nan when no ego-graph contributed neighbours.
    """
    model_ = model["model"]
    model_ = model_.to(self._device)
    optimizer = model["optimizer"]
    cut = model["cut"]  # fraction of training nodes actually used this epoch
    train_indices = model["train_ind"]
    model["labels"] = model["labels"].to(self._device)
    labels = model["labels"]
    beta = model["beta"]
    gamma = model["gamma"]
    model_.train()
    optimizer.zero_grad()
    loss_train = 0.
    acc_train = 0
    acc_train_cn, acc_train_f, acc_train_s = 0,0,0
    # Count ego-graphs that had no labelled first/second neighbours, so the
    # averages below only divide by graphs that contributed a value.
    f_nones = 0; s_nones = 0
    # create subgraphs only for partial, but use labels of all.
    partial_train_indices = train_indices[0:int(cut*len(train_indices))]
    for node in partial_train_indices: #this may be in batches for big graphs todo
        adj = model["adj_matrices"][node]  # the node's precomputed ego-graph (networkx)
        X_t = model["X"][list(adj.nodes)].to(device=self._device)
        output = model_(X_t, nx.adjacency_matrix(adj).tocoo())
        calcs = self.calculate_labels_outputs( node, output, labels, train_indices, adj)
        loss_train += self._loss(calcs, beta, gamma)
        acc, acc_cn, acc_f, acc_s = self.accuracy(calcs)
        acc_train_cn+= acc_cn
        if acc_f!=None:
            acc_train_f += acc_f
        else:
            f_nones+=1
        if acc_s!=None:
            acc_train_s+=acc_s
        else:
            s_nones+=1
        acc_train += acc
    loss_train /= len(partial_train_indices)
    acc_train_cn /= len(partial_train_indices)
    if len(partial_train_indices)-f_nones !=0:
        acc_train_f /= (len(partial_train_indices)-f_nones)
    else:
        acc_train_f = np.nan
    if len(partial_train_indices)-s_nones !=0:
        acc_train_s /= (len(partial_train_indices)-s_nones)
    else:
        acc_train_s = np.nan
    acc_train/= len(partial_train_indices)
    #print("Train Acc on cn", acc_train_cn / 1, "Acc first nodes", acc_train_f, "Acc second nodes", acc_train_s)
    # Single gradient step for the averaged loss of the whole epoch sample.
    loss_train.backward()
    optimizer.step()
    if verbose == 2:
        # Evaluate validation set performance separately,
        # deactivates dropout during validation run.
        self._logger.debug('Epoch: {:04d} '.format(epoch + 1) +
                           'ce_loss_train: {:.4f} '.format(loss_train.data.item()) +
                           'acc_train: {:.4f} '.format(acc_train))
    return loss_train, acc_train, acc_train_cn , acc_train_f, acc_train_s
@staticmethod
def accuracy(calcs):
# return {"cn_out": cn_out, "cn_label": cn_label, "f_ns_out": f_ns_out, "f_ns_labels": f_ns_labels,
# "s_ns_out": s_ns_out, "s_ns_labels": s_ns_labels}
acc = 0
acc_cn, acc_f, acc_s = 0,0,0
for idx, sample in enumerate(calcs["f_ns_out"]):
if torch.argmax(sample) == calcs["f_ns_labels"][idx]:
acc+=1
acc_f+=1
for idx, sample in enumerate(calcs["s_ns_out"]):
if torch.argmax(sample) == calcs["s_ns_labels"][idx]:
acc+=1
acc_s+=1
if torch.argmax(calcs["cn_out"]) == calcs["cn_label"]:
acc+=1
acc_cn+=1
size_labeld = len(calcs["cn_out"])+len(calcs["s_ns_out"])+len(calcs["f_ns_out"])
#print(acc_cn, acc_f,acc_s)
acc_f = acc_f/len(calcs["f_ns_out"]) if len(calcs["f_ns_out"])!=0 else None
acc_s = acc_s / len(calcs["s_ns_out"]) if len(calcs["s_ns_out"]) != 0 else None
#print("Acc on cn", acc_cn/1, "Acc first nodes", acc_f, "Acc second nodes",acc_s)
#return acc/size_labeld # for all with no change between first and seconds
return acc/size_labeld, acc_cn/1, acc_f, acc_s
def test(self, model=None, verbose=2, print_to_file=False):
    """Evaluate the model on a random 5% sample of the test nodes.

    Args:
        model: dict with model/test_ind/labels/beta/gamma/adj_matrices/X.
        verbose: 0 silences the summary log line.
        print_to_file: accepted for interface compatibility (unused here).

    Returns:
        Dict with loss / acc / acc_cn / acc_f / acc_s (neighbour accuracies
        are np.nan when no sampled ego-graph contributed them).
    """
    model_ = model["model"]
    test_indices = model["test_ind"]
    labels = model["labels"]
    beta = model["beta"]
    gamma = model["gamma"]
    model_.eval()
    test_loss = 0
    test_acc = 0
    acc_test_cn, acc_test_f, acc_test_s = 0, 0, 0
    f_nones = 0; s_nones = 0
    # BUG FIX: the original drew from range(len(test_indices)) — positions,
    # not node ids — so it could evaluate training nodes. Sample the actual
    # test node ids instead.
    partial_rand_test_indices = np.random.choice(test_indices, round(0.05 * len(test_indices)), replace=False)
    for node in partial_rand_test_indices:  # this may be in batches for big graphs todo
        # adj is the ego graph (converted to an adjacency matrix in COO form).
        # NOTE: removed leftover debug code that shuffled adj.nodes / X_t and
        # printed rows: random.shuffle on a networkx NodeView raises, and
        # shuffling X_t breaks the row <-> node alignment the model relies on.
        adj = model["adj_matrices"][node]
        X_t = model["X"][list(adj.nodes)].to(device=self._device)
        # todo this may be given as another param, to avoid using cpu calculations here
        output = model_(X_t, nx.adjacency_matrix(adj).tocoo())
        calcs = self.calculate_labels_outputs(node, output, labels, test_indices, adj)
        test_loss += self._loss(calcs, beta, gamma)
        acc, acc_cn, acc_f, acc_s = self.accuracy(calcs)
        acc_test_cn += acc_cn
        if acc_f is not None:
            acc_test_f += acc_f
        else:
            f_nones += 1
        if acc_s is not None:
            acc_test_s += acc_s
        else:
            s_nones += 1
        test_acc += acc
    test_loss /= len(partial_rand_test_indices)
    test_acc /= len(partial_rand_test_indices)
    acc_test_cn /= len(partial_rand_test_indices)
    # Guard divide-by-zero the same way _train does (original crashed when
    # every sampled ego-graph lacked first- or second-hop neighbours).
    denom_f = len(partial_rand_test_indices) - f_nones
    acc_test_f = acc_test_f / denom_f if denom_f != 0 else np.nan
    denom_s = len(partial_rand_test_indices) - s_nones
    acc_test_s = acc_test_s / denom_s if denom_s != 0 else np.nan
    if verbose != 0:
        self._logger.info("Test: ce_loss= {:.4f} ".format(test_loss.data.item()) + "acc= {:.4f}".format(test_acc))
    result = {"loss": test_loss, "acc": test_acc, "acc_cn": acc_test_cn, "acc_f": acc_test_f, "acc_s": acc_test_s}
    return result
def plot_graphs(train_loss_mean, train_acc_mean,train_cn_acc_mean,train_f_acc_mean, train_s_acc_mean, test_loss_mean, test_acc_mean,
                test_cn_acc_mean,test_f_acc_mean,test_s_acc_mean, parameters, plots_data):
    """Plot per-epoch train/test curves in a 2x3 grid and save the figure.

    Row 0: train loss, train accuracy, per-layer train accuracies
    (central node / first / second neighbours). Row 1: the same for test.
    The figure is written to figures/<ds_name>_.png.

    Args:
        *_mean: per-epoch series, already averaged over trials.
        parameters: dict with weight_decay / lr / optimizer / dropout
            (shown in the figure title).
        plots_data: dict with ds_name, cut and the final mean/ste accuracies.
    """
    # info[4] is list of train losses 1 . list[5] is list of acc train.
    #info [6] is list of dictionaries, each dictionary is for epoch, each one contains "loss" - first loss,"acc"- acc, "tempo_loss" - tempo loss
    #info[7] is the temporal_oen
    regulariztion = str(round(parameters["weight_decay"],3))
    lr = str(round(parameters["lr"],3))
    optimizer = str(parameters["optimizer"])
    dropout = str(round(parameters["dropout"],2))
    cut = plots_data["cut"]*100  # NOTE(review): computed but never used below
    ds_name = plots_data["ds_name"]
    #Train
    # Share a X axis with each column of subplots
    fig, axes = plt.subplots(2, 3, figsize=(12, 10))
    # Title summarises the final aggregated accuracies and hyper-parameters.
    plt.suptitle("DataSet: " + ds_name
                 + ", final_train_accuracies_mean: " + str(round(plots_data["final_train_accuracies_mean"],2)) + ", final_train_accuracies_ste: " + str(round(plots_data["final_train_accuracies_ste"],2))
                 + "\nfinal_test_accuracies_mean: " + str(round(plots_data["final_test_accuracies_mean"],2)) + ", final_test_accuracies_ste: " + str(round(plots_data["final_test_accuracies_ste"],2))
                 + "\nlr="+lr+" reg= "+regulariztion+ ", dropout= "+dropout+", opt= "+optimizer, fontsize=12, y=0.99)
    epoch = [e for e in range(1, len(train_loss_mean)+1)]
    axes[0, 0].set_title('Loss train')
    axes[0, 0].set_xlabel("epochs")
    axes[0, 0].set_ylabel("Loss")
    axes[0, 0].plot(epoch, train_loss_mean)
    axes[0, 1].set_title('Accuracy train')
    axes[0, 1].set_xlabel("epochs")
    axes[0, 1].set_ylabel("Accuracy")
    axes[0, 1].plot(epoch, train_acc_mean)
    axes[0, 2].set_title('Accuracy layers Train')
    axes[0, 2].set_xlabel("epochs")
    axes[0, 2].set_ylabel("Accuracies")
    axes[0, 2].plot(epoch, train_cn_acc_mean, label='CentralNode')
    axes[0, 2].plot(epoch, train_f_acc_mean, label='FirstNeighbors')
    axes[0, 2].plot(epoch, train_s_acc_mean, label='SecondNeighbors')
    axes[0, 2].legend(loc='best')
    #test
    # Test curves are sampled every 5 epochs, so their x-axis is shorter.
    epoch = [e for e in range(1, len(test_loss_mean)+1)]
    axes[1, 0].set_title('Loss test')
    axes[1, 0].set_xlabel("epochs")
    axes[1, 0].set_ylabel("Loss")
    axes[1, 0].plot(epoch, test_loss_mean)
    axes[1, 1].set_title('Accuracy test')
    axes[1, 1].set_xlabel("epochs")
    axes[1, 1].set_ylabel("Accuracy")
    axes[1, 1].plot(epoch, test_acc_mean)
    axes[1, 2].set_title('Accuracy layers Test')
    axes[1, 2].set_xlabel("epochs")
    axes[1, 2].set_ylabel("Accuracies")
    axes[1, 2].plot(epoch, test_cn_acc_mean, label='CentralNode')
    axes[1, 2].plot(epoch, test_f_acc_mean, label='FirstNeighbors')
    axes[1, 2].plot(epoch, test_s_acc_mean, label='SecondNeighbors')
    axes[1, 2].legend(loc='best')
    fig.tight_layout()
    plt.subplots_adjust(top=0.85)
    # fig.delaxes(axes[1,0])
    plt.savefig("figures/"+plots_data["ds_name"]+"_.png")
    plt.clf()
    #plt.show()
def execute_runner(runners, plots_data, is_nni=False):
    """Run every ModelRunner once, aggregate per-epoch curves over the
    trials, plot them, and optionally report results to NNI.

    NOTE(review): each loop iteration runs runners[0] and then drops it from
    the list, so despite the fixed index every runner executes exactly once.
    """
    train_losses = []
    train_accuracies = []
    train_cn_accuracies = []   # NOTE(review): collected nowhere below
    train_f_accuracies = []
    train_s_accuracies = []
    test_intermediate_results = []
    test_losses = []
    test_accuracies = []
    test_cn_accuracies = []
    test_f_accuracies = []
    test_s_accuracies = []
    results = []
    last= runners[-1]  # kept for final logging after the list is consumed
    for i in range(len(runners)):
        #for idx_r, runner in enumerate(runners):
        # Free GPU memory between trials.
        with torch.cuda.device("cuda:{}".format(CUDA_Device)):
            torch.cuda.empty_cache()
        time.sleep(1)
        print("trial number",i)
        result_one_iteration = runners[0].run(verbose=2)
        # run() returns: loss, acc, intermediate accs, final result dict,
        # loss history, acc histories, test results, parameters (see run()).
        train_losses.append(result_one_iteration[0])
        train_accuracies.append(result_one_iteration[1])
        test_intermediate_results.append(result_one_iteration[2])
        test_losses.append(result_one_iteration[3]["loss"])
        test_accuracies.append(result_one_iteration[3]["acc"])
        results.append(result_one_iteration)
        #todo check if can be deleted (from first check - not changing)
        if len(runners) >1:
            runners=runners[1:]
            print("len runners", len(runners))
    # for printing results on graphs. for other uses - the last result is the one should be used.
    size = len(results)
    # Average per-epoch curves across trials (items 4..9 of each run result).
    #train_loss_mean = torch.stack([torch.tensor([results[j][4][i] for i in range(len(results[j][4]))]) for j in range(size)]).mean(axis=0)
    train_loss_mean = np.mean([ [results[j][4][i].item() for i in range(len(results[j][4]))] for j in range(size) ], axis=0)
    #train_acc_mean = torch.stack([ torch.tensor([results[j][5][i] for i in range(len(results[j][5]))]) for j in range(size) ]).mean(axis=0)
    train_acc_mean = np.mean([ [results[j][5][i] for i in range(len(results[j][5]))] for j in range(size) ], axis=0)
    train_cn_acc_mean = np.mean([[results[j][6][i] for i in range(len(results[j][6]))] for j in range(size)], axis=0)
    # nanmean: epochs where no ego-graph had first/second neighbours are nan.
    train_f_acc_mean = np.nanmean([[results[j][7][i] for i in range(len(results[j][7]))] for j in range(size)], axis=0)
    train_s_acc_mean = np.nanmean([[results[j][8][i] for i in range(len(results[j][8]))] for j in range(size)], axis=0)
    #test_loss_mean = torch.stack([ torch.tensor([results[j][6][i]["loss"] for i in range(len(results[j][6]))]) for j in range(size) ]).mean(axis=0)
    test_loss_mean = np.mean([ [results[j][9][i]["loss"].item() for i in range(len(results[j][9]))] for j in range(size) ], axis=0)
    #test_acc_mean = torch.stack([ torch.tensor([torch.tensor(results[j][6][i]["acc"]) for i in range(len(results[j][6]))]) for j in range(size) ])
    test_acc_mean = np.mean([ [results[j][9][i]["acc"] for i in range(len(results[j][9]))] for j in range(size) ], axis=0 )
    test_cn_acc_mean = np.mean([[results[j][9][i]["acc_cn"] for i in range(len(results[j][9]))] for j in range(size)], axis=0)
    test_f_acc_mean = np.mean([[results[j][9][i]["acc_f"] for i in range(len(results[j][9]))] for j in range(size)], axis=0)
    test_s_acc_mean = np.mean([[results[j][9][i]["acc_s"] for i in range(len(results[j][9]))] for j in range(size)], axis=0) #todo take care of None here too?
    final_train_accuracies_mean = np.mean(train_accuracies)
    final_train_accuracies_ste = np.std(train_accuracies) / math.sqrt(len(runners))
    final_test_accuracies_mean = np.mean(test_accuracies)
    final_test_accuracies_ste = np.std(test_accuracies) / math.sqrt(len(runners))
    plots_data["final_train_accuracies_mean"] = final_train_accuracies_mean
    plots_data["final_train_accuracies_ste"] = final_train_accuracies_ste
    plots_data["final_test_accuracies_mean"] = final_test_accuracies_mean
    plots_data["final_test_accuracies_ste"] = final_test_accuracies_ste
    #plot to graphs
    plot_graphs(train_loss_mean, train_acc_mean,train_cn_acc_mean,train_f_acc_mean, train_s_acc_mean, test_loss_mean, test_acc_mean,
                test_cn_acc_mean,test_f_acc_mean,test_s_acc_mean, results[0][10], plots_data)
    if is_nni:
        mean_intermediate_res = np.mean(test_intermediate_results, axis=0)
        for i in mean_intermediate_res:
            nni.report_intermediate_result(i)
        nni.report_final_result(np.mean(test_accuracies))
    # T takes the final of each iteration and for them mkes mean and std
    last.logger.info("*" * 15 + "Final accuracy train: %3.4f" % final_train_accuracies_mean)
    last.logger.info("*" * 15 + "Std accuracy train: %3.4f" % final_train_accuracies_ste)
    last.logger.info("*" * 15 + "Final accuracy test: %3.4f" % final_test_accuracies_mean)
    last.logger.info("*" * 15 + "Std accuracy test: %3.4f" % final_test_accuracies_ste)
    last.logger.info("Finished")
    return
def build_model(rand_test_indices, train_indices, labels ,adjacency_matrices,X,in_features,
                hid_features,out_features,ds_name, cut, activation, optimizer, epochs, dropout, lr, l2_pen, temporal_pen,
                beta, gamma, dumping_name, is_nni=False):
    """Assemble the configuration dict plus file/console loggers and return a
    ready-to-run ModelRunner.

    Args mirror the conf keys; `cut` is the fraction of training nodes used
    per epoch, `beta`/`gamma` weight the neighbour losses.
    """
    # Human-readable optimizer name for logging/plots; anything that is not
    # optim.Adam is labelled "SGD".
    optim_name="SGD"
    if optimizer==optim.Adam:
        optim_name = "Adam"
    conf = {"in_features":in_features, "hid_features": hid_features, "out_features":out_features,"ds_name":ds_name, "cut": cut,
            "dropout": dropout, "lr": lr, "weight_decay": l2_pen,
            "temporal_pen": temporal_pen, "beta": beta, "gamma": gamma,
            #"training_mat": training_data, "training_labels": training_labels,
            # "test_mat": test_data, "test_labels": test_labels,
            "train_ind": train_indices, "test_ind": rand_test_indices, "labels":labels, "X":X,
            "adj_matrices": adjacency_matrices,
            "optimizer": optimizer, "epochs": epochs, "activation": activation,"optim_name":optim_name}
    # Per-run log directory: logs/<dumping_name>/<timestamp>/
    products_path = os.path.join(os.getcwd(), "logs", dumping_name, time.strftime("%Y%m%d_%H%M%S"))
    if not os.path.exists(products_path):
        os.makedirs(products_path)
    logger = multi_logger([
        PrintLogger("MyLogger", level=logging.DEBUG),
        FileLogger("results_%s" % dumping_name, path=products_path, level=logging.INFO)], name=None)
    data_logger = CSVLogger("results_%s" % dumping_name, path=products_path)
    data_logger.info("model_name", "loss", "acc")
    # ##
    # logger.info('STARTING with cut= {:.3f} '.format(cut*100) + ' lr= {:.4f} '.format(lr) + ' dropout= {:.4f} '.format(dropout)+ ' regulariztion_l2_pen= {:.4f} '.format(l2_pen)
    #             + ' temporal_pen= {:.10f} '.format(temporal_pen)+ ' beta= {:.5f} '.format(beta)+ ' gamma= {:.5f} '.format(gamma)+ ' optimizer= %s ' %optim_name)
    # logger.debug('STARTING with lr= {:.4f} '.format(lr) + ' dropout= {:.4f} '.format(dropout) + ' regulariztion_l2_pen= {:.4f} '.format(l2_pen)
    #              + ' temporal_pen= {:.10f} '.format(temporal_pen) +' beta= {:.5f} '.format(beta)+ ' gamma= {:.5f} '.format(gamma)+ ' optimizer= %s ' %optim_name)
    # ##
    runner = ModelRunner(conf, logger=logger, data_logger=data_logger, is_nni=is_nni)
    return runner
def main_gcn(adj_matrices, X, labels,in_features, hid_features, out_features, ds_name, cut,
             optimizer=optim.Adam, epochs=200, dropout=0.3, lr=0.01, l2_pen=0.005, temporal_pen=1e-6, beta=1/4, gamma = 1/16,
             trials=1, dumping_name='', is_nni=False):
    """Entry point: build one ModelRunner per trial, each with a fresh random
    split that keeps 20 labelled nodes per class for training, then run all
    trials via execute_runner."""
    plot_data = {"ds_name": ds_name, "cut": cut}
    runners = []
    #np.random.seed(2)
    #print("epochs", epochs,"l2_pen", l2_pen,"dropout", dropout,"dropout", cut,"cut", dropout)
    for it in range(trials):
        num_classes = out_features
        # Hold out everything except 20 nodes per class; the remainder is test.
        rand_test_indices = np.random.choice(len(labels), len(labels)-(20*num_classes), replace=False) #
        train_indices = np.delete(np.arange(len(labels)), rand_test_indices)
        #train_indices = train_indices[0:int(cut*len(train_indices))]
        #create x - releveant for 2k only
        # X = build_2k_vectors(ds_name, out_features, train_indices)
        activation = torch.nn.functional.relu
        runner = build_model(rand_test_indices, train_indices, labels, adj_matrices,X, in_features, hid_features,
                             out_features,ds_name,cut, activation, optimizer, epochs, dropout, lr,
                             l2_pen, temporal_pen, beta, gamma, dumping_name, is_nni=is_nni)
        runners.append(runner)
    execute_runner(runners, plot_data, is_nni=is_nni)
return |
import os
import json
import re
import pickle
from maintain_data import Extract_Label as el
def load_needed_label(file_path):
    """Read one label name per line from *file_path*.

    Returns:
        list[str]: the lines with their trailing newline stripped.
    """
    # The 'with' statement closes the file; the original also called
    # op.close() inside the block, which was redundant.
    with open(file_path, 'r') as op:
        return [data.strip('\n') for data in op]
def load_verdict_label(file_path):
    """Load every JSON label file found directly inside directory *file_path*.

    Returns:
        dict: file name -> parsed JSON content.
    """
    label_file_dict = dict()
    for file_name in os.listdir(file_path):
        # Use a context manager: the original's json.load(open(...)) leaked
        # the file handle.
        with open(os.path.join(file_path, file_name), encoding='utf-8') as fp:
            label_file_dict[file_name] = json.load(fp)
    return label_file_dict
def add_text(label_file_dict, verdict_root_path):
    """Attach the verdict body (split into clause-level sentences) to every
    label record, under the 'text' key.

    The verdict file is read from
    ``<verdict_root_path>/<label-file stem>/<record filename>``, whitespace
    is normalised, and only the part after the '事實' (facts) heading is kept.
    """
    for key in label_file_dict.keys():
        for verdict_label in label_file_dict[key]:
            folder = key.split('.')[0]
            # NOTE(review): 'ANSI' is a Windows-only codec alias — confirm the
            # target platform / actual file encoding.
            # Context manager fixes the original's leaked file handle.
            with open(verdict_root_path+'/'+folder+'/'+verdict_label['filename'], 'r', encoding='ANSI') as tmp_file:
                text = ''
                for data in tmp_file:
                    text += data.strip('\r\n')
            # Collapse runs of spaces to a single space. (The original
            # replaced ' ' with ' ', a no-op that never terminated the loop.)
            while '  ' in text:
                text = text.replace('  ', ' ')
            text = text.replace('\u3000', '')
            # Keep only the text after the '事實' / '事 實' (facts) heading.
            total_sen = re.split('事實|事 實', text)
            tmp_text = ''
            for i in range(1, len(total_sen)):
                tmp_text += total_sen[i]
            text = tmp_text
            sentences = [sentence for i, sentence in enumerate(re.split('。|,|:|;|\uf6b1|\uf6b2|\uf6b3|\uf6b4|\uf6b5|\uf6b6|\uf6b7|\uf6b8', text)) if len(sentence) != 0 and i != 0]
            verdict_label['text'] = sentences
    return label_file_dict
# ---- Script: load label files, attach verdict text, dump training data. ----
needed_label_path = '../needed_label'
label_root_path = '../nsd-json-label'
verdict_root_path = '../nsd-json'
label_file_dict = load_verdict_label(label_root_path)
needed_label = load_needed_label(needed_label_path)
word_set = set()  # NOTE(review): never used below
label_file_dict = add_text(label_file_dict,verdict_root_path)
verdict_count = 10000  # stop collecting once this many verdicts are gathered
count = 0
embed_list = []
# Keep only records flagged fcs_29 == 1 that have an attached 'text'.
for folder in label_file_dict.keys():
    for verdict in label_file_dict[folder]:
        if 'fcs_29' not in verdict.keys():
            continue
        if int(verdict['fcs_29'])!=1:
            continue
        verdict['embedding'] = None
        # Iterating the keys just to detect 'text' — the record is appended
        # once per 'text' key present (at most once).
        for key in verdict.keys():
            if key == 'text':
                count += 1
                embed_list.append(verdict)
                print(count)
        if count > verdict_count:
            break
    if count > verdict_count:
        break
print('finish_input_embed')
print('prepare_label')
el.get_label_vector(embed_verdict_list=embed_list)
print('finish_prepare_label')
print('produce_data')
# NOTE(review): the open() handle passed to pickle.dump is never closed.
pickle.dump(embed_list,open('../factor_training_data/unsafe_driving_'+str(verdict_count)+'.emb','wb'))
print('complete_producing') |
import requests_cache
import typer
from typing import Optional
from src.scrape.scrape import (
query_website, extract_job_offers, collate_jobs_data, filter_results
)
# Set up caching for use with all requests as default
requests_cache.install_cache('scraping-cache', backend='sqlite')
scrape_app = typer.Typer()
@scrape_app.command()
def scrape(job: str = typer.Option(..., help="Job title to search on indeed."),
           loc: str = typer.Option(..., help="Location of job."),
           salary: Optional[str] = typer.Option(None, help="Minimum salary to use in search."),
           no_cache: Optional[bool] = typer.Option(False, help="Whether to bypass locally cached search.")):
    """
    Executes a web scrape on the Australian site of indeed.com for a given job in a given location.
    Extracts all items of value, including ratings and job descriptions, and collates them in a pandas dataframe.
    Args:
        job: (str) The job title to use in the search.
        loc: (str) The location to use in the search.
        salary: (str) optional, minimum salary to use in search.
        no_cache: (bool) optional, whether to bypass the local web cache from previous searches.
    Returns:
        None
    """
    # Prepare job and location strings for insertion into search request
    # TODO: add further checks for non-compatible symbols in job string (multiple plusses do not affect search)
    # TODO: change fixed query signifiers to arguments
    # TODO: allow flexible salary entry
    job_url = "q=" + "+".join(job.split(" "))
    loc_url = "&l=" + "+".join(loc.split(" "))
    if salary:
        # Salary is appended as one more '+'-joined term of the q= parameter.
        job_url = job_url + "+" + salary
    # Prepare search parameters
    base_url = "https://au.indeed.com/jobs?" + job_url + loc_url
    result = query_website(base_url, no_cache)
    job_results, num_results = extract_job_offers(result)
    # Repeat scrape if not all results are contained on page.
    # Each results page holds 10 offers; &start=N skips the first N.
    if num_results > 10:
        for num in range(10, num_results + 9, 10):
            page_url = base_url + f"&start={num}"
            result = query_website(page_url, no_cache)
            new_results, _ = extract_job_offers(result)
            job_results += new_results
    collate_jobs_data(job_results, num_results)
@scrape_app.command()
# NOTE(review): 'filter' shadows the builtin, but renaming it would change
# the CLI command name, so it is kept.
def filter(job: Optional[str] = typer.Option(None, help="Search string to filter job titles."),
           rating: Optional[str] = typer.Option(None, help="Minimum rating value to filter companies"),
           salary: Optional[str] = typer.Option(None, help="Minimum salary to filter job offers"),
           save: Optional[str] = typer.Option(None, help="If given, saves results file as json")):
    """
    Filters job results based on job title, company rating or minimum salary.
    Outputs results to screen or, if chosen, saves as json.
    Args:
        job: filter term for job title
        rating: minimum value for company rating
        salary: minimum salary for job offer
        save: (str) (optional) filename for results
    Returns:
        None
    """
    # Thin wrapper: all filtering logic lives in src.scrape.scrape.filter_results.
    filter_results(job, rating, salary, save)
@scrape_app.command()
def clear_cache():
    """
    Clears current requests cache.
    """
    print("Clearing cache...")
    # Empties the sqlite-backed cache installed via requests_cache.install_cache.
    requests_cache.patcher.clear()
    print("Done!")
if __name__ == "__main__":
scrape_app() |
""" 共通関数
"""
import datetime
from logging import Logger, getLogger, StreamHandler, handlers, Formatter, \
INFO
import os
from .const import LOGGER_DIR, LOG_LEVEL
def get_default_logger(initial_talk: bool = False) -> Logger:
    """Return the module's default logger, backed by a rotating file handler.

    Parameters
    ----------
    initial_talk : bool
        If True, log (at DEBUG) that the default logger is being used.
    """
    logger = getLogger(__name__)
    # getLogger(__name__) returns the same Logger object on every call; the
    # original attached a fresh RotatingFileHandler unconditionally, so
    # repeated calls duplicated every log line. Attach handlers only once.
    if not logger.handlers:
        # RotatingFileHandler-based logger
        __set_logger_RotatingFileHander(logger)
    if initial_talk:
        logger.debug('Using default logger.')
        logger.debug(f'Log level is {logger.level}.')
    return logger
def __set_logger_StreamHandler(logger: Logger) -> None:
    """Attach a console (StreamHandler) handler to *logger*.

    Bug fix: the original immediately re-assigned
    ``logger = getLogger(__name__)``, silently discarding the logger the
    caller passed in; the parameter is now actually used.
    """
    handler = StreamHandler()
    handler.setLevel(LOG_LEVEL)
    logger.setLevel(LOG_LEVEL)
    logger.addHandler(handler)
    # Do not forward records to ancestor loggers (avoids double output).
    logger.propagate = False
def __set_logger_RotatingFileHander(logger: Logger) -> None:
    """Attach a size-rotating file handler to *logger*.

    Log files are written to ``<LOGGER_DIR>/YYYY/YYYYMM/<timestamp>.log``,
    rotating at ~10 kB with up to 5 backups.
    """
    dt_now = datetime.datetime.now()
    str_now = dt_now.strftime('%Y%m%d_%H%M%S')
    # Year/month directory tree; created on demand.
    str_log_dir = dt_now.strftime(f'{LOGGER_DIR}/%Y/%Y%m')
    os.makedirs(str_log_dir, exist_ok=True)
    handler = handlers.RotatingFileHandler(
        f'{str_log_dir}/{str_now}.log', 'a+',
        maxBytes=10000, backupCount=5
    )
    handler.setLevel(LOG_LEVEL)
    handler.setFormatter(Formatter('[%(asctime)s] %(levelname)s: %(message)s'))
    logger.addHandler(handler)
    logger.setLevel(LOG_LEVEL)
|
import json
from datetime import datetime
from django.contrib.auth.models import AbstractUser
from django.db import models
# Create your models here.
class User(AbstractUser):
    # Custom user model extending Django's AbstractUser with billing fields.
    balance = models.FloatField(default=0)      # account balance
    sale_price = models.FloatField(default=0)   # per-user price (semantics inferred from name — confirm)
    def __str__(self):
        return self.email
class Requests(models.Model):
    """A batch of lookup requests submitted by a user."""
    user_id = models.IntegerField()
    temp_request_id = models.IntegerField(null=True)
    # BUG FIX: the original used default=datetime.now() — the call was
    # evaluated once at class-definition time, so every row shared one frozen
    # timestamp. Passing the callable makes Django evaluate it per save.
    request_date = models.DateTimeField(default=datetime.now)
class Request(models.Model):
    # Result of one phone-number HLR lookup belonging to a Requests batch.
    requests_id = models.IntegerField()  # integer reference to Requests (no FK constraint)
    phone = models.BigIntegerField()
    hlr_status = models.CharField(max_length=100)
    hlr_status_code = models.IntegerField(null=True, blank=True)
class TempRequest(models.Model):
    # Pending lookup batch; the phone list is stored JSON-encoded in a text column.
    user_id = models.IntegerField()
    price = models.FloatField()
    phones = models.TextField()  # JSON-encoded list — use set_phones/get_phones
    status = models.BooleanField(default=False)
    def set_phones(self, x):
        # Serialize before storing; does not save() the instance.
        self.phones = json.dumps(x)
    def get_phones(self):
        # Deserialize the stored JSON back into a Python list.
        return json.loads(self.phones)
class Price(models.Model):
    # Global default price applied when a user has no specific price set
    # (presumably — confirm against the views that read it).
    default_price = models.FloatField(default=80)
    def __str__(self):
        return str(self.default_price)
class XFile(models.Model):
    # A line from an uploaded file, kept with the phone extracted from it.
    user_id = models.IntegerField()
    temp_request_id = models.IntegerField(null=True)
    phone = models.TextField()
    line = models.TextField()  # original raw source line
    def __str__(self):
        return str(self.line)
|
#!/usr/bin/env python
# coding: utf-8
# Author: Chris Harris
# Class: BU CS 767 - Machine Learning
#
# This notebook builds off of Stefan Bergsteins CNN model using Keras. I liked his approach and my goal is to add to it and to compare performance from his CNN to an RNN: https://www.kaggle.com/crayharris/keras-deep-learning-on-titanic-data
#
#
# In[ ]:
# Code Borrowed from Stefan Bergstein
# https://www.kaggle.com/stefanbergstein/keras-deep-learning-on-titanic-data/notebook
# data processing
import numpy as np
import pandas as pd
# machine learning
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense, Dropout, LSTM #Dropout and LSTM needed for RNN
from keras.wrappers.scikit_learn import KerasClassifier
from keras.wrappers.scikit_learn import KerasRegressor
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
# utils
import time
from datetime import timedelta
# Configuration flags and variables.
verbose=0 # verbosity flag read by the model-builder functions below
# Input files
file_train='../input/train.csv'
file_test='../input/test.csv'
# Fix the random seed for reproducibility.
seed = 23
np.random.seed(seed)
# Read training data; PassengerId becomes the dataframe index.
train_df = pd.read_csv(file_train,index_col='PassengerId')
# In[ ]:
# Show the columns
train_df.columns.values
# In[ ]:
# Show the data that we've loaded
train_df.head()
# Perform data cleansing
# * Remove Name, Cabin and Ticket (These are beyond the scope of our analysis)
# * Transform categorical features to numeric ones
# In[ ]:
def prep_data(df):
    """Clean the Titanic dataframe for model input.

    Drops the free-text columns, imputes missing values (mean for Age/Fare,
    most frequent value for Embarked), maps Sex to 0/1 and one-hot encodes
    Embarked. Returns the cleaned dataframe.
    """
    df = df.drop(['Name', 'Ticket', 'Cabin'], axis=1)
    # Numeric columns: fill missing entries with the column mean.
    for col in ('Age', 'Fare'):
        df[[col]] = df[[col]].fillna(value=df[[col]].mean())
    # Embarked: fill with the most frequent port.
    most_common_port = df['Embarked'].value_counts().idxmax()
    df[['Embarked']] = df[['Embarked']].fillna(value=most_common_port)
    # Encode Sex numerically (female=1, male=0).
    df['Sex'] = df['Sex'].map({'female': 1, 'male': 0}).astype(int)
    # One-hot encode Embarked and splice the dummy columns in.
    embarked_dummies = pd.get_dummies(df['Embarked'], prefix='Embarked')
    df = df.drop('Embarked', axis=1)
    df = df.join(embarked_dummies)
    return df
# Verify that the null data has been removed
# In[ ]:
train_df = prep_data(train_df)
train_df.isnull().sum()
# Break out training and test data
# In[ ]:
# X contains all columns except 'Survived'
X = train_df.drop(['Survived'], axis=1).values.astype(float)
# It is almost always a good idea to perform some scaling of input values when using neural network models (jb).
# Standardize each feature to zero mean / unit variance.
scale = StandardScaler()
X = scale.fit_transform(X)
# Y is just the 'Survived' column
Y = train_df['Survived'].values
# Basic CNN with Keras
# * This net has 16 neurons
# * Two hidden layers
# In[ ]:
def create_model(optimizer='adam', init='uniform'):
    """Build the baseline fully-connected binary classifier (16-8-4 relu
    layers, single sigmoid output), compiled with binary cross-entropy.

    Args:
        optimizer: keras optimizer name or instance.
        init: kernel initializer used for every Dense layer.
    Returns:
        A compiled keras Sequential model.
    """
    # create model
    if verbose: print("**Create model with optimizer: %s; init: %s" % (optimizer, init) )
    model = Sequential()
    # Input width comes from the module-level feature matrix X.
    model.add(Dense(16, input_dim=X.shape[1], kernel_initializer=init, activation='relu'))
    model.add(Dense(8, kernel_initializer=init, activation='relu'))
    model.add(Dense(4, kernel_initializer=init, activation='relu'))
    model.add(Dense(1, kernel_initializer=init, activation='sigmoid'))
    # Compile model
    model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
    return model
# In[ ]:
# CHRIS NEW CODE FOR FINAL PROJ
def create_model_5layer(optimizer='adam', init='uniform'):
    """Build the deeper fully-connected variant: 64-32-16-8-4-2 relu stack
    with a single sigmoid output, compiled with binary cross-entropy."""
    if verbose: print("**Create model with optimizer: %s; init: %s" % (optimizer, init))
    net = Sequential()
    # First layer fixes the input dimension; widths then halve layer by layer.
    net.add(Dense(64, input_dim=X.shape[1], kernel_initializer=init, activation='relu'))
    for units in (32, 16, 8, 4, 2):
        net.add(Dense(units, kernel_initializer=init, activation='relu'))
    net.add(Dense(1, kernel_initializer=init, activation='sigmoid'))
    net.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
    return net
# In[ ]:
# CHRIS NEW CODE FOR FINAL PROJ
def create_model_10layer(optimizer='adam', init='uniform'):
    """Build the 11-hidden-layer variant: 64 input layer followed by pairs
    of 32/16/8/4/2-unit relu layers, with a single sigmoid output."""
    if verbose: print("**Create model with optimizer: %s; init: %s" % (optimizer, init))
    net = Sequential()
    net.add(Dense(64, input_dim=X.shape[1], kernel_initializer=init, activation='relu'))
    # Each width repeats twice before halving.
    for units in (32, 32, 16, 16, 8, 8, 4, 4, 2, 2):
        net.add(Dense(units, kernel_initializer=init, activation='relu'))
    net.add(Dense(1, kernel_initializer=init, activation='sigmoid'))
    net.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
    return net
# In[ ]:
# CHRIS NEW CODE FOR FINAL PROJ
def create_model_20layer(optimizer='adam', init='uniform'):
    """Build and compile the '20 layer' binary classifier (each funnel width repeated 4x)."""
    if verbose: print("**Create model with optimizer: %s; init: %s" % (optimizer, init) )
    model = Sequential()
    # Input layer sized to the training feature count (global X).
    model.add(Dense(64, input_dim=X.shape[1], kernel_initializer=init, activation='relu'))
    # Each width (32, 16, 8, 4, 2) appears four times in this deepest variant.
    for width in (32, 16, 8, 4, 2):
        for _ in range(4):
            model.add(Dense(width, kernel_initializer=init, activation='relu'))
    # Single sigmoid unit for binary classification.
    model.add(Dense(1, kernel_initializer=init, activation='sigmoid'))
    # Compile model
    model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
    return model
# Setting static hyperparams from previous runs where grid search was used
# In[ ]:
# pre-selected parameters
# NOTE(review): values presumably come from an earlier grid-search run -- confirm.
best_epochs = 200               # training epochs
best_batch_size = 5             # mini-batch size
best_init = 'glorot_uniform'    # kernel initializer
best_optimizer = 'rmsprop'      # gradient optimizer
# Model Build and Perform Prediction
# In[ ]:
# Create a classifier with best parameters
model_pred = KerasClassifier(build_fn=create_model, optimizer=best_optimizer, init=best_init, epochs=best_epochs, batch_size=best_batch_size, verbose=verbose)
model_pred.fit(X, Y)
# Read test data
test_df = pd.read_csv(file_test,index_col='PassengerId')
# Prep and clean data
test_df = prep_data(test_df)
# Create X_test
X_test = test_df.values.astype(float)
# Scaling
# NOTE: reuses the scaler fitted on the training data so train/test share the same transform.
X_test = scale.transform(X_test)
# Predict 'Survived'
prediction = model_pred.predict(X_test)
# Create CNN Predictions for 2 layer model
# In[ ]:
# Build a Kaggle-style submission CSV (PassengerId, Survived) for the base model.
submission = pd.DataFrame({
    'PassengerId': test_df.index,
    'Survived': prediction[:,0],
})
submission.sort_values('PassengerId', inplace=True)
submission.to_csv('CNN-submission-simple-cleansing_2Layer.csv', index=False)
# 5 Layer Model creation
# In[ ]:
# CHRIS NEW CODE FOR FINAL PROJ
# Create a classifier with best parameters
model_pred5 = KerasClassifier(build_fn=create_model_5layer, optimizer=best_optimizer, init=best_init, epochs=best_epochs, batch_size=best_batch_size, verbose=verbose)
model_pred5.fit(X, Y)
# Read test data
test_df5 = pd.read_csv(file_test, index_col='PassengerId')
# Prep and clean data
test_df5 = prep_data(test_df5)
# Create X_test
# BUG FIX: previously read features from test_df (the 2-layer frame) instead of test_df5.
X_test5 = test_df5.values.astype(float)
# Scaling (reuse the scaler fitted on the training data)
X_test5 = scale.transform(X_test5)
# Predict 'Survived'
prediction = model_pred5.predict(X_test5)
# Create CNN Predictions for 5 layer model
# In[ ]:
# CHRIS NEW CODE FOR FINAL PROJ
submission = pd.DataFrame({
    'PassengerId': test_df5.index,  # BUG FIX: use test_df5's index, not test_df's
    'Survived': prediction[:, 0],
})
submission.sort_values('PassengerId', inplace=True)
submission.to_csv('CNN-submission-simple-cleansing_5Layer.csv', index=False)
# 10 Layer Model creation
# In[ ]:
# CHRIS NEW CODE FOR FINAL PROJ
# Create a classifier with best parameters
model_pred10 = KerasClassifier(build_fn=create_model_10layer, optimizer=best_optimizer, init=best_init, epochs=best_epochs, batch_size=best_batch_size, verbose=verbose)
model_pred10.fit(X, Y)
# Read test data
test_df10 = pd.read_csv(file_test, index_col='PassengerId')
# Prep and clean data
test_df10 = prep_data(test_df10)
# Create X_test
# BUG FIX: previously read features from test_df (the 2-layer frame) instead of test_df10.
X_test10 = test_df10.values.astype(float)
# Scaling (reuse the scaler fitted on the training data)
X_test10 = scale.transform(X_test10)
# Predict 'Survived'
prediction = model_pred10.predict(X_test10)
# Create CNN Predictions for 10 layer model
# In[ ]:
# CHRIS NEW CODE FOR FINAL PROJ
submission = pd.DataFrame({
    'PassengerId': test_df10.index,  # BUG FIX: use test_df10's index, not test_df's
    'Survived': prediction[:, 0],
})
submission.sort_values('PassengerId', inplace=True)
submission.to_csv('CNN-submission-simple-cleansing_10Layer.csv', index=False)
# 20 Layer Model creation
# In[ ]:
# CHRIS NEW CODE FOR FINAL PROJ
# Create a classifier with best parameters
model_pred20 = KerasClassifier(build_fn=create_model_20layer, optimizer=best_optimizer, init=best_init, epochs=best_epochs, batch_size=best_batch_size, verbose=verbose)
model_pred20.fit(X, Y)
# Read test data
test_df20 = pd.read_csv(file_test, index_col='PassengerId')
# Prep and clean data
test_df20 = prep_data(test_df20)
# Create X_test
# BUG FIX: previously read features from test_df (the 2-layer frame) instead of test_df20.
X_test20 = test_df20.values.astype(float)
# Scaling (reuse the scaler fitted on the training data)
X_test20 = scale.transform(X_test20)
# Predict 'Survived'
prediction = model_pred20.predict(X_test20)
# Create CNN Predictions for 20 layer model
# In[ ]:
# CHRIS NEW CODE FOR FINAL PROJ
submission = pd.DataFrame({
    'PassengerId': test_df20.index,  # BUG FIX: use test_df20's index, not test_df's
    'Survived': prediction[:, 0],
})
submission.sort_values('PassengerId', inplace=True)
submission.to_csv('CNN-submission-simple-cleansing_20Layer.csv', index=False)
# In[ ]:
def create_rnn_model(optimizer='adam', init='uniform'):
    """Build and compile a small dense binary classifier (16-8-4 funnel + sigmoid output).

    NOTE(review): despite the name, this is a plain feed-forward network,
    not a recurrent one -- confirm the intended architecture.
    """
    if verbose: print("**Create model with optimizer: %s; init: %s" % (optimizer, init) )
    model = Sequential()
    # Input layer sized to the training feature count (global X).
    model.add(Dense(16, input_dim=X.shape[1], kernel_initializer=init, activation='relu'))
    for width in (8, 4):
        model.add(Dense(width, kernel_initializer=init, activation='relu'))
    # Single sigmoid unit for binary classification.
    model.add(Dense(1, kernel_initializer=init, activation='sigmoid'))
    # Compile model
    model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
    return model
|
from os import environ, pathsep
from pathlib import Path
from .utils import Util
# Expose every entry found on the PATH as a module-level Util wrapper,
# keyed by its file name.
for directory in environ["PATH"].split(pathsep):
    for entry in Path(directory).glob("*"):
        globals()[entry.name] = Util(entry.name)
|
from django.views.generic import TemplateView
from django.conf import settings
class LidarView(TemplateView):
    """Render the Potree LiDAR viewer template with bucket and permission context."""
    template_name = "lidar/potree/data/viewer.html"

    def get_context_data(self, **kwargs):
        """Add the LiDAR bucket URL and an is-authenticated flag to the template context."""
        ctx = super().get_context_data(**kwargs)
        ctx.update(
            BUCKET_URL=settings.LIDAR_BUCKET,
            # Permission is simply "user is logged in".
            lidar_permission=self.request.user.is_authenticated,
        )
        return ctx
|
import atexit
import os
import signal
from abc import ABCMeta, abstractmethod
from os.path import abspath, dirname, join, exists
from utils import log
class Socket(object):
    """Abstract base class for file-backed test sockets.

    Installs a SIGINT handler and an atexit hook so that ``tearDown``
    runs on Ctrl-C or at normal interpreter exit.

    NOTE(review): ``__metaclass__`` is Python 2 syntax; under Python 3 this
    attribute is ignored and ABCMeta is not applied -- confirm the target
    Python version before relying on abstract-method enforcement.
    """
    __metaclass__ = ABCMeta
    # Default socket file lives next to this module.
    DEFAULT_NAME = join(abspath(dirname(__file__)), 'test.socket')
    # Sentinel string representing an empty message.
    EMPTY_MESSAGE = '[empty message]'

    def __init__(self, name=""):
        """Use ``name`` as the socket file path (DEFAULT_NAME if empty) and install cleanup hooks."""
        self.socket_file = name or self.DEFAULT_NAME
        signal.signal(signal.SIGINT, self.sig_handler)
        atexit.register(self.tearDown)

    @abstractmethod
    def read(self):
        """Read from the socket (implemented by subclasses)."""
        pass

    @abstractmethod
    def write(self):
        """Write to the socket (implemented by subclasses)."""
        pass

    @abstractmethod
    def close(self):
        """Close the socket (implemented by subclasses)."""
        pass

    @abstractmethod
    def tearDown(self):
        """Release resources; invoked on SIGINT and at interpreter exit."""
        pass

    def _remove_socket_file(self):
        """Delete the socket file from disk if it exists."""
        if exists(self.socket_file):
            os.remove(self.socket_file)

    def sig_handler(self, sig, frame):
        """SIGINT handler: log, tear down, then re-raise as KeyboardInterrupt."""
        log("** SIGINT Received **")
        self.tearDown()
        raise KeyboardInterrupt()
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Make example datasets.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import numpy as np
from astropy.units import Quantity
from astropy.time import Time, TimeDelta
from astropy.coordinates import SkyCoord, AltAz, Angle
from astropy.table import Table
from ..extern.pathlib import Path
from ..utils.random import sample_sphere, sample_powerlaw, get_random_state
from ..utils.time import time_ref_from_dict, time_relative_to_ref
from ..utils.fits import table_to_fits_table
__all__ = [
'make_test_psf',
'make_test_observation_table',
'make_test_bg_cube_model',
'make_test_dataset',
'make_test_eventlist',
]
def make_test_psf(energy_bins=15, theta_bins=12):
    """Create a test FITS PSF file.

    A log-linear dependency in energy is assumed, where the size of
    the PSF decreases by a factor of two over two decades. The
    theta dependency is a parabola where at theta = 2 deg the size
    of the PSF has increased by 30%.

    Parameters
    ----------
    energy_bins : int
        Number of energy bins.
    theta_bins : int
        Number of theta bins.

    Returns
    -------
    psf : `~gammapy.irf.EnergyDependentMultiGaussPSF`
        PSF.
    """
    from ..irf import EnergyDependentMultiGaussPSF

    # Log-spaced energy bin edges from 0.1 to 100 TeV.
    edges = np.logspace(-1, 2, energy_bins + 1)
    energies_lo = edges[:-1]
    energies_hi = edges[1:]
    theta_lo = theta_hi = np.linspace(0, 2.2, theta_bins)

    def sigma_energy_theta(energy, theta, sigma):
        # log-linear dependency of sigma with energy:
        # m and b are chosen such that at 100 TeV we have sigma
        # and at 0.1 TeV we have sigma/2
        m = -sigma / 6.
        b = sigma + m
        return (2 * b + m * np.log10(energy)) * (0.3 / 4 * theta ** 2 + 1)

    # Norms and sigma reference values are taken from psf.txt in irf/test/data.
    energies, thetas = np.meshgrid(energies_lo, theta_lo)
    sigmas = [sigma_energy_theta(energies, thetas, s)
              for s in (0.0219206, 0.0905762, 0.0426358)]
    norms = [n * np.ones((theta_bins, energy_bins))
             for n in 302.654 * np.array([1, 0.0406003, 0.444632])]
    return EnergyDependentMultiGaussPSF(Quantity(energies_lo, 'TeV'),
                                        Quantity(energies_hi, 'TeV'),
                                        Quantity(theta_lo, 'deg'),
                                        sigmas, norms)
def make_test_observation_table(observatory_name='HESS', n_obs=10,
                                az_range=Angle([0, 360], 'deg'),
                                alt_range=Angle([45, 90], 'deg'),
                                date_range=(Time('2010-01-01'),
                                            Time('2015-01-01')),
                                use_abs_time=False,
                                n_tels_range=(3, 4),
                                random_state='random-seed'):
    """Make a test observation table.

    Create an observation table following a specific pattern.
    For the moment, only random observation tables are created.
    The observation table is created according to a specific
    observatory, and randomizing the observation pointing positions
    in a specified az-alt range.
    If a *date_range* is specified, the starting time
    of the observations will be restricted to the specified interval.
    These parameters are interpreted as date, the precise hour of the
    day is ignored, unless the end date is closer than 1 day to the
    starting date, in which case, the precise time of the day is also
    considered.
    In addition, a range can be specified for the number of telescopes.

    Parameters
    ----------
    observatory_name : str, optional
        Name of the observatory; a list of choices is given in
        `~gammapy.data.observatory_locations`.
    n_obs : int, optional
        Number of observations for the obs table.
    az_range : `~astropy.coordinates.Angle`, optional
        Azimuth angle range (start, end) for random generation of
        observation pointing positions.
    alt_range : `~astropy.coordinates.Angle`, optional
        Altitude angle range (start, end) for random generation of
        observation pointing positions.
    date_range : `~astropy.time.Time`, optional
        Date range (start, end) for random generation of observation
        start time.
    use_abs_time : bool, optional
        Use absolute UTC times instead of [MET]_ seconds after the reference.
    n_tels_range : int, optional
        Range (start, end) of number of telescopes participating in
        the observations.
    random_state : {int, 'random-seed', 'global-rng', `~numpy.random.RandomState`}, optional
        Defines random number generator initialisation.
        Passed to `~gammapy.utils.random.get_random_state`.

    Returns
    -------
    obs_table : `~gammapy.data.ObservationTable`
        Observation table.
    """
    from ..data import ObservationTable, observatory_locations
    random_state = get_random_state(random_state)
    n_obs_start = 1
    obs_table = ObservationTable()
    # build a time reference as the start of 2010
    dateref = Time('2010-01-01T00:00:00')
    # np.modf returns (fractional, integral) parts, in that order
    dateref_mjd_fra, dateref_mjd_int = np.modf(dateref.mjd)
    # define table header
    obs_table.meta['OBSERVATORY_NAME'] = observatory_name
    obs_table.meta['MJDREFI'] = dateref_mjd_int
    obs_table.meta['MJDREFF'] = dateref_mjd_fra
    if use_abs_time:
        # show the observation times in UTC
        obs_table.meta['TIME_FORMAT'] = 'absolute'
    else:
        # show the observation times in seconds after the reference
        obs_table.meta['TIME_FORMAT'] = 'relative'
    header = obs_table.meta
    # obs id
    obs_id = np.arange(n_obs_start, n_obs_start + n_obs)
    obs_table['OBS_ID'] = obs_id
    # obs time: 30 min
    ontime = Quantity(30. * np.ones_like(obs_id), 'minute').to('second')
    obs_table['ONTIME'] = ontime
    # livetime: 25 min
    time_live = Quantity(25. * np.ones_like(obs_id), 'minute').to('second')
    obs_table['LIVETIME'] = time_live
    # start time
    # - random points between the start of 2010 and the end of 2014 (unless
    # otherwise specified)
    # - using the start of 2010 as a reference time for the header of the table
    # - observations restrict to night time (only if specified time interval is
    # more than 1 day)
    # - considering start of astronomical day at midday: implicit in setting
    # the start of the night, when generating random night hours
    datestart = date_range[0]
    dateend = date_range[1]
    time_start = random_state.uniform(datestart.mjd, dateend.mjd, len(obs_id))
    time_start = Time(time_start, format='mjd', scale='utc')
    # check if time interval selected is more than 1 day
    if (dateend - datestart).jd > 1.:
        # keep only the integer part (i.e. the day, not the fraction of the day)
        time_start_f, time_start_i = np.modf(time_start.mjd)
        time_start = Time(time_start_i, format='mjd', scale='utc')
        # random generation of night hours: 6 h (from 22 h to 4 h), leaving 1/2 h
        # time for the last run to finish
        night_start = Quantity(22., 'hour')
        night_duration = Quantity(5.5, 'hour')
        hour_start = random_state.uniform(night_start.value,
                                          night_start.value + night_duration.value,
                                          len(obs_id))
        hour_start = Quantity(hour_start, 'hour')
        # add night hour to integer part of MJD
        time_start += hour_start
    if use_abs_time:
        # show the observation times in UTC
        time_start = Time(time_start.isot)
    else:
        # show the observation times in seconds after the reference
        time_start = time_relative_to_ref(time_start, header)
        # converting to quantity (better treatment of units)
        time_start = Quantity(time_start.sec, 'second')
    obs_table['TSTART'] = time_start
    # stop time
    # calculated as TSTART + ONTIME
    if use_abs_time:
        time_stop = Time(obs_table['TSTART'])
        time_stop += TimeDelta(obs_table['ONTIME'])
    else:
        time_stop = TimeDelta(obs_table['TSTART'])
        time_stop += TimeDelta(obs_table['ONTIME'])
        # converting to quantity (better treatment of units)
        time_stop = Quantity(time_stop.sec, 'second')
    obs_table['TSTOP'] = time_stop
    # az, alt
    # random points in a portion of sphere; default: above 45 deg altitude
    az, alt = sample_sphere(size=len(obs_id),
                            lon_range=az_range,
                            lat_range=alt_range,
                            random_state=random_state)
    az = Angle(az, 'deg')
    alt = Angle(alt, 'deg')
    obs_table['AZ'] = az
    obs_table['ALT'] = alt
    # RA, dec
    # derive from az, alt taking into account that alt, az represent the values
    # at the middle of the observation, i.e. at time_ref + (TIME_START + TIME_STOP)/2
    # (or better: time_ref + TIME_START + (TIME_OBSERVATION/2))
    # in use_abs_time mode, the time_ref should not be added, since it's already included
    # in TIME_START and TIME_STOP
    az = Angle(obs_table['AZ'])
    alt = Angle(obs_table['ALT'])
    if use_abs_time:
        obstime = Time(obs_table['TSTART'])
        obstime += TimeDelta(obs_table['ONTIME']) / 2.
    else:
        obstime = time_ref_from_dict(obs_table.meta)
        obstime += TimeDelta(obs_table['TSTART'])
        obstime += TimeDelta(obs_table['ONTIME']) / 2.
    location = observatory_locations[observatory_name]
    altaz_frame = AltAz(obstime=obstime, location=location)
    alt_az_coord = SkyCoord(az, alt, frame=altaz_frame)
    sky_coord = alt_az_coord.transform_to('icrs')
    obs_table['RA'] = sky_coord.ra
    obs_table['DEC'] = sky_coord.dec
    # positions
    # number of telescopes
    # random integers in a specified range; default: between 3 and 4
    n_tels = random_state.randint(n_tels_range[0], n_tels_range[1] + 1, len(obs_id))
    obs_table['N_TELS'] = n_tels
    # muon efficiency
    # random between 0.6 and 1.0
    muoneff = random_state.uniform(low=0.6, high=1.0, size=len(obs_id))
    obs_table['MUONEFF'] = muoneff
    return obs_table
def make_test_bg_cube_model(detx_range=Angle([-10., 10.], 'deg'),
                            ndetx_bins=24,
                            dety_range=Angle([-10., 10.], 'deg'),
                            ndety_bins=24,
                            energy_band=Quantity([0.01, 100.], 'TeV'),
                            nenergy_bins=14,
                            altitude=Angle(70., 'deg'),
                            sigma=Angle(5., 'deg'),
                            spectral_index=2.7,
                            apply_mask=False,
                            do_not_force_mev_units=False):
    """Make a test bg cube model.

    The background counts cube is created following a 2D symmetric
    gaussian model for the spatial coordinates (X, Y) and a power-law
    in energy.
    The gaussian width varies in energy from sigma/2 to sigma.
    The power-law slope in log-log representation is given by
    the spectral_index parameter.
    The norm depends linearly on the livetime
    and on the altitude angle of the observation.
    It is possible to mask 1/4th of the image (for **x > x_center**
    and **y > y_center**). Useful for testing coordinate rotations.
    Per default units of *1 / (MeV sr s)* for the bg rate are
    enforced, unless *do_not_force_mev_units* is set.
    This is in agreement to the convention applied in
    `~gammapy.background.make_bg_cube_model`.
    This method is useful for instance to produce true (simulated)
    background cube models to compare to the reconstructed ones
    produced with `~gammapy.background.make_bg_cube_model`.
    For details on how to do this, please refer to
    :ref:`background_make_background_models_datasets_for_testing`.

    Parameters
    ----------
    detx_range : `~astropy.coordinates.Angle`, optional
        X coordinate range (min, max).
    ndetx_bins : int, optional
        Number of (linear) bins in X coordinate.
    dety_range : `~astropy.coordinates.Angle`, optional
        Y coordinate range (min, max).
    ndety_bins : int, optional
        Number of (linear) bins in Y coordinate.
    energy_band : `~astropy.units.Quantity`, optional
        Energy range (min, max).
    nenergy_bins : int, optional
        Number of (logarithmic) bins in energy.
    altitude : `~astropy.coordinates.Angle`, optional
        observation altitude angle for the model.
    sigma : `~astropy.coordinates.Angle`, optional
        Width of the gaussian model used for the spatial coordinates.
    spectral_index : float, optional
        Index for the power-law model used for the energy coordinate.
    apply_mask : bool, optional
        If set, 1/4th of the image is masked (for **x > x_center**
        and **y > y_center**).
    do_not_force_mev_units : bool, optional
        Set to ``True`` to use the same energy units as the energy
        binning for the bg rate.

    Returns
    -------
    bg_cube_model : `~gammapy.background.FOVCubeBackgroundModel`
        Background cube model.
    """
    from ..background import FOVCubeBackgroundModel
    # spatial bins (linear)
    delta_x = (detx_range[1] - detx_range[0]) / ndetx_bins
    detx_bin_edges = np.arange(ndetx_bins + 1) * delta_x + detx_range[0]
    delta_y = (dety_range[1] - dety_range[0]) / ndety_bins
    dety_bin_edges = np.arange(ndety_bins + 1) * delta_y + dety_range[0]
    # energy bins (logarithmic)
    log_delta_energy = (np.log(energy_band[1].value)
                        - np.log(energy_band[0].value)) / nenergy_bins
    energy_bin_edges = np.exp(np.arange(nenergy_bins + 1) * log_delta_energy
                              + np.log(energy_band[0].value))
    energy_bin_edges = Quantity(energy_bin_edges, energy_band[0].unit)
    # TODO: this function should be reviewed/re-written, when
    # the following PR is completed:
    # https://github.com/gammapy/gammapy/pull/290
    # define empty bg cube model and set bins
    bg_cube_model = FOVCubeBackgroundModel.set_cube_binning(detx_edges=detx_bin_edges,
                                                            dety_edges=dety_bin_edges,
                                                            energy_edges=energy_bin_edges)
    # counts
    # define coordinate grids for the calculations
    det_bin_centers = bg_cube_model.counts_cube.image_bin_centers
    energy_bin_centers = bg_cube_model.counts_cube.energy_edges.log_centers
    # 'ij' indexing gives arrays ordered as (energy, dety, detx)
    energy_points, dety_points, detx_points = np.meshgrid(energy_bin_centers,
                                                          det_bin_centers[1],
                                                          det_bin_centers[0],
                                                          indexing='ij')
    E_0 = Quantity(1., 'TeV')  # reference energy for the model
    # norm of the model
    # taking as reference for now a dummy value of 1
    # it is linearly dependent on the zenith angle (90 deg - altitude)
    # it is norm_max at alt = 90 deg and norm_max/2 at alt = 0 deg
    norm_max = Quantity(1, '')
    alt_min = Angle(0., 'deg')
    alt_max = Angle(90., 'deg')
    slope = (norm_max - norm_max / 2) / (alt_max - alt_min)
    free_term = norm_max / 2 - slope * alt_min
    norm = altitude * slope + free_term
    # define E dependent sigma
    # it is defined via a PL, in order to be log-linear
    # it is equal to the parameter sigma at E max
    # and sigma/2. at E min
    sigma_min = sigma / 2.  # at E min
    sigma_max = sigma  # at E max
    s_index = np.log(sigma_max / sigma_min)
    s_index /= np.log(energy_bin_edges[-1] / energy_bin_edges[0])
    s_norm = sigma_min * ((energy_bin_edges[0] / E_0) ** -s_index)
    # note: `sigma` is rebound here from an Angle to an E-dependent array
    sigma = s_norm * ((energy_points / E_0) ** s_index)
    # calculate counts
    gaussian = np.exp(-((detx_points) ** 2 + (dety_points) ** 2) / sigma ** 2)
    powerlaw = (energy_points / E_0) ** -spectral_index
    counts = norm * gaussian * powerlaw
    bg_cube_model.counts_cube.data = Quantity(counts, '')
    # livetime
    # taking as reference for now a dummy value of 1 s
    livetime = Quantity(1., 'second')
    bg_cube_model.livetime_cube.data += livetime
    # background: counts per livetime per bin volume
    bg_cube_model.background_cube.data = bg_cube_model.counts_cube.data.copy()
    bg_cube_model.background_cube.data /= bg_cube_model.livetime_cube.data
    bg_cube_model.background_cube.data /= bg_cube_model.background_cube.bin_volume
    # bg_cube_model.background_cube.set_zero_level()
    if not do_not_force_mev_units:
        # use units of 1 / (MeV sr s) for the bg rate
        bg_rate = bg_cube_model.background_cube.data.to('1 / (MeV sr s)')
        bg_cube_model.background_cube.data = bg_rate
    # apply mask if requested
    if apply_mask:
        # find central coordinate
        detx_center = (detx_range[1] + detx_range[0]) / 2.
        dety_center = (dety_range[1] + dety_range[0]) / 2.
        # keep only the region with x <= center or y <= center (zeroes the rest)
        mask = (detx_points <= detx_center) & (dety_points <= dety_center)
        bg_cube_model.counts_cube.data *= mask
        bg_cube_model.livetime_cube.data *= mask
        bg_cube_model.background_cube.data *= mask
    return bg_cube_model
def make_test_dataset(outdir, overwrite=False,
                      observatory_name='HESS', n_obs=10,
                      az_range=Angle([0, 360], 'deg'),
                      alt_range=Angle([45, 90], 'deg'),
                      date_range=(Time('2010-01-01'),
                                  Time('2015-01-01')),
                      n_tels_range=(3, 4),
                      sigma=Angle(5., 'deg'),
                      spectral_index=2.7,
                      random_state='random-seed'):
    """
    Make a test dataset and save it to disk.

    Uses:

    * `~gammapy.datasets.make_test_observation_table` to generate an
      observation table
    * `~gammapy.datasets.make_test_eventlist` to generate an event list
      and effective area table for each observation
    * `~gammapy.data.DataStore` to handle the file naming scheme;
      currently only the H.E.S.S. naming scheme is supported

    This method is useful for instance to produce samples in order
    to test the machinery for reconstructing background (cube) models.
    See also :ref:`datasets_obssim`.

    Parameters
    ----------
    outdir : str
        Path to store the files.
    overwrite : bool, optional
        Flag to remove previous datasets in ``outdir`` (if existing).
    observatory_name : str, optional
        Name of the observatory; a list of choices is given in
        `~gammapy.data.observatory_locations`.
    n_obs : int
        Number of observations for the obs table.
    az_range : `~astropy.coordinates.Angle`, optional
        Azimuth angle range (start, end) for random generation of
        observation pointing positions.
    alt_range : `~astropy.coordinates.Angle`, optional
        Altitude angle range (start, end) for random generation of
        observation pointing positions.
    date_range : `~astropy.time.Time`, optional
        Date range (start, end) for random generation of observation
        start time.
    n_tels_range : int, optional
        Range (start, end) of number of telescopes participating in
        the observations.
    sigma : `~astropy.coordinates.Angle`, optional
        Width of the gaussian model used for the spatial coordinates.
    spectral_index : float, optional
        Index for the power-law model used for the energy coordinate.
    random_state : {int, 'random-seed', 'global-rng', `~numpy.random.RandomState`}, optional
        Defines random number generator initialisation.
        Passed to `~gammapy.utils.random.get_random_state`.
    """
    from ..data import DataStore
    random_state = get_random_state(random_state)
    # create output folder
    # NOTE(review): mkdir(exist_ok=overwrite) raises if the folder exists and
    # overwrite is False; it does not *remove* previous content -- confirm
    # the docstring's "remove previous datasets" claim.
    Path(outdir).mkdir(exist_ok=overwrite)
    # generate observation table
    observation_table = make_test_observation_table(observatory_name=observatory_name,
                                                    n_obs=n_obs,
                                                    az_range=az_range,
                                                    alt_range=alt_range,
                                                    date_range=date_range,
                                                    use_abs_time=False,
                                                    n_tels_range=n_tels_range,
                                                    random_state=random_state)
    # save observation list to disk
    outfile = Path(outdir) / 'runinfo.fits'
    observation_table.write(str(outfile))
    # create data store for the organization of the files
    # using H.E.S.S.-like dir/file naming scheme
    if observatory_name == 'HESS':
        scheme = 'HESS'
    else:
        # NOTE(review): concatenation is missing a space before "not implemented".
        s_error = "Warning! Storage scheme for {}".format(observatory_name)
        s_error += "not implemented. Only H.E.S.S. scheme is available."
        raise ValueError(s_error)
    data_store = DataStore(dir=outdir, scheme=scheme)
    # loop over observations
    for obs_id in observation_table['OBS_ID']:
        event_list, aeff_hdu = make_test_eventlist(observation_table=observation_table,
                                                   obs_id=obs_id,
                                                   sigma=sigma,
                                                   spectral_index=spectral_index,
                                                   random_state=random_state)
        # save event list and effective area table to disk
        outfile = data_store.filename(obs_id, filetype='events')
        outfile_split = outfile.rsplit("/", 1)
        # NOTE(review): os.makedirs raises if the directory already exists --
        # confirm each obs_id maps to a distinct directory.
        os.makedirs(outfile_split[0])  # recursively
        event_list.write(outfile)
        outfile = data_store.filename(obs_id, filetype='effective area')
        aeff_hdu.writeto(outfile)
def make_test_eventlist(observation_table,
                        obs_id,
                        sigma=Angle(5., 'deg'),
                        spectral_index=2.7,
                        random_state='random-seed'):
    """
    Make a test event list for a specified observation.

    The observation can be specified with an observation table object
    and the observation ID pointing to the correct observation in the
    table.
    For now, only a very rudimentary event list is generated, containing
    only detector X, Y coordinates (a.k.a. nominal system) and energy
    columns for the events. And the livetime of the observations stored
    in the header.
    The model used to simulate events is also very simple. Only
    dummy background is created (no signal).
    The background is created following a 2D symmetric gaussian
    model for the spatial coordinates (X, Y) and a power-law in
    energy.
    The gaussian width varies in energy from sigma/2 to sigma.
    The number of events generated depends linearly on the livetime
    and on the altitude angle of the observation.
    The model can be tuned via the sigma and spectral_index parameters.
    In addition, an effective area table is produced. For the moment
    only the low energy threshold is filled.
    See also :ref:`datasets_obssim`.

    Parameters
    ----------
    observation_table : `~gammapy.data.ObservationTable`
        Observation table containing the observation to fake.
    obs_id : int
        Observation ID of the observation to fake inside the observation table.
    sigma : `~astropy.coordinates.Angle`, optional
        Width of the gaussian model used for the spatial coordinates.
    spectral_index : float, optional
        Index for the power-law model used for the energy coordinate.
    random_state : {int, 'random-seed', 'global-rng', `~numpy.random.RandomState`}, optional
        Defines random number generator initialisation.
        Passed to `~gammapy.utils.random.get_random_state`.

    Returns
    -------
    event_list : `~gammapy.data.EventList`
        Event list.
    aeff_hdu : `~astropy.io.fits.BinTableHDU`
        Effective area table.
    """
    from ..data import EventList
    random_state = get_random_state(random_state)
    # find obs row in obs table
    obs_ids = observation_table['OBS_ID'].data
    obs_index = np.where(obs_ids == obs_id)
    row = obs_index[0][0]
    # get observation information
    alt = Angle(observation_table['ALT'])[row]
    livetime = Quantity(observation_table['LIVETIME'])[row]
    # number of events to simulate
    # it is linearly dependent on the livetime, taking as reference
    # a trigger rate of 300 Hz
    # it is linearly dependent on the zenith angle (90 deg - altitude)
    # it is n_events_max at alt = 90 deg and n_events_max/2 at alt = 0 deg
    n_events_max = Quantity(300., 'Hz') * livetime
    alt_min = Angle(0., 'deg')
    alt_max = Angle(90., 'deg')
    slope = (n_events_max - n_events_max / 2) / (alt_max - alt_min)
    free_term = n_events_max / 2 - slope * alt_min
    # NOTE(review): n_events is an astropy Quantity; it is later passed as
    # `size=` to the samplers, apparently relying on implicit conversion --
    # confirm this works with the pinned numpy/astropy versions.
    n_events = alt * slope + free_term
    # simulate energy
    # the index of `~numpy.random.RandomState.power` has to be
    # positive defined, so it is necessary to translate the (0, 1)
    # interval of the random variable to (emax, e_min) in order to
    # have a decreasing power-law
    e_min = Quantity(0.1, 'TeV')
    e_max = Quantity(100., 'TeV')
    energy = sample_powerlaw(e_min.value, e_max.value, spectral_index,
                             size=n_events, random_state=random_state)
    energy = Quantity(energy, 'TeV')
    E_0 = Quantity(1., 'TeV')  # reference energy for the model
    # define E dependent sigma
    # it is defined via a PL, in order to be log-linear
    # it is equal to the parameter sigma at E max
    # and sigma/2. at E min
    sigma_min = sigma / 2.  # at E min
    sigma_max = sigma  # at E max
    s_index = np.log(sigma_max / sigma_min)
    s_index /= np.log(e_max / e_min)
    s_norm = sigma_min * ((e_min / E_0) ** -s_index)
    # note: `sigma` is rebound here from an Angle to a per-event array
    sigma = s_norm * ((energy / E_0) ** s_index)
    # simulate detx, dety
    detx = Angle(random_state.normal(loc=0, scale=sigma.deg, size=n_events), 'deg')
    dety = Angle(random_state.normal(loc=0, scale=sigma.deg, size=n_events), 'deg')
    # fill events in an event list
    event_list = EventList()
    event_list['DETX'] = detx
    event_list['DETY'] = dety
    event_list['ENERGY'] = energy
    # store important info in header
    event_list.meta['LIVETIME'] = livetime.to('second').value
    event_list.meta['EUNIT'] = str(energy.unit)
    # effective area table
    aeff_table = Table()
    # fill threshold, for now, a default 100 GeV will be set
    # independently of observation parameters
    energy_threshold = Quantity(0.1, 'TeV')
    aeff_table.meta['LO_THRES'] = energy_threshold.value
    aeff_table.meta['name'] = 'EFFECTIVE AREA'
    # convert to BinTableHDU and add necessary comment for the units
    aeff_hdu = table_to_fits_table(aeff_table)
    aeff_hdu.header.comments['LO_THRES'] = '[' + str(energy_threshold.unit) + ']'
    return event_list, aeff_hdu
|
"""Model classes"""
from app.types.User import User
from app.types.Role import Role
from app.types.Machine import Machine
from app.types.Manufacturer import Manufacturer
from app.types.Tournament import Tournament
from app.types.Division import Division
from app.types.Player import Player
from app.types.Entry import Entry
|
# -*- coding: utf-8 -*-
# setup.py
# author : Antoine Passemiers
from setuptools import setup
# Distribution metadata for the `rmm` (realistic mouse movements) package.
setup(
    name='rmm',
    version='1.0.0',
    description='Realistic mouse movements',
    url='https://github.com/AntoinePassemiers/Realistic-Mouse',
    author='Antoine Passemiers',
    author_email='apassemi@ulb.ac.be',
    packages=['rmm'],
    # Bundle the data files shipped under rmm/data/ into the installed package.
    package_data={'rmm': ['data/*.*']})
|
# This program generates for each userid, 5 nearest neighbors, by using collaborative filterting. In particular, the sparse matrix
# of userid/event is generated, then one takes the dot product of this matrix (normalized) with itself.
# This program should load csv files generated by RASparse_rowcol_generator.py (there are 2 files below since I did this in two stages).
from scipy import sparse
import pandas as pd
from scipy.sparse import coo_matrix
import numpy as np
from sklearn.preprocessing import normalize
# Load in all row/column/id entries which will form our sparse matrix.
# (Two CSVs because the data was generated in two stages; duplicates are dropped.)
df_rowcols1 = pd.read_csv('../RA_row_col_id_urlSept25_2.csv', delim_whitespace=True)
df_rowcols2 = pd.read_csv('../RA_row_col_id_urlSept25_2Part2.csv', delim_whitespace=True)
rowcols = [df_rowcols1,df_rowcols2]
df_rowcols = pd.concat(rowcols, ignore_index=True).drop_duplicates()
# Generate sparse userid/event matrix (1.0 marks a user/event interaction).
rows = np.array(df_rowcols['row'])
columns = np.array(df_rowcols['column'])
data = [1.0]*len(columns)
# Shape is hard-coded to the max row/col ids seen in the data (+1 for 0-indexing).
X = coo_matrix((data, (rows,columns)), shape=(75988+1,25022+1))
# Normalize all of the columns
# NOTE(review): axis=1 L2-normalizes *rows* (per-user), not columns -- the
# comment above and the code disagree; confirm which is intended.
X_n = normalize(X, norm='l2', axis=1)
# Take dot product with transpose to generate matrix of user/user similarity.
Y = X_n.dot(X_n.T)
# Output the nearest neighbors (5) for each userid, by taking the top 5 entries from each row in Y.
# NOTE(review): the top 5 will normally include the user itself (self-similarity
# equals 1) -- confirm whether self should be excluded.
print 'n1 n2 n3 n4 n5 row'
for i in range(0, 75988+1):
    # Densify one row at a time to keep memory bounded; argsort ascending, so
    # the last 5 indices are the 5 most similar users.
    row_nn = np.squeeze(np.asarray(Y.getrow(i).todense()))
    nnarr = np.argsort(row_nn)[-5:]
    print nnarr[0], nnarr[1], nnarr[2], nnarr[3], nnarr[4], i
|
from django.db import models
from datetime import datetime
# Create your models here.
class Modulation(models.Model):
    # Human-readable name of a modulation scheme referenced by Sensor/Signal.
    modulation_name = models.CharField(max_length=100)
    def __str__(self):
        return (self.modulation_name)
class Encryption(models.Model):
    # Name of the encryption scheme (e.g. as reported for a wireless network).
    encryption_name = models.CharField(max_length=100)
    # Key size; units not recorded here — presumably bits, confirm with callers.
    key_size = models.IntegerField(default = 0)
    def __str__(self):
        return (self.encryption_name)
class Sensor(models.Model):
    # Physical antenna description and pointing direction (degrees, presumably — confirm).
    antenna_type = models.CharField(max_length = 100)
    antenna_direction = models.IntegerField(default=0)
    # Sensor position; elevation unit is not recorded here.
    latitude = models.FloatField(default=0.0)
    longitude = models.FloatField(default=0.0)
    elevation = models.IntegerField(default=0)
    ssid = models.CharField(max_length = 100)
    # NOTE(review): default=1 assumes an Encryption/Modulation row with pk=1 exists.
    encryption = models.ForeignKey(Encryption, on_delete=models.CASCADE, default=1)
    modulation = models.ForeignKey(Modulation, on_delete=models.CASCADE, default=1)
class ImageModel(models.Model):
    # Uploaded picture; files land under rf/static/pic_folder/ with a placeholder default.
    model_pic = models.ImageField(upload_to = 'rf/static/pic_folder/', default = 'no-img.jpg')
class Device(models.Model):
    device_name = models.CharField(max_length=100)
    #pictureFileName = models.CharField(max_length=100)
    # Falls back to ImageModel pk=1 if the linked image is deleted.
    image = models.ForeignKey(ImageModel, on_delete=models.SET_DEFAULT, default = 1)
    three_dimensional_Object = models.CharField(max_length=100)
    # Factory credentials shipped with the device (stored in clear text here).
    default_username = models.CharField(max_length=100)
    default_password = models.CharField(max_length=100)
    vendor_id = models.CharField(max_length=100)
class Signal(models.Model):
    # One observation of a device by a sensor, with signal metrics at that moment.
    sensor = models.ForeignKey(Sensor, on_delete=models.CASCADE, default=1)
    device = models.ForeignKey(Device, on_delete=models.CASCADE, default=1)
    azimuth = models.FloatField(default=0.0)
    signal_strength = models.IntegerField(default = 0)
    # Sensor position is duplicated here (denormalized) at observation time.
    sensor_latitude = models.FloatField(default=0.0)
    sensor_longitude = models.FloatField(default=0.0)
    modulation = models.ForeignKey(Modulation, on_delete=models.CASCADE, default=1)
    encryption = models.ForeignKey(Encryption, on_delete=models.CASCADE, default=1)
    meter_distance = models.IntegerField(default = 0)
    power = models.IntegerField(default = 0)
    bssid = models.CharField(max_length=100)
    essid = models.CharField(max_length=100)
    apmode = models.CharField(max_length=100)
    # NOTE(review): datetime.now is naive (no timezone) — confirm this is intended.
    date_time = models.DateTimeField(default=datetime.now, blank=True)
|
import unittest
from app.models import Pitch,User
from app import db
class PitchModelTest(unittest.TestCase):
    """Unit tests for the Pitch model and its persistence helpers."""
    def setUp(self):
        # Fresh user and pitch for every test case.
        self.user_manow = User(username = 'manow',password = '1234')
        self.new_pitch = Pitch(name='zee',title='Money',description='moneyreview',user =self.user_manow, category='Finance')
    # def tearDown(self):
    #     Pitch.query.delete()
    #     User.query.delete()
    def test_check_instance_variable(self):
        """Constructor arguments land on the expected attributes."""
        # assertEquals is a deprecated alias (removed in Python 3.12); use assertEqual.
        self.assertEqual(self.new_pitch.name,'zee')
        self.assertEqual(self.new_pitch.title,'Money')
        self.assertEqual(self.new_pitch.description,'moneyreview')
        self.assertEqual(self.new_pitch.category, 'Finance')
        # self.assertEqual(self.new_pitch.user,self.user_manow)
    def test_save_pitch(self):
        """save_pitch persists the pitch so it shows up in query results."""
        self.new_pitch.save_pitch()
        self.assertTrue(len(Pitch.query.all()) >0)
    def test_get_pitch_by_id(self):
        """get_pitches returns at least one pitch for the queried id."""
        self.new_pitch.save_pitch()
        got_pitch = Pitch.get_pitches(12345)
        self.assertTrue(len(got_pitch) > 0)
# _*_ coding: utf-8 _*_
__author__ = 'Melon'
__date__ = "18/07/09"
from flask import Blueprint
# Blueprint for the "home" section of the application.
home = Blueprint("home",__name__)
# Imported last, after `home` exists, so the views module can attach its
# routes to this blueprint without a circular-import failure.
from . import views
'''
Logic:
The function `logic` below is the key component of this snippet.
It takes as input the dimensions m and n and a matrix on which the reduction
is to be performed, and returns that same matrix after the operation
(row echelon form).
'''
# logic takes m, n and a deeply copied matrix l2 as parameters;
# the reduction is performed in place on l2.
def logic(m, n, l2):
    """Reduce the m x n matrix ``l2`` (list of lists of numbers) to row
    echelon form, in place, and return it.

    For each column a pivot row is chosen (swapping in a lower row if the
    current pivot entry is zero), the pivot row is scaled so its pivot
    becomes 1, and the pivot column is eliminated from every row below.

    Bug fix: the original version reused a single loop variable ``j``
    across unrelated loops (so it carried a stale value between passes),
    normalized only part of the pivot row, and searched for a swap row
    with the wrong bound ``range(i+1, n)`` — all of which produced an
    incorrect reduction on most inputs.
    """
    pivot_row = 0
    for col in range(n):
        if pivot_row >= m:
            break  # no rows left to reduce
        # Find a row at or below pivot_row with a nonzero entry in this column.
        swap = None
        for r in range(pivot_row, m):
            if l2[r][col] != 0:
                swap = r
                break
        if swap is None:
            continue  # column is all zeros from pivot_row down; move on
        l2[pivot_row], l2[swap] = l2[swap], l2[pivot_row]
        # Scale the pivot row so the pivot entry becomes 1.
        pivot = l2[pivot_row][col]
        for c in range(col, n):
            l2[pivot_row][c] = l2[pivot_row][c] / pivot
        # Eliminate this column from every row below the pivot row.
        for r in range(pivot_row + 1, m):
            factor = l2[r][col]
            if factor != 0:
                for c in range(col, n):
                    l2[r][c] = l2[r][c] - factor * l2[pivot_row][c]
        pivot_row += 1
    return l2
# returning l2 after operation as echelon form matrix
|
import streamlit as st
import pandas as pd
import numpy as np
import os
import joblib
from PIL import Image
from titanic_req.titanic_model import titanic
def titanic_data(sidebar_slots, predict_button):
    """Render Titanic survival predictions for both fitted models.

    Collects the passenger details through the titanic model's own input
    widgets and, once the predict button is pressed, shows one verdict per
    model (decision tree, then SVM).
    """
    model = titanic()
    tree_pred, svm_pred, passenger = model.input(sidebar_slots, predict_button)
    if not predict_button:
        return
    # Same verdict template for both models; prediction 0 means "did not survive".
    for header, prediction in (("DECISION TREE PREDICTION", tree_pred),
                               ("SUPPORT VECTOR MACHINE PREDICTION", svm_pred)):
        if prediction[0] == 0:
            verdict = f"{passenger} unfortunately will **_not_ Survive**"
        else:
            verdict = f"{passenger} **_will_ Survive**"
        st.subheader(header)
        st.markdown(verdict)
def cars_prediction(mainwindow_slots, sidebar_slots, predict_button):
    """Render the cars-evaluation page and, on demand, a decision-tree prediction.

    mainwindow_slots is unused here but kept for interface parity with the
    other page handlers; sidebar_slots is a sequence of streamlit widgets
    hosting the inputs; predict_button is truthy when the user asked for a
    prediction.
    """
    # Collect the raw user inputs from the sidebar widgets.
    buying = sidebar_slots[0].selectbox("Buying Value", options = ["vhigh", "low", "high", "med"])
    maint = sidebar_slots[1].selectbox("Maintanance", options = ["vhigh", "low", "high", "med"])
    doors = sidebar_slots[2].number_input("Number of Doors", min_value = 2, max_value = 5, value = 4, step = 1)
    persons = sidebar_slots[3].selectbox("Number of Persons", options = [2, 4, 5])
    lug_boot = sidebar_slots[4].selectbox("Lugguage Boot Space", options = ["big", "small", "med"])
    safety = sidebar_slots[5].selectbox("Safety", options = ["low", "high", "med"])
    dat = pd.DataFrame({"buying" : buying,
                        "maint" : maint,
                        "doors" : doors,
                        "persons" : persons,
                        "lug_boot" : lug_boot,
                        "safety" : safety}, index = [0])
    # Encode the answers the way the model was trained; values with no
    # mapping (doors/persons 2 and 3) pass through unchanged, exactly as the
    # original chain of conditionals did.
    price_codes = {"vhigh": 3, "low": 2, "high": 1, "med": 0}
    buying = price_codes.get(buying, buying)
    maint = price_codes.get(maint, maint)
    doors = {4: 1, 5: 0}.get(doors, doors)
    persons = {4: 1, 5: 0}.get(persons, persons)
    lug_boot = {"big": 2, "small": 1, "med": 0}.get(lug_boot, lug_boot)
    safety = {"low": 2, "high": 1, "med": 0}.get(safety, safety)
    st.write("""
    # Cars Evaluation
    """)
    image_path = os.path.join(os.getcwd(), "support_models", "images", "cars.jpg")
    banner = Image.open(image_path)
    st.image(banner, use_column_width = True)
    st.write("Enter the details of your car to view the predictions")
    if predict_button:
        st.subheader("User Input features")
        st.write(dat)
    arr = np.array([buying, maint, doors, persons, lug_boot, safety])
    # The serialized model lives next to this source file.
    file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "dtree_cars.sav")
    dtree_cars = joblib.load(file_path)
    if predict_button:
        st.write("""
        ## Decision Tree Prediction
        """)
        prediction_cars = dtree_cars.predict(arr.reshape(1,-1))
        # BUG FIX: previously every class rendered as "***Un Acceptable***"
        # (copy-paste in the conditional chain); map each model label to its
        # own verdict instead.  Unknown labels leave the sentence unfinished,
        # as before.
        verdicts = {"unacc": "***Un Acceptable***",
                    "acc": "***Acceptable***",
                    "good": "***Good***",
                    "v-good": "***Very Good***"}
        message = "### This Car is " + verdicts.get(prediction_cars[0], "")
        st.write(message)
# coding=utf-8
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm import scoped_session
from sqlalchemy.schema import Index
import DBBase
from FlightFixInfoModel import FlightFixInfo
from CompanyInfoModel import CompanyInfo
from CompanyInfoAllModel import CompanyInfoAll
from FlightRealtimeInfoModel import FlightRealtimeInfo
from AirportInfoModel import AirportInfo
from AirlineInfoModel import AirlineInfo
from CityInfoModel import CityInfo
from FollowedInfoModel import FollowedInfo
from PunctualityInfoModel import PunctualityInfo
from VersionInfoModel import VersionInfo
import traceback
from tools import LogUtil
import json
import datetime
def init(db_user, db_passwd, db_host, db_name):
    """Create the global MySQL engine and scoped session, declare the
    composite index used by the follow/push lookups, and create all tables.

    Must be called once before constructing DB().  Side effects: mutates the
    module-level DBBase.Engine and DBBase.Session.
    """
    # pool_recycle=-1 disables connection recycling; charset=utf8 for Chinese text.
    DBBase.Engine = create_engine("mysql://%s:%s@%s/%s?charset=utf8" %(db_user, db_passwd, db_host, db_name), pool_recycle = -1, echo = False)
    # Composite index covering the unique key used when looking up a follow record.
    Index('ix_followedInfo_col23456',
            FollowedInfo.device_token,
            FollowedInfo.flight_no,
            FollowedInfo.takeoff_airport,
            FollowedInfo.arrival_airport,
            FollowedInfo.schedule_takeoff_date)
    DBBase.Base.metadata.create_all(DBBase.Engine)
    # expire_on_commit=False so returned ORM objects stay usable after commit.
    DBBase.Session = scoped_session(sessionmaker(bind = DBBase.Engine, expire_on_commit = False))
class DB:
    """Data-access facade over the flight-data SQLAlchemy models.

    Every method follows the same pattern: run the query/insert inside
    ``try`` and, on any exception, log the traceback, roll back the scoped
    session, dispose the engine's connection pool, and return None.
    Callers must therefore treat a None result as "database error".
    init() must have been called before instantiating this class.
    """
    def __init__(self):
        # Shared application logger; all error reporting goes through it.
        self.logger = LogUtil.Logging.getLogger()
    ##############################################################################################
    # FlightFixInfo
    def getFlightFixInfoByFlightNO(self, flight_no):
        """Return fixed-schedule dicts for flights matching *flight_no* (LIKE match)."""
        try:
            ret = FlightFixInfo.findLike(flight_no)
            return self.__convertFixInfo(ret)
        except:
            msg = traceback.format_exc()
            self.logger.error(msg)
            DBBase.Session.rollback()
            DBBase.Engine.dispose()
            return None
    def getFlightFixInfoByRoute(self, takeoff_airport, arrival_airport, company):
        """Return fixed-schedule dicts for a route; company 'all' means any carrier."""
        try:
            ret = []
            if company == 'all':
                ret = FlightFixInfo.find(takeoff_airport = takeoff_airport, arrival_airport = arrival_airport)
            else:
                ret = FlightFixInfo.find(takeoff_airport = takeoff_airport, arrival_airport = arrival_airport, company = company)
            return self.__convertFixInfo(ret)
        except:
            msg = traceback.format_exc()
            self.logger.error(msg)
            DBBase.Session.rollback()
            DBBase.Engine.dispose()
            return None
    def getFlightFixInfoByUniq(self, flight_no, takeoff_airport, arrival_airport):
        """Return fixed-schedule dicts for the unique (flight_no, route) key."""
        try:
            ret = FlightFixInfo.find(flight_no = flight_no, takeoff_airport = takeoff_airport, arrival_airport = arrival_airport)
            return self.__convertFixInfo(ret)
        except:
            msg = traceback.format_exc()
            self.logger.error(msg)
            DBBase.Session.rollback()
            DBBase.Engine.dispose()
            return None
    def getOverdayRoute(self, cur_date):
        """Return [{takeoff_airport, arrival_airport}] for routes spanning *cur_date*."""
        try:
            ret = FlightFixInfo.getOverdayRoute(cur_date)
            route_list = []
            for one in ret:
                one_hash = {}
                one_hash['takeoff_airport'] = one.takeoff_airport
                one_hash['arrival_airport'] = one.arrival_airport
                route_list.append(one_hash)
            return route_list
        except:
            msg = traceback.format_exc()
            self.logger.error(msg)
            DBBase.Session.rollback()
            DBBase.Engine.dispose()
            return None
    def __convertFixInfo(self, data):
        """Convert FlightFixInfo ORM rows into plain dicts (schedule JSON-decoded)."""
        flight_info_list = []
        for one in data:
            one_hash = {}
            one_hash['flight_no'] = one.flight_no
            one_hash['company'] = one.company
            one_hash['schedule_takeoff_time'] = one.schedule_takeoff_time
            one_hash['schedule_arrival_time'] = one.schedule_arrival_time
            one_hash['takeoff_city'] = one.takeoff_city
            one_hash['takeoff_airport'] = one.takeoff_airport
            one_hash['takeoff_airport_building'] = one.takeoff_airport_building
            one_hash['arrival_city'] = one.arrival_city
            one_hash['arrival_airport'] = one.arrival_airport
            one_hash['arrival_airport_building'] = one.arrival_airport_building
            one_hash['plane_model'] = one.plane_model
            one_hash['mileage'] = one.mileage
            one_hash['stopover'] = one.stopover
            one_hash['schedule'] = json.loads(one.schedule)
            one_hash['valid_date_from'] = one.valid_date_from
            one_hash['valid_date_to'] = one.valid_date_to
            flight_info_list.append(one_hash)
        return flight_info_list
    def getFlightList(self):
        """Return a dict per stored flight with its number and route only."""
        try:
            ret = FlightFixInfo.find()
            flight_info_list = []
            for one in ret:
                one_hash = {}
                one_hash['flight_no'] = one.flight_no
                one_hash['takeoff_airport'] = one.takeoff_airport
                one_hash['arrival_airport'] = one.arrival_airport
                flight_info_list.append(one_hash)
            return flight_info_list
        except:
            msg = traceback.format_exc()
            self.logger.error(msg)
            DBBase.Session.rollback()
            DBBase.Engine.dispose()
            return None
    def getRandomFlightList(self, cur_time):
        """Return flight numbers currently in the air around *cur_time*."""
        try:
            ret = FlightFixInfo.getNowFlightNO(cur_time)
            flight_list = []
            for one in ret:
                flight_list.append(one[0])
            return flight_list
        except:
            msg = traceback.format_exc()
            self.logger.error(msg)
            DBBase.Session.rollback()
            DBBase.Engine.dispose()
            return None
    def putFlightFixInfo(self, flight_info_list):
        """Insert a batch of fixed-schedule rows, resolving airport/city names to shorts."""
        try:
            for one in flight_info_list:
                flight_info = FlightFixInfo()
                flight_info.flight_no = one['flight_no']
                flight_info.company = one['company']
                flight_info.schedule_takeoff_time = one['schedule_takeoff_time']
                flight_info.schedule_arrival_time = one['schedule_arrival_time']
                # Incoming names are Chinese full names; store the short codes.
                flight_info.takeoff_airport = self.getAirportShort(one['takeoff_airport'].encode("utf-8"), 'zh')
                flight_info.takeoff_city = self.getAirportCity(flight_info.takeoff_airport)
                flight_info.takeoff_airport_building = one['takeoff_airport_building']
                flight_info.arrival_airport = self.getAirportShort(one['arrival_airport'].encode("utf-8"), 'zh')
                flight_info.arrival_city = self.getAirportCity(flight_info.arrival_airport)
                flight_info.arrival_airport_building = one['arrival_airport_building']
                flight_info.plane_model = one['plane_model'].encode("utf-8")
                flight_info.mileage = one['mileage']
                flight_info.stopover = one['stopover']
                flight_info.schedule = json.dumps(one['schedule'])
                flight_info.valid_date_from = one['valid_date_from']
                flight_info.valid_date_to = one['valid_date_to']
                flight_info.add()
            self.logger.info("%s rows is added" % (str(len(flight_info_list))))
        except:
            msg = traceback.format_exc()
            self.logger.error(msg)
            DBBase.Session.rollback()
            DBBase.Engine.dispose()
            return None
    def checkRouteInfo(self):
        """Return every known route with its flight count ('ZERO' when empty)."""
        try:
            ret = FlightFixInfo.getAllRoute()
            route_info = []
            for one in ret:
                route = {}
                route["takeoff_airport"] = one.takeoff_airport
                route["arrival_airport"] = one.arrival_airport
                count = len(FlightFixInfo.find(takeoff_airport = one.takeoff_airport, arrival_airport = one.arrival_airport))
                if count == 0:
                    route["flight_count"] = "ZERO"
                else:
                    route["flight_count"] = count
                route_info.append(route)
            return route_info
        except:
            msg = traceback.format_exc()
            self.logger.error(msg)
            DBBase.Session.rollback()
            DBBase.Engine.dispose()
            return None
    def deleteRoute(self, takeoff_airport, arrival_airport):
        """Delete every fixed-schedule row for the given route; 0 on success."""
        try:
            self.logger.info("delete route %s %s" % (takeoff_airport, arrival_airport))
            ret = FlightFixInfo.findDelete(takeoff_airport = takeoff_airport, arrival_airport = arrival_airport)
            self.logger.info("%s rows is deleted" % (str(ret)))
            return 0
        except:
            msg = traceback.format_exc()
            self.logger.error(msg)
            DBBase.Session.rollback()
            DBBase.Engine.dispose()
            return None
    # FlightFixInfo
    ##############################################################################################
    ##############################################################################################
    # FlightRealtimeInfo
    def getFlightRealtimeInfo(self, flight):
        """Merge realtime state (if found) into the *flight* dict and return it."""
        try:
            ret = FlightRealtimeInfo.find(flight_no = flight['flight_no'],
                                          takeoff_airport = flight['takeoff_airport'],
                                          arrival_airport = flight['arrival_airport'],
                                          schedule_takeoff_date = flight['schedule_takeoff_date'])
            if len(ret) >= 1:
                one = ret[0]
                flight['flight_state'] = one.flight_state
                flight['estimate_takeoff_time'] = one.estimate_takeoff_time
                flight['actual_takeoff_time'] = one.actual_takeoff_time
                flight['estimate_arrival_time'] = one.estimate_arrival_time
                flight['actual_arrival_time'] = one.actual_arrival_time
                flight['full_info'] = one.full_info
            return flight
        except:
            msg = traceback.format_exc()
            self.logger.error(msg)
            DBBase.Session.rollback()
            DBBase.Engine.dispose()
            return None
    def getRandomFlight(self):
        """Return one already-arrived flight (flight_no + date), or None if none."""
        try:
            ret = FlightRealtimeInfo.getOneArrivedFlightNO()
            flight = None
            if ret is not None:
                flight = {}
                flight['flight_no'] = ret.flight_no
                flight['schedule_takeoff_date'] = ret.schedule_takeoff_date
            return flight
        except:
            msg = traceback.format_exc()
            self.logger.error(msg)
            DBBase.Session.rollback()
            DBBase.Engine.dispose()
            return None
    def getLivedFlight(self):
        """Return flights from today and yesterday that are still in progress (full_info == 0)."""
        try:
            day = []
            day.append(datetime.datetime.now().strftime("%Y-%m-%d"))
            day.append((datetime.datetime.now() - datetime.timedelta(1)).strftime("%Y-%m-%d"))
            lived_flight_list = []
            for one in day:
                flights = FlightRealtimeInfo.find(full_info = 0, schedule_takeoff_date = one)
                for flight in flights:
                    lived_flight = {}
                    lived_flight['flight_no'] = flight.flight_no
                    lived_flight['schedule_takeoff_time'] = flight.schedule_takeoff_time
                    lived_flight['schedule_arrival_time'] = flight.schedule_arrival_time
                    lived_flight['takeoff_airport'] = flight.takeoff_airport
                    lived_flight['arrival_airport'] = flight.arrival_airport
                    lived_flight['schedule_takeoff_date'] = flight.schedule_takeoff_date
                    lived_flight['flight_state'] = flight.flight_state
                    lived_flight['estimate_takeoff_time'] = flight.estimate_takeoff_time
                    lived_flight['actual_takeoff_time'] = flight.actual_takeoff_time
                    lived_flight['estimate_arrival_time'] = flight.estimate_arrival_time
                    lived_flight['actual_arrival_time'] = flight.actual_arrival_time
                    lived_flight['full_info'] = flight.full_info
                    lived_flight_list.append(lived_flight)
            return lived_flight_list
        except:
            msg = traceback.format_exc()
            self.logger.error(msg)
            DBBase.Session.rollback()
            DBBase.Engine.dispose()
            return None
    def putFlightRealtimeInfo(self, one):
        """Upsert the realtime row keyed by (flight_no, route, date)."""
        try:
            flight_info = FlightRealtimeInfo.find(flight_no = one['flight_no'], takeoff_airport = one['takeoff_airport'], arrival_airport = one['arrival_airport'], schedule_takeoff_date = one['schedule_takeoff_date'])
            if len(flight_info) == 0:
                flight_info = FlightRealtimeInfo()
            else:
                flight_info = flight_info[0]
            flight_info.flight_no = one['flight_no']
            flight_info.flight_state = one['flight_state']
            flight_info.schedule_takeoff_time = one['schedule_takeoff_time']
            flight_info.estimate_takeoff_time = one['estimate_takeoff_time']
            flight_info.actual_takeoff_time = one['actual_takeoff_time']
            flight_info.schedule_arrival_time = one['schedule_arrival_time']
            flight_info.estimate_arrival_time = one['estimate_arrival_time']
            flight_info.actual_arrival_time = one['actual_arrival_time']
            flight_info.schedule_takeoff_date = one['schedule_takeoff_date']
            flight_info.takeoff_airport = one['takeoff_airport']
            flight_info.arrival_airport = one['arrival_airport']
            # "--:--" is the feed's placeholder for "not yet landed".
            if one['actual_arrival_time'] != "--:--":
                flight_info.full_info = 1
            flight_info.add()
        except:
            msg = traceback.format_exc()
            self.logger.error(msg)
            DBBase.Session.rollback()
            DBBase.Engine.dispose()
            return None
    # FlightRealtimeInfo
    ##############################################################################################
    ##############################################################################################
    # FollowedInfo
    def getPushCandidate(self, flight):
        """Return device tokens (with push prefs) following *flight* with push on."""
        try:
            ret = FollowedInfo.findAll(push_switch = 'on',
                                       flight_no = flight['flight_no'],
                                       takeoff_airport = flight['takeoff_airport'],
                                       arrival_airport = flight['arrival_airport'],
                                       schedule_takeoff_date = flight['schedule_takeoff_date'])
            push_list = []
            for one in ret:
                one_hash = {}
                one_hash['device_token'] = one.device_token
                one_hash['push_switch'] = one.push_switch
                one_hash['push_info'] = json.loads(one.push_info)
                push_list.append(one_hash)
            return push_list
        except:
            msg = traceback.format_exc()
            self.logger.error(msg)
            DBBase.Session.rollback()
            DBBase.Engine.dispose()
            return None
    def getPushInfoList(self, device_token, push_switch):
        """List follow records filtered by token and/or switch; "" means no filter."""
        try:
            ret = []
            if device_token != "" and push_switch != "":
                ret = FollowedInfo.findAll(device_token = device_token, push_switch = push_switch)
            elif device_token == "" and push_switch == "":
                ret = FollowedInfo.findAll()
            elif device_token != "" and push_switch == "":
                ret = FollowedInfo.findAll(device_token = device_token)
            elif device_token == "" and push_switch != "":
                ret = FollowedInfo.findAll(push_switch = push_switch)
            push_list = []
            for one in ret:
                one_hash = {}
                one_hash['device_token'] = one.device_token
                one_hash['flight'] = "[%s][%s][%s][%s]" % (one.flight_no, one.takeoff_airport, one.arrival_airport, one.schedule_takeoff_date)
                one_hash['push_switch'] = one.push_switch
                one_hash['push_info'] = json.loads(one.push_info)
                push_list.append(one_hash)
            return push_list
        except:
            msg = traceback.format_exc()
            self.logger.error(msg)
            DBBase.Session.rollback()
            DBBase.Engine.dispose()
            return None
    def putFollowedInfo(self, device_token, followed_list):
        """Record each followed flight for a device, skipping ones already stored.

        The token arrives formatted like "<xxxx xxxx ...>"; the angle brackets
        and spaces are stripped before storage.
        """
        try:
            for one in followed_list:
                ret = FollowedInfo.findOne(device_token = ''.join(device_token.strip("<>").split(" ")),
                                           flight_no = one['flight_no'],
                                           takeoff_airport = one['takeoff_airport'],
                                           arrival_airport = one['arrival_airport'],
                                           schedule_takeoff_date = one['schedule_takeoff_date'])
                if ret is None:
                    info = FollowedInfo()
                    info.device_token = ''.join(device_token.strip("<>").split(" "))
                    info.flight_no = one['flight_no']
                    info.takeoff_airport = one['takeoff_airport']
                    info.arrival_airport = one['arrival_airport']
                    info.schedule_takeoff_date = one['schedule_takeoff_date']
                    info.add()
        except:
            msg = traceback.format_exc()
            self.logger.error(msg)
            DBBase.Session.rollback()
            DBBase.Engine.dispose()
            return None
    def putPushInfo(self, push_candidate, flight):
        """Update the stored push switch/state for one follower of *flight*."""
        try:
            ret = FollowedInfo.findOne(device_token = push_candidate['device_token'],
                                       flight_no = flight['flight_no'],
                                       takeoff_airport = flight['takeoff_airport'],
                                       arrival_airport = flight['arrival_airport'],
                                       schedule_takeoff_date = flight['schedule_takeoff_date'])
            if ret is not None:
                ret.push_switch = push_candidate['push_switch']
                ret.push_info = json.dumps(push_candidate['push_info'])
                ret.add()
        except:
            msg = traceback.format_exc()
            self.logger.error(msg)
            DBBase.Session.rollback()
            DBBase.Engine.dispose()
            return None
    def deleteFollowedInfo(self, device_token, followed_list):
        """Remove each listed followed flight for the (normalized) device token."""
        try:
            for one in followed_list:
                ret = FollowedInfo.findOne(device_token = ''.join(device_token.strip("<>").split(" ")),
                                           flight_no = one['flight_no'],
                                           takeoff_airport = one['takeoff_airport'],
                                           arrival_airport = one['arrival_airport'],
                                           schedule_takeoff_date = one['schedule_takeoff_date'])
                if ret is not None:
                    ret.delete()
        except:
            msg = traceback.format_exc()
            self.logger.error(msg)
            DBBase.Session.rollback()
            DBBase.Engine.dispose()
            return None
    # FollowedInfo
    ##############################################################################################
    ##############################################################################################
    # CompanyInfo
    def getCompanyList(self, lang):
        """Return {short, full} per airline; only lang == 'zh' fills 'full'."""
        try:
            ret = CompanyInfo.find()
            company_info_list = []
            for one in ret:
                one_hash = {}
                one_hash['short'] = one.company_short
                if lang == 'zh':
                    one_hash['full'] = one.company_zh
                company_info_list.append(one_hash)
            return company_info_list
        except:
            msg = traceback.format_exc()
            self.logger.error(msg)
            DBBase.Session.rollback()
            DBBase.Engine.dispose()
            return None
    def putCompany(self, company_list):
        """Insert (short, zh, en, state) tuples into CompanyInfoAll, skipping known shorts."""
        try:
            for one in company_list:
                ret = CompanyInfoAll.find(company_short = one[0])
                if len(ret) != 0:
                    continue
                company = CompanyInfoAll()
                company.company_short = one[0]
                company.company_zh = one[1]
                company.company_en = one[2]
                company.state = one[3]
                company.add()
        except:
            msg = traceback.format_exc()
            self.logger.error(msg)
            DBBase.Session.rollback()
            DBBase.Engine.dispose()
            return None
    # CompanyInfo
    ##############################################################################################
    ##############################################################################################
    # CityInfo
    def getCityList(self, lang):
        """Return all Chinese city names.  NOTE(review): *lang* is ignored here — confirm."""
        try:
            ret = CityInfo.find()
            city_list = []
            for one in ret:
                city_list.append(one.city_zh)
            return city_list
        except:
            msg = traceback.format_exc()
            self.logger.error(msg)
            DBBase.Session.rollback()
            DBBase.Engine.dispose()
            return None
    def getCityName(self, short, lang):
        """Return the city name for a short code; "" when not 'zh' or not unique."""
        try:
            ret = CityInfo.find(city_short = short)
            if len(ret) == 1:
                if lang == 'zh':
                    return ret[0].city_zh
            else:
                return ""
        except:
            msg = traceback.format_exc()
            self.logger.error(msg)
            DBBase.Session.rollback()
            DBBase.Engine.dispose()
            return None
    def getCityCode(self, short):
        """Return the city code for a short code, or None when not unique."""
        try:
            ret = CityInfo.find(city_short = short)
            if len(ret) == 1:
                return ret[0].city_code
            else:
                return None
        except:
            msg = traceback.format_exc()
            self.logger.error(msg)
            DBBase.Session.rollback()
            DBBase.Engine.dispose()
            return None
    def getCityShort(self, name, lang):
        """Return the short code for a (Chinese) city name; "" when not unique."""
        try:
            ret = None
            if lang == 'zh':
                ret = CityInfo.find(city_zh = name)
            if len(ret) == 1:
                return ret[0].city_short
            else:
                return ""
        except:
            msg = traceback.format_exc()
            self.logger.error(msg)
            DBBase.Session.rollback()
            DBBase.Engine.dispose()
            return None
    # CityInfo
    ##############################################################################################
    ##############################################################################################
    # AirlineInfo
    def getAirlineList(self):
        """Return {takeoff_city, arrival_city} for every stored airline route."""
        try:
            ret = AirlineInfo.find()
            airline_list = []
            for one in ret:
                hash = {} #@ReservedAssignment
                hash['takeoff_city'] = one.takeoff_city
                hash['arrival_city'] = one.arrival_city
                airline_list.append(hash)
            return airline_list
        except:
            msg = traceback.format_exc()
            self.logger.error(msg)
            DBBase.Session.rollback()
            DBBase.Engine.dispose()
            return None
    def putAirline(self, airline_list):
        """Insert routes given as "takeoffcity-arrivalcity" strings."""
        try:
            for one in airline_list:
                ret = one.split('-')
                airline = AirlineInfo()
                airline.takeoff_city = ret[0]
                airline.arrival_city = ret[1]
                airline.add()
        except:
            msg = traceback.format_exc()
            self.logger.error(msg)
            DBBase.Session.rollback()
            DBBase.Engine.dispose()
            return None
    # AirlineInfo
    ##############################################################################################
    ##############################################################################################
    # AirportInfo
    def getAirportList(self, lang):
        """Return {short, full, city} per airport; only lang == 'zh' fills names."""
        try:
            ret = AirportInfo.find()
            airport_info_list = []
            for one in ret:
                one_hash = {}
                one_hash['short'] = one.airport_short
                if lang == 'zh':
                    one_hash['full'] = one.airport_zh
                    one_hash['city'] = self.getCityName(one.city, lang)
                airport_info_list.append(one_hash)
            return airport_info_list
        except:
            msg = traceback.format_exc()
            self.logger.error(msg)
            DBBase.Session.rollback()
            DBBase.Engine.dispose()
            return None
    def getAirportName(self, short, lang):
        """Return the airport name for a short code; "" when not 'zh' or not unique."""
        try:
            ret = AirportInfo.find(airport_short = short)
            if len(ret) == 1:
                if lang == 'zh':
                    return ret[0].airport_zh
            else:
                return ""
        except:
            msg = traceback.format_exc()
            self.logger.error(msg)
            DBBase.Session.rollback()
            DBBase.Engine.dispose()
            return None
    def getAirportShort(self, name, lang):
        """Return the short code for a (Chinese) airport name; "" when not unique."""
        try:
            ret = None
            if lang == 'zh':
                ret = AirportInfo.find(airport_zh = name)
            if len(ret) == 1:
                return ret[0].airport_short
            else:
                return ""
        except:
            msg = traceback.format_exc()
            self.logger.error(msg)
            DBBase.Session.rollback()
            DBBase.Engine.dispose()
            return None
    def getAirportCity(self, short):
        """Return the city short code an airport belongs to, or None when not unique."""
        try:
            ret = AirportInfo.find(airport_short = short)
            if len(ret) == 1:
                return ret[0].city
            else:
                return None
        except:
            msg = traceback.format_exc()
            self.logger.error(msg)
            DBBase.Session.rollback()
            DBBase.Engine.dispose()
            return None
    def putAirportInfo(self, flight_info_list):
        """Insert any unseen takeoff/arrival airports referenced by a flight batch."""
        try:
            for one in flight_info_list:
                ret = AirportInfo.find(airport_short = one['takeoff_airport_short'])
                if len(ret) == 0:
                    airport_info = AirportInfo()
                    airport_info.airport_short = one['takeoff_airport_short']
                    airport_info.airport_zh = one['takeoff_airport']
                    airport_info.add()
                ret = AirportInfo.find(airport_short = one['arrival_airport_short'])
                if len(ret) == 0:
                    airport_info = AirportInfo()
                    airport_info.airport_short = one['arrival_airport_short']
                    airport_info.airport_zh = one['arrival_airport']
                    airport_info.add()
        except:
            msg = traceback.format_exc()
            self.logger.error(msg)
            DBBase.Session.rollback()
            DBBase.Engine.dispose()
            return None
    # AirportInfo
    ##############################################################################################
    ##############################################################################################
    # CompanyInfo
    def getCompanyName(self, short, lang):
        """Return the airline name for a short code; "" when not 'zh' or not unique."""
        try:
            ret = CompanyInfo.find(company_short = short)
            if len(ret) == 1:
                if lang == 'zh':
                    return ret[0].company_zh
            else:
                return ""
        except:
            msg = traceback.format_exc()
            self.logger.error(msg)
            DBBase.Session.rollback()
            DBBase.Engine.dispose()
            return None
    # CompanyInfo
    ##############################################################################################
    ##############################################################################################
    # PunctualityInfo
    def getPunctualityInfo(self, flight_no, takeoff_airport, arrival_airport):
        """Return on-time statistics for a flight/route, or None when absent."""
        try:
            ret = PunctualityInfo.find(flight_no = flight_no, takeoff_airport = takeoff_airport, arrival_airport = arrival_airport)
            punctuality_info = None
            if len(ret) == 1:
                punctuality_info = {}
                punctuality_info['on_time'] = ret[0].on_time
                punctuality_info['half_hour_late'] = ret[0].half_hour_late
                punctuality_info['one_hour_late'] = ret[0].one_hour_late
                punctuality_info['more_than_one_hour_late'] = ret[0].more_than_one_hour_late
                punctuality_info['cancel'] = ret[0].cancel
            return punctuality_info
        except:
            msg = traceback.format_exc()
            self.logger.error(msg)
            DBBase.Session.rollback()
            DBBase.Engine.dispose()
            return None
    def putPunctualityInfo(self, flight, punctualit_info):
        """Insert punctuality statistics for a flight/route unless already stored."""
        try:
            ret = PunctualityInfo.find(flight_no = flight['flight_no'], takeoff_airport = flight['takeoff_airport'], arrival_airport = flight['arrival_airport'])
            if len(ret) == 0:
                info = PunctualityInfo()
                info.flight_no = flight['flight_no']
                info.takeoff_airport = flight['takeoff_airport']
                info.arrival_airport = flight['arrival_airport']
                info.on_time = punctualit_info['on_time']
                info.half_hour_late = punctualit_info['half_hour_late']
                info.one_hour_late = punctualit_info['one_hour_late']
                info.more_than_one_hour_late = punctualit_info['more_than_one_hour_late']
                info.cancel = punctualit_info['cancel']
                info.add()
        except:
            msg = traceback.format_exc()
            self.logger.error(msg)
            DBBase.Session.rollback()
            DBBase.Engine.dispose()
            return None
    # PunctualityInfo
    ##############################################################################################
    ##############################################################################################
    # VersionInfo
    def getVersionInfoList(self):
        """Return every app version row, changelog newlines converted to <br>."""
        try:
            ret = VersionInfo.findAll()
            version_list = []
            for one in ret:
                one_hash = {}
                one_hash['id'] = one.id
                one_hash['version'] = one.version
                one_hash['ipa'] = one.ipa
                one_hash['changelog'] = one.changelog.replace("\n", "<br>")
                version_list.append(one_hash)
            return version_list
        except:
            msg = traceback.format_exc()
            self.logger.error(msg)
            DBBase.Session.rollback()
            DBBase.Engine.dispose()
            return None
    def putVersionInfo(self, version, ipa, changelog):
        """Store a new app version record (version string, ipa url, changelog)."""
        try:
            info = VersionInfo()
            info.version = version
            info.ipa = ipa
            info.changelog = changelog
            info.add()
        except:
            msg = traceback.format_exc()
            self.logger.error(msg)
            DBBase.Session.rollback()
            DBBase.Engine.dispose()
            return None
    def getNewestVersionInfo(self):
        """Return the most recent version record as a dict ({} when none)."""
        try:
            ret = VersionInfo.findNewest()
            one_hash = {}
            if ret is not None:
                one_hash['version'] = ret.version
                one_hash['ipa'] = ret.ipa
                one_hash['changelog'] = ret.changelog
            return one_hash
        except:
            msg = traceback.format_exc()
            self.logger.error(msg)
            DBBase.Session.rollback()
            DBBase.Engine.dispose()
            return None
    # VersionInfo
    ##############################################################################################
    ##############################################################################################
    # Test
    def adjustFlightFixInfo(self):
        """One-off maintenance helper: backfill CityInfo.city_code from a local file.

        Reads key=value pairs from ../../test/citycode and writes the code onto
        any CityInfo row whose Chinese name matches.  The triple-quoted block
        below is an older (disabled) airport-building cleanup pass.
        """
        try:
            city_code_file = open("../../test/citycode", "r")
            hash = {} #@ReservedAssignment
            for line in city_code_file:
                line = line.strip()
                item = line.split('=')
                if len(item) == 2:
                    hash[item[1].decode("utf-8")] = item[0]
            ret = CityInfo.find()
            for one in ret:
                if one.city_zh in hash:
                    one.city_code = hash[one.city_zh]
                    one.add()
            '''
            count = 0
            for one in ret:
                count += 1
                print count
                index = one.takeoff_airport.find('A')
                if index != -1:
                    one.takeoff_airport = one.takeoff_airport[:index]
                    one.takeoff_airport_building = 'A'
                index = one.takeoff_airport.find('B')
                if index != -1:
                    one.takeoff_airport = one.takeoff_airport[:index]
                    one.takeoff_airport_building = 'B'
                index = one.arrival_airport.find('A')
                if index != -1:
                    one.arrival_airport = one.arrival_airport[:index]
                    one.arrival_airport_building = 'A'
                index = one.arrival_airport.find('B')
                if index != -1:
                    one.arrival_airport = one.arrival_airport[:index]
                    one.arrival_airport_building = 'B'
                one.add()
            '''
        except:
            msg = traceback.format_exc()
            self.logger.error(msg)
            DBBase.Session.rollback()
            DBBase.Engine.dispose()
            return None
    # Test
    ##############################################################################################
if __name__ == "__main__":
    # Ad-hoc manual test: connect to the local dev database and drop one route.
    init("root", "root", "127.0.0.1", "fd_db")
    db = DB()
    ret = db.deleteRoute("PEK", "HGH")
|
# Alex Bello
# 2/11/2020
# The list of methods used for the Operation project.
def sums(lists):
    """Return the sum of every element in *lists* (0 for an empty list)."""
    total = 0
    for value in lists:
        total = total + value
    return total
def product(lists):
    """Return the product of all numbers in *lists*.

    An empty list yields 1 (the multiplicative identity).
    """
    # Bug fix: the accumulator must start at 1, not 0 — starting at 0 made
    # every result 0 regardless of the input.
    total = 1
    for value in lists:
        total *= value
    return total
def mean(lists):
    """Return the arithmetic mean of the numbers in *lists*."""
    return sum(lists) / len(lists)
def median(lists):
    """Return the median of the numbers in *lists*.

    Works on unsorted input (a sorted copy is taken first). For an
    even-length list, the mean of the two middle values is returned.
    """
    # Bug fixes vs. the original:
    #  - the parity test must look at len(lists), not at the middle index;
    #  - (a + b) / 2 needs parentheses — '+' binds looser than '/';
    #  - the data must be sorted before picking the middle element.
    ordered = sorted(lists)
    mid = (len(ordered) - 1) // 2
    if len(ordered) % 2 == 0:
        return (ordered[mid] + ordered[mid + 1]) / 2
    return ordered[mid]
def mode(lists):
    """Return the set of the most frequently occurring value(s) in *lists*.

    An empty input yields the empty set. Ties all appear in the result.
    """
    # Bug fix: the original nested loops started the inner scan at index 1
    # and double-counted the element at position i, so the tallies (and
    # therefore the reported modes) were wrong. Count each value once.
    counts = {}
    for value in lists:
        counts[value] = counts.get(value, 0) + 1
    if not counts:
        return set()
    best = max(counts.values())
    return {value for value, tally in counts.items() if tally == best}
def big_boi(lists):
    """Return the largest element of *lists* (raises IndexError if empty)."""
    largest = lists[0]
    for value in lists[1:]:
        if value > largest:
            largest = value
    return largest
def small_boi(lists):
    """Return the smallest element of *lists* (raises IndexError if empty)."""
    smallest = lists[0]
    for value in lists[1:]:
        if value < smallest:
            smallest = value
    return smallest
def removal(lists):
    """Return a list of the distinct elements of *lists* (order not guaranteed)."""
    distinct = set(lists)
    return list(distinct)
def odd_times(lists):
    """Return the odd elements of *lists*, preserving their order."""
    picked = []
    for number in lists:
        if number % 2 == 1:
            picked.append(number)
    return picked
def even_times(lists):
    """Return the even elements of *lists*, preserving their order."""
    picked = []
    for number in lists:
        if number % 2 == 0:
            picked.append(number)
    return picked
def tell_me(lists):
    """Prompt the user for an integer and report whether it occurs in *lists*."""
    new_num = int(input("Give me a number boyo and i'll tell you if its in the list or not!"))
    # Membership via 'in' performs the same == comparison as the old loop.
    return new_num in lists
def bonus(lists):
    """Return the second-to-last element of *lists*."""
    second_from_end = -2
    return lists[second_from_end]
|
# Read hours worked and hourly rate, then print the gross pay.
# Hours beyond 40 are paid at time-and-a-half.
strhr = input("Input Hours: ")
hr = float(strhr)
strrate = input("Input Rate: ")
rate = float(strrate)
if hr <= 40:
    pay = hr * rate
else:
    # Overtime: first 40 hours at the normal rate, the rest at 1.5x.
    exhr = hr - 40
    pay = (40 * rate) + (exhr * rate * 1.5)
# Bug fix: the overtime branch printed a bare number while the regular
# branch printed a labelled message — emit one consistent message.
print("Gross pay is :", pay)
|
# Generated by Django 2.0 on 2018-01-04 10:14
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django 2.0 migration (2018-01-04): creates the
    Tyre_without_issue and Tyre_without_reciept tables for the 'tyre' app.

    NOTE(review): auto-generated file — do not hand-edit field definitions.
    """

    dependencies = [
        ('tyre', '0001_initial'),
    ]

    operations = [
        # Tyres tracked without a recorded issue.
        migrations.CreateModel(
            name='Tyre_without_issue',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('From_date', models.DateField()),
                ('To_Date', models.DateField()),
                ('Tyre_no', models.CharField(max_length=20)),
                ('Vehicle_no', models.CharField(max_length=20)),
                ('Out_Date', models.DateField()),
                # NOTE(review): validator message says 'Enter valid salary'
                # for a kilometres field — looks copy-pasted; confirm intent.
                ('Km_Run', models.CharField(max_length=20, validators=[django.core.validators.RegexValidator('^[1-9]\\d*(\\.\\d+)?$', 'Enter valid salary')])),
            ],
        ),
        # Tyres tracked without a receipt ('reciept' spelling is in the schema).
        migrations.CreateModel(
            name='Tyre_without_reciept',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('From_date', models.DateField()),
                ('To_Date', models.DateField()),
                ('Tyre_no', models.CharField(max_length=20)),
                ('Vehicle_no', models.CharField(max_length=20)),
                ('On_Date', models.DateField()),
                ('Km_Run', models.CharField(max_length=20, validators=[django.core.validators.RegexValidator('^[1-9]\\d*(\\.\\d+)?$', 'Enter valid salary')])),
            ],
        ),
    ]
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007, TUBITAK/UEKAE
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version. Please read the COPYING file.
#
import os
import time
import Queue
import threading
import StringIO
import logging
import ldif
import sha
import ajan.config
import ajan.ldaputil
class Timer:
    """Periodic trigger: remembers when it last fired and reports when the
    configured interval has elapsed again."""

    def __init__(self, interval, callable):
        # 'callable' shadows the builtin; the name is kept because callers
        # construct Timer(interval, callable) and read the attribute back.
        self.interval = interval
        self.callable = callable
        self.last = time.time()

    def remaining(self, cur):
        """Return the seconds left (never negative) until the next firing,
        measured from time *cur*."""
        left = self.interval - (cur - self.last)
        if left > 0:
            return left
        return 0

    def is_ready(self, cur):
        """Return True — and restart the period — once more than *interval*
        seconds have passed since the last firing."""
        elapsed = cur - self.last
        if elapsed > self.interval:
            self.last = cur
            return True
        return False
class Applier(threading.Thread):
    """Worker thread that applies policy data received on apply_queue and
    services per-policy timer callbacks; failures go to result_queue as
    ("error", message) tuples."""
    def __init__(self, apply_queue, result_queue):
        """Create the thread, its logger, and one Policy object per module."""
        threading.Thread.__init__(self)
        self.log = logging.getLogger("Applier")
        self.apply_queue = apply_queue
        self.result_queue = result_queue
        # Cleared by an external caller to stop run(); presumably set to
        # False on shutdown — confirm against the owning daemon code.
        self.active = True
        # Instantiate each configured module's Policy class (module list
        # lives in ajan.config.modules).
        self.policies = map(lambda x: x.Policy(), ajan.config.modules)
        # Maps a timer callable -> its Timer; kept in sync by update_policy().
        self.timers = {}
    def next_timeout(self):
        """Return seconds until the soonest registered timer is due
        (clamped to at least 1), or None when no timers exist."""
        if len(self.timers) == 0:
            return None
        cur = time.time()
        next = min(map(lambda x: x.remaining(cur), self.timers.values()))
        next = max(1, next)
        return next
    def update_policy(self, policy, computer, units):
        """Refresh one policy from directory data, apply it, then sync
        self.timers with whatever timer intervals the policy declares."""
        self.log.debug("Updating %s", policy.__module__)
        try:
            # 'update' refreshes policy attributes from the computer/OU
            # entries; 'apply' performs the resulting actions.
            policy.update(computer, units)
            policy.apply()
        except Exception, e:
            self.result_queue.put(("error", str(e)))
            return
        # If the policy exposes a timers() mapping (callable -> interval),
        # register/refresh a Timer for every non-zero interval and drop
        # entries whose interval became falsy.
        func = getattr(policy, "timers", None)
        if func:
            for callable, interval in func().iteritems():
                if interval and interval != 0:
                    old = self.timers.get(callable, None)
                    if old:
                        # Keep the existing Timer (and its phase), just
                        # adjust the interval.
                        old.interval = interval
                    else:
                        self.timers[callable] = Timer(interval, callable)
                else:
                    if self.timers.get(callable, None):
                        del self.timers[callable]
    def run(self):
        """Main loop: apply queued policy data when available, otherwise
        fire any timers that are due; errors are reported, never raised."""
        self.log.debug("started")
        while self.active:
            if not self.apply_queue.empty():
                computer, units = self.apply_queue.get()
                for policy in self.policies:
                    self.update_policy(policy, computer, units)
            else:
                cur = time.time()
                # Timers whose interval has elapsed since their last firing.
                active = filter(lambda x: x.is_ready(cur), self.timers.values())
                for event in active:
                    try:
                        event.callable()
                    except Exception, e:
                        self.result_queue.put(("error", str(e)))
            time.sleep(1)
class Loader(ldif.LDIFParser):
    """LDIF parser that keeps the first parsed entry as the computer record
    and appends every later entry to the OU list (the caller initializes
    .comp to None and .ou to [] before parse())."""
    def handle(self, dn, attr):
        if not self.comp:
            # First entry: this computer's own attributes.
            self.comp = attr
        else:
            # Subsequent entries: organizational-unit policy attributes.
            self.ou.append(attr)
class Fetcher(threading.Thread):
    """Thread that periodically fetches this computer's policy from LDAP and
    pushes ("policy", (computer_attrs, ou_attrs)) messages onto result_queue
    whenever the policy content changes."""
    def __init__(self, result_queue):
        threading.Thread.__init__(self)
        self.result_queue = result_queue
        self.log = logging.getLogger("Fetcher")
        # Cleared externally to stop run(); checked every 0.5s while sleeping.
        self.active = True
    def fetch(self):
        """Fetch the computer entry and its OU policies from LDAP.

        Returns (computer_attrs, ou_attrs_list, sha1_hexdigest_of_ldif) and
        writes a copy of the fetched LDIF to ajan.config.default_policyfile
        so it can be reloaded on the next start.
        """
        self.log.debug("Fetching new policy...")
        conn = ajan.ldaputil.Connection()
        policy_output = StringIO.StringIO()
        output = ldif.LDIFWriter(policy_output)
        # Get this computer's entry
        ret = conn.search_computer()[0]
        comp_attr = ret[1]
        output.unparse(ret[0], ret[1])
        # Organizational unit policies: one search per OU listed on the
        # computer entry; missing OUs are silently skipped.
        ou_attrs = []
        ou_list = comp_attr.get("ou", [])
        for unit in ou_list:
            ret = conn.search_ou(unit)
            if len(ret) > 0:
                ret = ret[0]
                output.unparse(ret[0], ret[1])
                ou_attrs.append(ret[1])
        conn.close()
        policy_ldif = policy_output.getvalue()
        policy_output.close()
        # Save a copy of fetched policy (also used for change detection).
        f = file(ajan.config.default_policyfile, "w")
        f.write(policy_ldif)
        f.close()
        return comp_attr, ou_attrs, sha.sha(policy_ldif).hexdigest()
    def run(self):
        """Replay the cached policy (if any), then poll LDAP forever,
        emitting a message only when the policy's SHA-1 hash changes."""
        self.log.debug("started")
        old_hash = None
        # Load latest fetched policy if available, so policies are applied
        # even before the first successful LDAP fetch.
        if os.path.exists(ajan.config.default_policyfile):
            self.log.debug("Loading old policy...")
            old_hash = sha.sha(file(ajan.config.default_policyfile).read()).hexdigest()
            loader = Loader(file(ajan.config.default_policyfile))
            loader.comp = None
            loader.ou = []
            loader.parse()
            if loader.comp:
                message = "policy", (loader.comp, loader.ou)
                self.result_queue.put(message)
        # Periodically fetch latest policy
        while self.active:
            try:
                computer, units, ldif_hash = self.fetch()
                if ldif_hash != old_hash:
                    self.log.debug("Policy has changed")
                    message = "policy", (computer, units)
                    self.result_queue.put(message)
                    old_hash = ldif_hash
                else:
                    self.log.debug("Policy is still same")
            except Exception, e:
                self.result_queue.put(("error", "Fetch error: %s" % str(e)))
            # Sleep in 0.5s slices so clearing self.active stops the thread
            # promptly instead of waiting out the full check interval.
            timeout = ajan.config.policy_check_interval
            while timeout > 0 and self.active:
                timeout -= 0.5
                time.sleep(0.5)
|
# Copyright (c) 2016, Xilinx, Inc.
# SPDX-License-Identifier: BSD-3-Clause
import os
import pytest
from pynq import PL
from pynq import Overlay
from pynq import Clocks
from pynq.pl import BS_BOOT
from pynq.ps import DEFAULT_CLK_MHZ
# Bitstreams under test: the boot bitstream and whatever is currently loaded.
bitfile1 = BS_BOOT
bitfile2 = PL.bitfile_name
ol1 = Overlay(bitfile1)
ol2 = Overlay(bitfile2)
# Clock frequencies recorded by test_overlay(); initialized to the defaults.
cpu_mhz = 0
bitfile1_fclk0_mhz = DEFAULT_CLK_MHZ
bitfile1_fclk1_mhz = DEFAULT_CLK_MHZ
bitfile1_fclk2_mhz = DEFAULT_CLK_MHZ
bitfile1_fclk3_mhz = DEFAULT_CLK_MHZ
# NOTE(review): the bitfile2_* globals below are declared but never written
# by test_overlay() (it only updates bitfile1_*) — confirm intent.
bitfile2_fclk0_mhz = DEFAULT_CLK_MHZ
bitfile2_fclk1_mhz = DEFAULT_CLK_MHZ
bitfile2_fclk2_mhz = DEFAULT_CLK_MHZ
bitfile2_fclk3_mhz = DEFAULT_CLK_MHZ
@pytest.mark.run(order=2)
def test_overlay():
    """Test whether the overlay is properly set.
    Each overlay has its own bitstream. Also need the corresponding ".tcl"
    files to pass the tests.

    For each overlay: download it, check the IP and GPIO dictionaries are
    populated with clean state, verify reset() clears injected state, and
    confirm the recorded clock frequencies after the download.
    """
    global ol1, ol2
    global cpu_mhz
    global bitfile1_fclk0_mhz, bitfile1_fclk1_mhz
    global bitfile1_fclk2_mhz, bitfile1_fclk3_mhz
    global bitfile2_fclk0_mhz, bitfile2_fclk1_mhz
    global bitfile2_fclk2_mhz, bitfile2_fclk3_mhz
    for ol in [ol1, ol2]:
        ol.download()
        assert len(ol.ip_dict) > 0,\
            'Overlay gets empty IP dictionary.'
        assert len(ol.gpio_dict) > 0,\
            'Overlay gets empty GPIO dictionary.'
        # Every IP entry must carry the expected keys and a clean state.
        for ip in ol.ip_dict:
            for key in ['addr_range', 'phys_addr', 'state', 'type']:
                assert key in ol.ip_dict[ip], \
                    'Key {} missing in IP {}.'.format(key, ip)
            assert ol.ip_dict[ip]['state'] is None,\
                'Overlay gets wrong IP state.'
            # Set "TEST" for IP states
            ol.ip_dict[ip]['state'] = "TEST"
        for gpio in ol.gpio_dict:
            for key in ['index', 'state']:
                assert key in ol.gpio_dict[gpio], \
                    'Key {} missing in GPIO {}.'.format(key, gpio)
            assert ol.gpio_dict[gpio]['state'] is None, \
                'Overlay gets wrong GPIO state.'
            # Set "TEST" for GPIO states
            ol.gpio_dict[gpio]['state'] = "TEST"
        # reset() must wipe the "TEST" markers injected above.
        ol.reset()
        for ip in ol.ip_dict:
            # "TEST" should have been cleared by reset()
            assert ol.ip_dict[ip]['state'] is None,\
                'Overlay cannot reset IP dictionary.'
        for gpio in ol.gpio_dict:
            # "TEST" should have been cleared by reset()
            assert ol.gpio_dict[gpio]['state'] is None,\
                'Overlay cannot reset GPIO dictionary.'
        # Record the clocks right after download, then assert against them.
        # NOTE(review): the readings always go into the bitfile1_* globals,
        # even on the ol2 iteration; bitfile2_* are never written — confirm
        # whether per-bitstream tracking was intended.
        cpu_mhz = Clocks.cpu_mhz
        bitfile1_fclk0_mhz = Clocks.fclk0_mhz
        bitfile1_fclk1_mhz = Clocks.fclk1_mhz
        bitfile1_fclk2_mhz = Clocks.fclk2_mhz
        bitfile1_fclk3_mhz = Clocks.fclk3_mhz
        assert not ol.timestamp == '', \
            'Overlay ({}) has an empty timestamp.'.format(ol.bitfile_name)
        assert ol.is_loaded(), \
            'Overlay ({}) should be loaded.'.format(ol.bitfile_name)
        assert Clocks.cpu_mhz == cpu_mhz, \
            'CPU frequency should not be changed.'
        assert Clocks.fclk0_mhz == bitfile1_fclk0_mhz, \
            'FCLK0 frequency not correct after downloading {}.'.format(
                ol.bitfile_name)
        assert Clocks.fclk1_mhz == bitfile1_fclk1_mhz, \
            'FCLK1 frequency not correct after downloading {}.'.format(
                ol.bitfile_name)
        assert Clocks.fclk2_mhz == bitfile1_fclk2_mhz, \
            'FCLK2 frequency not correct after downloading {}.'.format(
                ol.bitfile_name)
        assert Clocks.fclk3_mhz == bitfile1_fclk3_mhz, \
            'FCLK3 frequency not correct after downloading {}.'.format(
                ol.bitfile_name)
@pytest.mark.run(order=-1)
def test_end():
    """Wrapping up by changing the overlay back.
    This is the last test to be performed.

    Restores the originally-loaded bitstream (ol2), removes any WaveDrom
    javascript files copied during the run, and drops the overlay objects.
    """
    global ol1, ol2
    ol2.download()
    # Clear the javascript files copied during tests if any
    if os.system("rm -rf ./js"):
        raise RuntimeError('Cannot remove WaveDrom javascripts.')
    del ol1
    del ol2
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.