text stringlengths 38 1.54M |
|---|
import pytest
from msdsl.lfsr import LFSR
@pytest.mark.parametrize('n', list(range(3, 19)))
def test_lsfr(n):
    """An n-bit LFSR must visit all 2**n - 1 states once per period, then repeat."""
    lfsr = LFSR(n)
    period = (1 << n) - 1
    state = 0
    passes = []
    for _ in range(2):
        seen = []
        for _ in range(period):
            seen.append(state)
            state = lfsr.next_state(state)
        passes.append(seen)
    # First pass must cover every value in 0 .. 2**n - 2 exactly once.
    assert sorted(passes[0]) == list(range(period))
    # The sequence is periodic: the second pass repeats the first exactly.
    assert passes[0] == passes[1]
|
from gym.envs.registration import register
# Register the custom environment with Gym so it can be instantiated via
# gym.make('bataille_corse-v0'); the default game has two players.
register(
    id='bataille_corse-v0',
    entry_point='gym_bataille_corse.envs:BatailleCorseEnv',
    kwargs={'playersNumber': 2}
)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .API import SurfsharkAPI, AuthorizationRequired
class UserSession():
    """Wraps SurfsharkAPI authentication state: login, 2FA and token renewal."""

    # Result codes returned by login().
    FAIL = 0
    SUCCESS = 1
    NEED_2FA = 2

    def __init__(self, tokens=None):
        self.api = SurfsharkAPI(tokens=tokens)
        self.tokens = None
        self.logged_in = False

    def login(self, username, password):
        """Attempt a credential login; return FAIL, SUCCESS or NEED_2FA."""
        self.tokens = self.api.postAuthLogin(username, password)
        if self.tokens is None:
            return UserSession.FAIL
        # Second token element is truthy only for a fully authorised session.
        return UserSession.SUCCESS if self.tokens[1] else UserSession.NEED_2FA

    def submit2FA(self, code):
        """Forward a two-factor authentication code to the API."""
        return self.api.postTwoFactorAuthorization(code)

    def renewToken(self):
        """Refresh the auth tokens; return True when renewal succeeded."""
        self.tokens = self.api.renewAuth()
        return True if self.tokens else False

    def isLoggedIn(self):
        """Return True while the API still accepts our token."""
        try:
            return self.api.getAccountUserMe() is not None
        except AuthorizationRequired:
            return False
|
"""
read a text file with a single URL on each line and
save the contents of each to a file
"""
import sys
import urllib2
from contextlib import closing
# Name of the URL list file; may be overridden by the first CLI argument.
urlfilename = 'urls.txt'
if len(sys.argv) > 1:
    urlfilename = sys.argv[1]

# Download each listed URL into fileN.html. Context managers guarantee
# every handle is closed even when a fetch or write fails mid-loop
# (the original leaked both handles on any exception).
with open(urlfilename, 'r') as urlfile:
    for (i, url) in enumerate(urlfile):
        url = url.strip()  # iterating a file keeps the trailing newline, which breaks the request
        if not url:
            continue  # skip blank lines instead of crashing on them
        with closing(urllib2.urlopen(url)) as wd:
            page = wd.read()
        with open('file' + str(i) + '.html', 'w') as fd:
            fd.write(page)
|
"""
USM 作业code
"""
import numpy as np
import math
from scipy import linalg
from sympy import *
from scipy.stats import norm
import matplotlib.pyplot as plt
"""
matrix1 = np.array([[100, 32, -48, 0, 0],
[32, 64, 51.2, 0, 0],
[-48, 51.2, 256, 0, 0],
[0, 0, 0, 225, 45],
[0, 0, 0, 45, 25]])
v = np.array([[-1, -1, -1, 1, 1]])
result = np.dot(v, matrix1)
result = np.dot(result, v.T)
print(math.sqrt(result))
L = linalg.cholesky(matrix1, lower=True) # cholesky分解
print(L)
print(np.dot(L, L.T))
"""
# matrix1 = np.array([[0.022, 0.017, 0, 0, 0.012],
# [0.017, 0.086, 0, 0, -0.012],
# [0, 0, 0.039, 0.006, 0.012],
# [0, 0, 0.006, 0.01, -0.008],
# [0.012, -0.012, 0.012, -0.008, 0.039]])
# L = linalg.cholesky(matrix1, lower=True) # cholesky分解
# print(L)
# print(linalg.inv(L))
# m1 = np.array([[0.149, 0, 0, 0, 0],
# [0, 0.294, 0, 0, 0],
# [0, 0, 0.198, 0, 0],
# [0, 0, 0, 0.1, 0],
# [0, 0, 0, 0, 0.198]])
#
# m2 = np.array([[1, 0.4, 0, 0, 0.4],
# [0.4, 1, 0, 0, -0.2],
# [0, 0, 1, 0.3, 0.3],
# [0, 0, 0.3, 1, -0.4],
# [0.4, -0.2, 0.3, -0.4, 1]])
# res = np.dot(m1, m2)
# res = np.dot(res, m1)
# print(res)
# mat = np.array([[0.6, 1],
# [1, 0]])
# mat = linalg.inv(mat)
# print(np.dot(mat, np.array([[1], [0]])))
# t = symbols('t')
# T = symbols('T')
# c1 = symbols('c1')
# c2 = symbols('c2')
# x = symbols('x')
# a = solve([x**2 + 0.6*x +1], [x])
# print(a)
# f = c1 * exp(-0.3 * t) * (cos(0.95 * t) + I*sin(0.95 * t)) + c2 * exp(-0.3 * t) * (cos(0.95 * t) - I*sin(0.95 * t))
# print(diff(f, t).subs({t: 0}))
# print(diff(f, t))
# ut = exp(-1.7 * T)
# ht = sin(0.95 * (t - T))*I
# st = ut * ht
# print(integrate(st, (T, 0, t)))
# print(integrate(exp(-1.7 * x) * cos(0.95 * (a - x)), (x, 0, a)))
# print(integrate(((-6/19)*sin(0.95*(a-x))),(x,0,a)))
# print(linsolve([x + a -1,x -a -(-6/19)*I],(x,a)))
# Symbolic variables for the transform/probability exercises below.
a = symbols('a')
b = symbols('b')
t = symbols('t')
s = symbols('s')
T = symbols('T')
# Laplace-style kernel; integrate over a finite interval when needed.
eq1 = exp(-s*t)
# print(integrate(eq1,(t, -1, 1)))
# res = solve([x**4 + 2*x**3 + x**2], [x])
# print(res)
p = symbols('p')
n = symbols('n')
m = symbols('m')
# f1 = 1 - (1 - p**n)**m
# print(diff(f1, n))
|
# -*- coding: utf-8 -*-
from tools.system import FileManager
from Singleton import Singleton
class Dialog(metaclass = Singleton):
    ''' Printing messages in current language
    The class is a singleton.
    serviceExpressions: the data from the data base file.
    '''
    serviceExpressions: list

    def __init__(self, appLanguage: str):
        super().__init__()
        self.serviceExpressions = list()
        self.changeLanguage(appLanguage)
        return

    def getMessageFor(self, expression: str) -> str:
        ''' Getting the text of expression by given expression
        '''
        # Each stored line has the form "<key> # <text>"; linear scan.
        for line in self.serviceExpressions:
            row = line.split(' # ')
            if row[0] == expression:
                return row[1]
        # NOTE(review): falls back to the "error" entry. If the database has
        # no "error" row this recurses forever — confirm the data file always
        # contains one.
        return self.getMessageFor("error")

    def changeLanguage(self, lang: str) -> None:
        # Reload the expression table, e.g. ../DataBase/ServiceExpressionsEN.db
        self.serviceExpressions = FileManager.readFile("../DataBase/ServiceExpressions" + lang.upper() + ".db")
        return
|
import cv2
import os
import numpy as np
from numpy import array
import pickle
from pathlib import Path
from collections import Counter
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib import style
# SIFT + FLANN k-NN classifier evaluation over dataset3/<class>/test images.
# d: number of SIFT features extracted per image; k: neighbours for voting.
d = 8
k = 3
confusion_dir = 'confusion/'
confusion_mat_dir = 'confusion_matrice/'
storage_dir = 'stockage/'
path = 'dataset3'
# Load pickled training descriptors (varse) and the class-name list.
file = open(storage_dir+"storage_simple.gt", 'rb')
data = pickle.load(file)
varse = data[0]
classes = data[1]
sift = cv2.xfeatures2d.SIFT_create(d)
path = 'dataset3'
dirs = os.listdir(path)
print('nombre de classes : ',len(dirs))
correct = 0
total = 0
# Number of FLANN matches requested per descriptor (needed for the ratio test).
nearest = 2
#start_time = time.time()
CONFUSION = []
for idx, obj in enumerate(dirs):
    #total +=len(os.listdir(path+'/'+obj+'/test'))
    doss = os.listdir(path+'/'+obj+'/test')
    local_conf = np.zeros(len(classes))
    for idz, fic in enumerate(doss):
        img = cv2.imread(path+'/'+obj+'/test/'+fic)
        kp, des = sift.detectAndCompute(img,None)
        FLANN_INDEX_KDTREE = 1
        index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
        search_params = dict(checks=128)
        flann = cv2.FlannBasedMatcher(index_params,search_params)
        M = []
        if len(des)>nearest:
            total +=1
            for idy,v in enumerate(varse):
                tmp = []
                for z in v:
                    c1 = 0
                    matches = flann.knnMatch(z,des,k=nearest)
                    # Lowe's ratio test: count only clearly-best matches.
                    for i,(m,n) in enumerate(matches):
                        if m.distance <0.6*n.distance:
                            c1+=1
                    tmp.append(c1)
                    # NOTE(review): indentation reconstructed — M appears to
                    # collect one (class index, score) pair per template view,
                    # while tmp is built but never used afterwards. Verify.
                    M.append([idy,c1])
            #print(M)
            # Rank candidate classes by descending match score and vote
            # among the k best entries.
            M.sort(key=lambda x: x[1], reverse=True)
            #print(M)
            k_nearest = M[:k]
            print(k_nearest)
            E = []
            for b in k_nearest:
                E.append(b[0])
            dav = Counter(E)
            pred = dav.most_common(1)[0][0]
            local_conf[pred] +=1
            if classes[pred] == obj:
                correct +=1
            #print('Originale ',obj,' La classe predite est : ',pred)
            print('Originale ',obj,' La classe predite est : ',classes[pred])
            print("Dectection de ",fic,' actual correction rate ',round((correct/total)*100,2),'%')
    CONFUSION.append(local_conf)
    #break
print('Overall result',round((correct/total)*100,2),'%')
# Persist the per-class confusion rows for later analysis.
if(not Path(confusion_dir+"confusion_simple.gt").is_file()):
    os.mknod(confusion_dir+"confusion_simple.gt")
f = open(confusion_dir+"confusion_simple.gt", "wb")
f.truncate(0)
pickler = pickle.Pickler(f)
pickler.dump(CONFUSION)
|
from rest_framework import serializers
from babycare.models import Like
class LikeSerializer(serializers.ModelSerializer):
    """Read-only serializer exposing a Like with renamed identifier fields."""
    like_id = serializers.IntegerField(read_only=True, source='id')
    event_id = serializers.IntegerField(read_only=True, source='event.id')
    # NOTE(review): "like_user_id" is sourced from baby.id — confirm the
    # liking entity is indeed the related baby record.
    like_user_id = serializers.IntegerField(read_only=True, source='baby.id')

    class Meta:
        model = Like
        fields = ['like_id', 'event_id', 'like_user_id', 'datetime']
|
"""
Some simple time operations that I frequently use
"""
import argparse
import arrow
def main():
    """Entry point: parse the CLI and dispatch to the chosen sub-command."""
    args = _get_args()
    args.func(args)
# End def
def _get_args():
    """Build the argument parser; each sub-command binds its handler
    via set_defaults(func=...)."""
    parser = argparse.ArgumentParser(description='Some simple time operations')
    subparsers = parser.add_subparsers()
    add_parser = subparsers.add_parser('add', help='Find the date a given number of days/months/years from a given date')
    add_parser.add_argument('start_date', help='Date to start counting. Date must be in ISO 8601 format, or "today" for the current, local date.')
    add_parser.add_argument('addend', type=int, help='Number of days/months/years')
    add_parser.add_argument('unit', choices=['days', 'months', 'years'], help='Unit')
    add_parser.set_defaults(func=_add_date)
    delta_parser = subparsers.add_parser('delta', help='Find the number of days between two dates')
    delta_parser.add_argument('date_1', help='First date. Date must be in ISO 8601 format, or "today" for the current, local date')
    delta_parser.add_argument('date_2', help='Second date. Date must be in ISO 8601 format, or "today" for the current, local date')
    delta_parser.set_defaults(func=_delta_date)
    epoch_2_human_parser = subparsers.add_parser('epoch2human', help='Convert epoch timestamp to human readable format')
    epoch_2_human_parser.add_argument('timestamp', help='Unix epoch timestamp in either milliseconds or seconds')
    epoch_2_human_parser.add_argument('-t', '--timezone', help='Timezone by name or tzinfo. The local timezone is the default')
    epoch_2_human_parser.set_defaults(func=_epoch_2_human)
    human_2_epoch_parser = subparsers.add_parser('human2epoch', help='Convert a human readable time to a Unix timestamp (in seconds)')
    human_2_epoch_parser.add_argument('date', help='Date to convert. Date must be in either ISO 8601 format, "today" for the beginning of the current, local date, or "now"')
    human_2_epoch_parser.set_defaults(func=_human_2_epoch)
    yesterday_parser = subparsers.add_parser('yesterday', help='Return Unix timestamp of the beginning and end of yesterday')
    yesterday_parser.add_argument('-t', '--timezone', help='Timezone by name or tzinfo. The local timezone is the default')
    yesterday_parser.set_defaults(func=_yesterday)
    return parser.parse_args()
# End def
def _parse_date(date):
    """Parse 'today', 'now' or an ISO 8601 string into an Arrow instance."""
    if date == 'today':
        date = arrow.now().floor('day')
    elif date == 'now':
        date = arrow.now()
    else:
        date = arrow.get(date)
    # end if/else
    return date
# End def
def _add_date(args):
    """CLI adapter for the 'add' sub-command."""
    add_date(args.start_date, args.unit, args.addend)
# End def
def add_date(start, unit, addend):
    """
    Find the date so many days/months/years into the future from the given date.

    start  -- ISO 8601 string, 'today' or 'now'
    unit   -- 'days', 'months' or 'years'
    addend -- signed number of units to move by
    """
    start = _parse_date(start)
    # arrow >= 0.14 removed the plural-keyword replace(days=...) shortcut;
    # shift() is the supported way to move a date by a delta.
    if unit in ('days', 'months', 'years'):
        print(start.shift(**{unit: addend}))
    else:
        print('ERROR: Do not recognise unit {}'.format(unit))
    # End if/else
# End def
def _delta_date(args):
    """CLI adapter for the 'delta' sub-command."""
    delta_date(args.date_1, args.date_2)
# End def
def delta_date(start, end):
    """
    Find the number of days between two dates (prints the absolute timedelta).
    """
    start = _parse_date(start)
    end = _parse_date(end)
    print(abs(end - start))
# End def
def _epoch_2_human(args):
    """CLI adapter for the 'epoch2human' sub-command."""
    epoch_2_human(args.timestamp, args.timezone)
# End def
def epoch_2_human(timestamp, timezone=None):
    """
    Convert epoch to human readable; shown in `timezone` when given,
    otherwise as parsed (UTC).
    """
    date = arrow.get(timestamp)
    if timezone:
        print(date.to(timezone))
    else:
        print(date)
    # End if/else
# End def
def _human_2_epoch(args):
    """CLI adapter for the 'human2epoch' sub-command."""
    human_2_epoch(args.date)
# End def
def human_2_epoch(date):
    """
    Convert human readable to epoch, printed as a Unix timestamp (seconds).
    """
    # _parse_date already returns an Arrow instance, so wrapping it in
    # another arrow.get() call was redundant.
    print(_parse_date(date).format('X'))
# End def
def _yesterday(args):
    """CLI adapter for the 'yesterday' sub-command."""
    yesterday(args.timezone)
# End def
def yesterday(timezone=None):
    """
    Get the timestamp for the start and end of yesterday
    (end of yesterday == start of today).
    """
    start_of_today = arrow.now(timezone).floor('day')
    # arrow >= 0.14 removed the plural-keyword replace(days=...) shortcut;
    # shift() is the supported way to move by a delta.
    print(start_of_today.shift(days=-1).format('X'))
    print(start_of_today.format('X'))
# End def
if __name__ == '__main__':
    main()
# End if
|
# Read a 20x20 grid of integers, one whitespace-separated row per line.
n = 20
mat = [[] for i in range(n)]
for i in range(n):
    line = input()
    mat[i] = list(map(int, line.split()))
# Direction vectors: right, down, left, up and the four diagonals.
dx = [0, 1, 0, -1, 1, 1, -1, -1]
dy = [1, 0, -1, 0, 1, -1, 1, -1]
def valid(n, i, j):
    """Return True when cell (i, j) lies inside an n-by-n grid."""
    return 0 <= i < n and 0 <= j < n
# Largest product of four adjacent numbers in any of the eight directions
# (Project Euler #11 style). Only the far end cell needs a bounds check:
# if (i + 3*dx, j + 3*dy) is inside, the intermediate cells are too.
ans = 0
for i in range(n):
    for j in range(n):
        for d in range(8):
            if valid(n, i + 3 * dx[d], j + 3 * dy[d]):
                cur = 1
                for k in range(4):
                    cur *= mat[i + k * dx[d]][j + k * dy[d]]
                ans = max(ans, cur)
print(ans)
from pygridtools.viz import _viz_bokeh
import pytest
from pygridgen.tests import raises
def test__plot_domain(simple_boundary):
    """The bokeh backend is unimplemented: _plot_domain must raise
    NotImplementedError (only the first call inside the block can run)."""
    with raises(NotImplementedError):
        fig1 = _viz_bokeh._plot_domain(x='x', y='y', data=simple_boundary)
        fig2 = _viz_bokeh._plot_domain(x=simple_boundary['x'], y=simple_boundary['y'], data=None)
        fig3 = _viz_bokeh._plot_domain(x='x', y='y', beta='beta', data=simple_boundary)
        fig4 = _viz_bokeh._plot_domain(x=simple_boundary['x'], y=simple_boundary['y'], beta=simple_boundary['beta'], data=None)
def test__plot_boundaries(simple_boundary, simple_islands):
    """_plot_boundaries must raise NotImplementedError for every signature
    variant (model-only, islands-only, both, data= or array inputs)."""
    with raises(NotImplementedError):
        fig1 = _viz_bokeh._plot_boundaries(model_x='x', model_y='y', model=simple_boundary)
        fig2 = _viz_bokeh._plot_boundaries(model_x=simple_boundary['x'], model_y=simple_boundary['y'], model=None)
        fig3 = _viz_bokeh._plot_boundaries(island_x='x', island_y='y', island_name='island',
                                           islands=simple_islands)
        fig4 = _viz_bokeh._plot_boundaries(island_x=simple_islands['x'], island_y=simple_islands['y'],
                                           island_name=simple_islands['island'], islands=None)
        fig5 = _viz_bokeh._plot_boundaries(model_x='x', model_y='y', model=simple_boundary,
                                           island_x='x', island_y='y', island_name='island',
                                           islands=simple_islands)
        fig6 = _viz_bokeh._plot_boundaries(model_x=simple_boundary['x'], model_y=simple_boundary['y'], model=None,
                                           island_x=simple_islands['x'], island_y=simple_islands['y'],
                                           island_name=simple_islands['island'], islands=None)
def test__plot_points(simple_nodes):
    """_plot_points is not implemented for the bokeh backend."""
    with raises(NotImplementedError):
        fig1 = _viz_bokeh._plot_points(*simple_nodes)
def test__plot_cells(simple_nodes):
    """_plot_cells is not implemented for the bokeh backend."""
    with raises(NotImplementedError):
        fig1 = _viz_bokeh._plot_cells(*simple_nodes)
|
"""Unit Testing for Fiddlewith"""
from unittest import TestCase
from fiddlewith.calc import Calculator
class TestCalculator(TestCase):
    """Unit Testing class for FiddleWith."""

    def test_add(self):
        """Calculator.add should return the arithmetic sum of its arguments."""
        calc = Calculator()
        # assertEqual reports both values on failure, unlike
        # assertTrue(a == b) which only reports "False is not true".
        self.assertEqual(calc.add(3, 2), 5)
|
# -*- coding: utf-8 -*-
"""
剑指 Offer 59 - I. 滑动窗口的最大值
给定一个数组 nums 和滑动窗口的大小 k,请找出所有滑动窗口里的最大值。
示例:
输入: nums = [1,3,-1,-3,5,3,6,7], 和 k = 3
输出: [3,3,5,5,6,7]
解释:
滑动窗口的位置 最大值
--------------- -----
[1 3 -1] -3 5 3 6 7 3
1 [3 -1 -3] 5 3 6 7 3
1 3 [-1 -3 5] 3 6 7 5
1 3 -1 [-3 5 3] 6 7 5
1 3 -1 -3 [5 3 6] 7 6
1 3 -1 -3 5 [3 6 7] 7
提示:
你可以假设 k 总是有效的,在输入数组不为空的情况下,1 ≤ k ≤ 输入数组的大小。
"""
from typing import List
import collections
class Solution:
    def maxSlidingWindow(self, nums: List[int], k: int) -> List[int]:
        """Return the maximum of every length-k sliding window of nums.

        A monotonically decreasing deque holds candidate maxima; its front
        is always the maximum of the current window. O(n) overall.
        """
        if not nums or k == 0:
            return []
        window = collections.deque()
        # Prime the deque with the first k elements.
        for value in nums[:k]:
            while window and window[-1] < value:
                window.pop()
            window.append(value)
        res = [window[0]]
        # Slide the window across the remaining elements.
        for left, value in enumerate(nums[k:]):
            # Drop the departing element iff it was the current maximum.
            if window[0] == nums[left]:
                window.popleft()
            while window and window[-1] < value:
                window.pop()
            window.append(value)
            res.append(window[0])
        return res
if __name__ == '__main__':
    # Demo run using the example from the problem statement.
    nums = [1, 3, -1, -3, 5, 3, 6, 7]
    k = 3
    solution = Solution()
    print(solution.maxSlidingWindow(nums, k))
|
#!/usr/bin/env python
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for versions_views.py."""
from tests.common import testing
import json
import os
import urllib
import mox
import webtest
from titan.common.lib.google.apputils import basetest
from titan import files
from titan.files.mixins import versions
from titan.files.mixins import versions_views
class VersionedFile(versions.FileVersioningMixin, files.File):
    """files.File with version-control behaviour mixed in."""
    pass
class HandlersTest(testing.BaseTestCase):

    def setUp(self):
        super(HandlersTest, self).setUp()
        self.app = webtest.TestApp(versions_views.application)
        # Make every file created during the test a VersionedFile.
        files.register_file_factory(lambda *args, **kwargs: VersionedFile)

    def testChangesetHandler(self):
        # Weakly test execution path:
        response = self.app.post('/_titan/files/versions/changeset')
        self.assertEqual(201, response.status_int)
        self.assertIn('num', json.loads(response.body))

    def testChangesetCommitHandler(self):
        # Stub out the version-control service: we only verify that the view
        # passes the right force/save_manifest flags through to commit().
        mock_vcs = self.mox.CreateMockAnything()
        self.mox.StubOutWithMock(
            versions_views.versions, 'VersionControlService')
        # 1st:
        versions_views.versions.VersionControlService().AndReturn(mock_vcs)
        # 2nd:
        versions_views.versions.VersionControlService().AndReturn(mock_vcs)
        mock_vcs.commit(
            mox.IgnoreArg(), force=True, save_manifest=True).AndReturn('success')
        # 3rd:
        versions_views.versions.VersionControlService().AndReturn(mock_vcs)
        mock_vcs.commit(
            mox.IgnoreArg(), force=False, save_manifest=False).AndReturn('success')
        self.mox.ReplayAll()
        # Manifest and force not given.
        url = '/_titan/files/versions/changeset/commit?changeset=1'
        response = self.app.post(url, expect_errors=True)
        self.assertEqual(400, response.status_int)
        # Force eventually consistent commit.
        url = ('/_titan/files/versions/changeset/commit'
               '?changeset=1&force=true&save_manifest=true')
        response = self.app.post(url)
        self.assertEqual(201, response.status_int)
        self.assertEqual('success', json.loads(response.body))
        # Use manifest for strongly consistent commit.
        manifest = ['/foo', '/bar']
        url = ('/_titan/files/versions/changeset/commit'
               '?changeset=1&save_manifest=false')
        params = {'manifest': json.dumps(manifest)}
        response = self.app.post(url, params=params)
        self.assertEqual(201, response.status_int)
        self.assertEqual('success', json.loads(response.body))
        self.mox.VerifyAll()
if __name__ == '__main__':
    basetest.main()
|
'''
Created on 13.03.2017
@author: alex
'''
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import numpy as np
import scipy.misc
# Close any figures left over from a previous run.
for i in plt.get_fignums():
    print('has existed')  # parenthesised so the statement is valid on Python 2 and 3
    plt.close(plt.figure(i))
# Load the image and normalise to floats in [0, 1].
# NOTE(review): scipy.misc.imread was removed in SciPy 1.2 — on modern SciPy
# switch to imageio.imread.
img = scipy.misc.imread("../../data/images/phone.png")
array = np.asarray(img)
arr = (array.astype(float)) / 255.0
# Convert RGB (dropping any alpha channel) to HSV.
img_hsv = colors.rgb_to_hsv(arr[..., :3])
# Hue histogram, rescaled to degrees.
lu1 = img_hsv[..., 0].flatten()
plt.subplot(1, 3, 1)
plt.hist(lu1*360, bins=360, range=(0.0, 360.0), histtype='stepfilled', color='r', label='Hue')
plt.title("Hue")
plt.xlabel("Value")
plt.ylabel("Frequency")
plt.legend()
# Saturation histogram in [0, 1].
lu2 = img_hsv[..., 1].flatten()
plt.subplot(1, 3, 2)
plt.hist(lu2, bins=100, range=(0.0, 1.0), histtype='stepfilled', color='g', label='Saturation')
plt.title("Saturation")
plt.xlabel("Value")
plt.ylabel("Frequency")
plt.legend()
# Intensity (value) histogram, rescaled back to 0-255.
lu3 = img_hsv[..., 2].flatten()
plt.subplot(1, 3, 3)
# Fixed legend label typo: "Intesity" -> "Intensity".
plt.hist(lu3*255, bins=256, range=(0.0, 255.0), histtype='stepfilled', color='b', label='Intensity')
plt.title("Intensity")
plt.xlabel("Value")
plt.ylabel("Frequency")
plt.legend()
# Maximise the plot window — the API differs per matplotlib backend.
manager = plt.get_current_fig_manager()
backend = matplotlib.get_backend()
if backend == 'TkAgg':
    manager.resize(*manager.window.maxsize())
elif backend == 'QT':
    manager.window.showMaximized()
elif backend == 'WX':
    manager.frame.Maximize(True)
else:
    raise ValueError('Unhandled matplotlib backend:' + backend)
plt.show()
|
from flask_restful import Resource
from flask import request
import secrets, postgresql, os
from config import DATABASE_PATH, UPLOAD, LINK
database = postgresql.open(DATABASE_PATH)
class EducationVerefizied(Resource):
    """Marks a user's education record as verified and clears the pending
    verification request. (Class name spelling kept: referenced by routing.)
    """
    def update(self, id):
        """Flag the education row for `id` verified; returns a status dict."""
        token = request.headers.get('token', False)
        # NOTE(review): only presence/length of the token is checked — it is
        # never validated against a session store. Confirm upstream auth.
        if not token or len(token) < 5: return {'status': False, 'message': 'Fack token, allday'}, 401
        verefied_education = database.prepare('UPDATE educations SET verification = true WHERE user_id = $1')
        result = verefied_education(int(id))
        if result[1] == 1:
            # Bug fix: SQL DELETE takes no column list — "DELETE * FROM ..."
            # is a syntax error in PostgreSQL; "DELETE FROM ..." is correct.
            verefied_education = database.prepare('DELETE FROM education_verifications WHERE user_id = $1')
            result = verefied_education(int(id))
            if result[1] == 1:
                return {'status' : True}
        return {'status' : False}
class ProfileVerefizied(Resource):
    """Read (GET) and update (POST) a user's profile-verification flag."""
    def get(self, id):
        """Return id, email, fullname and the verification flag for `id`."""
        token = request.headers.get('token', False)
        # NOTE(review): only presence/length of the token is checked — it is
        # never validated against a session store. Confirm upstream auth.
        if not token or len(token) < 5: return { 'status': False, 'message': 'Fack token, allday' }, 401
        query = database.prepare("SELECT U.id, U.email, U.fullname, V.verefizied FROM users as U INNER JOIN verefication as V ON U.id = V.id and U.id = $1")
        user_result = query(int(id))
        if len(user_result) == 0: return { 'status': False, 'message': 'User not found' }, 404
        #
        user = user_result[0]
        # NOTE(review): `user` is a driver row object — verify it serialises
        # to JSON as intended.
        return {'status': True,
                'user': user}
    def post(self, id):
        """Set the verification flag for user `id` from the JSON body."""
        token = request.headers.get('token', False)
        verefizied_statement = request.json.get('verefizied', False)
        if not token or len(token) < 5: return { 'status': False, 'message': 'Fack token, allday' }, 401
        query = database.prepare("Update verefication set verefizied = $1 WHERE id = $2")
        user_verefizied = query(verefizied_statement, int(id))
        if len(user_verefizied) == 0: return { 'status': False, 'message': 'User not found' }, 404
        return {'status': True,
                'user': int(id),
                'verefizied': verefizied_statement}
|
#Solve this equation for x with python:
#x**2 = 4**3+17
# Right-hand side of the equation. Renamed from `sum`, which shadowed the
# built-in of the same name.
rhs = 4**3 + 17
print(f'{rhs}')
# Positive root via the square-root power identity (x = sqrt(rhs) = 9.0).
x = rhs ** (1/2)
print(f'{x}')
import os
# Environment variable names used by the Gauge runner.
PROJECT_ROOT_ENV = 'GAUGE_PROJECT_ROOT'
STEP_IMPL_DIR_ENV = 'STEP_IMPL_DIR'
# Directory (relative to the project root) holding step implementations.
STEP_IMPL_DIR_NAME = os.getenv(STEP_IMPL_DIR_ENV) or 'step_impl'
def get_project_root():
    """Absolute project root from the environment, or '' when unset."""
    root = os.environ.get(PROJECT_ROOT_ENV)
    if root is None:
        return ""
    return os.path.abspath(root)
def get_step_impl_dir():
    """Full path of the step-implementation directory inside the project."""
    return os.path.join(get_project_root(), STEP_IMPL_DIR_NAME)
def get_impl_files():
    """Recursively collect every .py file (except __init__.py) under the
    step-implementation directory, in os.walk order."""
    impl_root = get_step_impl_dir()
    return [
        os.path.join(root, name)
        for root, _, names in os.walk(impl_root)
        for name in names
        if name.endswith('.py') and os.path.basename(name) != '__init__.py'
    ]
def read_file_contents(file_name):
    """Return the text of file_name with CRLF normalised to LF,
    or None when the path is not a regular file.
    """
    if not os.path.isfile(file_name):
        return None
    # The with-statement guarantees the handle is closed even when read()
    # raises; the original leaked the handle on a read error.
    with open(file_name) as f:
        return f.read().replace('\r\n', '\n')
def get_file_name(prefix='', counter=0):
    """Pick an unused step_implementation*.py path, trying _1, _2, ...
    suffixes until a free name is found."""
    candidate = os.path.join(
        get_step_impl_dir(),
        'step_implementation{}.py'.format(prefix))
    if os.path.exists(candidate):
        return get_file_name('_{}'.format(counter + 1), counter + 1)
    return candidate
|
# -*- coding:utf-8 -*-
# -------------------------------
# ProjectName : autoDemo
# Author : zhangjk
# CreateTime : 2020/12/5 16:51
# FileName : day7.3
# Description :eggs
# --------------------------------
# Declare this package as a namespace package so multiple distributions can
# contribute modules to it.
try:
    # Bug fix: the attribute was misspelled "declare_namesapce", which raised
    # AttributeError at import time — and AttributeError is not caught below.
    __import__('pkg_resources').declare_namespace(__name__)
except ImportError:
    # pkg_resources unavailable: fall back to pkgutil-style namespace packages.
    from pkgutil import extend_path
    __path__ = extend_path(__path__, __name__)
|
import sqlite3
def Process(dbname):
    """Create and populate a demo 'emp' table in the given SQLite database,
    demonstrating the different parameter-binding styles, then print a few
    rows and the total row count.

    dbname -- path of the SQLite database file (created if missing).
    """
    # Bug fix: initialise before the try block — if connect() itself failed,
    # `cur`/`conn` were unbound in the finally clause, turning any sqlite3
    # error into a NameError.
    conn = None
    cur = None
    try:
        conn = sqlite3.connect(dbname)  # create/open the database file
        cur = conn.cursor()
        sql = "drop table if exists emp"
        cur.execute(sql)
        sql = "create table if not exists emp(id integer primary key, name text)"
        cur.execute(sql)
        # Insert rows using literal SQL, positional (?) and named (:) parameters.
        cur.execute("insert into emp values(1, '홍길동')")
        cur.execute("insert into emp values(?, ?)", (2, "임꺽정"))
        tddata = (3, "김 수한무")
        cur.execute("insert into emp values(?, ?)", tddata)
        tlist =((4,"유비"), (5, "관우"), (6, "장비"))
        # executemany: one statement executed once per parameter tuple.
        cur.executemany("insert into emp values(?, ?)", tlist)
        ldata =[7, "강감찬"]
        cur.execute("insert into emp values(?, ?)", ldata)
        cur.execute("insert into emp values(:sabun, :irum)", {"sabun" : "8", "irum" : "관창"})
        cur.execute("insert into emp values(:sabun, :irum)", {"irum":"김유신", "sabun":"9"})
        conn.commit()  # DML must be committed explicitly
        # Query the data back: first three rows, then the total count.
        cur.execute("select * from emp")
        for row in cur.fetchmany(3):
            print(row)
        print("---------------------------------------------")
        cur.execute("select count(*) from emp")
        print(cur.fetchone())
    except sqlite3.Error as err:
        print("에러 : ", err)
        if conn is not None:
            conn.rollback()
    finally:
        if cur is not None:
            cur.close()
        if conn is not None:
            conn.close()
if __name__ == "__main__":
    # Demo: build and query a local database file.
    Process("nice.db")
# Generated by Django 2.0.13 on 2019-06-13 00:58
from django.db import migrations, models
class Migration(migrations.Migration):
    """Allow the coils and parts many-to-many fields on Pinball to be blank."""
    dependencies = [
        ('pinball', '0002_auto_20190612_1946'),
    ]
    operations = [
        migrations.AlterField(
            model_name='pinball',
            name='coils',
            field=models.ManyToManyField(blank=True, help_text='Select the coils used in this game', to='pinball.Coil'),
        ),
        migrations.AlterField(
            model_name='pinball',
            name='parts',
            field=models.ManyToManyField(blank=True, help_text='Add parts specific to this game', to='pinball.Parts'),
        ),
    ]
|
from libs.mixins import *
__all__ = [
'Psychic',
]
class Psychic(StoreMixin):
    """Keeps the psychics' state: their guesses and effectiveness index."""

    def __init__(self, data):
        super().__init__()
        if not data:
            # No stored state supplied: start with the default roster.
            data = [
                {'id': 0, 'name': 'Vlad', 'assumptions': [], 'index_effectivity': 0},
                {'id': 1, 'name': 'Genady', 'assumptions': [], 'index_effectivity': 0},
                {'id': 2, 'name': 'Petr', 'assumptions': [], 'index_effectivity': 0}
            ]
        self.data = data

    def set(self, key, value):
        """Replace the record stored at position `key`."""
        self.data[key] = value

    def update_assumptions(self, assessments):
        """Append each psychic's latest guess.

        assessments -- mapping from psychic id to a dict with a 'value' key.
        Returns the updated data list.
        """
        for record in self.data:
            record['assumptions'].append(assessments.get(record['id'])['value'])
        return self.data
|
# """
# To see an example of the Wikipedia API JSON look at this url:
# https://en.wikipedia.org/api/rest_v1/page/summary/Japanese_cuisine
# """
import requests
def my_function(title, value):
    """Fetch the Wikipedia REST summary for `title`; exit on HTTP errors.

    Returns the raw requests.Response (callers parse .json() themselves).
    `value` is unused here but kept for interface compatibility.
    """
    url = f"https://en.wikipedia.org/api/rest_v1/page/summary/{title}"
    req = requests.get(url)
    # Check the status BEFORE touching the body: on an error page .json()
    # may raise, and the original parsed a result it never used.
    if req.status_code != 200:
        print(f"We got an error: {req.status_code}")
        exit()
    return req
def my_function2(title, value):
    """Return the `value` field ('description' or 'extract') of the summary."""
    # NOTE(review): .json() re-parses the response body on every call —
    # consider caching the parsed dict.
    data = my_function(title, value).json()
    return data[f"{value}"]
# Simple CLI: ask for an article title and which summary field to display.
title = input("Give an article: ").strip()
value = input("Description or extract? ").strip().lower()
data = my_function2(title, value)
print(f"https://en.wikipedia.org/wiki/{title}")
print(f"Here is {value} for {title}:")
print(data)
|
import numpy as np
import cv2
#You can add your own Template.png and this code will use
#your webcam and look after this template with a threshold
#my template
template_color = cv2.imread('Template.png')
#dont need this but good for troubleshooting of the wrong template
cv2.imshow('template', template_color)
#capture web cam (device 0)
cap = cv2.VideoCapture(0)
_,frame = cap.read()
#you will need height and width from the template to draw a box
h,w,_ = template_color.shape
while True:
    #read the next webcam frame
    _,frame = cap.read()
    #for all px start points give the match quality
    res = cv2.matchTemplate(template_color, frame, cv2.TM_CCOEFF_NORMED)
    #set limit and look for high enough matches
    threshold = 0.7
    loc = np.where(res >= threshold)
    #draw a rectangle at every matching location (loc is (rows, cols),
    #so reverse it to get (x, y) points)
    for pt in zip(*loc[::-1]):
        cv2.rectangle(frame, pt, (pt[0] + w, pt[1] + h), (0,255,255))
    #show image with boxes
    cv2.imshow('Detected', frame)
    #stop the loop when 'q' is pressed
    if cv2.waitKey(1) == ord('q'):
        break
#set your webcam free
cap.release()
|
from flask import Blueprint
from flask_restful import Api
from app_blueprint.tree.main import Main
# Blueprint wiring: mount the Main resource at the blueprint root ("/").
trees = Blueprint('trees', __name__)
api = Api(trees)
api.add_resource(Main, "/")
|
from app import db
from datetime import datetime
class Record(db.Model):
    """A single sensor reading: CO2, humidity and temperature."""
    __tablename__ = 'records'
    id = db.Column(db.Integer, primary_key=True)
    # Indexed for time-range queries; defaults to insertion time (UTC).
    timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
    co2 = db.Column(db.Integer)   # NOTE(review): presumably ppm — confirm units
    hum = db.Column(db.Float)     # NOTE(review): presumably percent — confirm
    temp = db.Column(db.Float)

    def __repr__(self):
        return '<Record {}>'.format(self.timestamp)
|
#!/bin/python
# Author: Daniel Beyer
# CS372 - Project 1: Chat server/client
# 10/24/17
import sys
from socket import *
serverHandle = "MrServer" #server name
#Function where chat loop happens
def chatLoop(conn):
    """Alternate receiving and sending one message at a time (Python 2)."""
    while 1: #Loop runs continuously
        rec_data = conn.recv(513)[0:-1] #Using 512 here to hold enough space for message + ">" + client name
        if rec_data == "":
            # An empty recv means the peer closed the connection.
            print "Connection closed on client end"
            print "Waiting for new connection"
            break
        print rec_data #print received message
        send_data = ""
        # Re-prompt until the reply is non-empty and at most 500 characters.
        while len(send_data) > 500 or len(send_data) == 0:
            send_data = raw_input("{}> ".format(serverHandle))
        #quit command
        if send_data == "\quit":
            print "Exiting..."
            exit(1)
        conn.send("{}> {}\0".format(serverHandle, send_data)) #Combine message with server name, ">", and \0 to send to C-based client
if __name__ == "__main__":
    if len(sys.argv) != 2: #input validation for port
        print "Error: Use this format: python chatServer.py [port]"
        exit(1)
    #Source: https://docs.python.org/3.3/howto/sockets.html
    portNum = sys.argv[1]
    sckt = socket(AF_INET, SOCK_STREAM)
    sckt.bind(('', int(portNum)))  # bind to all interfaces on the given port
    sckt.listen(1)
    print "Waiting for incoming connections"
    # Serve one client at a time; loop back to accept() after a disconnect.
    while 1:
        conn, address = sckt.accept()
        print "Connected on address {}".format(address)
        #Begin chat function
        chatLoop(conn)
        conn.close()
from django.db import models
class Country(models.Model):
    """
    Model that represents a country.
    """
    name = models.CharField(null=False, blank=False, max_length=250)
    # ISO-style short code; unique so it can be used as a natural key.
    code = models.CharField(null=False, blank=False, max_length=10, unique=True)

    def __str__(self):
        return '{} - {}'.format(self.code, self.name)

    class Meta:
        db_table = 'country'
class FoodType(models.Model):
    """
    Model that represents a duck food type.
    """
    name = models.CharField(null=False, blank=False, max_length=250)

    def __str__(self):
        return '{} - {}'.format(self.id, self.name)

    class Meta:
        db_table = 'food_type'
class FeedSchedule(models.Model):
    """
    Repeats associated feeding every <days> days
    """
    days = models.PositiveIntegerField()
class FeedEntry(models.Model):
    """
    Model that represents a feed entry submission.
    """
    date = models.DateTimeField()
    quantity = models.PositiveIntegerField()
    description = models.TextField(null=True, blank=True, max_length=500)
    city = models.CharField(max_length=250)
    park = models.CharField(max_length=250)
    country = models.ForeignKey(Country, on_delete=models.CASCADE)
    food_type = models.ForeignKey(FoodType, on_delete=models.CASCADE)
    # Optional repetition; deleting the schedule keeps the entry (SET_NULL).
    schedule = models.ForeignKey(FeedSchedule, null=True, blank=True, on_delete=models.SET_NULL)
    created = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return '{} - {} - {} - {}'.format(self.date, self.city, self.country, self.food_type.name)

    class Meta:
        db_table = 'feed_entry'
        ordering = ['-date']  # newest entries first by default
|
from .type import Type
from .complex import Complex, Real, Im
from .matrix import Matrix, Vector
from .function import Function, ListFunction
from .polynomial import *
|
# app/urls.py
from django.conf.urls import url
from app import views
# URL routes for the app; each name is reversible via {% url %} / reverse().
urlpatterns = [
    url(r'^$', views.index, name='index'),
    url(r'^test/$', views.test, name='test'),
    url(r'^profile/$', views.profile, name='profile'),
    url(r'^model/$', views.model, name='model'),
    url(r'^predict/$', views.predict, name='predict'),
]
# Interactive base-conversion menu (user-facing messages in Portuguese).
print("="*30,"[Conversao de BASES]","="*30)
escolha = 0
while escolha != 4:
    # A value is requested every round, even when the user intends to quit.
    value = int(input("Digite um valor para a conversao: "))
    print("\nEscolha uma das opcoes:\n")
    print("[ 1 ] Conversao do numero em HEXADECIMAL.")
    print("[ 2 ] Conversao do numero em OCTAL.")
    print("[ 3 ] Conversao do numero em BINARIO.")
    print("[ 4 ] FIM.")
    escolha = int(input("\nSua escolha: "))
    if escolha == 1:
        print(f"\nO valor {value} em HEXADECIMAL eh {hex(value)}")
    elif escolha == 2:
        print(f"\nO valor {value} em OCTAL eh {oct(value)}")
    elif escolha == 3:
        print(f"\nO valor {value} em BINARIO eh {bin(value)}")
    elif escolha == 4:
        print("="*30,"[F I M]","="*30)
    else:
        print("\nERROR in system")
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-30 01:19
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the Family model (name, age, gender)."""
    dependencies = [
        ('mainsite', '0003_auto_20170729_2156'),
    ]
    operations = [
        migrations.CreateModel(
            name='Family',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=20)),
                # NOTE(review): max_length is not a valid option for
                # PositiveIntegerField (Django ignores it) — confirm the
                # model definition and clean it up there.
                ('age', models.PositiveIntegerField(max_length=2)),
                ('gender', models.CharField(choices=[('M', 'Male'), ('F', 'female')], max_length=2)),
            ],
        ),
    ]
|
# Generated by Django 2.1.15 on 2021-02-08 03:52
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the redundant _id suffix from the patient FK field names
    (Django already appends _id to the database column of a ForeignKey)."""
    dependencies = [
        ('t', '0004_auto_20210208_1151'),
    ]
    operations = [
        migrations.RenameField(
            model_name='department',
            old_name='patient_id',
            new_name='patient',
        ),
        migrations.RenameField(
            model_name='illness',
            old_name='patient_id',
            new_name='patient',
        ),
        migrations.RenameField(
            model_name='result',
            old_name='patient_id',
            new_name='patient',
        ),
    ]
|
"""
Created by Jonas Pfeiffer on 26/04/17.
"""
import csv
import os
import pickle
import numpy as np
import scipy.io
from matplotlib import pyplot
from peakutils.plot import plot as pplot
def read_lable_dict():
    """Load training2017/REFERENCE.csv as a mapping of record name -> label.

    The first CSV column is the record name, the second its label; later
    duplicate names overwrite earlier ones, as with the original dict build.
    """
    labels = {}
    with open('training2017/REFERENCE.csv', mode='r') as infile:
        for row in csv.reader(infile):
            labels[row[0]] = row[1]
    return labels
# Interactive labelling loop (Python 2: uses raw_input and the print
# statement).  Shows each unlabelled ECG record as a plot, asks the user
# for a comma-separated annotation, and checkpoints the label dict to
# disk after every record so progress survives an interrupted session.
with open('all_labels.pickle', 'rb') as handle:
    all_labels = pickle.load(handle)
label_dict = read_lable_dict()
# NOTE(review): 'dir' shadows the builtin of the same name.
dir = 'training_data'
for filename in os.listdir(dir):
    if filename.endswith('.mat'):
        name = filename[:-4]  # strip the '.mat' extension
        if name not in all_labels:
            # Raises KeyError if the record is missing from REFERENCE.csv.
            label = label_dict[name]
            mat1 = scipy.io.loadmat('training_data/' + filename)
            y = mat1['val'][0]
            length = len(y)
            x = np.linspace(0, length - 1, length)
            pyplot.close("all")
            pyplot.figure(figsize=(10, 6))
            pplot(x, y, [0])
            pyplot.title('outliers')
            pyplot.show()
            # Expected input: four comma-separated values
            # (flip,left,middle,right); no validation is performed.
            var = raw_input("Please enter something: ")
            print "you entered", var
            var = var.split(",")
            all_labels[name] = {}
            all_labels[name]["flip"] = var[0]
            all_labels[name]["left"] = var[1]
            all_labels[name]["middle"] = var[2]
            all_labels[name]["right"] = var[3]
            all_labels[name]["label"] = label
            # Checkpoint after every record.
            with open('all_labels.pickle', 'wb') as handle:
                pickle.dump(all_labels, handle, protocol=pickle.HIGHEST_PROTOCOL)
|
## Generators
def make_generators_generator(g):
    """Generates all the "sub"-generators of the generator returned by
    the generator function g.

    The k-th yielded generator produces the first k items of a fresh run
    of g(), so the prefixes grow one item at a time.  Unlike the original
    implementation (which yielded range(1, i+1) and therefore only worked
    when g happened to yield 1, 2, 3, ...), this works for any generator
    function.

    >>> def ints_to(n):
    ...     for i in range(1, n + 1):
    ...         yield i
    ...
    >>> def ints_to_5():
    ...     for item in ints_to(5):
    ...         yield item
    ...
    >>> for gen in make_generators_generator(ints_to_5):
    ...     print("Next Generator:")
    ...     for item in gen:
    ...         print(item)
    ...
    Next Generator:
    1
    Next Generator:
    1
    2
    Next Generator:
    1
    2
    3
    Next Generator:
    1
    2
    3
    4
    Next Generator:
    1
    2
    3
    4
    5
    """
    def _prefix(k):
        # Yield only the first k items of a fresh run of g().
        for index, item in enumerate(g()):
            if index == k:
                return
            yield item

    # One prefix generator per item g() produces, lengths 1..len.
    for count, _ in enumerate(g()):
        yield _prefix(count + 1)
def permutations(lst):
    """Generates all permutations of sequence LST. Each permutation is a
    list of the elements in LST in a different order.

    The order of the permutations does not matter.

    >>> sorted(permutations([1, 2, 3]))
    [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 1, 2], [3, 2, 1]]
    >>> type(permutations([1, 2, 3]))
    <class 'generator'>
    >>> sorted(permutations((10, 20, 30)))
    [[10, 20, 30], [10, 30, 20], [20, 10, 30], [20, 30, 10], [30, 10, 20], [30, 20, 10]]
    >>> sorted(permutations("ab"))
    [['a', 'b'], ['b', 'a']]
    """
    # Fix: the original wrapped the recursion in a bare ``except:`` whose
    # handler yielded a bare element (not a list) — dead for any valid
    # sequence input and wrong if it ever fired.  The handler is removed
    # and the positional bookkeeping replaced with list slicing.
    if not lst:
        # Exactly one permutation of the empty sequence: the empty list.
        yield []
        return
    first = lst[0]
    # Insert the first element at every position of every permutation of
    # the remaining elements.
    for rest in permutations(lst[1:]):
        for pos in range(len(rest) + 1):
            yield rest[:pos] + [first] + rest[pos:]
|
import pytest
import json_provider
import rest_client
from data import Valid_User, Invalid_User
# @pytest.fixture()
# #user (with email & password)
@pytest.fixture(scope="session")
def valid_user():
return Valid_User
@pytest.fixture(scope="session")
def json():
return json_provider
@pytest.fixture(scope="session")
def client():
return rest_client
@pytest.fixture(scope="session")
def login(client):
return client.login(json_provider.login_json(Valid_User.email, Valid_User.password)).json()['token']
@pytest.fixture(scope="function")
def create_issue(client, login, request):
try:
description = request.param['description']
except (AttributeError, KeyError):
description = "fixture description"
try:
summary = request.param['summary']
except (AttributeError, KeyError):
summary = "fixture summary"
try:
priority = request.param['priority']
except (AttributeError, KeyError):
priority = 1
response = client.create_issue(json_provider.create_issue_json(summary, description, priority), login)
return response
@pytest.fixture(scope="function")
def delete_issue(create_issue, client, login):
yield delete_issue
client.delete_issue(create_issue.json()['_id'], login)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from bs4 import BeautifulSoup #required to parse html
import requests #required to make request
# Read the list of URLs (first CSV column) and print the text of every
# <p> tag on each page.
with open(r'C:\Users\Shravya.Shanmukh\Desktop\URL.csv', 'r') as f:
    csv_raw_cont = f.read()

# Split into lines, dropping every empty line.  (The original called
# split_csv.remove('') which only removed the first empty line and raised
# ValueError when the file had none.)
split_csv = [line for line in csv_raw_cont.split('\n') if line]

# Column separator and the index of the URL column are loop invariants,
# so they are set once instead of on every iteration.
separator = ","
url_row_index = 0  # in our csv example file the url is the first column

for each in split_csv:
    # get the url
    url = each.split(separator)[url_row_index]
    # fetch content from server
    html = requests.get(url).content
    # parse fetched content
    soup = BeautifulSoup(html, 'html.parser')
    # print every paragraph's text
    for link in soup.find_all('p'):
        print(link.text)
|
import json
class Config:
    """Runtime configuration loaded from a JSON file.

    Every top-level key of the JSON document becomes an attribute of the
    instance; ``telegram`` and ``discord`` always exist (default ``{}``).
    """

    def __init__(self):
        self.telegram = {}
        self.discord = {}

    def loads(self, config_file=None):
        """Read ``config_file`` (JSON) and apply it via :meth:`update`.

        Terminates the process on a missing/unreadable file, invalid JSON,
        or an empty configuration.
        """
        configures = {}
        if config_file:
            try:
                with open(config_file) as f:
                    data = f.read()
                configures = json.loads(data)
            except Exception as e:
                print(e)
                # Fix: exit with a non-zero status on the error path
                # (the original used exit(0), i.e. a success code).
                exit(1)
        if not configures:
            print("config json file error!")
            exit(1)
        self.update(configures)

    def update(self, update_fields):
        """Apply a dict of settings, exposing each top-level key as an attribute."""
        self.telegram = update_fields.get("telegram", {})
        self.discord = update_fields.get("discord", {})
        # Parse the config data as a dict and expose every remaining
        # top-level key as an attribute of this Config instance.
        for k, v in update_fields.items():
            setattr(self, k, v)


# Module-level singleton shared by importers.
config = Config()
|
import etherscan.accounts as accounts
from etherscan.blocks import Blocks
from etherscan.contracts import Contract
from etherscan.proxies import Proxies
import etherscan.stats as stats
import etherscan.tokens as tokens
import etherscan.transactions as transactions
import json
from pandas.io.json import json_normalize
import pandas as pd
with open("./key.txt") as k:
key = k.read()
address = '0x2a65aca4d5fc5b5c859090a6c34d164135398226'
#accounts
api = accounts.Account(address=address, api_key=key)
#get_balance
balance = api.get_balance()
print(balance)
#get_transaction_page
tran_page = api.get_transaction_page(page=1, offset=10)
t = json_normalize(tran_page)
print(t)
#get_all_transactions
trans = api.get_all_transactions(offset=10)
#get_transaction_page_erc20
trans_erc20 = api.get_transaction_page(erc20=True)
t_erc20 = json_normalize(trans_erc20)
print(t_erc20)
#get_blocks_mined_page
bl_mined_page = api.get_blocks_mined_page(page=1, offset=10)
bmp = json_normalize(bl_mined_page)
print(bmp)
#get_all_blocks_mined
blocks_mined = api.get_all_blocks_mined()
#get multiple balance
address = ['0xbb9bc244d798123fde783fcc1c72d3bb8c189413', '0xddbd2b932c763ba5b1b7ae3b362eac3e8d40121a']
api = accounts.Account(address=address, api_key=key)
balances = api.get_balance_multiple()
print(balances)
#blocks
api_b = Blocks(api_key=key)
reward = api_b.get_block_reward(2165403)
r = json_normalize(reward)
print(r)
print(r['blockReward'])
uncle_r = json_normalize(r['uncles'][0])
print(uncle_r)
#contracts
address = '0x6e03d9cce9d60f3e9f2597e13cd4c54c55330cfd'
api_c = Contract(address=address, api_key=key)
#get_abi
abi = api_c.get_abi()
with open('abi.json', 'w') as fd:
fd.write(abi)
df_abi = pd.read_json('abi.json')
print(df_abi)
#get_sourcecode
sourcecode = api_c.get_sourcecode()
sc_norm = json_normalize(sourcecode)
df_sc = pd.DataFrame(sc_norm)
print(df_sc)
#proxies
api_p = Proxies(api_key=key)
#gas price
price = api_p.gas_price()
print(price)
#get block by number
bl = api_p.get_block_by_number(0x57b414)
bl_norm = json_normalize(bl)
bl_norm_trans = bl_norm['transactions'].apply(lambda x: json_normalize(x))
print(bl_norm)
print(bl_norm_trans[0])
#get block transaction count by number
tx_count = api_p.get_block_transaction_count_by_number(block_number='0x57b414')
print(int(tx_count, 16))
#get code
code = api_p.get_code('0x48f775efbe4f5ece6e0df2f7b5932df56823b990')
print(code)
#get most recent block
rblock = api_p.get_most_recent_block()
print(int(rblock, 16))
#get storage
value = api_p.get_storage_at('0x6e03d9cce9d60f3e9f2597e13cd4c54c55330cfd', 0x1)
print(value)
#get transaction by blocknumber index
transaction = api_p.get_transaction_by_blocknumber_index(block_number='0x57b414', index='0x2')
norm_transaction = json_normalize(transaction)
print(norm_transaction)
#get transaction by hash
TX_HASH = '0xb11f622f0f58d8648bd456d751329de27b402fbc974167cb468bbc260d966f57'
tran_by_hash = api_p.get_transaction_by_hash(tx_hash=TX_HASH)
norm_tran_by_hash = json_normalize(tran_by_hash)
print(norm_tran_by_hash)
#get transaction count
count = api_p.get_transaction_count('0x7896f0cea889964c00fb47fcddf89eab42eb9df8')
print(int(count, 16))
#get transaction receipt
receipt = api_p.get_transaction_receipt('0x498abfd4aac86b970b54b6fea4fa32948a6838f33bedf6aae55eaf31c6acce94')
norm_receipt = json_normalize(receipt)
print(norm_receipt)
#get uncles by blocknumber index 0x210A9B
uncles = api_p.get_uncle_by_blocknumber_index(block_number='0x210A9B', index='0x1')
print(uncles['uncles'])
#stats
api_s = stats.Stats(api_key=key)
#get ether last price
lastprice = api_s.get_ether_last_price()
print(lastprice)
#get total ether supply
total_supply = api_s.get_total_ether_supply()
print(total_supply)
#tokens
contract_address = '0x57d90b64a1a57749b0f932f1a3395792e12e7055'
api_t = tokens.Tokens(contract_address=contract_address, api_key=key)
#token balance
address = '0xe04f27eb70e025b78871a2ad7eabe85e61212761'
tb = api_t.get_token_balance(address=address)
print(tb)
#total supply of tokens
total_supply_t = api_t.get_total_supply()
print(total_supply_t)
#transactions
api_tran = transactions.Transactions(api_key=key)
#get status
TX_HASH = '0xb11f622f0f58d8648bd456d751329de27b402fbc974167cb468bbc260d966f57'
status = api_tran.get_status(tx_hash=TX_HASH)
print(status)
#receipt status
receipt_status = api_tran.get_tx_receipt_status(tx_hash=TX_HASH)
print(receipt_status) |
# Read integers until their running total returns to zero, then print the
# sum of squares of everything read (the first value included).
a = int(input())
b = a             # running total of all inputs
result = a ** 2   # running sum of squares
while b != 0:
    a = int(input())
    b += a
    result += a ** 2
    # Fix: the original also had "if b == 0: break" here, which exactly
    # duplicates the while condition — removed as dead code.
print(result)
|
import cv2
# Estimate image sharpness from the variance of the Sobel gradient
# magnitude: blurry frames have weak edges, hence low gradient variance.
# img = cv2.imread('./frame_imgs/62清晰度异常/0.jpg', cv2.IMREAD_GRAYSCALE)
# img = cv2.imread('./frame_imgs/62清晰度异常/10.jpg', cv2.IMREAD_GRAYSCALE)
img = cv2.imread('./frame_imgs/116亮度异常/0.jpg', cv2.IMREAD_GRAYSCALE)
# img = cv2.imread('./frame_imgs/116亮度异常/10.jpg', cv2.IMREAD_GRAYSCALE)
# Horizontal and vertical first derivatives in 16-bit signed form (avoids
# overflow), then converted back to 8-bit absolute values.
x = cv2.Sobel(img, cv2.CV_16S, 1, 0)
y = cv2.Sobel(img, cv2.CV_16S, 0, 1)
absX = cv2.convertScaleAbs(x)
absY = cv2.convertScaleAbs(y)
# Equal-weight blend of both gradient directions.
dst = cv2.addWeighted(absX, 0.5, absY, 0.5, 0)
print(dst.var())
# Threshold tentatively set to 1450 (empirically chosen).
if dst.var() > 1450:
    print('清晰度正常')
else:
    print('清晰度异常')
cv2.imshow('absX', absX)
cv2.imshow('absY', absY)
cv2.imshow('result', dst)
cv2.waitKey(0)
# Press any key to close the windows.
cv2.destroyAllWindows()
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait # 显式等待
from lxml import etree
import os
import requests
import re
import time
class Wz_spider():
    """Selenium-driven scraper for the King of Glory wallpaper gallery:
    pages through https://pvp.qq.com/web201605/wallpaper.shtml, extracts
    each wallpaper's name and download URL, and saves the images to disk.
    """
    # Path to the local ChromeDriver binary.
    driver_path = r'E:\ChromeDriver\chromedriver.exe'
    def __init__(self):
        # self.option = webdriver.ChromeOptions()
        # self.option.add_argument('headless') options=self.option
        self.driver = webdriver.Chrome(executable_path=self.driver_path)
        self.url = 'https://pvp.qq.com/web201605/wallpaper.shtml'
        self.head = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3724.8 Safari/537.36"}
        # Loop flag for Run(); cleared when paging fails (last page reached).
        self.x = True
    def Get_bz_url_list(self,data):
        """Parse a page's HTML, sanitise wallpaper names, and hand the
        (url, name) lists to Save_data.  Returns None.
        """
        data = etree.HTML(data)
        bizhi_name_list2 = []
        bizhi_name_list1 = data.xpath('//div[@id="Work_List_Container_267733"]/div/img/@alt')
        bizhi_url_list = data.xpath('//div[@id="Work_List_Container_267733"]/div/ul/li[5]/a/@href')
        print(bizhi_name_list1)
        for name in bizhi_name_list1:
            # Replace characters that are invalid in Windows filenames.
            name = re.sub(r"[\/?:*<>|]", 'X', name)
            bizhi_name_list2.append(name)
        self.Save_data(bizhi_url_list,bizhi_name_list2)
    def Save_data(self,url_list,name_list):
        """Download each wallpaper URL and write it as <name><index>-.png."""
        os.chdir(r'E:\pycharm\爬虫代码\小实战\se实现爬王者壁纸\Tupian')
        index = 0
        for url in url_list:
            # print(name_list[index])
            data = requests.get(url,headers = self.head).content
            with open(name_list[index]+str(index)+'-.png','wb')as f:
                f.write(data)
            index += 1
    def Run(self):
        """Main loop: open the gallery, scrape each page, click 'next'
        until paging fails, then stop.
        """
        # Open the gallery page.
        self.driver.get(self.url)
        while self.x:
            time.sleep(3)
            try:
                self.driver.switch_to.window(self.driver.window_handles[-1])
                WebDriverWait(self.driver,10).until(
                    lambda d: d.find_element_by_xpath('//div[@id="Work_List_Container_267733"]/div[@class="p_newhero_item"]')
                )
                source = self.driver.page_source
                # Extract the current page's wallpaper links and names
                # (Get_bz_url_list also saves them).
                self.Get_bz_url_list(source)
                # Advance to the next page.
                nextTag = WebDriverWait(self.driver,10).until(
                    lambda d: d.find_element_by_xpath('//div[@class="pagingPanel"]/a[@class="downpage"]')
                )
                nextTag.click()
            except:
                self.x = False
                try:
                    # NOTE(review): Get_bz_url_list returns None, so this
                    # tuple unpacking always raises TypeError and falls into
                    # the except below — this final-page branch is dead code.
                    source = self.driver.page_source
                    bizhi_url_list, bizhi_name_list = self.Get_bz_url_list(source)
                    self.Save_data(bizhi_url_list, bizhi_name_list)
                except:
                    print("爬完啦!")
if __name__ == '__main__':
    wz = Wz_spider()
    wz.Run()
from select import select
from errno import ECONNREFUSED, ENOENT, EAGAIN
from time import sleep
from math import isnan
from io import BytesIO
import logging
import msgpack
import socket
import pyev
from fluxmonitor.player.main_controller import MainController
from fluxmonitor.err_codes import (
SUBSYSTEM_ERROR, NO_RESPONSE, RESOURCE_BUSY, UNKNOWN_COMMAND)
from fluxmonitor.storage import Storage, metadata
from fluxmonitor.config import CAMERA_ENDPOINT
from fluxmonitor.player import macro
from .base import CommandMixIn, DeviceOperationMixIn
logger = logging.getLogger(__name__)
class CameraInterface(object):
    """Client for the camera subsystem over its unix-domain socket.

    Requests are msgpack-encoded command tuples; replies are msgpack
    objects, optionally followed by a raw binary payload (the image).
    Each async_* helper sends its command, installs a one-off pyev read
    watcher, and invokes the supplied callback when the reply arrives.
    """
    def __init__(self, kernel):
        try:
            self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            self.sock.connect(CAMERA_ENDPOINT)
            self.unpacker = msgpack.Unpacker()
            # Placeholder callback; each async_* call swaps in its own.
            self.watcher = kernel.loop.io(self.fileno(), pyev.EV_READ,
                                          lambda *args: None)
        except socket.error as err:
            if err.args[0] in [ECONNREFUSED, ENOENT]:
                # Camera service not running / socket file missing.
                raise RuntimeError(SUBSYSTEM_ERROR, NO_RESPONSE)
            else:
                raise
    def fileno(self):
        """File descriptor of the camera socket (for event watchers)."""
        return self.sock.fileno()
    def recv_object(self):
        """Read from the socket and return the first decoded msgpack object."""
        buf = self.sock.recv(4096)
        if buf:
            self.unpacker.feed(buf)
            for payload in self.unpacker:
                return payload
        else:
            # Zero-byte read: the camera service closed the connection.
            raise SystemError(SUBSYSTEM_ERROR, NO_RESPONSE)
    def recv_binary(self, length):
        """Acknowledge the binary header and read exactly ``length`` bytes.

        Returns a BytesIO rewound to the start of the data.
        """
        self.sock.send("\x00")
        l = 0
        f = BytesIO()
        while l < length:
            try:
                buf = self.sock.recv(min(length - l, 4096))
            except socket.error:
                raise SystemError("Camera service broken pipe")
            if buf:
                f.write(buf)
                l += len(buf)
            else:
                raise SystemError("Camera service broken pipe")
        f.seek(0)
        return f
    def async_oneshot(self, callback):
        """Capture one image; callback receives (mimetype, length, stream)."""
        def overlay(w, r):
            try:
                w.stop()
                callback(self.end_oneshot())
            except Exception:
                logger.exception("Oneshot error")
        self.begin_oneshot()
        self.watcher.callback = overlay
        self.watcher.start()
    def begin_oneshot(self):
        # Command 0: take a single shot.
        self.sock.send(msgpack.packb((0, 0)))
    def end_oneshot(self):
        args = self.recv_object()
        if args[0] == "binary":
            mimetype = args[1]
            length = args[2]
            # NOTE(review): the nested int(int(...)) is redundant.
            return mimetype, length, self.recv_binary(int(int(args[2])))
        elif args[0] == "er":
            raise RuntimeError(*args[1:])
        else:
            logger.error("Got unknown response from camera service: %s", args)
            raise SystemError("UNKNOWN_ERROR")
    def async_check_camera_position(self, callback):
        """Ask the service to check camera position; callback gets a message string."""
        def overlay(w, r):
            try:
                w.stop()
                callback(self.end_check_camera_position())
            except Exception:
                logger.exception("Check camera position error")
        self.begin_check_camera_position()
        self.watcher.callback = overlay
        self.watcher.start()
    def begin_check_camera_position(self):
        # Command 1: check camera position.
        self.sock.send(msgpack.packb((1, 0)))
    def end_check_camera_position(self):
        return " ".join(self.recv_object())
    def async_get_bias(self, callback):
        """Request the calibration bias; callback gets a space-joined string."""
        def overlay(w, r):
            try:
                w.stop()
                callback(self.end_get_bias())
            except Exception:
                logger.exception("Get bias error")
        self.begin_get_bias()
        self.watcher.callback = overlay
        self.watcher.start()
    def begin_get_bias(self):
        # Command 2: compute bias.
        self.sock.send(msgpack.packb((2, 0)))
    def end_get_bias(self):
        return " ".join(("%s" % i for i in self.recv_object()))
    def async_compute_cab(self, step, callback):
        """Run one calibration step ('O', 'L' or 'R'); callback gets (step, result)."""
        def overlay(w, r):
            try:
                w.stop()
                callback(step, self.end_compute_cab())
            except Exception:
                logger.exception("Compute cab error")
        self.begin_compute_cab(step)
        self.watcher.callback = overlay
        self.watcher.start()
    def begin_compute_cab(self, step):
        # Commands 3/4/5 correspond to calibration steps 'O', 'L', 'R'.
        if step == 'O':
            self.sock.send(msgpack.packb((3, 0)))
        elif step == 'L':
            self.sock.send(msgpack.packb((4, 0)))
        elif step == 'R':
            self.sock.send(msgpack.packb((5, 0)))
    def end_compute_cab(self):
        return " ".join(("%s" % i for i in self.recv_object()))
    def close(self):
        """Close the connection to the camera service."""
        self.sock.close()
class ScanTask(DeviceOperationMixIn, CommandMixIn):
    """Scan operation task: coordinates the mainboard (via G-code macros
    for laser switching and E-axis steps) and the camera service (oneshot
    images, position checks, calibration).

    Commands arrive through dispatch_cmd(); long-running work is done
    asynchronously with the ``busying`` flag guarding re-entry.
    """
    st_id = -2
    mainboard = None
    # E-axis distance moved per scan step (see scan_next/scan_backward).
    step_length = 0.45
    busying = False
    _macro = None
    def __init__(self, stack, handler, camera_id=None):
        # NOTE(review): camera_id is accepted but never used.
        self.camera = CameraInterface(stack)
        super(ScanTask, self).__init__(stack, handler)
        def on_mainboard_ready(ctrl):
            # Initial setup: home, relative mode, cold-extrude allowed,
            # low Y current, select tool 2; then acknowledge the client.
            self.busying = False
            for cmd in ("G28", "G91", "M302", "M907 Y0.4", "T2"):
                ctrl.send_cmd(cmd)
            handler.send_text("ok")
        def on_mainboard_empty(sender):
            if self._macro:
                self._macro.on_command_empty(self)
        def on_mainboard_sendable(sender):
            if self._macro:
                self._macro.on_command_sendable(self)
        def on_mainboard_ctrl(sender, data):
            if self._macro:
                self._macro.on_ctrl_message(self, data)
        self.mainboard = MainController(
            self._sock_mb.fileno(), bufsize=14,
            empty_callback=on_mainboard_empty,
            sendable_callback=on_mainboard_sendable,
            ctrl_callback=on_mainboard_ctrl)
        self.mainboard.bootstrap(on_mainboard_ready)
        self.busying = True
    def make_gcode_cmd(self, cmd, callback=None):
        """Run a single G-code command as a macro; optional completion callback."""
        def cb():
            self._macro = None
            if callback:
                callback()
        self._macro = macro.CommandMacro(cb, (cmd, ))
        self._macro.start(self)
    def dispatch_cmd(self, handler, cmd, *args):
        """Route a client command string to its handler.

        Raises RESOURCE_BUSY while a macro or async operation is running,
        UNKNOWN_COMMAND for anything unrecognised.
        """
        if self._macro or self.busying:
            raise RuntimeError(RESOURCE_BUSY)
        elif cmd == "oneshot":
            self.oneshot(handler)
        elif cmd == "scanimages":
            self.take_images(handler)
        elif cmd == "scan_check":
            self.scan_check(handler)
        elif cmd == "get_cab":
            self.get_cab(handler)
        elif cmd == "calibrate":
            self.async_calibrate(handler)
        elif cmd == "scanlaser":
            # Optional parameter contains 'l' and/or 'r' for which lasers on.
            param = args[0] if args else ""
            l_on = "l" in param
            r_on = "r" in param
            def cb():
                handler.send_text("ok")
            self.change_laser(left=l_on, right=r_on, callback=cb)
        elif cmd == "set":
            if args[0] == "steplen":
                self.step_length = float(args[1])
                handler.send_text("ok")
            else:
                raise RuntimeError(UNKNOWN_COMMAND, args[1])
        elif cmd == "scan_backward":
            def cb():
                self._macro = None
                handler.send_text("ok")
            cmd = "G1 F500 E-%.5f" % self.step_length
            self._macro = macro.CommandMacro(cb, (cmd, ))
            self._macro.start(self)
        elif cmd == "scan_next":
            def cb():
                self._macro = None
                handler.send_text("ok")
            cmd = "G1 F500 E%.5f" % self.step_length
            self._macro = macro.CommandMacro(cb, (cmd, ))
            self._macro.start(self)
        elif cmd == "quit":
            self.stack.exit_task(self)
            handler.send_text("ok")
        else:
            logger.debug("Can not handle: '%s'" % cmd)
            raise RuntimeError(UNKNOWN_COMMAND)
    def change_laser(self, left, right, callback=None):
        """Switch the scan lasers via X1E<flag> (bit 0 = left, bit 1 = right).

        Without a callback this blocks, pumping mainboard messages until
        the macro completes.
        """
        def cb():
            self._macro = None
            if callback:
                callback()
        flag = (1 if left else 0) + (2 if right else 0)
        self._macro = macro.CommandMacro(cb, ("X1E%i" % flag, ))
        self._macro.start(self)
        if not callback:
            # Synchronous mode: wait for the macro to finish.
            while self._macro:
                rl = select((self._sock_mb, ), (), (), 1.0)[0]
                if rl:
                    self.on_mainboard_message(self._watcher_mb, 0)
    def scan_check(self, handler):
        """Ask the camera service to verify the camera position."""
        def callback(m):
            self.busying = False
            handler.send_text(m)
        self.camera.async_check_camera_position(callback)
        self.busying = True
    def async_calibrate(self, handler):
        """Iteratively align the turntable using the camera bias, then run
        the three-step ('O'/'L'/'R') laser calibration and store the result.
        """
        # this is measure by data set
        table = {8: 60, 7: 51, 6: 40, 5: 32, 4: 26, 3: 19, 2: 11, 1: 6, 0: 1}
        compute_cab_ref = (("O", False, False),
                           ("L", True, False),
                           ("R", False, True))
        # Shared mutable state for the callback chain: retry counter,
        # acceptance threshold, collected calibration values.
        data = {"flag": 0, "thres": 0.2, "calibrate_param": []}
        def on_loop(output=None):
            # output set -> finished (success or failure); otherwise retry
            # bias measurement up to 10 times before giving up.
            if output:
                self.change_laser(left=False, right=False)
                self.busying = False
                handler.send_text('ok ' + output)
            elif data["flag"] < 10:
                data["flag"] += 1
                self.camera.async_get_bias(on_get_bias)
            else:
                self.change_laser(left=False, right=False)
                self.busying = False
                handler.send_text('ok fail chess')
        def on_compute_cab(step, m):
            m = m.split()[1]
            data["calibrate_param"].append(m)
            if len(data["calibrate_param"]) < 3:
                begin_compute_cab()
            else:
                if 'fail' in data["calibrate_param"]:
                    output = ' '.join(data["calibrate_param"])
                    on_loop('fail laser ' + output)
                elif all(abs(float(r) - float(data["calibrate_param"][0])) < 72
                         for r in data["calibrate_param"][1:]):
                    # so naive check
                    s = Storage('camera')
                    s['calibration'] = ' '.join(
                        map(lambda x: str(round(float(x))),
                            data["calibrate_param"]))
                    output = ' '.join(data["calibrate_param"])
                    on_loop(output)
                else:
                    output = ' '.join(data["calibrate_param"])
                    on_loop('fail laser ' + output)
        def begin_compute_cab():
            # Next calibration step is indexed by how many results we have.
            step, l, r = compute_cab_ref[len(data["calibrate_param"])]
            logger.debug("calibrate laser step %s", step)
            self.change_laser(left=l, right=r)
            self.camera.async_compute_cab(step, on_compute_cab)
        def on_get_bias(m):
            data["flag"] += 1
            w = float(m.split()[1])
            logger.debug("Camera calibrate w = %s", w)
            if isnan(w):
                on_loop()
            else:
                if abs(w) < data["thres"]:  # good enough to calibrate
                    begin_compute_cab()
                elif w < 0:
                    # Rotate by a table-mapped amount to reduce the bias.
                    self.make_gcode_cmd(
                        "G1 F500 E{}".format(table.get(round(abs(w)), 60)),
                        on_loop)
                elif w > 0:
                    self.make_gcode_cmd(
                        "G1 F500 E-{}".format(table.get(round(abs(w)), 60)),
                        on_loop)
                # Relax the acceptance threshold on every attempt.
                data["thres"] += 0.05
        on_loop()
        self.busying = True
    def get_cab(self, handler):
        """Send the stored calibration string (default '320 320 320')."""
        s = Storage('camera')
        a = s.readall('calibration')
        if a is None:
            a = '320 320 320'
        handler.send_text("ok " + a)
    def oneshot(self, handler):
        """Capture a single image and stream it back to the client."""
        def sent_callback(h):
            self.busying = False
            handler.send_text("ok")
        def recv_callback(result):
            mimetype, length, stream = result
            handler.async_send_binary(mimetype, length, stream, sent_callback)
        self.camera.async_oneshot(recv_callback)
        self.busying = True
    def take_images(self, handler):
        """Capture the three scan images in sequence: left laser, right
        laser, then no laser; each is streamed to the client before the
        next laser change (with a short settle delay).
        """
        def cb_complete(h):
            self.busying = False
            handler.send_text("ok")
        def cb_shot3_ready(result):
            mimetype, length, stream = result
            handler.async_send_binary(mimetype, length, stream, cb_complete)
        def cb_shot3(h):
            self.camera.async_oneshot(cb_shot3_ready)
        def cb_shot2_ready(result):
            mimetype, length, stream = result
            self.change_laser(left=False, right=False,
                              callback=lambda: sleep(0.04))
            handler.async_send_binary(mimetype, length, stream, cb_shot3)
        def cb_shot2(h):
            self.camera.async_oneshot(cb_shot2_ready)
        def cb_shot1_ready(result):
            mimetype, length, stream = result
            self.change_laser(left=False, right=True,
                              callback=lambda: sleep(0.04))
            handler.async_send_binary(mimetype, length, stream, cb_shot2)
        def cb_shot1():
            self.camera.async_oneshot(cb_shot1_ready)
        self.change_laser(left=True, right=False, callback=cb_shot1)
        self.busying = True
    def on_mainboard_message(self, watcher, revent):
        """Pump incoming mainboard data; tear the task down on a broken pipe."""
        try:
            self.mainboard.handle_recv()
        except IOError as e:
            if e.errno == EAGAIN:
                # Spurious wakeup; nothing to read yet.
                return
            logger.exception("Mainboard connection broken")
            self.handler.send_text("error SUBSYSTEM_ERROR")
            self.stack.exit_task(self)
        except RuntimeError:
            pass
        except Exception:
            logger.exception("Unhandle Error")
    def on_timer(self, watcher, revent):
        """Periodic heartbeat: publish this task's status to device metadata."""
        metadata.update_device_status(self.st_id, 0, "N/A",
                                      self.handler.address)
    def clean(self):
        """Shutdown: lasers off, close mainboard and camera, reset status."""
        try:
            if self.mainboard:
                if self.mainboard.ready:
                    # X1E0: turn both lasers off before closing.
                    self.mainboard.send_cmd("X1E0")
                self.mainboard.close()
                self.mainboard = None
        except Exception:
            logger.exception("Mainboard error while quit")
        if self.camera:
            self.camera.close()
            self.camera = None
        metadata.update_device_status(0, 0, "N/A", "")
|
from setuptools import setup, find_packages
import re
import ast
# version parsing from __init__ pulled from Flask's setup.py
# https://github.com/mitsuhiko/flask/blob/master/setup.py
_version_re = re.compile(r'__version__\s+=\s+(.*)')

with open('q2_plotly/__init__.py', 'rb') as f:
    hit = _version_re.search(f.read().decode('utf-8'))
    if hit is None:
        # Fail with a clear message instead of the cryptic AttributeError
        # the unchecked .group(1) call used to raise when no match exists.
        raise RuntimeError(
            "Unable to find __version__ in q2_plotly/__init__.py")
    version = str(ast.literal_eval(hit.group(1)))

setup(
    name="q2-plotly",
    version=version,
    packages=find_packages(),
    # Dependencies go in here
    # plotly needs to be >1.12 for offline, >1.12.9 for native drop-down menus
    install_requires=['qiime >= 2.0.6', 'pandas', 'q2templates >= 0.0.6',
                      'plotly >= 1.12.9'],
    author="Michael Hall",
    author_email="mike.hall@dal.ca",
    description="Visualizations of QIIME2 artifacts using the Plotly library.",
    entry_points={
        "qiime.plugins":
        ["q2-plotly=q2_plotly.plugin_setup:plugin"]
    },
    # If you are creating a visualizer, all template assets must be included in
    # the package source, if you are not using q2templates this can be removed
    package_data={
        "q2_plotly": ["assets/index.html"]
    }
)
|
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 25 11:44:42 2020
@author: Admin
"""
import pandas as pd
import numpy as np
import seaborn as sns
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score,confusion_matrix
# Homework-style analysis of the CrashTest data set: descriptive stats
# (Q13-Q16), then KNN (k=3 and k=2) and logistic-regression classifiers
# for CarType.  NOTE(review): in script mode the bare .describe() /
# crosstab expressions compute results that are discarded (only visible
# in an interactive session).
Train_Data= pd.read_csv('CrashTest_TrainData.csv')
Test_Data=pd.read_csv('CrashTest_TestData.csv')
Train_Data.describe()
###Q13 What is the difference between third quartile values of the
# variable ManBI from Train_Data and Test_Data?
Train_Data['ManBI'].describe()
Test_Data['ManBI'].describe()
#75%        3.417500
#75%       2.50000
# ans=(3.417500-2.50000=0.9175)
###################################################
##Q14 How many distinct car types are there in the Train_Data?
pd.crosstab(Train_Data['CarType'],columns= 'count')
#col_0      count
#CarType
#Hatchback     50
#SUV           30
#Ans=2
############################################
#Q15 How many missing values are there in Train_Data?
Train_Data.isnull().sum()
Test_Data.isnull().sum()
#Ans=3
##############################################
#Q16What is the proportion of car types in the Test_Data?
pd.crosstab(Test_Data['CarType'],columns= 'count')
#Ans=50-50
#########################################
# Drop rows with missing values, split features/target, and encode the
# target labels as 0 (Hatchback) / 1 (SUV).
train_data=Train_Data.dropna(axis=0)
train_x1=train_data.drop(['CarID','CarType'],axis=1,inplace=False)
train_y1=train_data['CarType']
train_y1=train_y1.map({'Hatchback':0,'SUV':1})
test_data=Test_Data.dropna(axis=0)
test_x1=test_data.drop(['CarID','CarType'],axis=1,inplace=False)
test_y1=test_data['CarType']
test_y1=test_y1.map({'Hatchback':0,'SUV':1})
# KNN with k=3.
model1=KNeighborsClassifier(n_neighbors=3)
model1_KNN=model1.fit(train_x1,train_y1)
prediction_model1=model1.predict(test_x1)
accuracy_score_model1=accuracy_score(test_y1,prediction_model1)
misclassified_sample=np.where(prediction_model1 != test_y1)
print("misclassified sample: %d" %(prediction_model1!=test_y1).sum())
######################33
# KNN with k=2 for comparison.
model2=KNeighborsClassifier(n_neighbors=2)
model2_KNN=model2.fit(train_x1,train_y1)
prediction_model2=model2.predict(test_x1)
accuracy_score_model2=accuracy_score(test_y1,prediction_model2)
#################################3
# Logistic-regression baseline.
from sklearn.linear_model import LogisticRegression
lgr=LogisticRegression()
lgr.fit(train_x1,train_y1)
predict_lgr=lgr.predict(test_x1)
accuracy_lgr=accuracy_score(test_y1,predict_lgr)
|
from urllib.parse import parse_qs
from oic.utils.authn.user import UsernamePasswordMako
from oic.utils.authn.user import logger
from oic.utils.http_util import SeeOther
from oic.utils.http_util import Unauthorized
__author__ = "danielevertsson"
class JavascriptFormMako(UsernamePasswordMako):
    """
    Do user authentication.
    This is using the normal username password form in a WSGI environment using Mako as template system.
    """
    def verify(self, request, **kwargs):
        """
        Verify that the given username and password was correct.

        Authentication succeeds when the form carries
        ``login_parameter=logged_in`` (set by client-side Javascript);
        on success a cookie is set and the user is redirected.

        :param request: Either the query part of a URL a urlencoded body of a HTTP message or a parse such.
        :param kwargs: Catch whatever else is sent.
        :return: redirect back to where ever the base applications wants the user after authentication.
        """
        logger.debug("verify(%s)" % request)
        if isinstance(request, str):
            _dict = parse_qs(request)
        elif isinstance(request, dict):
            _dict = request
        else:
            raise ValueError("Wrong type of input")
        logger.debug("dict: %s" % _dict)
        logger.debug("passwd: %s" % self.passwd)
        # verify username and password
        try:
            # Absence of the marker means the Javascript never ran
            # (e.g. scripting disabled) -> reject.
            assert _dict["login_parameter"][0] == "logged_in"
        except (AssertionError, KeyError):
            return (
                Unauthorized("You are not authorized. Javascript not executed"),
                False,
            )
        else:
            # NOTE(review): cookie name/value "diana"/"upm" are hard-coded.
            cookie = self.create_cookie("diana", "upm")
            # Recover the original query parameters, falling back to the
            # multi-auth cookie when they are not in the form data.
            try:
                _qp = _dict["query"][0]
            except KeyError:
                _qp = self.get_multi_auth_cookie(kwargs["cookie"])
            try:
                return_to = self.generate_return_url(kwargs["return_to"], _qp)
            except KeyError:
                return_to = self.generate_return_url(self.return_to, _qp)
            return SeeOther(return_to, headers=[cookie]), True
|
from onmt.translate.Translator import Translator
from onmt.translate.TranslatorMultimodal import TranslatorMultimodal
from onmt.translate.Translation import Translation, TranslationBuilder
from onmt.translate.Beam import Beam, GNMTGlobalScorer

# __all__ must contain the *names* of the public attributes as strings;
# listing the objects themselves makes ``from onmt.translate import *``
# fail with "TypeError: attribute name must be string".
__all__ = ['Translator', 'TranslatorMultimodal', 'Translation',
           'Beam', 'GNMTGlobalScorer', 'TranslationBuilder']
|
import sys
import os
import glob
import time
import unittest
import gevent.testing as greentest
from gevent.testing import util
# Directory containing this test module; example scripts are resolved
# relative to it.
this_dir = os.path.dirname(__file__)
def _find_files_to_ignore():
    """Return basenames of example scripts that must not get an
    auto-generated test: a fixed list of servers/long-runners plus every
    example that already has an explicit test__example_*.py file."""
    ignored = [
        'wsgiserver.py',
        'wsgiserver_ssl.py',
        'webproxy.py',
        'webpy.py',
        'unixsocket_server.py',
        'unixsocket_client.py',
        'psycopg2_pool.py',
        'geventsendfile.py',
    ]
    previous_dir = os.getcwd()
    os.chdir(this_dir)
    try:
        # Strip the 'test__example_' prefix (14 chars) to recover each
        # example's own filename.
        ignored.extend(name[14:] for name in glob.glob('test__example_*.py'))
    finally:
        os.chdir(previous_dir)
    return ignored
# Allowed runtime window (min, max seconds) for examples, with per-file
# overrides.  NOTE(review): default_time_range appears unused in this
# module — _AbstractTestMixin defines its own (2, 4) default.
default_time_range = (2, 4)
time_ranges = {
    'concurrent_download.py': (0, 30),
    'processes.py': (0, 4)
}
class _AbstractTestMixin(util.ExampleMixin):
    """Base for generated per-example test classes: runs one example
    script in a subprocess and checks it succeeds within its time window."""
    # (min, max) allowed runtime in seconds; overridden per file.
    time_range = (2, 4)
    # Basename of the example script; set by _build_test_classes.
    filename = None
    def test_runs(self):
        """Run the example; fail if it errors, times out, or exits too fast."""
        start = time.time()
        min_time, max_time = self.time_range
        if util.run([sys.executable, '-u', self.filename],
                    timeout=max_time,
                    cwd=self.cwd,
                    quiet=True,
                    buffer_output=True,
                    nested=True,
                    setenv={'GEVENT_DEBUG': 'error'}):
            self.fail("Failed example: " + self.filename)
        else:
            # Finishing suspiciously fast suggests the example did nothing.
            took = time.time() - start
            self.assertGreaterEqual(took, min_time)
def _build_test_classes():
    """Create one Test_<example>.py test class per example script.

    Returns a dict mapping generated class names to the classes; empty
    when the examples directory is unavailable.
    """
    result = {}
    try:
        example_dir = util.ExampleMixin().cwd
    except unittest.SkipTest:
        util.log("WARNING: No examples dir found", color='suboptimal-behaviour')
        return result
    ignore = _find_files_to_ignore()
    for filename in glob.glob(example_dir + '/*.py'):
        bn = os.path.basename(filename)
        if bn in ignore:
            continue
        # Dynamically create Test_<basename> with its filename and
        # (possibly overridden) time window baked in.
        tc = type(
            'Test_' + bn,
            (_AbstractTestMixin, greentest.TestCase),
            {
                'filename': bn,
                'time_range': time_ranges.get(bn, _AbstractTestMixin.time_range)
            }
        )
        result[tc.__name__] = tc
    return result
# Inject the generated Test_* classes into the module namespace so the
# test collector can discover them.
for k, v in _build_test_classes().items():
    locals()[k] = v
if __name__ == '__main__':
    greentest.main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Lia Thomson
cyanoConstruct file to run (because there is currently no __main__ file)
"""
import os
from sys import path as sysPath
# Make the package's parent directory importable when this file is run
# directly from the source tree.
_package_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sysPath.append(_package_root)

from cyanoConstruct import app

if __name__ == "__main__":
    # Start the Flask development server with the debugger enabled.
    app.run(debug=True)
#!/usr/bin/env python
"""Walk an image directory tree and write the absolute path of every
*.jpg file, one per line, to <image_dir_root>/result.txt."""
import sys
import os

if __name__ == '__main__':
    if len(sys.argv) != 2:
        # Single-argument print() works identically on Python 2 and 3,
        # replacing the Python-2-only print statements.
        print('Usage: ./generate_img_abspath_list.py <image_dir_root>\n')
        print('Output: <image_dir_root>/result.txt')
        exit(1)
    top = os.path.abspath(sys.argv[1])
    result_path = top + '/result.txt'
    print(result_path)
    # ``with`` guarantees the output file is closed even if the walk
    # raises (the original left the handle open on error).  The walk
    # variable no longer shadows the root path.
    with open(result_path, 'w') as result_file:
        for (dirpath, subdirs, fns) in os.walk(top):
            for fn in fns:
                if not fn.endswith('jpg'):
                    continue
                path = os.path.join(dirpath, fn)
                result_file.write('{}\n'.format(path))
|
from django.shortcuts import render
from budgetApp.models import Stuff
from budgetApp.forms import NewCategory
# Create your views here.
def index(request):
    """Budget overview page: lists categories with budgeted/actual totals
    and handles two POST forms — adding an expense to a category
    ('newExpense') and creating a new category ('newCategory').

    NOTE(review): ``income`` is always 0, so budgeted_saved is simply
    -amount_budgeted.  Also, the initial amount_saved uses
    (amount_budgeted - amount_spent) while both POST branches use
    (income - amount_spent) — confirm which formula is intended.
    """
    form = NewCategory()
    budget_list = Stuff.objects.order_by('top_name')
    amount_budgeted = 0
    amount_spent = 0
    income = 0
    for i in budget_list:
        amount_budgeted += i.budgeted
        amount_spent += i.actual
    budgeted_saved = income - amount_budgeted
    amount_saved = (amount_budgeted-amount_spent)
    if request.method == "POST":
        #postData = request.POST
        #return render(request,'budgetApp/test.html', {'postData': postData})
        if 'newExpense' in request.POST:
            category = request.POST["Add"]#category you want to add the expense to
            amount_spent = int(request.POST["newExpense"])
            category_info = Stuff.objects.get(top_name=category)
            current_spent = int(category_info.actual)
            current_spent += amount_spent
            category_info.actual = current_spent
            category_info.save()
            # Recompute all totals against the updated database state.
            form = NewCategory()
            budget_list = Stuff.objects.order_by('top_name')
            amount_budgeted = 0
            amount_spent = 0
            for i in budget_list:
                amount_budgeted += i.budgeted
                amount_spent += i.actual
            budgeted_saved = income - amount_budgeted
            amount_saved = (income-amount_spent)
            return render(request,'budgetApp/index.html', {'stuff': budget_list, 'form':form, 'budgeted_saved':int(budgeted_saved),'amount_budgeted':int(amount_budgeted), 'amount_spent':int(amount_spent), 'amount_saved':int(amount_saved)})
            #old method of testing below
            #postData = category_info
            #return render(request,'budgetApp/test.html', {'postData': postData})
        if "newCategory" in request.POST:
            form = NewCategory(request.POST)
            if form.is_valid():
                form.save(commit=True)
                # Recompute all totals including the new category.
                form = NewCategory()
                budget_list = Stuff.objects.order_by('top_name')
                amount_budgeted = 0
                amount_spent = 0
                for i in budget_list:
                    amount_budgeted += i.budgeted
                    amount_spent += i.actual
                budgeted_saved = income - amount_budgeted
                amount_saved = (income-amount_spent)
                return render(request,'budgetApp/index.html', {'stuff': budget_list, 'form':form, 'budgeted_saved':int(budgeted_saved),'amount_budgeted':int(amount_budgeted), 'amount_spent':int(amount_spent), 'amount_saved':int(amount_saved)})
            else:
                print ('error form is invalid')
    return render(request,'budgetApp/index.html', {'stuff': budget_list, 'form':form, 'budgeted_saved':int(budgeted_saved),'amount_budgeted':int(amount_budgeted), 'amount_spent':int(amount_spent), 'amount_saved':int(amount_saved)})
def test(request):
    """Debug view: render the budgeted amount of every category."""
    categories = Stuff.objects.order_by('top_name')
    postData = [category.budgeted for category in categories]
    return render(request,'budgetApp/test.html', {'postData': postData})
|
import random

# Best-of-N rock/paper/scissors against a uniformly random computer opponent.
list_of_choices = ["Rock", "Paper", "Scissors"]
your_wins = 0
comp_wins = 0
num_of_rounds = int(input("What do you want to play to? Best of: 5, 7, 9, etc."))
# BUG FIX: the original condition `(your_wins or comp_wins) < num_of_rounds*.5`
# evaluated the truthy-or of the two counters, so once one player had a win
# the other player's score was never checked and the match could run past a
# decided majority. Keep playing while NEITHER side holds a strict majority.
while(your_wins <= num_of_rounds*.5 and comp_wins <= num_of_rounds*.5):
    player_choice = input("Rock, Paper, or Scissors?")
    comp_choice = random.choice(list_of_choices)
    # NOTE(review): unrecognised input silently skips the round -- confirm
    # whether a re-prompt is wanted.
    if(player_choice.lower() == comp_choice.lower()):
        print("Tie")
    elif(player_choice.lower() == "rock"):
        if(comp_choice.lower() == "paper"):
            print("You lose")
            comp_wins +=1
        elif(comp_choice.lower() == "scissors"):
            print("You win")
            your_wins+=1
    elif(player_choice.lower() == "paper"):
        if(comp_choice.lower() == "scissors"):
            print("You lose")
            comp_wins +=1
        elif(comp_choice.lower() == "rock"):
            print("You win")
            your_wins+=1
    elif(player_choice.lower() == "scissors"):
        if(comp_choice.lower() == "rock"):
            print("You lose")
            comp_wins +=1
        elif(comp_choice.lower() == "paper"):
            print("You win")
            your_wins+=1
# The loop exits as soon as one player exceeds half the rounds.
if(your_wins > num_of_rounds*.5):
    print("You won!!! :D")
    print("Your wins: ", your_wins)
    print("comp wins: ", comp_wins)
else:
    print("You lost :(((")
    print("Your wins: ", your_wins)
    print("comp wins: ", comp_wins)
|
from numpy import linspace,pi,sin,cos
from multiprocessing import cpu_count
class Config:
    """User-tweakable settings for the fractal renderer."""

    def __init__(self):
        self.resolution = (1000, 1000)
        # only enable if you have imageMagick installed
        self.saveAnimaiton = True
        t_range = (0, 2 * pi)
        total_frames = 160
        self.framerate = 30
        # julia sets typically need a high number of iterations to look proper;
        # this number also depends on what resolution is set
        self.iterations = 1000
        self.threshold = 4
        self.enableFullScreen = True
        # multiprocessing is only supported for the colour mandelbrot
        self.enableMultiProcessing = True
        # the number of processes spawned is determined by cpu count; change if you wish
        self.processesUsed = cpu_count()
        # starting viewport (the upper y bound is derived below to keep it square)
        x_lower, x_upper, y_lower = -2, 2, -2
        # after each click zoom, how big is the screen compared to last time
        self.newWindowSize = 1 / 2
        # derived values -- not meant to be adjusted directly
        y_upper = y_lower + (x_upper - x_lower)
        self.xInitalBounds = (x_lower, x_upper)
        self.yInitalBounds = (y_lower, y_upper)
        self.tVals = linspace(t_range[0], t_range[1], total_frames)

    def parametricSeedPoint(self, t, state):
        """Set the Julia seed point c for frame parameter t on *state*."""
        # a nice circular path (radius 0.7885) found on wikipedia; change the
        # path to explore more of the julia set
        state.currentSeedPoint = 0.7885 * (cos(t) + 1j * sin(t))
|
from django.db import models
from unifier.apps.core.models.base import StandardModelMixin
from unifier.apps.core.models.manga import Manga
from unifier.apps.core.models.novel import Novel
class Platform(StandardModelMixin):
    """A content platform (website) that hosts mangas and/or novels.

    Linked many-to-many to Manga and Novel; both relations share the
    reverse accessor name "platform".
    """
    class Meta:
        verbose_name = "Platform"
        verbose_name_plural = "Platforms"
    # url: home page of the platform; url_search: URL used for searches there.
    url = models.URLField(blank=False, null=False, max_length=256, verbose_name="Platform URL")
    name = models.CharField(blank=False, null=False, max_length=128, verbose_name="Platform Name")
    url_search = models.URLField(blank=False, null=False, max_length=256, verbose_name="Platform search URL")
    mangas = models.ManyToManyField(Manga, blank=True, related_name="platform")
    novels = models.ManyToManyField(Novel, blank=True, related_name="platform")
    def __str__(self):
        return f"{self.name}"
|
# -*- coding: utf-8 -*-
#!/usr/bin/python
'''
-İki algoritmanin karmaşıklığıda O(n) dir.
-Lomuto Partition listeyi 4 kısma böler. Bunlar pivot,pivottan küçük ve pivottan büyük ve belirsiz kısım
şeklindedir
-Hoare Partion da ise liste pivottan küçük ve büyük olmak üzere 2 kısıma ayrılır.
- Swaping işlemleri Hoare Part.'a göre Lomuto part. 3 kat daha yüksektir.
- Compariton işlemleri iki yöntemde de n-1 karşılaştırma yapmaktadır.
- Sıralı bir liste verildiğinde Lomuto partition O(n^2) kadar çıkarken,
Hoare partition O(nlogn) dir.
-Lomuto part. Hoare Part. e göre daha kolay implement edilebilmektedir.
Not:Algoritmaların implemantationları yapılırken ders defteri ve geeksforgeeks.org sitelerinden
yararlanıldı.
'''
def LomutoPartition(arr, low, high):
    """Lomuto partition of arr[low..high] using arr[high] as the pivot.

    Elements <= pivot end up left of the pivot's final slot; returns the
    pivot's final index.

    BUG FIX: the scan must visit j in [low, high-1]; the original used
    range(low, high-1) and skipped the element directly before the pivot,
    so that element was never partitioned.
    """
    pivot = arr[high]
    i = low - 1  # boundary of the "<= pivot" region
    for j in range(low, high):
        if arr[j] <= pivot:
            i = i + 1
            arr[i], arr[j] = arr[j], arr[i]
    # place the pivot between the two regions
    arr[i + 1], arr[high] = arr[high], arr[i + 1]
    return i + 1
def HoarePartiton(arr, low, high):
    """Hoare partition of arr[low..high] using arr[low] as the pivot.

    Rearranges the slice so every element of arr[low..p] is <= every element
    of arr[p+1..high] and returns the split index p (always < high), which
    makes the (low, p) / (p+1, high) recursion in quickSort terminate.

    BUG FIX: the original tried to write the pivot back to a remembered
    position after the scan, but by then arr[low] may already have been
    swapped away, duplicating one element and dropping another (e.g. the
    list [4, 52, 46, 72, 1] lost the 1). The classic Hoare scheme does not
    relocate the pivot at all -- it only returns the split point.
    """
    pivot = arr[low]
    i = low - 1
    j = high + 1
    while True:
        # advance i past elements already on the small side
        i += 1
        while arr[i] < pivot:
            i += 1
        # retreat j past elements already on the large side
        j -= 1
        while arr[j] > pivot:
            j -= 1
        if i >= j:  # pointers crossed: j is the split point
            return j
        arr[i], arr[j] = arr[j], arr[i]
def quickSort(arr, low, high):
    """Sort arr[low..high] in place via Hoare-partition quicksort; returns arr.

    With a Hoare-style partition the returned index is only a split point,
    so the recursion covers [low, pos] and [pos+1, high].
    """
    if low < high:
        pos = HoarePartiton(arr, low, high)
        # FIX: removed the leftover debug print(pos) and stray semicolons.
        quickSort(arr, low, pos)
        quickSort(arr, pos + 1, high)
    return arr
if __name__ == "__main__":
    # FIX: guard the demo so importing this module has no side effects.
    liste = [4, 52, 46, 72, 1]
    print(quickSort(liste, 0, len(liste) - 1))
if __name__=="__main__":
T = int(raw_input())
for _ in range(T):
N = int(raw_input())
arr = [ [0 for i in range(N+1)] for i in range(3) ]
arr[0] = map(int, raw_input().split())
##
for i in range(N):
if arr[0][i]%2==0 or arr[0][i]==1:
arr[1][i+1] = arr[1][i]+1#old
arr[2][i+1] = arr[2][i]#cold
else:
arr[1][i+1] = arr[1][i] #old
arr[2][i+1] = arr[2][i]+1 #cold
query = int(raw_input())
for i in range(query):
L,R = map(int, raw_input().split())
if arr[1][R]-arr[1][L-1] < arr[2][R]-arr[2][L-1]:
total = (arr[1][R]-arr[1][L-1])+(arr[2][R]-arr[2][L-1])
print (arr[2][R]-arr[2][L-1]) - int(total/2)
else:
print '0'
|
##
# Scrape the list of municipalities served by TeaAcqueMantova from the
# utility's image-map page and write them as a location list CSV.
from bs4 import BeautifulSoup
import pandas as pd,requests,io
import acqua.aqueduct as aq

gestore = "TeaAcqueMantova"
aq.setEnv('Lombardia//'+gestore)
url = 'https://www.cometea.it/verifica-la-tua-acqua/'
page = requests.get(url)
soup = BeautifulSoup(page.text, 'html.parser')
#
# FIX: renamed `map` -> `poly_areas`; the original shadowed the builtin map().
poly_areas = soup.findAll("area", {"shape": "poly"})
comuniList = [comune['href'].split('/') for comune in poly_areas]
# last path segment is the municipality slug; dashes become spaces
alias_city = [comune[-1].replace('-',' ') for comune in comuniList]
##
locationList = pd.DataFrame({'alias_city':alias_city})
locationList['alias_address'] = 'Comune'
locationList['georeferencingString'] = locationList['alias_city']+", Mantova, Italia"
locationList['type'] = 'POINT'
locationList.to_csv('Metadata/LocationList.csv',index=False)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from kuon.common import CommonSteamGames
from kuon.steam.common import SteamUrls
from kuon.steam.steam import Steam
class IInventory(Steam):
    """Implementation of the API methods related to the inventory of the user on Steam

    common not self explanatory keys:

    app id:
        The Steam AppID of the game which owns this item (e.g. 730 for CS:GO, 440 for TF2, 570 for Dota 2)
    app context:
        The context of the game. Nearly all games usually have the context id 2, while Steam items usually have the
        context id 6
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def get_my_inventory(self, app_id: int = CommonSteamGames.APP_ID_CSGO, app_context: int = 2):
        """Retrieve the steam inventory

        :type app_id: int
        :type app_context: int
        :return:
        """
        endpoint = '{base:s}/my/inventory/json/{app_id:d}/{app_context:d}'.format(
            base=SteamUrls.COMMUNITY,
            app_id=app_id,
            app_context=app_context,
        )
        return self.api_request(endpoint)
|
class ServerBaseException(Exception):
    """Base class for server errors for server.

    Stores the first positional argument in ``self.value`` so subclasses can
    interpolate it into their ``__str__`` messages.
    """
    def __init__(self, *args):
        # BUG FIX: the original neither called Exception.__init__ (so the
        # exception carried no args) nor set ``value`` when the first argument
        # was not a string, leaving subclasses' __str__ to crash with
        # AttributeError; the surrounding try/except was dead code.
        super().__init__(*args)
        self.value = args[0] if args else None


class ServerMethodException(ServerBaseException):
    """Catcher errors in case method is not allowed by the server."""
    def __init__(self, value):
        super().__init__(value)

    def __str__(self):
        return f'405_Method__{self.value}__is_not_allowed'


class ServerValuesException(ServerBaseException):
    """Handler for errors occurred at unpacking values."""
    def __init__(self, value):
        super().__init__(value)

    def __str__(self):
        return f'{self.value}__ServerException_Not_enough_args_were_transmitted'


class ServerDatabaseException(ServerBaseException):
    """Handler expected DB errors."""
    def __init__(self, value):
        super().__init__(value)

    def __str__(self):
        return f'{self.value}__ServerDatabaseException'


class ServerValidateError(ServerBaseException):
    """Errors validation handler, value and unmatched pattern returns."""
    def __init__(self, value, pattern):
        super().__init__(value)
        self.pattern = pattern

    def __str__(self):
        return f'ServerValidateException: Value_{self.value}__unmatched__expression__{self.pattern}'


class UnexpectedError(ServerBaseException):
    """Error handler in case not expected error in the server occurred."""
    def __init__(self, value):
        super().__init__(value)

    def __str__(self):
        return f'{self.value}__Unexpected_behaviour'
|
from braces.views import PrefetchRelatedMixin
from django.contrib.auth import login, logout
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.views import redirect_to_login
from django.forms import HiddenInput
from django.http import HttpResponseRedirect
from django.urls import reverse_lazy
from django.views.generic import (CreateView, DetailView, FormView,
RedirectView, UpdateView)
from .forms import UserCreateForm
from .models import UserProfile
class LoginView(FormView):
    """Logs a user in"""
    form_class = AuthenticationForm
    template_name = "accounts/signin.html"
    def get_success_url(self):
        """Gets the URL to redirect to after a successful login"""
        # Check if we have a next url as Query parameter
        next_url = self.request.GET.get('next', None)
        if next_url:
            # SECURITY NOTE(review): `next` comes straight from the query
            # string and is not validated, so this is an open-redirect vector;
            # consider django.utils.http.url_has_allowed_host_and_scheme.
            # Return the next URL
            return "{}".format(next_url)
        # Default: Redirect to home
        return reverse_lazy('projects:index')
    def get_form(self, form_class=None):
        """Get the form, passing the request as AuthenticationForm's first arg"""
        if form_class is None:
            form_class = self.get_form_class()
        return form_class(self.request, **self.get_form_kwargs())
    def form_valid(self, form):
        """Check if the form is valid and log the authenticated user in"""
        # Login the user
        login(self.request, form.get_user())
        return super().form_valid(form)
class LogoutView(RedirectView):
    """Logs a user out, then redirects to the project index page"""
    url = reverse_lazy("projects:index")
    def get(self, request, *args, **kwargs):
        # Logout the user before RedirectView issues the redirect
        logout(request)
        return super().get(request, *args, **kwargs)
class SignUpView(CreateView):
    """Creates a new user via UserCreateForm, then sends them to the login page"""
    form_class = UserCreateForm
    success_url = reverse_lazy("accounts:login")
    template_name = "accounts/signup.html"
class ProfileView(PrefetchRelatedMixin, DetailView):
    """Profile of a user; the slug "me" resolves to the logged-in user"""
    model = UserProfile
    prefetch_related = ("user",)
    template_name = "accounts/profile.html"
    def get_context_data(self, *args, **kwargs):
        """Add the requesting user's accepted applications to the context.

        NOTE(review): this reads `self.request.user.applications`, which an
        anonymous user does not have -- confirm this view is only reachable
        when authenticated.
        """
        context = super().get_context_data(*args, **kwargs)
        past_apps = self.request.user.applications.filter(accepted=True)
        context['past_apps'] = past_apps
        return context
    def get(self, request, *args, **kwargs):
        """Get the profile"""
        # If we pass in `/me/` as profile, see if we get a user back,
        # If not, redirect to login
        if self.kwargs[self.get_slug_field()] == "me":
            try:
                self.get_object()
            except self.model.DoesNotExist:
                # No matching profile (e.g. anonymous user): force a login
                # and come back to /me/ afterwards.
                return redirect_to_login(reverse_lazy(
                    "accounts:profile",
                    kwargs={"slug": "me"}
                ))
        return super().get(request, *args, **kwargs)
    def get_object(self, queryset=None):
        """Get object, translating the special slug "me" to the current user"""
        if not queryset:
            queryset = self.get_queryset()
        # If we pass in `/me` as profile,
        # Use the username of the current user instead
        slug = self.kwargs[self.get_slug_field()]
        if slug == "me":
            slug = self.request.user.username
        return queryset.get(slug=slug)
class ProfileEditView(LoginRequiredMixin, PrefetchRelatedMixin, UpdateView):
    """Update profile of a user; users may only edit their own profile"""
    model = UserProfile
    prefetch_related = ("user",)
    fields = ("bio", "pfp", "skills_internal")
    template_name = "accounts/profile_edit.html"
    def get_form(self, form_class=None):
        """Get form with per-field overrides applied"""
        form = super().get_form(form_class)
        # Set some form overwrites: picture and skills are optional, and
        # skills_internal is managed client-side, so hide its widget.
        form.fields['pfp'].required = False
        form.fields['skills_internal'].required = False
        form.fields['skills_internal'].widget = HiddenInput()
        return form
    def get(self, request, *args, **kwargs):
        """Serve the edit page only to the profile's owner"""
        auth_user = request.user
        profile = self.get_object()
        # Make sure we can only edit our own profile.
        # If we try to edit someone elses profile,
        # Redirect to their normal profile page
        if not auth_user == profile.user:
            return HttpResponseRedirect(reverse_lazy(
                'accounts:profile',
                kwargs={"slug": profile.slug}
            ))
        return super().get(request, *args, **kwargs)
|
from django.urls import path
# from . import views
from .views import *
from django.contrib.auth.views import LoginView, LogoutView
# URL routes for this app.
# NOTE(review): the wildcard `from .views import *` above makes the origin of
# these view names implicit -- consider importing them explicitly.
urlpatterns = [
    # path('', indexView.as_view(), name='home'),
    path('', indexView, name='home'),
    path('test/', test_View, name='test'),
    path('m-test/', mohit_test_view, name='mohit-test'),
]
|
#!/usr/bin/python
"""Append a 0/1 membership column to a TSV stream.

For each row of <file1> (or stdin), emit the row plus '1' if its first
column appears in the id list <file2>, else '0'.

Usage: script.py <file1|stdin> <id_file> [new_column_name]
"""
import sys
import gzip  # BUG FIX: gzip was used below but never imported

fname1 = sys.argv[1]
fname2 = sys.argv[2]
if len(sys.argv) > 3:
    new_col_name = sys.argv[3]
else:
    new_col_name = None

# the id file is read once into a set for O(1) membership tests
with open(fname2) as f:
    id_set = set(l.rstrip() for l in f)

if fname1 != "stdin":
    if fname1.endswith(".gz"):
        # BUG FIX: open in text mode -- gzip.open defaults to 'rb', which
        # would yield bytes and break the string concatenation below.
        i_file = gzip.open(fname1, 'rt')
    else:
        i_file = open(fname1)
else:
    i_file = sys.stdin

if new_col_name:
    # first line is a header: extend it with the new column name
    print (i_file.readline().rstrip() + "\t" + new_col_name)
for l in i_file:
    spl = l.rstrip().split("\t")
    add_val = '0'
    if spl[0] in id_set:
        add_val = '1'
    print (l.rstrip() + "\t" + add_val)
|
import sqlite3

conn = sqlite3.connect('eventos.db')
cursor = conn.cursor()

record_id = 3  # renamed from `id`, which shadowed the builtin

# delete one record from the table
# BUG FIX: execute() needs a sequence of parameters; `(id)` is just a
# parenthesised int and raises ProgrammingError -- use a 1-tuple `(x,)`.
cursor.execute("""
DELETE FROM clientes
WHERE id = ?
""", (record_id,))

conn.commit()
print('Registro excluido com sucesso.')
conn.close()
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import KMeans
def InitCenter(k, m, x_train):
    """Pick k initial cluster centres by sampling random rows of x_train.

    :param k: number of clusters
    :param m: number of samples (rows) in x_train
    :param x_train: pandas DataFrame of shape (m, n_features)
    :return: ndarray of shape (k, n_features) holding the chosen centres
    """
    # BUG FIX: the original read the feature count from a module-level global
    # `n` (only set inside __main__); derive it from the data so the function
    # is self-contained. Also dropped the stale "first k points" comment.
    n_features = x_train.shape[1]
    Center = np.zeros([k, n_features])
    np.random.seed(15)  # fixed seed keeps the initialisation reproducible
    # NOTE(review): randint may pick the same row twice, giving duplicate
    # centres -- the original had the same behaviour.
    for i in range(k):
        x = np.random.randint(m)
        Center[i] = np.array(x_train.iloc[x])
    return Center
def getDistense(x_train, k, m, Center):
    """Euclidean distance from every sample to every cluster centre.

    :param x_train: pandas DataFrame with m rows of samples
    :param k: number of centres
    :param m: number of samples
    :param Center: (k, n_features) array of centres
    :return: (k, m) array; entry [j, i] is the distance from sample i to centre j
    """
    distances = []
    for centre_idx in range(k):
        for row_idx in range(m):
            sample = np.array(x_train.iloc[row_idx, :])
            diff = sample.T - Center[centre_idx]
            distances.append(np.sqrt(np.sum(np.square(diff))))
    return np.array(distances).reshape(k, m)
def getNewCenter(x_train, k, n, Dis_array):
    """Recompute centres as the mean of the samples assigned to each cluster.

    :param x_train: pandas DataFrame of samples (needs at least 4 columns,
        since columns 1-3 are collected for plotting)
    :param k: number of clusters
    :param n: number of features
    :param Dis_array: (k, m) distance matrix from getDistense
    :return: (NewCent, axisx, axisy, axisz) -- NewCent is (k, n); the axis
        lists hold columns 1/2/3 of each cluster's members, one list per cluster
    """
    assignments = np.argmin(Dis_array, axis=0)  # nearest centre per sample
    centres, axisx, axisy, axisz = [], [], [], []
    for cluster in range(k):
        members = x_train.loc[assignments == cluster]
        axisx.append(list(members.iloc[:, 1]))
        axisy.append(list(members.iloc[:, 2]))
        axisz.append(list(members.iloc[:, 3]))
        centres.append(np.mean(members, axis=0))
    # empty clusters produce NaN means; coerce them to zero
    NewCent = np.nan_to_num(np.array(centres).reshape(k, n))
    return NewCent, axisx, axisy, axisz
def KMcluster(x_train,k,n,m,threshold):
    """Run k-means until the centre movement falls below *threshold*.

    Plots the current 2-D assignment after every iteration (plt.show()
    blocks until the window is closed).

    NOTE(review): the plotting below hard-codes exactly 3 clusters
    (axis_x[0..2]), so k != 3 will fail here -- confirm k is always 3.

    :param x_train: pandas DataFrame of samples
    :param k: number of clusters
    :param n: number of features
    :param m: number of samples
    :param threshold: convergence bound on the centre-shift norm
    :return: (center, axis_x, axis_y, axis_z, initcenter) -- final centres,
        plotting columns per cluster, and the history of all centres
    """
    # axis_x / axis_y are made global so the __main__ plotting code can reuse them
    global axis_x, axis_y
    center = InitCenter(k,m,x_train)
    initcenter = center
    centerChanged = True
    t=0
    while centerChanged:
        Dis_array = getDistense(x_train, k, m, center)
        center ,axis_x,axis_y,axis_z= getNewCenter(x_train,k,n,Dis_array)
        # shift between the previous k centres and the new ones
        err = np.linalg.norm(initcenter[-k:] - center)
        t+=1
        print('err of Iteration '+str(t),'is',err)
        plt.figure(1)
        p1,p2,p3 = plt.scatter(axis_x[0], axis_y[0], c='c'),plt.scatter(axis_x[1], axis_y[1], c='m'),plt.scatter(axis_x[2], axis_y[2], c='y')
        plt.legend(handles=[p1, p2, p3], labels=['0', '1', '2'], loc='best')
        plt.show()
        if err < threshold:
            centerChanged = False
        else:
            # keep a running history of centres; the last k rows are compared next round
            initcenter = np.concatenate((initcenter, center), axis=0)
    return center, axis_x, axis_y,axis_z, initcenter
if __name__=="__main__":
x=pd.read_csv("iris.csv")
x_train=x.iloc[:,1:5]
m,n = np.shape(x_train)
k = 3
threshold = 0.1
km,ax,ay,az,ddd = KMcluster(x_train, k, n, m, threshold)
print('最终的聚类中心为: ', km)
plt.figure(2)
plt.scatter(km[0,1],km[0,2],c = 'k',s = 200,marker='x')
plt.scatter(km[1,1],km[1,2],c = 'k',s = 200,marker='x')
plt.scatter(km[2,1],km[2,2],c = 'k',s = 200,marker='x')
p1, p2, p3 = plt.scatter(axis_x[0], axis_y[0], c='c'), plt.scatter(axis_x[1], axis_y[1], c='m'), plt.scatter(axis_x[2], axis_y[2], c='y')
plt.legend(handles=[p1, p2, p3], labels=['0', '1', '2'], loc='best')
plt.title('2-D')
plt.show()
plt.figure(3)
TreeD = plt.subplot(111, projection='3d')
TreeD.scatter(ax[0],ay[0],az[0],c='c')
TreeD.scatter(ax[1],ay[1],az[1],c='m')
TreeD.scatter(ax[2],ay[2],az[2],c='y')
TreeD.set_zlabel('Z') # 坐标轴
TreeD.set_ylabel('Y')
TreeD.set_xlabel('X')
TreeD.set_title('3-D')
plt.show()
|
import json
import os
from string import Template
from flask import request, jsonify
from helpers import query, update, log, generate_uuid
from escape_helpers import sparql_escape_uri, sparql_escape_string, sparql_escape_int, sparql_escape_datetime
import pandas as pd
from .file_handler import postfile
def store_json(data):
    """
    Store json data to a file and call postfile to store in in a triplestore
    :param data: data in json format
    :return: response from storing data in triple store
    """
    dump_name = "{}.json".format(generate_uuid())
    dump_path = "/share/ai-files/{}".format(dump_name)
    # write the payload to the shared volume, then register it
    with open(dump_path, 'w') as out:
        json.dump(data, out)
    return postfile(dump_path, dump_name)
@app.route("/data/query", methods=["GET"])
def query_data():
"""
Endpoint for loading data from triple store using a query file and converting it to json
Accepted request arguments:
- filename: filename that contains the query
- limit: limit the amount of data retrieved per query execution, allows for possible pagination
- global_limit: total amount of items to be retrieved
:return: response from storing data in triple store, contains virtual file id and uri
"""
# env arguments to restrict option usage
acceptFilename = os.environ.get('ACCEPT_FILENAME') or False
acceptOptions = os.environ.get('ACCEPT_OPTIONS') or False
# default filename
filename = "/config/input.sparql"
if acceptFilename:
f = request.args.get("filename")
if f:
filename = "/config/" + f
# default amount of items to retrieve per request
limit = 1000
globalLimit = float('inf')
if acceptOptions:
limit = int(request.args.get("limit") or 1000)
globalLimit = float(request.args.get("global_limit") or float("inf"))
if globalLimit < limit:
limit = globalLimit
# load query
q = ""
if os.path.isfile(filename):
with open(filename) as f:
q = f.read()
else:
return "Requested filename does not exist", 204
# iteratively retrieve requested amount of data
ret = {}
if q:
stop = False
index = 0
while not stop and (limit * index) <= globalLimit - 1:
stop = True
offset = limit * index
formatted = (q + f" LIMIT {limit} OFFSET {offset}")
resp = query(formatted)["results"]["bindings"]
# convert data to json
for val in resp:
stop = False
for k, v in val.items():
if k not in ret:
ret[k] = []
ret[k].append(v["value"])
index += 1
# store json data to file and in triple store
storeResp = store_json(ret)
return jsonify(storeResp)
@app.route("/data/file", methods=["GET"])
def file_data():
"""
Endpoint for loading data from a csv file and converting it to json
Accepted request arguments:
- filename: filename that contains the data
- columns: csv data columns to use
:return: response from storing data in triple store, contains virtual file id and uri
"""
# env arguments to restrict option usage
acceptFilename = os.environ.get('ACCEPT_FILENAME') or False
acceptOptions = os.environ.get('ACCEPT_OPTIONS') or False
# default filename
filename = "/share/input.csv"
if acceptFilename:
f = request.args.get("filename")
if f:
filename = "/share/" + f
columns = None
if acceptOptions:
columns = request.args.get("columns") or None
if not os.path.isfile(filename):
return "Data inaccessible", 204
data = pd.read_csv(filename).astype(str)
# select requested columns, all if not specified
if columns:
columns = list(columns.split(","))
dataColumns = list(data.columns)
for col in columns:
if col not in dataColumns:
return f"Invalid column {col} requested", 204
data = data[columns]
ret = {}
for col in data:
ret[col] = data[col].tolist()
# store json data to file and in triple store
storeResp = store_json(ret)
return jsonify(storeResp)
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def catch_all(path):
    """
    Default endpoint/ catch all
    :param path: requested path
    :return: debug information
    """
    body = 'You want path: %s' % path
    return body, 404
if __name__ == '__main__':
    # MODE=development switches on the Flask debugger / auto-reload.
    dev_mode = os.environ.get('MODE') == "development"
    app.run(debug=dev_mode, host='0.0.0.0', port=80)
|
#coding=UTF-8
import threading
class jd_Threadings(threading.Thread):
def __init__(self,keyword,id,obj):
#threading.Thread.__init__(self)
super(jd_Threadings,self).__init__()
self.keyword=keyword
self.id=id
self.obj=obj
self.lock=threading.Lock()
def run(self):
self.lock.acquire()
print '%d : 正在爬取%s类.' % (self.id,self.keyword)
self.obj.jd_craw_urls(self.keyword)
self.lock.release()
|
import sys
'''
先对一跳的句子进行搜索,选取最大的n个,然后再找n个实体相连的候选路径进行计算,选择得分最高的
'''
sys.path.insert(0,'/home/aistudio/work/MyExperiment/path_ent_rel')
sys.path.insert(0,'/home/hbxiong/QA2/path_ent_rel')
from keras_bert import load_trained_model_from_checkpoint
import keras
import json
from py2neo import Graph
from some_function_maxbert import transfer_data_pathentrel
import os
import numpy as np
import re
import tensorflow as tf
from keras.metrics import binary_accuracy
import keras.backend as k
# FIX: the GPU options / TF session were created twice (identical lines a few
# lines apart); one configured session is enough.
gpu_options = tf.GPUOptions(allow_growth=True)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
from get_query_result_new import get_all_F1
from pprint import pprint
import os
import time
# timestamp tag used to name this evaluation run's result directory
tag=time.strftime("%Y-%m-%d-%H-%M-%S",time.localtime())
print(tag)
model_tag='ckpt_path_ent_rel_bert_wwm_ext-100,1e-05,8-2020-10-26-15-05-24.hdf5'#room
print(model_tag)
class Config:
    """Paths and hyper-parameters for the path/entity/relation ranking run."""
    # data_path: for cross paths this is fixed to the training files below
    data_dir = r'./data'
    train_data_path = os.path.join(data_dir, 'path_data/train_data_sample.json')
    valid_data_path = os.path.join(data_dir, 'path_data/valid_data_sample.json')
    linking_data_path = '../result_new_linking-no_n_59,r_0.8321,line_right_recall_0.9230,avg_n_2.6919.json'
    # bert_path
    # bert_path = '../../bert/bert_wwm_ext' # Baidu
    # bert_path = r'C:\Users\bear\OneDrive\ccks2020-onedrive\ccks2020\bert\tf-bert_wwm_ext' # room
    # bert_path = r'../../../ccks/bert/tf-bert_wwm_ext' # colab
    bert_path = '/home/hbxiong/ccks/bert/tf-bert_wwm_ext' # lab
    bert_config_path = os.path.join(bert_path, 'bert_config.json')
    bert_ckpt_path = os.path.join(bert_path, 'bert_model.ckpt')
    bert_vocab_path = os.path.join(bert_path, 'vocab.txt')
    result_path = './data/%s-%s'%(model_tag,tag)
    # predicted scores for the correct paths of the test set
    true_answer_path = os.path.join(result_path, 'true_path_score.json')
    # stored as txt so that a crashed run can resume from the current question
    ok_result_path = os.path.join(result_path, 'ok_result.txt')
    pred_result_path = os.path.join(result_path, 'pred_result.txt')
    # where the trained model weights are loaded from
    similarity_ckpt_path = './ckpt/%s'%model_tag
    batch_size = 64
    epoches = 100
    learning_rate = 1e-5  # 2e-5
    neg_sample_number = 5
    max_length = 100  # neg3:64;100
config = Config()
# Make sure the checkpoint and result directories exist before running.
for needed_dir in ['./ckpt', config.result_path]:
    if not os.path.exists(needed_dir):
        os.mkdir(needed_dir)
# Log the effective configuration for this run.
pprint(vars(Config))
def basic_network():
    """Build the three-branch BERT scoring model (path / entity / relation).

    Each branch feeds its (token-id, segment-id) inputs through the shared
    BERT encoder and keeps the [CLS] vector; the three vectors are
    concatenated and scored by a single sigmoid unit.
    """
    bert_model = load_trained_model_from_checkpoint(config.bert_config_path,
                                                    config.bert_ckpt_path,
                                                    seq_len=config.max_length,
                                                    training=False,
                                                    trainable=True)
    # (optionally restrict training to selected layers)
    # bert_model.summary()
    # for l in bert_model.layers:
    #     # print(l)
    #     l.trainable = True
    path1 = keras.layers.Input(shape=(config.max_length,))
    path2 = keras.layers.Input(shape=(config.max_length,))
    path_bert_out = bert_model([path1, path2])  # (batch_size, max_length, 768)
    path_bert_out = keras.layers.Lambda(lambda bert_out: bert_out[:, 0])(path_bert_out)

    ent1 = keras.layers.Input(shape=(config.max_length,))
    ent2 = keras.layers.Input(shape=(config.max_length,))
    ent_bert_out = bert_model([ent1, ent2])  # (batch_size, max_length, 768)
    ent_bert_out = keras.layers.Lambda(lambda bert_out: bert_out[:, 0])(ent_bert_out)

    rel1 = keras.layers.Input(shape=(config.max_length,))
    rel2 = keras.layers.Input(shape=(config.max_length,))
    # BUG FIX: the relation branch must encode the relation inputs; the
    # original re-encoded [path1, path2] here (copy-paste error), which left
    # rel1/rel2 connected to the model but unused.
    rel_bert_out = bert_model([rel1, rel2])  # (batch_size, max_length, 768)
    rel_bert_out = keras.layers.Lambda(lambda bert_out: bert_out[:, 0])(rel_bert_out)

    #bert_out = keras.layers.Dropout(0.5)(bert_out)
    com_out = keras.layers.concatenate([path_bert_out, ent_bert_out, rel_bert_out])
    outputs = keras.layers.Dense(1, activation='sigmoid')(com_out)
    model = keras.models.Model([path1, path2, ent1, ent2, rel1, rel2], outputs)
    model.compile(
        optimizer=keras.optimizers.Adam(config.learning_rate),
        loss=my_loss,
        metrics=[my_accuracy, monitor_f1]
    )
    model.summary()
    return model
def my_accuracy(y_true, y_pred):
    '''
    Binary accuracy computed against the label stored in column 0 of y_true.
    :param y_true: ?,2  (column 0 holds the 0/1 label)
    :param y_pred: ?,
    :return: 1
    '''
    labels = k.expand_dims(y_true[:, 0], axis=-1)
    return binary_accuracy(labels, y_pred)
def my_loss(y_true, y_pred):
    '''Binary cross-entropy against the label in column 0 of y_true.'''
    labels = k.expand_dims(y_true[:, 0])
    return k.binary_crossentropy(labels, y_pred)
def monitor_f1(y_true, y_pred):
    '''
    Average of the per-sample f1 values over samples that are predicted 1 or
    labelled 1. Caveat: computed batch-wise; for the fix see
    https://www.zhihu.com/question/53294625/answer/362401024
    :param y_true: ?,2  (column 0: 0/1 label, column 1: per-sample f1)
    :param y_pred: ?,
    :return: 1
    '''
    f1 = k.expand_dims(y_true[:, 1],axis=-1)
    y_true = k.expand_dims(y_true[:, 0],axis=-1)
    # threshold 0.5 splits predictions into 0/1
    one = tf.ones_like(y_pred)
    zero = tf.zeros_like(y_pred)
    # y_pred = tf.where(y_pred < 0.5, x=zero, y=one)
    # y= tf.where(y_pred == 1, x=one, y=y_true) # positions where y_true or y_pred is 1
    # the line below merges the two commented lines above
    y= tf.where(y_pred > 0.5, x=one, y=y_true)
    return tf.div(k.sum(tf.multiply(y,f1)),k.sum(y))
def get_one_ent_one_hop(graph, ent):
    '''
    Return all one-hop path strings around entity *ent*: outgoing
    (ent -rel-> ?x) and incoming (?x -rel-> ent) relations, each formatted
    as 'subj|||rel|||obj' with '?x' marking the answer slot.
    :param graph: py2neo Graph instance
    :param ent: entity name
    :return: list of one-hop path strings
    '''
    # ent->rel->?x
    cypher1 = "MATCH (ent1:Entity{name:'" + ent + "'})-[rel]->(x) RETURN DISTINCT rel.name"
    try:
        relations = graph.run(cypher1).data()
    # BUG FIX: a bare `except:` also swallows KeyboardInterrupt/SystemExit;
    # catch only ordinary errors from the graph driver.
    except Exception:
        relations=[]
        print('one_ent_one_hop cypher1 wrong')
    sample1 = []
    for rel in relations:
        sam = ent + '|||' + rel['rel.name'] + '|||?x'
        sample1.append(sam)
        # print(sam)
    # ?x->rel->ent
    cypher2 = "MATCH (ans)-[rel]->(ent1:Entity{name:'" + ent + "'}) RETURN DISTINCT rel.name"
    try:
        relations = graph.run(cypher2).data()
    except Exception:
        relations=[]
        print('one_ent_one_hop cypher2 wrong')
    sample2 = []
    for rel in relations:
        sam = '?x|||' + rel['rel.name'] + '|||' + ent
        sample2.append(sam)
        # print(sam)
    return sample1 + sample2
def _get_next_hop_path_two(graph,path):
    """Given a one-hop path, return candidate two-hop paths.

    Builds both chain extensions (the one-hop answer ?x renamed to ?y with a
    new hop appended) and two-entity constraints (a second triple sharing the
    same ?x). Skips expansion when the first hop's relation is '<类型>'.

    NOTE(review): bare `except:` clauses below swallow all errors including
    KeyboardInterrupt. Also, cypher4's `if len(answers)<1000` guard tests the
    result of cypher3 *before* running cypher4 -- confirm this gating is
    intentional.

    :param graph: py2neo Graph instance
    :param path: one-hop path 'subj|||rel|||obj' with '?x' as the answer slot
    :return: list of two-hop path strings (triples joined by tabs)
    """
    # escape backslashes and quotes so the name can be embedded in cypher
    path=path.replace('\\','\\\\').replace("\'","\\\'")
    # input: a one-hop path; output: two-hop paths (incl. one-hop + two entities)
    # if the one-hop answer is not an attribute, follow-up two-entity paths are built
    # if the first hop's relation is the type relation, there is no second hop
    path_list=path.split('|||')
    print('now query path---',path_list)
    assert len(path_list)==3
    if path_list[1]=='<类型>' and path_list[2]=='?x':
        return []
    cypher='match '
    sample=[]
    # case A: the answer variable is the subject of the first hop
    if path_list[0]=='?x':
        cypher1=cypher+"(y)-[rel1:Relation{name:'"+path_list[1]+"'}]->(ent1:Entity{name:'"+path_list[2]+"'}) match (y)-[rel2]->(x) where rel2.name<>'<类型>' and ent1.name<>x.name return distinct rel2.name"
        # print(cypher1)
        try:
            answers = graph.run(cypher1).data()
        except:
            answers = []
            print('next_hop_path_two cypher1 wrong')
        for ans in answers:
            one_ent=path.replace('?x','?y')+'\t'+'?y|||'+ans['rel2.name']+'|||?x'
            # print(one_ent)
            sample.append(one_ent)
        cypher2 = cypher + "(y)-[rel1:Relation{name:'" + path_list[1] + "'}]->(ent1:Entity{name:'" + path_list[2] + "'}) match (x)-[rel2]->(y) return distinct rel2.name"
        # print(cypher2)
        try:
            answers = graph.run(cypher2).data()
        except:
            answers = []
            print('next_hop_path_two cypher2 wrong')
        for ans in answers:
            one_ent = path.replace('?x', '?y')+ '\t' +'?x|||' + ans['rel2.name'] + '|||?y'
            # print(one_ent)
            sample.append(one_ent)
        cypher3=cypher+"(x)-[rel1:Relation{name:'"+path_list[1]+"'}]->(:Entity{name:'"+path_list[2]+"'}) match (y)-[rel2]->(x) return distinct rel2.name,y.name"
        # print(cypher3)
        try:
            answers = graph.run(cypher3).data()
        except:
            answers = []
            print('next_hop_path_two cypher3 wrong')
        # cap candidate explosion: only expand when fewer than 1000 matches
        if len(answers)<1000:
            for ans in answers:
                two_ent1=ans['y.name']+'|||'+ans['rel2.name']+'|||?x'+'\t'+path
                # print(two_ent)
                sample.append(two_ent1)
        cypher4 = cypher + "(x)-[rel1:Relation{name:'" + path_list[1] + "'}]->(ent1:Entity{name:'" + path_list[2] + "'}) match (x)-[rel2]->(y) where ent1.name<>y.name return distinct rel2.name,y.name"
        # print(cypher4)
        if len(answers)<1000:
            try:
                answers = graph.run(cypher4).data()
            except:
                answers = []
                print('next_hop_path_two cypher4 wrong')
            for ans in answers:
                two_ent1= path + '\t' +'?x|||' + ans['rel2.name'] + '|||'+ans['y.name']
                # print(two_ent)
                sample.append(two_ent1)
    # case B: the answer variable is the object of the first hop
    if path_list[2]=='?x':
        cypher5=cypher+"(:Entity{name:'"+path_list[0]+"'})-[rel1:Relation{name:'"+path_list[1]+"'}]->(y) match (y)-[rel2]->(x) where rel2.name<>'<类型>' return distinct rel2.name"
        # print(cypher5)
        try:
            answers = graph.run(cypher5).data()
        except:
            answers = []
            print('next_hop_path_two cypher5 wrong')
        for ans in answers:
            one_ent=path.replace('?x','?y')+'\t'+'?y|||'+ans['rel2.name']+'|||?x'
            # print(one_ent)
            sample.append(one_ent)
        cypher6 = cypher + "(ent1:Entity{name:'" + path_list[0] + "'})-[rel1:Relation{name:'" + path_list[1] + "'}]->(y) match (x)-[rel2]->(y) where ent1.name<>x.name return distinct rel2.name"
        # print(cypher6)
        try:
            answers = graph.run(cypher6).data()
        except:
            answers = []
            print('next_hop_path_two cypher6 wrong')
        for ans in answers:
            one_ent = path.replace('?x', '?y') + '\t' + '?x|||' + ans['rel2.name'] + '|||?y'
            # print(one_ent)
            sample.append(one_ent)
        # skip relations whose answer sets would be huge / uninformative
        if path_list[1] != '<国籍>' and path_list[1] != '<类型>' and path_list[1]!='<性别>':
            cypher7=cypher+"(ent1:Entity{name:'" +path_list[0] + "'})-[rel1:Relation{name:'" + path_list[1] + "'}]->(x) match (y)-[rel2]->(x) where ent1.name<>y.name return distinct rel2.name,y.name"
            # print(cypher7)
            try:
                answers = graph.run(cypher7).data()
            except:
                answers = []
                print('next_hop_path_two cypher7 wrong')
            if len(answers) <= 1000:
                for ans in answers:
                    two_ent1=path+'\t'+ans['y.name']+'|||'+ans['rel2.name']+'|||?x'
                    # print(two_ent)
                    sample.append(two_ent1)
            cypher8 = cypher + "(:Entity{name:'" + path_list[0] + "'})-[rel1:Relation{name:'" + path_list[1] + "'}]->(x) match (x)-[rel2]->(y) return distinct rel2.name,y.name"
            # print(cypher8)
            try:
                answers = graph.run(cypher8).data()
            except:
                answers = []
                print('next_hop_path_two cypher8 wrong')
            if len(answers) <= 1000:
                for ans in answers:
                    two_ent = path + '\t' + '?x|||' + ans['rel2.name'] + '|||'+ans['y.name']
                    # print(two_ent)
                    sample.append(two_ent)
    print('two hop path len',len(sample),'---',sample[:5])
    return sample
def _get_next_hop_path_three(graph,path):
    '''
    Given a path of any length, build paths one hop longer (only in the
    y-rel->x form): the current answer ?x is renamed to ?y1 and one extra
    outgoing hop ?y1-rel->?x is appended. Hops whose relation already
    appears on the path (or is '<类型>') are excluded.
    NOTE(review): the bare `except:` below swallows all errors, and the
    error message still says "next_hop_path_two".
    :param graph: py2neo Graph instance
    :param path: tab-joined triples 'subj|||rel|||obj'
    :return: list of extended path strings
    '''
    # escape backslashes and quotes for embedding names in cypher
    path = path.replace('\\', '\\\\').replace("\'", "\\\'")
    triple_list=path.split('\t')
    x_triple=triple_list[-1]
    x_list=x_triple.split('|||')
    if x_list[1]=='<类型>' and x_list[2]=='?x':
        return []
    cypher=''
    rel=[]
    sample=[]
    # print('path2---',path)
    # rebuild the existing path as a chain of MATCH clauses
    for triple in triple_list:
        triple_cypher='match '
        item_list=triple.split('|||')
        if item_list[0].startswith('?'):
            triple_cypher=triple_cypher+"("+item_list[0].strip('?')+")"
        else:
            triple_cypher = triple_cypher +"(:Entity{name:'"+item_list[0]+"'})"
        triple_cypher = triple_cypher + "-[:Relation{name:'" + item_list[1] + "'}]"
        rel.append(item_list[1])
        if item_list[2].startswith('?'):
            triple_cypher = triple_cypher + "->(" + item_list[2].strip('?') + ") "
        else:
            triple_cypher = triple_cypher + "->(:Entity{name:'" + item_list[2] + "'}) "
        cypher=cypher+triple_cypher
    # # only includes y-rel->x
    # if len(re.findall(cypher,'(y)')) > 0:
    #     cypher1=cypher+'match (x)-[rel]->(a) where y.name<>a.name return distinct rel.name'
    # else:
    #     cypher1 = cypher + 'match (x)-[rel]->(a) where '
    #     for ent_item in ent:
    #         cypher1+="a.name<>'"+ent_item+"' and "
    #     cypher1=cypher1[:-4]+'return distinct rel.name'
    # exclude the type relation and every relation already used on the path
    cypher1 = cypher + "match (x)-[rel]->(a) where rel.name<>'<类型>' and "
    for rel_item in rel:
        cypher1+="rel.name<>'"+rel_item+"' and "
    cypher1=cypher1[:-4]+'return distinct rel.name'
    try:
        answers = graph.run(cypher1).data()
    except:
        answers = []
        print('next_hop_path_two cypher1 wrong')
    print('three hop cypher---',cypher1)
    for ans in answers:
        one_ent1_1=path.replace('?x','?y1')+'\t'+'?y1|||'+ans['rel.name']+'|||?x'
        sample.append(one_ent1_1)
    print('three hop path---',sample)
    return sample
def _get_next_hop_path_four(graph,path):
'''
输入任意跳路径,返回构建的多一跳路径(只包含y-rel->x)
:param graph:
:param path:
:return:
'''
path = path.replace('\\', '\\\\').replace("\'", "\\\'")
triple_list=path.split('\t')
x_triple=triple_list[-1]
x_list=x_triple.split('|||')
if x_list[1]=='<类型>' and x_list[2]=='?x':
return []
cypher=''
rel=[]
sample=[]
# print('path2---',path)
for triple in triple_list:
triple_cypher='match '
item_list=triple.split('|||')
if item_list[0].startswith('?'):
triple_cypher=triple_cypher+"("+item_list[0].strip('?')+")"
else:
triple_cypher = triple_cypher +"(:Entity{name:'"+item_list[0]+"'})"
triple_cypher = triple_cypher + "-[:Relation{name:'" + item_list[1] + "'}]"
rel.append(item_list[1])
if item_list[2].startswith('?'):
triple_cypher = triple_cypher + "->(" + item_list[2].strip('?') + ") "
else:
triple_cypher = triple_cypher + "->(:Entity{name:'" + item_list[2] + "'}) "
cypher=cypher+triple_cypher
# #质包括y-rel->x
# if len(re.findall(cypher,'(y1)')) > 0:
# cypher1=cypher+'match (x)-[rel]->(a) where y1.name<>a.name return distinct rel.name'
# else:
# cypher1 = cypher + 'match (x)-[rel]->(a) where '
# for ent_item in ent:
# cypher1 += "a.name<>'" + ent_item + "' and "
# cypher1 = cypher1[:-4] + 'return distinct rel.name'
cypher1 = cypher + "match (x)-[rel]->(a) where rel.name<>'<类型>' and "
for rel_item in rel:
cypher1+="rel.name<>'"+rel_item+"' and "
cypher1 = cypher1[:-4] + 'return distinct rel.name'
try:
answers = graph.run(cypher1).data()
except:
answers = []
print('next_hop_path_two cypher1 wrong')
print('four hop cypher---',cypher1)
for ans in answers:
one_ent1_1=path.replace('?x','?y2')+'\t'+'?y2|||'+ans['rel.name']+'|||?x'
sample.append(one_ent1_1)
print('four hop path---',sample)
return sample
def _get_next_hop_path_(graph,path):
'''
输入任意跳路径,返回构建的多一跳路径(包含各种情况)
:param graph:
:param path:
:return:
'''
path = path.replace('\\', '\\\\').replace("\'", "\\\'")
triple_list=path.split('\t')
x_triple=triple_list[-1]
x_list=x_triple.split('|||')
if x_list[1]=='<类型>' and x_list[2]=='?x':
return []
cypher=''
ent=[]
sample=[]
# print('path2---',path)
for triple in triple_list:
triple_cypher='match '
item_list=triple.split('|||')
if item_list[0].startswith('?'):
triple_cypher=triple_cypher+"("+item_list[0].strip('?')+")"
else:
ent.append(item_list[0])
triple_cypher = triple_cypher +"(:Entity{name:'"+item_list[0]+"'})"
triple_cypher = triple_cypher + "-[:Relation{name:'" + item_list[1] + "'}]"
if item_list[2].startswith('?'):
triple_cypher = triple_cypher + "->(" + item_list[2].strip('?') + ") "
else:
ent.append(item_list[2])
triple_cypher = triple_cypher + "->(:Entity{name:'" + item_list[2] + "'}) "
cypher=cypher+triple_cypher
#第一种格式
if len(re.findall(cypher,'(y)')) > 0:
cypher1=cypher+'match (x)-[rel]->(a) where y.name<>a.name return distinct rel.name'
else:
cypher1 = cypher + 'match (x)-[rel]->(a) return distinct rel.name'
try:
answers = graph.run(cypher1).data()
except:
answers = []
print('next_hop_path_two cypher1 wrong')
for ans in answers:
one_ent1_1=path.replace('?x','?z')+'\t'+'?z|||'+ans['rel.name']+'|||?x'
# print(one_ent)
sample.append(one_ent1_1)
# print(sample[-1])
if len(re.findall(cypher,'(y)'))>0:
cypher1=cypher+'match (x)-[rel]->(a) where y.name<>a.name return distinct rel.name,a.name'
else:
cypher1 = cypher + 'match (x)-[rel]->(a) return distinct rel.name,a.name'
try:
answers = graph.run(cypher1).data()
except:
answers = []
print('next_hop_path_two cypher1 wrong')
for ans in answers:
two_ent1_2=path+'\t?x|||'+ans['rel.name']+'|||'+ans['a.name']
# print(two_ent)
sample.append(two_ent1_2)
# print(sample[-1])
#第二种格式
if len(re.findall(cypher,'(y)'))>0:
cypher2=cypher+'match (a)-[rel]->(x) where y.name<>a.name return distinct rel.name'
else:
cypher2 = cypher + 'match (a)-[rel]->(x) return distinct rel.name'
try:
answers = graph.run(cypher2).data()
except:
answers = []
print('next_hop_path_two cypher2 wrong')
for ans in answers:
one_ent2_1=path.replace('?x','?z')+'\t'+'?x|||'+ans['rel.name']+'|||?z'
# print(one_ent)
sample.append(one_ent2_1)
# print(sample[-1])
if len(re.findall(cypher,'(y)'))>0:
cypher2=cypher+'match (a)-[rel]->(x) where y.name<>a.name return distinct rel.name,a.name'
else:
cypher2 = cypher + 'match (a)-[rel]->(x) return distinct rel.name,a.name'
try:
answers = graph.run(cypher2).data()
except:
answers = []
print('next_hop_path_two cypher2 wrong')
for ans in answers:
two_ent2_2=path+'\t'+ans['a.name']+'|||'+ans['rel.name']+'|||?x'
# print(two_ent)
sample.append(two_ent2_2)
# print(sample[-1])
return sample
if __name__=='__main__':
    # Driver: for each question, beam-search over 1- and 2-hop KB paths,
    # scoring candidates with the BERT-based similarity model, and write the
    # best-scoring path plus all accepted candidates to the output files.
    threshold=1  # stop descending once a candidate scores >= this value
    model = basic_network()
    model.load_weights(config.similarity_ckpt_path)
    # graph = Graph("http://47.114.86.211:57474", username='neo4j', password='pass',timeout=3000)
    graph = Graph("http://59.78.194.63:37474", username='neo4j', password='pass')
    reader = open(config.linking_data_path, 'r', encoding='utf-8')
    data = json.load(reader)
    pre_writer=open(config.pred_result_path,'w',encoding='utf-8')
    ok_writer=open(config.ok_result_path,'w',encoding='utf-8')
    assert len(data)==766
    beamsearch=[10,10,3,2]  # beam widths (top k) per hop depth
    all_sample=0
    all_number=0
    for k in range(len(data)):  # k selects the k-th question to predict
        a_sent_data=data[k]
        print('问题',k,':',a_sent_data['sentence'])
        if len(a_sent_data['pred_entity'])==0:  # skip questions with no linked entity
            continue
        else:
            x_sample=[]
            for candidate in a_sent_data['pred_entity']:
                candidate = candidate.replace("'", "\\'")  # some entity names in the DB contain '
                x_sample.extend(get_one_ent_one_hop(graph,candidate))  # excludes <类型> relations, so a linked entity may yield no path
            if len(x_sample)==0:
                continue
            x_sent=[a_sent_data['sentence']]*len(x_sample)
            path_indices1, path_segments1,ent_indices1,ent_segments1,rel_indices1,rel_segments1 = transfer_data_pathentrel(x_sent, x_sample, config.max_length,config.bert_vocab_path)
            sample_number=len(path_indices1)
            all_sample=all_sample+sample_number
            result = model.predict([path_indices1, path_segments1,ent_indices1,ent_segments1,rel_indices1,rel_segments1],batch_size=config.batch_size)
            result=result.ravel()  # flatten to a 1-D score array
            assert len(x_sample)==len(result)
            top_beamsearch_one_hop=[]  # top-k one-hop candidate paths
            result_sorted = np.argsort(-np.array(result))
            if len(result) > beamsearch[0]:
                result_sorted = result_sorted[0:beamsearch[0]]
            all_max_score = result[result.argmax(-1)]  # best score seen so far, over all depths
            all_max_path = x_sample[result.argmax(-1)]
            # print(all_max_score,result_sorted[0],result[result_sorted[0]])
            assert result[result_sorted[0]] == all_max_score
            for i in result_sorted:
                now = {}
                now['level'] = 1  # 'level' records how many hops the path spans
                now['score'] = result[i]
                now['path'] = x_sample[i]
                top_beamsearch_one_hop.append(now)
                print('one hop top ', beamsearch[0], ': ', now)
            top_beamsearch_two_hop = []  # two-hop candidates that extend a top-k one-hop path AND outscore it (at most k kept)
            if all_max_score < threshold:
                two_score = []
                two_sample = []
                for top in top_beamsearch_one_hop:
                    max = top['score']
                    x_sample_next = _get_next_hop_path_two(graph, top['path'])
                    if len(x_sample_next) == 0:  # no second hop available; try the next candidate
                        continue
                    x_sent_next = [a_sent_data['sentence']] * len(x_sample_next)
                    path_indices2, path_segments2,ent_indices2,ent_segments2,rel_indices2,rel_segments2= transfer_data_pathentrel(x_sent_next, x_sample_next, config.max_length,config.bert_vocab_path)
                    sample_number = len(path_indices2)
                    all_sample = all_sample + sample_number
                    result = model.predict([path_indices2, path_segments2,ent_indices2,ent_segments2,rel_indices2,rel_segments2], batch_size=config.batch_size)
                    result = result.ravel()  # flatten to a 1-D score array
                    # print('two_hop---',result)
                    for i in range(len(result)):
                        if result[i] > max:
                            two_score.append(result[i])
                            two_sample.append(x_sample_next[i])
                            if result[i] > all_max_score:
                                all_max_score = result[i]
                                all_max_path = x_sample_next[i]
                next_sorted = np.argsort(-np.array(two_score))
                if len(next_sorted) > beamsearch[1]:
                    next_sorted = next_sorted[0:beamsearch[1]]
                for i in next_sorted:
                    now = {}
                    now['level'] = 2
                    now['score'] = two_score[i]
                    now['path'] = two_sample[i]
                    top_beamsearch_two_hop.append(now)
                    print('two hop top ',str(beamsearch[1]),' :', now)  # a qualifying path was added to the two-hop beam
                # Deeper hops would continue from the two-hop beam until no higher score can be added.
            pre_writer.write('q'+str(k)+':' + a_sent_data['sentence'] + '\n')
            pre_writer.write(str(all_max_score)+'---'+all_max_path + '\n')
            pre_writer.flush()
            # Write all accepted candidate paths to the "ok" file.
            ok_writer.write('q'+str(k)+':' + a_sent_data['sentence'] + '\n')
            for item in top_beamsearch_one_hop+top_beamsearch_two_hop:
                ok_writer.write(str(item['score'])+'---'+item['path']+'\n')
            print('问题', k,',得分',all_max_score,':',a_sent_data['sentence'],'predict over: ',all_max_path)
            all_number+=1
    pre_writer.close()
    ok_writer.close()
    print('平均的候选答案数量---',all_sample/all_number)
    get_all_F1(config.pred_result_path)
|
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from plone.app.layout.viewlets.common import ViewletBase
class CSS(ViewletBase):
    """Plone viewlet for injecting CSS; rendering comes from ViewletBase.

    NOTE(review): presumably paired with a ViewPageTemplateFile elsewhere —
    confirm against the ZCML registration.
    """

    def available(self):
        # Always visible; override to make display conditional.
        return True
|
## The Data Analysis Process- Drawing Conclusions Quiz ##
"""
This quiz was done on my own with research.

NOTE(review): this is a Jupyter-notebook export — the `% matplotlib inline`
lines are IPython magics and are not valid in a plain Python script.
"""
# imports and load data
import pandas as pd
% matplotlib inline
df = pd.read_csv('store_data.csv')
df.head()
"""
This function selects specific rows in specific columns so that you can apply
statistical functions to them
"""
# total sales for the last month (rows 196 onward)
print(sum(df.loc[196:, 'storeA']))
print(sum(df.loc[196:, 'storeB']))
print(sum(df.loc[196:, 'storeC']))
print(sum(df.loc[196:, 'storeD']))
print(sum(df.loc[196:, 'storeE']))
# average sales (assumes exactly 200 rows in the dataset)
print(sum(df.loc[:,'storeA'])/200)
print(sum(df.loc[:,'storeB'])/200)
print(sum(df.loc[:,'storeC'])/200)
print(sum(df.loc[:,'storeD'])/200)
print(sum(df.loc[:,'storeE'])/200)
"""
This function selects a certain row to be viewed within the dataframe
"""
# sales on march 13, 2016
df[df.week=='2016-03-13']
# worst week for store C (min value, then look up its row)
print(min(df.loc[:,'storeC']))
print(df.loc[df['storeC']==927])
# total sales during most recent 3 month period
print(sum(df.loc[187:, 'storeA']))
print(sum(df.loc[187:, 'storeB']))
print(sum(df.loc[187:, 'storeC']))
print(sum(df.loc[187:, 'storeD']))
print(sum(df.loc[187:, 'storeE']))
## Communicating Results Practice ##
"""
This quiz was done with trial and error then looking at the answers to see
what I did wrong.
"""
# imports and load data
import pandas as pd
% matplotlib inline
df = pd.read_csv('store_data.csv')
# explore data
df.tail(20)
# sales for the last month
df.iloc[196:, 1:].sum().plot(kind='bar');
# average sales
df.mean().plot(kind='pie');
# sales for the week of March 13th, 2016
sales = df[df['week'] == '2016-03-13']
sales.iloc[0, 1:].plot(kind='bar');
# sales for the lastest 3-month periods
last_three_months = df[df['week'] >= '2017-12-01']
last_three_months.iloc[:, 1:].sum().plot(kind='pie')
import sys
# Drop the ROS Kinetic python2 path so the python3 keras/cv2 installs resolve.
# NOTE(review): raises ValueError if the path is absent — confirm this only
# runs on machines with ROS Kinetic installed.
sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
from keras.models import load_model
import cv2
import numpy as np

# BUG FIX: removed the dead `noise = []` assignment and the redundant
# np.array() call — np.random.normal already returns an ndarray.
# 100 latent vectors, each of dimension 100, from a standard normal.
noise = np.random.normal(0, 1, [100, 100])
print(noise.shape)

model = load_model('facegeneratorep100.hdf5')
pr = model.predict(noise)
# Rescale generator output from [-1, 1] to [0, 255] integer pixel values.
pr = ((pr * 127.5) + 127.5).astype(int)
for i, img in enumerate(pr):
    cv2.imwrite('predictions/' + str(i) + '.jpg', img)
__author__ = 'karthikb'

# Sample rotated sorted arrays used for manual testing of the searches below.
a = [15, 16, 19, 20, 25, 1, 3, 4, 5, 7, 10, 14]
a1 = [20, 25, 1, 3, 4, 5, 7, 10, 14, 15, 16, 19]
# Fully sorted (rotation of 0) edge case.
a2 = [1,2,3,4]
def special_binary(a, left, right):
    """Return the minimum element of the rotated sorted array a[left:right+1].

    BUG FIXES vs. the original:
      * the debug print referenced an undefined name `low`, so every call
        raised NameError; the print is removed and `low` corrected to `left`;
      * the missing right-recursion branch made some inputs (e.g. [3,4,1,2])
        fall through and return None;
      * a single-element guard prevents infinite recursion.
    """
    # Base case: one element left.
    if left >= right:
        return a[left]
    mid = (right + left) // 2
    if a[left] < a[right]:
        # Sub-array is already sorted; its first element is the minimum.
        return a[left]
    elif a[mid - 1] >= a[mid] and a[right] > a[mid]:
        # The drop happens exactly at mid.
        return a[mid]
    elif a[left] > a[mid]:
        # Rotation point lies in the left half.
        return special_binary(a, left, mid)
    else:
        # Rotation point lies in the right half.
        return special_binary(a, mid + 1, right)
#print special_binary(a2, 0, len(a2) - 1)
def find_binary(a, left, right, key):
    """Binary-search `key` in the rotated sorted array a[left:right+1].

    Returns the index of `key`, or -1 if absent.

    BUG FIXES vs. the original:
      * `/` division produced a float index under Python 3 (`//` now);
      * keys in the unsorted half were missed (e.g. 20 at index 0 of
        [20,25,1,...]) because the half-selection logic was inverted;
      * missing keys caused infinite recursion (no left > right base case);
      * the Python-2-only debug print is removed.
    """
    # Base case: empty range — key not present.
    if left > right:
        return -1
    mid = left + (right - left) // 2
    if a[mid] == key:
        return mid
    if a[left] <= a[mid]:
        # Left half is sorted.
        if a[left] <= key < a[mid]:
            return find_binary(a, left, mid - 1, key)
        return find_binary(a, mid + 1, right, key)
    # Otherwise the right half is sorted.
    if a[mid] < key <= a[right]:
        return find_binary(a, mid + 1, right, key)
    return find_binary(a, left, mid - 1, key)
print find_binary([1,2,3,4], 0 , len([1,2,3,4])-1 , 2)
|
import os
from datetime import timedelta
class Config(object):
    """Flask configuration for the medical-consult deployment.

    SECURITY NOTE(review): the secret key and DB credentials are hard-coded
    in source — they should come from environment variables.
    """
    SECRET_KEY = 'kaadfadfafafdafafadddddadfadadfaffddddddd'
    # REMEMBER_COOKIE_DURATION = timedelta(seconds=20)
    # Alternative hosted databases, kept for reference:
    # SQLALCHEMY_DATABASE_URI = "mysql+mysqlconnector://armandosuazo:a1234567@armandosuazo.mysql.pythonanywhere-services.com/medical_db"
    # SQLALCHEMY_DATABASE_URI = "mysql+mysqlconnector://armandosuazo:a1234567@armandosuazo.mysql.pythonanywhere-services.com/inspection_db"
    # Local MySQL, root user with an empty password.
    SQLALCHEMY_DATABASE_URI = 'mysql://root:@127.0.0.1/medic_consult_db'
    SQLALCHEMY_POOL_SIZE = 30
    SQLALCHEMY_MAX_OVERFLOW = 20
    SQLALCHEMY_POOL_TIMEOUT = 300
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    DEBUG = True

    # -------- filesystem paths used by the app --------
    PATH_PDF_FOLDER = "C:/Users/UserGP/Documents/MEDICAL CONSULT/app/pdf_report/"
    IMAGE_UPLOADS = 'C:/Users/UserGP/Documents/PROC_INSP/app/static/img/img_database'
    IMAGE_UPLOADS_PROFILE = 'C:/Users/UserGP/Documents/PROC_INSP/app/static/img/profile'
    SAVED_GRAPH_PNG = "C:/Users/UserGP/Documents/PROC_INSP/app/saved_graph"

    # -------- upload limits --------
    ALLOWED_IMAGE_EXTENSIONS = ['JPG', 'JPEG', 'PNG', 'GIF']
    MAX_IMAGE_FILESIZE = 1024 * 1024          # 1 MiB per image
    MAX_CONTENT_LENGTH = 16 * 1024 * 1024     # 16 MiB per request
class Develop(object):
    """Flask configuration for the inspection development database.

    SECURITY NOTE(review): the secret key and DB credentials are hard-coded
    in source — they should come from environment variables.
    """
    SECRET_KEY = 'kaadfadfafafdafafadddddadfadadfaffddddddd'
    # REMEMBER_COOKIE_DURATION = timedelta(seconds=20)
    # Alternative hosted databases, kept for reference:
    # SQLALCHEMY_DATABASE_URI = "mysql+mysqlconnector://armandosuazo:a1234567@armandosuazo.mysql.pythonanywhere-services.com/medical_db"
    # SQLALCHEMY_DATABASE_URI = "mysql+mysqlconnector://armandosuazo:a1234567@armandosuazo.mysql.pythonanywhere-services.com/inspection_db"
    # Local MySQL, root user with an empty password.
    SQLALCHEMY_DATABASE_URI = 'mysql://root:@127.0.0.1/inspection_db'
    # SQLALCHEMY_POOL_SIZE = 30
    # SQLALCHEMY_MAX_OVERFLOW = 20
    # SQLALCHEMY_POOL_TIMEOUT = 300
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    DEBUG = True

    # -------- filesystem paths used by the app --------
    IMAGE_UPLOADS = 'C:/Users/UserGP/Documents/PROC_INSP/app/static/img/img_database'
    IMAGE_UPLOADS_PROFILE = 'C:/Users/UserGP/Documents/PROC_INSP/app/static/img/profile'
    SAVED_GRAPH_PNG = "C:/Users/UserGP/Documents/PROC_INSP/app/saved_graph"

    # -------- upload limits --------
    ALLOWED_IMAGE_EXTENSIONS = ['JPG', 'JPEG', 'PNG', 'GIF']
    MAX_IMAGE_FILESIZE = 1024 * 1024          # 1 MiB per image
    MAX_CONTENT_LENGTH = 16 * 1024 * 1024     # 16 MiB per request
|
import os
import math
import numpy
import nltk
import re
class LexRank(object):
    """LexRank extractive summarizer (Erkan & Radev 2004): score sentences by
    PageRank over an idf-modified-cosine similarity matrix."""

    def __init__(self):
        self.text = Preprocessing()
        self.sim = DocumentSim()

    def score(self, sentences, idfs, CM, t):
        """Fill CM with pairwise similarities, row-normalize it, run PageRank
        and attach the normalized score to each sentence.

        `t` is unused but kept for interface compatibility with callers.
        Returns the (mutated) sentence list.
        """
        n = len(sentences)
        Degree = [0 for _ in sentences]
        for i in range(n):
            for j in range(n):
                CM[i][j] = self.sim.sim(sentences[i], sentences[j], idfs)
                Degree[i] += CM[i][j]
        # Row-normalize so CM is a stochastic transition matrix.
        for i in range(n):
            for j in range(n):
                CM[i][j] = CM[i][j] / float(Degree[i])
        L = self.PageRank(CM, n)
        normalizedL = self.normalize(L)
        for i in range(len(normalizedL)):
            sentences[i].setLexRankScore(normalizedL[i])
        return sentences

    def PageRank(self, CM, n, maxerr=.0001):
        """Power iteration on the row-stochastic matrix CM (max 100 steps).

        BUG FIX: the original returned `Po`, the *previous* iterate, which is
        stale when the loop stops on the 100-step cap; the latest iterate
        `P1` is returned instead. Debug prints removed.
        """
        Po = numpy.zeros(n)
        P1 = numpy.ones(n)
        M = numpy.array(CM)
        t = 0
        while (numpy.sum(numpy.abs(P1 - Po)) > maxerr) and (t < 100):
            Po = numpy.copy(P1)
            t += 1
            P1 = numpy.matmul(Po, M)
        return list(P1)

    def buildMatrix(self, sentences):
        """Return an NxN zero matrix (N = number of sentences)."""
        # The original initialized the matrix and then zeroed it again;
        # a single comprehension suffices.
        return [[0 for _ in sentences] for _ in sentences]

    def buildSummary(self, sentences, n):
        """Return the n highest-scoring sentences (at most len(sentences))."""
        ranked = sorted(sentences, key=lambda x: x.getLexRankScore(), reverse=True)
        return ranked[:n]

    def normalize(self, numbers):
        """Scale the list so its maximum becomes 1.0."""
        max_number = max(numbers)
        return [number / max_number for number in numbers]

    def main(self, n, path):
        """Summarize the document folder at `path` into n sentences."""
        sentences = self.text.openDirectory(path)
        idfs = self.sim.IDFs(sentences)
        CM = self.buildMatrix(sentences)
        sentences = self.score(sentences, idfs, CM, 0.1)
        return self.buildSummary(sentences, n)
class sentence(object):
    """One preprocessed sentence: source document, stemmed tokens, original
    text, per-word frequencies, and its LexRank score."""

    def __init__(self, docName, stemmedWords, OGwords):
        self.stemmedWords = stemmedWords
        self.docName = docName
        self.OGwords = OGwords
        self.wordFrequencies = self.sentenceWordFreqs()
        # BUG FIX: the original initialized `lexRankScore` (lowercase l) but
        # the accessors used `LexRankScore`, so getLexRankScore() raised
        # AttributeError until setLexRankScore() was called. One attribute
        # is now used consistently.
        self.LexRankScore = None

    def getStemmedWords(self):
        return self.stemmedWords

    def getDocName(self):
        return self.docName

    def getOGwords(self):
        return self.OGwords

    def getWordFreqs(self):
        return self.wordFrequencies

    def getLexRankScore(self):
        return self.LexRankScore

    def setLexRankScore(self, score):
        self.LexRankScore = score

    def sentenceWordFreqs(self):
        """Count occurrences of each stemmed word in this sentence."""
        wordFreqs = {}
        for word in self.stemmedWords:
            wordFreqs[word] = wordFreqs.get(word, 0) + 1
        return wordFreqs
class Preprocessing(object):
    """Loads DUC-style documents from disk and turns them into `sentence`
    objects (tokenized, lower-cased, Porter-stemmed)."""

    def processFile(self, file_path_and_name):
        """Parse one DUC-2007 document into a list of `sentence` objects.

        Extracts the <TEXT> block, strips markup, normalizes quotes and
        whitespace, sentence-tokenizes with punkt, stems every token and
        drops punctuation-only tokens. On IO failure returns a one-element
        list holding an empty `sentence`.
        """
        try:
            # BUG FIX: the original leaked the file handle (open without
            # close); `with` guarantees it is closed.
            with open(file_path_and_name, 'r') as f:
                text_0 = f.read()
            # DUC 2007 format: keep only the <TEXT>...</TEXT> block.
            text_1 = re.search(r"<TEXT>.*</TEXT>", text_0, re.DOTALL)
            text_1 = re.sub("<TEXT>\n", "", text_1.group(0))
            text_1 = re.sub("\n</TEXT>", "", text_1)
            text_1 = re.sub("<P>", "", text_1)
            text_1 = re.sub("</P>", "", text_1)
            text_1 = re.sub("\n", " ", text_1)
            # Normalize the various quote spellings to a plain double quote.
            text_1 = re.sub("\"", "\"", text_1)
            text_1 = re.sub("''", "\"", text_1)
            text_1 = re.sub("``", "\"", text_1)
            text_1 = re.sub(" +", " ", text_1)

            sent_tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
            lines = sent_tokenizer.tokenize(text_1.strip())

            sentences = []
            porter = nltk.PorterStemmer()
            for sent in lines:
                OG_sent = sent[:]
                sent = sent.strip().lower()
                line = nltk.word_tokenize(sent)
                stemmed_sentence = [porter.stem(word) for word in line]
                # Drop punctuation-only and possessive tokens.
                stemmed_sentence = list(filter(lambda x: x != '.' and x != '`' and x != ',' and x != '?' and x != "'"
                                               and x != '!' and x != '''"''' and x != "''" and x != "'s",
                                               stemmed_sentence))
                if stemmed_sentence != []:
                    sentences.append(sentence(file_path_and_name, stemmed_sentence, OG_sent))
            return sentences
        except IOError:
            print('Oops! File not found', file_path_and_name)
            return [sentence(file_path_and_name, [], [])]

    def get_file_path(self, file_name):
        """Search the current working tree for `file_name`; '' if absent."""
        for root, dirs, files in os.walk(os.getcwd()):
            for name in files:
                if name == file_name:
                    return os.path.join(root, name)
        print("Error! file was not found!!")
        return ""

    def get_all_files(self, path=None):
        """Return every file path under `path` (defaults to the CWD)."""
        if path is None:
            path = os.getcwd()
        retval = []
        for root, dirs, files in os.walk(path):
            for name in files:
                retval.append(os.path.join(root, name))
        return retval

    def openDirectory(self, path=None):
        """Process every file under `path`, concatenating their sentences."""
        sentences = []
        for file_path in self.get_all_files(path):
            sentences.extend(self.processFile(file_path))
        return sentences
class DocumentSim(object):
    """idf-modified-cosine similarity between sentences, as used by LexRank
    (Erkan & Radev 2004)."""

    def __init__(self):
        self.text = Preprocessing()

    def TFs(self, sentences):
        """Aggregate term frequencies over all sentences."""
        tfs = {}
        for sent in sentences:
            for word, freq in sent.getWordFreqs().items():
                tfs[word] = tfs.get(word, 0) + freq
        return tfs

    def TFw(self, word, sentence):
        """Term frequency of `word` in one sentence (0 if absent)."""
        return sentence.getWordFreqs().get(word, 0)

    def IDFs(self, sentences):
        """Inverse sentence-frequency for every word: log10(N / n).

        BUG FIX: the original iterated every *occurrence* of a word within a
        sentence, so a word repeated k times in one sentence contributed k
        to its document frequency, deflating its IDF. Each sentence now
        contributes at most once per distinct word.
        """
        N = len(sentences)
        counts = {}
        for sent in sentences:
            for word in set(sent.getStemmedWords()):
                counts[word] = counts.get(word, 0) + 1
        idfs = {}
        for word, n in counts.items():
            # n >= 1 by construction, so the division is safe.
            idfs[word] = math.log10(float(N) / n)
        return idfs

    def IDF(self, word, idfs):
        return idfs[word]

    def sim(self, sentence1, sentence2, idfs):
        """idf-modified-cosine of two sentences; -inf if either norm is 0.

        NOTE(review): the sums iterate word *occurrences*, so repeated words
        are weighted by an extra factor of their count relative to the paper's
        formula — kept as-is to preserve existing scoring behavior.
        """
        numerator = 0
        denom1 = 0
        denom2 = 0
        for word in sentence2.getStemmedWords():
            numerator += self.TFw(word, sentence2) * self.TFw(word, sentence1) * self.IDF(word, idfs) ** 2
        for word in sentence1.getStemmedWords():
            denom2 += (self.TFw(word, sentence1) * self.IDF(word, idfs)) ** 2
        for word in sentence2.getStemmedWords():
            denom1 += (self.TFw(word, sentence2) * self.IDF(word, idfs)) ** 2
        try:
            return numerator / (math.sqrt(denom1) * math.sqrt(denom2))
        except ZeroDivisionError:
            return float("-inf")
if __name__ == '__main__':
    # Summarize every DUC-2007 document folder and write one ".LexRank"
    # summary file per folder.
    lexRank = LexRank()
    doc_folders = os.listdir("Data_DUC_2007/Documents")
    total_summary = []
    summary_length = 14  # sentences per summary
    for folder in doc_folders:
        path = os.path.join("Data_DUC_2007/Documents/", '') + folder
        print("Running LexRank Summarizer for files in folder: ", folder)
        doc_summary = []
        summary = lexRank.main(summary_length, path)
        for sentences in summary:
            # Flatten the original sentence text onto one line.
            text_append = re.sub("\n", "", sentences.getOGwords())
            text_append = text_append + " "
            doc_summary.append(text_append)
        total_summary.append(doc_summary)
    # NOTE(review): chdir after the loop — output paths below are relative
    # to the results directory from here on.
    os.chdir("Data_DUC_2007/LexRank_results")
    for i in range(len(doc_folders)):
        myfile = doc_folders[i] + ".LexRank"
        f = open(myfile, 'w')
        for j in range(summary_length):
            f.write(total_summary[i][j])
        f.close()
|
#basic lib to work with dataset
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
#libraries to work with the anonymity of the proc(Data)
from cn.protect import Protect
from cn.protect.privacy import KAnonymity
from cn.protect.hierarchy import DataHierarchy, OrderHierarchy, IntervalHierarchy
from cn.protect.quality import Loss #to calculate the loss of the data
# Load the raw COVID case data and inspect types / missing values.
dataset=pd.read_csv("dataset/raw_data1.csv")
print(dataset.dtypes)
print(dataset.head())
print(dataset.isnull().any())
#filling the NaN or null values with median or mode of the values
dataset['Patient Number'].fillna(dataset['Patient Number'].median(),inplace = True)
dataset['State Patient Number'].fillna(dataset['State Patient Number'].mode()[0],inplace = True)
dataset['Age Bracket'].fillna(dataset['Age Bracket'].mode()[0],inplace = True)
dataset['Gender'].fillna(dataset['Gender'].mode()[0] ,inplace = True)
dataset['Detected City'].fillna(dataset['Detected City'].mode()[0],inplace = True)
dataset['Detected District'].fillna(dataset['Detected District'].mode()[0],inplace = True)
dataset['Detected State'].fillna(dataset['Detected State'].mode()[0],inplace = True)
dataset['State code'].fillna(dataset['State code'].mode()[0],inplace = True)
dataset['Nationality'].fillna(dataset['Nationality'].mode()[0] ,inplace = True)
dataset['Type of transmission'].fillna(dataset['Type of transmission'].mode()[0] ,inplace = True)
dataset['Status Change Date'].fillna(dataset['Status Change Date'].mode()[0] ,inplace = True)
# Drop free-text source columns and mostly-empty columns.
dataset.drop(['Source_1', 'Source_2', 'Source_3', "Contracted from which Patient (Suspected)", "Estimated Onset Date"], axis=1, inplace=True)
# Normalize non-numeric age entries so the column can become int.
dataset["Age Bracket"].replace({"28-35": "32", "1.5": 2}, inplace=True)
dataset["Age Bracket"] = dataset["Age Bracket"].astype(str).astype(int)
print(dataset.tail(2))
#labelEncoding the Patient's identity
dataset["Patient Number"]=le.fit_transform(dataset["Patient Number"])
dataset["State Patient Number"]=le.fit_transform(dataset["State Patient Number"])
print(dataset.head())
#visualizing the dataset
import seaborn as sns
print(sns.pairplot(dataset))
#applying KAnonymity, suppression, loss functions on the data by creating a prot datatype
prot=Protect(dataset, KAnonymity(17300))
prot.quality_model=Loss()
prot.suppression=.1
#hiding the identifiers (explicit)
for col in dataset:
    if col not in ("Patient Number", "State Patient Number", "Detected District"):
        prot.itypes[col]='insensitive'
prot.itypes["Patient Number"]='identifying'
prot.itypes["State Patient Number"]='quasi'
prot.itypes["Detected District"]='quasi'
prot.itypes["Age Bracket"]='insensitive'
print(prot.itypes)
#transfering prot data type to dataframe(priv)
priv = prot.protect()
#generalizing the age
# NOTE(review): prot.protect() is called twice - the second call below looks
# redundant; confirm whether it is intentional.
priv = prot.protect()
priv=priv.rename(columns={"Age Bracket":"age"})
# Bucket exact ages into coarse ranges for anonymity.
bins = [0,18, 30, 40, 50, 60, 70, 120]
labels = ['0-17','18-29', '30-39', '40-49', '50-59', '60-69', '70+']
priv['Age'] = pd.cut(priv.age, bins, labels = labels,include_lowest = True)
priv["age"]=priv["Age"]
priv.drop(["Age"], axis=1, inplace=True)
# Save the anonymized dataframe to CSV.
# BUG FIX: the original wrote the raw `dataset` under the
# "Privacy_Protected" filename; the protected frame is `priv`.
priv.to_csv('Privacy_Protected_rawdata1.csv', index=False)
|
import math
import networkx as nx
import pandas as pd
import numpy as np
import random
from time import process_time
# Build a k-ary fat-tree topology as a NetworkX graph, then set up the
# reward (R) and Q matrices for Q-learning-based path search.
k = int(input("Enter a k: "))
# A k-ary fat-tree supports k^3/4 servers.
maxServer = int(math.pow(k, 3) / 4)
print('CHECK: Max amount of servers given k = ', k, ' is ', maxServer, 'servers')
# initializing topology counts
coreCT = int(math.pow((k / 2), 2))      # number of core switches
interswitch = int(k / 2)                # edge/agg switches per pod
servers = int(math.pow((k / 2), 2))     # servers per pod
# Node-ID counters: servers occupy [0, maxServer), then edge, agg, core.
edgeS = maxServer
aggS = maxServer + (2 * servers) - 1
coreS = aggS + (2 * servers) - int(interswitch / 2) - 1
if (coreS % 2 != 0):
    coreS = coreS + 1
icoreS = coreS  # remember the first core-switch ID
print('INFO CHECK')
print('Number of Core Switches: ', coreCT, ' and number of inter switches(aggregate and edge switches) : ', interswitch)
# initializing lists to add edges to
ft = list()
results = []
serverID = 0
switchCT = 0
aggCT = 0
Cct = 1
v = 2 * (int(math.log(k,2))-1)  # core-ID stride between pods
# graph to use later for the search
graph = list()
l1 = list() # edge between PM_ID => EDGE SWITCH
l2 = list() # edge between EDGE SWITCH => AGG SWITCH
l3 = list() # edge between AGG SWITCH => CORE SWITCH
print(aggS, coreS, icoreS, v)
for pod in range(0, (k)):
    for a in range(0, (servers)):
        ft.append(serverID)
        l1.append(serverID)
        serverID = serverID + 1
        switchCT = switchCT + 1
        aggCT = aggCT + 1
        if (switchCT <= interswitch):
            # Stay on the current edge switch within this pod column.
            ft.append(edgeS)
            l1.append(edgeS)
            aggS = aggS + 1
            ft.append(aggS)
            l2 = [edgeS, aggS]
            coreS = coreS + v
        else:
            # Move on to the next edge switch.
            edgeS = edgeS + 1
            ft.append(edgeS)
            l1.append(edgeS)
            switchCT = 1
            Cct = Cct + 1
            coreS = icoreS + v + Cct - 1
            if (aggCT >= servers):
                aggS = aggS + 1
                ft.append(aggS)
                aggCT = 1
            else:
                aggS = aggS - (interswitch - 1)
                ft.append(aggS)
            l2 = [edgeS, aggS]
        ft.append(coreS)
        l3 = [aggS, coreS]
        # Record server-edge, edge-agg, agg-core links.
        graph.append(tuple(l1))
        graph.append(tuple(l2))
        graph.append(tuple(l3))
        results.append(ft)
        print(ft)
        ft, l1, l2, l3 = ([] for i in range(4))
    Cct = 0
# convert list graph into an actual graph
G = nx.Graph()
G.add_edges_from(graph)
pos = nx.spring_layout(G)
nx.draw_networkx_nodes(G, pos)
nx.draw_networkx_edges(G, pos)
nx.draw_networkx_labels(G, pos)
# plt.show()
# calculate upper boundary (largest node ID + 1) for the matrices
maxV = np.max(results) + 1
# print(maxV)
source = int(input("Enter the source ID: "))
dest = int(input("Enter the destination ID: "))
pd.set_option("display.max_rows", None, "display.max_columns", None)
start = process_time()
# rewards matrix: only transitions arriving at `dest` are rewarded
R = np.matrix(np.zeros(shape=(maxV, maxV)))
for x in G[dest]:
    R[x, dest] = 100
# Q matrix: -100 everywhere, 0 along existing links
Q = np.matrix(np.zeros(shape=(maxV, maxV)))
Q -= 100
for node in G.nodes:
    for x in G[node]:
        Q[node, x] = 0
        Q[x, node] = 0
def next_number(start, er):
    """Pick the next node to visit from `start` during training.

    With probability `er`, switches (IDs above maxServer) move to a random
    neighbor in G; otherwise (and for server nodes) the move is greedy over
    the current Q row. NOTE(review): this inverts the usual epsilon-greedy
    convention (explore when rand < er) — confirm intended.
    """
    random_value = random.uniform(0, 1)
    if random_value < er:
        print(start)
        if ((start > maxServer)):
            print(start, G[start])
            sample = G[start]
        else:
            print('thinking..')
            # Greedy: all columns achieving the row maximum of Q[start].
            sample = np.where(Q[start,] == np.max(Q[start,]))[1]
    else:
        sample = np.where(Q[start,] == np.max(Q[start,]))[1]
    # Break ties (or pick a neighbor) uniformly at random.
    next_node = int(np.random.choice(sample, 1))
    return next_node
def updateQ(n1, n2, lr, discount):
    """Q-learning update for the transition n1 -> n2.

    Q[n1,n2] := (1-lr)*Q[n1,n2] + lr*(R[n1,n2] + discount*max_a Q[n2,a]),
    truncated to int. Reads/writes the module-level Q and R matrices.
    """
    max_index = np.where(Q[n2,] == np.max(Q[n2,]))[1]
    if max_index.shape[0] > 1:
        # Break ties among equally-good successor actions at random.
        max_index = int(np.random.choice(max_index, size=1))
    else:
        max_index = int(max_index)
    max_value = Q[n2, max_index]
    Q[n1, n2] = int((1 - lr) * Q[n1, n2] + lr * (R[n1, n2] + discount * max_value))
# Training budget: as k increases, the number of walks needs to grow too.
walk = 100 * (pow(6,int(math.log(k))*2) )
print(walk)
def learn(er, lr, discount):
    """Run `walk` random-start training episodes of one step each.

    Each iteration picks a random start node, chooses a successor via
    next_number, and applies the Q-learning update.
    """
    for i in range(int(walk)):
        start = np.random.randint(0, maxV)
        next_node = next_number(start, er)
        updateQ(start, next_node, lr, discount)
# Begin training: exploration rate 0.4, learning rate 0.8, discount 0.8.
learn(0.4, 0.8, 0.8)
def sp(source, dest):
    """Greedily follow the learned Q-matrix from `source` toward `dest`.

    Returns the list of visited node IDs (including both endpoints when the
    walk succeeds).

    BUG FIX: the original declared `nopath` and `limit_count` but never used
    them, so an imperfectly trained Q-matrix caused an infinite loop. The
    walk is now capped at maxServer * maxServer hops and returns the partial
    path if the cap is hit. The per-hop debug print is removed.
    """
    path = [source]
    hop_limit = maxServer * maxServer
    hops = 0
    next_node = np.argmax(Q[source,])
    path.append(next_node)
    while next_node != dest and hops < hop_limit:
        next_node = np.argmax(Q[next_node,])
        path.append(next_node)
        hops += 1
    return path
# Run the greedy walk and report hop count and elapsed wall time.
final_path = sp(source, dest)
print('From', source, 'to', dest, 'takes', len(final_path) - 1, 'hops!')
print('Final path: ', final_path)
stop = process_time()
print("Time elapsed: ", (stop - start), ' seconds')
|
import sublime
import sublime_plugin
import base64
class EncodeCommand(sublime_plugin.TextCommand):
    """Sublime Text command: replace each selected region with the Base64
    encoding of its stripped text."""

    def run(self, edit):
        for region in self.view.sel():
            original_text = self.view.substr(region)
            encoded_text = base64.b64encode(bytes(original_text.strip(), encoding='utf-8')).decode("utf-8")
            self.view.replace(edit, region, str(encoded_text))
class DecodeCommand(sublime_plugin.TextCommand):
    """Sublime Text command: replace each selected region with the Base64
    decoding of its stripped text."""

    def run(self, edit):
        for region in self.view.sel():
            original_text = self.view.substr(region)
            decoded_text = base64.b64decode(bytes(original_text.strip(), encoding='utf-8')).decode("utf-8")
            self.view.replace(edit, region, str(decoded_text))
|
import unittest
from data_action import get_data
from data_action import delete_data
# Module-level fixture URLs. NOTE(review): the test methods below redefine
# these locally, shadowing the module constants.
test_url_1 = "https://data.seattle.gov/resource/4xy5-26gy.csv"
test_url_2 = "https://data.seattle.gov/resource/4xy5-27gy.csv"
class TestDataAction(unittest.TestCase):
    """Integration tests for get_data/delete_data.

    These hit the live data.seattle.gov service, so they depend on network
    availability; each test resets local state with delete_data first.
    """

    # Test get_data function
    def testGetData(self):
        test_url_1 = "https://data.seattle.gov/resource/4xy5-26gy.csv"
        test_url_2 = "https://data.seattle.gov/resource/4xy5-27gy.csv"
        # Ensure a clean slate so the first download actually happens.
        delete_data(test_url_1)
        # Test 1: URL points to a file that does exist, thus download should occur.
        result = get_data(test_url_1)
        self.assertTrue(result == "Download performed successfully.")
        # Test 2: URL was already downloaded locally.
        result = get_data(test_url_1)
        self.assertTrue(result == "File exists locally, skipping download.")
        # Test 3: URL points to a file that does not exist.
        result = get_data(test_url_2)
        self.assertTrue(result == "URL does not point to a file that exists.")

    # Test delete_data function
    def testDeleteData(self):
        test_url_1 = "https://data.seattle.gov/resource/4xy5-26gy.csv"
        test_url_2 = "https://data.seattle.gov/resource/4xy5-27gy.csv"
        # Reset, then make sure the file exists locally before deleting it.
        delete_data(test_url_1)
        get_data(test_url_1)
        # Test 1: URL delete is successful
        result = delete_data(test_url_1)
        self.assertTrue(result == "File successfully removed locally.")
        # Test 2: URL not found locally. File from URL is valid.
        result = delete_data(test_url_1)
        self.assertTrue(result == "File from URL not found locally.")
# Run the suite when executed directly.
if __name__ == '__main__':
    unittest.main()
|
import unittest
from unittest.mock import Mock
from src.combat.combat import Combat
from src.elemental.ability.ability import Target
from src.elemental.combat_elemental import CombatElemental
from src.team.combat_team import CombatTeam
from src.team.team import Team
from tests.character.character_builder import NPCBuilder, PlayerBuilder
from tests.elemental.elemental_builder import ElementalBuilder
from tests.team.team_builder import TeamBuilder
class CombatTeamTests(unittest.TestCase):
    """Unit tests for CombatTeam: active-elemental setup, bench management,
    knockout handling, mana regeneration, and Combat targeting."""

    def test_setup_active(self):
        error = "CombatTeam didn't assign an active CombatElemental on combat start"
        team = CombatTeam([ElementalBuilder().build()])
        # Starting a Combat should trigger the team's setup.
        Combat([team], [], Mock())
        self.assertIsInstance(
            team.active_elemental,
            CombatElemental, error)

    def test_skip_ko_active(self):
        error = "CombatTeam incorrectly set a 0 HP Elemental as the active Elemental"
        team = CombatTeam([
            ElementalBuilder().with_current_hp(0).build(),
            ElementalBuilder().build()
        ])
        Combat([team], [], Mock())
        self.assertGreater(team.active_elemental.current_hp, 0, error)

    def test_is_npc(self):
        error = "CombatTeam didn't flag itself as NPC when its owner was an NPC"
        npc = NPCBuilder().build()
        combat_team = CombatTeam([ElementalBuilder().build()], npc)
        self.assertIs(combat_team.is_npc, True, error)

    def test_bench(self):
        error = "CombatTeam incorrectly included the active CombatElemental in bench"
        team = CombatTeam([
            ElementalBuilder().build(),
            ElementalBuilder().build()
        ])
        Combat([team], [], Mock())
        self.assertEqual(len(team.bench), 1, error)
        self.assertEqual(team.bench[0].id, team.elementals[0].id, error)

    def test_eligible_bench(self):
        error = "CombatTeam incorrectly included knocked out CombatElementals in the eligible bench"
        team = CombatTeam([
            ElementalBuilder().with_current_hp(0).build(),
            ElementalBuilder().build()
        ])
        Combat([team], [], Mock())
        self.assertEqual(len(team.eligible_bench), 0, error)

    def test_switch_ko(self):
        error = "CombatTeam incorrectly allowed a knocked out CombatElemental to be switched in"
        team = CombatTeam([
            ElementalBuilder().with_current_hp(0).build(),
            ElementalBuilder().build()
        ])
        Combat([team], [], Mock())
        is_switched = team.attempt_switch(team.elementals[0])
        self.assertFalse(is_switched, error)

    def test_all_knocked_out(self):
        error = "CombatTeam.is_all_knocked_out didn't resolve correctly"
        team = CombatTeam([
            ElementalBuilder().with_current_hp(0).build(),
        ])
        self.assertIs(team.is_all_knocked_out, True, error)

    def test_mana_per_turn(self):
        error = "CombatTeam eligible Elementals on the bench didn't gain mana on turn start"
        team = CombatTeam([
            ElementalBuilder().build(),
            ElementalBuilder().build()
        ])
        Combat([team], [], Mock())
        bench = team.eligible_bench
        starting_mana = bench[0].current_mana
        team.turn_start()
        resultant_mana = bench[0].current_mana
        self.assertGreater(resultant_mana, starting_mana, error)

    def test_team_defensive_copy(self):
        error = "Changing the member of a Team incorrectly affected the CombatTeam"
        # Note: it shouldn't even be possible to change your elementals while
        # in combat; this guards against shared-list aliasing regardless.
        team = TeamBuilder().build()
        combat_team = CombatTeam.from_team(team)
        team.remove_elemental(0)
        self.assertEqual(len(combat_team.elementals), 1, error)

    def test_get_enemy_target(self):
        error = "Ability that targets an enemy didn't get the correct target"
        team_a = CombatTeam([ElementalBuilder().build()], PlayerBuilder().build())
        team_b = CombatTeam([ElementalBuilder().build()], PlayerBuilder().build())
        combat = Combat([team_a], [team_b], Mock())
        ability = Mock()
        ability.targeting = Target.ENEMY
        target = combat.get_target(ability, team_a.active_elemental)
        self.assertEqual(target, team_b.active_elemental, error)

    def test_get_self_target(self):
        error = "Ability that targets self didn't get the correct target"
        team_a = CombatTeam([ElementalBuilder().build()], PlayerBuilder().build())
        team_b = CombatTeam([ElementalBuilder().build()], PlayerBuilder().build())
        combat = Combat([team_a], [team_b], Mock())
        ability = Mock()
        ability.targeting = Target.SELF
        target = combat.get_target(ability, team_a.active_elemental)
        self.assertEqual(target, team_a.active_elemental, error)
|
if __name__ == '__main__':
    # Merge three line-aligned feature files into one SVMlight-style feature
    # file. Lines are matched by query/passage id; mismatches abort via assert.
    #
    # Fixes vs. the original:
    #  * The file handles were previously stored in variables that the loop
    #    body reassigned to integer offsets (`bert_feature_index_start = 2`),
    #    so the second iteration crashed with AttributeError.
    #  * EOF was tested on the already-split line; `''.split(' ')` is `['']`
    #    (truthy), so the loop never terminated. Test the raw line instead.
    #  * All files are now closed via context managers.
    with open('output/test_set_bert_features.txt', 'r') as bert_features, \
         open('output/features.txt', 'r') as main_features, \
         open('output/word_distance_features.txt', 'r') as word_features, \
         open("output/ultimate_features.txt", "a", newline='') as feature_file:
        while True:
            # Read one aligned line from each source file.
            bert_raw = bert_features.readline()
            main_raw = main_features.readline()
            word_raw = word_features.readline()
            if not bert_raw:
                break  # End of file reached.
            bert_line = bert_raw.split(' ')
            main_line = main_raw.split(' ')
            word_line = word_raw.split(' ')
            # Check the query IDs agree across all three files.
            bert_query_id = bert_line[0][4:]
            main_query_id = main_line[1][4:]
            word_query_id = word_line[0][4:]
            assert bert_query_id == main_query_id and main_query_id == word_query_id
            # Check the passage IDs agree across all three files.
            bert_passage_id = bert_line[1][4:]
            main_passage_id = main_line[5][1:]
            word_passage_id = word_line[1][4:]
            assert bert_passage_id == main_passage_id and main_passage_id == word_passage_id
            # Re-index the 8 BERT features to output positions 4..11.
            BERT_SRC_START, BERT_DST_START = 2, 4
            bert_feature_line = ''
            for i in range(8):
                feature = bert_line[BERT_SRC_START + i].split(':')[1]
                bert_feature_line += ' {}:{}'.format(BERT_DST_START + i, feature)
            # The first five main-file tokens (label, qid, features 1-3) pass through.
            main_feature_line = ' '.join(main_line[0:5])
            # Re-index the 2 word-distance features to output positions 12..13.
            WORD_SRC_START, WORD_DST_START = 2, 12
            word_feature_line = ''
            for i in range(2):
                feature = word_line[WORD_SRC_START + i].split(':')[1]
                word_feature_line += ' {}:{}'.format(WORD_DST_START + i, feature)
            # NOTE(review): main_passage_id presumably retains the trailing
            # newline from readline(), which terminates the written record.
            res_line = main_feature_line + bert_feature_line + word_feature_line + ' #' + main_passage_id
            print(res_line)
            feature_file.write(res_line)
|
"""
For cases in which an entire view function needs to be made available
only to users with certain permissions, a custom decorator can be used.
Example usage:
@main.route('/admin')
@login_required
@admin_required
def for_admins_only():
return "For administrators!"
@main.route('/moderator')
@login_required
@permission_required(Permission.MODERATE_COMMENTS)
def for_moderators_only():
return "For comment moderators!"
"""
from functools import wraps
from flask import abort
from flask_login import current_user
from .models import Permission
def permission_required(permission):
    """Build a view decorator that rejects users lacking ``permission``.

    The wrapped view runs only when ``current_user.can(permission)`` is
    truthy; otherwise the request is aborted with HTTP 403 (Forbidden).
    """
    def decorator(view_func):
        # functools.wraps preserves the view's name/docstring/signature
        # metadata so Flask's routing and introspection keep working.
        @wraps(view_func)
        def wrapper(*args, **kwargs):
            if current_user.can(permission):
                return view_func(*args, **kwargs)
            abort(403)
        return wrapper
    return decorator
def admin_required(f):
    """Shortcut decorator: restrict the view to users with ADMINISTER permission."""
    return permission_required(Permission.ADMINISTER)(f)
|
"""
This contains implementations of:
synflow, grad_norm, fisher, and grasp, and variants of jacov and snip
based on https://github.com/mohsaied/zero-cost-nas
"""
import torch
import logging
import math
from naslib.predictors.predictor import Predictor
from naslib.predictors.utils.pruners import predictive
logger = logging.getLogger(__name__)
class ZeroCost(Predictor):
    """Predictor that scores architectures with a zero-cost proxy measure.

    Supported method types: 'jacov', 'snip', 'synflow', 'grad_norm',
    'fisher', 'grasp'.
    """

    def __init__(self, method_type="jacov"):
        # Force deterministic cuDNN so repeated queries give stable scores.
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
        self.method_type = method_type
        self.dataload = "random"
        self.num_imgs_or_batches = 1
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    def query(self, graph, dataloader=None, info=None):
        """Return the zero-cost score of `graph` on one data batch."""
        criterion = graph.get_loss_fn()
        num_classes = graph.num_classes
        score = predictive.find_measures(
            net_orig=graph,
            dataloader=dataloader,
            dataload_info=(self.dataload, self.num_imgs_or_batches, num_classes),
            device=self.device,
            loss_fn=criterion,
            measure_names=[self.method_type],
        )
        # Clamp non-finite scores to a large negative sentinel.
        if math.isnan(score) or math.isinf(score):
            score = -1e8
        if self.method_type == 'synflow':
            # Synflow spans many orders of magnitude: compare on a signed
            # log scale, leaving an exact zero untouched.
            if score == 0.:
                return score
            return math.log(score) if score > 0 else -math.log(-score)
        return score
|
#!/usr/bin/env python
"""
A quick utility script to mark analyzed songs as analyzed.
A song has been analyzed if any notes contain a non-NULL root.
$ python -m utils.mark_analyzed [-t DBPOOL_SIZE] [-u USERNAME] [-p PASSWORD]
where:
- DBPOOL_SIZE is the number of databases
- USERNAME is the database username
- PASSWORD is the database password
"""
from db import get_sessions,Song
from optparse import OptionParser
from iter import SongIterator
def main():
    """Mark each song as analyzed if any of its notes has a non-NULL root.

    Iterates every DB session in the pool, scans each song's notes, sets
    `song.analyzed` and commits once per session.  (Python 2 script.)
    """
    parser = OptionParser()
    parser.add_option("-t", "--pool-size", dest="pool_size", default=8, type="int")
    parser.add_option("-u", "--username", dest="db_username", default="postgres")
    parser.add_option("-p", "--password", dest="db_password", default="postgres")
    (options, args) = parser.parse_args()
    count = 0
    # iterate through all database sessions
    for session in get_sessions(options.pool_size,options.db_username,options.db_password):
        # through all songs and notes
        for song in session.query(Song).all():
            print count, ".", song
            # the song has already been marked as analyzed
            if song.analyzed:
                print "\t(song.analyzed == True)"
                continue
            # check notes in the song
            for note in SongIterator(song):
                # if a note has a root, then the song has been analyzed
                if note.root != None:
                    song.analyzed = True
                    break
            # print out the results
            # NOTE(review): these two messages look swapped relative to the
            # flag just computed — confirm intended wording.
            if song.analyzed:
                print "\tAlready analyzed."
            else:
                print "\tNeed to analyze."
            session.commit()
            count += 1

if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
from django import forms
from cadastro.models import Inscricao
class InscricaoForm(forms.ModelForm):
    """Registration form backed by the Inscricao model.

    Fixes vs. the original: Django *form* fields do not accept model-field
    options — ``unique=True``, ``blank=True`` and ``auto_now_add=True`` all
    raise TypeError at import time.  Uniqueness is enforced by the model /
    database layer, ``blank=True`` maps to ``required=False`` on a form,
    and ``criado_em`` (auto_now_add) is set by the model, so it must not be
    user-editable.  ``Meta`` must declare ``model`` (not ``forms``) plus an
    explicit field list.
    """
    nome = forms.CharField(max_length=300)
    tipo_pessoa = forms.CharField(max_length=100)
    cpf_cnpj = forms.CharField(label='cpf_cnpj', max_length=20)
    rg = forms.CharField(max_length=25)
    idade = forms.IntegerField()
    email = forms.EmailField()
    # blank=True on the model corresponds to required=False on the form.
    telefone = forms.CharField(max_length=20, required=False)

    class Meta:
        model = Inscricao
        fields = ['nome', 'tipo_pessoa', 'cpf_cnpj', 'rg', 'idade',
                  'email', 'telefone']
#encoding=utf8
from models import *
#from serializers import *
from django.db.models import Q
from django.http import HttpResponse
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.request import Request
from rest_framework import renderers
from rest_framework.decorators import api_view
from rest_framework.reverse import reverse
import logging
ac_logger = logging.getLogger("access_log")
from django.contrib.auth.models import User
from django.shortcuts import render,render_to_response
from django.http import HttpResponseRedirect
import json
@api_view(('GET',))
def api_root(request, format=None):
    """API entry point: hyperlinked index of the top-level endpoints."""
    # Response key -> URL-conf view name (note 'codislog' maps to 'codis-log').
    endpoints = {
        'hosts': 'hosts',
        'codis': 'codis',
        'codislog': 'codis-log',
        'allcodisinfo': 'allcodisinfo',
        'rebalance': 'rebalance',
        'proxyinfo': 'proxyinfo',
        'serverinfo': 'serverinfo',
    }
    return Response({
        key: reverse(view_name, request=request, format=format)
        for key, view_name in endpoints.items()
    })
|
# coding=utf-8
from pytest_bdd import (
scenario
)
@scenario('../features/redshift_node_metrics_percentage_disk_space_used.feature',
          'Create redshift:alarm:node_metrics_percentage_disk_space_used:2020-04-01 '
          'based on PercentageDiskSpaceUsed metric and check OK status.')
def test_node_metrics_percentage_disk_space_used():
    # Body intentionally empty: pytest-bdd executes the steps bound to the
    # feature scenario named in the decorator.
    pass
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import fvcore.nn.weight_init as weight_init
from torch import nn
import torch.nn.functional as F
from detectron2.layers import Conv2d, FrozenBatchNorm2d, get_norm, BatchNorm2d
from detectron2.modeling import BACKBONE_REGISTRY, ResNet, make_stage
from detectron2.modeling.backbone.resnet import BottleneckBlock, DeformBottleneckBlock, ResNetBlockBase
from .layers.wrappers import Conv2dv2
__all__ = ["BUABasicStem", "BUABasicStemv2", "build_bua_resnet_backbone"]
class BUABasicStem(nn.Module):
    """ResNet-v1 stem for the bottom-up-attention backbone.

    7x7 stride-2 convolution with norm + in-place ReLU, followed by a
    3x3 stride-2 max pool; overall spatial stride is 4.
    """

    def __init__(self, in_channels=3, out_channels=64, norm="BN"):
        """
        Args:
            norm (str or callable): a callable that takes the number of
                channels and returns a `nn.Module`, or a pre-defined string
                (one of {"FrozenBN", "BN", "GN"}).
        """
        super().__init__()
        self.conv1 = Conv2d(
            in_channels,
            out_channels,
            kernel_size=7,
            stride=2,
            padding=3,
            bias=False,
            norm=get_norm(norm, out_channels),
        )
        weight_init.c2_msra_fill(self.conv1)

    def forward(self, x):
        out = F.relu_(self.conv1(x))
        return F.max_pool2d(out, kernel_size=3, stride=2, padding=0, ceil_mode=True)

    @property
    def out_channels(self):
        return self.conv1.out_channels

    @property
    def stride(self):
        # stride-2 conv followed by stride-2 max pool
        return 4
class BUABasicStemv2(nn.Module):
    """ResNet-v2 style stem: BatchNorm on the raw input, then a 7x7
    stride-2 conv with its own BatchNorm + in-place ReLU, then a 3x3
    stride-2 max pool; overall spatial stride is 4.
    """

    def __init__(self, in_channels=3, out_channels=64, norm="BN"):
        """
        Args:
            norm (str or callable): a callable that takes the number of
                channels and returns a `nn.Module`, or a pre-defined string
                (one of {"FrozenBN", "BN", "GN"}).
        """
        super().__init__()
        # Input normalization applied before the first convolution.
        self.norm = BatchNorm2d(in_channels, eps=2e-5)
        self.conv1 = Conv2d(
            in_channels,
            out_channels,
            kernel_size=7,
            stride=2,
            padding=3,
            bias=False,
            norm=BatchNorm2d(out_channels, eps=2e-5),
        )
        # weight_init.c2_msra_fill(self.norm)
        weight_init.c2_msra_fill(self.conv1)

    def forward(self, x):
        out = F.relu_(self.conv1(self.norm(x)))
        return F.max_pool2d(out, kernel_size=3, stride=2, padding=0, ceil_mode=True)

    @property
    def out_channels(self):
        return self.conv1.out_channels

    @property
    def stride(self):
        # stride-2 conv followed by stride-2 max pool
        return 4
@BACKBONE_REGISTRY.register()
def build_bua_resnet_backbone(cfg, input_shape):
    """
    Create a ResNet instance from config.

    Selects a v1 or v2 stem/block via cfg.MODEL.BUA.RESNET_VERSION, applies
    freezing per cfg.MODEL.BACKBONE.FREEZE_AT, and assembles res2..res5.

    Returns:
        ResNet: a :class:`ResNet` instance.
    """
    # need registration of new blocks/stems?
    norm = cfg.MODEL.RESNETS.NORM
    # Version 2 stem carries its own BatchNorm layers and takes no norm arg.
    if cfg.MODEL.BUA.RESNET_VERSION == 2:
        stem = BUABasicStemv2(
            in_channels=input_shape.channels,
            out_channels=cfg.MODEL.RESNETS.STEM_OUT_CHANNELS,
        )
    else:
        stem = BUABasicStem(
            in_channels=input_shape.channels,
            out_channels=cfg.MODEL.RESNETS.STEM_OUT_CHANNELS,
            norm=norm,
        )
    freeze_at = cfg.MODEL.BACKBONE.FREEZE_AT
    # FREEZE_AT >= 1 freezes the stem: no gradients and frozen BN statistics.
    if freeze_at >= 1:
        for p in stem.parameters():
            p.requires_grad = False
        stem = FrozenBatchNorm2d.convert_frozen_batchnorm(stem)
    # fmt: off
    out_features = cfg.MODEL.RESNETS.OUT_FEATURES
    depth = cfg.MODEL.RESNETS.DEPTH
    num_groups = cfg.MODEL.RESNETS.NUM_GROUPS
    width_per_group = cfg.MODEL.RESNETS.WIDTH_PER_GROUP
    bottleneck_channels = num_groups * width_per_group
    in_channels = cfg.MODEL.RESNETS.STEM_OUT_CHANNELS
    out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS
    stride_in_1x1 = cfg.MODEL.RESNETS.STRIDE_IN_1X1
    res5_dilation = cfg.MODEL.RESNETS.RES5_DILATION
    deform_on_per_stage = cfg.MODEL.RESNETS.DEFORM_ON_PER_STAGE
    deform_modulated = cfg.MODEL.RESNETS.DEFORM_MODULATED
    deform_num_groups = cfg.MODEL.RESNETS.DEFORM_NUM_GROUPS
    # fmt: on
    assert res5_dilation in {1, 2}, "res5_dilation cannot be {}.".format(res5_dilation)
    num_blocks_per_stage = {50: [3, 4, 6, 3], 101: [3, 4, 23, 3], 152: [3, 8, 36, 3]}[depth]
    stages = []
    # Avoid creating variables without gradients
    # It consumes extra memory and may cause allreduce to fail
    out_stage_idx = [{"res2": 2, "res3": 3, "res4": 4, "res5": 5}[f] for f in out_features]
    max_stage_idx = max(out_stage_idx)
    for idx, stage_idx in enumerate(range(2, max_stage_idx + 1)):
        # res5 with dilation 2 keeps stride 1 (dilated instead of strided).
        dilation = res5_dilation if stage_idx == 5 else 1
        first_stride = 1 if idx == 0 or (stage_idx == 5 and dilation == 2) else 2
        stage_kargs = {
            "num_blocks": num_blocks_per_stage[idx],
            "first_stride": first_stride,
            "in_channels": in_channels,
            "bottleneck_channels": bottleneck_channels,
            "out_channels": out_channels,
            "num_groups": num_groups,
            "norm": norm,
            "stride_in_1x1": stride_in_1x1,
            "dilation": dilation,
        }
        if deform_on_per_stage[idx]:
            stage_kargs["block_class"] = DeformBottleneckBlock
            stage_kargs["deform_modulated"] = deform_modulated
            stage_kargs["deform_num_groups"] = deform_num_groups
        else:
            # BottleneckBlockv2 is defined later in this module; the name is
            # resolved at call time, so the forward reference is safe.
            stage_kargs["block_class"] = BottleneckBlock if cfg.MODEL.BUA.RESNET_VERSION == 1 else BottleneckBlockv2
        blocks = make_stage(**stage_kargs)
        # Channel widths double at every successive stage.
        in_channels = out_channels
        out_channels *= 2
        bottleneck_channels *= 2
        if freeze_at >= stage_idx:
            for block in blocks:
                block.freeze()
        stages.append(blocks)
    return ResNet(stem, stages, out_features=out_features)
class BottleneckBlockv2(ResNetBlockBase):
    """Pre-activation ("v2") bottleneck residual block.

    The block normalizes + ReLUs its input first (`self.norm` in forward),
    then runs the 1x1 -> 3x3 -> 1x1 bottleneck.  NOTE(review): conv2/conv3
    pass `norm`/`activation` to Conv2dv2 sized by their *input* channels,
    which suggests Conv2dv2 applies norm/activation before the convolution
    (pre-activation) — confirm in .layers.wrappers.
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        *,
        bottleneck_channels,
        stride=1,
        num_groups=1,
        norm="BN",
        stride_in_1x1=False,
        dilation=1,
    ):
        """
        Args:
            norm (str or callable): a callable that takes the number of
                channels and return a `nn.Module`, or a pre-defined string
                (one of {"FrozenBN", "BN", "GN"}).
            stride_in_1x1 (bool): when stride==2, whether to put stride in the
                first 1x1 convolution or the bottleneck 3x3 convolution.
        """
        super().__init__(in_channels, out_channels, stride)
        # Projection shortcut only when the channel count changes.
        if in_channels != out_channels:
            self.shortcut = Conv2dv2(
                in_channels,
                out_channels,
                kernel_size=1,
                stride=stride,
                bias=False,
                norm=None,
            )
        else:
            self.shortcut = None
        # The original MSRA ResNet models have stride in the first 1x1 conv
        # The subsequent fb.torch.resnet and Caffe2 ResNe[X]t implementations have
        # stride in the 3x3 conv
        stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 else (1, stride)
        self.conv1 = Conv2dv2(
            in_channels,
            bottleneck_channels,
            kernel_size=1,
            stride=stride_1x1,
            bias=False,
            norm=None,
        )
        self.conv2 = Conv2dv2(
            bottleneck_channels,
            bottleneck_channels,
            kernel_size=3,
            stride=stride_3x3,
            padding=1 * dilation,
            bias=False,
            groups=num_groups,
            dilation=dilation,
            norm=BatchNorm2d(bottleneck_channels, eps=2e-5),
            activation=F.relu_,
        )
        self.conv3 = Conv2dv2(
            bottleneck_channels,
            out_channels,
            kernel_size=1,
            bias=False,
            norm=BatchNorm2d(bottleneck_channels, eps=2e-5),
            activation=F.relu_,
        )
        for layer in [self.conv1, self.conv2, self.conv3, self.shortcut]:
            if layer is not None:  # shortcut can be None
                weight_init.c2_msra_fill(layer)
        # Pre-activation norm applied to the block input in forward().
        self.norm = BatchNorm2d(in_channels, eps=2e-5)
        # Zero-initialize the last normalization in each residual branch,
        # so that at the beginning, the residual branch starts with zeros,
        # and each residual block behaves like an identity.
        # See Sec 5.1 in "Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour":
        # "For BN layers, the learnable scaling coefficient γ is initialized
        # to be 1, except for each residual block's last BN
        # where γ is initialized to be 0."
        # nn.init.constant_(self.conv3.norm.weight, 0)
        # TODO this somehow hurts performance when training GN models from scratch.
        # Add it as an option when we need to use this code to train a backbone.

    def forward(self, x):
        # Pre-activation: normalize + ReLU the input once, feed both the
        # residual branch and (when projecting) the shortcut from it.
        x_2 = self.norm(x)
        x_2 = F.relu_(x_2)
        out = self.conv1(x_2)
        # out = F.relu_(out)
        out = self.conv2(out)
        # out = F.relu_(out)
        out = self.conv3(out)
        if self.shortcut is not None:
            shortcut = self.shortcut(x_2)
        else:
            # Identity shortcut uses the raw (un-normalized) input.
            shortcut = x
        out += shortcut
        # out = F.relu_(out)
        return out
# facerec.py
# Train an LBPH face recognizer on the images under `datasets/` (one
# sub-directory per person) and classify faces from the webcam stream.
#
# Fixes vs. the original:
#  * The detection-log file "1.txt" was re-opened on every matching frame
#    and never closed (file-handle leak) -> now a `with` block.
#  * The camera was never released and the OpenCV windows never destroyed
#    on exit -> try/finally cleanup.
#  * Removed the commented-out IP-camera dead code.
import cv2, sys, numpy, os
import datetime
import urllib.request
import numpy as np

size = 4
haar_file = 'haarcascade_frontalface_default.xml'
datasets = 'datasets'

print('Training...')
# Create a list of images and a list of corresponding names
(images, labels, names, id) = ([], [], {}, 0)
for (subdirs, dirs, files) in os.walk(datasets):
    for subdir in dirs:
        names[id] = subdir
        subjectpath = os.path.join(datasets, subdir)
        for filename in os.listdir(subjectpath):
            path = subjectpath + '/' + filename
            label = id
            images.append(cv2.imread(path, 0))
            labels.append(int(label))
        id += 1
(width, height) = (130, 100)

# Create a Numpy array from the two lists above
(images, labels) = [numpy.array(lis) for lis in [images, labels]]

# OpenCV trains a model from the images
# NOTE FOR OpenCV2: remove '.face'
model = cv2.face.LBPHFaceRecognizer_create()
model.train(images, labels)

# Part 2: Use fisherRecognizer on camera stream
face_cascade = cv2.CascadeClassifier(haar_file)
webcam = cv2.VideoCapture(0)
url = "http://192.168.1.8:8080/shot.jpg"
cc, nc, c2c = 0, 0, 0
try:
    while True:
        (_, im) = webcam.read()
        gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, 1.3, 5)
        for (x, y, w, h) in faces:
            cv2.rectangle(im, (x, y), (x + w, y + h), (255, 255, 0), 2)
            face = gray[y:y + h, x:x + w]
            face_resize = cv2.resize(face, (width, height))
            # Try to recognize the face
            prediction = model.predict(face_resize)
            cv2.rectangle(im, (x, y), (x + w, y + h), (0, 255, 0), 3)
            if prediction[1] < 500:
                # Tally per-class detections for the running accuracy report.
                if names[prediction[0]] == "Non-Criminal":
                    nc += 1
                elif names[prediction[0]] == "criminal":
                    cc += 1
                elif names[prediction[0]] == "criminal-2":
                    c2c += 1
                if names[prediction[0]] != "Non-Criminal":
                    # Log each criminal sighting with a timestamp.
                    with open("1.txt", 'a') as f:
                        f.write('Printed string %s recorded at %s.\n' % (1, datetime.datetime.now()))
                cv2.putText(im, names[prediction[0]], (x-10, y-10), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0))
            else:
                cv2.putText(im, 'Scanning', (x-10, y-10), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0))
        cv2.imshow('cam1', im)
        if nc != 0 or cc != 0 or c2c != 0:
            print(f"accuracy test for detecting criminal with {nc+cc+c2c} validation images")
            print(f"no.of non criminal detected:{nc}", f"no.of criminal detected:{cc}", f"no.of criminal-2 detected:{c2c}")
            print(f"accuracy{(cc/(nc+cc+c2c))*100}%")
        key = cv2.waitKey(10)
        if key == 27:  # ESC quits
            break
finally:
    # Always release the camera and close windows, even on error/ESC.
    webcam.release()
    cv2.destroyAllWindows()
# Print the first `num` Fibonacci numbers (0-indexed, starting 0 1 1 2 ...).
num = int(input("enter value for num:"))
a, b = 0, 1
if num <= 0:
    print("error! needs positive number")
elif num == 1:
    print(a)
else:
    for _ in range(num):
        print(a, " ", end="")
        # Tuple assignment advances the pair in one step.
        a, b = b, a + b
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 9 16:28:07 2017
@author: ellie
"""
import tensorflow as tf
# Create TensorFlow object called hello_constant
# (TensorFlow 1.x graph/session API; tf.Session was removed in TF 2.x.)
hello_constant = tf.constant('Hello World!')

with tf.Session() as sess:
    # Run the tf.constant operation in the session
    output = sess.run(hello_constant)
    print(output)
from .db import db
from .usersOnTeam import UsersOnTeams
class Team(db.Model):
    """SQLAlchemy model: a named team linking users and projects."""
    __tablename__ = "teams"

    id = db.Column(db.Integer, nullable = False, primary_key = True)
    teamName = db.Column(db.String(50), nullable = False)
    # Many-to-many with User via the UsersOnTeams association table.
    users = db.relationship("User", secondary=UsersOnTeams, back_populates="teams")
    # One-to-many: a project belongs to a single team.
    projects = db.relationship("Project", back_populates="team")

    def to_dict(self):
        """Serialize scalar columns for JSON responses (relationships omitted)."""
        return {
            "id": self.id,
            "teamName": self.teamName,
        }
import os
import copy
import json
import torch
import logging
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from argparse import ArgumentParser
from torch.optim import lr_scheduler
from torchvision import datasets, models, transforms
# Timestamped DEBUG-level logging for the whole process.
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
                    datefmt='%m-%d %H:%M')
logger = logging.getLogger(__file__)

# Command-line interface for the training script.
parser = ArgumentParser(description='Process solution arguments.')
parser.add_argument('--device', type=str, default='cpu', help='Device used for training (cuda or cpu)')
parser.add_argument('--name', type=str, choices=['alexnet', 'vgg11', 'vgg16', 'vgg19', 'resnet18', 'resnet50',
                                                 'resnet152'], help='One of pre-trained model names', default='vgg11')
# FIX: learning rate is fractional — type=int rejected values like 0.001
# ("invalid int value") and could silently truncate others.
parser.add_argument('--lr', type=float, help='Learning rate', default=1.0e-3)
parser.add_argument('--layers', type=int, help='Number of hidden layers excluding input and output', default=1)
parser.add_argument('--units', type=int, help='Number of hidden units per hidden layer', default=128)
parser.add_argument('--epochs', type=int, help='Number of epochs used for training', default=5)
def load_and_process_data():
    """Build train/valid dataloaders for the flower dataset.

    Expects `flower_data/train` and `flower_data/valid` ImageFolder trees.

    Returns:
        (dataloaders, dataset_sizes): dicts keyed by 'train' / 'valid'.
    """
    data_dir = 'flower_data'
    # ImageNet channel statistics, shared by both pipelines.
    normalize = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    data_transforms = {
        # Training pipeline adds random crop/flip augmentation.
        'train': transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]),
        # Validation uses a deterministic center crop.
        'valid': transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]),
    }
    dataloaders = {}
    dataset_sizes = {}
    for split in ('train', 'valid'):
        folder = datasets.ImageFolder(os.path.join(data_dir, split),
                                      data_transforms[split])
        dataloaders[split] = torch.utils.data.DataLoader(
            folder, batch_size=4, shuffle=True, num_workers=4)
        dataset_sizes[split] = len(folder)
    return dataloaders, dataset_sizes
def get_cat_to_name():
    """Load the category-id -> flower-name mapping from `cat_to_name.json`.

    Returns:
        dict: the parsed mapping, or None when the file does not exist.
    """
    # Previous version pre-initialized a sentinel and returned it from two
    # places; a `with` block also guarantees the file handle is closed.
    try:
        with open('cat_to_name.json', 'r') as f:
            return json.load(f)
    except FileNotFoundError:
        return None
class DeepFeedForwardNet(nn.Module):
    """Fully-connected classifier head: input -> `layers` hidden layers of
    `units` neurons (ReLU, optional dropout) -> 102-way output logits.
    """

    def __init__(self, input_shape, layers=2, units=128, dropout=0.5):
        super(DeepFeedForwardNet, self).__init__()
        self.input_shape = input_shape
        self.input = nn.Linear(input_shape, units)
        self.out = nn.Linear(units, 102)
        # dropout=None disables dropout entirely.
        self.dropout = None if dropout is None else nn.Dropout(dropout)
        self.layers = nn.ModuleList(
            nn.Linear(units, units) for _ in range(layers))

    def forward(self, x):
        if self.dropout is None:
            hidden = F.relu(self.input(x))
            for linear in self.layers:
                hidden = F.relu(linear(hidden))
        else:
            hidden = F.relu(self.dropout(self.input(x)))
            for linear in self.layers:
                hidden = F.relu(self.dropout(linear(hidden)))
        return self.out(hidden)
def instantiate_model(name_, n_layers=1, n_units=128, lr_=0.001, dropout=None, device_='cpu'):
    """Load a pre-trained torchvision model, freeze its backbone and attach
    a fresh DeepFeedForwardNet classifier head.

    FIX: this function previously read the module-level global `name`
    instead of its `name_` parameter in four places, so it only worked when
    invoked from __main__ with a matching global in scope.

    Returns:
        (model, criterion, optimizer, lr_scheduler) ready for training.
    """
    logger.info("Instantiating model with params {}".format([name_, n_layers, n_units, lr_, dropout]))
    model_rn = models.__dict__[name_](pretrained=True)
    if 'vgg' in name_:
        input_features = 25088  # VGG flattened conv-feature size
    elif 'resnet' in name_:
        # FIX: was hard-coded 512, which is only correct for resnet18/34;
        # resnet50/152 expose 2048. Read it from the model instead.
        input_features = model_rn.fc.in_features
    else:
        input_features = 9216  # AlexNet flattened conv-feature size
    dff_net = DeepFeedForwardNet(input_features, n_layers, n_units, dropout)
    dff_net = dff_net.to(device_)
    # Freeze the pre-trained backbone; only the new head is optimized.
    for param in model_rn.parameters():
        param.requires_grad = False
    # ResNet exposes its classifier as `.fc`; VGG/AlexNet as `.classifier`.
    if 'resnet' in name_:
        model_rn.fc = dff_net
    else:
        model_rn.classifier = dff_net
    model_rn = model_rn.to(device_)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(dff_net.parameters(), lr=lr_, momentum=0.9)
    # Decay LR by 10x every 7 epochs.
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1)
    logger.info("Model, criterion, optimizer and lr-scheduler created.")
    return model_rn, criterion, optimizer, exp_lr_scheduler
def train_model(model, criterion, optimizer, scheduler, dataloaders, dataset_sizes, device_='cpu', num_epochs=20):
    """Run the standard train/validate loop and return the model loaded
    with the weights that achieved the best validation accuracy.
    """
    logger.info("Training model with epochs:{}".format(num_epochs))
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    for epoch in range(num_epochs):
        logger.info('Epoch {}/{}'.format(epoch, num_epochs - 1))
        logger.info('-' * 10)
        for phase in ['train', 'valid']:
            if phase == 'train':
                # NOTE(review): scheduler.step() before optimizer.step() is
                # the legacy (pre-1.1) PyTorch ordering — confirm intended.
                scheduler.step()
                model.train()
            else:
                model.eval()
            running_loss = 0.0
            running_corrects = 0
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device_)
                labels = labels.to(device_)
                optimizer.zero_grad()
                # Gradients are only tracked during the training phase.
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                # Accumulate loss weighted by batch size for a true mean.
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects.double() / dataset_sizes[phase]
            logger.info('{} Loss: {:.4f} Acc: {:.4f}'.format(
                phase, epoch_loss, epoch_acc))
            # Keep a copy of the best-validating weights seen so far.
            if phase == 'valid' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
    # print()
    logger.info('Best val Acc: {:4f}'.format(best_acc))
    model.load_state_dict(best_model_wts)
    return model
def save_model(model, optimizer, image_datasets, lr_scheduler_, criterion, layers_, hidden_units_, name_, epochs_, path_):
    """Persist a training checkpoint to
    `path_/{name_}-dnn{layers_}/{name_}-dnn{layers_}-{units}_{epochs}_checkpoint.tar`.

    FIX: the 'epochs' entry previously read the module-level global `epochs`
    instead of the `epochs_` parameter (NameError outside __main__).
    """
    directory = os.path.join(path_, '{}-dnn{}'.format(name_, layers_))
    if not os.path.exists(directory):
        os.makedirs(directory)
    # Remember the class-index mapping on the model for later inference.
    model.class_to_index = image_datasets['train'].dataset.class_to_idx
    torch.save({
        'epochs': epochs_,
        'model': model.state_dict(),
        'model_opt': optimizer.state_dict(),
        'classes': image_datasets['train'].dataset.class_to_idx,
        'lr_scheduler': lr_scheduler_.state_dict(),
        'criterion': criterion.state_dict()
    }, os.path.join(directory, '{}-dnn{}-{}_{}_{}.tar'.format(name_, layers_, hidden_units_, epochs_, 'checkpoint')))
if __name__ == '__main__':
    args = parser.parse_args()
    name = args.name
    lr = args.lr
    layers = args.layers
    hidden_units = args.units
    epochs = args.epochs
    dls, ds_sizes = load_and_process_data()
    # Fall back to CPU when CUDA was requested but is unavailable.
    if args.device == 'cuda' and not torch.cuda.is_available():
        device = 'cpu'
        logger.warning('Cuda is not available on this machine, setting device to cpu')
    else:
        device = args.device
    logger.info('Device mode set to {}'.format(device))
    # FIX: results were previously unpacked into `nn` and `lr_scheduler`,
    # shadowing the imported torch.nn and torch.optim.lr_scheduler modules.
    net, criterion, optimizer, scheduler = instantiate_model(name, layers, hidden_units, lr, 0.2, device)
    best_model = train_model(net, criterion, optimizer, scheduler, dls, ds_sizes, device_=device, num_epochs=epochs)
    path = os.path.join(os.path.dirname(__file__), 'checkpoints')
    save_model(best_model, optimizer, dls, scheduler, criterion, layers, hidden_units, name, epochs, path)
    logger.info("Model trained and saved")
|
import sys
# Estimate the mean gap between paired reads from a SAM file (Python 2).
# Usage: script.py <alignments.sam>; prints the mean inter-mate distance
# and tallies a histogram of distances < 1000 (histogram printing disabled).
infile = open(sys.argv[1], "r")
# read name -> (position, read length) for the first mate seen
table = {}
num = 0
count = 0
# distance histogram: gap -> occurrences, for gaps 0..999
dists = dict.fromkeys(range(1000), 0)
for line in infile:
    # skip SAM header lines
    if line[0] == '@':
        continue
    items = line.strip().split("\t")
    name = items[0]
    pos = int(items[3])
    qual = int(items[4])
    seq = items[9]
    # ignore low mapping-quality alignments
    if qual < 40:
        continue
    if not table.has_key(name):
        # first mate of the pair: remember where it mapped
        table[name] = (pos, len(seq))
    else:
        # second mate: gap between the end of one read and start of the other
        if pos < table[name][0]:
            dist = table[name][0] - (pos + len(seq))
        else:
            dist = pos - (table[name][0] + table[name][1])
        if dist < 1000 and dist > 0:
            dists[dist] += 1
            num += dist
            count += 1
        del table[name]
#for i, x in dists.items():
#    print i, x
# mean gap over all counted pairs
print num / float(count)
infile.close()
|
import os, re
import pandas as pd
from functools import reduce
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics import f1_score, accuracy_score
from sklearn.metrics import precision_score, recall_score
from sklearn.metrics import confusion_matrix
from keras.utils import plot_model
from keras.models import Sequential
### train + predict wrappers ---------------------------------------------
def train_sklearn(clf_class, hyper_dict, Xs, ys):
    '''
    Fit an sklearn-style estimator: construct `clf_class` with the keyword
    params in `hyper_dict`, fit it on inputs `Xs` / targets `ys`, and
    return the fitted estimator's bound `predict` method.
    '''
    estimator = clf_class(**hyper_dict)
    estimator.fit(Xs, ys)
    return estimator.predict
def train_keras(clf_class, hyper_dict, Xs, ys):
    '''
    Build, compile and fit a keras sequential model (`clf_class`).
    `hyper_dict` must carry top-level keys 'layers' (list of
    (layer_class, kwargs) 2-tuples), 'config' and 'train'.
    Returns the fitted model's `predict_classes` method.
    NOTE: VERBOSE PARAM NOT WORKING!
    '''
    model = clf_class()
    for layer_cls, layer_kwargs in hyper_dict['layers']:
        model.add(layer_cls(**layer_kwargs))
    model.compile(**hyper_dict['config'])
    model.fit(Xs, ys, **hyper_dict['train'])
    return model.predict_classes
#### load + prep data utils --------------------------------------
def get_imdb_subset(dat, subset, lbin):
    '''Select rows of the imdb frame matching `subset` and length bin `lbin`.'''
    mask = (dat.subset == subset) & (dat.length_bin == lbin)
    # print(f'retrieved {dat[mask].shape}-dim {lbin}th quartile of IMDB {subset}')
    return dat[mask]
def get_params_subset(hypers_dict, clf_key, prefix='clf__'):
    '''assumes `hypers_dict` has structure of data in sklearn_tuned_hypers.json;
    returns the best_params entries starting with `prefix`, prefix stripped.'''
    best = hypers_dict[clf_key]['best_params']
    subset = {}
    for key, val in best.items():
        if key.startswith(prefix):
            subset[key.replace(prefix, '')] = val
    return subset
def quick_vectorize(train_text, test_text, hypers={}):
    '''Fit a CountVectorizer on the training text and transform both
    train and test text with it, returning the two document-term matrices.'''
    vectorizer = CountVectorizer(**hypers)
    # Fit only on train; test is transformed with the train vocabulary.
    return vectorizer.fit_transform(train_text), vectorizer.transform(test_text)
def quick_dtmize(train_text, test_text, vocab_limit, mode='count'):
    '''Vectorize docs with the keras Tokenizer API in one call: fit on train
    text only, then build train/test matrices in the requested `mode`.
    Returns (train_matrix, test_matrix, word_index).'''
    assert mode in ['binary', 'count', 'freq', 'tfidf'], 'supplied `mode` invalid!'
    tokenizer = Tokenizer(num_words=vocab_limit)
    tokenizer.fit_on_texts(train_text)

    def _to_matrix(docs):
        return tokenizer.sequences_to_matrix(
            tokenizer.texts_to_sequences(docs), mode=mode)

    return _to_matrix(train_text), _to_matrix(test_text), tokenizer.word_index
def quick_docpad(train_text, test_text, vocab_limit, out_length):
    '''Integer-encode and pad docs with the keras Tokenizer API in one call:
    fit on train text only, then pad both splits to `out_length`.
    Returns (train_padded, test_padded, word_index).'''
    tokenizer = Tokenizer(num_words=vocab_limit)
    tokenizer.fit_on_texts(train_text)

    def _encode(docs):
        return pad_sequences(tokenizer.texts_to_sequences(docs), maxlen=out_length)

    return _encode(train_text), _encode(test_text), tokenizer.word_index
### performance evaluation utilities ----------------------------------------
def quick_clfreport(y_true, y_pred, digits=3):
    '''Return {metric short-name: rounded score} for f1/accuracy/precision/recall.'''
    report = {}
    for metric in (f1_score, accuracy_score, precision_score, recall_score):
        short_name = metric.__name__.replace('_score', '')
        report[short_name] = round(metric(y_true, y_pred), digits)
    return report
def make_confmat_dict(y_obs, y_pred):
    '''Return the binary confusion-matrix cells as a JSON-friendly dict.'''
    tn, fp, fn, tp = confusion_matrix(y_obs, y_pred).ravel()
    # Cast numpy ints to plain ints so the dict serializes cleanly.
    return dict(tn=int(tn), fp=int(fp), fn=int(fn), tp=int(tp))
### postprocess results dict utils ---------------------------------------
def results_dict_to_df(results, metric_name):
    '''pull one metric from a nested results dict into an (id, metric) dataframe'''
    vals = {key: entry[metric_name] for key, entry in results.items()}
    out = pd.DataFrame(vals, index=[metric_name]).T
    out.index.name = 'id'
    # promote the id index to a regular column
    return out.reset_index()
def postprocess_results(results, metric_names):
    '''
    flatten the results dict into one tidy df: one row per id, one column
    per metric, plus `clf` and `lbin` components parsed out of the id
    '''
    per_metric = [results_dict_to_df(results, name) for name in metric_names]
    merged = reduce(lambda left, right: pd.merge(left, right, on='id'), per_metric)
    # presumably ids look like 'q<digit>-<clf>'; strip each half to recover
    # the parts -- TODO confirm against how the results keys are built
    merged['clf'] = [re.sub('q\\d-', '', v) for v in merged.id]
    merged['lbin'] = [re.sub('-[0-9a-zA-Z_]+', '', v) for v in merged.id]
    return merged[['clf', 'lbin'] + metric_names]
### func to make visualization of keras network graph --------------------
def plot_keras_model(clf_key, hyper_dict, out_dir):
'''visualize the structure of a keras network, write to .png
# wrapper that compiles model + then calls:
plot_model(model, to_file=outfile, dpi=300,
show_shapes=False, show_layer_names=True, expand_nested=False)
'''
outfile = os.path.join(out_dir, clf_key+'_graph.png')
nn = Sequential()
for layer, layer_params in hyper_dict['layers']:
nn.add(layer(**layer_params))
nn.compile(**hyper_dict['config'])
# print(f'writing model network graph to file: `{outfile}`')
plot_model(nn, to_file=outfile,
show_shapes=True, show_layer_names=False)
### dev + unused stuff area -------------------------------------------------
### dev + unused stuff area -------------------------------------------------
### dev + unused stuff area -------------------------------------------------
# def train_clf(clf_identifier, hyper_dict, Xs, ys):
# # NOTE: assumes hyper_dict is compatible with the relevant API
# # NOTE: assumes Xs and ys are prepped correctly for the clf!
# clf_API = clf_APIs[clf_identifier]
# clf_class = clf_classes[clf_identifier]
# train_function = {'sklearn': train_sklearn, 'keras': train_keras}[clf_API]
# predict_function = train_function(clf_class, hyper_dict, Xs, ys)
# return predict_function
### example of quick_docpad() usage:
# docs = ['this is me toy corp', 'a corp is just docs', 'this is a doc doc']
# moredocs = ['this is me last corp corp corp', 'waow disjoint vocab yikes']
# pd1, pd2, widx = quick_docpad(docs, moredocs, vocab_limit=5, out_length=10)
|
from sqlalchemy import create_engine, Column, Integer, String
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
# single sqlite database file in the working directory
engine = create_engine('sqlite:///mybase2.db')
# thread-local session registry bound to that engine
db_session = scoped_session(sessionmaker(bind=engine))
Base = declarative_base()
# expose `Model.query` on every declarative class (Flask-SQLAlchemy style)
Base.query = db_session.query_property()
class Branches(Base):
    """A branch/location row: type labels, street address and coordinates."""
    __tablename__='Branches'
    id = Column(Integer, primary_key=True)
    Type = Column(String)
    address = Column(String)
    lon = Column(String)
    lat = Column(String)
    Type2 = Column(String)
    def __repr__(self):
        # bug fix: the original referenced bare `address`/`lon`/`lat`,
        # which raised NameError whenever a row was printed
        return '<Mac {}; {}; {}>'.format(self.address, self.lon, self.lat)
class Users(Base):
    """A user's choice record: `cid` (user/chat id) and their `chosen` value."""
    __tablename__='Users'
    id = Column(Integer, primary_key=True)
    cid = Column(Integer)
    chosen = Column(String)
    def __repr__(self):
        # bug fix: the original referenced `address`/`lon`/`lat`, names that
        # do not exist on this model (copy-paste from Branches) -> NameError.
        # Show this model's own columns instead.
        return '<users {}; {}>'.format(self.cid, self.chosen)
if __name__ == "__main__":
    # create all declared tables (no-op for ones that already exist);
    # bug fix: removed a stray trailing `|` that made this line a SyntaxError
    Base.metadata.create_all(bind=engine)
import math
import numpy as np
import pandas as pd
from scipy.stats import norm
import matplotlib.pyplot as plt
# the 12 ACE (adverse childhood experience) survey columns in this extract,
# keyed by an arbitrary 1-based index
ace_list = {1:'depress', 2:'alcoabuse', 3:'drugabuse', 4:'prison',
            5:'patdivorce', 6:'phyabuse1', 7:'phyabuse2', 8:'verbalabuse',
            9:'sexabuse1', 10:'sexabuse2', 11:'sexabuse3', 12:'foodinsecure'}
# sub-groups sharing a raw answer-coding convention (consumed by cat_code):
groupa = list(ace_list.values())[0:5]    # items 1-5
groupb = list(ace_list.values())[5:-1]   # items 6-11
groupc = list(ace_list.values())[-1]     # last item only -- a single string, not a list
# decodings for the computed race/income variables; code 0 means pooled 'All'
race_list = {0:'All', 1:'White', 2:'Black', 3:'Hispanic', 4:'Other', 5:'Multi'}
income_list = {0:'All', 1:'< 15000', 2:'15000 - 24999', 3:'25000 - 34999',
               4:'35000 - 49999', 5:'50000 +', 9:'Don\'t Know'}
# generate combinations of aces
def comb(aces, n):
    """Return all size-n combinations of `aces`, each as a list.

    Edge cases match the original recursive implementation:
    - n <= 0 yields the single empty combination [[]]
    - n > len(aces) falls back to one combination of everything
    Output order is the same lexicographic-by-index order as before.
    """
    from itertools import combinations  # local import keeps the module header untouched
    if n <= 0:
        return [[]]
    # stdlib replaces the hand-rolled recursion; min() reproduces the
    # n > len(aces) fallback
    return [list(c) for c in combinations(aces, min(n, len(aces)))]
# cast the aces code in brfss, ori_code to 0 -> No, 1 -> Yes
def cat_code(ori_code, *args):
    """Recode a raw BRFSS ACE answer to 0 (No) / 1 (Yes).

    args[0], when present, is the column name. The value passes through
    unchanged when it is missing (NaN), when no column name is given, or
    when the column is not an ACE item. Unrecognized codes map to NaN.
    """
    if pd.isna(ori_code) or len(args) == 0:
        return ori_code
    col_name = args[0]
    if col_name not in list(ace_list.values()):
        return ori_code
    # each group uses a different raw yes/no coding convention
    # NOTE(review): groupc is a single string, so `in groupc` is a substring
    # test here -- works for the exact column name, but verify intent
    if (col_name in groupa and ori_code == 2) or \
       (col_name in groupb and ori_code == 1) or \
       (col_name in groupc and ori_code == 1):
        return 0
    if (col_name in groupa and ori_code == 1) or \
       (col_name in groupb and ori_code in [2, 3]) or \
       (col_name in groupc and ori_code in [2, 3, 4, 5]):
        return 1
    # np.nan, not the np.NaN alias (removed in NumPy 2.0)
    return np.nan
def cal_prop(df, *aces):
    """Estimate the joint proportion of rows where every listed ACE == 1.

    k = rows with all listed aces equal to 1; n = rows where every listed
    ace holds a valid 0/1 code (NaN and other codes drop out of the base).
    Returns (proportion, standard error), or (nan, nan) when no aces are
    given or the denominator is zero.
    """
    if not aces:
        # np.nan, not the np.NaN alias (removed in NumPy 2.0)
        return np.nan, np.nan
    vals = df[list(aces)]
    k = (vals == 1).all(axis=1).sum()
    n = vals.isin([0, 1]).all(axis=1).sum()
    if n == 0:
        return np.nan, np.nan
    prop = k / n
    # binomial standard error of a sample proportion
    return prop, math.sqrt(prop * (1 - prop) / n)
def plot_aces_hm(mat_val, ax, title, xticks, yticks):
    """Draw an annotated heatmap of `mat_val` onto `ax`, with a colorbar."""
    image = ax.imshow(mat_val)
    bar = ax.figure.colorbar(image, ax=ax)
    bar.ax.set_ylabel('', rotation=-90, va='bottom')
    # one tick per cell, labelled from the supplied lists
    ax.set_xticks(np.arange(len(xticks)))
    ax.set_yticks(np.arange(len(yticks)))
    ax.set_xticklabels(xticks)
    ax.set_yticklabels(yticks)
    # x labels run along the top edge rather than the bottom
    ax.tick_params(top=True, bottom=False,
                   labeltop=True, labelbottom=False)
    ax.set_title(title)
    plt.setp(ax.get_xticklabels(), rotation=-30, ha="right",
             rotation_mode="anchor")
    # print each cell's value (2 decimals) in white at the cell center
    for row in range(len(yticks)):
        for col in range(len(xticks)):
            ax.text(col, row, '{:.2f}'.format(mat_val[row, col]),
                    ha="center", va="center", color="w")
class bfs_data:
    """Wrapper around a BRFSS dataframe for ACE proportion/correlation queries.

    Accepts either a csv path (loaded, then each ACE column recoded via
    cat_code) or an already-prepared dataframe. Race/income filtering uses
    the '_RACE_G1' and '_INCOMG' columns; code 0 means 'All' (no filter).
    """
    # upper quantile for a two-sided 95% normal confidence interval
    ci = 0.975

    def __init__(self, df):
        if isinstance(df, str):
            self.df = pd.read_csv(df, low_memory=False)
            for ace in list(ace_list.values()):
                self.df[ace] = self.df[ace].apply(cat_code, args=(ace,))
        else:
            self.df = df
        self.keys = self.df.keys()
        # per-(race, income) memo caches, populated lazily
        self.__reset_mat__()

    @staticmethod
    def _apply_ri_filter(values, race, income):
        """Restrict rows to one race/income code; 0 keeps everything."""
        if race != 0:
            values = values[values['_RACE_G1'] == race]
        if income != 0:
            values = values[values['_INCOMG'] == income]
        return values

    def get_value(self, race, income, keys=[]):
        """Return the filtered _RACE_G1/_INCOMG + `keys` columns."""
        vals = self.df[['_RACE_G1', '_INCOMG'] + list(keys)]
        return self._apply_ri_filter(vals, race, income)

    def get_prop(self, race, income, *keys):
        """(proportion, se) of all `keys` == 1 within one race/income cell."""
        if race not in race_list.keys() or \
           income not in income_list.keys() or \
           len(keys) == 0:
            # np.nan, not the np.NaN alias (removed in NumPy 2.0)
            return np.nan, np.nan
        return cal_prop(self.get_value(race, income, keys), *keys)

    def get_dist(self, *keys):
        """Proportion + 95% CI table over every race x income cell.

        With no arguments, uses all ACE columns jointly.
        """
        keys = list(keys) if keys else list(ace_list.values())
        index = [[], []]
        rows = []
        z = norm.ppf(self.ci)  # hoisted: constant across cells
        for r in race_list.keys():
            for i in income_list.keys():
                prop, se = self.get_prop(r, i, *keys)
                rows.append([prop, se, prop - z * se, prop + z * se])
                index[0].append(race_list[r])
                index[1].append(income_list[i])
        return pd.DataFrame(rows,
                            columns=['Proportion', 'Standard Error',
                                     'L 95% CI', 'U 95% CI'],
                            index=index)

    def get_corr(self, ace1, ace2=None):
        """Correlation of ace1 vs ace2 for every race x income cell."""
        index = [[], []]
        rows = []
        for r in race_list.keys():
            for i in income_list.keys():
                rows.append(self.get_corr_ri(r, i, ace1, ace2))
                index[0].append(race_list[r])
                index[1].append(income_list[i])
        return pd.DataFrame(rows, columns=['Correlation'], index=index)

    def get_corr_ri(self, race, income, ace1, ace2=None):
        """Correlation of ace1 vs ace2 in one cell (ace1 vs itself if ace2 is None)."""
        if race not in race_list.keys() or \
           income not in income_list.keys():
            return np.nan
        if ace2 is None:  # idiom fix: was `== None`
            ace2 = ace1
        vals = self.get_value(race, income, [ace1, ace2])
        return vals[ace1].corr(vals[ace2])

    def __reset_mat__(self):
        """Drop both memo caches (one None slot per race x income cell)."""
        self.corr_mat = {r: {i: None for i in income_list.keys()}
                         for r in race_list.keys()}
        self.prop_mat = {r: {i: None for i in income_list.keys()}
                         for r in race_list.keys()}

    def get_corr_mat(self, race, income):
        """ACE x ACE correlation matrix for one cell (memoized)."""
        if self.corr_mat[race][income] is not None:
            return self.corr_mat[race][income]
        aces = list(ace_list.values())
        vals = self.get_value(race, income, aces)
        result = pd.DataFrame(
            [[vals[a1].corr(vals[a2]) for a1 in aces] for a2 in aces],
            index=aces, columns=aces)
        self.corr_mat[race][income] = result
        return result

    def get_prop_mat(self, race, income):
        """Pairwise joint-proportion ('pr') and std-error ('se') matrices (memoized)."""
        if self.prop_mat[race][income] is not None:
            return self.prop_mat[race][income]
        aces = list(ace_list.values())
        vals = self.get_value(race, income, aces)
        pairs = [[cal_prop(vals, a1, a2) for a1 in aces] for a2 in aces]
        result = {
            'pr': pd.DataFrame([[p[0] for p in row] for row in pairs],
                               index=aces, columns=aces),
            'se': pd.DataFrame([[p[1] for p in row] for row in pairs],
                               index=aces, columns=aces),
        }
        self.prop_mat[race][income] = result
        return result
|
import os
# root path of the project
# root path of the project (directory containing this file)
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
# directory for logs
LOG_DIR = os.path.join(ROOT_DIR, 'backend', 'logs')
# dir for all configs
CONFIGS_DIR = os.path.join(ROOT_DIR, 'configs')
# path of the config file for connection to postgresql
DATABASE_CONFIG_PATH = os.path.join(CONFIGS_DIR, 'database.ini')
# path of the config file for twitter api credentials
TWITTER_API_CONFIG_PATH = os.path.join(CONFIGS_DIR, 'twitter.ini')
# dir for all cached state (pickled between runs)
CACHE_DIR = os.path.join(ROOT_DIR, 'cache')
# path of the job frontier set
FRONTIER_CACHE_PATH = os.path.join(CACHE_DIR, 'twitter.frontier.pickle')
# path of the id CacheSet
ID_CACHE_PATH = os.path.join(CACHE_DIR, 'ids.pickle')
# path for keywords.txt
KEYWORDS_PATH = os.path.join(ROOT_DIR, 'keywords.txt')
|
'''
Created on 22.10.2014
@author: Philip
'''
from data import db
import users.constants
class CRUDMixin(object):
    """Mixin adding create/get/update/save/delete convenience methods.

    Assumes a Flask-SQLAlchemy style `db` (providing `session` and a
    per-model `query`) and an integer primary key `id`. The optional
    `form` arguments expect an object with `populate_obj` (e.g. WTForms).
    """
    __table_args__ = {'extend_existing': True}

    id = db.Column(db.Integer, primary_key=True)

    @classmethod
    def create(cls, commit=True, form=None, **kwargs):
        """Instantiate from kwargs (optionally a form) and persist."""
        instance = cls(**kwargs)
        if form:
            form.populate_obj(instance)
        return instance.save(commit)

    @classmethod
    def get(cls, identifier):
        return cls.query.get(identifier)

    # We will also proxy Flask-SqlAlchemy's get_or_404
    # for symmetry
    @classmethod
    def get_or_404(cls, identifier):
        return cls.query.get_or_404(identifier)

    def update(self, commit=True, form=None, **kwargs):
        """Set attributes from kwargs (and optionally a form), then commit."""
        # bug fix: dict.iteritems() is Python-2 only (AttributeError on
        # Python 3); items() behaves the same on both
        for attr, value in kwargs.items():
            setattr(self, attr, value)
        if form:
            form.populate_obj(self)
        return commit and db.session.commit()

    def save(self, commit=True):
        db.session.add(self)
        if commit:
            db.session.commit()
        return self

    def delete(self, commit=True):
        db.session.delete(self)
        return commit and db.session.commit()
class UserAccessMixin(object):
    """Mixin for models whose `users` collection gates who may see them."""

    def has_access(self, user):
        """True when at least one related user grants access to `user`."""
        return any(u.has_access(user) for u in self.users)

    @classmethod
    def filter_user(cls, user):
        """Query limited to rows visible to `user`; admins see everything."""
        base_query = cls.query
        if user.is_admin():
            return base_query
        return base_query.filter(cls.users.contains(user))
|
#!/usr/bin/env python
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
def main():
    """Drive a headless PhantomJS browser to submit the ServiceNow unlock form."""
    # NOTE(review): PhantomJS support and the find_element_by_* helpers are
    # deprecated/removed in Selenium 4 -- confirm the pinned selenium version
    # before upgrading.
    driver = webdriver.Remote(
        command_executor='http://127.0.0.1:8910',
        desired_capabilities=DesiredCapabilities.PHANTOMJS)
    driver.get('https://citrix.service-now.com/unlock.do')
    # fill the user id and click the Submit button
    driver.find_element_by_id('userId').send_keys('yangqi')
    driver.find_element_by_xpath("//input[@value='Submit']").click()
    # keep a screenshot as evidence that the submit happened
    driver.save_screenshot('click.png')
    driver.quit()
if __name__ == '__main__':
    main()
|
import copy
import numpy as np
from nanodet.data.transform.warp import (
ShapeTransform,
get_flip_matrix,
get_perspective_matrix,
get_rotation_matrix,
get_scale_matrix,
get_shear_matrix,
get_stretch_matrix,
get_translate_matrix,
warp_and_resize,
)
def test_get_matrix():
    """Compose every warp-matrix factory and check the product is 3x3."""
    # TODO: better unit test
    height, width = 100, 200
    # start from a matrix that moves the image center to the origin
    M = np.eye(3)
    M[0, 2] = -width / 2
    M[1, 2] = -height / 2
    # chain each transform left-to-right; the matmul order is fixed
    M = get_perspective_matrix(0.1) @ M
    M = get_scale_matrix((1, 2)) @ M
    M = get_stretch_matrix((0.5, 1.5), (0.5, 1.5)) @ M
    M = get_rotation_matrix(180) @ M
    M = get_shear_matrix(60) @ M
    M = get_flip_matrix(0.5) @ M
    M = get_translate_matrix(0.5, width, height) @ M
    assert M.shape == (3, 3)
def test_warp():
    """warp_and_resize should rescale boxes and masks to the target shape."""
    base_meta = dict(
        img=np.random.randint(0, 255, size=(100, 200, 3), dtype=np.uint8),
        gt_bboxes=np.array([[0, 0, 20, 20]]),
        gt_masks=[np.zeros((100, 200), dtype=np.uint8)],
    )
    cfg = {}
    # plain resize: img/masks become exactly 50x50, boxes scale per axis
    out = warp_and_resize(copy.deepcopy(base_meta), cfg,
                          dst_shape=(50, 50), keep_ratio=False)
    assert out["img"].shape == (50, 50, 3)
    assert out["gt_masks"][0].shape == (50, 50)
    assert np.array_equal(out["gt_bboxes"],
                          np.array([[0, 0, 5, 10]], dtype=np.float32))
    # keep_ratio downscale: y coordinates shift by the padding offset
    out = warp_and_resize(copy.deepcopy(base_meta), cfg,
                          dst_shape=(50, 50), keep_ratio=True)
    assert np.array_equal(out["gt_bboxes"],
                          np.array([[0, 12.5, 5.0, 17.5]], dtype=np.float32))
    # keep_ratio upscale
    out = warp_and_resize(copy.deepcopy(base_meta), cfg,
                          dst_shape=(300, 300), keep_ratio=True)
    assert np.array_equal(out["gt_bboxes"],
                          np.array([[0, 75, 30, 105]], dtype=np.float32))
def test_shape_transform():
    """ShapeTransform should honor keep_ratio and the divisibility pad."""
    meta = dict(
        img=np.random.randint(0, 255, size=(100, 200, 3), dtype=np.uint8),
        gt_bboxes=np.array([[0, 0, 20, 20]]),
        gt_masks=[np.zeros((100, 200), dtype=np.uint8)],
    )
    # keep ratio, pad both image dims up to a multiple of 32
    keep = ShapeTransform(keep_ratio=True, divisible=32)
    out = keep(meta, dst_shape=(50, 50))
    assert np.array_equal(
        out["gt_bboxes"], np.array([[0, 0, 6.4, 6.4]], dtype=np.float32)
    )
    assert out["img"].shape[0] % 32 == 0
    assert out["img"].shape[1] % 32 == 0
    # NOTE(review): `meta` is reused here without a deepcopy (unlike
    # test_warp) -- presumably ShapeTransform does not mutate its input;
    # verify against the implementation.
    stretch = ShapeTransform(keep_ratio=False)
    out = stretch(meta, dst_shape=(50, 50))
    assert np.array_equal(out["gt_bboxes"],
                          np.array([[0, 0, 5, 10]], dtype=np.float32))
|
from flask import Flask, render_template, request
from data import Book, BOOK_TYPES
import json
# the Flask application object for this module
app = Flask(__name__)
@app.route("/")
def index():
    """Render the landing page with a greeting and the known book types."""
    context = {"greeting": "Welcome!", "book_types": BOOK_TYPES.keys()}
    return render_template("index.html", **context)
@app.route("/charges", methods=["POST"])
def charges():
    """Accept a JSON array of book specs; return the priced book dicts as JSON."""
    payload = request.get_json()
    priced = [Book(**entry).to_dict() for entry in payload]
    return json.dumps(priced)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.