text stringlengths 38 1.54M |
|---|
import pytest
from yads import yads
import json
import tempfile
from exceptions import *
@pytest.fixture
def tempfile_name():
    """Create an empty temp file and return its path.

    mkstemp() hands back an open OS-level file descriptor that the caller
    owns; the original fixture leaked it, so it is closed here.
    """
    import os  # local import: keeps this fixture self-contained
    fd, path = tempfile.mkstemp(suffix=".txt", prefix="test_file")
    os.close(fd)
    return path
def test_invalid_ttl(tempfile_name):
    """A malformed TTL and an out-of-range TTL must both be rejected."""
    store = yads(tempfile_name)
    # "A" is not properly formatted; 0 is not a valid TTL value.
    for bad_ttl in ("A", 0):
        with pytest.raises(TimeToLiveFormatException):
            store.create("A", "{}", bad_ttl)
|
import forecastio
from secrets import FORECAST_IO_API_KEY
import arrow
def get_preci_prob_by_loc_time(lat, lng, time):
    """Return the current precipitation probability at (lat, lng) for *time*.

    `time` is expected to be an arrow object; forecastio wants a plain
    datetime, hence `.datetime`.
    """
    forecast = forecastio.load_forecast(
        FORECAST_IO_API_KEY, lat, lng, time.datetime, units="us")
    return forecast.currently().precipProbability
|
# How to have the 'concept' of numbers?
# Let counting happen by how many times you apply a function to an argument
def zero(x):  # Church numeral 0: ignore x entirely
    """Return a function that applies *x* zero times to its argument."""
    def apply_never(y):
        return y
    return apply_never
def one(x):
    """Church numeral 1: apply *x* exactly once."""
    def apply_once(y):
        return x(y)
    return apply_once
def two(x):
    """Church numeral 2: apply *x* twice."""
    def apply_twice(y):
        return x(x(y))
    return apply_twice
def incr(x):
    """Ordinary integer successor — only used to decode Church numerals."""
    return 1 + x
# Decode each Church numeral by applying the ordinary successor to 0.
print(zero(incr)(0) == 0)
print(one(incr)(0) == 1)
print(two(incr)(0) == 2)
# It's tiring to write all the definitions. I need to GENERATE the next number
def next_number(num):
    """Church successor: given numeral n, return the numeral n + 1."""
    def successor(f):
        def extra_application(x):
            # Apply f as many times as `num` does, then once more.
            return f(num(f)(x))
        return extra_application
    return successor
# Build 3, 4, 5 by repeatedly taking the successor of 2.
three = next_number(two)
four = next_number(three)
five = next_number(four)
print(three(incr)(0) == 3)
print(four(incr)(0) == 4)
# How to add???
# apply x times then apply y times.
# Equivalently: a + b == apply the successor `a` times, starting from b.
def add(a):
    def fn(b):
        def result(x):
            # a(next_number)(b) applies next_number `a` times to b, i.e. b + a;
            # x is the function the resulting numeral is finally applied to.
            return a(next_number)(b)(x)
        return result
    return fn
# all this def+return is too much typing
# (each redefinition below shadows the one above; only the last survives)
def add(a):
    def fn(b):
        return lambda x: a(next_number)(b)(x)
    return fn
def add(a):
    return lambda b: lambda x: a(next_number)(b)(x)
add = lambda a: lambda b: lambda x: a(next_number)(b)(x)
print(add(three)(four)(incr)(0) == 7)
print(add(three)(three)(incr)(0) == 6)
print(add(one)(two)(incr)(0) == 3)
print(add(zero)(two)(incr)(0) == 2)
# Multiplication is composition: (a*b)(f) applies b(f) a times.
def mult(a):
    def fn(b):
        def result(x):
            return a(b(x))
        return result
    return fn
# (successive shadowing rewrites, as with add; only the last definition counts)
def mult(a):
    def fn(b):
        return lambda x: a(b(x))
    return fn
def mult(a):
    return lambda b: lambda x: a(b(x))
mult = lambda a: lambda b: lambda x: a(b(x))
# mult = λa:λb:λx:a(b(x))
# mult = λabx:a(b(x))
# mult = λabx:abx
# mult = λabx.abx
# This is the notation that appears in most papers
print(mult(two)(three)(incr)(0) == 6)
print(mult(two)(two)(incr)(0) == 4)
print(mult(five)(three)(incr)(0) == 15)
def power(a):
    """Church exponentiation: a ** b.

    Fixed: the original returned a(b)(x), which composes b with itself
    `a` times and therefore computes b ** a — the opposite of what the
    checks below expect.  b(a) composes a with itself b times, i.e. a ** b.
    """
    def fn(b):
        def result(x):
            return b(a)(x)
        return result
    return fn
# Lambda form of exponentiation: b(a) composes a with itself b times => a**b.
power = lambda a: lambda b: lambda x: b(a)(x)
print(power(two)(three)(incr)(0) == 8)
print(power(five)(two)(incr)(0) == 25)
print(power(five)(zero)(incr)(0) == 1)
print(power(five)(one)(incr)(0) == 5)
|
from wordpress_project import *
def main():
    """Run the WordPress reconnaissance checks in their original order."""
    welcome()
    # `== False` (not `not ...`) is kept deliberately: a None return must
    # NOT be treated as "finished", matching the original behaviour.
    if discovery_version() == False:
        print("finish!")
        return
    for check in (
        https_and_hsts,
        search_important_headers,
        discovery_admin_panel,
        discovery_wordpress_with_robots_file,
        discovery_usernames_with_author_query,
        discovery_users_with_wp_json,
        checker_xmlrpc,
        find_exposed_folders,
        discovery_plugins,
    ):
        check()
if __name__ == '__main__':
    try:
        main()
    except Exception:
        # Narrowed from a bare `except:` so Ctrl-C (KeyboardInterrupt) and
        # SystemExit still terminate the script instead of being swallowed.
        print("There is a problem. Try again!")
|
# -*- coding: utf-8 -*-
#!/usr/bin/env python
# Python 2 script (urllib2 + print statement): POST one sentence to a local
# LTP analysis server and print the stripped response body.
import urllib, urllib2
uri_base = "http://127.0.0.1:12345/ltp"
# s = the sentence to analyse; the meanings of 'x' and 't' are server-side
# options — NOTE(review): presumably input-format and task selectors; confirm
# against the LTP server documentation.
data = {
    's': '我爱北京天安门',
    'x': 'n',
    't': 'all'}
request = urllib2.Request(uri_base)
params = urllib.urlencode(data)
# Passing `params` as the second argument turns this into a POST.
response = urllib2.urlopen(request, params)
content = response.read().strip()
print content
#!/usr/bin/env python
################################################################################
## Copyright 2017 "Nathan Hwang" <thenoviceoof>
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
################################################################################
# Generate audio from changing gain parameters.
import array
import csv
import math
import wave
from pprint import pprint
SAMPLE_RATE = 44100.0  # Hz
################################################################################
# Read in the functions.
# `params` maps a band name ('constant'/'low'/'medium'/'high') to a list of
# (slope, gain) breakpoints describing a piecewise-linear gain curve.
params = {}
with open('linear_parameters.csv') as file:
    csv_file = csv.reader(file, delimiter=',')
    for row in csv_file:
        if row[0] == '':
            continue
        # First word of the first cell is the band name.
        name = row[0].split(' ')[0].lower()
        values = [float(v) for v in row[1:]]
        # Pair the first 8 values (slopes) with the next 8 (gains).
        # NOTE(review): this file is Python 2 (see iteritems below), where
        # zip() returns a list; under Python 3 this would need list(zip(...)).
        paired_values = zip(values[:8], values[8:])
        params[name] = paired_values
################################################################################
# Generate noise.
def read_wav(path):
    """Read a mono, 16-bit, 44.1 kHz WAV file and return its samples as ints."""
    wav_file = wave.open(path)
    assert wav_file.getnchannels() == 1, 'Expect monochannel audio'
    assert wav_file.getframerate() == SAMPLE_RATE, 'Expect 44.1k audio'
    assert wav_file.getsampwidth() == 2, 'Expected signed 16 bit audio'
    data_string = wav_file.readframes(wav_file.getnframes())
    # Convert the data from string to byte(s) array
    # ('h' = signed 16-bit; fromstring is the Python-2 spelling of frombytes)
    data = array.array('h')
    data.fromstring(data_string)
    return list(data)
data = read_wav('white_noise.wav')
################################################################################
# Filter the noise.
def slope_to_coefficients(params, slope):
    """Map a sweep position *slope* to per-band one-pole filter coefficients.

    Returns a list of (A, B) pairs, one per band, for y[n] = A*y[n-1] + B*x[n].
    """
    # [cutoff_hz, gain] per band; the gain slot is filled in below.
    fc_gain = [[2000000, None], [16.5, None], [270.0, None], [5300.0, None]]
    index_dict = {
        'constant': 0,
        'low': 1,
        'medium': 2,
        'high': 3,
    }
    # Convert the slope to a target gain.
    # NOTE(review): iteritems() makes this Python-2 only.
    for name, linear_fn in params.iteritems():
        max_gain = max([g for _,g in linear_fn])
        gain = None
        # Piecewise-linear interpolation between the (slope, gain) breakpoints.
        for i in range(len(linear_fn)-1):
            if linear_fn[i][0] <= slope < linear_fn[i+1][0]:
                frac = (slope - linear_fn[i][0])/(linear_fn[i+1][0] - linear_fn[i][0])
                gain = linear_fn[i][1] * (1 - frac) + linear_fn[i+1][1] * frac
                break
        if gain is None:
            # Slope fell outside the table: hold the last breakpoint's gain.
            gain = linear_fn[-1][1]
        # Snap to the nearest potentiometer step.
        if gain < 0:
            gain = 0
        steps = 1024
        gain = math.floor(steps*gain/max_gain)/steps * max_gain
        fc_gain[index_dict[name]][1] = gain
    # Convert the fc/gain params to a function
    # A is the one-pole feedback coefficient derived from the cutoff fc.
    tmp_coefs = [(1-(1/SAMPLE_RATE)/(1/(2*math.pi*fc) + 1/SAMPLE_RATE), gain)
                 for fc, gain in fc_gain]
    # Scale the input coefficient so the DC gain of each band equals `gain`.
    coefs = [(A, gain*(1-A)) for A, gain in tmp_coefs]
    return coefs
def apply_continuous_filter(params, slope_spec, data):
    """Run the four-band one-pole filter over *data* while sweeping the slope.

    slope_spec is (max_slope, min_slope); the slope is linearly interpolated
    from min_slope at the first sample to max_slope at the last.  Output
    samples are clamped to the signed 16-bit range.

    Fixed: removed the `fc_gain` / `index_dict` locals that were copied from
    slope_to_coefficients but never used here.
    """
    max_slope, min_slope = slope_spec
    # Apply to the data.
    output_data = []
    filtered = [0, 0, 0, 0]  # per-band one-pole filter state
    for i, d in enumerate(data):
        slope = float(max_slope - min_slope)*(float(i)/len(data)) + min_slope
        coefs = slope_to_coefficients(params, slope)
        # y[n] = A*y[n-1] + B*x[n] for each band.
        filtered = [cf[0] * prev + cf[1] * d for prev, cf in zip(filtered, coefs)]
        raw_output = sum(filtered)
        # Clamp the mixed output into int16 range.
        trim_output = min(max(int(raw_output), -2**15), 2**15 - 1)
        output_data.append(trim_output)
    return output_data
output_data = apply_continuous_filter(params, [-20, 0], data)
################################################################################
# Write the noise to a WAV.
def write_wav(data):
    """Write 16-bit mono samples to filtered_noise.wav at SAMPLE_RATE.

    NOTE(review): the wave file is never close()d explicitly; worth adding.
    """
    wav_file = wave.open('filtered_noise.wav', 'w')
    wav_file.setnchannels(1)
    wav_file.setframerate(SAMPLE_RATE)
    wav_file.setsampwidth(2)
    # Convert from byte(s) array to string
    # (tostring is the Python-2 spelling of tobytes)
    arr = array.array('h')
    arr.fromlist(data)
    wav_file.writeframes(arr.tostring())
write_wav(output_data)
|
# Exceptions, APIs and Protocols
################################################################################
# few exception types
# IndexError # integer index is out of range
# KeyError   # look-up in a mapping fails
# ValueError # object is of the right type, but contains an inappropriate value.
# TypeError
z = [1, 4, 2]
# Fixed: each demo is now wrapped in try/except — previously the first
# uncaught IndexError aborted the script, so the later examples never ran.
try:
    z[4]
except IndexError as e:
    print('IndexError:', e)  # list index out of range
try:
    int("jim")
except ValueError as e:
    print('ValueError:', e)  # invalid literal for int() with base 10: 'jim'
codes = dict(gb=44, us=1, no=47, fr=33, es=34)
try:
    codes['de']
except KeyError as e:
    print('KeyError:', e)  # 'de'
################################################################################
|
"""
dp[j]表示可以使用的硬币方法
dp[j-coins[i]]表示使用一个coins[i]拼接成j的方法
同时,由于按硬币面值从小到大考虑,dp[j]为前i种类型硬币
进行组合时的组合数,当组成j的币值种类中包含coins[i]时,
计算如下:
组成面值和为j的硬币中,最后一枚不使用coins[i]时的组合数,即上一步所求结果dp[j]
组成面值和为j的硬币中,最后一枚使用coins[i]时的组合数,即dp[j-coins[i]]
这两个事件构成了事件的全体,因此dp[j] = dp[j]+dp[j-coins[i]]
"""
class Coins:
def countWays(self, n):
coins = [1, 5, 10, 25]
dp = [0 for i in range(n + 1)]
dp[0] = 1
for i in range(4):
for j in range(coins[i], n + 1):
dp[j] = (dp[j] + dp[j - coins[i]]) % 1000000007
return dp[n]
|
import re
from collections import Counter
from aocd import data
def generate_coords(claim):
    """Expand a claim tuple (id, left, top, width, height) into (cells, id)."""
    claim_id, left, top, width, height = claim
    cells = [(x, y)
             for x in range(left, left + width)
             for y in range(top, top + height)]
    return cells, claim_id
def part_1(data):
    """Count fabric cells covered by more than one claim."""
    cell_counts = Counter()
    for claim in data:
        cells, _ = generate_coords(claim)
        cell_counts.update(cells)
    return sum(1 for count in cell_counts.values() if count > 1)
def part_2(data):
    """Return the id of the single claim that overlaps no other claim."""
    claims = {}
    cell_counts = Counter()
    for claim in data:
        cells, claim_id = generate_coords(claim)
        claims[claim_id] = cells
        cell_counts.update(cells)
    contested = {cell for cell, count in cell_counts.items() if count > 1}
    for claim_id, cells in claims.items():
        if contested.isdisjoint(cells):
            return claim_id
if __name__ == '__main__':
    # r'\d+' avoids the invalid-escape DeprecationWarning; `line` avoids
    # shadowing the built-in `input`.
    data = [tuple(map(int, re.findall(r'\d+', line))) for line in data.split('\n')]
    print(f"Part 1: {part_1(data)}")
    print(f"Part 2: {part_2(data)}")
|
#Известен ГОД. Определить, будет ли этот год високосным, и к какому веку этот относится.
#Високосный год это каждый 4 год.
# NOTE(review): the task statement itself defines leap = every 4th year; the
# full Gregorian rule would also exclude centuries not divisible by 400.
year = int(input())
if year % 4 == 0:
    print("Високосный")
else:
    print("Не високосный")
# Century n spans years (n-1)*100+1 .. n*100, so subtract 1 before dividing:
# the original `year//100+1` put year 2000 in the 21st century instead of the 20th.
print((year - 1) // 100 + 1, "\tВек")
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import Queue
import sys
from threading import Thread
# working thread
# (Python 2 code: print statements and the Queue module.)
class Worker(Thread):
    # Class-wide counter used to give each worker a readable id.
    worker_count = 0
    # Seconds to wait for a new job before the worker retires.
    timeout = 2
    def __init__(self, work_queue, result_queue, **kwargs):
        Thread.__init__(self, **kwargs)
        self.id = Worker.worker_count
        Worker.worker_count += 1
        # Daemon thread: the process may exit while workers are blocked.
        self.setDaemon(True)
        self.workQueue = work_queue
        self.resultQueue = result_queue
        # NOTE(review): starting the thread inside __init__ means run() can
        # execute before any subclass finishes its own initialisation.
        self.start()
    def run(self):
        """ the get-some-work, do-some-work main loop of worker threads """
        while True:
            try:
                callable, args, kwds = self.workQueue.get(timeout=Worker.timeout)
                res = callable(*args, **kwds)
                print "worker[%d]'s result: %s" % (self.id, str(res))
                self.resultQueue.put(res)
                # time.sleep(Worker.sleep)
            except Queue.Empty:
                # No work arrived within `timeout` seconds: exit the loop.
                break
            except:
                # Report the failure, then re-raise so it is not silently lost.
                print "worker[%2d]" % self.id, sys.exc_info()[:2]
                raise
class WorkerManager:
def __init__(self, num_of_workers=10, timeout=2):
self.workQueue = Queue.Queue()
self.resultQueue = Queue.Queue()
self.workers = []
self.timeout = timeout
self._recruit_threads(num_of_workers)
def _recruit_threads(self, num_of_workers):
for i in range(num_of_workers):
worker = Worker(self.workQueue, self.resultQueue)
self.workers.append(worker)
def wait_for_complete(self):
# ...then, wait for each of them to terminate:
while len(self.workers):
worker = self.workers.pop()
worker.join()
if worker.isAlive() and not self.workQueue.empty():
self.workers.append(worker)
print "All jobs are are completed."
def add_job(self, callable, *args, **kwargs):
self.workQueue.put((callable, args, kwargs))
def get_result(self, *args, **kwargs):
return self.resultQueue.get(*args, **kwargs)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayEcapiprodDataPutResponse(AlipayResponse):
    """Response wrapper for alipay.ecapiprod.data.put (generated SDK style)."""
    def __init__(self):
        super(AlipayEcapiprodDataPutResponse, self).__init__()
        self._data_version = None
    @property
    def data_version(self):
        # Version tag echoed back by the gateway, when present.
        return self._data_version
    @data_version.setter
    def data_version(self, value):
        self._data_version = value
    def parse_response_content(self, response_content):
        """Populate fields from the decoded payload.

        Returns None, matching the SDK's convention for these subclasses.
        """
        response = super(AlipayEcapiprodDataPutResponse, self).parse_response_content(response_content)
        if 'data_version' in response:
            self.data_version = response['data_version']
|
from PIL import Image, ImageFont, ImageDraw
from string import *
def main():
    """Render every ASCII letter in a set of fonts and emit training data.

    For each (font, letter) pair a glyph PNG is saved and one line
    "<letter> <pixel-list>" is appended to trainingLists.txt, where each
    pixel is 1 for ink and 0 for background.
    """
    # Raw strings avoid accidental escape sequences (the original had to
    # double the backslash only for names starting with t/T); the actual
    # path values are unchanged.
    font_paths = [
        r"C:\Windows\Fonts\OCRAEXT.ttf",
        r"C:\Windows\Fonts\AGENCYR.ttf",
        r"C:\Windows\Fonts\ARIALUNI.ttf",
        r"C:\Windows\Fonts\Hack-Regular.ttf",
        r"C:\Windows\Fonts\FTLTLT.ttf",
        r"C:\Windows\Fonts\GARA.ttf",
        r"C:\Windows\Fonts\REFSAN.ttf",
        r"C:\Windows\Fonts\VINERITC.ttf",
        r"C:\Windows\Fonts\times.ttf",
        r"C:\Windows\Fonts\timesi.ttf",
        r"C:\Windows\Fonts\trebuc.ttf",
        r"C:\Windows\Fonts\Tiger.ttf",
        r"C:\Windows\Fonts\Tiger Expert.ttf",
        r"C:\Windows\Fonts\TEMPSITC.ttf",
        r"C:\Windows\Fonts\tahoma.ttf",
        r"C:\Windows\Fonts\TektonPro-BoldCond.otf",
        r"C:\Windows\Fonts\AdobeHebrew-Regular.otf",
        r"C:\Windows\Fonts\AdobeArabic-Regular.otf",
        r"C:\Windows\Fonts\AdobeArabic-Bold.otf",
        r"C:\Windows\Fonts\SourceSansPro-Regular.otf",
        r"C:\Windows\Fonts\Sitka.ttc",
        r"C:\Windows\Fonts\MOD20.ttf",
        r"C:\Windows\Fonts\AdobeMingStd-Light.otf",
        r"C:\Windows\Fonts\AdobeSongStd-Light.otf",
    ]
    fontList = [ImageFont.truetype(path, 18) for path in font_paths]
    # `with` guarantees the training file is flushed and closed — it was
    # previously opened and never closed.
    with open('trainingLists.txt', 'w') as file:
        for font in fontList:
            for character in ascii_letters:
                im = Image.open("empty.png")
                draw = ImageDraw.Draw(im)
                draw.text((4, 0), character, (0, 0, 0), font=font)
                saveName = ("training letters/" + font.getname()[0] +
                            character + str(ord(character)) + ".png")
                im.save(saveName)
                im = im.convert('1')  # 1-bit: 255 = background, 0 = ink
                pixelList = list(im.getdata())
                for x in range(len(pixelList)):
                    # 255 // 255 == 1 -> not -> 0 (background); ink -> 1.
                    pixelList[x] = int(not (pixelList[x] // 255))
                print(character, pixelList, file=file)
if __name__ == "__main__":
    main()
import h5py as h5
import numpy as np
import pytest
import six
from nexusformat.nexus import *
# Shared fixture fields reused across the tests below.
field1 = NXfield((1,2), name="f1")
field2 = NXfield((3,4), name="f2")
field3 = NXfield((5,6), name="f3")
def test_group_creation():
    """Exercise group construction, item assignment and nested path lookup.

    Order matters: later assertions build on objects created earlier, so
    the statements are deliberately sequential.
    """
    group1 = NXgroup()
    assert len(group1) == 0
    group2 = NXgroup(field1)
    assert len(group2) == 1
    assert "f1" in group2
    group1["f2"] = field2
    assert "f2" in group1
    group1["g2"] = group2
    assert len(group1) == 2
    assert "g2" in group1
    # Path-style lookup must return the identical child object.
    assert group1["g2/f1"] is group1.g2.f1
    group1["g2/f3"] = field3
    assert "f3" in group1["g2"]
    assert "g2/f3" in group1
    group3 = NXgroup(g1=group1)
    assert "g1/g2/f1" in group3
def test_group_insertion():
    """insert() must store the field under the explicit name, not its own."""
    target = NXgroup()
    # field2 carries the name "f2"; the keyword overrides it to "f1".
    target.insert(field2, name="f1")
    assert len(target) == 1
    assert "f1" in target
def test_entry_creation():
    """A default NXentry is named 'entry' and reports the right NX class."""
    entry = NXentry()
    assert isinstance(entry, NXentry)
    assert entry.nxname == "entry"
    assert entry.nxclass == "NXentry"
def test_group_title():
    """nxtitle should mirror the 'title' child field."""
    entry = NXentry()
    entry["title"] = "Group Title"
    assert entry.nxtitle == "Group Title"
|
# -*- coding: utf-8 -*-
from selenium.webdriver.chrome.webdriver import WebDriver
# Flipped to False when any verification below fails.
success = True
# NOTE(review): passing the driver path positionally is the pre-Selenium-4
# API; Selenium 4 expects Service(executable_path=...) — confirm the pinned
# selenium version.
wd = WebDriver(r"myDir\chromedriver.exe")
wd.implicitly_wait(60)
def is_alert_present(wd):
    """Return True when a JavaScript alert is currently open in *wd*."""
    try:
        # switch_to_alert() was deprecated (and removed in Selenium 4.3);
        # switch_to.alert is the current API. Accessing .text raises when
        # no alert is open.
        wd.switch_to.alert.text
        return True
    except Exception:
        # Narrowed from a bare except: so KeyboardInterrupt still propagates.
        return False
try:
    wd.get("http://localhost:5002/")
    wd.find_element_by_xpath("//div[@class='container']//button[.='Send bug']").click()
    # Submit a bug report for a team id that is known not to exist...
    wd.find_element_by_name("team_id").click()
    wd.find_element_by_name("team_id").clear()
    wd.find_element_by_name("team_id").send_keys("123")
    wd.find_element_by_name("bug_content").click()
    wd.find_element_by_name("bug_content").clear()
    wd.find_element_by_name("bug_content").send_keys("test1")
    wd.find_element_by_id("send-button").click()
    # ...and expect the app to report the missing team.
    if not ("Team with id: '123' was not found." in wd.find_element_by_tag_name("html").text):
        success = False
        print("verifyTextPresent failed")
    wd.find_element_by_xpath("//div[@class='container']//button[.='Go back']").click()
    wd.find_element_by_xpath("//div[@class='container']//button[.='Go back']").click()
finally:
    # Always release the browser, pass or fail.
    wd.quit()
if not success:
    raise Exception("Test failed.")
|
# Given pointers to the head nodes of 2 linked lists that merge together at some point,
# find the node where the two lists merge. The merge point is where both lists point to the same node,
# i.e. they reference the same memory location. It is guaranteed that the two head nodes will be different,
# and neither will be NULL. If the lists share a common node, return that node's value.
class SinglyLinkedListNode:
    """One element of a singly linked list."""
    def __init__(self, node_data):
        # Payload plus a forward pointer (None until the node is linked).
        self.data = node_data
        self.next = None
class SinglyLinkedList:
    """Minimal append-only singly linked list tracking head and tail."""
    def __init__(self):
        self.head = None
        self.tail = None
    def insert_node(self, node_data):
        """Append a new node holding *node_data* at the tail."""
        node = SinglyLinkedListNode(node_data)
        if self.head is None:
            # First element becomes both head and tail.
            self.head = node
        else:
            self.tail.next = node
        self.tail = node
def print_singly_linked_list(node, sep, fptr):
    """Print each node's data on its own line to stdout.

    NOTE(review): `sep` and `fptr` are accepted but unused — output goes to
    stdout with newlines regardless; confirm against the harness contract.
    """
    current = node
    while current is not None:
        print(str(current.data))
        current = current.next
# Complete the findMergeNode function below.
#
# For your reference:
#
# SinglyLinkedListNode:
#     int data
#     SinglyLinkedListNode next
#
#
def findMergeNode(head1, head2):
    """Return the data of the first node shared by both lists.

    Walks the two lists in lock-step, remembering every node seen by
    identity; the first node encountered twice is the merge point.
    O(n+m) time, O(n+m) extra space.  Returns None if the lists never meet.
    """
    seen = set()
    cursors = [head1, head2]
    while cursors[0] is not None or cursors[1] is not None:
        # Advance list 1 first, then list 2, exactly as the original did.
        for idx in (0, 1):
            node = cursors[idx]
            if node is None:
                continue
            if node in seen:
                return node.data
            seen.add(node)
            cursors[idx] = node.next
|
from netmiko import ConnectHandler
import os
# Connection parameters for the lab ASA; the password comes from the
# `ciscopass` environment variable so it is not committed to source control.
ciscoasa = {
    'device_type': 'cisco_asa',
    'ip': '192.168.1.76',
    'username': 'cisco',
    'password': os.getenv('ciscopass'),
}
conn = ConnectHandler(**ciscoasa)
# Disable paging and keep logging while a peer unit is down, then persist.
config_commands = ['pager 0', 'logging permit-hostdown']
output = conn.send_config_set(config_commands)
wr = conn.save_config()
conn.disconnect()
print(output)
print(wr)
# BOJ 11724 — count of connected components (2021-02-24)
import sys
from collections import deque
# Fast input for large test data: rebind input to sys.stdin.readline.
input = sys.stdin.readline
def main():
    """Count connected components of an undirected graph via BFS (BOJ 11724)."""
    n, m = map(int, input().split())
    graph = [[] for _ in range(n + 1)]  # 1-based adjacency lists
    for _ in range(m):
        u, v = map(int, input().split())
        graph[u].append(v)
        graph[v].append(u)
    seen = [False] * (n + 1)
    components = 0
    queue = deque()
    for start in range(1, n + 1):
        if seen[start]:
            continue
        # Unvisited vertex: start a fresh BFS and count one more component.
        components += 1
        seen[start] = True
        queue.append(start)
        while queue:
            node = queue.popleft()
            for neighbour in graph[node]:
                if not seen[neighbour]:
                    seen[neighbour] = True
                    queue.append(neighbour)
    print(components)
if __name__ == '__main__':
    main()
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import astropy.constants as c
import astropy.units as u
import sys
import matplotlib.animation as animation
sys.path.insert(0, '../')
from matplotlib.colors import LogNorm
from six.moves import cPickle as pickle
# In[7]:
import pyathena as pa
# In[8]:
# Simulation output location and run identifier.
basedir = 'D:/yeongu/'
simid = 'n1e-4_v1414'
# ## Unit system
#
# The unit system we choose for this simulation is
# * [length] = pc
# * [velocity] = km/s
# * [density] = 1.4271*m_h/cm^3
# In[9]:
# You can retrive the unit system with pa.set_units function.
# To make unit conversion easier, I use astropy's unit and constant.
# You may need to install astropy to use it
# please visit http://www.astropy.org/
unit = pa.set_units(muH=1.4271)
#print(unit)
print (unit['density'].cgs / 1.4271 / c.m_p.cgs, unit['velocity'], unit['length'])
# other units can be easily obtained
print (unit['mass'], unit['time'], unit['magnetic_field'], unit['temperature'])
# NOTE(review): presumably the conversion factor from code magnetic-field
# units to microgauss (used with mag_str below) — confirm its derivation.
MFu = 0.5476852239548456
# ## Read Full data cube
#
# Original data cube is stored in "vtk" files. For the MPI simulations, Athena dumps the same number of vtk files with the number of proccessors. Each vtk file has data for all physical variables for a part of the simulation "domain" (it is called "grid"). I wrote a reader to read each grid, get data from it, and merge into a big data array for full domain.
# In[10]:
# import pyathena as pa
# Main loop: for each snapshot index, read the vtk dump, project magnetic
# field strength and density along the z axis, and save one PNG of each.
ims=[]
#fig, ax = plt.subplots()
for tidx in range(251, 501):
    vtkfname = '%s%s/id0/%s.%04d.vtk' % (basedir, simid, simid, tidx)
    # read in domain information
    ds = pa.AthenaDataSet(vtkfname)
    # name of original data fields we stored from the simulation
    print(ds.field_list)
    # It also has predefined data fields can be calculated from the original data.
    #print(ds.derived_field_list)
    # full domain information
    ds.domain
    # information of grid #0
    ds.grids[0]
    # yet, we didn't read data.
    # let's read each data field in a full domain
    # this can be original data fields
    d = ds.read_all_data('density')
    # note that it adopts C-like indexing, k (z-index) comes first and i (x-index) comes last
    # vector field has 3 component
    nd = ds.read_all_data('number_density')
    #print (nd.shape)
    T = ds.read_all_data('T1')
    #print(T)
    tem = ds.read_all_data('temperature') # Temperature from data
    #print (tem.shape)
    # calculate sound speed
    P = ds.read_all_data('pressure')
    cs = np.sqrt(P / d)
    # calculation of temperature needs additional information about mean molecular weight I adopt
    coolftn = pa.coolftn()
    temp = coolftn.get_temp(P / d) # Temperature from P/d
    mag = ds.read_all_data('magnetic_field')
    mag1 = ds.read_all_data('magnetic_field1') #z axis comp.
    mag2 = ds.read_all_data('magnetic_field2') #y axis comp.
    mag3 = ds.read_all_data('magnetic_field3') #x axis comp.
    #print(mag.shape)
    # Field magnitude from the three components.
    mag_str = np.sqrt(mag1**2+mag2**2+mag3**2) # Sum of mag comp.
    #print(mag_str)
    #print(mag_str.shape)
    print(tidx)
    #print(np.amax(mag_str)*MFu,np.amin(mag_str)*MFu)
    #fig = plt.figure()
    #mag_up = mag_str[16:32]
    #mag_up = np.sum(mag_up,axis=0)
    #mag_lw = mag_str[0:16]
    #mag_lw = np.sum(mag_lw,axis=0)
    zlen = 448
    # Project along z (axis 0 is the z index in this C-ordered cube).
    mag_str = np.sum(mag_str,axis = 0)
    d = np.sum(d,axis = 0)
    #print(mag_up.shape)
    #print(mag_lw.shape)
    # --- Magnetic-field projection map ---
    plt.imshow(mag_str,origin='lower',animated=True,cmap = 'copper',interpolation='bilinear')
    plt.title('Projection Magnetic map_T = %s Myr' % tidx)
    cbar = plt.colorbar()
    cbar.ax.set_ylabel(r'B[$\mu$G]')
    plt.clim(0,1000)
    plt.xlabel('x')
    plt.ylabel('y')
    plt.savefig('./magmap/MagMap_%s.png' % tidx)
    plt.clf()
    #plt.show()
    #ims.append([im])
    #plt.show()
    # --- Density projection map ---
    plt.imshow(d,origin='lower',interpolation='bilinear')
    cbar=plt.colorbar()
    cbar.ax.set_ylabel(r'$n_H[cm^{-3}]$')
    plt.clim(0,250)
    plt.title('Projection Density map_T = %s Myr' % tidx)
    #plt.tight_layout()
    plt.xlabel('x')
    plt.ylabel('y')
    plt.savefig('./denmap/DenMap_%s.png' % tidx)
    plt.clf()
    #plt.show()
#ani = animation.FuncAnimation(fig, ims, interval = 200,blit=True)
#plt.draw()
#plt.show()
#ani.save('densitimap_z_%s.gif' %z, writer='imagemagick')
|
from flask import Flask, jsonify, abort, request
from flask_script import Manager
# 导入认证的类库
from flask_httpauth import HTTPBasicAuth
from flask_restful import Api,Resource
app = Flask(__name__)
manager = Manager(app)
# NOTE(review): flask_restful's Api is normally constructed as Api(app);
# Api(__name__) passes the module-name string — verify that add_resource
# below actually registers its routes on `app`.
api=Api(__name__)
auth = HTTPBasicAuth()
# Authentication callback registration: the verify callback below runs
# automatically whenever a view requires auth and returns True on success.
@auth.verify_password
def verify_password(username, password):
    """HTTP Basic-auth callback: accept only the single demo account."""
    # NOTE(review): credentials hard-coded in source — acceptable for a demo
    # only; never do this in production.
    return username == 'ZXY' and password == 'asdqwe'
@auth.error_handler
def unauthorized():
    # 403 instead of 401 is a common Flask-HTTPAuth choice: a 401 response
    # would make browsers pop up their own basic-auth dialog.
    return jsonify({'error':'Unauthorized Access'}),403
# In-memory sample data standing in for a real database.
posts = [
    {
        "id": 1,
        "title": "Python入门",
        "content": "很多人都认为Python的语法很简单,但是真正能够用好的又有几个"
    },
    {
        "id": 2,
        "title": "WEB开发入门",
        "content": "HTML看起来很简单,用起来也简单,但是写出优雅的页面还是有点难度的"
    }
]
# JSON (rather than HTML) bodies for the two common API error codes.
# 404: page/resource not found.
@app.errorhandler(404)
def page_not_found(e):
    return jsonify({'error': 'page not found'}), 404
# 400: malformed request.
@app.errorhandler(400)
def bad_request(e):
    return jsonify({'error': 'bad request'}), 400
# RESTful API endpoints
# List all posts
@app.route('/posts',methods=['GET'])
# @auth.login_required  # authentication left disabled, as in the original
def get_posts_list():
    """Return the full list of posts.

    Fixed: the @app.route decorator previously had no function under it (the
    handler body was commented out), so the decorator fell through to the
    next statement — the UserAPI class — registering the class itself as the
    '/posts' view.  Restoring the handler removes that dangling decorator.
    """
    return jsonify({'posts': posts})
#
#
# #获取指定资源
# @app.route('/posts/<int:pid>',methods=['GET'])
# def get_posts(pid):
# p=list(filter(lambda t: t['id'] == pid,posts))
# if len(p) == 0:
# abort(404)
# return jsonify({'posts':p[0]})
#
# #创建新资源
# @app.route('/posts',methods=['POST'])
# def create_posts():
# if not request.json or 'title' not in request.json or 'content' not in request.json:
# abort(400)
# #新建资源
# post = {
# "id":posts[-1]['id']+1,
# 'title':request.json['title'],
# 'content':request.json['content']
# }
# #保存资源
# posts.append(post)
# return jsonify({'posts':post}),201
#
# #修改资源
# @app.route('/posts/<int:ppid>',methods=['PUT'])
# def put_posts(ppid):
# p = list(filter(lambda t: t['id'] == ppid, posts))
# if len(p) == 0:
# abort(400)
# #新建资源
# if 'title' in request.json:
# p[0]['title']=request.json.get('title')
# if 'content' in request.json:
# p[0]['content']=request.json.get('content')
#
# return jsonify({'posts':p[0]})
#
#
# #删除指定资源
# @app.route('/posts/<int:pid>',methods=['DELETE'])
# def delete_posts(pid):
# p=list(filter(lambda t: t['id'] == pid,posts))
# if len(p) == 0:
# abort(404)
# posts.remove(p[0])
# return jsonify({'posts':posts})
#添加RESTful的API接口
#获取资源列表
# @app.route('/posts/',methods=['GET'])
# def get_posts_list():
# return 'GET:Z帖子列表展示'
#
# #获取指定资源
# @app.route('/posts/<id>',methods=['GET'])
# def get_posts(id):
# return 'GET:%s号帖子详情'% id
#
# #创建新资源
# @app.route('/posts/',methods=['POST'])
# def create_posts():
# return 'POST:资源创建已完成'
#
# #修改指定资源
# @app.route('/posts/<id>',methods=['PUT'])
# def update_posts(id):
# return 'PUT:%s号数据更新完成' % id
#
# #删除制定资源
# @app.route('/posts/<id>',methods=['DELETE'])
# def delete_posts(id):
# return 'DELETE:%s号数据已删除'% id
class UserAPI(Resource):
    """Stub user endpoint: every verb replies with its own name."""
    def _reply(self, verb):
        # Single helper keeps the three verb handlers in sync.
        return {'User': verb}
    def get(self, id):
        return self._reply('GET')
    def put(self, id):
        return self._reply('PUT')
    def delete(self, id):
        return self._reply('DELETE')
# Register the resource.
#   arg 1: the resource class; arg 2+: one or more route rules;
#   endpoint: name used for url_for() lookups.
api.add_resource(UserAPI,'/user/<int:id>',endpoint = 'user')
@app.route('/')
def hello_world():
    # Plain-text landing page for the API root.
    return 'RESTful API'
if __name__ == '__main__':
    # flask_script Manager CLI (e.g. `python thisfile.py runserver`).
    manager.run()
|
from django.shortcuts import render
# from django.template import loader
from django.views.generic import View
from django.views import generic
# Create your views here.
class IndexView(generic.TemplateView):
    """Static landing page for the exchange app."""
    template_name = 'exchange/index.html'
|
#use ordered set
def newNumeralSystem(number):
    """List every "X + Y" decomposition of *number* in the A=0..Z=25 system.

    Each unordered pair appears once, smaller letter first, ordered by the
    first letter (so "A + A"-style self-pairs are included when the value
    is even).
    """
    letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    total = letters.index(number)
    # low runs over the smaller addend only, so duplicates never arise.
    return [letters[low] + " + " + letters[total - low]
            for low in range(total // 2 + 1)]
print(newNumeralSystem("W"))
|
from django.db import models
from django.contrib.auth.models import User
from django_jalali.db import models as jmodels
from .utils import get_kebab_case, get_formatted_jdatetime
# from .model_mixins import LogMixin
from .value_choices import (
WORK_UPDATE_TYPES,
ATTENDANCE_ACTION_TYPES,
AVAILABILITY_STATUS_REASON_TYPES,
ACTIVITY_TYPES,
DAYLONG_STATUS_TYPES,
INDEFINITE_STATUS_TYPES,
)
class Employee(models.Model):
    """Company employee; linked one-to-one with the Django auth user."""
    first_name = models.CharField(max_length=100)
    last_name = models.CharField(max_length=100)
    job_title = models.CharField(max_length=100)
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    class Meta:
        verbose_name = "کارمند"
        verbose_name_plural = "کارمندان"
    @property
    def full_name(self):
        return f"{self.first_name} {self.last_name}"
    def __str__(self):
        return f"{self.first_name} {self.last_name} - {self.job_title}"
    @property
    def current_availability_status(self):
        # Most recent status row wins.
        # NOTE(review): first() returns None when no rows exist yet, so this
        # raises AttributeError for a brand-new employee — confirm intended.
        return (
            AvailabilityStatus.objects.filter(employee=self)
            .order_by("-datetime_occured")
            .first()
            .reason
        )
    @property
    def current_availability_status_text(self):
        # Human-readable display value of the latest status.
        return (
            AvailabilityStatus.objects.filter(employee=self)
            .order_by("-datetime_occured")
            .first()
            .get_reason_display()
        )
    @property
    def current_work_update(self):
        # String form of the most recent WorkUpdate.
        return (
            WorkUpdate.objects.filter(employee=self)
            .order_by("-datetime_occured")
            .first()
            .__str__()
        )
    @property
    def current_availability_status_class(self):
        # Kebab-case form of the status, for use as a CSS class.
        return get_kebab_case(self.current_availability_status)
class LogMixin(models.Model):
    """Abstract base that records a Log row every time the model is saved."""
    log = models.OneToOneField("Log", on_delete=models.CASCADE, null=True, blank=True)
    employee = models.ForeignKey("Employee", on_delete=models.CASCADE, default=1)
    datetime_occured = jmodels.jDateTimeField(auto_now_add=True)
    class Meta:
        abstract = True
    def get_type_info(self):
        """Subclasses return a short, human-readable event-type label."""
        pass
    def save(self, *args, **kwargs):
        super().save(*args, **kwargs)
        _log = Log(
            employee=self.employee,
            event_message=str(self),
            event_type=self.get_type_info(),
        )
        _log.save()
        self.log = _log
        # Fixed: the original assigned self.log only in memory after saving,
        # so the log FK was never written to the database.  update_fields
        # persists just that column; super().save() bypasses this override,
        # so no duplicate Log row is created.
        super().save(update_fields=["log"])
class Log(models.Model):
    """Audit-trail entry describing one employee event (written by LogMixin)."""
    objects = jmodels.jManager()
    datetime_occured = jmodels.jDateTimeField(auto_now_add=True)
    employee = models.ForeignKey("Employee", on_delete=models.SET_NULL, null=True)
    event_message = models.TextField()
    event_type = models.CharField(max_length=40)
    class Meta:
        verbose_name = "لاگ"
        verbose_name_plural = "لاگها"
    @property
    def datetime_occured_formatted(self):
        # Jalali-formatted timestamp for display.
        return get_formatted_jdatetime(self.datetime_occured)
    def __str__(self):
        return f"{self.datetime_occured_formatted} - {self.event_message}"
class Activity(models.Model):
    """A named activity an employee can report working on."""
    name = models.CharField(max_length=50)
    activity_type = models.CharField(
        max_length=50, choices=ACTIVITY_TYPES, default="Standard"
    )
    class Meta:
        verbose_name = "فعالیت"
        verbose_name_plural = "فعالیتها"
    def __str__(self):
        return f"{self.get_activity_type_display()} - {self.name}"
class WorkUpdate(LogMixin):
    """An employee's report about a piece of work (start/progress/etc.)."""
    update_type = models.CharField(max_length=20, choices=WORK_UPDATE_TYPES)
    activity = models.ForeignKey("Activity", on_delete=models.SET_NULL, null=True)
    work_title = models.CharField(max_length=200)
    notes = models.TextField(null=True, blank=True)
    estimated_remaining_time = models.DurationField()
    class Meta:
        verbose_name = "اعلان کاری"
        verbose_name_plural = "اعلانات کاری"
    def get_type_info(self):
        # Label stored by LogMixin as the audit Log's event_type.
        return self.get_update_type_display()
    def __str__(self):
        # NOTE(review): trailing space before the closing quote is preserved
        # verbatim; confirm whether it is intentional.
        return f"{self.get_type_info()}: {self.activity} - {self.work_title} "
class Workplace(models.Model):
    """A physical location where attendance can be registered."""
    name = models.CharField(max_length=40)
    class Meta:
        verbose_name = "محل کار"
        verbose_name_plural = "محلهای کار"
    def __str__(self):
        return self.name
class Attendance(LogMixin):
    """A check-in/check-out event for an employee at a workplace."""
    workplace = models.ForeignKey("Workplace", on_delete=models.SET_NULL, null=True)
    action_type = models.CharField(max_length=10, choices=ATTENDANCE_ACTION_TYPES)
    class Meta:
        verbose_name = "سابقهی حضور و غیاب"
        verbose_name_plural = "سوابق حضور و غیاب"
    def get_type_info(self):
        # Label stored by LogMixin as the audit Log's event_type.
        return self.get_action_type_display()
    def __str__(self):
        return f"{self.get_type_info()}: {self.employee} - {self.workplace}"
class AvailabilityStatus(LogMixin):
    """An employee's availability state (with an optional expiry time)."""
    objects = jmodels.jManager()
    reason = models.CharField(max_length=20, choices=AVAILABILITY_STATUS_REASON_TYPES)
    until = jmodels.jDateTimeField()
    class Meta:
        verbose_name = "وضعیت کارمند"
        verbose_name_plural = "وضعیتهای کارمند"
    def get_type_info(self):
        # Label stored by LogMixin as the audit Log's event_type.
        return self.get_reason_display()
    @property
    def until_formatted(self):
        # Day-long reasons show only a date; others show full date-time.
        object_type = "date" if self.reason in DAYLONG_STATUS_TYPES else "datetime"
        formatted_datetime = get_formatted_jdatetime(
            self.until, object_type=object_type, show_seconds=False
        )
        return formatted_datetime
    def __str__(self):
        # Indefinite reasons omit the "until ..." suffix.
        until_details = (
            "" if self.reason in INDEFINITE_STATUS_TYPES else f" تا {self.until_formatted}"
        )
        return f"{self.employee} - {self.get_reason_display()} {until_details}"
|
from game_picture.player import *
from game_picture.mob import Mob
from game_picture.explosion import Explosion
from game_picture.pow import Pow
import pygame
import os
import random
'''
初始化格式
'''
pygame.init()
pygame.mixer.init()
screen = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.display.set_caption("飞机大战11.0")
clock = pygame.time.Clock()
background = pygame.image.load(os.path.join(os.path.join(os.getcwd()),'image','main_photo.jpg')).convert()
background_rect = background.get_rect()
pygame.mixer.music.load(os.path.join(os.path.join(os.getcwd()),'snd', 'tgfcoder-FrozenJam-SeamlessLoop.ogg'))
pygame.mixer.music.set_volume(0.4)
font_name = pygame.font.match_font('arial')
def draw_text(surf, text, size, x, y):
    """Render *text* in white at *size* pt, centered horizontally on (x, y)."""
    rendered = pygame.font.Font(font_name, size).render(text, True, WHITE)
    rect = rendered.get_rect(midtop=(x, y))
    surf.blit(rendered, rect)
def draw_shield_bar(surf, x, y, pct):
    """Draw the shield gauge at (x, y), filled to *pct* percent.

    pct is clamped to [0, 100]; the original clamped only negatives, so a
    shield value above 100 drew the fill past the outline.
    """
    BAR_LENGTH = 100
    BAR_HEIGHT = 10
    pct = max(0, min(pct, 100))
    fill = (pct / 100) * BAR_LENGTH
    outline_rect = pygame.Rect(x, y, BAR_LENGTH, BAR_HEIGHT)
    fill_rect = pygame.Rect(x, y, fill, BAR_HEIGHT)
    pygame.draw.rect(surf, GREEN, fill_rect)
    pygame.draw.rect(surf, WHITE, outline_rect, 2)
def draw_lives(surf, x, y, lives, img):
    """Blit one life icon per remaining life, spaced 30 px apart from (x, y)."""
    for slot in range(lives):
        icon_rect = img.get_rect()
        icon_rect.x = x + 30 * slot
        icon_rect.y = y
        surf.blit(img, icon_rect)
def newmob():
    """Spawn one Mob and register it with the global sprite groups."""
    mob = Mob()
    for group in (all_sprites, mobs):
        group.add(mob)
def hide(self):
    # hide the player temporarily
    # NOTE(review): this looks like a Player method pasted at module level
    # (it takes `self` and mutates sprite state); nothing in this file calls
    # it — the game loop calls player.hide() from the Player class instead.
    self.hidden = True
    self.hide_timer = pygame.time.get_ticks()
    self.rect.center = (WIDTH / 2, HEIGHT + 200)
def show_go_screen():
    """Show the title/start screen and block until any key is released.

    Closing the window now quits pygame and exits the process; the original
    called pygame.quit() but kept looping, so the next clock/event call
    raised pygame.error ("video system not initialized").
    """
    screen.blit(background, background_rect)
    draw_text(screen, "START!!!", 64, WIDTH / 2, HEIGHT / 4)
    draw_text(screen, "Arrow keys move, Space to fire", 22,
              WIDTH / 2, HEIGHT / 2)
    # (also fixes the user-visible typo "Keywoed" -> "Keyword")
    draw_text(screen, "Pass Keyword To Begin Game", 18, WIDTH / 2, HEIGHT * 3 / 4)
    pygame.display.flip()
    waiting = True
    while waiting:
        clock.tick(FPS)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                raise SystemExit
            if event.type == pygame.KEYUP:
                waiting = False
# screen.fill(BLACK)
# screen.blit(background, background_rect)
# all_sprites.draw(screen)
# player = Player()
# all_sprites.add(player)
# for i in range(18):
# newmob()
'''
游戏开始循环体
'''
# Main game loop: handle input, update sprites, resolve collisions, draw.
score = 0
pygame.mixer.music.play(loops=-1)
game_over = True
running = True
while running:
    if game_over:
        show_go_screen()
        game_over = False
        # The sprite groups must be (re)created for every new game.  The
        # original left these four lines commented out, so all_sprites /
        # mobs / bullets / powerups were undefined and the very next line
        # crashed with NameError.
        all_sprites = pygame.sprite.Group()
        mobs = pygame.sprite.Group()
        bullets = pygame.sprite.Group()
        powerups = pygame.sprite.Group()
        player = Player()
        all_sprites.add(player)
        for i in range(8):
            newmob()
        score = 0
    clock.tick(FPS)
    pygame.display.flip()
    for event in pygame.event.get():
        # check for closing window
        if event.type == pygame.QUIT:
            running = False
        elif event.type == pygame.KEYDOWN:  # space bar fires a bullet
            if event.key == pygame.K_SPACE:
                player.shoot()
    all_sprites.update()
    # bullets vs mobs: both sprites are removed on contact
    hits = pygame.sprite.groupcollide(mobs, bullets, True, True)
    for hit in hits:
        score += 50 - hit.radius
        player.shoot_sound.play()
        player.expl_sounds.play()
        expl = Explosion(hit.rect.center, 'lg')
        all_sprites.add(expl)
        if random.random() > 0.9:
            # 10% chance a destroyed mob drops a power-up
            pow = Pow(hit.rect.center)
            all_sprites.add(pow)
            powerups.add(pow)
        newmob()
    # mobs vs player: mob is destroyed, player loses shield
    hits = pygame.sprite.spritecollide(player, mobs, True, pygame.sprite.collide_circle)
    for hit in hits:
        player.exp2_sounds.play()
        player.shield -= hit.radius * 2
        expl = Explosion(hit.rect.center, 'sm')
        all_sprites.add(expl)
        newmob()
        if player.shield <= 0:
            death_explosion = Explosion(player.rect.center, 'player')
            all_sprites.add(death_explosion)
            player.hide()
            player.lives -= 1
            player.shield = 100
    # player vs power-ups
    hits = pygame.sprite.spritecollide(player, powerups, True)
    for hit in hits:
        if hit.type == 'shield':
            player.shield += random.randrange(10, 30)
            if player.shield >= 100:
                player.shield = 100
        if hit.type == 'gun':
            player.powerup()
    # if the player died and the explosion has finished playing
    if player.lives == 0 and not death_explosion.alive():
        running = False
        game_over = True
    screen.fill(BLACK)
    screen.blit(background, background_rect)
    all_sprites.draw(screen)
    draw_text(screen, "score:", 18, WIDTH/2-40, 10)
    draw_text(screen, str(score), 18, WIDTH / 2, 10)
    draw_shield_bar(screen, 5, 5, player.shield)
    draw_lives(screen, WIDTH - 100, 5, player.lives, player.player_mini_img)
    pygame.display.flip()
pygame.quit()
|
from django.conf.urls import include, url
from django.contrib import admin
from django.conf.urls.static import static
from django.conf import settings
# URL routes.  NOTE: string view references ('app.views.func') require
# Django <= 1.9; these are kept as-is because the view modules are not
# visible from this file.
urlpatterns = [
    url(r'^$', 'home.views.home', name = 'home'),
    #these 3 are for editing the database info as staff
    url(r'^usermanage', 'home.views.users', name = 'manageusers'),
    url(r'^productmanage', 'home.views.products', name = 'manageproducts'),
    url(r'^ordermanage', 'home.views.orders', name = 'manageorders'),
    url(r'^createaccount', 'user.views.createaccount', name = 'createaccount'),
    url(r'^editaccount', 'user.views.editaccount', name = 'editaccount'),
    url(r'^signin', 'user.views.signin', name = 'signin'),
    url(r'^logout', 'user.views.logout_view', name = 'logout'),
    url(r'^product', 'product.views.product', name = 'product'),
    url(r'^alert', 'supply.views.alert', name = 'alert'),
    url(r'^manage', 'home.views.manage', name = 'manage'),
    url(r'^supply', 'supply.views.supply', name = 'supply'),
    url(r'^order/([0-9]{1,2})', 'order.views.order', name = 'order'),
    url(r'^cart', 'order.views.cart', name = 'cart'),
    url(r'^createproduct', 'product.views.createproduct', name = 'createproduct'),
    url(r'^admin/', include(admin.site.urls)),
]
# Serve static assets exactly once.  The original appended the STATIC_URL
# mapping unconditionally AND again under DEBUG, registering duplicate
# patterns; media files are still only served in DEBUG.
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
import pytest
from maha.constants import (
ALEF_SUPERSCRIPT,
ARABIC,
ARABIC_NUMBERS,
BEH,
EMPTY,
FATHA,
KASRA,
)
from maha.parsers.functions import parse
from maha.parsers.templates import Dimension, DimensionType
from maha.rexy import Expression, ExpressionGroup
from tests.utils import list_only_in_string
# Tests for maha.parsers.functions.parse.  The simple_text_input and
# multiple_tweets fixtures come from the suite's conftest.py (not in this
# file); the expected counts below are pinned to those fixture texts.
def test_parse_with_no_arguments(simple_text_input):
    with pytest.raises(ValueError):
        parse(simple_text_input)
def test_parse_with_empty_text():
    result = parse(EMPTY, english=True, numbers=True)
    assert result == []
def test_parse_with_return_type(simple_text_input):
    result = parse(simple_text_input, english=True)
    assert isinstance(result, list)
    assert all(isinstance(c, Dimension) for c in result)
def test_parse_correct_return_values(simple_text_input):
    # Checks the span positions and dimension metadata of each match,
    # not just the match count.
    result = parse(simple_text_input, arabic=True)
    assert isinstance(result, list)
    assert list_only_in_string(list("[]+") + ARABIC, result[0].expression.pattern)
    assert len(result) == 3
    assert result[1].start == 19
    assert result[1].end == 31
    assert result[2].dimension_type == DimensionType.ARABIC
def test_parse_with_one_argument(simple_text_input):
    result = parse(simple_text_input, arabic=True)
    assert isinstance(result, list)
def test_parse_with_more_than_one_argument(simple_text_input):
    result = parse(simple_text_input, arabic=True, english=True, emojis=True)
    assert len(result) == 12
def test_parse_with_more_than_one_argument_with_space(simple_text_input):
    # include_space merges adjacent matches, hence far fewer results.
    result = parse(
        simple_text_input, arabic=True, english=True, emojis=True, include_space=True
    )
    assert len(result) == 3
def test_parse_with_english(simple_text_input):
    result = parse(simple_text_input, english=True)
    assert len(result) == 9
def test_parse_with_arabic_letters(simple_text_input):
    result = parse(simple_text_input, arabic_letters=True)
    assert len(result) == 12
    assert isinstance(result, list)
    assert result[0].value == BEH
def test_parse_with_english_letters(simple_text_input):
    result = parse(simple_text_input, english_letters=True)
    assert len(result) == 9
    assert isinstance(result, list)
    assert result[0].value == "In"
def test_parse_with_english_small_letters(simple_text_input):
    result = parse(simple_text_input, english_small_letters=True)
    assert len(result) == 9
    assert isinstance(result, list)
    assert result[0].value == "n"
def test_parse_with_english_capital_letters(simple_text_input):
    result = parse(simple_text_input, english_capital_letters=True)
    assert len(result) == 6
    assert isinstance(result, list)
    assert result[0].value == "I"
def test_parse_with_numbers(simple_text_input):
    result = parse(simple_text_input, numbers=True)
    assert len(result) == 1
    assert isinstance(result, list)
    assert result[0].value == "1"
def test_parse_with_harakat(simple_text_input):
    result = parse(simple_text_input, harakat=True)
    assert len(result) == 12
    assert isinstance(result, list)
    assert result[0].value == KASRA
def test_parse_with_all_harakat(simple_text_input):
    result = parse(simple_text_input, all_harakat=True)
    assert len(result) == 12
    assert isinstance(result, list)
    assert result[7].value == FATHA + ALEF_SUPERSCRIPT
def test_parse_with_tatweel(simple_text_input):
    result = parse(simple_text_input, tatweel=True)
    assert len(result) == 0
def test_parse_with_punctuations(simple_text_input):
    result = parse(simple_text_input, punctuations=True)
    assert len(result) == 5
def test_parse_with_arabic_numbers(simple_text_input):
    result = parse(simple_text_input, arabic_numbers=True)
    assert len(result) == 0
def test_parse_with_english_numbers(simple_text_input):
    result = parse(simple_text_input, english_numbers=True)
    assert len(result) == 1
def test_parse_with_arabic_punctuations(simple_text_input):
    result = parse(simple_text_input, arabic_punctuations=True)
    assert len(result) == 1
def test_parse_with_english_punctuations(simple_text_input):
    result = parse(simple_text_input, english_punctuations=True)
    assert len(result) == 4
def test_parse_with_arabic_ligatures(multiple_tweets):
    result = parse(multiple_tweets, arabic_ligatures=True)
    assert len(result) == 1
def test_parse_with_arabic_hashtags(multiple_tweets):
    result = parse(multiple_tweets, arabic_hashtags=True)
    assert len(result) == 2
def test_parse_with_arabic_mentions(multiple_tweets):
    result = parse(multiple_tweets, arabic_mentions=True)
    assert len(result) == 0
def test_parse_with_emails(multiple_tweets):
    result = parse(multiple_tweets, emails=True)
    assert len(result) == 0
    result = parse("a@gmail.com", emails=True)
    assert len(result) == 1
def test_parse_with_english_hashtags(multiple_tweets):
    result = parse(multiple_tweets, english_hashtags=True)
    assert len(result) == 4
def test_parse_with_english_mentions(multiple_tweets):
    result = parse(multiple_tweets, english_mentions=True)
    assert len(result) == 0
def test_parse_with_hashtags(multiple_tweets):
    result = parse(multiple_tweets, hashtags=True)
    assert len(result) == 6
def test_parse_with_links(multiple_tweets):
    result = parse(multiple_tweets, links=True)
    assert len(result) == 0
    result = parse("google.com", links=True)
    assert len(result) == 1
def test_parse_with_mentions(multiple_tweets):
    result = parse(multiple_tweets, mentions=True)
    assert len(result) == 0
def test_parse_with_emojis(multiple_tweets):
    result = parse(multiple_tweets, emojis=True)
    assert len(result) == 4
def test_parse_with_custom_expressions(multiple_tweets):
    # A single user-supplied regex: capture Arabic digits after "الساعة".
    exp = r"الساع[ةه] ([{}]+)".format("".join(ARABIC_NUMBERS))
    result = parse(multiple_tweets, custom_expressions=Expression(exp))
    assert len(result) == 1
    assert isinstance(result, list)
    assert result[0].value == "١"
    assert result[0].expression.pattern == exp
def test_parse_with_mutiple_expressions(multiple_tweets):
    exp1 = r"الساع[ةه] ([{}]+)".format("".join(ARABIC_NUMBERS))
    exp2 = r"(\d+) days"
    result = parse(
        multiple_tweets,
        custom_expressions=ExpressionGroup(Expression(exp1), Expression(exp2)),
    )
    assert len(result) == 2
    assert isinstance(result, list)
    assert result[0].value == "١"
    assert result[1].value == "10"
def test_parse_raises_value_error_with_invalid_expression():
    with pytest.raises(ValueError):
        parse("test", custom_expressions=Expression(""))
def test_dimesion_output():
    # Pins the exact repr produced by Dimension.__str__.
    d = Dimension(Expression(" test "), "test", 1, 10, 17, DimensionType.GENERAL)
    assert str(d) == (
        "Dimension(body=test, value=1, start=10, end=17, "
        "dimension_type=DimensionType.GENERAL)"
    )
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from pathlib import Path
import sys
import time
from typing import List, Optional
def read_proc(pid: int) -> dict:
    """Parse /proc/<pid>/status into a dict of field name -> raw value string.

    Fixes two defects in the original: the file handle was never closed
    (bare open().readlines()), and ``line.split(':')`` raised ValueError on
    any line whose value itself contains a colon — partition splits only on
    the first one.
    """
    status_file = Path('/proc') / str(pid) / 'status'
    result = {}
    with status_file.open() as fh:
        for line in fh:
            key, _, value = line.partition(':')
            result[key.strip()] = value.strip()
    return result
def plot_it(results: "numpy.ndarray",
            out_file: Optional[Path],
            labels: List[str]):
    """Plot one subplot per metric column of *results* against time (col 0).

    Saves to *out_file* when given (headless Agg backend), otherwise shows
    an interactive window.  The original also imported numpy here without
    using it, and called legend() with no labeled artists (a warning); the
    plots are now labeled.
    """
    import matplotlib
    if out_file is not None:
        # Must select the non-interactive backend before pyplot is imported.
        matplotlib.use('Agg')
    from matplotlib import pyplot as pl
    cols = results.shape[1]
    for col, label in zip(range(1, cols), labels):
        pl.subplot(cols-1, 1, col)
        pl.plot(results[:, 0], results[:, col], label=label)
        pl.ylabel(label)
    pl.xlabel('time (seconds)')
    pl.legend()
    if out_file:
        pl.savefig(out_file)
    else:
        pl.show()
def main():
    """Sample /proc metrics for a pid at a fixed period, then optionally plot.

    Sampling stops on Ctrl-C (or on an error such as the target process
    exiting); the samples collected so far are then plotted when --plot was
    given.  The original caught only Exception, so a KeyboardInterrupt
    propagated and the plotting step never ran.
    """
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('-p', '--pid', type=int, help='process id')
    parser.add_argument('-m', '--metric', action='append', type=str, help='metric names')
    parser.add_argument('-s', '--period', type=float, help='sampling period (seconds)', default=0.1)
    parser.add_argument('-o', '--output', type=argparse.FileType('w'), help='output file', default=sys.stdout)
    parser.add_argument('-P', '--plot', nargs='?', help='plot it', default=False, const=True)
    args = parser.parse_args()
    metrics = args.metric
    period = args.period
    pid = args.pid
    output = args.output
    if args.plot and output == sys.stdout:
        print("Must output to file in order to plot", file=sys.stderr)
        sys.exit(1)
    try:
        while True:
            t = time.time()
            result = read_proc(pid)
            values = [result[metric].split()[0] for metric in metrics]
            output.write('\t'.join([str(t)] + values) + '\n')
            output.flush()
            time.sleep(period)
    except KeyboardInterrupt:
        # Normal way to stop sampling; fall through to plotting.
        pass
    except Exception as e:
        print(f"Failed with {e}", file=sys.stderr)
    if args.plot is not False:
        # --plot with no value means "show interactively"; with a value it
        # names the image file to save to.
        out_file = None if args.plot is True else args.plot
        import numpy as np
        results = np.genfromtxt(args.output.name)
        plot_it(results, out_file, labels=metrics)


if __name__ == '__main__':
    main()
|
from sqlalchemy import Column, Integer, String, Boolean
from init import db
class Msg(dict):
    """A plain dict shaped as the standard {'success', 'msg', 'obj'} payload."""
    def __init__(self, success=False, msg='', obj=None):
        super().__init__(success=success, msg=msg, obj=obj)
class User(db.Model):
    # Minimal account record: unique username plus password.
    # SECURITY NOTE(review): the password column is stored in plain text and
    # echoed back by to_json(); it should be hashed at rest and omitted from
    # the serialized form.
    __tablename__ = 'user'
    id = Column(Integer, primary_key=True, autoincrement=True)
    username = Column(String(255), unique=True, nullable=False)
    password = Column(String(255), nullable=False)
    def __init__(self, username, password):
        self.username = username
        self.password = password
    def to_json(self):
        # JSON-serializable view of the row (currently leaks the raw password).
        return {
            'username': self.username,
            'password': self.password,
        }
class Setting(db.Model):
    # One key/value application setting, rendered by the admin UI according
    # to value_type; need_restart flags settings that only take effect after
    # the service restarts.
    __tablename__ = 'setting'
    id = Column(Integer, primary_key=True, autoincrement=True)
    key = Column(String(255), unique=True, nullable=False, default='')      # machine name
    name = Column(String(255), unique=True, nullable=False, default='')     # display name
    value = Column(String(4096), nullable=False, default='')
    value_type = Column(String(50), nullable=False, default='text')         # UI widget hint
    tip = Column(String(255), nullable=False, default='')                   # help text
    need_restart = Column(Boolean, nullable=False, default=True)
    def __init__(self, key, name, value, value_type, tip='', need_restart=False):
        # NOTE(review): the constructor default for need_restart (False)
        # differs from the column default (True) — confirm which is intended.
        self.key = key
        self.name = name
        self.value = value
        self.value_type = value_type
        self.tip = tip
        self.need_restart = need_restart
    def to_json(self):
        # JSON-serializable view of the row.
        return {
            'id': self.id,
            'key': self.key,
            'name': self.name,
            'value': self.value,
            'value_type': self.value_type,
            'tip': self.tip,
            'need_restart': self.need_restart,
        }
|
import unittest
import copy
import datetime
from anchore_engine.db import ImagePackageVulnerability
class TestImagePackageVulnerabilityHashing(unittest.TestCase):
    # Verifies that ImagePackageVulnerability equality and hashing are
    # driven by its identity columns: deep copies compare equal, while
    # changing pkg_version or pkg_user_id breaks both equality and
    # set-based de-duplication.
    def test_cmp(self):
        c1 = ImagePackageVulnerability()
        c1.pkg_name = 'testpkg1'
        c1.pkg_version = '1.0'
        c1.pkg_arch = 'x86'
        c1.pkg_type = 'rpm'
        c1.pkg_image_id = 'image123'
        c1.pkg_user_id = '0'
        c1.vulnerability_namespace_name = 'centos:6'
        c1.vulnerability_id = 'CVE-2016-123'
        c1.created_at = datetime.datetime.utcnow()
        c2 = copy.deepcopy(c1)
        self.assertEqual(c1, c2)
        c3 = copy.deepcopy(c1)
        self.assertEqual(c1, c3)
        c4 = copy.deepcopy(c1)
        self.assertEqual(c1, c4)
        # Mutate identity fields: the copies must no longer compare equal.
        c3.pkg_version = '1.1'
        c4.pkg_user_id = '1'
        self.assertEqual(c1, c2)
        self.assertNotEqual(c1, c4)
        self.assertNotEqual(c1, c3)
        # c1 and c2 hash identically, so the set collapses them to one entry.
        self.assertListEqual(list({c1, c2, c3}), list({c1, c3}))
        print(('Set: {}'.format({c1, c2, c3})))
if __name__ == '__main__':
    # The original instantiated the TestCase directly and called t.run():
    # TestCase() defaults to a 'runTest' method that does not exist, and
    # run() without a result object discards any outcome.  unittest.main()
    # discovers and runs the tests properly.
    unittest.main()
import numpy as np
import astropy.io.fits as fitsio
import matplotlib.pyplot as plt
import matplotlib.patheffects as PathEffects
import mk_sample
filters = ['a','w','u','b','v','i','z','d','j','s','h']
def mk_stamps(drop_filt,sample_type,cut_type='no_cut',s=67):
    # Build a JPEG contact sheet of s-pixel cutout "stamps" for every object
    # in the selected sample: one column per filter plus a final
    # segmentation-map column, up to 50 objects per figure column.
    # NOTE: this module is Python 2 (print statements, integer division on
    # `len(sample) / 50` and `s/2`).
    # NOTE(review): if cut_type is not one of the three handled values,
    # `sample` is never assigned and the function raises NameError — confirm
    # callers only pass the three literals used in __main__.
    if cut_type=='neg_nuv':
        # Objects undetected in all three NUV bands (-99 sentinel magnitudes).
        sample = mk_sample.mk_sample(drop_filt,sample_type=sample_type,return_all=True,return_catalog=True)
        sample = sample[(sample['MAG_B_F225W']==-99.) & (sample['MAG_B_F275W']==-99.) & (sample['MAG_B_F336W']==-99.)]
        sample = np.sort(sample,order='MAG_B_F435W')[::-1]
        label = 'MAG_F435W'
        fname = 'stamps/stamps_%s%s_nnuv.jpg' % (sample_type[:4],drop_filt[1:-1])
        print "NUV weirdness:", len(sample)
        if len(sample)==0: return
    elif cut_type=='neg_hlr':
        # Objects with an unphysical negative half-light radius.
        sample = mk_sample.mk_sample(drop_filt,sample_type=sample_type,return_all=True,return_catalog=True)
        sample = sample[sample['HLR_F435W']<0]
        sample = np.sort(sample,order='HLR_F435W')
        label = 'HLR_F435W'
        fname = 'stamps/stamps_%s%s_nhlr.jpg' % (sample_type[:4],drop_filt[1:-1])
        print "Negative HLR:", len(sample)
        if len(sample)==0: return
    elif cut_type=='no_cut':
        sample = mk_sample.mk_sample(drop_filt,sample_type=sample_type,return_all=True)
        sample = np.sort(sample,order='M_1500')
        label = 'M_1500'
        fname = 'stamps/stamps_%s%s.jpg' % (sample_type[:4],drop_filt[1:-1])
        print "Full sample:", len(sample)
    # Split the sample into figure columns of at most 50 objects each
    # (Python 2 integer division).
    ncols = (len(sample) / 50) + 1
    isplit = (np.arange(ncols)*50)[1:]
    fig,axes = plt.subplots(1,ncols,figsize=(ncols*(len(filters)+1),min(len(sample),50)),dpi=50)
    fig.subplots_adjust(left=0,right=1,bottom=0,top=1,wspace=0,hspace=0)
    if ncols==1: axes = [axes,]
    for i,filt in enumerate(filters):
        img = fitsio.getdata('/data/highzgal/PUBLICACCESS/UVUDF/simulations/orig_run/data/%s.fits' % filt)
        for ax,_sample in zip(axes,np.split(sample,isplit)):
            for j,entry in enumerate(_sample):
                # Cut an s x s pixel stamp centred on the object, sigma-clip
                # it, and derive display limits from the central region.
                xc,yc = int(entry['X']),int(entry['Y'])
                stamp = img[yc-s/2:yc+s/2,xc-s/2:xc+s/2]
                med,std = np.median(stamp), np.std(stamp)
                _stamp = np.clip(stamp,med-5*std,med+5*std)
                med,std = np.median(_stamp[s/5:4*s/5,s/5:4*s/5]),np.std(_stamp[s/5:4*s/5,s/5:4*s/5])
                vmin, vmax = med-3*std, med+3*std
                extent = [s*i,s*(i+1),s*j,s*(j+1)]
                ax.imshow(stamp,cmap=plt.cm.Greys_r,vmin=vmin,vmax=vmax,interpolation='none',extent=extent)
                if i==0:
                    # Object ID (top-left) and the sort label (bottom-left);
                    # red outline marks entries with SAMPLE_FLAG != 1.
                    ax.text(s*i+2,s*j +2,"%i" %entry['ID'], color='w',va='top', ha='left',
                            fontsize=14,fontweight=600,path_effects=[PathEffects.withStroke(linewidth=2,foreground="k" if entry['SAMPLE_FLAG']==1 else "r")])
                    ax.text(s*i+2,s*(j+1)-2,"%.2f"%entry[label],color='w',va='bottom',ha='left',
                            fontsize=14,fontweight=600,path_effects=[PathEffects.withStroke(linewidth=2,foreground="k" if entry['SAMPLE_FLAG']==1 else "r")])
    # Final column: the segmentation map, with the central object drawn at
    # 0.5 and any neighbouring segments at distinct grey levels.
    seg = fitsio.getdata('/data/highzgal/PUBLICACCESS/UVUDF/simulations/orig_run/cpro/udf_run_merge_template/det_segm.fits')
    for ax,_sample in zip(axes,np.split(sample,isplit)):
        for j,entry in enumerate(_sample):
            xc,yc = int(entry['X']),int(entry['Y'])
            id0 = seg[yc,xc]
            stamp = seg[yc-s/2:yc+s/2,xc-s/2:xc+s/2].astype(float)
            idx = np.unique(stamp)
            idx = idx[(idx!=0) & (idx!=id0)]
            for ix,ic in zip(idx,[0.9,0.2,0.7,0.35]): stamp[stamp==ix] = ic
            stamp[stamp==id0] = 0.5
            extent = [s*len(filters),s*(len(filters)+1),s*j,s*(j+1)]
            ax.imshow(stamp,cmap=plt.cm.hot,vmin=0,vmax=1,interpolation='none',extent=extent)
        # Grid lines separating the stamps.
        ax.vlines(s*(np.arange(len(filters)+2)),0,s*len(_sample), color='k',lw=2,alpha=0.8)
        ax.hlines(s*(np.arange(len(_sample)+1)),0,s*(len(filters)+1),color='k',lw=2,alpha=0.8)
    for ax in axes:
        ax.axis("off")
        ax.set_xlim(0,s*(len(filters)+1))
        if ncols>1: ax.set_ylim(s*50,0)
        else: ax.set_ylim(s*len(sample),0)
    fig.savefig(fname)
if __name__ == '__main__':
    # Regenerate every stamp sheet: all three filters x both selection
    # methods x all three cut types.
    for drop_filt in ['f225w','f275w','f336w']:
        for sample_type in ['dropout','photoz']:
            for cut_type in ['neg_nuv','neg_hlr','no_cut']:
                mk_stamps(drop_filt=drop_filt,sample_type=sample_type,cut_type=cut_type)
# Задание - 1
# Найти сумму и произведение цифр трехзначного числа, которое вводит пользователь.
#
# Блок - схема
# https://drive.google.com/file/d/1RI4u-DGMvbfC7f_qaUMVbSWK1Pmy7F7Q/view?usp=sharing
# Prompt for a three-digit number, then report the sum and the product of
# its digits (digits are peeled off with divmod instead of repeated // and %).
print('Введите трехзначное число')
a = int(input('a = '))
if 99 < a < 1000:
    hundreds, remainder = divmod(a, 100)
    tens, units = divmod(remainder, 10)
    summ = hundreds + tens + units
    prod = hundreds * tens * units
    print(f'Сумма цифр числа равна - {summ}, произведение цифр - {prod}')
else:
    print('Число не верное!')
# Generated by Django 3.1.3 on 2020-12-03 18:26
from django.db import migrations, models
class Migration(migrations.Migration):
    # Renames the typo'd listing field `red_room` -> `bed_room` and
    # restricts `sale_type` to the rent/sale choice pair.
    dependencies = [
        ('listing', '0004_auto_20201129_1211'),
    ]
    operations = [
        migrations.RenameField(
            model_name='listing',
            old_name='red_room',
            new_name='bed_room',
        ),
        migrations.AlterField(
            model_name='listing',
            name='sale_type',
            field=models.CharField(choices=[('rent', 'Rent'), ('sale', 'Sale')], default='rent', max_length=40),
        ),
    ]
|
import os
import pytest
import six
from .find import find_all, find_one
from ..testing.utils import apply_fs
# Tests for the find_all/find_one config-file locators; apply_fs materializes
# a dict-described directory tree under pytest's tmpdir fixture.
def test_find_no_files(tmpdir):
    with tmpdir.as_cwd():
        paths = list(find_all(os.getcwd(), ('readthedocs.yml',)))
        assert len(paths) == 0
def test_find_at_root(tmpdir):
    apply_fs(tmpdir, {'readthedocs.yml': '', 'otherfile.txt': ''})
    base = str(tmpdir)
    paths = list(find_all(base, ('readthedocs.yml',)))
    assert paths == [
        os.path.abspath(os.path.join(base, 'readthedocs.yml'))
    ]
def test_find_nested(tmpdir):
    apply_fs(tmpdir, {
        'first': {
            'readthedocs.yml': '',
        },
        'second': {
            'confuser.txt': 'content',
        },
        'third': {
            'readthedocs.yml': 'content',
            'Makefile': '',
        },
    })
    apply_fs(tmpdir, {'first/readthedocs.yml': ''})
    base = str(tmpdir)
    paths = list(find_all(base, ('readthedocs.yml',)))
    # Order is not guaranteed across directories, hence the set comparison.
    assert set(paths) == set([
        str(tmpdir.join('first', 'readthedocs.yml')),
        str(tmpdir.join('third', 'readthedocs.yml')),
    ])
def test_find_multiple_files(tmpdir):
    apply_fs(tmpdir, {
        'first': {
            'readthedocs.yml': '',
            '.readthedocs.yml': 'content',
        },
        'second': {
            'confuser.txt': 'content',
        },
        'third': {
            'readthedocs.yml': 'content',
            'Makefile': '',
        },
    })
    apply_fs(tmpdir, {'first/readthedocs.yml': ''})
    base = str(tmpdir)
    # Within one directory, results follow the order of the filenames tuple.
    paths = list(find_all(base, ('readthedocs.yml',
                                 '.readthedocs.yml')))
    assert paths == [
        str(tmpdir.join('first', 'readthedocs.yml')),
        str(tmpdir.join('first', '.readthedocs.yml')),
        str(tmpdir.join('third', 'readthedocs.yml')),
    ]
    paths = list(find_all(base, ('.readthedocs.yml',
                                 'readthedocs.yml')))
    assert paths == [
        str(tmpdir.join('first', '.readthedocs.yml')),
        str(tmpdir.join('first', 'readthedocs.yml')),
        str(tmpdir.join('third', 'readthedocs.yml')),
    ]
# Python 2 only: str.decode / the `unicode` builtin do not exist on Python 3.
@pytest.mark.skipif(not six.PY2, reason='Only for python2')
@pytest.mark.xfail(raises=UnicodeDecodeError)
def test_find_unicode_path(tmpdir):
    base_path = os.path.abspath('integration_tests/bad_encode_project')
    assert isinstance(base_path, str)
    unicode_base_path = base_path.decode('utf-8')
    assert isinstance(unicode_base_path, unicode)
    path = find_one(unicode_base_path, ('readthedocs.yml',))
    assert path == ''
    assert False, 'The UnicodeDecodeError was not raised'
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# (fixed shebang: the original read "#! usr/bin/env python" — a relative
# path the kernel cannot resolve when the file is executed directly)
'test'
__author__ = 'HUSKY'
import sys,os
import time
print('just test...')
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, unicode_literals
from contextlib import closing
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.event import listens_for
from psycopg2.extensions import new_type, register_type
from last_fm.app import app
__all__ = [b"db"]
db = SQLAlchemy(app)
@listens_for(db.engine, "first_connect")
def register_citext_type(dbapi_con, connection_record):
    # On the first raw DB-API connection, teach psycopg2 to decode
    # PostgreSQL CITEXT columns as unicode strings.  This module is
    # Python 2 — hence the `unicode` builtin and the b"" literals.
    def cast_citext(in_str, cursor):
        if in_str == None:
            return None
        return unicode(in_str, cursor.connection.encoding)
    with closing(dbapi_con.cursor()) as c:
        c.execute(b"SELECT pg_type.oid FROM pg_type WHERE typname = 'citext'")
        citext_oid = c.fetchone()
        if citext_oid != None:
            # NOTE(review): fetchone() returns a 1-tuple; psycopg2.new_type
            # accepts a tuple of OIDs, so passing the row directly works —
            # confirm this is intentional rather than citext_oid[0].
            citext_type = new_type(citext_oid, b"CITEXT", cast_citext)
            register_type(citext_type)
|
import mgrs
from RPLCD.i2c import CharLCD
import time
import gps
# Listen on port 2947 (gpsd) of localhost
session = gps.gps("localhost", "2947")
session.stream(gps.WATCH_ENABLE | gps.WATCH_NEWSTYLE)
#lcd definitions
lcd=CharLCD(i2c_expander='PCF8574', address=0x27)
sample='$GPGGA,123519,4807.038,N,01131.000,E,1,08,0.9,545.4,M,46.9,M,,*47'
def charposition(string, char):
    """Return (and print) the list of every index at which *char* occurs in *string*."""
    pos = [index for index, current in enumerate(string) if current == char]
    print(pos)
    return pos
# Main loop: read TPV (position) reports from gpsd, convert each fix to an
# MGRS grid reference, and show it on the I2C character LCD.
m = mgrs.MGRS()  # the converter carries no per-fix state; build it once
while True:
    try:
        report = session.next()
        if report['class'] == 'TPV':
            latVal = report.lat
            lonVal = report.lon
            d = m.toMGRS(latVal, lonVal)
            print(d)
            lcd.write_string('Tgt loc: ')
            lcd.cursor_pos = (1, 0)
            lcd.write_string(d.decode("ascii"))
    except KeyError:
        # Report lacked the expected fields; skip it.
        pass
    except KeyboardInterrupt:
        quit()
    except StopIteration:
        # gpsd went away.  The original set session = None and kept
        # looping, which then crashed on session.next() with
        # AttributeError; stop the loop instead.
        print("GPSD has terminated")
        break
|
from django.db import models
class Team(models.Model):
    """A staff member shown on the site's team page, with social links."""
    first_name = models.CharField(max_length=32)
    last_name = models.CharField(max_length=32)
    photo = models.ImageField(upload_to='photos/%Y/%m/%d')
    designation = models.CharField(max_length=32)
    facebook_url = models.URLField(max_length=255)
    Instagram_url = models.URLField(max_length=255)
    linkedin_url = models.URLField(max_length=255)
    created_date = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        # Display as "First Last".
        return f"{self.first_name} {self.last_name}"
#!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
python_preamble:
- import: base64
- import: re
- import: google.appengine.ext.bulkload.transform
- import: google.appengine.ext.bulkload.bulkloader_wizard
- import: google.appengine.ext.db
- import: google.appengine.api.datastore
- import: google.appengine.api.users
transformers:
- kind: Offer
connector: csv
connector_options:
property_map:
- property: __key__
external_name: key
export_transform: transform.key_id_or_name_as_string
- property: description
external_name: description
# Type: String Stats: 2 properties of this type in this kind.
- property: title
external_name: title
# Type: String Stats: 2 properties of this type in this kind.
- property: imageUrl
external_name: imageUrl
- kind: Place
connector: csv
connector_options:
property_map:
- property: __key__
external_name: key
export_transform: transform.key_id_or_name_as_string
- property: address
external_name: address
# Type: String Stats: 6 properties of this type in this kind.
- property: location
external_name: location
# Type: GeoPt Stats: 6 properties of this type in this kind.
import_transform: google.appengine.api.datastore_types.GeoPt
- property: name
external_name: name
# Type: String Stats: 6 properties of this type in this kind.
- property: placeId
external_name: placeId
# Type: String Stats: 6 properties of this type in this kind.
- kind: Recommendation
connector: csv
connector_options:
property_map:
- property: __key__
external_name: key
export_transform: transform.key_id_or_name_as_string
- property: description
external_name: description
# Type: String Stats: 4 properties of this type in this kind.
- property: title
external_name: title
# Type: String Stats: 4 properties of this type in this kind.
- property: imageUrl
external_name: imageUrl
- property: expiration
external_name: expiration
import_transform: transform.import_date_time('%m/%d/%Y')
- kind: Product
connector: csv
connector_options:
property_map:
- property: __key__
external_name: key
export_transform: transform.key_id_or_name_as_string
- property: id
external_name: id
# Type: String Stats: 6 properties of this type in this kind.
- property: description
external_name: description
- property: category
external_name: category
- property: weight
external_name: weight
- property: price
external_name: price
- property: barcode_format
external_name: barcode_format
- property: barcode_content
external_name: barcode_content
- property: name
external_name: name
# Type: String Stats: 6 properties of this type in this kind.
- property: storeid
external_name: storeid
# Type: String Stats: 6 properties of this type in this kind.
- kind: HistoryItem
connector: csv
connector_options:
property_map:
- property: __key__
external_name: key
export_transform: transform.key_id_or_name_as_string
- property: userEmail
external_name: userEmail
- property: productName
external_name: productName
- property: productid
external_name: productid
- property: purchaseDate
external_name: purchaseDate
import_transform: transform.import_date_time('%Y-%m-%d %H:%M:%S')
- property: purchasePrice
external_name: purchasePrice
- property: placeName
external_name: placeName
|
#!/usr/bin/env python
# coding: utf-8
import os
current_dir = os.path.dirname(os.path.realpath(__file__))
def get_certs():
    """Return (private_key_pem, public_key_pem) file contents as strings.

    Uses context managers so both handles are closed promptly; the original
    leaked two open file objects on every call.
    """
    private_path, public_path = get_certs_path()
    with open(private_path) as private_file, open(public_path) as public_file:
        return private_file.read(), public_file.read()
def get_certs_path():
    """Return (private_key_path, public_key_path) inside the local certs dir."""
    certs_dir = os.path.join(current_dir, "certs")
    return (
        os.path.join(certs_dir, "ali_private_key.pem"),
        os.path.join(certs_dir, "ali_public_key.pem"),
    )
|
"""
Given n pairs of parentheses, write a function to generate all combinations of well-formed parentheses.
"""
class Solution:
    def generateParenthesis(self, n: int) -> list:
        """Return every string of n well-formed parenthesis pairs."""
        res = []
        self.helper('', res, 0, 0, n)
        return res

    def helper(self, cur, res, left, right, n):
        """Extend prefix *cur* (with *left* '(' and *right* ')' used so far).

        The original never bounded `left` by n, so the all-'(' branch
        recursed forever (RecursionError), and it appended any prefix the
        moment right == n — emitting invalid strings such as ")" for n=1.
        """
        if right == n:
            # left == right == n here, so cur is complete and balanced.
            res.append(cur)
            return
        if left < n:
            self.helper(cur + '(', res, left + 1, right, n)
        if right < left:
            # Only close a parenthesis that is actually open.
            self.helper(cur + ')', res, left, right + 1, n)
|
import pandas as pd
import glob
import datetime
# Read every CSV in the working directory into one frame.  The source files
# are ';'-delimited but are ingested as single-field rows, so the fields are
# re-joined into one string and split apart again below.
# (Stray garbage identifier lines in the original — e.g. `adsfsdfsdfsdf` —
# were bare expressions that raised NameError at runtime; removed.)
all_files = glob.glob("*.csv")
df = pd.concat((pd.read_csv(f) for f in all_files))

# Collapse each row into one string, then split on ';' into real columns.
df['concat'] = pd.Series(df.fillna('').values.tolist()).str.join('')
df = df[["concat"]]
df_1 = df['concat'].str.split(';', 13, expand=True)
df_1 = df_1[[0, 11, 12]]

# Parse timestamps, sort chronologically, and index by time: the label-based
# time slice below raised on the original integer RangeIndex.
df_1[0] = pd.to_datetime(df_1[0])
df_1 = df_1.sort_values(by=[0], ascending=True)
df_1 = df_1.set_index(0)

# str.split yields strings; coerce columns 11 and 12 to numbers before any
# arithmetic (unparseable cells become 0).
df_1[11] = pd.to_numeric(df_1[11], errors='coerce').fillna(0)
df_1[12] = pd.to_numeric(df_1[12], errors='coerce').fillna(0)

# Per-sample calorie estimate over the window of interest.
# NOTE(review): the constants (2*pi/60, 0.05, 0.860421, 4) encode the
# original author's formula; their physical meaning is not documented here.
calories_list = []
df_betTime = df_1["2019-03-28 13:11:00":"2019-03-28 13:12:10"]
for _, row in df_betTime.iterrows():
    calories = ((row[11] * row[12] * 2 * 3.14) / 60) * 0.05 * 0.860421 * 4
    calories_list.append(calories)
print(calories_list)
|
import numpy as np
import networkx as nx
import scipy.stats as stats
from point_process import *
from graph_manip import *
import sys
def let_hawkes_fly(target, alpha, maxgen=100):
    # Rank the nodes of the <target> graph by simulated Hawkes-process
    # activity and write "node score" pairs, highest first, to
    # <target>/rank_node_hawkes.txt.
    # NOTE(review): `pd` (pandas) is never imported by name in this file; it
    # is presumably re-exported by the `from point_process import *` /
    # `from graph_manip import *` star imports — confirm.
    G = nx.read_edgelist('%s/%s.txt' % (target, target), nodetype=int)
    # lambda_max / exact_hawkes_maxgen_all come from the star imports above.
    theta = alpha / lambda_max(G)
    T = exact_hawkes_maxgen_all(G, maxgen, theta)
    #T = exact_hawkes_all(G, theta)
    nodes = list(G.nodes)
    df = pd.DataFrame(nodes, columns=['node'])
    df['hawkes'] = T
    df = df.sort_values(by='hawkes', ascending=False)
    df.to_csv('%s/rank_node_hawkes.txt' % (target), header=False, index=False, sep=' ')
if __name__ == "__main__":
    # Expect: target, alpha, maxgen (3 args + script name).
    if len(sys.argv) != 4:
        print("Error: not enough args")
        print("usage: python3 single_hawkes.py target alpha maxgen")
        # Example updated to show all three required arguments.
        print("ex: python3 single_hawkes.py socfb-Swarthmore42 0.99 100")
        exit()
    target = sys.argv[1]
    # np.float / np.int were deprecated in NumPy 1.20 and removed in
    # 1.24; the plain builtins are what they aliased anyway.
    alpha = float(sys.argv[2])
    maxgen = int(sys.argv[3])
    let_hawkes_fly(target, alpha, maxgen)
|
#!/user/bin/env python3
#---------------------------------------------------------------------#
# Script Name Ops Challenge 12
# Author Kimberley Cabrera-Boggs
# Date of last revision October 20, 2020
# Description of purpose Network Security Tool w/Scapy Part 2 of 3
#---------------------------------------------------------------------#
# ,-~~-.___.
# / | ' \
# ( ) 0
# \_/-, ,----'
# ==== //
# / \-'~; /~~~(O)
# / __/~| / |
# =( _____| (_________|
# Part 2 of 3
from ipaddress import IPv4Network
import random
from scapy.all import ICMP, IP, sr1, TCP
# IP range to ping
network = "192.168.117.0/24"

# Addresses
addresses = IPv4Network(network)
live_count = 0

# Bug fix: the original referenced `host` without any loop, and its
# membership test ran the ping only for the network/broadcast addresses
# instead of skipping them.
for host in addresses:
    if host in (addresses.network_address, addresses.broadcast_address):
        # Ignore network and broadcast addresses
        continue

    # ICMP ping request, get response host address
    resp = sr1(
        IP(dst=str(host))/ICMP(),
        timeout=2,
        verbose=0,
    )

    if resp is None:
        print(f"{host} is down or unresponsive.")
    elif (
        # ICMP type 3 with these codes means the target is reachable but
        # administratively filtering ICMP.
        int(resp.getlayer(ICMP).type)==3 and
        int(resp.getlayer(ICMP).code) in [1,2,3,9,10,13]
    ):
        print(f"{host} is actively blocking ICMP.")
    else:
        print(f"{host} is actively responding.")
        live_count += 1

print(f"{live_count}/{addresses.num_addresses} the hosts are online.")
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-17 16:06
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema: Artist, Playlist, Song and the playlist-songs M2M.

    Auto-generated by Django 1.11; structure intentionally left as
    generated.
    """

    initial = True

    dependencies = [
        # Playlist.owner references the configured (swappable) user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Artist',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('hash', models.CharField(max_length=255)),
                ('name', models.CharField(max_length=255)),
            ],
        ),
        migrations.CreateModel(
            name='Playlist',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('hash', models.CharField(max_length=255)),
                ('title', models.CharField(max_length=255)),
                ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='playlists', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Song',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('hash', models.CharField(max_length=255)),
                ('title', models.CharField(max_length=255)),
                ('artist', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='songs', to='player.Artist')),
            ],
        ),
        # M2M added after Song exists so the through table can be built.
        migrations.AddField(
            model_name='playlist',
            name='songs',
            field=models.ManyToManyField(blank=True, to='player.Song'),
        ),
    ]
|
from django.db import models
from datetime import datetime
class Realtor(models.Model):
    """A real-estate agent shown on the site (verbose names are Persian)."""
    name = models.CharField(max_length=200, verbose_name="اسم")
    photo = models.ImageField(upload_to='photos/%Y/%m/%d/', verbose_name="تصویر")
    description = models.TextField(blank=True, verbose_name="توضیحات")
    phone = models.CharField(max_length=20, verbose_name="شماره تماس")
    # NOTE(review): plain CharField — EmailField would add validation,
    # but switching it now would change form behavior; left unchanged.
    email = models.CharField(max_length=50, verbose_name="ایمیل")
    is_mvp = models.BooleanField(default=False, verbose_name="نمایش")
    # Callable default: evaluated at each save, not at migration time.
    # NOTE(review): datetime.now is timezone-naive — confirm USE_TZ.
    hire_date = models.DateTimeField(default=datetime.now, blank=True, verbose_name="تاریخ ثبت")

    def __str__(self):
        return self.name

    class Meta:
        verbose_name_plural = "مشاورین املاک"
        verbose_name = "مشاوره املاک"
# coding=utf-8
from pymongo import MongoClient
from lxml import html
import requests
# Zhaopin job-search results URL (query filters pre-baked in).
base_url = "https://sou.zhaopin.com/?pageSize=60&jl=664&in=10100&jt=23,160000,045&kt=3"
headers = {
    # Desktop Chrome UA so the site serves the regular HTML listing.
    'User-Agent':'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'
}
# connect=False defers the actual connection until first use — needed
# because this client is shared across multiprocessing workers below.
client = MongoClient(connect=False)
db = client['zhilian']
collection = db['zhilian']
def get_page(page=1):
    """Scrape one result page and store each job posting in MongoDB.

    :param page: page indicator appended to the base URL.
    """
    now_url = '{}{}'.format(base_url,str(page))
    selector = html.fromstring(requests.get(now_url,headers=headers).text)
    # Job titles.
    jobname = selector.xpath('//table[@class="newlist"]/tr/td/div/a/text()')
    # Drop the site's extra "urgent hiring" filler entries (nbsp).
    jobname = filter(lambda x:x!='\xa0',jobname)
    # Company names.
    gsmc = selector.xpath('//table[@class="newlist"]/tr/td[@class="gsmc"]/a/text()')
    # Monthly salary.
    zwyx = selector.xpath('//table[@class="newlist"]/tr/td[@class="zwyx"]/text()')
    # Work location.
    gzdd = selector.xpath('//table[@class="newlist"]/tr/td[@class="gzdd"]/text()')
    for job, gs, yx, dd in zip(jobname, gsmc, zwyx, gzdd):
        # Renamed from `dict` — it shadowed the builtin.
        record = {
            "职位名称": job,
            "公司名称": gs,
            "月薪": yx,
            "工作地点": dd
        }
        # Collection.insert() was deprecated and removed in PyMongo 4;
        # insert_one is the supported one-document API.
        collection.insert_one(record)
        print(record)
        print(u'插入数据库成功')
def run(i):
    # Worker entry point for the process pool: scrape page `i`.
    get_page(i)
from multiprocessing import Pool
if __name__ == '__main__':
    # Two workers: the bottleneck is the network, not CPU.
    pool = Pool(2)
    for page_no in range(1, 91):
        pool.apply_async(run, (page_no,))
    pool.close()
    pool.join()
#!/usr/bin/env python
import roslib
roslib.load_manifest('beginner_tutorials')
import rospy
from geometry_msgs.msg import Twist
import curses.wrapper
import curses
def talker(screen):
    """Publish arrow-key-driven velocity commands to /RosAria/cmd_vel.

    `screen` is a curses window; getch() blocks until a key arrives.
    Any unrecognized key publishes a zero Twist (stop).
    """
    pub = rospy.Publisher('/RosAria/cmd_vel', Twist)
    rospy.init_node('keyboard_vel_cmd')
    while not rospy.is_shutdown():
        key = screen.getch()
        screen.refresh()
        # Fresh zero-initialized message each cycle; set only the
        # component the pressed key controls.
        cmd = Twist()
        if key == curses.KEY_UP:
            cmd.linear.x = 0.2
        elif key == curses.KEY_DOWN:
            cmd.linear.x = -0.2
        elif key == curses.KEY_RIGHT:
            cmd.angular.z = -0.2
        elif key == curses.KEY_LEFT:
            cmd.angular.z = 0.2
        pub.publish(cmd)
        # rospy.sleep(0.1)
if __name__=='__main__':
    try:
        curses.wrapper(talker)
    except rospy.ROSInterruptException:
        # The original used the Python-2-only print statement; the
        # function form below runs identically on Python 2 and 3.
        print("exception raised")
        pass
|
import os
import sys
from credentials import SPOTIFY_AUTH_TOKENS
import spotipy
import webbrowser
import spotipy.util as util
from spotipy.oauth2 import SpotifyOAuth
import json
from json.decoder import JSONDecodeError
# OAuth scopes needed to read the user profile and control playback.
SCOPE = 'user-read-private user-read-playback-state user-modify-playback-state'
SPOTIPY_CLIENT_ID = SPOTIFY_AUTH_TOKENS["Client ID"]
SPOTIPY_CLIENT_SECRET = SPOTIFY_AUTH_TOKENS["Client Secret"]
SPOTIPY_REDIRECT_URI = SPOTIFY_AUTH_TOKENS["Redirect URI"]
USERNAME = "atomicknight002"
UID = 26816222

# try:
#     token = util.prompt_for_user_token(UID, scope)
# except:
#     os.remove(f".cache-{UID}")
#     token = util.prompt_for_user_token(UID, scope)

# Interactive OAuth flow; may open a browser window on first run.
token = util.prompt_for_user_token(username=USERNAME,
                                   scope=SCOPE,
                                   client_id=SPOTIPY_CLIENT_ID,
                                   client_secret=SPOTIPY_CLIENT_SECRET,
                                   redirect_uri=SPOTIPY_REDIRECT_URI)
sp = spotipy.Spotify(token)
# Snapshot of the user's available playback devices at import time.
devices = sp.devices()
def next_song(text = None):
    """Skip to the next track on the active device."""
    sp.next_track()

def previous_song(text=None):
    """Return to the previous track."""
    sp.previous_track()

def pause(text=None):
    """Pause playback."""
    sp.pause_playback()

def play(text=None):
    """Resume playback.

    Bug fix: the original said `sp.start_playback` without parentheses,
    which only referenced the method and never called it.
    """
    sp.start_playback()

def mute(text=None):
    """Set volume to 0%."""
    sp.volume(0)

def unmute(text=None):
    """Restore volume to 50%."""
    sp.volume(50)

def fullVolume(text=None):
    """Set volume to 100%."""
    sp.volume(100)
if __name__ == "__main__":
    # Show the playback devices fetched during module setup.
    print(devices)
|
rows = int(input("How many rows?: "))

# Right-aligned star triangle: each successive row is indented one more
# space and carries one fewer star than the row above.
indent = 0
for star_count in range(rows, 0, -1):
    print(" " * indent + "* " * star_count)
    indent += 1
|
# Generated by Django 3.2.6 on 2021-10-05 00:18
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make Atividade.arquivo optional (blank/null with empty default).

    Auto-generated by Django 3.2.6; left structurally as generated.
    """

    dependencies = [
        ('cadastros', '0008_alter_atividade_arquivo'),
    ]

    operations = [
        migrations.AlterField(
            model_name='atividade',
            name='arquivo',
            field=models.FileField(blank=True, default='', null=True, upload_to='pdf/'),
        ),
    ]
|
"""Main Run File"""
import robot_library
def main():
    """Main Routine: run the demo parts in sequence, then plot results."""
    bot = robot_library.Robot()
    bot.part_a6()
    bot.part_a2()
    bot.part_a3()
    bot.part_b7()
    bot.create_plots()


if __name__ == "__main__":
    # Guarded so importing this module no longer drives the robot.
    main()
|
"""Loads in from json file and populated the locations table"""
import json
from sqlalchemy import func
from model import Location
from model import User
from model import connect_to_db, db
from server import app
def load_location_data():
    """Load location data from zipasaur.json into locations database"""

    print("Location")  # print() form runs on both Python 2 and 3

    #Delete all rows in table, so if we need to run this a second time, we
    #won't be trying to add duplicate users
    # Location.query.delete()

    #Read zipasaur.json file and insert data
    location_dict = json.load(open("seed_data/zipasaur.json"))

    # `row` instead of reusing the name `location` for both the source
    # dict and the model instance.
    for row in location_dict:
        zipcode = int(row.get('code'))
        city = row.get('city')
        county = row.get('county')
        lat = float(row.get('lat'))
        lng = float(row.get('lng'))

        location = Location(zipcode=zipcode,
                            city=city,
                            county=county,
                            lat=lat,
                            lng=lng)

        # Add to the session if not a duplicate
        result = Location.query.get(zipcode)
        if not result:
            db.session.add(location)
            print(location)

    #Commit to save adding it to the session
    db.session.commit()
if __name__ == "__main__":
    # As a convenience, if we run this module interactively, it will leave
    # you in a state of being able to work with the database directly.
    connect_to_db(app)
    print("Connected to DB.")  # print() form runs on both Python 2 and 3

    #create location table
    db.create_all()
    load_location_data()
import json

# Select every feature of the active layer whose first coordinate ring
# has more than one vertex.
la = iface.activeLayer()
feat = [f for f in la.getFeatures()]
for i in feat:
    # json.loads instead of eval(): never execute serialized data as
    # Python code, and the geometry JSON is valid JSON anyway.
    g = json.loads(i.geometry().asJson())['coordinates'][0]
    if len(g) > 1:
        la.select(i.id())
|
"""DB Service Module
Single service to handle all database interactions.
Uses Lazy Pirate Pattern
The goal is to minimize `psycopg2.connect` calls (which is expensive).
"""
import logging
import psycopg2
import zmq
logger = logging.getLogger(__name__)
class Server:
    """Single server that interacts with DB"""

    def __init__(self, URL):
        """
        Parameters
        ----------
        URL: str
            address bound to REP server
        """
        self.URL = URL

    def main(self, *args, **kwargs):
        """Main routine

        Blocks forever serving REQ/REP query messages until
        KeyboardInterrupt / SystemExit.

        Parameters
        ----------
        *args, **kwargs
            passed to `psycopg2.connect`
        """
        logger.info("[DBSvc] Creating PG connection...")
        conn = psycopg2.connect(*args, **kwargs)
        logger.info("[DBSvc] Starting zmq.REP socket...")
        ctx = zmq.Context.instance()
        server = ctx.socket(zmq.REP)  # pylint: disable=no-member
        server.bind(self.URL)
        while True:
            try:
                msg = server.recv_pyobj()
                logger.debug("[DBSvc] Request: %s", msg)
                recon_attempt = 0
                # Inner retry loop: on OperationalError (e.g. dropped PG
                # connection) reconnect and re-run the same query.
                # NOTE(review): retries forever while PG is down —
                # confirm that is intended.
                while True:
                    try:
                        with conn.cursor() as cursor:
                            # execute stuff
                            cursor.execute(msg["query"], msg["query_data"])
                            result = cursor.fetchall() if msg["fetch"] else None
                            if msg["commit"]:
                                conn.commit()
                            else:
                                # Keep the session clean for the next request.
                                conn.rollback()
                        break
                    except psycopg2.OperationalError as err:
                        recon_attempt += 1
                        logger.error(
                            "[DBSvc] Error caught: %s, Restarting (Attempt #%.0f)",
                            repr(err),
                            recon_attempt,
                        )
                        conn = psycopg2.connect(*args, **kwargs)
                logger.debug("[DBSvc] Response: %s", result)
                # REP socket contract: exactly one reply per request.
                server.send_pyobj(result)
            except (KeyboardInterrupt, SystemExit):
                logger.info("[DBSvc] Stopping...")
                break
        server.close()
        ctx.term()
        logger.info("[DBSvc] Exited")
class Client:
    """Client to send queries to Server"""

    def __init__(self, URL):
        """
        Parameters
        ----------
        URL: str
            address to connect REQ socket
        """
        self.URL = URL
        self.ctx = zmq.Context.instance()
        self.client = self.ctx.socket(zmq.REQ)  # pylint: disable=no-member
        self.client.connect(self.URL)

    def __del__(self):
        # Best-effort cleanup when the client is garbage-collected.
        self.close()

    def close(self):
        """Cleanup, close zmq socket and context"""
        # NOTE(review): this terminates the *shared* Context.instance(),
        # which would break any other sockets on the same context —
        # confirm single-client usage is intended.
        self.client.close()
        self.ctx.term()

    def send_query(self, query, query_data=None, commit=False, fetch=False):
        """Send SQL query

        Parameters
        ----------
        query: str
            passed as first argument to `cursor.execute()`
        query_data
            passed as second argument to `cursor.execute()`; defaults to None
        commit: bool
            flag whether to run `conn.commit()`; defaults to `False`
        fetch: bool
            flag whether to run `cursor.fetchall()`; defaults to `False`

        Returns
        ----------
        query result if `fetch` is `True`
        """
        data = {
            "query": query,
            "query_data": query_data,
            "commit": commit,
            "fetch": fetch,
        }
        self.client.send_pyobj(data)
        # Blocks until the server replies (REQ/REP lockstep).
        return self.client.recv_pyobj()
|
import numpy as np
from stl import mesh
import math
import sys
# Sample triangle vertices and the expected outward normal used for the
# ad-hoc check in the commented print at the bottom of the file.
Point_1 = np.array([-25,0,0])
Point_2 = np.array([-24.14815,7.07,-6.47])
Point_3 = np.array([-24.373,0,-5.56])
Normal = np.array([-0.99,0,-0.11])
def counter_clockwise_check(P1,P2,P3,N):
    """Return (P1, P2) if triangle P1-P2-P3 winds so that its face
    normal matches N (within tolerance), otherwise (P2, P1).

    The face normal is the normalized cross product of the two edge
    directions; "matches" means |face_normal - N| < 0.1.
    """
    edge_a = P2 - P1
    edge_a = edge_a / np.linalg.norm(edge_a)
    edge_b = P3 - P2
    edge_b = edge_b / np.linalg.norm(edge_b)
    face_normal = np.cross(edge_a, edge_b)
    face_normal = face_normal / np.linalg.norm(face_normal)
    # Distance between unit normals: small iff they point the same way.
    deviation = np.linalg.norm(face_normal - N)
    if deviation < 0.1:
        return P1, P2
    return P2, P1
#print(counter_clockwise_check(Point_1,Point_2,Point_3,Normal))
|
import math
print("Este programa retorna o tepo estimado de download para um dado tamanho de arquivo e velocidade internet")

arquivo = float(input("Digite o tamanho do arquivo em MB: "))
velocidade = float(input("Digite a velocidade da sua internet em mbps: "))

# MB -> megabits (x8), divided by speed (megabits/s) and by 60 -> minutes.
tempo = arquivo * 8.0 / (velocidade * 60.0)
print(f"Seu arquivo de {arquivo}Mb vai demorar {round(tempo,2)} minutos")
class Solution(object):
    def topKFrequent(self, words, k):
        """
        Return the k most frequent words, most frequent first; words
        with equal frequency are ordered alphabetically.

        :type words: List[str]
        :type k: int
        :rtype: List[str]
        """
        import collections
        count = collections.Counter(words)
        # Sort on the full (-count, word) tuple. The original sorted with
        # key=lambda w: w[0] (count only), so equally-frequent words kept
        # arbitrary insertion order instead of alphabetical order.
        ranked = sorted((-n, word) for word, n in count.items())
        return [word for _, word in ranked[:k]]
# Quick manual check: expected output is ['i', 'love'].
solver = Solution()
sample_words = ["i", "love", "leetcode", "i", "love", "coding"]
top_k = 2
print(solver.topKFrequent(sample_words, top_k))
# -*- coding: UTF-8 -*-
import uuid
from .managers import *
# django imports
from django.db import models
from django.db.models.signals import m2m_changed, post_save, post_delete
from django.dispatch import receiver
from django.utils import timezone
class Area(models.Model):
    """Top-level science area a Node can belong to."""
    title = models.TextField( default = 'Area Title')

    def __str__(self):
        # Python-2 style: returns UTF-8 bytes (file targets py2 Django).
        return self.title.encode('utf8')
class SubArea(models.Model):
    """Sub-area; may be attached to several Areas."""
    title = models.TextField( default = 'Subarea Title')
    AreasField = models.ManyToManyField(Area, blank = True)

    def __str__(self):
        # Python-2 style: returns UTF-8 bytes (file targets py2 Django).
        return self.title.encode('utf8')
class Node(models.Model):
    """A content node in the knowledge graph, wired to other nodes.

    The non-symmetrical fromNodes/toNodes M2M fields are kept mutually
    consistent by the m2m_changed receivers defined below.
    """
    id = models.UUIDField(default=uuid.uuid4, primary_key = True, editable = False)
    author = models.ForeignKey('auth.User', blank=True, null=True)
    title = models.TextField(default = 'Untitled')
    description = models.TextField(default = 'No description')
    # incoming and outcoming nodes
    fromNodes = models.ManyToManyField('self', blank=True, symmetrical = False, related_name='+')
    toNodes = models.ManyToManyField('self', blank=True, symmetrical = False, related_name='+')
    # sci area
    area = models.ManyToManyField('Area', related_name = 'areaField')
    subArea = models.ManyToManyField('SubArea', related_name = 'subAreaField', blank=True)
    ## link info
    #toNodeLinkInfo=JSONField(default=dict)
    created = models.DateTimeField(default=timezone.now)
    updated = models.DateTimeField(default=timezone.now)
    published = models.DateTimeField(blank=True, null=True)
    # implement custom manager
    #objects = NodeManager()

    @property
    def type(self):
        # Discriminator consumers use to tell nodes from routes.
        return 'node'

    def publish(self):
        """Stamp the node as published now and persist it."""
        self.published = timezone.now()
        self.save()

    def __str__(self):
        # Python-2 style: bytes title + UUID suffix.
        return self.title.encode('utf8') + ' ' + str(self.id)
class Link(models.Model):
    """A directed edge between two Nodes, with its own title/description."""
    id = models.UUIDField(default = uuid.uuid4, primary_key = True, editable = False)
    title = models.TextField(default = 'Untitled')
    description = models.TextField(default = 'No description')
    fromNode = models.ForeignKey(Node, related_name = '+')
    toNode = models.ForeignKey(Node, related_name = '+')

    def __str__(self):
        # Python-2 style: returns UTF-8 bytes.
        return self.title.encode('utf8')

    # making link unique
    class Meta:
        unique_together = ('fromNode', 'toNode')
class Route(models.Model):
    """An ordered-by-membership collection of Nodes forming a route."""
    id = models.UUIDField(default = uuid.uuid4, primary_key=True, editable=False)
    title = models.TextField(default = 'Untitled')
    description = models.TextField(default = 'Description')
    nodes = models.ManyToManyField(Node)

    @property
    def type(self):
        # Discriminator consumers use to tell routes from nodes.
        return 'route'

    def __str__(self):
        # Python-2 style: bytes title + UUID suffix.
        return self.title.encode('utf8') + ' ' + str(self.id)
# check and fix one-sided relations before saving
@receiver(m2m_changed, sender = Node.fromNodes.through)
def checkFromNodes(instance, action, **kwargs):
    """After nodes are added to instance.fromNodes, mirror the edge by
    adding `instance` into each such node's toNodes."""
    fromNodesArr = instance.fromNodes.all()
    if action == 'post_add':
        for curNode in fromNodesArr:
            nodeForUpdating = Node.objects.get(id = curNode.id)
            toNodesArr = nodeForUpdating.toNodes.all()
            # loop prevention: the reciprocal add would re-fire the
            # toNodes handler, so only add when not already present
            if not instance in toNodesArr:
                nodeForUpdating.toNodes.add(instance)
@receiver(m2m_changed, sender = Node.toNodes.through)
def checkToNodes(instance, action, **kwargs):
    """After nodes are added to instance.toNodes, mirror the edge on the
    target's fromNodes and make sure a Link row exists for it."""
    toNodesArr = instance.toNodes.all()
    if action == 'post_add':
        for curNode in toNodesArr:
            nodeForUpdating = Node.objects.get(id = curNode.id)
            fromNodesArr = nodeForUpdating.fromNodes.all()
            # fill links: create the Link row if it is not there yet
            try:
                Link.objects.get(fromNode = instance, toNode = curNode)
            except Link.DoesNotExist:
                Link.objects.create(fromNode = instance, toNode = curNode)
            # loop prevention: only add when the back-edge is missing
            if not instance in fromNodesArr:
                nodeForUpdating.fromNodes.add(instance)
# establish node connections for changed or created link object
# establish node connections for changed or created link object
@receiver(post_save, sender = Link)
def checkNodeLinks(instance, **kwargs):
    """After a Link is saved, make sure the two Nodes reference each
    other through toNodes/fromNodes."""
    nodeInLinkFrom = instance.fromNode
    nodeInLinkTo = instance.toNode
    theFromNode = Node.objects.get(id = nodeInLinkFrom.id)
    theToNode = Node.objects.get(id = nodeInLinkTo.id)
    theToArr = theFromNode.toNodes.all()
    theFromArr = theToNode.fromNodes.all()
    # check for link existance: bail out if either side already points
    # at the other (assumed kept in sync by the m2m handlers above)
    for node in theToArr:
        if node == nodeInLinkTo:
            return
    for node in theFromArr:
        if node == nodeInLinkFrom:
            return
    theFromNode.toNodes.add(nodeInLinkTo)
    theToNode.fromNodes.add(nodeInLinkFrom)
#@receiver(post_delete, sender = Link)
#def delNodeLinks(instance, **kwargs):
#
# nodeInLinkFrom = instance.fromNode
# nodeInLinkTo = instance.toNode
# theNode = Node.objects.get(id = nodeInLinkFrom.id)
# theToArr = theNode.toNodes.all()
#
# for node in theToArr:
# if node == nodeInLinkTo:
# return
#
# theNode.toNodes.add(nodeInLinkTo) |
'''from django import forms
from .models import Lecturer
class PostForm(forms.ModelForm):
class Meta:
model = Lecturer
fields = ['bank', 'account', 'career', 'certification', 'profilephoto', 'idphoto']
'''
from django import forms
from lecturer.models import Lecturer
class PostForm(forms.ModelForm):
    """Lecturer profile submission form."""
    class Meta:
        model = Lecturer
        # Bug fix: the original line ended with a stray comma, making
        # `fields` a 1-tuple containing the list — Django rejects that.
        fields = ['profilephoto', 'idphoto', 'bank', 'account', 'career', 'certification']
|
from numpy import *
a = float(input('Aceleracao: '))
v = float(input('Velocidade inicial: '))
n = int(input('Numero: '))

# Time steps 0..n-1; uniformly accelerated displacement d = a*t^2/2 + v*t.
t = arange(n)
# (the unused `d = zeros(n)` buffer from the original was removed)
k = ((a*(t**2))/2)+v*t
print(k)
from django.db import models
from django.contrib.auth import settings
class Cat(models.Model):
    """A cat and its owner.

    NOTE(review): field names are CapitalCase, not snake_case; renaming
    them now would change the DB schema, so they are left as-is.
    """
    class Meta:
        verbose_name = ('Кот')
        verbose_name_plural = ('Коты')

    # Owner; deleting the user cascades to their cats.
    User = models.ForeignKey(settings.AUTH_USER_MODEL,on_delete=models.CASCADE,verbose_name='Хозяин')
    Name = models.CharField(max_length=30, verbose_name='Кличка')
    Age = models.PositiveSmallIntegerField(default=0,verbose_name='Возраст')
    Breed = models.CharField(max_length=50, verbose_name='Порода')
    Hairiness = models.CharField(max_length=50,verbose_name='Волосатость')
    # Set once on insert.
    Created = models.DateTimeField(auto_now_add=True,auto_now=False,verbose_name='Создан')
    # Refreshed on every save.
    Changed = models.DateTimeField(auto_now_add=False,auto_now=True,verbose_name='Изменен')

    def __str__(self):
        return "Кот %s, Хозяин %s" %(self.Name,self.User.username)
"""
Functions generate images from a step file.
Author: Drew
"""
import cadquery as cq
import re
from wand.image import Image
from wand.color import Color
import os
import requests
from PIL import Image as I
from PIL import ImageDraw as D
# import zipfile
# import json
# Unit view vectors for the six orthographic projections.
VIEWS = {'x': (1, 0, 0),
         '-x': (-1, 0, 0),
         'y': (0, 1, 0),
         '-y': (0, -1, 0),
         'z': (0, 0, 1),
         '-z': (0, 0, -1)}

# Classifier service endpoint (docker-compose container hostname).
URL = "http://component_classifier_ai_1:5000/api/classify_image/"
def create_images(connector_file, folder='ortho_views'):
    """Generate images from STEP file.

    Renders the six orthographic views in VIEWS as 200x200 white-padded
    PNGs under `folder` and returns the list of written filenames.
    """
    if not os.path.exists(folder):
        os.mkdir(folder)
    connector = cq.importers.importStep(connector_file).combine()
    image_filenames = []
    for view_name in VIEWS:
        v = VIEWS[view_name]
        # Vector-render this projection, then strip hidden lines.
        svg = connector.toSvg(view_vector=v)
        svg = process_svg(svg)
        img_name = os.path.join(folder, connector_file.split(".")[0] + "_" + view_name + '.png')
        image_filenames.append(img_name)
        svg_blob = svg.encode('utf-8')
        with Image(blob=svg_blob, format='svg') as img:
            img.format = "png"
            img.trim()
            img.transform(resize='200x200')
            width, height = img.size
            # Pad the trimmed image back out to 200x200 with white.
            # NOTE(review): /2 yields floats on Python 3 — confirm wand
            # accepts non-integer border widths here.
            height_border = (200 - height)/2
            width_border = (200 - width)/2
            img.border(Color('#FFFFFF'), width_border, height_border)
            img.sample(200, 200)
            img.save(filename=img_name)
    # Return the list of filenames
    return image_filenames
def process_svg(svg):
    """Hacky way to remove hidden lines from cadquery exports -
    better way would be to modify cq source"""
    # TODO: remove the entire hidden line group with some regex magic
    # Bug fix: the original passed re.DOTALL (== 16) as re.sub's
    # positional *count* argument, silently capping the replacement at
    # 16 occurrences. The pattern is a literal, so no flag is needed —
    # just replace every occurrence of the hidden-line gray.
    return re.sub('160, 160, 160', '255, 255, 255', svg)
def get_image_class(image_file):
    """Get classification of image file.

    POSTs the image to the classifier service and returns the raw
    response text.
    """
    payload = {'filename': 'hello.png'}
    # Context manager so the file handle is closed — the original left
    # it open (leaked descriptor per call).
    with open(image_file, 'rb') as image_handle:
        r = requests.post(URL, files={'image': image_handle}, data=payload)
    return r.text
def classify_images(image_files):
    """Classify a list of image files; returns {filename: classification}."""
    return {image: get_image_class(image) for image in image_files}
def classify_step(step_file):
    """Creates images of a step file and classifies them."""
    return classify_images(create_images(step_file))
def classification_image(step_file):
    """Creates an agglomerated image of all generated images, with the
    classified image highlighted in red.

    Returns the output filename ('classification.png').
    """
    image_files = create_images(step_file)
    classifications = classify_images(image_files)
    # Six 200x200 views pasted side by side.
    new_img = I.new('RGB', (1200, 200))
    x_offset = 0
    for i,im in enumerate(image_files):
        img = I.open(im)
        # Views the service labeled "c" get a red frame drawn in-place.
        if classifications[im].strip()=='\"c\"':
            draw = D.Draw(img)
            draw.rectangle((0,0,200,2), fill=(255,0,0))
            draw.rectangle((0,197,200,200), fill=(255,0,0))
            draw.rectangle((0,0,2,200), fill=(255,0,0))
            draw.rectangle((197,0,200,200), fill=(255,0,0))
        new_img.paste(img, (x_offset, 0))
        x_offset += 200
    new_img.save('classification.png')
    return 'classification.png'
if __name__ == '__main__':
    # Ad-hoc manual run on a sample connector STEP file.
    # create_images('USB_Micro-B_Molex_47346-0001.step')
    create_images('Conn_HIROSE_ZX62WD1-B-5PC_eec.STEP')
# Module for processing student data.
from ...student import Student
from ..memory_store import MemoryStore
from . import exam_tools
ID_FIELD_NAME = "studentId"
def get_students():
    """
    Gets the results of students with at least one exam score.

    :returns: String List of student IDs.
    """
    memstore = MemoryStore()
    valid_rows = (
        row for row in memstore.get(Student)
        if is_valid_student(row)
        and exam_tools.is_valid_exam(row)
        and exam_tools.is_valid_exam_score(row)
    )
    # Set comprehension de-duplicates the IDs.
    return list({row[ID_FIELD_NAME] for row in valid_rows})
def get_results_by_studentid(student_id):
    """
    Gets the results of student of student_id.

    :param student_id: ID of student.
    :returns: String List of results.
    """
    return MemoryStore().get(Student, ID_FIELD_NAME, student_id)
def get_average_by_studentid(student_id):
    """
    Gets the test score average of student of student_id.

    :param student_id: ID of student.
    :returns: Float value of average; -1.0 when the student has no scores.
    """
    rows = MemoryStore().get(Student, ID_FIELD_NAME, student_id)
    scores = [row["score"] for row in rows]  # score is a decimal value.
    if not scores:
        return -1.0
    return sum(scores) / len(scores)
def get_results_average_by_studentid(student_id):
    """
    Gets test results and test score average of student of student_id.

    :param student_id: ID of student.
    :returns: Tuple of (string results list, float average); average is
        -1.0 when the student has no scores.
    """
    results = MemoryStore().get(Student, ID_FIELD_NAME, student_id)
    scores = [row["score"] for row in results]  # score is a decimal value.
    average = sum(scores) / len(scores) if scores else -1.0
    return (results, average)
def is_valid_student(row_dict):
    """
    Checks if a row in the datastore has valid student identifiers.

    :param row_dict: Row in the datastore represented as a dictionary.
    :returns: Boolean True or False.
    """
    # Missing key and None both fall through .get() to None, which
    # fails the isinstance check — same outcome as the explicit tests.
    return isinstance(row_dict.get(ID_FIELD_NAME), str)
|
__author__ = 'MOLTRES'
import os
import datetime
import pandas as pd
from util.futuresdatabase import FuturesDatabase
instrument_list = ['GC', 'CL', 'ZB']

futures_db = FuturesDatabase()

for instrument in instrument_list:
    # Rebuild the <instrument>_LAST table from scratch.
    table_name = instrument + '_LAST'
    futures_db.drop_table_if_exist(table_name)
    futures_db.create_historical_table(table_name)

    rootPath = "/home/aouyang1/NinjaTrader/TickData/" + instrument
    # (the duplicate, unused `folders` listing was removed)
    fnames = os.listdir(rootPath)

    for fileNames in fnames:
        # print() form runs on both Python 2 and 3.
        print(fileNames)
        # Tick files: "YYYYMMDD HHMMSS;last;volume" per row.
        df = pd.read_csv(rootPath + '/' + fileNames, delimiter=";", names=['Date', 'Last', 'Volume'], parse_dates=[0],
                         date_parser=lambda x: datetime.datetime.strptime(x, '%Y%m%d %H%M%S'))
        futures_db.upload_dataframe_to_table(df, table_name)

    futures_db.create_table_index(table_name, "Date")

futures_db.close_database_connection()
|
from rdflib import Namespace, Graph, Literal, RDF, URIRef
from rdfalchemy.rdfSubject import rdfSubject
from rdfalchemy import rdfSingle, rdfMultiple, rdfList
from brick.brickschema.org.schema._1_0_2.Brick.UndefinedMeasurement import UndefinedMeasurement
from brick.brickschema.org.schema._1_0_2.Brick.Water import Water
class Chilled_Water(UndefinedMeasurement,Water):
    """Brick 1.0.2 Chilled_Water concept (auto-generated rdfalchemy binding)."""
    rdf_type = Namespace('https://brickschema.org/schema/1.0.2/Brick#').Chilled_Water
|
def FT_HDP(soup, tag, tbl, home, away):
    """Parse full-time Asian-handicap odds rows from `soup` and store
    every fully-populated line via XB_Db.

    Rows with data-type '7' carry the home-side odd, data-type '8' the
    away-side odd. A handicap line is written only once both sides were
    seen (i.e. the collected list has exactly 6 fields:
    [label, home, home_odd, '1.00', away_odd, away]).
    """
    # One (home_label, away_label) pair per handicap line, in exactly
    # the order the original copy-pasted blocks checked them:
    # home -3.5 .. 0, then home +0.5 .. +3.5.
    line_labels = [
        ('(-3.5)', '(+3.5)'), ('(-3)', '(+3)'), ('(-2.5)', '(+2.5)'),
        ('(-2)', '(+2)'), ('(-1.5)', '(+1.5)'), ('(-1)', '(+1)'),
        ('(-0.5)', '(+0.5)'), ('(0)', '(0)'),
        ('(+0.5)', '(-0.5)'), ('(+1)', '(-1)'), ('(+1.5)', '(-1.5)'),
        ('(+2)', '(-2)'), ('(+2.5)', '(-2.5)'), ('(+3)', '(-3)'),
        ('(+3.5)', '(-3.5)'),
    ]
    lines = [[] for _ in line_labels]
    nullodd = '1.00'
    try:
        for row in soup.find(tag, tbl):
            odd = row.find('span', 'koeff').text
            bet_type = row.find('span', 'bet_type').text
            data_type = row.find('span', 'bet_type').get('data-type')
            for line, (home_label, away_label) in zip(lines, line_labels):
                # NOTE: substring test, so e.g. '(-3)' also matches a
                # '(-3.5)' bet_type — behavior preserved from the
                # original unrolled code.
                if home_label in bet_type and data_type == '7':
                    # Same insert order as the original: home name,
                    # then label (stripped of parens), odd, placeholder.
                    line.insert(1, home)
                    line.insert(0, home_label.replace('(', '').replace(')', ''))
                    line.insert(2, odd)
                    line.insert(3, nullodd)
                if away_label in bet_type and data_type == '8':
                    line.insert(4, odd)
                    line.insert(5, away)
        for line in lines:
            # Only fully populated lines (both sides seen) are stored.
            if len(line) == 6:
                # print("*** HANDICAP *** ")
                print(tuple([(line)]))
                XB_Db.w('XB_HDP', tuple([(line)]))
    except AttributeError:
        pass
    except TypeError:
        pass
if __name__ == 'bettypes.FT_HDP':
from XBdatabase import XB_Db
elif __name__ == 'XB.bettypes.FT_HDP':
from XB.Database import XB_Db
|
import json
import requests
import smtplib
from email.mime.text import MIMEText # 导入模块
from datetime import datetime
from .db.api import SQL
from .db import config
from functools import wraps
from flask import session
from .service_exception import GetOpenIdException
from . import config as service_config
def login_require(func):
    """Decorator: reject the request when the session has no logged-in 'id'."""
    @wraps(func)
    def wrapper(*argvs, **kwargs):
        if 'id' in session:
            return func(*argvs, **kwargs)
        return json.dumps({
            'success': False,
            'redict': True,
            'msg': '您没有访问权限, 请登录'
        })
    return wrapper
def register(identity, id):
    """Create a teacher record; any other identity is unsupported."""
    if identity != 'teacher':
        print('identity not define')
        return
    SQL().insert(config.TEACHER_TABLE_NAME, teacher_id=id)
def get_open_id(code):
    """Exchange a WeChat login `code` for the user's openid.

    :raises GetOpenIdException: when the response has no 'openid'
        (or cannot be parsed).
    """
    url = config.GET_OPEN_ID_URL % (config.APPID, config.SECRET, code)
    page = requests.get(url)
    json_str = page.text
    try:
        # json.loads instead of eval(): never evaluate a remote HTTP
        # response as Python code.
        return json.loads(json_str)['openid']
    except Exception as e:
        raise GetOpenIdException('get open_id error return value is %s, system return is %s'%(json_str, str(e)))
def reservate(date: str, time: str, id):
    """Insert a reservation row for teacher `id` at '<date> <time>'."""
    timestamp = '%s %s' % (date, time)
    return SQL().insert(config.TASK_TABLE_NAME,
                        reservate_time=timestamp,
                        teacher_id=id)
def is_reservated(date, time):
    """True when the logged-in teacher already booked this date/time slot."""
    rows = SQL().select(config.TASK_TABLE_NAME, ['*'],
                        teacher_id=session['id'],
                        reservate_time='%s %s' % (date, time))
    return len(rows) != 0
def reservate_info(date: str):
    """
    Query reservation occupancy for one calendar date.

    :param date: the date to inspect (e.g. 'YYYY-MM-DD')
    :return: list of dicts for the slots that are at capacity:
             {'reservate_time': 'YYYY-MM-DD HH:MM:SS', 'reservate_forbid': True}
    """
    sql_client = SQL()
    sql_client.conn.ping(reconnect=True)
    # SECURITY: pass `date` as a query parameter — the original interpolated
    # it straight into the SQL string (SQL-injection risk).
    sql_str = ("select reservate_time, count(*) reservated_num "
               "from task "
               "where to_days(reservate_time) = to_days(%s) "
               "group by reservate_time")
    sql_client.cursor.execute(sql_str, (date,))
    res_data = sql_client.cursor.fetchall()
    res_list = []
    for reservate_time, reservated_num in res_data:
        timestamp_str = reservate_time.strftime('%Y-%m-%d %H:%M:%S')
        # NOTE(review): only fully-booked slots are reported; free slots are
        # omitted entirely — confirm the caller expects this shape.
        if int(reservated_num) >= config.MAX_TASK_NUM:
            res_list.append({'reservate_time': timestamp_str, 'reservate_forbid': True})
    return res_list
def reservate_teacher(id):
    """List every reservation (time + state) belonging to teacher `id`."""
    rows = SQL().select(config.TASK_TABLE_NAME, ['reservate_time', 'state'], teacher_id=id)
    result = []
    for reservate_time, state in rows:
        result.append({
            'reservate_time': reservate_time.strftime('%Y-%m-%d %H:%M:%S'),
            'state': state,
        })
    return result
def send_emil(recv: str, content: str,
              title=service_config.EMAIL_TITLE, username=service_config.EMAIL_USERNAME, passwd=service_config.EMAIL_PASSWORD,
              mail_host=service_config.EMAIL_HOST, port=service_config.EMAIL_PORT):
    """
    Send a plain-text email over SMTP-SSL.

    :param recv: recipient address, e.g. 123456@qq.com
    :param content: plain-text message body
    :param title: subject line
    :param username: sender account, also used as the From header
    :param passwd: sender account password
    :param mail_host: SMTP server host
    :param port: SMTP-SSL port
    :return: True on success
    :raises smtplib.SMTPException: on login/send failure
    """
    msg = MIMEText(content)
    msg['Subject'] = title
    msg['From'] = username
    msg['To'] = recv
    smtp = smtplib.SMTP_SSL(mail_host, port=port)
    try:
        smtp.login(username, passwd)
        smtp.sendmail(username, recv, msg.as_string())
    finally:
        # Always close the connection — the original leaked the socket
        # whenever login() or sendmail() raised before quit().
        smtp.quit()
    return True
def reservate_del(reservate_time: str, teacher_id: str):
    """Remove the reservation of `teacher_id` at `reservate_time`."""
    SQL().delete(config.TASK_TABLE_NAME,
                 teacher_id=teacher_id,
                 reservate_time=reservate_time)
def reservate_max(reservate_max_value: str):
    """Update the per-slot reservation capacity and return the new value.

    The value arrives as a string (e.g. from a form field) and is cast to
    int: `reservate_info` compares it numerically against `int(count)`, and
    storing a str there would raise TypeError under Python 3.
    """
    config.MAX_TASK_NUM = int(reservate_max_value)
    return config.MAX_TASK_NUM
"""
miRACL - MultI-label Relation-Aware Collaborative Learning
for Unified Aspect-based Sentiment Analysis
# Sentence Representation
A. Word Embeddings - Pretrained distil-USE (multi-lingual sentence embeddings)
B. Features:
0. Sharing Feature: Dropout -> CNN -> Dropout
1. Aspect Extraction [AE]: CNN
2. Opinion Extraction [OE]: CNN
3. Sentiment Classification [SC]: CNN
C. Relations:
1. R1 - relation between AE and OE: Attention
2. R2 - relation between SC and R1: Attention
3. R3 - relation between SC and OE: Attention
4. R4 - relation between SC and AE: Attention
5. Opinion Propagation: SoftMax -> Clip
# Predictions + Loss Function
A. Aspect: Fully-Connected -> SoftMax -> Cross-Entropy
B. Opinion: Fully-Connected -> SoftMax -> Cross-Entropy
C. Sentiment: Fully-Connected -> SoftMax -> Cross-Entropy
D. Category: Fully-Connected -> SoftMax -> Cross-Entropy
E. Attribute: Fully-Connected -> SoftMax -> Cross-Entropy
F. Loss = Weighted_Sum(Aspect_Loss, Opinion_Loss, Sentiment_Loss, Category_Loss, Attribute_Loss, Regularization_Loss)
"""
import logging
import ntpath
import numpy as np
import os
import pickle
import tensorflow as tf
import time
from glob import glob
if not tf.executing_eagerly():
tf.enable_eager_execution()
# tf.config.run_functions_eagerly(True)
from tensorflow.keras import backend as K
from tensorflow.keras.models import Model
from tensorflow.keras.layers import (
Input, Conv1D, Dropout, Concatenate, Lambda, Softmax, )
try:
from tensorflow.keras.activations import softmax, sigmoid
from tensorflow.keras.initializers import Identity, GlorotNormal, GlorotUniform
from tensorflow.keras.optimizers import Adam, Nadam, Adagrad, Adadelta, RMSprop, SGD
from tensorflow.keras.callbacks import ReduceLROnPlateau, TensorBoard, LearningRateScheduler, EarlyStopping, ModelCheckpoint, Callback
from tensorflow.keras.losses import CategoricalCrossentropy
from tensorflow.keras.utils import plot_model
except ImportError as e:
pass
from src.utils import log_summary, plot_history
from src.models.racl.layers import dropoutize_embeddings, DropBlock2D, L2Norm, RACL_Block, ExpandDim, ReduceDim
from src.sub_processes.losses import RACL_losses
from src.sub_processes.metrics import evaluate_absa, evaluate_multilists
from src.sub_processes.optimizers import get_optimizer
from src.sub_processes.lr_schedulers import CyclicLR
from src.sub_processes.data_generator import DataGenerator
class MIRACL(object):
    """Builder/driver for the miRACL model (multi-label relation-aware
    collaborative learning for unified aspect-based sentiment analysis).

    Wires the input block, stacked RACL interaction blocks and (for
    inference only) a softmax output block into a `CustomModel`, then
    drives phase-wise training, evaluation and prediction.
    """

    def __init__(self, opt):
        # `opt` is the parsed hyper-parameter namespace; it configures
        # everything from initializers to directory layout.
        self.opt = opt
        self.mode = 'train' if opt.is_training else 'predict'
        if opt.random_type == 'uniform':
            self.initializer = GlorotUniform(seed=opt.random_seed)
        else:
            self.initializer = GlorotNormal(seed=opt.random_seed)

        if opt.is_training:
            # Build logger
            log_dir = opt.logs_path
            if not os.path.isdir(log_dir):
                os.makedirs(log_dir)
            filename = os.path.join(log_dir, f'{time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())}.txt')
            self.logger = logging.getLogger(filename)
            self.logger.setLevel(logging.DEBUG)
            # self.logger.propagate = False
            self.logger.addHandler(logging.StreamHandler())
            self.logger.addHandler(logging.FileHandler(filename, 'a'))

            # Log hyper-parameters
            info = ''
            for arg in vars(opt):
                info += ('>>> {0}: {1}\n'.format(arg, getattr(opt, arg)))
            self.logger.info('{:-^80}\n{}\n'.format('Parameters', info))

            # Build checkpoint & tensorboard
            self.ckpt_dir = opt.ckpt_path
            self.board_dir = os.path.join(opt.output_path, "tensorboard")
            self.viz_dir = os.path.join(opt.output_path, "visualization")
            for dir_ in [self.ckpt_dir, self.board_dir, self.viz_dir]:
                if not os.path.isdir(dir_):
                    os.makedirs(dir_)

        # Build model
        inputs, embeddings, position_att, token_mask, sentiment_mask = self.build_input_block()
        predictions = list(self.build_RACL_block(embeddings, position_att, token_mask))
        if opt.is_training or opt.is_evaluating:
            # Train/eval graph also outputs the two masks so the custom
            # train/test steps can forward them to RACL_losses.
            model_inputs = [inputs, token_mask, sentiment_mask, position_att]
            model_outputs = predictions + [token_mask, sentiment_mask]
            self.model = CustomModel(inputs=model_inputs, outputs=model_outputs, name='miRACL')
            if opt.is_training:
                model_summary = log_summary(self.model)
                self.logger.info(model_summary)
                self.visualize_architecture()
        else:
            # Inference graph: raw logits are wrapped in softmax heads.
            predictions_as_prob = self.build_output_block(predictions)
            self.model = CustomModel(inputs=[inputs, token_mask, position_att],
                                     outputs=predictions_as_prob, name='miRACL')
            self.model.summary()

    def visualize_architecture(self):
        """Render the model graph to `<model>_<mode>.png`."""
        plot_model(self.model, to_file=f'{self.opt.model}_{self.mode}.png', dpi=128, show_shapes=True, show_layer_names=True)

    def build_input_block(self):
        """Create the embedding input plus the mask/position attention inputs."""
        inputs, embeddings = dropoutize_embeddings(self.opt)
        inputs._name = 'embeddings_concat'

        # Inputs for Masking
        position_att = Input(shape=(self.opt.max_sentence_len, self.opt.max_sentence_len), name='position_att')
        token_mask = Input(shape=(self.opt.max_sentence_len,), name='token_mask')
        sentiment_mask = Input(shape=(self.opt.max_sentence_len,), name='sentiment_mask')
        return inputs, embeddings, position_att, token_mask, sentiment_mask

    def build_RACL_block(self, embeddings, position_att, token_mask):
        """Stack `opt.n_interactions` RACL blocks and average their per-task
        predictions (multi-layer short-cut).

        Returns the (aspect, opinion, sentiment, category, attribute)
        prediction tensors.
        """
        # Preprocessing
        inputs = Dropout(rate=1-self.opt.keep_prob_1, name='inputs_dropout')(embeddings)

        # Shared Features
        conv_args = {'kernel_size': 1, 'strides': 1, 'padding': 'same', 'activation': 'relu', }
        Feature_Extractor = Conv1D(filters=self.opt.embedding_dim, name='shared_features', **conv_args)
        shared_features = Feature_Extractor(inputs)
        shared_features = Dropout(rate=1-self.opt.keep_prob_1, name='shared_features_dropout')(shared_features)

        # Define repeatable layers in RACL interactions
        DropBlock_aspect = DropBlock2D(keep_prob=self.opt.keep_prob_2, block_size=3, name='DropBlock2D_aspect')
        DropBlock_opinion = DropBlock2D(keep_prob=self.opt.keep_prob_2, block_size=3, name='DropBlock2D_opinion')
        DropBlock_context = DropBlock2D(keep_prob=self.opt.keep_prob_2, block_size=3, name='DropBlock2D_context')
        L2Normalize = L2Norm()
        Tile = Lambda(lambda x: tf.tile(tf.expand_dims(x, axis=1), [1, self.opt.max_sentence_len, 1]), name='Tiler-in-RACL')

        # We found that the SC task is more difficult than the AE and OE tasks.
        # Hence, we augment it with a memory-like mechanism by updating the aspect query with the retrieved contexts.
        # For more details about the memory network, refer to
        # https://www.aclweb.org/anthology/D16-1021/ .
        aspect_inputs, opinion_inputs, context_inputs = [shared_features], [shared_features], [shared_features]
        aspect_preds, opinion_preds, sentiment_preds, category_preds, attribute_preds = [], [], [], [], []
        context_queries = [shared_features]
        conv_args['kernel_size'] = self.opt.kernel_size
        classifier_args = dict()
        for clss, n_clss in zip(['aspect', 'opinion', 'sentiment', 'category', 'attribute'],
                                [3, 3, self.opt.n_sentiments, self.opt.n_categories, self.opt.n_attributes]):
            classifier_args[clss] = {'units': n_clss,
                                     'activation': 'softmax',
                                     'kernel_initializer': self.initializer, }
        for interact_i in range(self.opt.n_interactions):
            racl_block = RACL_Block(self.opt, L2Normalize, [DropBlock_aspect, DropBlock_opinion, DropBlock_context], Tile,
                                    conv_args, classifier_args, block_id=interact_i)
            output_preds, output_interacts = racl_block([aspect_inputs[-1], opinion_inputs[-1], context_inputs[-1], context_queries[-1], token_mask, position_att])
            aspect_pred, opinion_pred, sentiment_pred, category_pred, attribute_pred = output_preds
            aspect_interact, opinion_interact, context_interact, context_conv = output_interacts

            # Stacking
            aspect_preds.append(ExpandDim(axis=-1, name=f'aspect_pred-{interact_i}')(aspect_pred))
            opinion_preds.append(ExpandDim(axis=-1, name=f'opinion_pred-{interact_i}')(opinion_pred))
            sentiment_preds.append(ExpandDim(axis=-1, name=f'sentiment_pred-{interact_i}')(sentiment_pred))
            category_preds.append(ExpandDim(axis=-1, name=f'category_pred-{interact_i}')(category_pred))
            attribute_preds.append(ExpandDim(axis=-1, name=f'attribute_pred-{interact_i}')(attribute_pred))

            aspect_inputs.append(aspect_interact)
            opinion_inputs.append(opinion_interact)
            context_inputs.append(context_conv)
            context_queries.append(context_interact)  # update query

        # Multi-layer Short-cut
        aspect_preds = Concatenate(axis=-1, name='aspect_preds')(aspect_preds)
        opinion_preds = Concatenate(axis=-1, name='opinion_preds')(opinion_preds)
        sentiment_preds = Concatenate(axis=-1, name='sentiment_preds')(sentiment_preds)
        category_preds = Concatenate(axis=-1, name='category_preds')(category_preds)
        attribute_preds = Concatenate(axis=-1, name='attribute_preds')(attribute_preds)

        aspect_pred = ReduceDim('mean', axis=-1, name='AE_pred')(aspect_preds)
        opinion_pred = ReduceDim('mean', axis=-1, name='OE_pred')(opinion_preds)
        sentiment_pred = ReduceDim('mean', axis=-1, name='SC_pred')(sentiment_preds)
        category_pred = ReduceDim('mean', axis=-1, name='CC_pred')(category_preds)
        attribute_pred = ReduceDim('mean', axis=-1, name='AC_pred')(attribute_preds)
        return aspect_pred, opinion_pred, sentiment_pred, category_pred, attribute_pred

    def build_output_block(self, preds):
        """Wrap each task's predictions in a softmax head (inference mode)."""
        aspect_pred, opinion_pred, sentiment_pred, category_pred, attribute_pred = preds

        # Scale probability
        aspect_prob = Softmax(axis=-1, name='aspect_prob')(aspect_pred)
        opinion_prob = Softmax(axis=-1, name='opinion_prob')(opinion_pred)
        sentiment_prob = Softmax(axis=-1, name='sentiment_prob')(sentiment_pred)
        category_prob = Softmax(axis=-1, name='category_prob')(category_pred)
        attribute_prob = Softmax(axis=-1, name='attribute_prob')(attribute_pred)
        return aspect_prob, opinion_prob, sentiment_prob, category_prob, attribute_prob

    def train(self):
        """Run the full phase-wise training loop, then test the best checkpoints.

        Each loop cycles through the phases ['all', 'opinion', 'aspect',
        'sentiment', 'all']; per-task loss weights are re-balanced between
        phases, epoch checkpoints are pruned down to the best ones, and the
        surviving checkpoints are evaluated on the test set.
        """
        # Load generators
        train_set = DataGenerator(self.opt.train_path, self.opt, validate=False)
        val_set = DataGenerator(self.opt.val_path, self.opt, validate=True)
        test_set = DataGenerator(self.opt.test_path, self.opt, validate=True)
        n_trains, n_vals, n_tests = len(train_set), len(val_set), len(test_set)

        ################################
        #      Training Procedure      #
        ################################
        Evaluator = ABSA_Evaluation(val_set[0], self.logger, opt=self.opt, include_opinion=self.opt.include_opinion)
        train_arguments = {
            'x': train_set,
            'steps_per_epoch': n_trains,
            'validation_data': val_set,
            'validation_steps': n_vals,
            'verbose': 1,
            'callbacks': [
                # ReduceLROnPlateau(monitor='val_loss', factor=0.69, patience=5, min_lr=1e-7, verbose=1),
                CyclicLR(mode='exponential', base_lr=self.opt.lr//169, max_lr=self.opt.lr, step_size=n_trains*2),
                # TensorBoard(self.board_dir),
                ModelCheckpoint(os.path.join(self.opt.ckpt_path, 'miRACL-epoch={epoch:03d}.h5'), save_weights_only=True, monitor='loss', verbose=1),
                Evaluator,
                # EarlyStopping(monitor="val_loss", patience=11, restore_best_weights=True, verbose=1)
            ]
        }

        self.model.set_opt(self.opt)
        self.model.compile(optimizer=get_optimizer(self.opt.optimizer, learning_rate=self.opt.lr))
        phases = ['all', 'opinion', 'aspect', 'sentiment', 'all']
        # Cumulative epoch boundaries: phase p runs epochs[p] .. epochs[p+1].
        epochs = [p*self.opt.n_epochs for p in range(len(phases)+1)]
        histories = []
        for l in range(self.opt.n_loops):
            self.logger.info(f"\n\tLoop {l+1:03d} / {self.opt.n_loops:03d}")
            for p_i, phase in enumerate(phases):
                self.logger.info(f"\n\t\tPhase {p_i+1}: Training {phase.upper()} layers ...")
                history = self.train_per_phase(initial_epoch=l*self.opt.n_epochs*len(phases)+epochs[p_i],
                                               epochs=l*self.opt.n_epochs*len(phases)+epochs[p_i+1],
                                               train_arguments=train_arguments,
                                               phase=phase)
                histories.append(history)

                # Update weights for losses
                self.logger.info(f"\n\t\tPhase {p_i+1}: Updating loss weights ...")
                if p_i >= len(phases)-1:
                    # NOTE(review): p_i never reaches len(phases) inside this
                    # enumerate, so the `continue` below looks unreachable —
                    # confirm the intended skip-on-last-loop condition.
                    if p_i == len(phases) and l == self.opt.n_loops-1:
                        continue
                    # Re-balance task weights inversely to validation F1.
                    f1_o, f1_a, _, f1_s, _ = self.evaluate(test_set=val_set, RACL_only=True)
                    scores = np.array([f1_a, f1_o, f1_s], dtype=float)
                    weights = 1 / (scores+K.epsilon())
                    weights /= np.min(weights)
                    weights = np.clip(weights, 1., 16.9)
                else:
                    # Emphasize the task that the NEXT phase trains.
                    next_phase = phases[p_i+1]
                    if next_phase == 'aspect':
                        weights = [3.69, 1.00, 1.00]
                    elif next_phase == 'opinion':
                        weights = [1.00, 3.69, 1.00]
                    elif next_phase == 'sentiment':
                        weights = [1.00, 1.00, 1.69]
                    else:
                        weights = [1.00, 1.00, 1.00]
                self.opt.aspect_weight = weights[0]
                self.opt.opinion_weight = weights[1]
                self.opt.sentiment_weight = weights[2]
                self.logger.info(f"\n\t\t\t aspect_weight = {weights[0]} \n\t\t\t opinion_weight = {weights[1]} \n\t\t\t sentiment_weight = {weights[2]}")

            # Save best weights per phase
            ckpt_ids_to_keep = [Evaluator.min_loss_index, Evaluator.max_score_ABSA_index, Evaluator.max_score_miRACL_index]
            for ckpt_id, ckpt_type in zip(ckpt_ids_to_keep, ['loss', 'score_ABSA', 'score_miRACL']):
                model_ckpt = os.path.join(self.ckpt_dir, f'miRACL-epoch={ckpt_id:03d}.h5')
                self.model.load_weights(model_ckpt)
                self.model.save_weights(os.path.join(self.ckpt_dir, f'miRACL-best-{ckpt_type}-loop={l+1}.h5'))

        # Clean epoch weights
        ckpt_ids_to_keep = [Evaluator.min_loss_index, Evaluator.max_score_ABSA_index, Evaluator.max_score_miRACL_index]
        ckpt_ids_to_keep = list(set(ckpt_ids_to_keep))
        for ckpt_file in glob(os.path.join(self.ckpt_dir, 'miRACL-epoch=*.h5')):
            # Epoch id is embedded at a fixed offset of the basename:
            # 'miRACL-epoch=NNN.h5'.
            ckpt_id = int(ntpath.basename(ckpt_file)[11:14])
            if ckpt_id in ckpt_ids_to_keep:
                continue
            if os.path.isfile(ckpt_file):
                os.remove(ckpt_file)

        # Visualization
        try:
            history_fig = plot_history(histories)
            history_fig.savefig(os.path.join(self.viz_dir, 'training_history.png'))
            for t_i, train_history in enumerate(histories):
                with open(f'train_history_{t_i}.hst', 'wb') as f_writer:
                    pickle.dump(train_history.history, f_writer)
        except Exception:
            # Plotting/pickling is best-effort; never fail the training run.
            pass

        # Testing Process
        self.logger.info('\n\t Testing')
        for ckpt_file in sorted(glob(os.path.join(self.ckpt_dir, 'miRACL-best-*.h5'))):
            scores = self.evaluate(model_ckpt=ckpt_file, test_set=test_set)
            self.logger.info(f'\n\t Prediction by {ntpath.basename(ckpt_file)}')
            self.logger.info(f'\t\t opinion_f1={scores[0]:.7f} \n\t\t aspect_f1={scores[1]:.7f} \n\t\t sentiment_acc={scores[2]:.7f} \n\t\t sentiment_f1={scores[3]:.7f} \n\t\t ABSA_f1={scores[4]:.7f}')
            self.logger.info(f'\t\t category_f1={scores[5]:.7f} \n\t\t category_acc={scores[6]:.7f} \n\t\t attribute_f1={scores[7]:.7f} \n\t\t attribute_acc={scores[8]:.7f} \n\t\t miRACL_f1={scores[9]:.7f}')

    def train_aspect(self, initial_epoch: int, epochs: int, train_arguments: dict):
        """Freeze everything except layers whose name contains 'aspect', then fit."""
        for layer in self.model.layers:
            if 'aspect' in layer.name.lower():
                layer.trainable = True
                self.logger.info(f"\t\t\t{layer.name}")
            else:
                layer.trainable = False
        history = self.model.fit(initial_epoch=initial_epoch, epochs=epochs, **train_arguments)
        return history

    def train_opinion(self, initial_epoch: int, epochs: int, train_arguments: dict):
        """Freeze everything except layers whose name contains 'opinion', then fit."""
        for layer in self.model.layers:
            if 'opinion' in layer.name.lower():
                layer.trainable = True
                self.logger.info(f"\t\t\t{layer.name}")
            else:
                layer.trainable = False
        history = self.model.fit(initial_epoch=initial_epoch, epochs=epochs, **train_arguments)
        return history

    def train_sentiment(self, initial_epoch: int, epochs: int, train_arguments: dict):
        """Freeze everything except 'sentiment'/'context' layers, then fit."""
        for layer in self.model.layers:
            if any(ss in layer.name.lower() for ss in ['sentiment', 'context']):
                layer.trainable = True
                self.logger.info(f"\t\t\t{layer.name}")
            else:
                layer.trainable = False
        history = self.model.fit(initial_epoch=initial_epoch, epochs=epochs, **train_arguments)
        return history

    def train_all(self, initial_epoch: int, epochs: int, train_arguments: dict):
        """Unfreeze every layer and fit."""
        for layer in self.model.layers:
            layer.trainable = True
        history = self.model.fit(initial_epoch=initial_epoch, epochs=epochs, **train_arguments)
        return history

    def train_per_phase(self, initial_epoch: int, epochs: int, train_arguments: dict, phase: str='all'):
        """Dispatch to the per-phase trainer; unknown phases train everything.

        NOTE(review): the 'embedding' branch calls `self.train_embedding`,
        which is not defined in this class — it would raise AttributeError
        if that phase were ever requested (it is not in `train()`'s list).
        """
        phase = phase.lower()
        if phase == 'embedding':
            history = self.train_embedding(initial_epoch, epochs, train_arguments)
        elif phase == 'aspect':
            history = self.train_aspect(initial_epoch, epochs, train_arguments)
        elif phase == 'opinion':
            history = self.train_opinion(initial_epoch, epochs, train_arguments)
        elif phase == 'sentiment':
            history = self.train_sentiment(initial_epoch, epochs, train_arguments)
        else:
            history = self.train_all(initial_epoch, epochs, train_arguments)
        return history

    def evaluate(self, model_ckpt='', test_set=None, RACL_only: bool=False):
        """Score the model on `test_set` (built from opt.test_path if absent).

        Returns the RACL scores alone when `RACL_only`, otherwise
        RACL scores + multi-label scores + the combined miRACL score.
        """
        # Load generator
        if not isinstance(test_set, DataGenerator):
            test_set = DataGenerator(self.opt.test_path, self.opt, validate=True)

        # Load weights
        if model_ckpt != '' and os.path.isfile(model_ckpt):
            self.model.load_weights(model_ckpt)

        # Evaluate
        Xs, Ys_true = test_set[0]
        *Ys_pred, token_mask, _ = self.model.predict(Xs)
        scores_RACL = evaluate_absa(Ys_true[0], Ys_pred[0],
                                    Ys_true[1], Ys_pred[1],
                                    Ys_true[2], Ys_pred[2],
                                    token_mask, include_opinion=self.opt.include_opinion)
        if RACL_only:
            return scores_RACL
        scores_MI = evaluate_multilists(Ys_true[3:], Ys_pred[3:], token_mask)
        # Combined score: mean of ABSA-F1 and the category/attribute average.
        score_miRACL = (scores_RACL[4] + (scores_MI[0]+scores_MI[2])/2) / 2
        return scores_RACL + scores_MI + [score_miRACL]

    def predict(self, sentence, token_mask, position_att):
        """
        Return:
            ae_pred, oe_pred, sc_pred, ctgr_pred, attr_pred
        """
        Ys_pred = self.model.predict([sentence, token_mask, position_att])
        return Ys_pred

    def load_weights(self, weights_path):
        """Load model weights from `weights_path`; raise if the file is missing."""
        if not os.path.isfile(weights_path):
            raise FileNotFoundError(f"weights_path:\n\t{weights_path}\ndoesn't exist!")
        try:
            self.model.load_weights(weights_path)
        except Exception as e:
            # Best-effort load: an architecture mismatch is reported, not fatal.
            print(e)
class CustomModel(Model):
    """Keras Model whose train/test steps delegate loss computation to
    `RACL_losses`.

    The model's outputs are expected to be
    (*task_predictions, token_mask, sentiment_mask); the two masks are
    split off before the loss is computed. `set_opt` must be called before
    fit()/evaluate().
    """

    def set_opt(self, opt):
        # Hyper-parameter namespace consumed by RACL_losses (task weights etc.).
        self.opt = opt

    def train_step(self, data):
        Xs, Ys_true = data
        with tf.GradientTape() as tape:
            # Forward pass
            *Ys_pred, token_mask, sentiment_mask = self(Xs, training=True)
            # Compute the loss value. Loss function is configured in `compile()`.
            losses = RACL_losses(Ys_true, Ys_pred, [token_mask, sentiment_mask], self.opt)

        # Backward progagation - Compute gradients & Update weights
        # losses[0] is the total loss; the remaining entries are per-task terms.
        trainable_vars = self.trainable_variables
        gradients = tape.gradient(losses[0], trainable_vars)
        self.optimizer.apply_gradients(zip(gradients, trainable_vars))
        return {
            'OE_loss': losses[1], 'AE_loss': losses[2], 'SC_loss': losses[3],
            'CC_loss': losses[4], 'AC_loss': losses[5],
            'Reg_cost': losses[6], 'loss': losses[0], 'lr': self.optimizer.learning_rate,
        }

    def test_step(self, data):
        # Unpack the data
        Xs, Ys_true = data

        # Compute predictions
        *Ys_pred, token_mask, sentiment_mask = self(Xs, training=False)

        # Compute the loss value
        losses = RACL_losses(Ys_true, Ys_pred, [token_mask, sentiment_mask], self.opt)
        return {
            'OE_loss': losses[1], 'AE_loss': losses[2], 'SC_loss': losses[3],
            'CC_loss': losses[4], 'AC_loss': losses[5],
            'Reg_cost': losses[6], 'loss': losses[0],
        }
class ABSA_Evaluation(Callback):
    """Keras callback: after every epoch, run a forward pass on one fixed
    validation batch, compute RACL losses + ABSA/miRACL metrics, log them,
    and track the best epochs by loss / ABSA-F1 / miRACL-F1.
    """

    def __init__(self, validation_data, logger, opt, include_opinion: bool=True, threshold: float=0.2, name='ABSA_scores', **kwargs):
        super().__init__(**kwargs)  # handle base args (e.g. dtype)
        self._name = name
        # One (Xs, Ys_true) batch used for every epoch-end evaluation.
        self.Xs, self.Ys_true = validation_data
        self.opt = opt
        self.logger = logger
        self.include_opinion = include_opinion
        # NOTE(review): `threshold` is stored but never read in this class —
        # confirm whether it was meant to gate predictions.
        self.threshold = threshold
        # Full metric history, one list entry per epoch.
        self.records = {
            'opinion_f1': [], 'OE_loss': [],
            'aspect_f1': [], 'AE_loss': [],
            'sentiment_acc': [], 'sentiment_f1': [], 'SC_loss': [],
            'ABSA_f1': [], 'miRACL_f1': [],
            'category_acc': [], 'category_f1': [], 'CC_loss': [],
            'attribute_acc': [], 'attribute_f1': [], 'AC_loss': [],
            'Reg_cost': [], 'total_loss': [],
        }

    def on_train_begin(self, logs={}):
        ...

    def on_epoch_end(self, epoch, logs={}):
        """Evaluate the fixed validation batch and log/record all metrics."""
        start = time.time()

        # Forward pass
        *Ys_pred, token_mask, sentiment_mask = self.model(self.Xs, training=False)

        # Compute losses
        losses = RACL_losses(self.Ys_true, Ys_pred, [token_mask, sentiment_mask], self.opt)

        # Evaluate
        scores_RACL = evaluate_absa(self.Ys_true[0], Ys_pred[0],
                                    self.Ys_true[1], Ys_pred[1],
                                    self.Ys_true[2], Ys_pred[2],
                                    token_mask, self.include_opinion)
        scores_MI = evaluate_multilists(self.Ys_true[3:], Ys_pred[3:], token_mask)
        # Combined score: mean of ABSA-F1 and the category/attribute average.
        score_miRACL = (scores_RACL[4] + (scores_MI[0]+scores_MI[2])/2) / 2
        end = time.time()

        metrics = {
            'opinion_f1': scores_RACL[0], 'OE_loss': losses[1],
            'aspect_f1': scores_RACL[1], 'AE_loss': losses[2],
            'sentiment_acc': scores_RACL[2], 'sentiment_f1': scores_RACL[3], 'SC_loss': losses[3],
            'ABSA_f1': scores_RACL[4], 'miRACL_f1': score_miRACL,
            'total_loss': losses[0], 'Reg_cost': losses[6],
            'category_f1': scores_MI[0], 'category_acc': scores_MI[1], 'CC_loss': losses[4],
            'attribute_f1': scores_MI[2], 'attribute_acc': scores_MI[3], 'AC_loss': losses[5],
        }
        self.max_score_ABSA_index, \
            self.max_score_miRACL_index, self.min_loss_index = self.update_metrics(metrics)
        display_text = f'Epoch {epoch+1:03d} - Evaluation in {int(end-start)} seconds\n' + \
            f'\t OE_loss={losses[1]:.3f}, AE_loss={losses[2]:.3f}, SC_loss={losses[3]:.3f}, CC_loss={losses[4]:.3f}, AC_loss={losses[5]:.3f}, Reg_cost={losses[6]:.3f}, total_loss={losses[0]:.3f}' + \
            f'\n --> Best loss at Epoch {self.min_loss_index}' + \
            f'\n\t opinion_f1={scores_RACL[0]:.7f}, aspect_f1={scores_RACL[1]:.7f}, sentiment_acc={scores_RACL[2]:.7f}, sentiment_f1={scores_RACL[3]:.7f}, ABSA_f1={scores_RACL[4]:.7f}' + \
            f'\n --> Best ABSA-score at Epoch {self.max_score_ABSA_index}' + \
            f'\n\t category_f1={scores_MI[0]:.7f}, category_acc={scores_MI[1]:.7f}, attribute_f1={scores_MI[2]:.7f}, attribute_acc={scores_MI[3]:.7f}, miRACL_f1={score_miRACL:.7f}' + \
            f'\n --> Best miRACL-score at Epoch {self.max_score_miRACL_index}'
        self.logger.info(display_text)
        return metrics

    def update_metrics(self, metrics):
        """Append this epoch's metrics and return the best (1-based) epoch
        indices for ABSA-F1, miRACL-F1 and total loss."""
        for k, v in metrics.items():
            self.records[k].append(v)
        return np.argmax(self.records['ABSA_f1'])+1, \
            np.argmax(self.records['miRACL_f1'])+1, \
            np.argmin(self.records['total_loss'])+1
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    """Auto-generated squashed migration for the `gear` app.

    Replaces migrations 0001-0005: creates GearBrand, GearCategory and
    GearItem, adds owner FKs, then applies ordering/verbose-name tweaks.
    Do not edit by hand beyond comments — the operations must stay in sync
    with the migrations they replace.
    """

    replaces = [(b'gear', '0001_initial'), (b'gear', '0002_auto_20150925_1031'), (b'gear', '0003_auto_20150925_2358'), (b'gear', '0004_auto_20150926_1047'), (b'gear', '0005_remove_gearitem_sessions')]

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('sport', '0016_auto_20150722_1630'),
    ]

    operations = [
        migrations.CreateModel(
            name='GearBrand',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=250)),
                ('official', models.BooleanField(default=False)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
            ],
        ),
        migrations.CreateModel(
            name='GearCategory',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=250)),
                ('official', models.BooleanField(default=False)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
            ],
        ),
        migrations.CreateModel(
            name='GearItem',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=250)),
                ('description', models.TextField()),
                ('start', models.DateTimeField(null=True, blank=True)),
                ('end', models.DateTimeField(null=True, blank=True)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('brand', models.ForeignKey(related_name='items', to='gear.GearBrand')),
                ('category', models.ForeignKey(related_name='items', to='gear.GearCategory')),
                ('sports', models.ManyToManyField(to=b'sport.Sport', blank=True)),
                ('user', models.ForeignKey(related_name='items', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # default=1 backfills existing rows with user pk 1 (one-off default).
        migrations.AddField(
            model_name='gearbrand',
            name='owner',
            field=models.ForeignKey(default=1, to=settings.AUTH_USER_MODEL),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='gearcategory',
            name='owner',
            field=models.ForeignKey(default=1, to=settings.AUTH_USER_MODEL),
            preserve_default=False,
        ),
        migrations.AlterModelOptions(
            name='gearbrand',
            options={'ordering': ('name',)},
        ),
        migrations.AlterModelOptions(
            name='gearcategory',
            options={'ordering': ('name',)},
        ),
        migrations.AlterModelOptions(
            name='gearitem',
            options={'ordering': ('user', 'category', 'brand', 'created')},
        ),
        migrations.AlterField(
            model_name='gearitem',
            name='brand',
            field=models.ForeignKey(related_name='items', verbose_name='Brand', to='gear.GearBrand'),
        ),
        migrations.AlterField(
            model_name='gearitem',
            name='category',
            field=models.ForeignKey(related_name='items', verbose_name='Category', to='gear.GearCategory'),
        ),
        migrations.AlterField(
            model_name='gearitem',
            name='description',
            field=models.TextField(verbose_name='Description'),
        ),
        migrations.AlterField(
            model_name='gearitem',
            name='end',
            field=models.DateTimeField(null=True, verbose_name='End usage date', blank=True),
        ),
        migrations.AlterField(
            model_name='gearitem',
            name='name',
            field=models.CharField(max_length=250, verbose_name='Equipment name'),
        ),
        migrations.AlterField(
            model_name='gearitem',
            name='sports',
            field=models.ManyToManyField(to=b'sport.Sport', verbose_name='Default sports', blank=True),
        ),
        migrations.AlterField(
            model_name='gearitem',
            name='start',
            field=models.DateTimeField(null=True, verbose_name='Start usage date', blank=True),
        ),
    ]
|
# Generated by Django 3.0.6 on 2020-05-24 05:43
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated initial migration for the `student` app.

    Creates the Department table and the Student table with a CASCADE
    foreign key to Department. Do not edit the operations by hand.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Department',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('dt_id', models.IntegerField()),
                ('dt_name', models.CharField(max_length=20)),
                ('dt_location', models.CharField(max_length=20)),
            ],
        ),
        migrations.CreateModel(
            name='Student',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('st_roll', models.IntegerField()),
                ('st_name', models.CharField(max_length=20)),
                ('st_sub', models.CharField(max_length=20)),
                ('st_join_date', models.DateTimeField(default=django.utils.timezone.now)),
                # Deleting a Department cascades to its Students.
                ('st_did', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='student.Department')),
            ],
        ),
    ]
|
from django.db import models
from enum import Enum
from django.db.models.deletion import CASCADE
# Create your models here.
class Quarto(models.Model):
    """A hotel room: identity, nightly price and amenities."""
    quarto_id = models.AutoField(primary_key=True)
    # Display name of the room.
    nome = models.CharField(blank=False, null=False, max_length=30)
    # Room number.
    numero = models.IntegerField(blank=False, null=False)
    # Price per night. NOTE(review): FloatField for money is lossy —
    # DecimalField is usually preferred; confirm before changing (schema change).
    preco = models.FloatField(blank=False, null=False)
    # Number of beds.
    camas = models.IntegerField(blank=False, null=False)
    # Whether the room has a minibar.
    frigobar = models.BooleanField(default=False, blank=False, null=False)

    def __str__(self):
        return self.nome
class Reserva(models.Model):
    """A booking of one room for a check-in/check-out date range.

    Deleting the room cascades to its reservations.
    """
    reserva_id = models.AutoField(primary_key=True)
    quarto = models.ForeignKey(Quarto, on_delete=CASCADE, blank=False, null=False)
    checkIn = models.DateField(blank=False, null=False)
    checkOut = models.DateField(blank=False, null=False)

    def __str__ (self):
        return str(self.reserva_id)
# class Disponibilidade(models.Model):
# ESCOLHAS = (
# ('Livre', 'Livre')
# )
# disponibilidade = models.CharField(max_length=5)
# class Situacao(Enum):
|
import string
import random
import tkinter as tk
# tkinter set-up: one window for the letter grid, one for the word list
root = tk.Tk()
frame = tk.Frame(root)
frame.pack()
root2 = tk.Tk()
words = tk.Frame(root2)
words.pack()
wordList = ['PROGRAMMING','CODE','ENCRYPTION','ALGORITHM','BOOLEAN','STRING','INTEGER','ARRAY','INTERNET','CIPHER','CAESAR','VARIABLES','DEBUGGING','FUNCTION','COMPRESSION','FILES','BINARY','BYTES','DATA','COMPRESSION','ENCODING','PACKETS','ROUTERS','INTERNET','ASYMMETRIC','SYMMETRIC','VIGENERE','DECRYPTING','CRACKING','LOOPS','PYTHON','JAVA','JAVASCRIPT','RUBY','BASIC','ASSEMBLY','LANGUAGE','COMPUTER','SOFTWARE','PROGRAMS','LIST','DATABASE','CLASS','OBJECT','MATRIX','THEORY','IMAGE','PACKETS','WHILELOOPS','ASCII','COMMAND','COMPILER','FLEMMING','EMULATOR','WINDOWS','DOS','SCRIPT','LINUX','MAC','FREEBSD', 'UBUNTU', 'ARCH', 'MINT', 'DEBIAN']
size = 25      # board is size x size cells
numWords = 25  # how many words to hide on the board
# String to hold current letters which are pressed in series
pressedWord = ''
prev = [0, 0]   # board coordinates of the previously clicked cell
route = [0, 0]  # (dx, dy) direction locked in by the second click
arr = [[0 for x in range(size)] for y in range(size)]
button = [[0 for x in range(size)] for y in range(size)]
# One label slot per hidden word. FIX: the original sized these with the
# comprehension `[0 for numWords in range(size)]`, which both shadowed
# `numWords` and used `size` as the length — it only worked because
# size == numWords.
check = [0 for _ in range(numWords)]
dictionary = [0 for _ in range(numWords)]
# 3 2 1
#  \|/
# 4--+--0
#  /|\
# 5 6 7
# All 8 possible directions (Number above corresponding to array index)
directionArr = [[1,0],[1,1],[0,1],[-1,1],[-1,0],[-1,-1],[0,-1],[1,-1]]
class square:
    """One cell of the word-search board.

    These are class-level defaults; `fill`/`colourWord` assign instance
    attributes on first write, so instances do not share state afterwards.
    """
    status = False  # True once the cell is part of a found (validated) word
    char = ''       # character displayed at this board location
    filled = False  # whether a placed word occupies this cell
# Puts word on board location (Called by wordPlace) after valid location is found
def fill(x, y, word, direction):
    """Write `word` onto the global board starting at (x, y), stepping by `direction`."""
    dx, dy = direction
    for offset, letter in enumerate(word):
        cell = arr[x + dx * offset][y + dy * offset]
        cell.char = letter
        cell.filled = True
# Picks a random word from wordList plus a random location and direction,
# retrying until the word fits without conflicting with letters already placed.
def wordPlace(j, dictionary):
    """Place the j-th hidden word, record it in `dictionary`/`check`, and
    return `dictionary`. Retries (iteratively, not recursively — the original
    recursion could hit the recursion limit on a crowded board) until a
    valid spot is found."""
    while True:
        word = random.choice(wordList)
        # FIX: choose among ALL 8 directions — the original used
        # randrange(0, 7), whose upper bound is exclusive, so direction
        # index 7 ([1, -1]) could never be selected.
        direction = random.choice(directionArr)
        # FIX: randrange's upper bound is exclusive, so range(0, size)
        # already covers 0..size-1; the original's (0, size-1) silently
        # excluded the last row/column as a starting cell.
        x = random.randrange(0, size)
        y = random.randrange(0, size)
        # The last letter lands at x + (len-1)*dx — the original tested
        # len(word) steps, wrongly rejecting words that end exactly on the
        # border.
        end_x = x + (len(word) - 1) * direction[0]
        end_y = y + (len(word) - 1) * direction[1]
        if not (0 <= end_x < size and 0 <= end_y < size):
            continue
        # Overlapping an already-placed word is allowed only on identical letters.
        conflict = False
        for i in range(len(word)):
            cell = arr[x + direction[0]*i][y + direction[1]*i]
            if cell.filled and cell.char != word[i]:
                conflict = True
                break
        if not conflict:
            break
    dictionary[j] = word
    check[j] = tk.Label(words, text = word,height = 1, width = 15, font=('None %d ' %(10)), anchor = 'c')
    check[j].grid()
    fill(x, y, word, direction)
    return dictionary
# Colours the just-selected cells: green when the word was valid (or a cell
# already belongs to a found word), otherwise back to the default background.
def colourWord(pressedWord, valid):
    """Repaint the selection ending at global `prev` along global `route`."""
    # `prev` is the LAST cell clicked, so walk backwards toward the start of
    # the selection: negate `route` in place (list mutation — no `global`
    # statement needed).
    route[0] *= -1
    route[1] *= -1
    for i in range(len(pressedWord)):
        # Cells of a valid word (and cells shared with previously found
        # words) stay green and are marked found via .status.
        if valid == True or arr[prev[0]+i*route[0]][prev[1]+i*route[1]].status == True:
            button[prev[0]+i*route[0]][prev[1]+i*route[1]].config(bg='lime green')
            arr[prev[0]+i*route[0]][prev[1]+i*route[1]].status = True
        elif(arr[prev[0]+i*route[0]][prev[1]+i*route[1]].status == False):
            button[prev[0]+i*route[0]][prev[1]+i*route[1]].config(bg= '#F0F0F0')
# Checks to see if the selected word is a valid word on the list
def checkWord():
    """Validate the currently selected letters and reset the selection."""
    global pressedWord
    if pressedWord in dictionary:
        # Strike the word out in the word-list window.
        check[int(dictionary.index(pressedWord))].configure(font=('None %d overstrike' %(10)))
        check[int(dictionary.index(pressedWord))].grid()
        dictionary[dictionary.index(pressedWord)] = '' #For cases when same word appears multiple times
        colourWord(pressedWord, True)
    else:
        colourWord(pressedWord, False)
    pressedWord = ''
    # NOTE(review): this rebinds a LOCAL `prev` (no `global` declaration), so
    # the module-level `prev` is untouched. Harmless today — buttonPress
    # resets `prev` on the first click of the next word — but confirm the
    # reset was intended to be global.
    prev = [0,0]
# Makes sure direction being clicked is consistent, also handles highlighting button clicked yellow
def buttonPress (x, y):
    """Handle a board-cell click, enforcing a straight-line letter selection."""
    global pressedWord, prev, route
    newPressed = [x, y]
    #Allows first click to be anywhere on board
    if(len(pressedWord) == 0):
        prev = newPressed
        print(prev)
        pressedWord = arr[x][y].char
        button[x][y].configure(bg='yellow')
    # Second click needs to be one of the 8 surrounding the initial click (Less for when first press is along wall boarder)
    # (squared deltas <= 1 means the cell is adjacent, incl. diagonals)
    elif(len(pressedWord) == 1 and (x - prev[0])**2 <= 1 and (y - prev[1])**2 <= 1 and newPressed != prev):
        pressedWord += arr[x][y].char
        button[x][y].configure(bg='yellow')
        # Lock in the selection direction for all subsequent clicks.
        route = [x-prev[0], y-prev[1]]
        prev = [x, y]
    # Uses direction defined by the second click to only allow presses to contiune in the direction that was initally clicked
    elif(len(pressedWord) > 1 and x - prev[0] == route[0] and y - prev[1] == route[1]):
        pressedWord += arr[x][y].char
        button[x][y].configure(bg='yellow')
        prev = [x,y]
# Creates the logical (size x size) grid of square objects backing the board
for x in range(size):
    for y in range(size):
        arr[x][y] = square()
# Places all words on the word search
for i in range(numWords):
    wordPlace(i, dictionary)
# Fills remaining space with random letters and creates the tkinter widgets
for y in range(size):
    for x in range(size):
        # Fills empty locations (only locations listed as empty after word placed)
        if(arr[x][y].filled == False):
            #arr[x][y].char = ' ' # Used for debugging (does not fill in random letters, making it easier to find valid words
            arr[x][y].char = random.choice(string.ascii_uppercase)
        # lambda default args bind the CURRENT x/y for each button's callback
        button[x][y] = tk.Button(frame, text = arr[x][y].char, bg= '#F0F0F0', width=2, height=1, command=lambda x=x, y=y: buttonPress(x, y))
        button[x][y].grid(row=x, column=y)
checkW = tk.Button(words, text = "check Word", height = 1, width = 15, anchor = 'c', command = checkWord)
checkW.grid()
root.title("Word Search Board")
root2.title("Word List")
root.mainloop()
root2.mainloop()
|
# -*- coding: utf-8 -*-
#JI-69
# Calculator face drawn on a Tk canvas. Every create_rectangle call takes
# (x1, y1, x2, y2) corner coordinates; most keys are 50x25 px, laid out on a
# 62.5 px horizontal pitch and 37.5 px vertical pitch.
from Tkinter import *
root = Tk()
drawpad = Canvas(root, width=400,height=725, background="grey")
drawpad.grid(row=0, column=1)
# The calculator display area
screen = drawpad.create_rectangle(50,50,350,250, fill = "white")
# Digit buttons 0-9
button0 = drawpad.create_rectangle(112.5,650,162.5,675, fill = "white")
button1 = drawpad.create_rectangle(112.5,612.5,162.5,637.5, fill = "white")
button2 = drawpad.create_rectangle(175,612.5,225,637.5, fill = "white")
button3 = drawpad.create_rectangle(237.5,612.5,287.5,637.5, fill = "white")
button4 = drawpad.create_rectangle(112.5,575,162.5,600, fill = "white")
button5 = drawpad.create_rectangle(175,575,225,600, fill = "white")
button6 = drawpad.create_rectangle(237.5,575,287.5,600, fill = "white")
button7 = drawpad.create_rectangle(112.5,537.5,162.5,562.5, fill = "white")
button8 = drawpad.create_rectangle(175,537.5,225,562.5, fill = 'white')
button9 = drawpad.create_rectangle(237.5,537.5,287.5,562.5, fill= 'white')
# Operator and function keys
buttonENTER = drawpad.create_rectangle(237.5,650,350,675, fill='white')
buttonplus = drawpad.create_rectangle(300,612.5,350,637.5, fill='white')
buttonminus = drawpad.create_rectangle(300,575,350,600, fill='white')
buttonmultiply = drawpad.create_rectangle(300,537.5,350,562.5, fill='white')
buttondivide = drawpad.create_rectangle(300,500,350,525, fill='white')
buttonexponent = drawpad.create_rectangle(300,462.5,350,487.5, fill='white')
buttonbigarrow = drawpad.create_rectangle(300,425,350,450, fill='white')
buttonparenthesis = drawpad.create_rectangle(237.5,500,287.5,525, fill='white')
buttonTAN = drawpad.create_rectangle(237.5,462.5,287.5,487.5, fill='white')
buttoni = drawpad.create_rectangle(237.5,425,287.5,450, fill='white')
buttondot = drawpad.create_rectangle(175,650,225,675, fill='white')
buttonE = drawpad.create_rectangle(175,500,225,525, fill='white')
buttonCOS = drawpad.create_rectangle(175,462.5,225,487.5, fill='white')
button1dividedbyx = drawpad.create_rectangle(175,425,225,450, fill='white')
buttonRCL = drawpad.create_rectangle(175,387.5,225,412.5, fill='white')
buttonGTO = drawpad.create_rectangle(175,350,225,375, fill='white')
# NOTE(review): right edge 162 (not 162.5) makes this key half a pixel
# narrower than its column — confirm whether that is intentional
buttonsigmaplus = drawpad.create_rectangle(112.5,500,162,525, fill='white')
buttonSIN = drawpad.create_rectangle(112.5,462.5,162.5,487.5, fill='white')
buttonyexponentx = drawpad.create_rectangle(112.5,425,162.5,450, fill='white')
buttonxleftarrowrightarrowy = drawpad.create_rectangle(112.5,387.5,162.5,412.5, fill='white')
buttonMODE = drawpad.create_rectangle(112.5,350,162.5,375, fill='white')
buttonsigman = drawpad.create_rectangle(50,650,100,675, fill = 'white')
buttonCLEAR = drawpad.create_rectangle(50,612.5,100,637.5, fill = 'white')
buttonrightarrow = drawpad.create_rectangle(50,575,100,600, fill = '#3300FF')
buttonleftarrow = drawpad.create_rectangle(50,537.5,100,562.5, fill = '#EDDF1A')
buttonSERIES = drawpad.create_rectangle(50,500,100,525, fill = 'white')
buttonplusslashminus = drawpad.create_rectangle(50,462.5,100,487.5, fill = 'white')
buttonradicalx = drawpad.create_rectangle(50,425,100,450, fill = 'white')
buttonrdownarrow = drawpad.create_rectangle(50,387.5,100,412.5, fill = 'white')
buttonrslashs = drawpad.create_rectangle(50,350,100,375, fill= 'white')
# Big directional arrow pad (grey)
buttonbigarrowup = drawpad.create_rectangle(287.5,337.5,337.5,363.5, fill = '#DCDCDE')
buttonbigarrowleft = drawpad.create_rectangle(237.5,350,275,400, fill = '#DCDCDE')
buttonbigarrowright = drawpad.create_rectangle(350,350,387.5,400, fill = '#DCDCDE')
buttonbigarrowdown = drawpad.create_rectangle(287.5,387.5,337.5,412.5, fill = '#DCDCDE')
class MyApp:
    """Calculator window controller.

    Adds two demo buttons in a frame and flashes any clicked canvas item
    blue for 200 ms before restoring its colour.
    """
    def __init__(self, parent):
        global drawpad
        self.myParent = parent
        self.myContainer1 = Frame(parent)
        self.myContainer1.pack()
        self.button1 = Button(self.myContainer1)
        self.button1.configure(text="1", background= "white")
        self.button1.grid(row=0,column=0)
        self.button2 = Button(self.myContainer1)
        self.button2.configure(text="2", background= "white")
        self.button2.grid(row=0,column=1)
        drawpad.bind("<Button-1>", self.click)
        drawpad.pack()
    def click(self, event):
        """Flash the clicked canvas item blue, then restore its own colour."""
        if drawpad.find_withtag(CURRENT):
            # BUG FIX: remember the item's original fill; the old code always
            # restored to "white", wiping the grey/blue/yellow keys
            original_fill = drawpad.itemcget(CURRENT, "fill")
            drawpad.itemconfig(CURRENT, fill="blue")
            drawpad.update_idletasks()
            drawpad.after(200)
            drawpad.itemconfig(CURRENT, fill=original_fill)
# Instantiate the app and enter the Tk event loop
myapp = MyApp(root)
root.mainloop()
# -*- coding:utf-8 -*-
#!python3
'''
auth : yi.chen
date :
desc : CART
'''
import pickle
import numpy as np
from DecisionTree.treePlotter import treePlotter
def loadDataSet(filename):
    """
    Load a tab-separated numeric data file.
    :param filename: path of the data file
    :return: dataMat — list of rows, each a list of floats
    """
    dataMat = []
    # 'with' guarantees the handle is closed (the old code leaked it)
    with open(filename) as fr:
        for line in fr:
            cutLine = line.strip().split('\t')
            # BUG FIX: on Python 3, map() returns a lazy iterator; materialise
            # it so every row is a real list of floats
            floatLine = list(map(float, cutLine))
            dataMat.append(floatLine)
    return dataMat
def binarySplitData(dataSet, feature, value):
    """
    Split dataSet into two subsets on one feature at the given value.
    :param dataSet: np.matrix of samples (last column is the target)
    :param feature: column index to split on
    :param value: threshold value
    :return: (matLeft, matRight) — rows with feature <= value / feature > value
    """
    # BUG FIX: the old code used dataSet[:feature] (a ROW slice, not the
    # feature column) and ':' instead of ',' after the selected row indices
    matLeft = dataSet[np.nonzero(dataSet[:, feature] <= value)[0], :]
    matRight = dataSet[np.nonzero(dataSet[:, feature] > value)[0], :]
    return matLeft, matRight
#-------------------------------回归树所需子函数---------------------------------#
def regressLeaf(dataSet):
    """
    Leaf value for a regression tree: the mean of the target column.
    :param dataSet: np.matrix whose LAST COLUMN is the target
    :return: mean of the last column
    """
    # BUG FIX: dataSet[:-1] drops the last ROW; the target is the last COLUMN
    return np.mean(dataSet[:, -1])
def regressErr(dataSet):
    """
    Total squared error of the target column around its mean.

    A regression-tree leaf predicts the mean, so the squared error of the
    subset is simply variance * sample count.
    :param dataSet: np.matrix whose last column is the target
    :return: sum of squared deviations of the last column
    """
    sampleCount = np.shape(dataSet)[0]
    return np.var(dataSet[:, -1]) * sampleCount
def regressData(fileName):
    """
    Load a pickled data set from disk.
    :param fileName: path of the pickle file
    :return: the unpickled object
    """
    # BUG FIX: pickle data is binary — open with 'rb' (text mode fails on
    # Python 3) and close the handle via 'with'
    with open(fileName, 'rb') as fr:
        return pickle.load(fr)
def choosBestSplit(dataSet, leafType=regressLeaf, errType=regressErr,
                   threshold=(1, 4)):
    """
    Find the best binary split of dataSet.
    :param dataSet: np.matrix, last column is the target
    :param leafType: leaf-building function (regressLeaf / modelLeaf)
    :param errType: error function (regressErr / modelErr)
    :param threshold: (minimum error reduction, minimum samples per branch)
    :return: (featureIndex, featureValue), or (None, leaf) when no
             worthwhile split exists
    """
    thresholdErr, thresholdSamples = threshold
    # All targets identical: nothing to split, return a leaf
    if len(set(dataSet[:, -1].T.tolist()[0])) == 1:
        return None, leafType(dataSet)
    m, n = np.shape(dataSet)
    err = errType(dataSet)
    bestErr = np.inf
    bestFeatureIndex = 0
    bestFeatureValue = 0
    for featureIndex in range(n - 1):
        # Iterate each DISTINCT scalar value once — the old code iterated
        # 1x1 sub-matrices and re-tested duplicated values
        for featureValue in set(dataSet[:, featureIndex].T.tolist()[0]):
            matLeft, matRight = binarySplitData(dataSet, featureIndex, featureValue)
            # Skip splits that leave too few samples on either side
            if (np.shape(matLeft)[0] < thresholdSamples or
                    np.shape(matRight)[0] < thresholdSamples):
                continue
            tempErr = errType(matLeft) + errType(matRight)
            if tempErr < bestErr:
                bestErr = tempErr
                bestFeatureIndex = featureIndex
                bestFeatureValue = featureValue
    # If the best split barely reduces the error, keep a leaf instead
    if (err - bestErr) < thresholdErr:
        return None, leafType(dataSet)
    matLeft, matRight = binarySplitData(dataSet, bestFeatureIndex, bestFeatureValue)
    # Guard again: the chosen split may still violate the sample threshold
    if (np.shape(matLeft)[0] < thresholdSamples or
            np.shape(matRight)[0] < thresholdSamples):
        return None, leafType(dataSet)
    return bestFeatureIndex, bestFeatureValue
def creatCARTTree(dataSet, leafType=regressLeaf, errType=regressErr, threshold=(1, 4)):
    """
    Recursively grow a CART tree.
    :param dataSet: data set (np.matrix, last column is the target)
    :param leafType: regressLeaf (regression tree) or modelLeaf (model tree)
    :param errType: matching error function
    :param threshold: user-defined (min error reduction, min samples) pair
    :return: nested dict tree, or a leaf value when no split is worthwhile
    """
    feature, value = choosBestSplit(dataSet, leafType, errType, threshold)
    # No worthwhile split (or all targets equal): value already is the leaf
    if feature is None:
        return value
    returnTree = {}
    returnTree['bestSplitFeature'] = feature
    returnTree['bestSplitFeatValue'] = value
    leftSet, rightSet = binarySplitData(dataSet, feature, value)
    returnTree['left'] = creatCARTTree(leftSet, leafType, errType, threshold)
    # BUG FIX: the right subtree must be grown from the RIGHT subset —
    # the old code recursed on leftSet twice and never terminated correctly
    returnTree['right'] = creatCARTTree(rightSet, leafType, errType, threshold)
    return returnTree
#--------------------------------回归树剪枝函数------------------------------------------
def isTree(obj):
    """Return True when obj is an internal tree node (stored as a dict)."""
    # isinstance is the idiomatic type check and also accepts dict subclasses
    return isinstance(obj, dict)
def getMean(tree):
    """
    Collapse a (sub)tree to one value: the average of its two branches,
    computed bottom-up. Mutates the tree in place (used while pruning).
    :param tree: tree node dict
    :return: mean prediction of the collapsed subtree
    """
    left = getMean(tree['left']) if isTree(tree['left']) else tree['left']
    right = getMean(tree['right']) if isTree(tree['right']) else tree['right']
    tree['left'] = left
    tree['right'] = right
    return (left + right) / 2.0
def prune(tree, testData):
    """
    Post-prune a regression tree against held-out data: merge any pair of
    leaves whose merged squared error on testData is lower.
    :param tree: tree node dict (modified in place)
    :param testData: np.matrix of test samples (last column is the target)
    :return: the pruned tree, or a single mean value when fully collapsed
    """
    # No test samples reach this node: collapse it to its mean
    if np.shape(testData)[0] == 0:
        return getMean(tree)
    if isTree(tree['left']) or isTree(tree['right']):
        # BUG FIX: binarySplitData needs the split feature AND value —
        # the old code passed tree['right'] as the feature argument
        leftTestData, rightTestData = binarySplitData(testData,
                                                      tree['bestSplitFeature'],
                                                      tree['bestSplitFeatValue'])
        # Recurse into subtrees with their matching test subsets
        if isTree(tree['left']):
            tree['left'] = prune(tree['left'], leftTestData)
        if isTree(tree['right']):
            tree['right'] = prune(tree['right'], rightTestData)
    # Both children are leaves now: compare merged vs un-merged test error
    if not isTree(tree['left']) and not isTree(tree['right']):
        # BUG FIX: the stored key is 'bestSplitFeatValue' (see creatCARTTree),
        # not 'bestFeatureValue'
        leftTestData, rightTestData = binarySplitData(testData,
                                                      tree['bestSplitFeature'],
                                                      tree['bestSplitFeatValue'])
        errNoMerge = (sum(np.power(leftTestData[:, -1] - tree['left'], 2)) +
                      sum(np.power(rightTestData[:, -1] - tree['right'], 2)))
        # BUG FIX: the target is the LAST column, not column 1
        errorMerge = sum(np.power(testData[:, -1] - getMean(tree), 2))
        if errorMerge < errNoMerge:
            print("merging")
            return getMean(tree)
        else:
            return tree
    else:
        return tree
#--------------------------------回归树剪枝函数 END-----------------------------------------
#--------------------------------模型树子函数------------------------------------------
def linearSolve(dataSet):
    """
    Ordinary least squares fit for a model-tree node.
    :param dataSet: np.matrix; features in the first n-1 columns, target last
    :return: (ws, X, Y) — weights (intercept first), design matrix, targets
    :raises NameError: when X.T * X is singular
    """
    m, n = np.shape(dataSet)
    X = np.mat(np.ones((m, n)))   # column 0 stays 1 for the intercept
    # BUG FIX: np.ones takes a shape TUPLE — np.ones(m, 1) raised a TypeError
    Y = np.mat(np.ones((m, 1)))
    X[:, 1:n] = dataSet[:, 0:(n - 1)]
    Y = dataSet[:, -1]
    xTx = X.T * X
    if np.linalg.det(xTx) == 0:
        raise NameError('This matrix is singular ,cannot do inverse, try increasing the second value of threashold')
    ws = xTx.I * (X.T * Y)
    return ws, X, Y
def modelLeaf(dataSet):
    """Leaf value for a model tree: the fitted linear regression weights."""
    weights, _, _ = linearSolve(dataSet)
    return weights
def modelErr(dataSet):
    """Squared error of the linear model fitted on dataSet."""
    ws, X, Y = linearSolve(dataSet)
    residuals = Y - X * ws
    return sum(np.power(residuals, 2))
#-------------------------------------------模型树子函数END-------------------------------------------------------
#-------------------------------------------CART预测子函数-------------------------------------------------------
def regressEvaluation(tree, inputData):
    """Prediction of a regression-tree leaf: the stored constant itself.

    inputData is ignored; the parameter exists so the signature mirrors
    modelTreeEvaluation and the two are interchangeable as modelEval.
    """
    return float(tree)
def modelTreeEvaluation(model, inputData):
    """
    Prediction of a model-tree leaf: the linear model applied to one sample.
    :param model: (n+1) x 1 weight matrix, intercept first
    :param inputData: 1 x n np.matrix of features
    :return: float prediction
    """
    # BUG FIX: np.shape returns a TUPLE; take the feature count explicitly
    # (the old `n + 1` on a tuple raised a TypeError)
    n = np.shape(inputData)[1]
    X = np.mat(np.ones((1, n + 1)))   # leading 1 multiplies the intercept
    X[:, 1:1 + n] = inputData
    return float(X * model)
def treeForceCast(tree, inputData, modelEval=regressEvaluation):
    """
    Predict one sample by walking the tree down to a leaf.
    :param tree: tree node dict or leaf value
    :param inputData: single-sample feature matrix
    :param modelEval: leaf evaluation function (regress / model tree)
    :return: prediction for the sample
    """
    if not isTree(tree):
        return modelEval(tree, inputData)
    # BUG FIX: the split value is stored under 'bestSplitFeatValue'
    # (the old key 'bestSplitFeatureValue' never existed -> KeyError)
    if inputData[tree['bestSplitFeature']] <= tree['bestSplitFeatValue']:
        branch = tree['left']
    else:
        branch = tree['right']
    if isTree(branch):
        return treeForceCast(branch, inputData, modelEval)
    return modelEval(branch, inputData)
def createForceCast(tree, testData, modelEval=regressEvaluation):
    """
    Predict every sample in testData.
    :param tree: CART tree
    :param testData: iterable of samples
    :param modelEval: leaf evaluation function
    :return: m x 1 np.matrix of predictions
    """
    m = len(testData)
    yHat = np.mat(np.zeros((m, 1)))
    for i in range(m):
        # BUG FIX: store each prediction in its own row — the old code
        # rebound yHat to a scalar and returned only the last prediction
        yHat[i, 0] = treeForceCast(tree, testData[i], modelEval)
    return yHat
#--------------------------------------CART预测子函数END-------------------------------------------------------
# Library module: nothing to run from the command line yet
if __name__ == '__main__':
    pass
|
from __future__ import absolute_import
import pytest
import doctest
import os
import numpy as np
import pandas as pd
import neurokit as nk
run_tests_in_local = False
#==============================================================================
# BIO
#==============================================================================
def test_read_acqknowledge():
    # Resolve the fixture path: CI (travis) runs from the repo root,
    # a local run expects a relative "data/" folder
    if run_tests_in_local is False:
        data_path = os.getcwdu() + ur"/data/test_bio_data.acq"  # If running from travis
    else:
        data_path = u"data/test_bio_data.acq"  # If running in local
    # Read data
    df, sampling_rate = nk.read_acqknowledge(data_path, return_sampling_rate=True)
    # Resample to 100Hz ("10L" = 10 millisecond bins)
    df = df.resample(u"10L").mean()
    df.columns = [u'ECG', u'EDA', u'PPG', u'Photosensor', u'RSP']
    # Check length against the known fixture size
    assert len(df) == 35645
    return(df)
# ---------------
def test_bio_process():
    # Reuse the dataframe loaded (and validated) by test_read_acqknowledge
    df = test_read_acqknowledge()
    if run_tests_in_local is False: # If travis
        ecg_quality_model = os.path.abspath('..') + ur"/neurokit/materials/heartbeat_classification.model"
    else: # If local
        ecg_quality_model = u"default"
    bio = nk.bio_process(ecg=df[u"ECG"], rsp=df[u"RSP"], eda=df[u"EDA"], ecg_sampling_rate=100, rsp_sampling_rate=100,eda_sampling_rate=100, add=df[u"Photosensor"], ecg_quality_model=ecg_quality_model, age=24, sex=u"m", position=u"supine")
    # One entry per processed signal family (ECG, RSP, EDA, add)
    assert len(bio) == 4
    return(bio)
# Allow running this test module directly
if __name__ == u'__main__':
    # nose.run(defaultTest=__name__)
    doctest.testmod()
    pytest.main()
|
import gevent
from common import *
class TestResult(BaseTestCase):
    """Integration tests for acknowledging message results via the /pull endpoint."""

    def test_ack_1_message(self):
        # Post one message, pull it, then acknowledge its result
        self.assertEqual(self.post(['foo']), [1])
        messages=self.pull()
        self.assertDictContainsSubset({'id':1,'message':'foo'}, messages[0])
        # Pulling marks the message as 'processing' for this receiver
        self.assertInDatabase(
            'queue1_rst',
            {
                'm_id':1,
                'receiver':'receiver1',
                'status':'processing',
                'fail_count':0,
                'result':None
            }
        )
        data = {
            'receiver': 'receiver1',
            'results': {
                'queue1': {
                    '1':'done'
                }
            }
        }
        r=s.post('/pull', json=data)
        self.assertEqual(r.status_code, 200)
        # The acknowledged message is now 'finished' with its result stored
        self.assertInDatabase(
            'queue1_rst',
            {
                'm_id':1,
                'receiver':'receiver1',
                'status':'finished',
                'fail_count':0,
                'result':'done'
            }
        )
    def test_ack_2_messages(self):
        # Acknowledge several pulled messages in a single /pull request
        self.assertEqual(self.post(['foo', 'bar', 'baz']), [1, 2, 3])
        messages=self.pull(max_count=2)
        self.assertDictContainsSubset({'id':1,'message':'foo'}, messages[0])
        self.assertDictContainsSubset({'id':2,'message':'bar'}, messages[1])
        data = {
            'receiver': 'receiver1',
            'results': {
                'queue1': {
                    '1':'ok',
                    '2':'received'
                }
            }
        }
        r=s.post('/pull', json=data)
        self.assertEqual(r.status_code, 200)
        # Both results are persisted independently
        self.assertInDatabase(
            'queue1_rst',
            {
                'm_id':1,
                'receiver':'receiver1',
                'result':'ok'
            }
        )
        self.assertInDatabase(
            'queue1_rst',
            {
                'm_id':2,
                'receiver':'receiver1',
                'result':'received'
            }
        )
    def test_pull_and_ack(self):
        """acknowledge result and pull new message at the same time
        [description]
        """
        self.assertEqual(self.post(['foo', 'bar']), [1, 2])
        messages=self.pull()
        self.assertDictContainsSubset({'id':1,'message':'foo'}, messages[0])
        # A single request: ack message 1 AND request the next message
        data = {
            'receiver': 'receiver1',
            'queues': {
                'queue1': {}
            },
            'results': {
                'queue1': {
                    '1':'received',
                }
            }
        }
        r=s.post('/pull', json=data)
        self.assertEqual(r.status_code, 200)
        messages = r.json()['messages']['queue1']
        self.assertLessEqual(len(messages), 1)
        self.assertDictContainsSubset({'id':2,'message':'bar'}, messages[0])
        # Message 1 is acknowledged, message 2 is now being processed
        self.assertInDatabase(
            'queue1_rst',
            {
                'm_id':1,
                'receiver':'receiver1',
                'result':'received'
            }
        )
        self.assertInDatabase(
            'queue1_rst',
            {
                'm_id':2,
                'receiver':'receiver1',
                'status':'processing',
            }
        )
|
import argparse
import pandas as pd
from tkinter import *
from tkinter import ttk
from tkinter import scrolledtext
import os
from pydub import AudioSegment
from pydub.playback import play
import threading
import logging
from datetime import datetime
class PlayAudioSample(threading.Thread):
    """Plays the sound of an audio sample in its own thread."""

    def __init__(self, file_name):
        """Remember which wav file this thread should play."""
        super().__init__()
        self.file_name = file_name

    def run(self):
        """Load the wav file and play it (blocking, hence the thread)."""
        play(AudioSegment.from_wav(self.file_name))
if __name__ == "__main__":
    parser = argparse.ArgumentParser("""manually completes the transcriptions of audio files""")
    parser.add_argument("--audio_folder", help="folder that contains the audio files", required=True)
    parser.add_argument("--csv", help="name of the csv file that is to be filled with the files transcriptions", required=True)
    parser.add_argument("--start_offset", help="offset of the first file to consider", default=0, type=int)
    args = parser.parse_args()
    # Every session logs its transcriptions into a timestamped file under logs/
    logdir = "logs"
    if not os.path.isdir(logdir):
        os.mkdir(logdir)
    logging.basicConfig(filename=os.path.join(logdir, f'{args.csv}_{str(datetime.now()).replace(" ", "_")}.log'), level=logging.DEBUG)
    # 'file' and 'sentence' forced to string dtype so empty cells stay strings
    files = pd.read_csv(args.csv, sep=";", dtype = {'file': "string", 'sentence': "string"})
    offsets_deleted_sentences = []
    window = Tk()
    window.title(f"Transcription of {args.csv}")
    WIN_SIZE = 1200
    window.geometry(f'{WIN_SIZE}x500')
    instructions = Label(window, text="Audio files will be played automatically, transcribe them in the text area, then press ctrl-n to get to the next sample, ctrl-d to delete the current sample or ctrl-r to repeat the sample")
    instructions.grid(row=0, columnspan=3)
    transcription = scrolledtext.ScrolledText(window, width=130, height=20)
    transcription.grid(row=1, columnspan=3, pady=30)
def prepare_next_turn():
"""loads next file or ends the program"""
global current_offset, audio_player
current_offset += 1
progress_bar["value"] = current_offset
if current_offset < len(files):
transcription.delete("1.0", END)
sent = files.sentence.iat[current_offset]
if isinstance(sent, str) and sent != "":
transcription.insert("1.0", sent)
audio_player = PlayAudioSample(os.path.join(args.audio_folder, files.file.iat[current_offset])).start()
transcription.focus()
else:
window.destroy()
def press_next():
"""modifies csv with text content and prepares for next turn"""
files.iat[current_offset, 1] = transcription.get("1.0", END).replace("\n", "")
logging.info(f"{current_offset} - {files.iat[current_offset, 1]}")
prepare_next_turn()
def press_delete():
"""adds current phrase offset to instance of deleted phrases and prepares for next turns"""
offsets_deleted_sentences.append(current_offset)
logging.info(f"{current_offset} deleted")
prepare_next_turn()
def press_repeat():
"""repeats the previous audio file"""
PlayAudioSample(os.path.join(args.audio_folder, files.file.iat[current_offset])).start()
    # Mouse buttons and keyboard shortcuts trigger the same three actions
    button_delete = Button(window, text="Delete", command=press_delete, bg="red")
    button_delete.grid(row=2, column=0)
    button_repeat = Button(window, text="Repeat", command=press_repeat, bg="blue")
    button_repeat.grid(row=2, column=1)
    button_next = Button(window, text="Next", command=press_next, bg="green")
    button_next.grid(row=2, column=2)
    window.bind('<Control-d>', lambda _: press_delete())
    window.bind('<Control-r>', lambda _: press_repeat())
    window.bind('<Control-n>', lambda _: press_next())
    progress_bar = ttk.Progressbar(window, style='blue.Horizontal.TProgressbar', length=WIN_SIZE, maximum=len(files))
    progress_bar.grid(row=4, columnspan=3)
    window.grid_rowconfigure(3, weight=1) # so that pbar is at the bottom
    current_offset = args.start_offset - 1 # will be incremented by prepare_next_turn
    prepare_next_turn()
    window.mainloop()
    # After the UI closes: delete the wav files marked during the session
    for i in offsets_deleted_sentences:
        file_name = os.path.join(args.audio_folder, files.file.iat[i])
        os.remove(file_name)
        print(f"{file_name} was deleted")
    # Drop the deleted rows from the dataframe as well
    index_to_keep = [i for i in range(len(files)) if i not in set(offsets_deleted_sentences)]
    files = files.iloc[index_to_keep]
    # save modified csv
    print("Save modified csv file")
    files.to_csv(args.csv, sep=";", index=False)
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import calendar
from datetime import datetime
def ut(date_time):
    """Convert a (naive, UTC) datetime to a Unix timestamp in seconds.

    Sub-second precision is discarded by timetuple().
    """
    utc_tuple = date_time.timetuple()
    return calendar.timegm(utc_tuple)
def dt(unix_time):
    """Convert a Unix timestamp (int, float or numeric string) into a
    naive UTC datetime."""
    seconds = float(unix_time)
    return datetime.utcfromtimestamp(seconds)
|
#!/usr/bin/python3
# This decrypts the protected µCTF Hollywood code ("stage2") and writes it to a file suitable for opening with IDA
# We re-implement the Hollywood function that decrypts code before execution
# If X is the address of the value to decrypt, we use X+2 as the key and *X as the value to decrypt
# Some unit tests are included, because the web emulator doesn't match the MSP430 spec I have wrt flags, so this works empirically
import struct
from operator import add
# Format a 16-bit word as a zero-padded hex literal, e.g. 0x04D2
def fmtw(w):
    return '0x{:04X}'.format(w)
# True when x, interpreted as a signed 16-bit value, is non-negative
def positive(x):
    sign_boundary = 0x8000
    return x < sign_boundary
# add x to y as 16-bit words, returns (sr, result).
# sr bit layout (matching the MSP430 status-register bits used here):
#   bit 8 = V (overflow, forced to 0 below), bit 2 = N, bit 1 = Z, bit 0 = C
def add(x, y):
    r = x+y
    v=0
    c = r>=0x10000  # carry out of bit 15
    r %= 0x10000
    # signed overflow: both operands share a sign the result does not have
    if positive(x) and positive(y) and not positive(r):
        v=1
    if not positive(x) and not positive(y) and positive(r):
        v=1
    n=not positive(r)
    z=r==0
    v=0 # The spec says we need to take V into account, but the web emulator never does ?
    sr = 0x100*v + 4*n + 2*z + c
    return sr, r
# subtract x from y (y - x), returns (sr, result)
def sub(x, y):
    # negate x via two's complement (one's complement + 1), masked to 16 bits
    x = (((~x)&0xFFFF) + 1) & 0xFFFF
    return add(x, y)
# decimal (BCD) addition, returns (sr, result).
# NOTE: tuned empirically against the web emulator (see the test vectors
# below) rather than derived from the MSP430 DADD specification.
def bcd_add(a, b, sr=None):
    if sr is None: sr=0
    h = [0, 0, 0, 0]   # per-nibble +6 BCD corrections
    c = [0, 0, 0, 0]   # per-nibble carries, index 0 = most significant
    f = lambda n : n# n+6 if n>=0xA else n
    # split both operands into four 4-bit nibbles, most significant first
    a = [f(a>>12&0xF), f(a>>8&0xF), f(a>>4&0xF), f(a>>0&0xF)]
    b = [f(b>>12&0xF), f(b>>8&0xF), f(b>>4&0xF), f(b>>0&0xF)]
    for i in range(4):
        # any nibble pair that would exceed 9 gets the classic +6 correction
        if a[i]>9 or b[i]>9 or a[i]+b[i]>9:
            h[i]=6
    r = [sum(x) for x in zip(a, b)]
    r = [sum(x) for x in zip(r, h)]
    # propagate carries from the least significant nibble upwards
    for i in [3,2,1,0]:
        #print('r['+str(i)+'] is '+fmtw(r[i])+', c['+str(i)+'] is '+fmtw(c[i]))
        if i<3 and c[i+1]:
            if r[i]==0x9: r[i]=0x10
            else: r[i]+=1
        while r[i]>=0x10:
            c[i]=1
            r[i] -= 0x10
        #print('r['+str(i)+'] is '+fmtw(r[i])+', c['+str(i)+'] is '+fmtw(c[i]))
    r = (r[0]<<12) + (r[1]<<8) + (r[2]<<4) + (r[3]<<0)
    sr &= 0xFFFE # We only affect the C flag and ignore the rest
    if c[0]: sr |= 1 # Set the carry on decimal overflow
    return sr, r
def cryptWord(addr, value):
    """Decrypt one word: derive the keystream word from the (byte-swapped)
    address through a fixed chain of add/sub/DADD/rotate steps, then XOR it
    with the encrypted value. Mirrors the Hollywood decryption routine."""
    key=addr
    # byte-swap the address
    key=struct.unpack("<H", struct.pack(">H", key))[0]
    sr, key = sub(0x4D2, key)
    sr, key = add(sr, key)
    key = (0x8000*(sr&1)) | (key>>1)   # rotate right through carry (RRC)
    sr, key = bcd_add(key,addr,sr)
    # arithmetic shift right: replicate the sign bit
    if positive(key): key >>= 1
    else: key = (0x8000) | (key>>1)
    key = (0x8000*(sr&1)) | (key>>1)
    sr, key = bcd_add(key,0x3C01, sr)
    sr, key = add(sr, key)
    key = (0x8000*(sr&1)) | (key>>1)
    sr, key = add(0x100E, key)
    newsr=key&1   # the bit shifted out becomes the next carry
    key = (0x8000*(sr&1)) | (key>>1)
    sr=newsr
    key = (0x8000*(sr&1)) | (key>>1)
    value ^= key
    return value;
def testDADD(x, y, e):
    """Check bcd_add(x, y) against the expected value e, printing a tagged
    [OK ]/[FAIL] line (the two print branches were duplicated before)."""
    r = bcd_add(x, y)[1]
    status = '[OK ]' if r == e else '[FAIL]'
    print(status + ' DADD result of ' + fmtw(x) + '+' + fmtw(y) + ' is ' + fmtw(r) + ', expected ' + fmtw(e))
def testCrypt(x, y, e):
    """Check cryptWord(x, y) against the expected value e, printing a tagged
    [OK ]/[FAIL] line (the two print branches were duplicated before)."""
    r = cryptWord(x, y)
    status = '[OK ]' if r == e else '[FAIL]'
    print(status + ' Crypt result of ' + fmtw(x) + ',' + fmtw(y) + ' is ' + fmtw(r) + ', expected ' + fmtw(e))
# Test vectors
def test():
    """Empirical test vectors captured from the web emulator: first the raw
    DADD behaviour, then full keystream words at known addresses."""
    testDADD(0, 0, 0)
    testDADD(2, 2, 4)
    testDADD(8, 8, 0x16)
    testDADD(0xA, 0, 0x10)
    testDADD(0xD, 0, 0x13)
    testDADD(0x10, 0, 0x10)
    testDADD(0xAA, 0, 0x110)
    testDADD(0x21, 0x33, 0x54)
    testDADD(0x160E, 0x04A2, 0x2116)
    testDADD(0xF, 0x0, 0x15)
    testDADD(0xF, 0x4, 0x19)
    testDADD(0xF, 0x5, 0x1A)
    testDADD(0xF, 0xA, 0x1F)
    testDADD(0xDD0, 0, 0x1430)
    testDADD(0x3C01, 0, 0x4201)
    testDADD(0xF00, 0x100, 0x1600)
    testDADD(0x3F00, 0x100, 0x4600)
    testDADD(0x3F00, 0xA00, 0x4F00)
    testDADD(0x3C01, 0xDD0, 0x4031)
    testDADD(0xC, 0xE, 0x10)
    testDADD(0xF, 0x1, 0x16)
    testDADD(0xC, 0xDD, 0x14f)
    testDADD(0xC0, 0xDD, 0x103)
    testDADD(0x1299, 0x3C01, 0x5500)
    testDADD(0x76A4, 0x16F2, 0x93F6)
    testDADD(0x3C01, 0x653E, 0x0745)
    testDADD(0x3C01, 0x1A28, 0x5C29)
    # cryptWord vectors: (address, encrypted word, expected plaintext word)
    testCrypt(0x160E, 0x0F7D, 0x8231)
    testCrypt(0x1610, 0x4D68, 0x403C)
    testCrypt(0x1612, 0xC482, 0x49EA)
    testCrypt(0x1614, 0xC064, 0x4D00)
    testCrypt(0x1616, 0x0DF9, 0x8095)
    testCrypt(0x1618, 0x9D40, 0x9034)
    testCrypt(0x161A, 0xF339, 0xFEB1)
    testCrypt(0x1858, 0x4EB4, 0x4032)
    testCrypt(0x185A, 0x8E8E, 0x8000)
    testCrypt(0x185C, 0x4E9F, 0x403C)
    testCrypt(0x185E, 0x4661, 0x48CA)
    testCrypt(0x1738, 0x4CB8, 0x40B1)
    testCrypt(0x16F2, 0x1640, 0x12B0)
    testCrypt(0x16F4, 0x84FC, 0x0010)
    testCrypt(0x1A92, 0xCF3D, 0x40B1)
    testCrypt(0x1A94, 0x0FE8, 0x0061)
    testCrypt(0x1578, 0x4E5E, 0x40B1)
    testCrypt(0x157A, 0x0F77, 0x0074)
    testCrypt(0x157C, 0x8F0D, 0x0006)
    testCrypt(0x157E, 0xCF3B, 0x403C)
    testCrypt(0x1580, 0x4669, 0x4966)
    testCrypt(0x1BF6, 0xC814, 0x4D00)
test()
#import sys; sys.exit(0)
# Decrypt the whole stage2 blob word by word. addr is incremented BEFORE
# cryptWord on purpose: per the header comment, the key for the word at
# address X is derived from X+2.
with open('hollywood_stage2_crypt', 'rb') as src:
    with open('hollywood_stage2_decrypted', 'wb') as dst:
        addr = 0x1400
        while 1:
            byte_s = src.read(2)
            if not byte_s:
                break
            word = byte_s[0]+(byte_s[1]<<8)  # assemble a little-endian word
            addr += 2
            r = cryptWord(addr, word)
            #print(fmtw(word)+' -> '+fmtw(r))
            dst.write(bytes([r&0xFF, (r>>8)&0xFF]))
|
import math
class Grid:
    """A cursor-navigable 2D grid built from a flat list of options.

    The grid is `width` columns wide and just tall enough to hold every
    option; cells past the end of the list are padded with '*'.
    """

    def __init__(self, options, width):
        self.x = 0
        self.y = 0
        self.options = options
        self.width = width
        self.height = math.ceil(len(self.options) / self.width)
        self._grid = self.generateGrid()

    def generateGrid(self):
        """Build the 2D grid row by row, padding the tail with '*'."""
        padding = ['*'] * (self.height * self.width - len(self.options))
        cells = list(self.options) + padding
        return [cells[row * self.width:(row + 1) * self.width]
                for row in range(self.height)]

    def move(self, x, y):
        """Jump the cursor to an absolute position."""
        self.x = x
        self.y = y

    def moveValue(self, direction):
        """Return the coordinates one step away from the cursor in the given
        direction ('up'/'down'/'left'/'right') without moving the cursor.
        Unknown directions return the current position unchanged."""
        deltas = {'up': (0, -1), 'down': (0, 1), 'right': (1, 0), 'left': (-1, 0)}
        dx, dy = deltas.get(direction, (0, 0))
        return (self.x + dx, self.y + dy)

    def isValid(self, newX, newY, toggled):
        """True when (newX, newY) lies on the grid and is not a padding cell.

        `toggled` is accepted for call-site compatibility but unused.
        """
        on_board = 0 <= newX < self.width and 0 <= newY < self.height
        return on_board and self._grid[newY][newX] != '*'
import doctest, math
def distance(p1, p2):
    '''
    Return the SQUARED Euclidean distance between two 2-D points.

    NOTE(review): despite the name, no square root is taken (math is
    imported at module level but unused) — confirm whether the true
    distance was intended; the doctests below pin the squared behaviour.

    >>> distance((0, 0), (3,4))
    25
    >>> distance((0,0), (1,1))
    2
    '''
    dx = p1[0] - p2[0]
    dy = p1[1] - p2[1]
    return dx * dx + dy * dy
# Run the embedded doctests when executed directly
if __name__ == "__main__":
    doctest.testmod()
from django.template import Library
from django.conf import settings
from mgcprojects.models import mlProjects
register = Library()
@register.inclusion_tag("mgccms/snippet/top_digg.html")
def projects_top_digg():
    # Top 10 published projects (status=2) ranked by digg count.
    # NOTE(review): select_related(depth=1) was removed in Django 1.8 —
    # confirm this project's Django version still supports it.
    return {
        'news': mlProjects.objects.filter(status=2).select_related(depth=1).order_by("-digg")[:10],
    }
@register.inclusion_tag("mgccms/snippet/last_articles.html")
def projects_last_articles():
    """Render the five most recently published projects."""
    recent = mlProjects.objects.filter(status=2).select_related(depth=1).order_by("-publish")
    return {'news': recent[:5]}
@register.inclusion_tag("mgccms/snippet/relate_tag_articles.html")
def projects_relate_tag_articles(num, news):
    # Collect up to `num` projects sharing any of this article's
    # comma-separated tags.
    # NOTE(review): get_object_or_404, Tag and TaggedItem are NOT imported
    # anywhere in this module — as written this raises NameError at render
    # time; confirm the missing imports (django.shortcuts and the tagging app).
    tags = news.tags.split(',')
    relate_news = []
    for tag in tags:
        if tag:
            tag = get_object_or_404(Tag, name=tag)
            relate_news.extend(TaggedItem.objects.get_by_model(mlProjects, tag))
    return {
        'news': relate_news[:num]
    }
# TODO
# def relate_title_news():
@register.inclusion_tag("mgcprojects/snippet/projects_tag_list.html")
def projects_show_tags_for_article(obj):
    """Expose the article object to the tag-list snippet template."""
    return {"obj": obj}
@register.inclusion_tag("mgcprojects/snippet/projects_month_links.html")
def projects_month_links():
    """Render archive links for the last 12 months that have published projects."""
    months = mlProjects.objects.dates('publish', 'month')[:12]
    return {'dates': months}
|
#!/usr/bin/env python3
import sys
def num_pcg(fname):
    """Count protein-coding gene records in a GTF-style file.

    A record counts when its third whitespace-separated field contains
    "gene" and the raw line contains "protein_coding". Header lines
    starting with "#!" are skipped. Prints the count (kept for backward
    compatibility) and returns it.
    """
    count = 0
    # 'with' closes the handle the old code leaked
    with open(fname) as fh:
        for line in fh:
            if line.startswith("#!"):
                continue
            fields = line.rstrip("\r\n").split()
            # guard against blank/short lines that lack a third field
            if len(fields) > 2 and "gene" in fields[2] and "protein_coding" in line:
                count = count + 1
    print(count)
    # BUG FIX: return the count — the caller assigns the result, but the
    # old version implicitly returned None
    return count
# Count and report protein-coding genes in the file named on the command line
test_output = num_pcg(sys.argv[1])
import logging
from django.db import models
from blobstore_storage.storage import BlobStoreStorage
class Category(models.Model):
    """A named grouping that uploaded files can belong to."""
    name = models.CharField(max_length=40, unique=True)
    def __unicode__(self):
        # Python 2 display name (this app predates __str__ / six compat helpers)
        return unicode(self.name)
class File(models.Model):
    """An uploaded blob, stored through the App Engine blobstore backend."""
    file = models.FileField(
        storage=BlobStoreStorage(), upload_to='blobs/', max_length=255)
    categories = models.ManyToManyField(Category)
    def __unicode__(self):
        # Display the file name without its leading 'blobs/' directory prefix
        return self.file.name.split('/', 1)[-1]
#!/usr/bin/env python3
import json
import time
import lzma
import glob
from datetime import datetime
import timeout_decorator
import instaloader
import sys
import os
TIMEOUT = 7200
@timeout_decorator.timeout(TIMEOUT)
class DownloadComments():
"""
Classe para coletar comentários de posts do instagram. Utiliza os posts
coletados pela interface de linha de comando do Instaloader para isso
Atributos
---------
max_comments : int
máximo de comentários *por post* que devem ser coletados
input_dir : str
nome da pasta em que se encontram os dados dos perfis coletados
Métodos
---------
download_comments()
Função que itera sobre as pastas dos perfis coletados, obtêm os códigos
de posts de cada uma e dispara a coleta dos comentários para cada post
"""
    def __init__(self, max_comments, input_dir):
        """
        Initialize the collector.

        Parameters
        ----------
        max_comments : int
            maximum number of comments to collect *per post*
        input_dir : str
            folder containing the data of the previously crawled profiles
        """
        self.max_comments = max_comments
        self.input_dir = input_dir
    def _collect_comments(self, media_codes_list, outfile, iloader):
        """
        Iterate over the collected post codes and crawl the comments of each
        post, parsing both top-level comments and their replies.

        Parameters
        ----------
        media_codes_list : list(str)
            media short-codes of the posts
        outfile : file
            output file receiving the parsed comments (one JSON per line)
        iloader : Instaloader
            Instaloader instance used to fetch the comments
        """
        post_counter = 1
        for code in media_codes_list:
            print(self._now_str(), "Crawling post ", post_counter,
                  "of ", len(media_codes_list), " --- ", code)
            post_counter = post_counter + 1
            try:
                n_comm = 0
                # use the shared Instaloader instance and the media code to
                # fetch this post's comments
                comments = self._get_comments_safe(iloader, code)
                for comment in comments:
                    self._parse_comment(comment, outfile, code)
                    for reply in comment.answers:
                        self._parse_comment_replies(
                            comment, reply, outfile, code)
                    n_comm += 1
                print(self._now_str(), "Crawled", n_comm, "comments")
            except timeout_decorator.TimeoutError as e:  # overall crawl timeout hit
                print(self._now_str(), "Timeout for post", code)
            except Exception as e:  # best-effort: log and continue with the next post
                print(self._now_str(), "Error for post", code)
    def _parse_comment_replies(self, parent_comment, reply, outfile, media_code):
        """
        Parse a comment that replies to another comment, keeping only the
        relevant fields and writing the resulting JSON to the output file.

        Parameters
        ----------
        parent_comment : Comment
            the comment the current comment is replying to
        reply : Comment
            the current comment, a reply to parent_comment
        outfile : file
            output file receiving one JSON object per line
        media_code : str
            short-code of the post the comment was collected from
        """
        my_reply = {}
        my_reply["text"] = reply.text
        my_reply["created_time"] = int(reply.created_at_utc.timestamp())
        my_reply["created_time_str"] = reply.created_at_utc.strftime(
            '%Y-%m-%d %H:%M:%S')
        my_reply["media_code"] = media_code
        my_reply["id"] = str(reply.id)
        my_reply["owner_username"] = reply.owner.username
        my_reply["owner_id"] = str(reply.owner.userid)
        my_reply["parent_comment_id"] = str(parent_comment.id)
        text = my_reply['text']
        # hashtags/mentions: strip the marker plus surrounding punctuation, lowercase
        tags = {tag.strip("#").strip(',<.>/?;:\'"[{]}\\|=+`~!@#$%^&*()').lower()
                for tag in text.split() if tag.startswith("#")}
        users = {tag.strip("@").strip(',<.>/?;:\'"[{]}\\|=+`~!@#$%^&*()').lower()
                 for tag in text.split() if tag.startswith("@")}
        my_reply['tags'] = list(tags)
        my_reply['mentioned_usernames'] = list(users)
        print(json.dumps(my_reply), file=outfile)
def _parse_comment(self, comment, outfile, media_code):
"""
Realiza parsing em cada objeto do tipo "Comment" retornado pelo instaloader,
retirando apenas as informações relevantes e armazenando em um json que será
armazenado em um arquivo externo
Parâmetros
---------
comment : Comment
objeto do tipo comentário retornado pelo instaloader
outfile : str
nome do arquivo onde serão os json gerados para cada comentário
media_code : str
código do post de onde o comentário foi coletado
"""
my_comment = {}
my_comment["text"] = comment.text
my_comment["created_time"] = int(comment.created_at_utc.timestamp())
my_comment["created_time_str"] = comment.created_at_utc.strftime(
'%Y-%m-%d %H:%M:%S')
my_comment["media_code"] = media_code
my_comment["id"] = str(comment.id)
my_comment["owner_username"] = comment.owner.username
my_comment["owner_id"] = str(comment.owner.userid)
my_comment["parent_comment_id"] = None
text = my_comment['text']
tags = {tag.strip("#").strip(',<.>/?;:\'"[{]}\\|=+`~!@#$%^&*()').lower()
for tag in text.split() if tag.startswith("#")}
users = {tag.strip("@").strip(',<.>/?;:\'"[{]}\\|=+`~!@#$%^&*()').lower()
for tag in text.split() if tag.startswith("@")}
my_comment['tags'] = list(tags)
my_comment['mentioned_usernames'] = list(users)
print(json.dumps(my_comment), file=outfile)
def _now_str(self):
"""
Retorna um timestamp do momento em que a função é chamada
"""
return datetime.now().strftime("[%Y-%m-%d %H:%M:%S]")
# retorna os comentarios dado um identificador de post (shortcode)
def _get_comments_safe(self, il, short_code):
"""
Retorna os comentários coletados de um post
Parâmetros
----------
il : Instaloader
instância do instaloader criada para coletar os comentários
short_code : str
Código do post cujos comentários serão coletados
"""
post = instaloader.Post.from_shortcode(il.context, short_code)
n_comm = 0
comments = []
for c in post.get_comments():
comments.append(c)
n_comm += 1
if n_comm >= self.max_comments:
print(self._now_str(), "Comment limit ({}) reached".format(
self.max_comments))
break
return comments
def _collect_media_codes(self, current_profile):
"""
Coleta os códigos do post feito por um perfil
Parâmetros
----------
current_profile : str
nome do perfil de onde serão retirados os códigos de post
"""
codes = set()
# essa parte coleta os codigos identificadores do post/midia
for f in os.listdir(self.input_dir+"/"+current_profile):
# if f.endswith("UTC.json.xz"):
if "UTC.json" in f:
try:
media = json.loads(lzma.open(
self.input_dir+"/"+current_profile+'/'+str(f), "r").read().decode("utf-8"))
media_code = media["node"]["shortcode"]
codes.add(media_code)
except:
print(f)
print("Marformed json line")
return codes
def download_comments(self):
"""
Dispara o download de comentários feitos em posts coletados. Itera
sobre as pastas de perfis coletados, disparando coleta, parsing e
armazenamento dos comentários.
"""
# as pastas tem como nome o username de cada usuario
folders = sorted(os.listdir(self.input_dir))
n_folders = len(folders)
for count in range(n_folders):
current_profile = folders[count]
print("Collecting profile", (count+1), "of",
n_folders, "---", current_profile)
OUTFILE = self.input_dir+"/"+current_profile+"/comments_"+current_profile+".json"
codes = self._collect_media_codes(current_profile)
fo = open(OUTFILE, "a+")
iloader = instaloader.Instaloader()
media_codes_list = sorted(codes)
self._collect_comments(media_codes_list, fo, iloader)
fo.close()
print("\n--- Finished Crawling comments from ",
current_profile, "---\n")
print(self._now_str(), "Finished")
|
# -*- coding: utf-8 -*-
import hr_employee
import base_agency
import account_journal
import account_move_line
import res_users
import account_invoice
import account_account
|
from django.db import models
# Create your models here.
class math(models.Model):
    """Stores one arithmetic problem: two text operands and an operator."""
    # (stored value, human-readable label) pairs for the operation field.
    operands = (('+', '+'), ('-', '-'), ('x', 'x'), ('/', '/'))
    num1 = models.CharField(max_length=10)  # first operand, kept as text
    num2 = models.CharField(max_length=10)  # second operand, kept as text
    operation = models.CharField(max_length=1, choices=operands)  # one of + - x /
|
# pylint: skip-file
# pylint: disable=too-many-public-methods
class ReplicationController(DeploymentConfig):
    ''' Class to wrap the oc command line tools '''
    # Dotted-path addresses of the fields this wrapper reads/edits inside a
    # ReplicationController manifest (same addressing scheme as the parent
    # DeploymentConfig helper — presumably consumed by its get/put logic;
    # confirm against DeploymentConfig).
    replicas_path = "spec.replicas"
    env_path = "spec.template.spec.containers[0].env"
    volumes_path = "spec.template.spec.volumes"
    container_path = "spec.template.spec.containers"
    volume_mounts_path = "spec.template.spec.containers[0].volumeMounts"

    def __init__(self, content):
        ''' Constructor for OpenshiftOC '''
        # All behavior lives in DeploymentConfig; only the paths above differ.
        super(ReplicationController, self).__init__(content=content)
|
import tensorflow as tf
from tensorflow.python.ops import rnn, rnn_cell
import numpy as np
import matplotlib.pyplot as plt
import time
if(tf.__version__.split('.')[0]=='2'):
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
# Load MNIST dataset
import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
# Training hyper-parameters.
learningRate = 1e-3
trainingIters = 2500  # number of mini-batch updates
batchSize = 128
displayStep = 50  # record/log metrics every this many iterations
nInput = 28 #input 28x28 pixels
nSteps = 28
nHidden = 128 #number of neurons for the RNN
nClasses = 10
# Each 28x28 image is fed as a sequence of nSteps rows of nInput pixels.
x = tf.placeholder('float', [None, nSteps, nInput])
y = tf.placeholder('float', [None, nClasses])
# Output-layer parameters mapping the last RNN output to class logits.
weights = {
    'out': tf.Variable(tf.random_normal([nHidden, nClasses]))
}
biases = {
    'out': tf.Variable(tf.random_normal([nClasses]))
}
def RNN(x, weights, biases):
    """Build the RNN graph and return class logits for a batch of images.

    ``x`` has shape (batch, nSteps, nInput); ``static_rnn`` expects a
    length-nSteps list of (batch, nInput) tensors, hence the transpose /
    reshape / split sequence below.
    """
    x = tf.transpose(x, [1,0,2])
    x = tf.reshape(x, [-1, nInput])
    x = tf.split(x, nSteps, 0)
    lstmCell = rnn_cell.BasicRNNCell(nHidden)
    outputs, states = rnn.static_rnn(lstmCell, x, dtype = tf.float32)
    # Only the final time-step output feeds the classification layer.
    return tf.matmul(outputs[-1], weights['out'])+ biases['out']
# NOTE(review): this bare call discards its result — looks like leftover
# timing scaffolding.
time.time()
pred = RNN(x, weights, biases)
#cost, optimization, evaluation, and accuracy
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = pred, labels = y))
optimizer = tf.train.AdamOptimizer(learningRate).minimize(cost)
correctPred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correctPred, tf.float32), name = 'accuracy')
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    # Reshape flat test images into (batch, steps, inputs) sequences.
    testData = mnist.test.images.reshape((-1, nSteps, nInput))
    testLabel = mnist.test.labels
    # Metric histories used for the plots below.
    test_loss = []
    test_acc = []
    train_loss = []
    train_acc = []
    epoch = []
    for iter in range(trainingIters):
        batchX, batchY = mnist.train.next_batch(batchSize)
        batchX = batchX.reshape((batchSize, nSteps, nInput))
        #backprop
        sess.run(optimizer, feed_dict= {x: batchX, y: batchY})
        # Periodically (and on the last iteration) record metrics.
        if iter % displayStep == 0 or iter == (trainingIters - 1):
            #Training batch accuracy
            acc = sess.run(accuracy, feed_dict= {x: batchX, y: batchY})
            #Training batch loss
            loss = sess.run(cost, feed_dict= {x: batchX, y: batchY})
            #Test loss and accuracy
            testLoss, test_accuracy = sess.run([cost, accuracy],
                feed_dict= {x: testData, y: testLabel})
            print("Iter " + str(iter) + ", Minibatch Loss= " + \
                "{:.6f}".format(loss) + ", Training Accuracy= " + \
                "{:.5f}".format(acc) + ", Test loss = " + \
                "{:.6f}".format(testLoss) + ", Test Accuracy = " + \
                "{:.5f}".format(test_accuracy)
                )
            #print('Iter {} Minibatch loss = {0.6f}, Training accuracy = {0.5f}'.format(iter, loss, acc))
            test_loss.append(testLoss)
            test_acc.append(test_accuracy)
            train_loss.append(loss)
            train_acc.append(acc)
            epoch.append(iter)
    print("Testing Accuracy: ", test_accuracy)
#plot the train, validation and test loss function
plt.figure()
plt.plot(epoch, test_loss, 'r-', label = 'Test_loss')
plt.plot(epoch, train_loss, 'b-', label = 'train_loss')
plt.title('Train and test loss function for basic RNN model')
plt.xlabel('Epochs')
plt.ylabel('loss')
plt.legend()
plt.show()
#plot the train, validation and test accuracy
plt.figure()
plt.plot(epoch, test_acc, 'r-', label = 'Test accuracy')
plt.plot(epoch, train_acc, 'b-', label = 'Train accuracy')
plt.title('Train and test accuracy function for basic RNN model')
plt.xlabel('Epochs')
plt.ylabel('accuracy')
plt.legend()
plt.show()
'''
Module: maze
Author: David Frye
Description: Contains the Maze class.
'''
import collections
import random
import time
from cell import Cell
from region import Region
from utility import Direction
class Maze:
    '''
    Class: Maze
    Description: Represents an individual maze, consisting of multiple cells.
    '''
    # Default maze dimensions (in cells).
    DEFAULT_WIDTH = 40
    DEFAULT_HEIGHT = 30
    # Default pretty-printing scale (doubled in __init__ to stay even).
    DEFAULT_SCALE = 2
    # Default percent chance that generation knocks down an extra wall.
    DEFAULT_OPEN_CHANCE = 50
    def __init__(self, size=(DEFAULT_WIDTH, DEFAULT_HEIGHT), scale=DEFAULT_SCALE):
        '''
        Method: __init__
        Description: Maze constructor
        Parameters: size=(DEFAULT_WIDTH, DEFAULT_HEIGHT), scale=DEFAULT_SCALE
            size: 2-Tuple - The dimensional lengths of the maze
                [0] - Maze x-dimensional length
                [1] - Maze y-dimensional length
            scale: The printing scale of the maze, used to determine spacing
        Return: None
        '''
        # The width/height of the maze.
        self.m_size = size
        # Scale must be an even number for proper pretty-printing.
        self.m_scale = 2 * scale
        # The individual cells of the maze, row-major: m_cells[y][x].
        self.m_cells = [[Cell(Cell.UNVISITED_STRING, (x, y)) for x in range(self.get_width())] for y in range(self.get_height())]
        # A region representing the span of the maze.
        self.m_region = Region((0, 0), (self.get_width(), self.get_height()))
def generate(self, region=None, exemptions=None, open_chance=DEFAULT_OPEN_CHANCE):
'''
Method: generate
Description: Generate a maze within the provided bounds.
Parameters: region=None, exemptions=None, open_chance=DEFAULT_OPEN_CHANCE
region: Region - A region for maze generation to span
exemptions: Regions - A collection of regions for maze generation to avoid
open_chance: The percent chance that each cell will
Return: None
'''
# Ensure that valid boundaries are set.
if region is None:
region = Region((0, 0), (self.get_width(), self.get_height()))
# Construct a set of valid cells.
valid_cell = self.valid_cell_set(region, exemptions)
# If there are no valid cells for generation, return.
if not valid_cell:
return
# Randomly choose a starting cell from the valid cells.
start_cell = random.sample(valid_cell, 1)[0]
# Visit the starting cell and push it onto the cell stack.
self.trailblaze(start_cell, None)
cell_stack = [start_cell]
# Crawl the entire maze.
while cell_stack:
# Grab the top cell from the cell stack.
current_cell = cell_stack[-1]
# Initialize the list of travel directions.
directions = set([Direction(x) for x in range(0, 4)])
while True:
# If all directions have been tried, backtrack through the cell stack.
if not directions:
cell_stack.pop()
break
# Find a valid direction to trailblaze in.
direction = random.sample(directions, 1)[0]
# Attempt to trailblaze to the neighboring cell.
target = self.trailblaze(current_cell, direction, region, exemptions)
# If the trailblaze was successful, move on to the next cell, opening up the maze in the process if desired.
if target is not None:
# Add the next cell to the cell stack.
cell_stack.append(target)
# Open up the maze by plowing through walls at random.
if random.randint(1, 100) <= open_chance:
direction = random.sample(directions, 1)[0]
if direction is not None:
neighbor = self.get_neighbor_cell(current_cell, direction)
if neighbor is not None and neighbor.is_visited():
self.set_wall(current_cell, direction, False)
break
# If direction is invalid, remove it before trying again.
directions.remove(direction)
    def trailblaze(self, source_cell, direction=None, region=None, exemptions=None):
        '''
        Method: trailblaze
        Description: Trailblaze from the source cell in the specified direction, knocking down both sides of the wall between the source and the target.
        Parameters: source_cell, direction=None, region=None, exemptions=None
            source_cell: Cell - The cell to trailblaze from
            direction: Direction - The direction to trailblaze in (None simply visits the source cell)
            region: Region - A region for trailblazing to span
            exemptions: Regions - A collection of regions for trailblazing to avoid
        Return: Cell - The target cell if trailblazing was successful, and None otherwise
        '''
        # If the direction is None, "trailblaze" to the source cell by marking it as visited.
        if direction is None:
            self.visit(source_cell)
            return source_cell
        # Ensure that valid boundaries are set.
        if region is None:
            region = Region((0, 0), (self.get_width(), self.get_height()))
        # Grab the target cell.
        target_cell = self.get_neighbor_cell(source_cell, direction)
        # If the target cell is invalid (off the grid), return without trailblazing.
        if target_cell is None:
            return None
        # If the target cell is exempt, return without trailblazing.
        if exemptions is not None:
            for exemption in exemptions:
                if exemption.contains(target_cell.m_position):
                    return None
        # If non-exempt target cell is valid, trailblaze to it.
        if not target_cell.is_visited() and region.contains(target_cell.m_position):
            # Remove wall between source and target cells.
            self.set_wall(source_cell, direction, False)
            # Visit the target cell.
            self.visit(target_cell)
            return target_cell
        # Falls through (implicit None) when the target is already visited
        # or lies outside the generation region.
    def reset(self, region=None, exemptions=None):
        '''
        Method: reset
        Description: Reset cells inside the provided region whose coordinates do not also fall within any of the provided exemption ranges.
        Parameters: region=None, exemptions=None
            region: Region - A region for maze reset to span
            exemptions: Regions - A collection of regions for maze reset to avoid
        Return: None
        '''
        # Ensure that valid boundaries are set.
        if region is None:
            region = Region((0, 0), (self.get_width(), self.get_height()))
        # Reset all cells that do not fall inside any of the provided exempt ranges.
        for row in self.m_cells:
            # Check each cell of the row for eligibility.
            for cell in row:
                exempt = False
                # If the current cell is outside the reset boundary, move on to the next cell.
                if not region.contains(cell.m_position):
                    continue
                # Check for the inclusion of each cell in each provided exempt range.
                if exemptions is not None:
                    for exemption in exemptions:
                        # Restore the boundary walls of the provided exempt ranges.
                        border_directions = exemption.on_border(cell.m_position)
                        for border_direction in border_directions:
                            self.set_wall(cell, border_direction, True)
                        # If the cell falls inside any of the provided exempt ranges, do not reset it.
                        if exemption.contains(cell.m_position):
                            exempt = True
                            break
                # Do not reset exempt cells.
                if exempt:
                    continue
                # Completely reset non-exempt cells: unvisited, all four walls up.
                self.unvisit(cell)
                for direction in list(Direction):
                    self.set_wall(cell, direction, True)
    def open(self, region=None, exemptions=None, open_border=True):
        '''
        Method: open
        Description: Opens (visits all cells and destroys all walls within) the given region, avoiding the given exempt regions.
        Parameters: region=None, exemptions=None, open_border=True
            region: Region - A region for maze opening to span
            exemptions: Regions - A collection of regions for maze opening to avoid
            open_border: Boolean - Whether walls on the region border may be destroyed
        Return: None
        '''
        # Ensure that valid boundaries are set.
        if region is None:
            region = Region((0, 0), (self.get_width(), self.get_height()))
        # Construct a set of valid cells.
        valid_cells = self.valid_cell_set(region, exemptions)
        # Visit all valid cells and open the walls as necessary (region borders only open if open_border is True).
        for cell in valid_cells:
            cell.visit()
            border_directions = region.on_border(cell.m_position)
            for direction in list(Direction):
                # Ensure that the border is allowed to be destroyed.
                # NOTE(review): `and` binds tighter than `or`, so this is
                # (neighbor-exists AND open_border) OR (direction not on border).
                # If the intent was neighbor-exists AND (open_border OR
                # not-on-border), parentheses are missing — confirm before changing.
                if (self.get_neighbor_cell(cell, direction) is not None) and (open_border) or (direction not in border_directions):
                    self.set_wall(cell, direction, False)
    def solve(self, start_cell_position, end_cell_position, breadcrumbs=False):
        '''
        Method: solve
        Description: Finds a path between the given start and end cells (breadth-first search).
        Parameters: start_cell_position, end_cell_position, breadcrumbs=False
            start_cell_position: 2-Tuple - The cell position to begin searching from
            end_cell_position: 2-Tuple - The cell position to target in the search
            breadcrumbs: Boolean - Whether or not to change the content of cells along the solution path for pretty-printing
        Return: [Cell] - A list of cells denoting the solution path, or None if no solution is found
        '''
        # Reset any residual solution breadcrumb trails.
        for row in self.m_cells:
            for cell in row:
                if self.get_cell_content(cell.m_position) == "*":
                    self.set_cell_content(cell.m_position, " ")
        # If the start and end positions are the same, return the one cell as the entire solution path list.
        # NOTE: this returns a list of one *position*, not Cell, unlike the BFS path below.
        if start_cell_position == end_cell_position:
            return [start_cell_position]
        # Ensure that the starting cell position is a valid cell.
        start_cell = self.get_cell(start_cell_position)
        if start_cell is None:
            return None
        # Enqueue the starting cell into the cell queue.
        cell_queue = collections.deque([start_cell])
        # Maps each discovered cell to its predecessor (doubles as visited set).
        pathways = {}
        # Crawl the entire maze for as long as the end cell is not found.
        while cell_queue:
            # Grab the first cell from the cell queue.
            current_cell = cell_queue.popleft()
            # If the end cell has been found, perform a backtrace and return the solution path.
            if current_cell.m_position == end_cell_position:
                final_pathway = []
                # Backtrace to the starting cell.
                while current_cell.m_position != start_cell_position:
                    # Add the current cell to the final pathway.
                    final_pathway.append(current_cell)
                    # Backtrace to the previous cell.
                    current_cell = pathways[current_cell]
                # Add the starting cell to the final pathway.
                final_pathway.append(current_cell)
                # Reverse the pathway due to its formation during backtracing.
                final_pathway.reverse()
                # If breadcrumbs are enabled, leave breadcrumbs along the final pathway.
                if breadcrumbs:
                    for cell in final_pathway:
                        cell.set_content("*")
                return final_pathway
            # Add all accessible neighbor cells to the cell queue.
            accessible_neighbors = self.get_accessible_neighbor_cells(current_cell)
            for neighbor in accessible_neighbors:
                if (neighbor is not None) and (neighbor not in pathways):
                    cell_queue.append(neighbor)
                    pathways[neighbor] = current_cell
        # Implicit None when the queue empties without reaching the end cell.
def print_maze(self):
'''
Method: print_maze
Description: Pretty-prints the maze to a file.
Parameters: No parameters
Return: None
'''
with open("maze.txt", "w") as outfile:
# Print maze header.
outfile.write("Maze (" + str(self.get_width()) + " x " + str(self.get_height()) + "):\n")
for row in self.m_cells:
# Print the rows between the cells.
for cell in row:
outfile.write(Cell.WALL_VERTICAL_STRING) if cell.m_walls.get(Direction.WEST) else outfile.write(Cell.WALL_HORIZONTAL_STRING)
outfile.write(self.m_scale * Cell.WALL_HORIZONTAL_STRING) if cell.m_walls.get(Direction.NORTH) else outfile.write(self.m_scale * " ")
outfile.write(Cell.WALL_VERTICAL_STRING)
outfile.write("\n")
# Print the rows containing the cells.
for cell in row:
outfile.write(Cell.WALL_VERTICAL_STRING + " ") if cell.m_walls.get(Direction.WEST) else outfile.write(" ")
outfile.write((((self.m_scale - 1) // 2) * " ") + cell.m_content + (((self.m_scale - 1) // 2) * " "))
outfile.write(Cell.WALL_VERTICAL_STRING)
outfile.write("\n")
# Print bottom maze border.
outfile.write(Cell.WALL_VERTICAL_STRING + (((self.m_scale + 1) * self.get_width() - 1) * Cell.WALL_HORIZONTAL_STRING) + Cell.WALL_VERTICAL_STRING + "\n")
def visit(self, cell):
'''
Method: visit
Description: Sets the given cell into a visited state.
Parameters: cell
cell: Cell - The cell being visited
Return: None
'''
try:
cell.visit()
except AttributeError as e:
print(e)
def unvisit(self, cell):
'''
Method: unvisit
Description: Sets the given cell into an unvisited state.
Parameters: cell
cell: Cell - The cell being unvisited
Return: None
'''
try:
cell.unvisit()
except AttributeError as e:
print(e)
def direction_to_offset(self, direction):
'''
Method: direction_to_offset
Description: Converts a Direction to a cell offset.
Parameters: direction
direction: Direction - The direction to convert into an offset
Return: 2-Tuple - An offset in a given direction
[0] = The x-dimensional offset
[1] = The y-dimensional offset
'''
if direction == Direction.NORTH:
return (0, -1)
elif direction == Direction.EAST:
return (1, 0)
elif direction == Direction.SOUTH:
return (0, 1)
elif direction == Direction.WEST:
return (-1, 0)
else:
return (0, 0)
def is_valid_cell_position(self, position):
'''
Method: is_valid_cell_position
Description: Determines whether the given position is a valid cell within the maze.
Parameters: position
position: 2-Tuple - A position value
[0] = The x-position
[1] = The y-position
Return: Boolean - Whether or not the given position is a valid cell within the maze
'''
if position in self.m_region.to_set():
return True
else:
return False
def valid_cell_set(self, region, exemptions):
'''
Method: valid_cell_set
Description: Constructs a set of valid cells (cells that are in the intersection of the maze cell set and the region cell set, subtracting those in the exempt region sets).
Parameters: region, exemptions
region: Region - A region of cells to intersect with the cells of the maze
exemptions: Regions - A collection of regions to subtract from the valid cell set
Return: Set([Cell]) - A set of valid cells (cells that are in the intersection of the maze cell set and the region cell set, subtracting those in the exempt region sets)
'''
valid_cell_positions = self.m_region.to_set() & region.to_set()
if exemptions is not None:
for exemption in exemptions:
valid_cell_positions -= exemption.to_set()
return set([self.get_cell(x) for x in valid_cell_positions])
def get_accessible_neighbor_cells(self, source_cell):
'''
Method: get_accessible_neighbor_cells
Description: Gets a list of all neighboring cells which are directly-accessible from the given source cell.
Parameters: source_cell
source_cell: Cell - The cell to check the neighbors of
Return: [Cell] - All neighboring cells directly-accessible from the given source cell
'''
accessible_neighbor_cells = []
for direction in list(Direction):
if not source_cell.get_wall(direction):
accessible_neighbor_cells.append(self.get_neighbor_cell(source_cell, direction))
return accessible_neighbor_cells
def get_neighbor_cell(self, source_cell, direction):
'''
Method: get_neighbor_cell
Description: Gets the cell neighboring the given source cell in the given direction.
Parameters: source_cell, direction
source_cell: Cell - The cell neighboring the target
direction: Direction - The direction to grab from the source cell
Return: Cell - The cell neighboring the given source cell in the given direction
'''
neighbor_offset = self.direction_to_offset(direction)
return self.get_cell((source_cell.get_position_x() + neighbor_offset[0], source_cell.get_position_y() + neighbor_offset[1]))
def get_cell(self, position):
'''
Method: get_cell
Description: Gets the Cell at a given position, transposing column-major positional input into row-major positional input.
Parameters: position
position: 2-Tuple - A position value
[0] = The x-position
[1] = The y-position
Return: Cell - The Cell at a given position
'''
if self.is_valid_cell_position(position):
return self.m_cells[position[1]][position[0]]
else:
return None
def get_cell_content(self, position):
'''
Method: get_cell_content
Description: Gets the cell_content of the given cell.
Parameters: position
position: 2-Tuple - A position value
[0] = The x-position
[1] = The y-position
Return: String - A string visually representing the cell
'''
return self.get_cell(position).get_content()
def get_height(self):
'''
Method: get_height
Description: Gets the height of the maze.
Parameters: No parameters
Return: Int - The maze height
'''
return self.m_size[1]
def get_wall(self, source_cell, direction):
'''
Method: get_wall
Description: Modify both sides of a given cell's wall in a given direction by a given value.
Parameters: source_cell, direction
source_cell: Cell - The cell whose wall is to be retrieved
direction: Direction - The direction of the wall to be retrieved
Return: Boolean - Whether the wall exists or not
'''
return source_cell.get_wall(direction)
def get_width(self):
'''
Method: get_width
Description: Gets the width of the maze.
Parameters: No parameters
Return: Int - The maze width
'''
return self.m_size[0]
def set_cell_content(self, position, value):
'''
Method: set_cell_content
Description: Sets the cell content of the given cell.
Parameters: position, value
position: 2-Tuple - A position value
[0] = The x-position
[1] = The y-position
value: String - A string visually representing the cell
Return: None
'''
self.get_cell(position).set_content(value)
def set_height(self, height):
'''
Method: set_height
Description: Sets the height of the maze.
Parameters: height
height: Int - The maze height
Return: None
'''
self.m_size[1] = height
    def set_wall(self, source_cell, direction, value):
        '''
        Method: set_wall
        Description: Modify both sides of a given cell's wall in a given direction by a given value.
        Parameters: source_cell, direction, value
            source_cell: Cell - The cell whose wall is to be set
            direction: Direction - The direction of the wall to be set
            value: Boolean - Whether the wall should exist or not
        Return: None
        '''
        # Modify wall on the given source_cell's side.
        if self.is_valid_cell_position(source_cell.m_position):
            source_cell.set_wall(direction, value)
        # Modify shared wall of the neighbor cell in the given direction,
        # so both cells always agree about the wall between them.
        neighbor_cell = self.get_neighbor_cell(source_cell, direction)
        if neighbor_cell is not None and self.is_valid_cell_position(neighbor_cell.m_position):
            neighbor_cell.set_wall(Direction.get_opposite(direction), value)
def set_width(self, width):
'''
Method: set_width
Description: Sets the width of the maze.
Parameters: width
width: Int - The maze width
Return: None
'''
self.m_size[0] = width |
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-10-17 02:47
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Add a uniqueness constraint on (item, recorded_at) for Record."""
    # Must run after the previous auto-generated `search` migration.
    dependencies = [
        ('search', '0006_auto_20161017_1046'),
    ]
    # Enforce at most one record per item per recorded_at timestamp.
    operations = [
        migrations.AlterUniqueTogether(
            name='record',
            unique_together=set([('item', 'recorded_at')]),
        ),
    ]
|
# Given a list of daily temperatures T, return a list such that, for each day in the input,
# tells you how many days you would have to wait until a warmer temperature. If there is no
# future day for which this is possible, put 0 instead.
# For example, given the list of temperatures T = [73, 74, 75, 71, 69, 72, 76, 73], your output should be [1, 1, 4, 2, 1, 1, 0, 0].
# Note: The length of temperatures will be in the range [1, 30000]. Each temperature will be an integer in the range [30, 100].
class Solution:
    def dailyTemperatures(self, T: List[int]) -> List[int]:
        """Return, for each day, the number of days to wait for a warmer
        temperature (0 if no warmer day follows).

        Monotonic-stack approach: the stack holds indices of days whose
        warmer day has not yet been seen, temperatures strictly decreasing
        from bottom to top. O(n) time, O(n) extra space.
        """
        answer = [0] * len(T)
        pending = []  # indices still awaiting a warmer temperature
        for today, temperature in enumerate(T):
            # Resolve every pending colder day this temperature warms up.
            while pending and temperature > T[pending[-1]]:
                earlier = pending.pop()
                answer[earlier] = today - earlier
            pending.append(today)
        return answer
|
# -*- coding: utf-8 -*-
# 55.7522200 широта
# 37.6155600 долгота
# working google key for google maps directions API!
# AIzaSyDhefiliHi_T2eke5NRHzKWvGqj7OteDog
# example request
# https://maps.googleapis.com/maps/api/directions/json?origin=Toronto&destination=Montreal&key=AIzaSyDhefiliHi_T2eke5NRHzKWvGqj7OteDog
import json, requests#, ittertools
def get_pairs_list_from_dicts_list(coords_list_of_dicts):
    """Convert [{'lat': .., 'lng': ..}, ...] into a list of (lat, lng) tuples."""
    return [(point[u'lat'], point[u'lng']) for point in coords_list_of_dicts]
def get_lat_lon_by_address(address_string):
    """Geocode an address via the Google geocode API.

    Returns the ``{'lat': .., 'lng': ..}`` location dict of the first result.
    """
    # The tool only handles Russian addresses, so the country is appended
    # to disambiguate the geocoding query.
    query = address_string + ',Russia'
    response = requests.get(
        url="http://maps.google.com/maps/api/geocode/json",
        params=dict(address=query, sensor='false'),
    )
    payload = json.loads(response.text)
    return payload[u'results'][0][u'geometry'][u'location']
def get_route_from_gmaps(origin,dest):
    """Query the Google Directions API for a route between two points.

    origin/dest are {'lat': .., 'lng': ..} dicts. Returns a 4-tuple:
    (list of (lat, lng) pairs along the route, per-point annotation strings,
    total distance value, total duration value).
    NOTE: uses dict.iteritems(), i.e. this function is Python-2 only.
    """
    url = 'https://maps.googleapis.com/maps/api/directions/json'
    params = dict(
        # origin='Toronto',
        origin='%f,%f' % (origin[u'lat'],origin[u'lng']),
        # origin='55.7522200,37.6155600',
        # destination='Montreal',
        destination='%f,%f' % (dest[u'lat'],dest[u'lng']),
        # destination='59.9386300,30.3141300',
        # waypoints='Joplin,MO|Oklahoma+City,OK',
        sensor='false',
        key='AIzaSyDhefiliHi_T2eke5NRHzKWvGqj7OteDog'
    )
    resp = requests.get(url=url, params=params)
    data = json.loads(resp.text)
    routes = data["routes"]
    # Debug output describing the returned route structure.
    print(len(routes[0]))
    print(type(routes[0]))
    for key, val in routes[-1].iteritems():
        print(key)
    # print("routes[0]['bounds']")
    # print(routes[0]['bounds'])
    # print("routes[0]['legs']")
    # print(routes[0]['legs'])
    # print(type(routes[0]['legs'][-1]))
    # print(len(routes[0]['legs'][-1]))
    for key,val in (routes[0]['legs'][-1]).iteritems():
        print(key, len(val),type(val))
    a_step = routes[0]['legs'][-1][u'steps'][0]
    print("a_step")
    print(type(a_step), len(a_step))
    for key,val in (a_step.iteritems()):
        print(key, len(val),type(val))
    # print(len(routes[0]['legs']))
    # print(routes[0]['legs'][-1])
    # Totals are taken from the last leg of the first route.
    duration_text = routes[0]['legs'][-1][u'duration']['text']
    total_duration_value = routes[0]['legs'][-1][u'duration']['value']
    distance_text = routes[0]['legs'][-1][u'distance']['text']
    total_distance_value = routes[0]['legs'][-1][u'distance']['value']
    # NOTE(review): these two locals are overwritten below and never used.
    annotations=[u"Source",u"Tver",u"Reciever\n%s\n%s" % (duration_text,distance_text)]
    coord_pairs = [(origin[u'lat'],origin[u'lng']),(56.8583600,35.9005700),(dest[u'lat'],dest[u'lng'])]
    # Build the point list: origin, every step end-point, destination.
    coords_list_of_dicts = []
    annotes4points = []
    coords_list_of_dicts.append(origin)
    annotes4points.append(u"Source")
    print(origin)
    for i,step in enumerate(routes[0]['legs'][-1][u'steps']):
        print("step No %i: %s" % (i,str(step[u'end_location'])))
        coords_list_of_dicts.append(step[u'end_location'])
        annotes4points.append(step[u'duration']['text'])
    print(dest)
    coords_list_of_dicts.append(dest)
    annotes4points.append(u"Reciever\n%s\n%s" % (duration_text,distance_text))
    print(annotes4points)
    list_of_coords_pairs = get_pairs_list_from_dicts_list(coords_list_of_dicts)
    print("total points_count = %i" % len(coords_list_of_dicts))
    print("total annotes count = %i" % len(annotes4points))
    return list_of_coords_pairs, annotes4points, total_distance_value, total_duration_value
import numpy as np
import matplotlib
# matplotlib.use('nbagg')
# import matplotlib.pyplot as plt
# import matplotlib.cm as cm
# import mpld3
# matplotlib.use('nbagg')
def plot_route(coord_pairs,annotes):
    """Plot a route as an annotated polyline with matplotlib.

    coord_pairs: list of (lat, lng) tuples; annotes: one label per point.
    NOTE: `zip(*coord_pairs)[0]` below indexes a zip object, which only
    works on Python 2 (matching the iteritems() usage elsewhere in this file).
    """
    # matplotlib.use('nbagg')
    import matplotlib.pyplot as plt
    import matplotlib.cm as cm
    MIN_L_WIDTH=10
    POINT_SIZE=2*MIN_L_WIDTH
    fig = plt.figure("caption",figsize=(10,10))
    ax = fig.add_subplot(111)
    # colors_list = cm.rainbow(np.linspace(0,1,len(coord_pairs)))
    ax.plot(*zip(*coord_pairs),ls='-',marker='o',ms=POINT_SIZE,lw=MIN_L_WIDTH,alpha=0.5,solid_capstyle='round',color='r')
    # Offset each label slightly from its point.
    for i, txt in enumerate(annotes):
        ax.annotate(txt, (coord_pairs[i][0],coord_pairs[i][1]), xytext=(POINT_SIZE/2,POINT_SIZE/2), textcoords='offset points')
        # ax.annotate(txt, (coord_pairs[i][0],coord_pairs[i][1]), xytext=(1,1))
    ax.set_xlim([0.9*min(zip(*coord_pairs)[0]),1.1*max(zip(*coord_pairs)[0])]) # must be after plot
    ax.set_ylim([0.9*min(zip(*coord_pairs)[1]),1.1*max(zip(*coord_pairs)[1])])
    # Flip both axes so the plot roughly matches map orientation.
    plt.gca().invert_xaxis()
    plt.gca().invert_yaxis()
    # mpld3.show() # bad rendering
    plt.show()
# plot_route(coord_pairs,annotations)
# plot_route(list_of_coords_pairs,annotes4points)
from mpl_toolkits.basemap import Basemap
def plot_route_on_basemap(coord_pairs, annotes, added_points_param_list=None):
    """Draw a route on a mercator Basemap, annotating every waypoint.

    coord_pairs -- sequence of (lat, lng) pairs along the route; the first
        and last pairs are treated as the endpoints and get their own
        annotation placement.
    annotes -- one label per pair (assumed same length as coord_pairs --
        TODO confirm callers guarantee this).
    added_points_param_list -- optional [coords, names] pair of extra
        points to mark in pink on the same map.

    Shows the figure interactively; returns nothing.
    """
    matplotlib.use('nbagg')  # must run before pyplot is imported
    import matplotlib.pyplot as plt
    fig = plt.figure(figsize=(16, 12))
    ax = fig.add_axes([0.05, 0.05, 0.95, 0.95])
    lat_list, lng_list = zip(*coord_pairs)
    # Mercator projection sized to the route with a 2-degree margin.
    m = Basemap(llcrnrlon=min(lng_list) - 2, llcrnrlat=min(lat_list) - 2,
                urcrnrlon=max(lng_list) + 2, urcrnrlat=max(lat_list) + 2,
                rsphere=(6378137.00, 6356752.3142),
                resolution='l', projection='merc',
                lat_0=0., lon_0=0., lat_ts=0.)
    MIN_L_WIDTH = 7
    POINT_SIZE = 2 * MIN_L_WIDTH
    m.drawcoastlines()
    m.fillcontinents()
    x_all = []
    y_all = []
    for i, point in enumerate(coord_pairs):
        lat = point[0]
        lon = point[-1]
        x, y = m(lon, lat)  # project (lon, lat) to map coordinates
        x_all.append(x)
        y_all.append(y)
        # Intermediate waypoints get plain labels; endpoints are drawn below.
        if i != 0 and i != len(annotes) - 1:
            plt.annotate(annotes[i], xy=(x, y),
                         xytext=(POINT_SIZE / 2, POINT_SIZE / 2),
                         textcoords='offset points',
                         bbox=dict(boxstyle="round", fc=(1.0, 0.7, 0.7), ec="none"))
    plt.annotate(annotes[-1], xy=(x_all[-1], y_all[-1]),
                 xytext=(POINT_SIZE / 2, POINT_SIZE), textcoords='offset points',
                 bbox=dict(boxstyle="round", fc=(1.0, 0.7, 0.7)))
    plt.annotate(annotes[0], xy=(x_all[0], y_all[0]),
                 xytext=(POINT_SIZE / 2, POINT_SIZE), textcoords='offset points',
                 bbox=dict(boxstyle="round", fc=(1.0, 0.7, 0.7)))
    plt.plot(x_all, y_all, ls='-', marker='o', ms=POINT_SIZE, lw=MIN_L_WIDTH,
             alpha=0.5, solid_capstyle='round', color='r')
    # NOTE(review): the "x.txt" truncate/append below looks like debug
    # leftovers -- consider removing once no longer needed.
    with open("x.txt", 'w') as f:
        pass
    if added_points_param_list is not None:  # was "!= None"; identity test is the idiom
        added_points_coords = added_points_param_list[0]
        names = added_points_param_list[1]
        for i, point in enumerate(added_points_coords):
            lat = point[0]
            lon = point[-1]
            x, y = m(lon, lat)
            plt.annotate(names[i], xy=(x, y), xytext=(0, -POINT_SIZE * 2),
                         textcoords='offset points',
                         bbox=dict(boxstyle="round", fc=(1.0, 0.5, 0.7)))
            plt.plot(x, y, ls='-', marker='o', ms=POINT_SIZE, lw=MIN_L_WIDTH,
                     alpha=0.5, solid_capstyle='round', color='pink')
            with open("x.txt", 'a') as f:
                f.write("plotted %f,%f\n" % (x, y))
    # draw parallels
    m.drawparallels(np.arange(-20, 0, 20), labels=[1, 1, 0, 1])
    # draw meridians
    m.drawmeridians(np.arange(-180, 180, 30), labels=[1, 1, 0, 1])
    m.bluemarble()
    plt.show()
def test():
    """Smoke test: fetch a Moscow -> St. Petersburg route and map it."""
    start = {u'lat': 55.75222, u'lng': 37.61556}
    finish = {u'lat': 59.93863, u'lng': 30.31413}
    pairs, labels, distance, duration = get_route_from_gmaps(start, finish)
    print(distance, duration / (60 * 60.))  # distance plus duration in hours
    plot_route_on_basemap(pairs, labels)
if __name__ == "__main__":
    # Geocode a handful of cities, then route Moscow -> Sarov and plot it.
    coords_tver = get_lat_lon_by_address('Tver')
    coords_krasnogorsk = get_lat_lon_by_address('Krasnogorsk')
    coords_moscow = get_lat_lon_by_address('Moscow')
    coords_sarov = get_lat_lon_by_address('Sarov')
    for coords in (coords_tver, coords_krasnogorsk, coords_moscow):
        print(coords)
    pairs, labels, _dist, _dur = get_route_from_gmaps(coords_moscow, coords_sarov)
    plot_route_on_basemap(pairs, labels)
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Alter User fields: give the text fields empty bytestring defaults and
    flip ``is_active``'s default to False (new accounts start inactive --
    presumably pending activation; confirm against the signup flow).
    """
    dependencies = [
        ('users', '0002_auto_20150202_2124'),
    ]
    operations = [
        # Default b'' keeps existing NULL-less rows valid without prompting
        # for a one-off default during makemigrations.
        migrations.AlterField(
            model_name='user',
            name='email',
            field=models.EmailField(default=b'', max_length=255, verbose_name=b'email address'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='user',
            name='first_name',
            field=models.CharField(default=b'', max_length=255),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='user',
            name='graduation_year',
            field=models.CharField(default=b'', max_length=10),
            preserve_default=True,
        ),
        # Accounts now default to inactive.
        migrations.AlterField(
            model_name='user',
            name='is_active',
            field=models.BooleanField(default=False),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='user',
            name='last_name',
            field=models.CharField(default=b'', max_length=255),
            preserve_default=True,
        ),
    ]
|
def getIntList(intFileName):
    """Read one integer per line from *intFileName* and return them as a list.

    Surrounding whitespace is stripped from each line; a non-integer line
    (including a blank one) raises ValueError.
    """
    # Context manager guarantees the file is closed even if a line fails
    # to parse (the original leaked the handle on that path).
    with open(intFileName) as intFile:
        return [int(line.strip()) for line in intFile]
def filterFastaWithIndexes(fastaFileName, indexesList, outputFileName):
    """Copy the fasta entries whose 1-indexed positions are in *indexesList*
    from *fastaFileName* to *outputFileName*.

    Bug fix: the original wrote only the ">" header lines of selected
    entries; the sequence lines that follow each header were silently
    dropped, producing an invalid fasta file.  We now keep writing until
    the next (unselected) header.
    """
    wanted = set(indexesList)  # O(1) membership instead of list scans
    count = 0
    recording = False  # True while inside a selected entry
    with open(fastaFileName) as fastaFile, open(outputFileName, 'w') as outputFile:
        for line in fastaFile:
            if line.startswith(">"):
                # Beginning of a new fasta entry: decide whether to keep it.
                count += 1
                recording = count in wanted
            if recording:
                outputFile.write(line)
if __name__ == "__main__":
    import sys
    # CLI: <fasta file> <index list file> <output file>
    fasta_path = sys.argv[1]
    index_path = sys.argv[2]  # ASSUMES THAT THIS IS 1-INDEXED
    out_path = sys.argv[3]
    filterFastaWithIndexes(fasta_path, getIntList(index_path), out_path)
import eventlet
from oa import oauth
from db import db
from ma import ma
import json
from datetime import datetime
from flask import Flask, jsonify
from flask_socketio import SocketIO
from flask_restful import Api
from flask_sqlalchemy import SQLAlchemy
from flask_apscheduler import APScheduler
from flask_jwt_extended import JWTManager
#from flask_uploads import patch_request_class, configure_uploads
from dotenv import load_dotenv
from marshmallow import ValidationError
from helpers.task import scheduleTask
#from helpers.image_helper import IMAGE_SET
from models.revoken_token import RevokedTokenModel
load_dotenv(".env", verbose=True)
# --- Application and extension setup (runs at import time) ---
app = Flask(__name__)
# load default configs from default_config.py
app.config.from_object("default_config")
app.config.from_envvar(
    "APPLICATION_SETTINGS"
)  # override with config.py (APPLICATION_SETTINGS points to config.py)
# 10 MB max image size upload
#patch_request_class(app, 10 * 1024 * 1024)
#configure_uploads(app, IMAGE_SET)
socketio = SocketIO(app, logger=True)  # realtime events (see connect handler below)
jwt = JWTManager(app)  # JWT auth; loader callbacks configured below
api = Api(app)  # REST resources are registered in set_api()
@app.before_first_request
def create_tables():
    # Create any missing database tables before the first request is served.
    db.create_all()
@app.errorhandler(ValidationError)
def handle_marshmallow_validation_error(err):
    """Turn marshmallow validation failures into a 400 JSON response."""
    messages = err.messages
    return jsonify(messages), 400
# Runs on every request to a protected endpoint: reject tokens whose
# jti (the token's unique identity) has been revoked/blacklisted.
@jwt.token_in_blocklist_loader
def check_if_token_in_blacklist(jwt_header, jwt_payload):
    """Return True when the presented token has been blacklisted."""
    return RevokedTokenModel.is_jti_blacklisted(jwt_payload["jti"])
@jwt.expired_token_loader
def expired_token_callback(jwt_header, jwt_payload):
    """401 response body for a JWT whose expiry has passed."""
    body = {
        'message': 'The token has expired.',
        'error': 'token_expired'
    }
    return jsonify(body), 401
@jwt.invalid_token_loader
def invalid_token_callback(error):
    """401 response body for a JWT that fails signature verification."""
    body = {
        'message': 'Signature verification failed.',
        'error': 'invalid_token'
    }
    return jsonify(body), 401
@jwt.unauthorized_loader
def missing_token_callback(error):
    """401 response body when a request carries no access token at all."""
    body = {
        "description": "Request does not contain an access token.",
        'error': 'authorization_required'
    }
    return jsonify(body), 401
# The fresh-token requirement is used on the Change Password screen.
@jwt.needs_fresh_token_loader
def token_not_fresh_callback(jwt_header, jwt_payload):
    """401 response body when an endpoint demands a *fresh* token.

    Bug fix: flask-jwt-extended 4.x (this file already uses the v4
    ``token_in_blocklist_loader`` API, and the sibling loaders take two
    arguments) invokes this loader with the decoded token's header and
    payload -- the original zero-argument form raised a TypeError
    whenever a stale token was presented.
    """
    return jsonify({
        "description": "The token is not fresh.",
        'error': 'fresh_token_required'
    }), 401
@socketio.on("connect")
def connect():
    """On each new socket connection, broadcast the current whoop list."""
    from g_variables import whoop_list
    print("A user connected.")
    payload = json.dumps({"whoops": whoop_list})
    socketio.emit("user_event", payload)
def set_api():
    """Register every REST resource/route on the module-level ``api``.

    Resource modules are imported inside the function rather than at the
    top of the file -- presumably to avoid circular imports with modules
    that import ``app``/``db`` from here (TODO confirm).
    """
    from resources.test import Test
    from resources.home import HomePage
    from resources.whoop import ShareWhoop
    from resources.token import TokenRefresh, TokenBlacklist
    # from resources.image import ImageUpload, Image, AvatarUpload, Avatar
    from resources.google_login import GoogleLogin, GoogleAuthorize
    from resources.facebook_login import FacebookLogin, FacebookAuthorize
    from resources.twitter_login import TwitterLogin, TwitterAuthorize
    from resources.user import (
        User,
        UserSignin,
        UserSignup,
        UserLogout,
        AllUsers,
        SetPassword
    )
    # home page resources
    api.add_resource(HomePage, '/')
    # user resources
    api.add_resource(User, '/user')
    api.add_resource(UserSignin, '/signin')
    api.add_resource(UserSignup, '/signup')
    api.add_resource(UserLogout, '/logout')
    api.add_resource(AllUsers, '/users')
    api.add_resource(SetPassword, '/user/set_password')
    # test resources
    api.add_resource(Test, '/test')
    # whoop resources
    api.add_resource(ShareWhoop, '/whoop/share')
    # token resources
    api.add_resource(TokenRefresh, '/token/refresh')
    api.add_resource(TokenBlacklist, '/token/is_token_blacklisted')
    # image resources (currently disabled, like the imports above)
    #api.add_resource(ImageUpload, '/upload/image')
    #api.add_resource(Image, '/image/<string:filename>')
    #api.add_resource(AvatarUpload, '/upload/avatar')
    #api.add_resource(Avatar, '/avatar/<int:user_id>')
    # google oauth resources; the explicit endpoint name is presumably
    # referenced by the provider's redirect flow -- verify before renaming
    api.add_resource(GoogleLogin, '/login/google')
    api.add_resource(GoogleAuthorize, '/login/google/authorized',
                     endpoint='google.authorized')
    # facebook oauth resources
    api.add_resource(FacebookLogin, '/login/facebook')
    api.add_resource(FacebookAuthorize, '/login/facebook/authorized',
                     endpoint='facebook.authorized')
    # twitter oauth resources
    api.add_resource(TwitterLogin, '/login/twitter')
    api.add_resource(TwitterAuthorize, '/login/twitter/authorized',
                     endpoint='twitter.authorized')
# I moved all the script outside of the main func. because when I run
# the app on Heroku with gunicorn it only works like that.
db.init_app(app)  # SQLAlchemy
ma.init_app(app)  # Marshmallow serialization
oauth.init_app(app)  # OAuth providers (Google/Facebook/Twitter)
set_api()
# Background scheduler: run scheduleTask every second.
scheduler = APScheduler()
scheduler.add_job(
    id="Scheduled Task", func=scheduleTask, trigger="interval", seconds=1
)
scheduler.start()
#socketio.run(app, use_reloader=False)
if __name__ == "__main__":
    # All initialization happens at import time (above) so gunicorn works;
    # running this file directly only starts the Socket.IO dev server.
    # db.init_app(app)
    # ma.init_app(app)
    # oauth.init_app(app)
    # set_api()
    # scheduler = APScheduler()
    # scheduler.add_job(
    #     id="Scheduled Task", func=scheduleTask, trigger="interval", seconds=1
    # )
    # scheduler.start()
    socketio.run(app, debug=True, use_reloader=False)
|
import argparse
from IAA import calc_agreement_directory
from Dependency import *
from Weighting import *
from pointAssignment import *
from Separator import *
def calculate_scores_master(directory, tua_file = None, iaa_dir = None, scoring_dir = None, repCSV = None):
    """Run the full scoring pipeline over *directory*.

    Stages, in order: inter-annotator agreement -> dependency evaluation
    -> weighting -> point sorting.  Output directories are created by the
    stage functions when not supplied.

    directory -- input directory of DataHunt/Schema csv files
    tua_file -- optional TUA csv handed to the point-sorting stage
    iaa_dir / scoring_dir -- optional output directories; computed when None
    repCSV -- optional user-reputation csv fed to the IAA stage
    """
    print("IAA PROPER")
    iaa_dir = calc_agreement_directory(directory, hardCodedTypes=True, repCSV=repCSV, outDirectory=iaa_dir)
    print('iaaaa', iaa_dir)  # debug: echo the resolved IAA output directory
    print("DEPENDENCY")
    scoring_dir = eval_dependency(directory, iaa_dir, out_dir=scoring_dir)
    print("WEIGHTING")
    launch_Weighting(scoring_dir)
    print("SORTING POINTS")
    pointSort(scoring_dir, tua_file)
    ### Commented out because may not be updated to current output format??
    ###print("----------------SPLITTING-----------------------------------")
    ###splitcsv(scoring_dir)
def load_args():
    """Parse the scoring pipeline's command-line options.

    All options are plain string flags; unspecified ones come back None.
    """
    option_specs = (
        ('-i', '--input-dir',
         'Directory containing DataHuntHighlights DataHuntAnswers, '
         'and Schema .csv files.'),
        ('-t', '--tua-file',
         'Filename to use for file with TUAs for taskruns in input-dir.'),
        ('-o', '--output-dir',
         'Pathname to use for IAA output directory.'),
        ('-s', '--scoring-dir',
         'Pathname to use for output files for scoring of articles.'),
        ('-r', '--rep-file',
         'Filename to use for User Reputation scores file.'),
    )
    parser = argparse.ArgumentParser()
    for short_flag, long_flag, help_text in option_specs:
        parser.add_argument(short_flag, long_flag, help=help_text)
    return parser.parse_args()
if __name__ == '__main__':
    args = load_args()
    # Any CLI flag that was actually given overrides the hard-coded
    # development defaults below ("x or default" matches the original
    # truthiness check: None or "" both fall back to the default).
    input_dir = args.input_dir or './data/Setup_Testing_4-18-19'
    tua_file = args.tua_file or './mt/allTUAS.csv'
    output_dir = args.output_dir or './output/Setup_Testing_4-18-19'
    scoring_dir = args.scoring_dir or './scoring/Setup_Testing_4-18-19'
    rep_file = args.rep_file or './UserRepScores.csv'
    calculate_scores_master(input_dir, tua_file=tua_file, iaa_dir=output_dir,
                            scoring_dir=scoring_dir, repCSV=rep_file)
import cv2
import numpy

# Load the image as a single-channel (grayscale) array.
gray = cv2.imread("smallgray.png", 0)
print(gray)
# cv2.imwrite("newsmallgray.png", gray)

# Rows 0-1, columns 2-3 of the pixel grid.
print(gray[0:2, 2:4])
print("=" * 100)

# Stack the image beside itself, then split the result back into halves.
side_by_side = numpy.hstack((gray, gray))
print(side_by_side)
halves = numpy.hsplit(side_by_side, 2)
print(halves)
|
import os, datetime, shutil, sys

# Python 2 installer script (raw_input): optionally build/install the
# nncloudtv dependency, then assemble the nnqueue fat jar.
answer = raw_input('Install nncloudtv package(y/n): ')
if answer == 'y':
    os.chdir("../../nncloudtv")
    os.system("mvn clean compile install -DskipTests")
    os.chdir("../nnqueue/installer")
os.chdir("..")
for command in (
    "mvn clean compile",
    "mvn clean assembly:assembly -DskipTests",
    "mv target/nnqueue-0.0.1-SNAPSHOT-jar-with-dependencies.jar target/nnqueue.jar",
):
    os.system(command)
|
# Generated by Django 2.2.3 on 2019-08-05 15:49
from django.db import migrations, models
class Migration(migrations.Migration):
    """Re-declare ``cronlog.cronjob_comment`` with a Korean default label
    (roughly: "community home - today's story random-number run log")
    and a 300-character limit.
    """
    dependencies = [
        ('community', '0012_cronlog'),
    ]
    operations = [
        migrations.AlterField(
            model_name='cronlog',
            name='cronjob_comment',
            field=models.CharField(default='커뮤니티홈-오늘의 스토리 랜덤 숫자 동작 기록', max_length=300),
        ),
    ]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.