text stringlengths 38 1.54M |
|---|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-19 18:52
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration for the ``catalog`` app.

    Alters two existing columns:
      * ``Location.last_accessed`` becomes an auto-updating timestamp
        (``auto_now=True`` refreshes it on every model save).
      * ``Organization.service`` becomes a plain 200-char ``CharField``.
    """

    # Must be applied after the previous catalog migration.
    dependencies = [
        ('catalog', '0012_auto_20171012_1038'),
    ]

    operations = [
        migrations.AlterField(
            model_name='location',
            name='last_accessed',
            field=models.DateTimeField(auto_now=True),
        ),
        migrations.AlterField(
            model_name='organization',
            name='service',
            field=models.CharField(max_length=200),
        ),
    ]
|
import json
import requests
import random
import subprocess
import sys
from urllib.request import urlopen, URLError
from getch import getch
red = "\033[91m"
green = "\033[92m"
reset = "\033[0m"
def display(result):
    """Display images fetched from any subreddit in your terminal.

    Picks a random, not-yet-shown url each round and renders it with w3m,
    then waits for a keypress.  Exits the whole process when the user
    presses ``e`` or when every url has been shown.

    Args:
        result (list): A list of urls which you want to display.
    """
    shown = set()
    unique_urls = set(result)
    while True:
        # Bug fix: once every url had been shown the original loop spun
        # forever, re-drawing nothing; exit cleanly instead.
        if shown >= unique_urls:
            subprocess.call(["clear"])
            sys.exit()
        url = random.choice(result)
        if url not in shown:
            # Pass argv as a list (shell=False) so a url containing shell
            # metacharacters cannot be interpreted by the shell.
            subprocess.call(["w3m", "-o", "ext_image_viewer=false",
                            "-o", "confirm_qq=false", url])
            subprocess.call(["clear"])
            shown.add(url)
            print("%sPress e to exit or any other key to continue....%s" % (green, reset))
            key = getch()
            if key == "e":
                subprocess.call(["clear"])
                sys.exit()
# Check subreddit validity if url is invalid return error else return subreddit as sub
def check_validity():
    """Prompt for a subreddit name until a valid one is given.

    Validity is checked by fetching ``https://www.reddit.com/r/<sub>``;
    a URLError means the subreddit does not exist.

    Returns:
        str: The name of the subreddit if it is valid.
    """
    while True:
        sub = input("%sEnter the name of a valid subreddit: " % (green))
        print()
        print("%s[+] Checking subreddit validity...." % (green))
        print()
        try:
            # Bug fix: ``"".isspace()`` is False, so an empty input used to
            # slip through and probe the bare /r/ url (which succeeds).
            if not sub.strip():
                print("%s[-] Invalid subreddit" % (red))
                print()
            else:
                urlopen("https://www.reddit.com/r/{}".format(sub))
                print("%s[+] Subreddit found!" % (green))
                print()
                break
        except URLError:
            print("%s[-] Invalid subreddit!" % (red))
            print()
    return sub
# Get a list of image url from a supplied subreddit and category
def get_img(str1, str2):
    """Fetch image url links of a subreddit via the .json endpoint.

    Args:
        str1 (string): Subreddit name.
        str2 (string): Category type (top, rising, etc).

    Returns:
        list: Urls of scraped images, or ``False`` when none were found.
    """
    number = 99999  # requested listing size (reddit caps this server-side)
    url = "https://www.reddit.com/r/{}/{}/".format(str1, str2)
    # Request the listing as JSON; a browser-like UA avoids being blocked.
    r = requests.get("{}.json?limit={}".format(url, number),
                     headers={'user-agent': 'Mozilla/5.0'})
    img_list = []
    formats = ("jpeg", "jpg", "png")
    # Keep only posts whose url looks like a direct image link.
    for post in r.json()['data']['children']:
        post_url = post['data']['url']
        if any(fmt in post_url for fmt in formats):
            img_list.append(post_url)
    if not img_list:
        # Typo fix: "Back luck" -> "Bad luck".
        print("%s[-] Bad luck partner! No images found in subreddit!%s" % (red, reset))
        print()
        return False
    return img_list
|
"""
Exercise 2
More anagrams!
Write a program that reads a word list from a file (see Section 9.1) and
prints all the sets of words that are anagrams.
Here is an example of what the output might look like:
['deltas', 'desalt', 'lasted', 'salted', 'slated', 'staled']
['retainers', 'ternaries']
['generating', 'greatening']
['resmelts', 'smelters', 'termless']
Hint: you might want to build a dictionary that maps from a collection of
letters to a list of words that can be spelled with those letters. The
question is, how can you represent the collection of letters in a way that
can be used as a key? Modify the previous program so that it prints the
longest list of anagrams first, followed by the second longest, and so on.
In Scrabble a “bingo” is when you play all seven tiles in your rack, along
with a letter on the board, to form an eight-letter word. What collection
of 8 letters forms the most possible bingos?
Solution: http://thinkpython2.com/code/anagram_sets.py.
"""
|
"""empty message
Revision ID: 515d2606c301
Revises: 1be5b55ecd31
Create Date: 2017-06-09 19:45:19.002828
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '515d2606c301'
down_revision = '1be5b55ecd31'
branch_labels = None
depends_on = None
def upgrade():
    """Apply the migration: drop the legacy ``rods`` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('rods')
    # ### end Alembic commands ###
def downgrade():
    """Revert the migration: recreate the ``rods`` table.

    Note the table data itself is not restored, only the schema
    (columns, foreign key to ``board`` and composite primary key).
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('rods',
    sa.Column('timestamp', postgresql.TIMESTAMP(), autoincrement=False, nullable=False),
    sa.Column('rods', sa.VARCHAR(), autoincrement=False, nullable=False),
    sa.Column('board_id', sa.INTEGER(), autoincrement=False, nullable=False),
    sa.ForeignKeyConstraint(['board_id'], [u'board.id'], name=u'rods_board_id_fkey'),
    sa.PrimaryKeyConstraint('timestamp', 'board_id', name=u'rods_pkey')
    )
    # ### end Alembic commands ###
|
# Read the three scores from stdin, one per line.
A = float(input())
B = float(input())
C = float(input())
# Weighted average: A weighs 2, B weighs 3, C weighs 5 (weights sum to 10).
MEDIA = A*2+B*3+C*5
MEDIA /= 10
# Print with exactly one decimal place.
print("MEDIA = %.1f"% (MEDIA))
|
#JSON에 모든 정보가 들어있는데 보안상 문제가 있을 수 있어서 이것을 사용
from rest_framework import serializers #데이터를 보기 좋게 만들기 위함
from django.contrib.auth.models import User
class UserShortcutSerializer(serializers.ModelSerializer):
    """Serialize only a safe subset of ``User`` fields.

    The full model JSON would expose sensitive columns, so output is
    restricted to a whitelist of public profile fields.
    """

    class Meta:
        model = User  # model being serialized
        # Only these fields appear in the serialized output.
        fields = ("username", "email", "first_name", "last_name")
|
'''
A quick set of tools for doing stack doping.
'''
import logging
import vtrace
logger = logging.getLogger(__name__)
def dopeThreadStack(trace, threadid):
    """Fill the unused part of one thread's stack with sentinel bytes.

    Writes 0x56 (``V``) over the region between the base of the stack's
    memory map and the current stack pointer, so later inspection can tell
    how deep the stack actually grew.  Always restores the previously
    selected thread before returning.

    Args:
        trace: vtrace trace object (attached and stopped).
        threadid: id of the thread whose stack is doped.
    """
    curthread = trace.getCurrentThread()
    try:
        trace.selectThread(threadid)
        sp = trace.getStackCounter()
        mmap = trace.getMemoryMap(sp)
        if mmap is None:
            raise Exception('Thread %d has invalid stack pointer 0x%.8x' % (threadid, sp))
        mapva, mapsize, mperms, mfname = mmap
        dopesize = sp - mapva
        # Bug fix: on Python 3 writeMemory() needs bytes; passing a str
        # raised a TypeError and silently aborted via the except below.
        trace.writeMemory(mapva, b'V' * dopesize)
    except Exception as e:
        logger.warning('dopeThreadStack Failed On %d (%s)', threadid, e)
    trace.selectThread(curthread)
def dopeAllThreadStacks(trace):
    '''
    Apply stack doping to all thread stacks.
    '''
    # Iterate the thread dict directly; .keys() added nothing.
    for threadid in trace.getThreads():
        dopeThreadStack(trace, threadid)
class ThreadDopeNotifier(vtrace.Notifier):
    """Notifier that re-dopes every thread stack on each trace event."""

    def notify(self, event, trace):
        # Called by vtrace whenever the registered event fires.
        dopeAllThreadStacks(trace)
dopenotif = ThreadDopeNotifier()
def enableEventDoping(trace):
    """Start doping all thread stacks each time the trace continues."""
    trace.registerNotifier(vtrace.NOTIFY_CONTINUE, dopenotif)
def disableEventDoping(trace):
    """Stop the continue-event stack doping set up by enableEventDoping()."""
    trace.deregisterNotifier(vtrace.NOTIFY_CONTINUE, dopenotif)
|
# NOTE(review): Python 2 / TensorFlow 1.x example (print statement, xrange,
# tf.placeholder, initialize_all_variables).  It will not run on modern
# Python/TF without porting.
x = tf.placeholder(tf.float32)
y = tf.placeholder(tf.float32)
# Single linear unit: y_hat = x @ w + b.
w = tf.Variable(tf.zeros([1, 1], dtype=tf.float32))
b = tf.Variable(tf.ones([1, 1], dtype=tf.float32))
y_hat = tf.add(b, tf.matmul(x, w))
# ...more setup for optimization and what not...
saver = tf.train.Saver() # defaults to saving all variables - in this case w and b
with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())
    if FLAGS.train:
        for i in xrange(FLAGS.training_steps):
            # ...training loop...
            if (i + 1) % FLAGS.checkpoint_steps == 0:
                # Periodic checkpoint; global_step is appended to the filename.
                saver.save(sess, FLAGS.checkpoint_dir + 'model.ckpt', global_step=i+1)
    else:
        # Here's where you're restoring the variables w and b.
        # Note that the graph is exactly as it was when the variables were
        # saved in a prior training run.
        ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            # ...no checkpoint found...
            print "NO CHECKPOINT FOUND!"
        # Now you can run the model to get predictions
        # NOTE(review): the next line is a pseudocode placeholder, not valid
        # Python -- an expression must follow the '='.
        batch_x = # ...load some data...
        predictions = sess.run(y_hat, feed_dict={x: batch_x})
|
import openmc
from openmc import RegularMesh, Tally
from openmc.filter import *
from openmc.filter_expansion import *

from tests.testing_harness import HashedPyAPITestHarness
def test_tallies():
    """Exercise a wide range of tally filter/estimator combinations.

    Builds one tally per filter type and estimator, attaches them all to
    the harness model, and defers pass/fail to the hashed statepoint
    comparison performed by ``HashedPyAPITestHarness.main()``.

    Bug fix: four assignments used the misspelled attribute ``estimatir``,
    so those tallies silently kept the default estimator.
    """
    harness = HashedPyAPITestHarness('statepoint.5.h5')
    model = harness._model
    # Set settings explicitly
    model.settings.batches = 5
    model.settings.inactive = 0
    model.settings.particles = 400
    model.settings.source = openmc.Source(space=openmc.stats.Box(
        [-160, -160, -183], [160, 160, 183]))
    azimuthal_bins = (-3.14159, -1.8850, -0.6283, 0.6283, 1.8850, 3.14159)
    azimuthal_filter = AzimuthalFilter(azimuthal_bins)
    azimuthal_tally1 = Tally()
    azimuthal_tally1.filters = [azimuthal_filter]
    azimuthal_tally1.scores = ['flux']
    azimuthal_tally1.estimator = 'tracklength'
    azimuthal_tally2 = Tally()
    azimuthal_tally2.filters = [azimuthal_filter]
    azimuthal_tally2.scores = ['flux']
    azimuthal_tally2.estimator = 'analog'
    mesh_2x2 = RegularMesh(mesh_id=1)
    mesh_2x2.lower_left = [-182.07, -182.07]
    mesh_2x2.upper_right = [182.07, 182.07]
    mesh_2x2.dimension = [2, 2]
    mesh_filter = MeshFilter(mesh_2x2)
    azimuthal_tally3 = Tally()
    azimuthal_tally3.filters = [azimuthal_filter, mesh_filter]
    azimuthal_tally3.scores = ['flux']
    azimuthal_tally3.estimator = 'tracklength'
    cellborn_tally = Tally()
    cellborn_tally.filters = [
        CellbornFilter((model.geometry.get_all_cells()[10],
                        model.geometry.get_all_cells()[21],
                        22, 23))]  # Test both Cell objects and ids
    cellborn_tally.scores = ['total']
    dg_tally = Tally()
    dg_tally.filters = [DelayedGroupFilter((1, 2, 3, 4, 5, 6))]
    dg_tally.scores = ['delayed-nu-fission', 'decay-rate']
    dg_tally.nuclides = ['U235', 'O16', 'total']
    four_groups = (0.0, 0.253, 1.0e3, 1.0e6, 20.0e6)
    energy_filter = EnergyFilter(four_groups)
    energy_tally = Tally()
    energy_tally.filters = [energy_filter]
    energy_tally.scores = ['total']
    energyout_filter = EnergyoutFilter(four_groups)
    energyout_tally = Tally()
    energyout_tally.filters = [energyout_filter]
    energyout_tally.scores = ['scatter']
    transfer_tally = Tally()
    transfer_tally.filters = [energy_filter, energyout_filter]
    transfer_tally.scores = ['scatter', 'nu-fission']
    material_tally = Tally()
    material_tally.filters = [
        MaterialFilter((model.geometry.get_materials_by_name('UOX fuel')[0],
                        model.geometry.get_materials_by_name('Zircaloy')[0],
                        3, 4))]  # Test both Material objects and ids
    material_tally.scores = ['total']
    mu_bins = (-1.0, -0.5, 0.0, 0.5, 1.0)
    mu_filter = MuFilter(mu_bins)
    mu_tally1 = Tally()
    mu_tally1.filters = [mu_filter]
    mu_tally1.scores = ['scatter', 'nu-scatter']
    mu_tally2 = Tally()
    mu_tally2.filters = [mu_filter, mesh_filter]
    mu_tally2.scores = ['scatter', 'nu-scatter']
    polar_bins = (0.0, 0.6283, 1.2566, 1.8850, 2.5132, 3.14159)
    polar_filter = PolarFilter(polar_bins)
    polar_tally1 = Tally()
    polar_tally1.filters = [polar_filter]
    polar_tally1.scores = ['flux']
    polar_tally1.estimator = 'tracklength'
    polar_tally2 = Tally()
    polar_tally2.filters = [polar_filter]
    polar_tally2.scores = ['flux']
    polar_tally2.estimator = 'analog'
    polar_tally3 = Tally()
    polar_tally3.filters = [polar_filter, mesh_filter]
    polar_tally3.scores = ['flux']
    polar_tally3.estimator = 'tracklength'
    legendre_filter = LegendreFilter(order=4)
    legendre_tally = Tally()
    legendre_tally.filters = [legendre_filter]
    legendre_tally.scores = ['scatter', 'nu-scatter']
    legendre_tally.estimator = 'analog'  # typo fix: was "estimatir"
    harmonics_filter = SphericalHarmonicsFilter(order=4)
    harmonics_tally = Tally()
    harmonics_tally.filters = [harmonics_filter]
    harmonics_tally.scores = ['scatter', 'nu-scatter', 'flux', 'total']
    harmonics_tally.estimator = 'analog'  # typo fix: was "estimatir"
    harmonics_tally2 = Tally()
    harmonics_tally2.filters = [harmonics_filter]
    harmonics_tally2.scores = ['flux', 'total']
    harmonics_tally2.estimator = 'collision'  # typo fix: was "estimatir"
    harmonics_tally3 = Tally()
    harmonics_tally3.filters = [harmonics_filter]
    harmonics_tally3.scores = ['flux', 'total']
    harmonics_tally3.estimator = 'tracklength'  # typo fix: was "estimatir"
    universe_tally = Tally()
    universe_tally.filters = [
        UniverseFilter((model.geometry.get_all_universes()[1],
                        model.geometry.get_all_universes()[2],
                        3, 4, 6, 8))]  # Test both Universe objects and ids
    universe_tally.scores = ['total']
    cell_filter = CellFilter((model.geometry.get_all_cells()[10],
                              model.geometry.get_all_cells()[21],
                              22, 23, 60))  # Test both Cell objects and ids
    score_tallies = [Tally() for i in range(6)]
    for t in score_tallies:
        t.filters = [cell_filter]
        t.scores = ['absorption', 'delayed-nu-fission', 'events', 'fission',
                    'inverse-velocity', 'kappa-fission', '(n,2n)', '(n,n1)',
                    '(n,gamma)', 'nu-fission', 'scatter', 'elastic',
                    'total', 'prompt-nu-fission', 'fission-q-prompt',
                    'fission-q-recoverable', 'decay-rate']
    # Two tallies per estimator type; odd-indexed ones also get nuclides.
    for t in score_tallies[0:2]: t.estimator = 'tracklength'
    for t in score_tallies[2:4]: t.estimator = 'analog'
    for t in score_tallies[4:6]: t.estimator = 'collision'
    for t in score_tallies[1::2]:
        t.nuclides = ['U235', 'O16', 'total']
    cell_filter2 = CellFilter((21, 22, 23, 27, 28, 29, 60))
    flux_tallies = [Tally() for i in range(3)]
    for t in flux_tallies:
        t.filters = [cell_filter2]
        t.scores = ['flux']
    flux_tallies[0].estimator = 'tracklength'
    flux_tallies[1].estimator = 'analog'
    flux_tallies[2].estimator = 'collision'
    all_nuclide_tallies = [Tally() for i in range(4)]
    for t in all_nuclide_tallies:
        t.filters = [cell_filter]
        t.estimator = 'tracklength'
        t.nuclides = ['all']
        t.scores = ['total']
    all_nuclide_tallies[1].estimator = 'collision'
    all_nuclide_tallies[2].filters = [mesh_filter]
    all_nuclide_tallies[3].filters = [mesh_filter]
    all_nuclide_tallies[3].nuclides = ['U235']
    fusion_tally = Tally()
    fusion_tally.scores = ['H1-production', 'H2-production', 'H3-production',
                           'He3-production', 'He4-production', 'heating', 'damage-energy']
    model.tallies += [
        azimuthal_tally1, azimuthal_tally2, azimuthal_tally3,
        cellborn_tally, dg_tally, energy_tally, energyout_tally,
        transfer_tally, material_tally, mu_tally1, mu_tally2,
        polar_tally1, polar_tally2, polar_tally3, legendre_tally,
        harmonics_tally, harmonics_tally2, harmonics_tally3, universe_tally]
    model.tallies += score_tallies
    model.tallies += flux_tallies
    model.tallies += all_nuclide_tallies
    model.tallies.append(fusion_tally)
    harness.main()
|
import sys
import glob
import time
import os.path
import re
import tree
import parse
## Input Parameters
# Command-line layout:
#   argv[1] input dir, argv[2] output dir, argv[3] IS pileup filename,
#   argv[4] IV sample id, argv[5] directory holding the IV pileups.
IN_DIR = sys.argv[1]
OUT_DIR = sys.argv[2]
IS_INPUT = sys.argv[3]
IV_INPUT = sys.argv[4]
IV_PUP_DIR = sys.argv[5]
## Key Variables
NC_P_FILE = "/data/project/RefStand/gavehan/PIPE/py_pickle/nc.treedic.nc.p"  # pickled NC tree-dic
FA_DIR = "/data/project/RefStand/FA/2.After_ManCNV"  # directory of FA vcf files
ALT_PAT_SET = set("atgcATGC")  # pileup chars that denote an alternate base
BQ_MIN_CNST = ord("!")  # Phred+33 base-quality offset
def GrabIvPupFile(iv_pup_dir, iv_pup_id, id_head):
    """Find the first .pileup file in *iv_pup_dir* matching a sample id.

    Args:
        iv_pup_dir: directory to scan for ``*.pileup`` files.
        iv_pup_id: sample id that must appear in the file name.
        id_head: optional required file-name prefix; falsy to disable.

    Returns:
        The matching path, or None when nothing matches.
    """
    if id_head:
        pattern = re.compile("^{}.*{}.*".format(id_head, iv_pup_id))
    else:
        pattern = re.compile(".*{}.*".format(iv_pup_id))
    for candidate in glob.glob(iv_pup_dir + "/*.pileup"):
        basename = candidate.split("/")[-1]
        if pattern.fullmatch(basename) is not None:
            return candidate
    return None
def GetFaFileList(fa_dir, mos):
    """Return the het/hom FA vcf paths for a given mosaic level.

    Each mosaic level maps to a fixed tuple of FA versions; for every
    version both the ``-het`` and ``-hom`` vcf are listed, in that order.
    An unknown level yields an empty list.
    """
    version_map = {1: (1, 2), 2: (1, 3, 4), 3: (1, 3, 5)}
    head = fa_dir + "/FA-V"
    fa_file_list = []
    for version in version_map.get(mos, ()):
        for zygosity in ("het", "hom"):
            fa_file_list.append("{}{}-{}.vcf".format(head, version, zygosity))
    return fa_file_list
def EnrichFaTreeDic(fa_file, fa_tree_dic):
    """Insert every position of an FA vcf into the per-chromosome tree dict.

    Args:
        fa_file: path to an FA vcf; column 0 is "chr<key>", column 1 the position.
        fa_tree_dic: dict mapping chromosome key -> tree.LLRBtree; mutated in place.

    Returns:
        The same (mutated) fa_tree_dic.
    """
    init_time = time.time()
    fa_file_id_list = fa_file.split("/")[-1].split(".")
    # Tag stored at each position: the file name without its extension.
    fa_id = ".".join(fa_file_id_list[:-1])
    with open(fa_file, "r") as fa_f:
        for l_cnt, fa_l in enumerate(fa_f):
            parse.printprog(l_cnt, init_time)  # progress indicator
            fa_e = fa_l.split()
            fa_chr_key = fa_e[0][3:]  # strip the "chr" prefix
            if not(fa_chr_key in fa_tree_dic):
                fa_tree_dic[fa_chr_key] = tree.LLRBtree()
            # Report duplicate positions before inserting again.
            if not(fa_tree_dic[fa_chr_key].search(int(fa_e[1])) is None):
                print("ERROR: Duplicate FA tree-dic entry: {}".format(fa_l))
            fa_tree_dic[fa_chr_key].insert(int(fa_e[1]), fa_id)
    return fa_tree_dic
def WriteCvcfFile(pup_file, out_file, fa_tree_dic):
    """Convert a .pileup file into a tagged, tab-separated .cvcf table.

    For every pileup line whose chromosome/position is present in
    *fa_tree_dic*, parse the base/quality columns with ProcessPupLine()
    and write one row with counts, mean base qualities, allele fraction
    and the FA tag stored in the tree.

    Args:
        pup_file: input samtools .pileup path.
        out_file: output .cvcf path (overwritten).
        fa_tree_dic: chromosome key -> tree mapping position -> FA tag.
    """
    init_time = time.time()
    with open(pup_file, "r") as p_f, open(out_file, "w") as out_f:
        hd = "{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n".format(
            "#CHR", "POS", "REF_NT", "ALT_NT",
            "REF_AVG_BQ", "ALT_AVG_BQ", "REF_CNT", "ALT_CNT",
            "AF", "TAG")
        out_f.write(hd)
        for l_cnt, p_l in enumerate(p_f):
            parse.printprog(l_cnt, init_time)  # progress indicator
            # Skip header/comment lines.
            if "#" in p_l:
                continue
            p_e = p_l.split()
            p_chr_key = p_e[0][3:]  # strip the "chr" prefix
            if not(p_chr_key in fa_tree_dic):
                continue
            # Only positions annotated in the FA tree are emitted.
            fa_tag = fa_tree_dic[p_chr_key].search(int(p_e[1]))
            if fa_tag is None:
                continue
            try:
                p_info = ProcessPupLine(p_e[4], p_e[5])
                # Allele fraction = alt / (ref + alt); 0.0 when no alt seen.
                af = p_info[-1] / (p_info[-2] + p_info[-1]) if p_info[-1] > 0 else 0.0
                out_l = "{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n".format(
                    p_e[0], p_e[1], p_e[2], p_info[0], p_info[1],
                    p_info[2], p_info[3], p_info[4], af, fa_tag)
                out_f.write(out_l)
            except Exception as e:
                # Surface the offending line before re-raising.
                print("ERROR: .pup Line: {}".format(p_l))
                raise e
def ProcessPupLine(base_str, qual_str):
    """Parse one samtools-pileup base/quality column pair.

    Walks the base string and the quality string in lockstep, counting
    reference matches ('.'/',') and A/T/C/G alternates, accumulating their
    base qualities, and skipping pileup syntax that carries no quality
    (read starts ``^X``, indels ``+N<seq>`` / ``-N<seq>``).

    Args:
        base_str: pileup base column (field 5 of a .pileup line).
        qual_str: pileup base-quality column (field 6).

    Returns:
        tuple: (major_alt_nt, ref_avg_bq, alt_avg_bq, ref_cnt, alt_cnt),
        where major_alt_nt is "." when no alternate base was observed.
    """
    # Local copies of the module constants so the parser is self-contained
    # (values identical to ALT_PAT_SET / BQ_MIN_CNST).
    alt_chars = set("atgcATGC")
    bq_offset = ord("!")  # Phred+33 encoding
    base_idx = 0
    qual_idx = 0
    alt_cnt_dic = {"A": 0, "T": 0, "C": 0, "G": 0}
    alt_bq_dic = {"A": 0, "T": 0, "C": 0, "G": 0}
    ref_cnt = 0
    ref_bq = 0.0
    while base_idx < len(base_str) and qual_idx < len(qual_str):
        cur_nt = base_str[base_idx]
        cur_bq = ord(qual_str[qual_idx]) - bq_offset
        if cur_nt == "." or cur_nt == ",":
            # Reference match (forward / reverse strand).
            ref_cnt += 1
            ref_bq += cur_bq
            qual_idx += 1
        elif cur_nt in alt_chars:
            alt_cnt_dic[cur_nt.upper()] += 1
            alt_bq_dic[cur_nt.upper()] += cur_bq
            qual_idx += 1
        elif cur_nt == "^":
            # '^' starts a read segment; the following char is mapping
            # quality, so skip one extra base char and consume no quality.
            base_idx += 1
        elif cur_nt == "*" or cur_nt == "<" or cur_nt == ">":
            # Deletion placeholder / reference skip: consumes a quality.
            qual_idx += 1
        elif cur_nt == "+" or cur_nt == "-":
            # Indel: "+<len><seq>"; skip the length digits plus the sequence.
            skip = base_idx + 1
            indel_len_str = ""
            while(base_str[skip].isdigit()):
                indel_len_str += base_str[skip]
                skip += 1
            indel_len = int(indel_len_str)
            base_idx = skip + indel_len - 1
        base_idx += 1
    maj_alt_nt = "."
    maj_alt_cnt = 0
    for nt in alt_cnt_dic:
        if alt_cnt_dic[nt] > maj_alt_cnt:
            maj_alt_nt = nt
            maj_alt_cnt = alt_cnt_dic[nt]
    # Bug fix: the zero-division guard read ``ref_cnt > 1``, which wrongly
    # reported an average quality of 0.0 when exactly one ref base was seen.
    ref_avg_bq = ref_bq / ref_cnt if ref_cnt > 0 else 0.0
    maj_alt_avq_bq = 0.0 if maj_alt_nt == "." else alt_bq_dic[maj_alt_nt] / maj_alt_cnt
    return (maj_alt_nt, ref_avg_bq, maj_alt_avq_bq, ref_cnt, maj_alt_cnt)
if __name__ == '__main__':
    # Derived input/output paths for the IS sample and the IV sample.
    is_pup_file = IN_DIR + "/" + IS_INPUT
    is_cvcf_file = OUT_DIR + "/" + IS_INPUT[:-4] + ".af.bq.tag.cvcf"
    iv_pup_file = GrabIvPupFile(IV_PUP_DIR, IV_INPUT, "For_PU_")
    iv_cvcf_file = OUT_DIR + "/" + IV_INPUT + ".af.bq.tag.cvcf"
    if os.path.isfile(is_cvcf_file) and os.path.isfile(iv_cvcf_file):
        # Both outputs already exist; nothing to do.
        print("** IS .cvcf and IV .cvcf have been generated previously")
    else:
        # Build the FA annotation tree once, then reuse it for both outputs.
        print("** Loading FA tree-dic with NC data from {}".format(NC_P_FILE))
        fa_annot_tree_dic = parse.loadpickle(NC_P_FILE)
        # Mosaic level is encoded in the 2nd character of the IV sample id
        # (presumably -- TODO confirm against the upstream naming scheme).
        fa_file_list = GetFaFileList(FA_DIR, int(IV_INPUT[1]))
        for fa_file in fa_file_list:
            print("** Enriching FA tree-dic with {}".format(fa_file))
            fa_annot_tree_dic = EnrichFaTreeDic(fa_file, fa_annot_tree_dic)
        print("** FA tree-dic construction complete")
        if os.path.isfile(is_cvcf_file):
            print("** IS .cvcf has been generated previously: {}".format(is_cvcf_file))
        else:
            print("** Generating IS .cvcf: {}".format(is_cvcf_file))
            WriteCvcfFile(is_pup_file, is_cvcf_file, fa_annot_tree_dic)
        if os.path.isfile(iv_cvcf_file):
            print("** IV .cvcf has been generated previously: {}".format(iv_cvcf_file))
        else:
            print("** Generating IV .cvcf: {}".format(iv_cvcf_file))
            WriteCvcfFile(iv_pup_file, iv_cvcf_file, fa_annot_tree_dic)
    print("** All operations complete")
|
"""
The module for Unbounded Interleaved-State Recurrent Neural Network.
An introduction is available at [README.md].
[README.md]: https://github.com/google/uis-rnn/blob/master/README.md
Source: https://github.com/google/uis-rnn
"""
from wildspeech.diarization.uisrnn import arguments, evals, utils, uisrnn
#pylint: disable=C0103
# Re-export the package's public API under short, stable names.
parse_arguments = arguments.parse_arguments
compute_sequence_match_accuracy = evals.compute_sequence_match_accuracy
output_result = utils.output_result
UISRNN = uisrnn.UISRNN
|
from collections import Counter as co, deque as dq
# For each test case: read n values and window size k, find the most common
# value mc, then scan in strides of k counting how many "operations" are
# needed to deal with positions not equal to mc.
for _ in range(int(input())):
    n,k = map(int, input().split())
    c = list(map(int, input().split()))
    ans = 0
    # Most common (value, count) pair.
    mc = max([x for x in co(c).items()], key=lambda x: x[1])
    i = 0
    while i<n:
        if c[i]!=mc[0]:
            # Mismatch at the stride position: one operation, jump k ahead.
            ans+=1
            i+=k
        elif i+k>=n:
            # Tail shorter than k: scan the remainder for any mismatch.
            # NOTE(review): this inner loop tests c[i] (a fixed index)
            # rather than the loop variable -- possibly intended to be
            # c[_]; verify against the original problem statement.
            for _ in range(i,n):
                if c[i]!=mc[0]:
                    ans+=1
                    break
            break
        else:
            i+=1
    print(ans)
class Computer:
    """Encapsulation demo: a price hidden behind a name-mangled attribute.

    A double leading underscore makes ``__maxprice`` "private" (mangled to
    ``_Computer__maxprice``); a single underscore would only mark it
    "protected" by convention.
    """

    def __init__(self):
        # Default selling price; only setMaxPrice() should change it.
        self.__maxprice = 900

    def sell(self):
        """Print the current selling price."""
        print(f"Selling Price: {self.__maxprice}")

    def setMaxPrice(self, price):
        """Setter: the sanctioned way to change the private price."""
        self.__maxprice = price
class Parrot:
    """Duck-typing demo bird: can fly, cannot swim."""

    def fly(self):
        print("Parrot can fly")

    def swim(self):
        print("Parrot can't swim")
class Penguin:
    """Duck-typing demo bird: cannot fly, can swim."""

    def fly(self):
        print("Penguin can't fly")

    def swim(self):
        print("Penguin can swim")
#common interface
def flying_test(bird):
    """Common interface: call fly() on any bird-like object (polymorphism)."""
    bird.fly()
#instantiate object
blu = Parrot()
peggy = Penguin()
#passing the object: the same call works for both classes (polymorphism)
flying_test(blu)
flying_test(peggy)
#initialize the variable
c = Computer()
c.sell()
#changing price using variables.
# NOTE: because of name mangling this creates a NEW attribute c.__maxprice
# and does NOT touch the private _Computer__maxprice, so sell() is unchanged.
c.__maxprice = 1000
c.sell()
#using setter: this one actually changes the price
c.setMaxPrice(4000)
c.sell()
|
# -*- coding=utf-8 -*-
import math
from flask import Flask, current_app, request, \
render_template
from xp_mall.mall import mall_module
from xp_mall.models.order import Order
from xp_mall.extensions import db,csrf
from xp_mall.utils import get_pay_obj
@csrf.exempt
@mall_module.route('/pay/<string:payment>/<string:call_type>', methods=['GET', 'POST'])
def pay_confirm(payment, call_type):
    """Payment-gateway callback endpoint.

    Args:
        payment: gateway name, resolved to a pay object by get_pay_obj().
        call_type: "return" for the user's browser redirect, "notify" for
            the gateway's server-to-server notification.
    """
    current_app.logger.debug(request)
    pay = get_pay_obj(payment)
    # confirm_pay validates the callback and extracts the order info.
    res, order_info, out_html = pay.confirm_pay(request)
    if res:
        if call_type == "return":
            return render_template("member/order/success.html")
        elif call_type == "notify":
            out_trade_no = order_info['out_trade_no']
            # NOTE(review): .first() may return None when no pending order
            # matches -- the attribute access below would then raise.
            order = Order.query.filter_by(order_no=out_trade_no, status=0).first()
            total_price = order_info['total_price']
            # Mark paid only when the notified amount matches the order.
            if math.isclose(order.total_price,total_price):
                # status 0: awaiting payment, 1: paid, 2: shipped, 3: received
                order.status=1
                db.session.commit()
            return out_html
    else:
        if call_type == "return":
            return "尚未到账,请稍后刷新页面"
        elif call_type == "notify":
            return out_html
|
b=10 # assign 10 to B
a=20 # assign 20 to A
b = int(input()) # read a new value for B from stdin
print(a,b) # prints A and the new value of B
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Test the DDEC6 in cclib"""
from __future__ import print_function
import sys
import os
import logging
import unittest
import numpy
from cclib.method import DDEC6, volume
from cclib.parser import Psi4
from cclib.method.calculationmethod import MissingAttributeError
from numpy.testing import assert_allclose
from ..test_data import getdatafile
class DDEC6Test(unittest.TestCase):
    """DDEC6 method tests."""

    def setUp(self):
        super(DDEC6Test, self).setUp()
        self.parse()

    def parse(self):
        # Parse a small Psi4 water MP2 log and build a coarse grid volume.
        self.data, self.logfile = getdatafile(Psi4, "basicPsi4-1.2.1", ["water_mp2.out"])
        self.volume = volume.Volume((-4, -4, -4), (4, 4, 4), (0.2, 0.2, 0.2))

    def testmissingrequiredattributes(self):
        """Is an error raised when required attributes are missing?"""
        for missing_attribute in DDEC6.required_attrs:
            # Re-parse each time so exactly one attribute is missing per check.
            self.parse()
            delattr(self.data, missing_attribute)
            with self.assertRaises(MissingAttributeError):
                trialBader = DDEC6(self.data, self.volume)

    def test_proatom_read(self):
        """Are proatom densities imported correctly?"""
        self.parse()
        # Third argument points DDEC6 at the proatom data next to this file.
        self.analysis = DDEC6(self.data, self.volume, os.path.dirname(os.path.realpath(__file__)))
        refH_den = [
            2.66407645e-01,
            2.66407645e-01,
            2.66407643e-01,
            2.66407612e-01,
            2.66407322e-01,
        ]  # Hydrogen first five densities
        refH_r = [
            1.17745807e-07,
            4.05209491e-06,
            3.21078677e-05,
            1.39448474e-04,
            4.35643929e-04,
        ]  # Hydrogen first five radii
        refO_den = [
            2.98258510e02,
            2.98258510e02,
            2.98258509e02,
            2.98258487e02,
            2.98258290e02,
        ]  # Oxygen first five densities
        refO_r = [
            5.70916728e-09,
            1.97130512e-07,
            1.56506399e-06,
            6.80667366e-06,
            2.12872046e-05,
        ]  # Oxygen first five radii
        # Atom order in water_mp2.out is O, H, H.
        assert_allclose(self.analysis.proatom_density[0][0:5], refO_den, rtol=1e-3)
        assert_allclose(self.analysis.proatom_density[1][0:5], refH_den, rtol=1e-3)
        assert_allclose(self.analysis.proatom_density[2][0:5], refH_den, rtol=1e-3)

    def test_step1_and_2_charges(self):
        """Are step 1 and 2 charges calculated correctly?

        Here, values are compared against `chargemol` calculations.
        Due to the differences in basis set used for calculation and slightly different integration
        grid, some discrepancy is inevitable in the comparison.

        TODO: Test suite based on horton densities will be added after full implementation of
        DDEC6 algorithm.
        """
        self.parse()
        # use precalculated fine cube file
        imported_vol = volume.read_from_cube(
            os.path.join(os.path.dirname(os.path.realpath(__file__)), "water_fine.cube")
        )
        analysis = DDEC6(self.data, imported_vol, os.path.dirname(os.path.realpath(__file__)))
        analysis.calculate()
        # For each atom, find grid indices closest to a set of probe radii.
        radial_indices = []
        for atomi in range(len(self.data.atomnos)):
            lst = []
            for radius in [0.05, 0.10, 0.15, 0.20, 0.25]:
                # find closest radius index
                lst.append(numpy.abs(analysis.radial_grid_r[atomi] - radius).argmin())
            radial_indices.append(lst)
        # values from `chargemol` calculation
        # which is based on proatomic densities calculated with different basis set.
        # discrepancy comes from the fact that `chargemol` grid & `horton` grid don't exactly match
        # (rtol is adjusted to account for this inevitable discrepancy)
        # STEP 1
        # Check assigned charges.
        assert_allclose(analysis.refcharges[0], [-0.513006, 0.256231, 0.256775], rtol=0.10)
        # STEP 2
        # Check assigned charges.
        assert_allclose(analysis.refcharges[1], [-0.831591, 0.415430, 0.416161], rtol=0.20)
        # STEP 3
        # Check integrated charge density (rho^cond(r)) on grid with integrated values (=nelec).
        self.assertAlmostEqual(
            analysis.chgdensity.integrate(), analysis.rho_cond.integrate(), delta=1
        )
        # Conditioned density + assigned charge should recover each atomic number.
        for atomi in range(len(analysis.data.atomnos)):
            self.assertAlmostEqual(
                analysis._integrate_from_radial([analysis._cond_density[atomi]], [atomi])
                + analysis.refcharges[-1][atomi],
                analysis.data.atomnos[atomi],
                delta=0.5,
            )
        # Also compare with data from `chargemol`
        # discrepancy comes from the fact that `chargemol` grid and `horton` grid do not exactly match
        assert_allclose(
            analysis.tau[0][radial_indices[0]],
            [0.999846160, 0.999739647, 0.999114037, 0.997077942, 0.994510889],
            rtol=0.10,
        )
        assert_allclose(
            analysis.tau[1][radial_indices[1]],
            [0.864765882, 0.848824620, 0.805562019, 0.760402501, 0.736949861],
            rtol=0.10,
        )
        assert_allclose(
            analysis.tau[2][radial_indices[2]],
            [0.845934391, 0.839099407, 0.803699493, 0.778428137, 0.698628724],
            rtol=0.10,
        )
|
#!/usr/bin/python
# labjack.py
import u3
# Open the first attached LabJack U3 (hardware side effect at import time).
d = u3.U3()
#d.debug = True
# SPI wiring for the temperature-sensor chip: CS on pin 8; the CLK/MISO/MOSI
# pins (12/15/14) are shared with the EEPROM below.
spi_conf_temp = {
    "AutoCS": True,
    "DisableDirConfig": False,
    "SPIMode": 'C',
    "SPIClockFactor": 0,
    "CSPINNum": 8,
    "CLKPinNum": 12,
    "MISOPinNum": 15,
    "MOSIPinNum": 14
}
# SPI wiring for the programmable-gain amplifier: CS on pin 11, MISO on 9.
spi_conf_pga = {
    "AutoCS": True,
    "DisableDirConfig": False,
    "SPIMode": 'C',
    "SPIClockFactor": 0,
    "CSPINNum": 11,
    "CLKPinNum": 12,
    "MISOPinNum": 9,
    "MOSIPinNum": 14
}
# SPI wiring for the EEPROM: CS on pin 10.
spi_conf_eeprom = {
    "AutoCS": True,
    "DisableDirConfig": False,
    "SPIMode": 'C',
    "SPIClockFactor": 0,
    "CSPINNum": 10,
    "CLKPinNum": 12,
    "MISOPinNum": 15,
    "MOSIPinNum": 14
}
class Labjack():
    """Wrapper around a LabJack U3 driving three SPI peripherals.

    The board shares one SPI bus between a temperature sensor, a PGA
    (programmable-gain amplifier) and an EEPROM; before each transaction
    the chip-selects of the two peripherals not being addressed are
    driven high so they ignore the traffic.  The opcodes used on the
    EEPROM (0x06/0x05/0x02/0x03) follow the usual 25-series SPI EEPROM
    command set -- presumably; confirm against the part's datasheet.
    """

    def __init__(self):
        # The device handle `d` is module-level; nothing to set up here.
        # print 'LabJack U3-LV initiated!'
        pass

    def read_temp(self):
        """Read the temperature sensor; returns a string like '23.50'."""
        # make sure pga and eeprom CS are high
        d.setDOState(spi_conf_pga['CSPINNum'], 1)
        d.setDOState(spi_conf_eeprom['CSPINNum'], 1)
        data = d.spi([0x50, 0x00, 0x00, 0x00], **spi_conf_temp)
        res = data['SPIBytes']
        # Two response bytes form a fixed-point value, 1/128 degree per LSB.
        temp = (res[1] << 8 | res[2]) / 128.0
        return "%.2f" % temp

    def setup_temp(self):
        """Send the temperature sensor its one-time configuration command."""
        # make sure pga and eeprom CS are high
        d.setDOState(spi_conf_pga['CSPINNum'], 1)
        d.setDOState(spi_conf_eeprom['CSPINNum'], 1)
        data = d.spi([0x08, 0x80], **spi_conf_temp)

    def read_gain(self):
        """Read back the PGA gain setting (inverse of set_gain's encoding)."""
        # make sure temp chip and eeprom CS are high
        d.setDOState(spi_conf_temp['CSPINNum'], 1)
        d.setDOState(spi_conf_eeprom['CSPINNum'], 1)
        res = d.spi([0x83, 0x00], **spi_conf_pga)
        gain_read = res['SPIBytes'][1]
        # Device encodes gain as 4 * (26 - value); invert that here.
        return (26.0 - gain_read / 4.0)

    def set_gain(self, gain_value):
        """Program the PGA gain (same units read_gain returns)."""
        # make sure temp chip and eeprom CS are high
        d.setDOState(spi_conf_temp['CSPINNum'], 1)
        d.setDOState(spi_conf_eeprom['CSPINNum'], 1)
        gain_value = 4 * (26 - gain_value)
        res = d.spi([0x03, gain_value], **spi_conf_pga)

    def check_eeprom_status(self):
        """Poll the EEPROM status register (opcode 0x05)."""
        # make sure temp and pga chips CS are high
        # d.setDOState(spi_conf_eeprom['CSPINNum'], 0)
        d.setDOState(spi_conf_temp['CSPINNum'], 1)
        d.setDOState(spi_conf_pga['CSPINNum'], 1)
        res = d.spi([0x05, 0x00], **spi_conf_eeprom)
        # print "eeprom status 0x%02x\n" % res['SPIBytes'][1]

    def write_eeprom(self, page, msg):
        """Write `msg` (space-padded to 16 bytes) into EEPROM page `page`."""
        # print "writing %s to page %d\n" % (msg, page)
        self.check_eeprom_status()
        page <<= 4  # pages are 16 bytes apart
        # print "enable write latch"
        res = d.spi([0x06], **spi_conf_eeprom)
        self.check_eeprom_status()
        # convert string to int array
        string_list = list(msg)
        int_array = [ord(s) for s in string_list]
        # Add spaces if the length is smaller than 16
        while len(int_array) < 16:
            int_array.append(32)
        cmd = [0x02, page] + int_array
        res = d.spi(cmd, **spi_conf_eeprom)
        self.check_eeprom_status()
        # print "read page"
        # Read the page back (result is discarded; serves as a bus flush).
        cmd = [0x03, page] + [0 for i in range(16)]
        res = d.spi(cmd, **spi_conf_eeprom)
        self.check_eeprom_status()

    def read_eeprom(self, page):
        """Read one 16-byte EEPROM page as a string (0xff shown as space)."""
        # print "Reading page %d of EEPROM ......\n" % page
        page <<= 4
        cmd = [0x03, page] + [0 for i in range(16)]
        res = d.spi(cmd, **spi_conf_eeprom)
        # join all the 16 bytes into a string
        readout = res['SPIBytes'][2:]
        for idx, i in enumerate(readout):
            if i == 0xff:
                readout[idx] = 32  # erased cells read 0xff; render as space
        array = [chr(i) for i in readout]
        return ''.join(array)

    def read_serial(self):
        """Return the SiPM serial number stored in EEPROM page 1, or 0.

        A valid page reads "<serial> UWSiPM <serial>".
        """
        cmd = [0x03, 16] + [0 for i in range(16)]
        res = d.spi(cmd, **spi_conf_eeprom)
        readout = res['SPIBytes'][2:]
        for idx, i in enumerate(readout):
            if i == 0xff:
                readout[idx] = 32
        array = [chr(i) for i in readout]
        string = ''.join(array)
        array = string.split(' ')
        # return SiPM serial number
        if len(array) == 3 and array[0] == array[2] and array[1] == 'UWSiPM':
            return int(array[0])
        else:
            return 0

    def set_led(self, led_no):
        """Select LED channel 1-16 by driving four digital lines (16-19)."""
        # convert led_number into binary format
        # "#06b" yields "0b" + 4 bits, so indices 2..5 are the bit values.
        binary = format(led_no-1, "#06b")
        array = list(binary)
        d.getFeedback(u3.BitStateWrite(16, int(array[2])))
        d.getFeedback(u3.BitStateWrite(17, int(array[3])))
        d.getFeedback(u3.BitStateWrite(18, int(array[4])))
        d.getFeedback(u3.BitStateWrite(19, int(array[5])))

    def read_led(self):
        """Read the four select lines and decode the LED channel number."""
        # encode led channel number in a binary string
        read = []
        read.append(d.getFeedback(u3.BitStateRead(16))[0])
        read.append(d.getFeedback(u3.BitStateRead(17))[0])
        read.append(d.getFeedback(u3.BitStateRead(18))[0])
        read.append(d.getFeedback(u3.BitStateRead(19))[0])
        array = [str(i) for i in read]
        binary = ''.join(array)
        led_no = int(binary, 2) + 1
        return led_no
|
from tkinter import *
'''Thank you for coming here...
May Dazzler's light shine upon u all..'''
root =Tk()
root.geometry("350x350+300+300")  # 350x350 window at screen offset (300, 300)
root.resizable(0,0)  # fixed window size
root.title("Dazzler's Calculater v1.0")
root.wm_iconbitmap("k.ico")  # requires k.ico next to the script
#Dazzler's coding
#Dazzler's coding
def click(event):
    """Handle a calculator button press (bound to <Button-1> on every button)."""
    global sc
    # The pressed button's label doubles as its input token.
    text=event.widget.cget("text")
    if text=="=":
        # Pure digits: nothing to evaluate (also avoids eval("007") errors).
        if sc.get().isdigit():
            value=int(sc.get())
        else:
            try:
                # NOTE(review): eval() on the display string is tolerable
                # only because input comes from the calculator's own buttons.
                value=eval(sc.get())
                sc.set(value)
            except Exception as e:
                value="Error!"
                sc.set(value)
    elif text=="C":
        # Clear the display.
        sc.set("")
        #screen.update()
    else:
        # Append the pressed character to the display.
        sc.set(sc.get() + text)
        #screen.update()
#screen.update()
#Screen
#dazzler's code
sc=StringVar()
sc.set("")
# NOTE(review): .pack() returns None, so `screen` is always None; the Entry
# widget itself is still created and packed -- this is why the
# screen.update() calls elsewhere are commented out.
screen=Entry(root,font="Verdana 27",textvar=sc,bg="black",fg='white').pack(expand=True,fill="both")
#frame
#dazzler's code
# One frame per calculator row, stacked top to bottom.
btnrow1=Frame(root,bg="#000000")
btnrow1.pack(expand=True,fill="both")
btnrow2=Frame(root)
btnrow2.pack(expand=True,fill="both")
btnrow3=Frame(root,)
btnrow3.pack(expand=True,fill="both")
btnrow4=Frame(root)
btnrow4.pack(expand=True,fill="both")
btnrow5=Frame(root)
btnrow5.pack(expand=True,fill="both")
#button
#dazzler's code
# Every button shares the same look; click() reads the button's text label,
# so the one handler serves digits and operators alike.
#first row
btn1=Button(btnrow1,text="1",font="Verdana 20",relief="groove",border="0",fg='white',bg='black',activebackground="gray37")
btn1.pack(side="left",expand=True,fill="both")
btn1.bind("<Button-1>",click)
btn2=Button(btnrow1,text="2",font="Verdana 20",relief="groove",border="0",fg="white",bg='black',activebackground="gray37")
btn2.pack(side="left",expand=True,fill="both")
btn2.bind("<Button-1>",click)
btn3=Button(btnrow1,text="3",font="Verdana 20",relief="groove",border="0",fg='white',bg='black',activebackground="gray37")
btn3.pack(side="left",expand=True,fill="both")
btn3.bind("<Button-1>",click)
btnplus=Button(btnrow1,text="+",font="Verdana 20",relief="groove",border="0",fg='white',bg='black',activebackground="gray37")
btnplus.pack(side="left",expand=True,fill="both")
btnplus.bind("<Button-1>",click)
#second row
btn4=Button(btnrow2,text="4",font="Verdana 20",relief="groove",border="0",fg='white',bg='black',activebackground="gray37")
btn4.pack(side="left",expand=True,fill="both")
btn4.bind("<Button-1>",click)
btn5=Button(btnrow2,text="5",font="Verdana 20",relief="groove",border="0",fg='white',bg='black',activebackground="gray37")
btn5.pack(side="left",expand=True,fill="both")
btn5.bind("<Button-1>",click)
btn6=Button(btnrow2,text="6",font="Verdana 20",relief="groove",border="0",fg='white',bg='black',activebackground="gray37")
btn6.pack(side="left",expand=True,fill="both")
btn6.bind("<Button-1>",click)
btnmin=Button(btnrow2,text="-",font="Verdana 20",relief="groove",border="0",fg='white',bg='black',activebackground="gray37")
btnmin.pack(side="left",expand=True,fill="both")
btnmin.bind("<Button-1>",click)
#third row
btn7=Button(btnrow3,text="7",font="Verdana 20",relief="groove",border="0",fg='white',bg='black',activebackground="gray37")
btn7.pack(side="left",expand=True,fill="both")
btn7.bind("<Button-1>",click)
btn8=Button(btnrow3,text="8",font="Verdana 20",relief="groove",border="0",fg='white',bg='black',activebackground="gray37")
btn8.pack(side="left",expand=True,fill="both")
btn8.bind("<Button-1>",click)
btn9=Button(btnrow3,text="9",font="Verdana 20",relief="groove",border="0",fg='white',bg='black',activebackground="gray37")
btn9.pack(side="left",expand=True,fill="both")
btn9.bind("<Button-1>",click)
btnmul=Button(btnrow3,text="*",font="Verdana 20",relief="groove",border="0",fg='white',bg='black',activebackground="gray37")
btnmul.pack(side="left",expand=True,fill="both")
btnmul.bind("<Button-1>",click)
#forth row
btn0=Button(btnrow4,text=".",font="Verdana 20",relief="groove",border="0",fg='white',bg='black',activebackground="gray37")
btn0.pack(side="left",expand=True,fill="both")
btn0.bind("<Button-1>",click)
btn5=Button(btnrow4,text="0",font="Verdana 20",relief="groove",border="0",fg='white',bg='black',activebackground="gray37")
btn5.pack(side="left",expand=True,fill="both")
btn5.bind("<Button-1>",click)
btn6=Button(btnrow4,text="00",font="Verdana 20",relief="groove",border="0",fg='white',bg='black',activebackground="gray37")
btn6.pack(side="left",expand=True,fill="both")
btn6.bind("<Button-1>",click)
btnmin=Button(btnrow4,text="/",font="Verdana 20",relief="groove",border="0",fg='white',bg='black',activebackground="gray37")
btnmin.pack(side="left",expand=True,fill="both")
btnmin.bind("<Button-1>",click)
#forth row
btnmod=Button(btnrow5,text="%",font="Verdana 20",relief="groove",border="0",fg='white',bg='black',activebackground="gray37")
btnmod.pack(side="left",expand=True,fill="both")
btnmod.bind("<Button-1>",click)
btnclr=Button(btnrow5,text="C",font="Verdana 20",relief="groove",border="0",fg='white',bg='black',activebackground="gray37")
btnclr.pack(side="left",expand=True,fill="both")
btnclr.bind("<Button-1>",click)
btnmin=Button(btnrow5,text="=",font="Verdana 20",relief="groove",border="0",padx="5",pady=5,fg='white',bg="firebrick4",activebackground="gray37")
btnmin.pack(side="left",expand=True,fill="both")
btnmin.bind("<Button-1>",click)
root.mainloop()
#dazzler's code |
from quart import Quart
import asyncio
import uuid
import re
from app import workers
# Quart application plus the in-memory registry of background download
# tasks, keyed by the hex uuid handed back to the client.
app = Quart(__name__)
tasks = {}
@app.route('/api/download/text/<path:url>', methods = ['GET'])
async def download_text(url):
    """Validate *url* and launch a background text-download task.

    Returns the new task's hex id, or an error string for a URL that
    does not look valid.
    """
    url_pattern = re.compile(
        r'^(?:http|ftp)s?://'
        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'
        r'localhost|'
        r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
        r'(?::\d+)?'
        r'(?:/?|[/?]\S+)$', re.IGNORECASE)
    if url_pattern.match(url) is None:
        return 'URL is not valid'
    task_id = uuid.uuid4().hex
    tasks[task_id] = asyncio.create_task(workers.text_worker(url))
    return task_id
@app.route('/api/download/images/<path:url>', methods = ['GET'])
async def download_images(url):
    """Validate *url* and launch a background image-download task.

    Returns the new task's hex id, or an error string for a URL that
    does not look valid.
    """
    url_pattern = re.compile(
        r'^(?:http|ftp)s?://'
        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'
        r'localhost|'
        r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
        r'(?::\d+)?'
        r'(?:/?|[/?]\S+)$', re.IGNORECASE)
    if url_pattern.match(url) is None:
        return 'URL is not valid'
    task_id = uuid.uuid4().hex
    tasks[task_id] = asyncio.create_task(workers.img_worker(url))
    return task_id
@app.route('/api/status/<id>', methods = ['GET'])
async def status(id):
    """Report whether the task with the given id has finished.

    BUG FIX: the original indexed ``tasks[id]`` directly, so any id
    that was never issued raised an unhandled KeyError (HTTP 500).
    The membership check mirrors the one already done in ``result``.
    """
    if id not in tasks:
        return 'There is no such task'
    if tasks[id].done():
        return 'Results are ready'
    return 'Results are not ready yet'
@app.route('/api/result/<id>', methods = ['GET'])
async def result(id):
    """Return a finished task's result, or an explanatory message."""
    task = tasks.get(id)
    if task is None:
        return 'There is no such task'
    if not task.done():
        return 'Task is not finished yet'
    return await task
|
import os
import logging
from flask import Flask, jsonify, request
from flask.logging import default_handler
import s3
import producer
application = Flask(__name__)  # noqa

# Set up logging: mirror the Flask app's log level and handler on the
# root logger so library logs share the same output.
ROOT_LOGGER = logging.getLogger()
ROOT_LOGGER.setLevel(application.logger.level)
ROOT_LOGGER.addHandler(default_handler)

# Kafka message bus (read from the environment; None when unset)
SERVER = os.environ.get('KAFKA_SERVER')
TOPIC = os.environ.get('KAFKA_TOPIC')

# S3 credentials and destination bucket (also from the environment)
AWS_KEY = os.environ.get('AWS_ACCESS_KEY_ID')
AWS_SECRET = os.environ.get('AWS_SECRET_ACCESS_KEY')
AWS_BUCKET = os.environ.get('AWS_S3_BUCKET_NAME')
@application.route("/", methods=['POST'])
def wake_up():
    """Endpoint for upload and publish requests.

    Expects a JSON body with 'id', 'data' and optionally 'ai_service';
    stores the raw data in S3 and announces the location on Kafka.
    """
    payload = request.get_json(force=True)
    data_id = payload['id']
    ai_service_id = payload.get('ai_service', 'generic_ai')
    raw_data = payload['data']
    s3_destination = f'{AWS_BUCKET}/{data_id}/{ai_service_id}'
    application.logger.info(
        'Saving data to location: s3://%s', s3_destination
    )
    # Persist to S3 first, then announce the new object on the bus.
    s3_filesystem = s3.connect(AWS_KEY, AWS_SECRET)
    s3.save_data(s3_filesystem, s3_destination, raw_data)
    message = {
        'message': f'AI-Ops pipeline successfull for {data_id}',
        'url': s3_destination
    }
    application.logger.info('Publishing message on topic %s', TOPIC)
    producer.publish_message(SERVER, TOPIC, message)
    return jsonify(status='OK', message='Data published')
# Run the development server when executed directly.
if __name__ == '__main__':
    application.run()
|
from django.conf.urls import patterns, include, url
from dashboard import views
from payment_gateway_views import *
from profile_views import *
from store_category_views import *
from shipping_views import *
# ``patterns('', ...)`` was deprecated in Django 1.8 and removed in 1.10;
# a plain list of url() entries is the supported replacement and is
# equivalent here because no view-name prefix was being passed.
urlpatterns = [
    url(r'^$', views.IndexView.as_view(), name='index'),
    url(r'^login$', views.dashboard_login_page, name='dashboard_login'),
    url(r'^logout$', views.dashboard_logout, name='dashboard_logout'),
    url(r'^products$', views.ProductListView.as_view(), name='product_list'),
    url(r'^product/edit/(?P<pk>\d+)/$', views.ProductUpdateCreateView.as_view(), name='product_edit'),
    url(r'^product/new/$', views.ProductUpdateCreateView.as_view(), name='product_new'),
    url(r'^product/delete/(?P<pk>\d+)/$', views.ProductDeleteView.as_view(), name='product_delete'),
    url(r'^profile/edit$', edit_profile, name="edit_profile"),
    url(r'^payment_gateway/edit$', edit_payment_gateway, name="edit_payment_gateway"),
    url(r'^store_categories$', store_categories, name="store_categories"),
    url(r'^store_category/edit/(?P<pk>\d+)/$', edit_store_category, name='edit_store_category'),
    url(r'^store_category/delete/(?P<pk>\d+)/$', delete_store_category, name='delete_store_category'),
    url(r'^store_category/new/$', edit_store_category, name='new_store_category'),
    url(r'^shipping_options$', shipping_options, name="shipping_options"),
    url(r'^shipping_option/edit/(?P<pk>\d+)/$', edit_shipping_option, name='edit_shipping_option'),
    url(r'^shipping_option/delete/(?P<pk>\d+)/$', delete_shipping_option, name='delete_shipping_option'),
    url(r'^shipping_option/new/$', edit_shipping_option, name='new_shipping_option'),
]
|
# Generated by Django 3.1.7 on 2021-03-13 23:50
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Auto-generated: set defaults on LostItem.date / LostItem.time.

    NOTE(review): both defaults are the fixed timestamp captured when
    makemigrations ran, not "now" at insert time; if a dynamic default
    was intended the model should use ``django.utils.timezone.now``.
    """

    dependencies = [
        ('lost', '0002_auto_20210314_0510'),
    ]

    operations = [
        migrations.AlterField(
            model_name='lostitem',
            name='date',
            field=models.DateField(blank=True, default=datetime.datetime(2021, 3, 14, 5, 20, 27, 666921)),
        ),
        migrations.AlterField(
            model_name='lostitem',
            name='time',
            field=models.TimeField(blank=True, default=datetime.datetime(2021, 3, 13, 23, 50, 27, 666921, tzinfo=utc)),
        ),
    ]
|
import pandas as pd
import pymysql
import os
import sys,csv
import argparse
import matplotlib.pyplot as plt
import numpy as np
import plotly.express as px
import mysql.connector
from mysql.connector import Error
# Connect, fetch every row from jdbcEtable, and load the result into a
# pandas DataFrame.
# NOTE(review): credentials are hard-coded; move them to environment
# variables or a config file for anything beyond local experiments.
mydb = mysql.connector.connect(
    host="localhost",
    user="souravkc",
    passwd="pass123",
    database="JdbcDatabase"
)
try:
    mycursor = mydb.cursor()
    mycursor.execute("SELECT * FROM jdbcEtable ")
    myresult = mycursor.fetchall()
finally:
    # BUG FIX: the original never released the cursor/connection.
    mydb.close()
print(myresult)
# fetchall() already yields a sequence of row tuples; hand it straight to
# pandas instead of copying it element by element first.
df = pd.DataFrame([list(row) for row in myresult])
#print(df)
#fig = px.line(df, x = df['0'], y = df['1'], title='JdbcDatabase_WheelBasedVehicleSpeed')
#fig.show()
|
# coding: utf-8
# Smoke tests for the gridforme Flask app: home page, generated image,
# and generated CSS endpoints.
import pytest
import gridforme
@pytest.fixture
def app():
    """Return a Flask test client with TESTING mode enabled."""
    gridforme.app.config['TESTING'] = True
    return gridforme.app.test_client()
def test_home_url(app):
    # Home page responds 200.
    rv = app.get('/')
    assert 200 == rv.status_code
def test_image_url(app):
    # Image endpoint responds 200.
    rv = app.get('/i/12/95/30/15/')
    assert 200 == rv.status_code
def test_image_content(app):
    # Image endpoint returns the reference PNG byte-for-byte.
    rv = app.get('/i/12/95/30/15/')
    with open('tests/thegrid.png', 'rb') as img:
        assert img.read() == rv.data
def test_style(app):
    # CSS endpoint embeds the image URL and is served as text/css.
    rv = app.get('/12/95/30/15/')
    assert 200 == rv.status_code
    assert 'http://gridfor.me/i/12/95/30/15/' in rv.get_data(as_text=True)
    assert 'text/css' == rv.mimetype
|
from Myro import *
from Graphics import *
from random import *
# Start Myro against the simulator rather than physical hardware.
init("sim")
def findColorSpot(picture, color):
    """Locate a colour blob in *picture* and return its mean x position.

    ``color`` selects the target: 1=red, 2=green, 3=blue, 4=yellow.
    Returns 0 when no matching pixel is found, the average x coordinate
    of the matching pixels otherwise, or -1 when the blob fills enough
    of the frame that the robot is considered to have reached it.
    """
    show(picture)
    x_total = 0
    match_count = 0
    for pixel in getPixels(picture):
        r = getRed(pixel)
        g = getGreen(pixel)
        b = getBlue(pixel)
        if color == 1:
            hit = r > 220 and g == 0 and b == 0
        elif color == 2:
            hit = r == 0 and g > 100 and b == 0
        elif color == 3:
            hit = r == 0 and g == 0 and b > 220
        elif color == 4:
            hit = r > 200 and g > 200 and b == 0
        else:
            hit = False
        if hit:
            x_total += getX(pixel)
            match_count += 1
    mean_x = 0
    if match_count != 0:
        mean_x = x_total / match_count
    # Treat the spot as "reached" when it covers enough of the frame;
    # adjust the 0.21 threshold if necessary.
    if match_count / (getWidth(picture) * getHeight(picture)) > 0.21:
        mean_x = -1
    return mean_x
#goes towards the ball
# Wander until the blue spot is found, then steer towards it.
angle = randrange(-180, 180)
while True:  # original `j = 0; while j < 1` never changed j — loop forever
    pic = takePicture()
    x = findColorSpot(pic, 3)
    show(pic)
    print(x)
    if x == 0:
        # Nothing blue in view: spin to a new heading and look again.
        # (Also drops the original's extra discarded takePicture() call.)
        turnBy(angle)
        pic = takePicture()
        x = findColorSpot(pic, 3)
        show(pic)
        print(x)
    elif x == -1:
        # BUG FIX: this branch was tested after `x < 128` and could never
        # run (-1 < 128); "spot reached" must be checked first.
        forward(1, 1)
    elif x < 128:
        turnBy(15)
        forward(1, 1)
    elif x > 128:
        turnBy(-15)
        forward(1, 1)
    elif getStall() == 1:
        backward(1, 3)
        turnBy(randrange(1, 360))
|
import main

# Alias the Meeting class; its scheduling helpers are used below.
z = main.Meeting
# Slot number -> human-readable time range shown to the user.
dic = {1: "1: 9.00-10.00 AM", 2: "2: 10.15-11.15am", 3: "3: 11.30am-12.30PM", 4: "4: 1.00-2.00PM", 5: "5: 2.15-3.15"}
keys = dic.keys()
z.time_view(dic)
# BUG FIX (security): eval() on raw keyboard input executes arbitrary
# code; the slot is an integer, so parse it with int() instead.
time = int(input("enter the slot you want: "))
z.schedule(dic, keys, time)
|
from random import *
# Print "1 + 2 + ... + 7" followed by " = <total>" on the same line.
base = [1, 2, 3, 4, 5, 6, 7]
print(" + ".join(str(n) for n in base), end="")
tong = sum(base)
print(" =", tong)
|
from typing import Type
def evaluate_post_fix(string):
    """Evaluate a postfix (RPN) expression of single-digit operands.

    ``string`` may contain digits, spaces (ignored) and operators
    understood by ``operate``.  Returns the integer result, or the
    string "Invalid sequence" when an operator lacks operands.
    """
    stack = []
    try:
        for token in string:
            if token.isdigit():
                # Operands are single characters; push them as-is.
                stack.append(token)
            elif token == ' ':
                continue
            else:
                # Operator: pop right then left operand, push the result.
                num_to_right = stack.pop()
                num_to_left = stack.pop()
                stack.append(operate(num_to_left, num_to_right, token))
    except (TypeError, IndexError):
        # BUG FIX: popping an empty list raises IndexError, not
        # TypeError, so malformed input previously crashed instead of
        # returning the error string.
        return "Invalid sequence"
    if not stack:
        # Empty input: nothing to return.
        return "Invalid sequence"
    return int(float(stack.pop()))
def operate(num1, num2, op):
    """Apply binary operator *op* to *num1* and *num2*.

    Operands may be numeric strings (as pushed from the input) or
    numbers (intermediate results).  SECURITY FIX: replaces the
    original ``eval(f"{num1} {op} {num2}")`` — building and eval-ing
    source code from data is an injection risk — with a direct
    dispatch table.  Raises KeyError for an unsupported operator.
    """
    import operator
    ops = {
        '+': operator.add,
        '-': operator.sub,
        '*': operator.mul,
        '/': operator.truediv,
        '//': operator.floordiv,
        '%': operator.mod,
        '**': operator.pow,
    }

    def _num(value):
        # Digits arrive as one-character strings; results as int/float.
        return int(value) if isinstance(value, str) else value

    return ops[op](_num(num1), _num(num2))
# Demo runs; the expected result for each expression is in the comment.
print(evaluate_post_fix('638*+4-')) #26
print(evaluate_post_fix('921 * - 8 - 4 +')) #3
print(evaluate_post_fix('98 - 4 +')) #5
print(evaluate_post_fix('92 *')) #18
|
from django.test import TestCase
from .models import Todo
# Create your tests here.
class TodoModelstCase(TestCase):
    """Model tests for Todo title/body round-tripping."""
    @classmethod
    def setUpTestData(cls):
        # One shared Todo row for every test in this class.
        Todo.objects.create(title='A new title', body='Whats up danger.')
    def testTaskTitle(self):
        # NOTE(review): assumes the created row gets id=1 — holds on a
        # fresh test database but is fragile; fetching by title or
        # storing the created object would be safer.
        todo = Todo.objects.get(id=1)
        expected_obj_name = f'{todo.title}'
        self.assertEqual(expected_obj_name, 'A new title')
    def testTaskContent(self):
        # Body is stored and read back exactly as entered.
        todo = Todo.objects.get(id=1)
        expected_obj_content = f'{todo.body}'
        self.assertEqual(expected_obj_content, 'Whats up danger.')
|
# Team ID: <TODO: fill up>
def schedule2(locations, start_location, capacities, orders):
    """Greedily assign each order to the first vehicle that can hold it.

    Args:
        locations, start_location: unused by this baseline algorithm
            (kept for interface compatibility; a distance-optimizing
            version must use them).
        capacities: per-vehicle remaining capacity.
        orders: sequence of (item, weight) pairs.

    Returns a list with one order list per vehicle, or [] when some
    order fits in no vehicle.
    """
    # BUG FIX: work on a copy so the caller's ``capacities`` list is
    # not mutated (the original decremented it in place, and left it
    # partially consumed even on the failure path).
    remaining = list(capacities)
    assignments = [[] for _ in remaining]
    for order in orders:
        weight = order[1]
        for k in range(len(remaining)):
            if remaining[k] >= weight:
                assignments[k].append(order)
                remaining[k] -= weight
                weight = 0
                break
        if weight > 0:
            # No vehicle can take this order: signal failure.
            return []
    return assignments
|
from django.contrib.auth.models import (AbstractBaseUser, BaseUserManager,
PermissionsMixin)
from django.core.mail import send_mail
from django.db import models
from django.conf import settings
from django.utils.translation import gettext_lazy as _
class CustomUserManager(BaseUserManager):
    """Manager that creates regular users and superusers keyed by email."""

    def create_superuser(self, email, username, password, **other_fields):
        """Create a user with the staff/superuser/active flags forced on."""
        for flag in ("is_staff", "is_superuser", "is_active"):
            other_fields.setdefault(flag, True)
        if other_fields.get("is_staff") is not True:
            raise ValueError("Superuser must be assigned to is_staff=True.")
        if other_fields.get("is_superuser") is not True:
            raise ValueError("Superuser must be assigned to is_superuser=True.")
        return self.create_user(email, username, password, **other_fields)

    def create_user(self, email, username, password, **other_fields):
        """Create and persist a user; a non-empty email is required."""
        if not email:
            raise ValueError(_("You must provide an email address"))
        normalized = self.normalize_email(email)
        new_user = self.model(email=normalized, username=username, **other_fields)
        new_user.set_password(password)
        new_user.save()
        return new_user
class UserAccount(AbstractBaseUser, PermissionsMixin):
    """Custom user authenticated by email; username is kept for display."""
    email = models.EmailField(_("email address"), unique=True)
    username = models.CharField(_("user name"), max_length=150, unique=True)
    first_name = models.CharField(_("first name"), max_length=50, blank=True)
    last_name = models.CharField(_("last name"), max_length=50, blank=True)
    about = models.TextField(_("about"), max_length=400, blank=True)
    date_created = models.DateTimeField(_("date created"), auto_now_add=True)
    date_updated = models.DateTimeField(_("date updated"), auto_now=True)
    is_active = models.BooleanField(_("active"), default=True)
    is_guest = models.BooleanField(_("guest"), default=False)
    is_staff = models.BooleanField(_("staff"), default=False)
    # NOTE(review): 32 chars suggests an MD5-style fingerprint hash —
    # confirm against the code that writes it.
    fingerprint = models.CharField(max_length=32, null=True)
    objects = CustomUserManager()
    # Authentication uses the email; username is still required at creation.
    USERNAME_FIELD = "email"
    REQUIRED_FIELDS = ["username"]
    class Meta:
        verbose_name = _("User account")
        verbose_name_plural = _("User accounts")
    def email_user(self, subject, message):
        """Send *message* to this user's address via the configured host."""
        send_mail(
            subject,
            message,
            settings.EMAIL_HOST_USER,
            [self.email],
            fail_silently=False,
        )
    def __str__(self):
        return self.username
class Board(models.Model):
    """A board owned by a user; categories and tasks hang off it."""

    name = models.CharField(max_length=255)
    created_by = models.ForeignKey(
        UserAccount, related_name="board", on_delete=models.CASCADE
    )

    class Meta:
        verbose_name = "Board"
        verbose_name_plural = "Boards"

    def categories(self):
        """Return the names of this board's categories.

        Uses a comprehension over the reverse relation instead of the
        original manual append loop.
        """
        return [category.name for category in self.category.all()]

    def __str__(self):
        return self.name
class Category(models.Model):
    """A column on a board; tasks reference it via the 'task' relation."""
    name = models.CharField(max_length=250)
    board = models.ForeignKey(Board, related_name="category", on_delete=models.CASCADE)
    # Denormalized task counter — presumably maintained by application
    # code; TODO confirm where it is updated.
    total_tasks = models.IntegerField(default=0)
    created_by = models.ForeignKey(
        UserAccount, related_name="category", on_delete=models.CASCADE
    )
    class Meta:
        verbose_name = _("Category")
        verbose_name_plural = _("Categories")
    def __str__(self):
        return self.name
class Task(models.Model):
    """A work item on a board, belonging to exactly one category."""
    # Allowed workflow states for the 'status' field.
    STATUS = (
        ("Planned", "Planned"),
        ("In Progress", "In Progress"),
        ("Testing", "Testing"),
        ("Completed", "Completed"),
    )
    board = models.ForeignKey(Board, related_name="task", on_delete=models.CASCADE)
    category = models.ForeignKey(
        Category, related_name="task", on_delete=models.CASCADE
    )
    status = models.CharField(max_length=50, choices=STATUS, default="Planned")
    name = models.CharField(max_length=250)
    created_by = models.ForeignKey(
        UserAccount, related_name="task", on_delete=models.CASCADE
    )
    description = models.TextField(max_length=500, blank=True)
    date_created = models.DateTimeField(auto_now_add=True)
    date_updated = models.DateTimeField(auto_now=True)
    # Presumably a UI expand/collapse flag toggled by the frontend —
    # TODO confirm against the serializer/views.
    extend_state = models.BooleanField(default=False)
    class Meta:
        verbose_name = _("Task")
        verbose_name_plural = _("Tasks")
    def __str__(self):
        return self.name
class Subtask(models.Model):
    """A checklist entry belonging to a task."""
    name = models.CharField(max_length=250, blank=True)
    task = models.ForeignKey(Task, related_name="subtask", on_delete=models.CASCADE)
    is_complete = models.BooleanField(default=False)
    class Meta:
        verbose_name = _("Subtask")
        verbose_name_plural = _("Subtasks")
    def __str__(self):
        return self.name
|
from django.contrib import admin
from imagekit.admin import AdminThumbnail
from common.admin import AutoUserMixin
from shapes.models import MaterialShape, SubmittedShape
from photos.models import FlickrUser, PhotoSceneCategory, Photo, \
PhotoWhitebalanceLabel, PhotoSceneQualityLabel
# Models with no custom admin options get the default ModelAdmin.
admin.site.register(FlickrUser)
admin.site.register(PhotoSceneCategory)
admin.site.register(PhotoWhitebalanceLabel)
admin.site.register(PhotoSceneQualityLabel)
class PhotoAdmin(AutoUserMixin, admin.ModelAdmin):
    """Admin for Photo: thumbnails, list filters, and inline label editors."""
    fieldsets = [
        (None, {
            'fields': ['added', 'user', 'image_orig', 'admin_thumb_span6', 'aspect_ratio', 'scene_category',
                       'scene_category_correct', 'whitebalanced', 'description', 'exif',
                       'flickr_user', 'flickr_id']
        }),
    ]
    # fields shown read-only on the change form
    readonly_fields = ['added', 'admin_thumb_span6']
    list_display = ['user', 'admin_thumb_span1', 'scene_category',
                    'scene_category_correct', 'whitebalanced', 'added']
    # list-page filtering/search/navigation
    list_filter = ['added', 'scene_category_correct', 'whitebalanced']
    search_fields = ['user', 'description']
    date_hierarchy = 'added'
    # Thumbnail columns rendered from the corresponding image fields.
    admin_thumb_span6 = AdminThumbnail(image_field='image_span6')
    admin_thumb_span1 = AdminThumbnail(image_field='thumb_span1')
    # inlines: labels and shapes edited directly on the photo page
    class PhotoLabelInlineBase(AutoUserMixin, admin.TabularInline):
        # Shared base: all inlines point back at the photo FK.
        fk_name = 'photo'
        extra = 1
    class PhotoWhitebalanceLabelInline(PhotoLabelInlineBase):
        model = PhotoWhitebalanceLabel
    class PhotoSceneQualityLabelInline(PhotoLabelInlineBase):
        model = PhotoSceneQualityLabel
    class SubmittedShapeInline(PhotoLabelInlineBase):
        model = SubmittedShape
    class MaterialShapeInline(PhotoLabelInlineBase):
        model = MaterialShape
    inlines = [
        SubmittedShapeInline,
        MaterialShapeInline,
        PhotoWhitebalanceLabelInline,
        PhotoSceneQualityLabelInline,
    ]
admin.site.register(Photo, PhotoAdmin)
#class PhotoCollectionAdmin(AutoUserMixin, admin.ModelAdmin):
    #pass
#admin.site.register(PhotoCollection, PhotoCollectionAdmin)
#class PhotoSceneCategoryAdmin(AutoUserMixin, admin.ModelAdmin):
    #pass
#admin.site.register(PhotoSceneCategory, PhotoSceneCategoryAdmin)
#class PhotoAttributeAdmin(AutoUserMixin, admin.ModelAdmin):
    #pass
#admin.site.register(PhotoAttribute, PhotoAttributeAdmin)
|
"""
file: test_phone_number_extraction.py
brief: Sample phone-number strings for exercising number extraction.
author: S. V. Paulauskas
date: April 17, 2020
"""
# BUG FIX: the original list was missing commas after four entries, so
# adjacent string literals were implicitly concatenated and the list
# silently held 2 elements instead of 5.
SAMPLE_LIST = [
    "6464159260",
    "212 6561437",
    "123-456-7890",
    "(919) 612-0710",
    " 5129657186",
]
|
# bot details =====================================================================================================
token = "11111111:xxxxxxxxxxxxxxxxxxxxxxxxxxxxx" # Telegram bot API token (from @BotFather)
# wapi =====================================================================================================
# Register on the wapi website to get this key: https://mrcyjanek.net/wapi/ or contact t.me/@mrcyjanek
api_key = "ooooooooooooooo00000000000ooooooooooooo"
# wapi account username (same registration as the API key above)
wapi_username = 'lllllllll'
# wapi end =====================================================================================================
# database =====================================================================================================
# MySQL database host
db_host = "localhost"
# MySQL database username
db_user = "xxxxxxxx"
# MySQL database password
db_pass = "xxxxxxxx"
# MySQL database name
db_database = "xxxxxxxx"
# database end =====================================================================================================
# config details =====================================================================================================
# Minimum withdrawal amounts per currency (keep them all as decimals).
ltc_min_withdraw = 0.00002280 #example : 0.0001
doge_min_withdraw = 1.00 #example : 1.000
btc_min_withdraw = 0.00000100 #example : 0.00001
# Telegram user id of the bot administrator.
adminid = 1152662911
# Message shown by the info button (HTML markup supported).
bot_info = "<b>Welcome To Multi Tip Bot</b>\n\n<b>Commands :</b>\n💦 <code>/rain 10 doge</code> - Raining In a Group to all the active users\n🎉 <code>/tip 10 doge</code> - Reply To a users message whom you want to tip/transfer/send your balance\n⚡ <code>/active</code> - Send You the number of active users currently\n<i>All the above mentioned command are only allowed in Group</i>\n\nThis Bot is made by @mrbeandev\n<i>This is only a trail bot</i>To Add this type of a bot to Your Group contact Admin"
# Bot name (or any content) displayed in the start message.
bot_name = "<b>Welcome To tip bot</b>" # this supports html markups
# How long (seconds) a user counts as "active"; changing this is not
# recommended unless you understand the active-user tracking.
active_users_store_time = 300.00 # i recommend not to change this value XXXXXXXXXX
# menu ===== main keyboard button labels
# balance button
button1 = "💰 Balance 💰"
# deposit button
button2 = "📥 Dump 📥"
# withdraw button
button3 = "📤 Payout 📤"
# info button
button4 = "🛡 Info 🛡"
# new menu =================== extended keyboard button labels
exchangeButton = "♻ Exchange"
walletButton = "💼 Wallet"
freetokensButton = "🎊 Free Tokens! 🎉"
uniREAPButton = "🦄 UniREAP"
listTokensButton = "🗒 List Tokens"
groupAdminsButton = "🕹 Group Admins"
helpButton = "☎ Help"
aboutButton = "📜 About"
convertButton = "↔ Convert"
swapButton = "↕ Swap"
backButton = "🔙 Back"
mainMenuButton = "🔝 Main Menu"
connectWalletButton = "💼 Connect Wallet"
balanceButton = "💰 Balance"
withdrawButton = "📤 Withdraw"
depositButton = "📥 Deposit"
transferButon = "↔ Transfer"
historyButon = "📋 History"
connectEmailButton = "📧 Connect Email"
onChainAddressButton = "🔗 On-chain address"
howAirdropButton = "🎊 How to airdrop?"
howGrabButton = "🔥 How to grab?"
howRainButton = "🌧 How to rain?"
howTipButton = "👍 How to tip?"
coinsTokensButton = "💎 Coins/Tokens"
howCheckPriceButton = "🤔 How to check price?"
supportButton = "❓ Support"
makeAirdropWithKeywordsButton = "🔑 Make an airdrop with keywords"
makeAirdropWithoutKeywordsButton = "📬 Make an airdrop without keywords"
createGiveawayButton = "🎁 Create a giveaway"
makeAirdropInFastButton = "🏃♀ Make an airdrop in fast"
addBotToYougGroupButton = "🤝 Add REAPit to your group"
# Convert a number of seconds (read from stdin) to H:M:S.
allS = int(input())
# divmod produces quotient and remainder in one step — same results as
# the original // and % pairs, without repeating the 3600 division.
h, rem = divmod(allS, 3600)
m, s = divmod(rem, 60)
print("{}:{}:{}".format(h, m, s))
|
def get_board_vars():
    """Read board_vars.txt and return its values as a list of floats.

    Each line holds one number.  Only newline-terminated lines count —
    this matches the original parser, which dropped a trailing line
    that had no final newline.  Uses a context manager so the file
    handle is closed even if parsing fails (the original leaked it on
    error).
    """
    with open("board_vars.txt", "r") as board_file:
        text = board_file.read()
    # split("\n") leaves either the unterminated last line or an empty
    # string at the end; [:-1] drops it, mirroring the old behaviour.
    return [float(chunk) for chunk in text.split("\n")[:-1]]
# Demo: parse the file and show the parsed values.
print(get_board_vars())
# Read a sentence and report whether it is a palindrome, ignoring
# spaces.  Example: "APOS A SOPA" reads the same when reversed.
frase = str(input('Digite um frase: ')).strip().upper() # trim outer spaces, uppercase for comparison
palavras = frase.split() # split into a list of words
junto =''.join(palavras) # re-join without any internal spaces
inverso =''
for letra in range(len(junto) - 1, -1, -1): # build the reversed string one character at a time
    inverso += junto[letra]
# the two lines above could be replaced by the slice: inverso = junto[::-1]
print('O inverso de {} é {}'.format(junto, inverso))
if inverso == junto: # a palindrome equals its own reverse
    print('É um palindromo!')
else:
    print('A frase não é um palindromo!')
|
# coding=utf-8
import os
import time
import datetime
import logging
import yaml
from porter.grads_parser.grads_ctl_parser import GradsCtl, GradsCtlParser
from porter.grads_tool.converter.grads_to_micaps import GradsToMicaps
logger = logging.getLogger(__name__)
class GradsConvert(object):
    """Convert GrADS records to other formats driven by a YAML config."""

    def __init__(self):
        pass

    def print_record_info(self, record):
        """Log which record is about to be converted.

        BUG FIX: the original passed ``end=''`` to ``logger.info``,
        which is not a logging kwarg and raised TypeError on every call.
        """
        logger.info("[{class_name}] Converting {name} with level {level} to {target_type}...".format(
            class_name=self.__class__.__name__,
            name=record["name"],
            level=record["level"],
            target_type=record["target_type"]
        ))

    def convert(self, config_file_path):
        """Convert every record listed in the YAML config file.

        The config supplies the ctl file path, the output directory,
        optional start/forecast times, and the record list.
        """
        with open(config_file_path) as config_file:
            # safe_load: yaml.load without an explicit Loader is
            # deprecated and can execute arbitrary YAML tags.
            config_object = yaml.safe_load(config_file)

        ctl_file_path = os.path.abspath(config_object['ctl'])
        grads_ctl = GradsCtl()

        # output parser
        output_dir = os.path.abspath(config_object['output_dir'])

        # time parser: start_time is YYYYMMDDHH, forecast_time is hours.
        start_time_str = config_object.get('start_time', '')
        forecast_time_str = config_object.get('forecast_time', '')
        if start_time_str != "":
            if len(start_time_str) == 10:
                grads_ctl.start_time = datetime.datetime.strptime(
                    start_time_str, "%Y%m%d%H")
            else:
                logger.error("parser start_time has error: {start_time}".format(start_time=start_time_str))
        if forecast_time_str != "":
            # TODO (windroc, 2014.08.18): use format:
            #   XXXhXXmXXs
            if len(forecast_time_str) == 3:
                grads_ctl.forecast_time = datetime.timedelta(
                    hours=int(forecast_time_str))

        # ctl parser
        grads_ctl_parser = GradsCtlParser(grads_ctl)
        grads_ctl = grads_ctl_parser.parse(ctl_file_path)

        # record parser: convert and time each record in turn.
        for a_record in config_object['records']:
            target_type = a_record["target_type"]
            self.print_record_info(a_record)

            def convert_a_record():
                # Dispatch on the requested output format.
                if target_type.startswith("micaps"):
                    grads_to_micaps = GradsToMicaps(grads_ctl)
                    a_record['output_dir'] = output_dir
                    grads_to_micaps.convert(a_record)
                else:
                    # BUG FIX: ``raise NotImplemented`` raises TypeError
                    # (NotImplemented is not an exception class);
                    # NotImplementedError is the correct type.
                    raise NotImplementedError(
                        "Not implemented for %s" % target_type)

            # BUG FIX: time.clock() was removed in Python 3.8;
            # perf_counter() is its documented replacement.
            time1 = time.perf_counter()
            convert_a_record()
            time2 = time.perf_counter()
            logger.info("{time_cost:.2f}".format(time_cost=time2 - time1))
|
#!/usr/bin/env python3
""" Long Short Term Memory Cell """
import numpy as np
class LSTMCell:
    """A single LSTM unit: gated cell state plus a softmax output layer."""

    def __init__(self, i, h, o):
        """Create the cell's weights and biases.

        i: dimensionality of the input data
        h: dimensionality of the hidden state
        o: dimensionality of the outputs

        Weights are drawn from a standard normal and applied on the
        right of the concatenated [h_prev, x] row vectors; biases start
        at zero.  The matrices are created in the order Wf, Wu, Wc, Wo,
        Wy so seeded RNG streams match the reference implementation.
        """
        concat_dim = h + i
        self.Wf = np.random.normal(size=(concat_dim, h))  # forget gate
        self.Wu = np.random.normal(size=(concat_dim, h))  # update gate
        self.Wc = np.random.normal(size=(concat_dim, h))  # candidate cell state
        self.Wo = np.random.normal(size=(concat_dim, h))  # output gate
        self.Wy = np.random.normal(size=(h, o))           # output projection
        self.bf = np.zeros(shape=(1, h))
        self.bu = np.zeros(shape=(1, h))
        self.bc = np.zeros(shape=(1, h))
        self.bo = np.zeros(shape=(1, h))
        self.by = np.zeros(shape=(1, o))

    def forward(self, h_prev, c_prev, x_t):
        """Run one LSTM time step.

        h_prev: (m, h) previous hidden state
        c_prev: (m, h) previous cell state
        x_t:    (m, i) input batch
        Returns (h_next, c_next, y), where y is the softmax output.
        """
        def sigmoid(z):
            return 1 / (1 + np.exp(-z))

        stacked = np.concatenate((h_prev, x_t), axis=1)
        # Gates are computed in the same order as the reference (u, f, o)
        # so floating-point results are bit-identical.
        update_gate = sigmoid(np.dot(stacked, self.Wu) + self.bu)
        forget_gate = sigmoid(np.dot(stacked, self.Wf) + self.bf)
        output_gate = sigmoid(np.dot(stacked, self.Wo) + self.bo)
        candidate = np.tanh(np.dot(stacked, self.Wc) + self.bc)
        c_next = update_gate * candidate + forget_gate * c_prev
        h_next = output_gate * np.tanh(c_next)
        logits = np.dot(h_next, self.Wy) + self.by
        exp_logits = np.exp(logits)
        y = exp_logits / np.sum(exp_logits, axis=1, keepdims=True)
        return h_next, c_next, y
|
# Generated by Django 2.0.3 on 2018-03-22 12:01
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: replace Tag.post FK with a Post.tags M2M field."""

    dependencies = [
        ('blog', '0008_tag'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='tag',
            name='post',
        ),
        migrations.AddField(
            model_name='post',
            name='tags',
            field=models.ManyToManyField(to='blog.Tag'),
        ),
        migrations.AddField(
            model_name='tag',
            name='name',
            # default=1 was the one-off value supplied to makemigrations
            # for existing rows; preserve_default=False drops it from
            # the field afterwards.
            field=models.CharField(default=1, max_length=50),
            preserve_default=False,
        ),
    ]
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class HuaxiaItem(scrapy.Item):
    """Scraped used-car listing fields (vehicle identity plus pricing)."""
    # scrape metadata
    grab_time = scrapy.Field()
    # brand / factory / family / vehicle identifiers and names
    brandname = scrapy.Field()
    brand_id = scrapy.Field()
    factoryname = scrapy.Field()
    family_id = scrapy.Field()
    familyname = scrapy.Field()
    vehicle = scrapy.Field()
    vehicle_id = scrapy.Field()
    years = scrapy.Field()
    displacement = scrapy.Field()
    guideprice = scrapy.Field()
    url = scrapy.Field()
    status = scrapy.Field()
    # listing details: registration year/month, mileage, city
    year = scrapy.Field()
    month = scrapy.Field()
    mile = scrapy.Field()
    city = scrapy.Field()
    # price points for this listing
    purchasing_price = scrapy.Field()
    Individual_transaction_price = scrapy.Field()
    retail_price = scrapy.Field()
    newcar_price = scrapy.Field()
|
from urllib.request import build_opener, HTTPCookieProcessor
import http.cookiejar
# File that will receive the captured cookies.
filename = "cookie.txt"
# Save cookies in Mozilla-browser cookie file format:
# declare a cookie jar bound to that file...
cookie = http.cookiejar.MozillaCookieJar(filename)
# ...build a handler from the jar with HTTPCookieProcessor...
handler = HTTPCookieProcessor(cookie)
# ...and build an opener from the handler.
opener = build_opener(handler)
req = opener.open("http://www.baidu.com")
# Persist the cookies, keeping discardable and expired entries too.
cookie.save(ignore_discard=True, ignore_expires=True)
"""
输出: 参见cookie.txt
"""
|
import unittest
from app.models import Comment,User,Pitch
from app import db
class CommentModelTest(unittest.TestCase):
    """Unit tests for the Comment model."""

    def setUp(self):
        """Create a user and a comment attached to pitch 1.

        BUG FIX: the comment referenced ``self.user_kayleen``, which
        was never assigned (the user is stored as ``self.user_postgres``),
        so setUp raised AttributeError before any test could run.
        """
        self.user_postgres = User(username = 'kayleen',password = 'password', email = 'kayleen@gmail.com')
        self.new_comment = Comment(comment='nice work',user = self.user_postgres,pitch_id=1 )
|
import unittest
from JustFriends.code.tests.location_test import TestLocationMethods
from JustFriends.code.tests.places_test import TestPlacesMethods
if __name__ == '__main__':
    # Build one suite per test-case class and run them in sequence
    # (location first, then places — same order as before).
    loader = unittest.TestLoader()
    suites = [
        loader.loadTestsFromTestCase(TestLocationMethods),
        loader.loadTestsFromTestCase(TestPlacesMethods),
    ]
    runner = unittest.TextTestRunner(verbosity=2)
    for suite in suites:
        runner.run(suite)
# -*- coding:utf-8 -*-
# 求解一元一次方程
# Solve the linear equation Ax + B = C for x.
print("This program is for equation Ax + B = C.")
print("Please input number A.")
numberA = input()
print("Please input number B.")
numberB = input()
print("Please input number C.")
numberC = input()
print("The equation is: " + str(numberA) + ' * x + ' + str(numberB) + ' = ' + \
      str(numberC) + ' .')
# Convert each value exactly once instead of calling float() repeatedly.
# This also fixes the original final branch, which compared the *string*
# numberA against the integer 0 (always unequal) rather than its value.
a = float(numberA)
b = float(numberB)
c = float(numberC)
if a == 0 and c - b == 0:
    # 0*x = 0: every x satisfies the equation.
    print('X can be any number.')
elif a == 0 and c - b != 0:
    # 0*x = nonzero: contradiction.
    print('The equation has no solution.')
else:
    solutionx = (c - b) / a
    print("The equation's solution is x = " + str(solutionx))
|
from os import listdir, path, makedirs
from os.path import isfile, join
from IPython.display import display, Markdown, Latex
import nbformat as nbf
import re, string
class Migration(object):
    """Converts plain-text wiki pages into Jupyter notebooks."""

    def parse_path(self, wiki):
        """Extract the notebook path from a wiki header line.

        Expects text of the form ``... ++ Segment/Segment`` and returns the
        path segments as a list; prints a short diagnostic and returns None
        when no header is present.
        """
        filename = re.match('.*\+\+\W(.*)', wiki)
        if filename:
            title = filename.group(1)
            return title.split('/')
        print("Error:")
        # BUG fix: the original mixed %-placeholders with str.format() and
        # called readline() on a plain string.
        for i, line in enumerate(wiki.splitlines()[:3]):
            print("{0}. {1}".format(i, line))
        return None

    def create_new_book(self, title, cells, prefix):
        """Write *cells* to a new notebook at prefix/<title path>.ipynb,
        creating intermediate folders for multi-segment titles."""
        if len(title) == 1:
            nb = nbf.v4.new_notebook()
            nb['cells'] = cells
            fname = prefix + "/" + title[0] + ".ipynb"
            with open(fname, 'w', encoding='utf8') as f:
                nbf.write(nb, f)
        elif len(title) > 1:
            # Create the folder chain, then recurse with the leaf name only.
            pre = self.create_folder(title[0:-1], prefix)
            self.create_new_book([title[-1]], cells, pre)

    def extend_book(self, book_uri, cells):
        """Append *cells* to an existing notebook (or set them when empty)."""
        nb = nbf.read(book_uri, as_version=4)
        if nb['cells']:
            nb['cells'].extend(cells)
        else:
            nb['cells'] = cells
        with open(book_uri, 'w', encoding='utf8') as f:
            nbf.write(nb, f)

    def create_folder(self, title, prefix):
        """Create (if needed) the nested folders named by *title* under
        *prefix* and return the absolute path of the deepest one."""
        path_to_book = prefix
        for segment in title:
            path_to_book = path.join(path_to_book, segment)
            if not path.exists(path_to_book):
                makedirs(path_to_book)
        return path.abspath(path_to_book)

    def parse_minutia(self, cell_text):
        """Placeholder for fine-grained cell parsing (not implemented)."""
        pass

    def parse_page(self, wiki_file):
        """Read a wiki file and return ``(title, cell_text)`` for a notebook.

        BUG fixes: parse_path/parse_links are methods and must be called via
        self; the original also called ``str.append``, which does not exist,
        and returned nothing.
        """
        with open( wiki_file, "r") as f:
            lines = f.readlines()
        title = "/".join(self.parse_path(lines[0]))
        text_body = self.parse_links(lines[1:])
        return title, "## " + title + "\n" + "".join(text_body)

    def parse_links(self, cell_lines):
        """Replace every wiki link in *cell_lines* (a string or a list of
        lines) with a Jupyter notebook link; return the new lines."""
        pattern = re.compile("(\[//.*?\])", re.I)
        final_text = []
        if isinstance(cell_lines, str):
            lines = [cell_lines]
        elif isinstance(cell_lines, list):
            lines = cell_lines
        for line in lines:
            for match in re.findall(pattern, line):
                replacement = self.replace_link(match)
                line = line.replace(match, replacement)
            final_text.append(line)
        return final_text

    def replace_link(self, old_link_text):
        """Rewrite a ``[//link/ext/ext2]`` wiki link as a markdown link to
        the corresponding local Jupyter notebook."""
        prefix = "(http://localhost:8888/notebooks/"
        olt = old_link_text.split("/")
        cell = olt[-1]
        if olt[-2] == "":
            # Bare "[//name]" links point at the index notebook.
            return "[" + cell + prefix + "index.ipynb)"
        else:
            return "[" + "/".join(olt[2:]) + prefix + "/".join(olt[2:-1]) + ".ipynb)"
if __name__ == "__main__":
    # Migrate every wiki file from the old journal directory into notebooks.
    old_journal_dir = "Z:/data/"
    new_journal_dir = "Z:/jupyter/new_journal/"
    wikifiles = [f for f in listdir(old_journal_dir) if isfile(join(old_journal_dir, f))]
    wikifiles.sort()
    for wiki in wikifiles:
        with open(old_journal_dir + wiki, 'r', encoding="utf8") as wiki_file:
            text = wiki_file.read()
            # NOTE(review): parse_path is a Migration method but is called here
            # as a free function — this raises NameError at runtime; a
            # Migration instance is needed.
            title = parse_path(text)
            if title:
                # NOTE(review): create_new_title is not defined anywhere in
                # this file; Migration.create_new_book was probably intended.
                cells = create_new_title(title)
    print("Done!")
|
import sqlite3

# Database file shared by this bootstrap code and the DB context manager.
DB_NAME = 'example.db'

# Both tables are created idempotently in a single script.
_SCHEMA = '''
CREATE TABLE IF NOT EXISTS posts
(
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    name TEXT NOT NULL,
    author TEXT NOT NULL,
    content TEXT,
    price REAL,
    datestamp TEXT,
    active INT,
    buyer TEXT,
    poster TEXT
);
CREATE TABLE IF NOT EXISTS users
(
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    username TEXT UNIQUE NOT NULL,
    password TEXT NOT NULL,
    email TEXT UNIQUE NOT NULL,
    address TEXT NOT NULL,
    phone TEXT UNIQUE NOT NULL,
    bought TEXT
);
'''

conn = sqlite3.connect(DB_NAME)
conn.executescript(_SCHEMA)
conn.commit()
class DB:
    """Context manager yielding a cursor on the shared database.

    Commits on exit; the connection is also closed (the original leaked one
    sqlite connection per ``with`` block).
    """
    def __enter__(self):
        self.conn = sqlite3.connect(DB_NAME)
        return self.conn.cursor()
    def __exit__(self, type, value, traceback):
        self.conn.commit()
        # BUG fix: release the connection instead of leaking it.
        self.conn.close()
|
import cv2, cameraUtils
import numpy as np
from math import sqrt
from more_itertools import sort_together
class CoordinateBox:
    """Axis-aligned bounding box with convenience metrics for clustering."""

    def __init__(self, mins, maxes):
        """Store the corner points and precompute the center and the
        fraction of the capture frame covered by the box."""
        self.x_min, self.y_min = mins
        self.x_max, self.y_max = maxes
        self.x_center, self.y_center = self.getAbsoluteBoxCenter()
        self.ratio_of_image = self._getRatioOfImage()

    def _getRatioOfImage(self):
        """Return box_area / frame_area; also caches width and height."""
        self.width, self.height = self.getWidthAndHeightFromMinsAndMaxes(
            (self.x_min, self.y_min), (self.x_max, self.y_max))
        frame_area = cameraUtils.CAP_HEIGHT * cameraUtils.CAP_WIDTH
        return (self.height * self.width) / frame_area

    def setDistances(self, distances):
        """Record the distances to the other boxes plus their mean."""
        self.distances = distances
        self.avgOfDistances = sum(distances) / len(distances)

    def getAbsoluteBoxCenter(self):
        """Return the (x, y) midpoint of the box in image coordinates."""
        return ((self.x_min + self.x_max) / 2, (self.y_min + self.y_max) / 2)

    @staticmethod
    def getEndCoordsFromStartAndDist(mins, dists):
        """Translate the (x, y) start coordinates by (dx, dy) distances."""
        return [mins[0] + dists[0], mins[1] + dists[1]]

    @staticmethod
    def getWidthAndHeightFromMinsAndMaxes(mins, maxes):
        """Return the (width, height) spanned by two corner points."""
        return (maxes[0] - mins[0], maxes[1] - mins[1])
def _sortBoxesByArea(boxes):
    """Return *boxes* ordered from largest to smallest image-area ratio."""
    keyed = sorted(
        zip((box.ratio_of_image for box in boxes), boxes),
        key=lambda pair: pair[0],
    )
    return [box for _, box in reversed(keyed)]
def _getDistanceFromBoxToAllOthers(boxNum, boxes):
distances = []
for box2 in boxes:
distances.append(sqrt((box2.x_center - boxes[boxNum].x_center)**2 + (box2.y_center - boxes[boxNum].y_center)**2))
return distances
def _removeOutlyingBoxes(boxes):
numOfBoxes = len(boxes)
avgDistsForBoxes = []
boxesToRemove = []
for box in boxes:
avgDistsForBoxes.append(box.avgOfDistances)
avgDistBetweenAllBoxes = sum(avgDistsForBoxes) / len(avgDistsForBoxes)
for i in range(len(boxes)):
areaBias = 1 / (1+boxes[i].ratio_of_image)
if (avgDistsForBoxes[i] * areaBias) > avgDistBetweenAllBoxes:
boxesToRemove.append(i)
for index in boxesToRemove:
try:
boxes.pop(index)
except:
pass
return boxes
def calculateDistanceOfEachBoxToAllOthers(boxes):
    """Attach pairwise center distances to every box.

    Boxes are first ordered by area; each distance list is indexed in that
    same area-sorted order.
    """
    ordered = _sortBoxesByArea(boxes)
    for idx, box in enumerate(ordered):
        box.setDistances(_getDistanceFromBoxToAllOthers(idx, ordered))
def getClusterBoxFromCloseBoxes(boxes):
    """Merge *boxes* into a single bounding CoordinateBox.

    When more than one box is present, outliers are discarded first.
    """
    if len(boxes) > 1:
        boxes = _removeOutlyingBoxes(boxes)
    left = min(box.x_min for box in boxes)
    top = min(box.y_min for box in boxes)
    right = max(box.x_max for box in boxes)
    bottom = max(box.y_max for box in boxes)
    return CoordinateBox((left, top), (right, bottom))
# Morphology kernels: a small one for opening (noise removal) and a larger
# one for closing (filling holes in the detected blobs).
kernelOpen = np.ones((5,5))
kernelClose = np.ones((20,20))
def _createImageMask(image_np, lowerBound=np.array([20, 100, 100]), upperBound=np.array([50, 255, 255])):
    """Return a cleaned binary mask of the pixels inside the given HSV range
    (defaults select yellow)."""
    hsv = cv2.cvtColor(image_np, cv2.COLOR_BGR2HSV)
    # Darken the value channel slightly before thresholding.
    hsv[..., 2] = hsv[..., 2] * 0.9
    in_range = cv2.inRange(hsv, lowerBound, upperBound)
    # Opening removes speckle noise; closing then fills holes in the blobs.
    opened = cv2.morphologyEx(in_range, cv2.MORPH_OPEN, kernelOpen)
    return cv2.morphologyEx(opened, cv2.MORPH_CLOSE, kernelClose)
def getCentralCoordsOfYellowFromImage(image_np):
    """Locate the yellow cluster in *image_np* and return its (x, y) center.

    Draws per-contour rectangles and the final cluster rectangle onto
    *image_np* as a side effect. Returns (9999, 9999) when no yellow
    contour is found.
    """
    maskedImage = _createImageMask(image_np)
    contours, h = cv2.findContours(maskedImage, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    detectionBoxes = []
    for contour in contours:
        x_min, y_min, width, height = cv2.boundingRect(contour)
        cv2.rectangle(image_np, (x_min, y_min), (x_min + width, y_min + height), (230, 11, 0), 2)
        x_max, y_max = CoordinateBox.getEndCoordsFromStartAndDist((x_min, y_min), (width, height))
        detectionBoxes.append(CoordinateBox((x_min, y_min), (x_max, y_max)))
    if not detectionBoxes:
        return (9999, 9999)
    calculateDistanceOfEachBoxToAllOthers(detectionBoxes)
    clusterBox = getClusterBoxFromCloseBoxes(detectionBoxes)
    cv2.rectangle(image_np, (clusterBox.x_min, clusterBox.y_min), (clusterBox.x_max, clusterBox.y_max), (11, 252, 0), 2)
    return clusterBox.getAbsoluteBoxCenter()
import re
import datetime
def weekday(fulldate):
    """Return the Swedish weekday name for the datetime *fulldate*."""
    names = ('Måndag', 'Tisdag', 'Onsdag', 'Torsdag', 'Fredag', 'Lördag', 'Söndag')
    # weekday() yields 0 for Monday through 6 for Sunday.
    return names[datetime.datetime.weekday(fulldate)]
def find_poi(dataframe, column):
    """Return the first point-of-interest found in *column* of *dataframe*.

    Looks for words ending in "gatan"/"vägen", optionally followed by a
    house number. Returns "None" when no row matches.

    BUG fixes: iterating a pandas column yields the values themselves, so
    the original ``row.value`` always raised AttributeError (and returned
    "None" on the very first row); the pattern was also searched twice and
    an ``.isalpha()`` result was discarded.
    """
    pattern = r"(\w*(?<=(gatan))\s\d+|\w*(?<=(vägen))\s\d+|\w*(?<=(vägen))|\w*(?<=(gatan)))"
    for row in dataframe[column]:
        match = re.search(pattern, row)
        if match is not None:
            return match.group(0)
    return "None"
def get_street(textinput):
    """Extract a street reference ("...gatan"/"...vägen", optionally with a
    house number) from *textinput*; return "None" when nothing matches.

    The original searched the pattern twice and discarded the result of an
    ``.isalpha()`` call; a single explicit match check is equivalent.
    """
    pattern = r"(\w*(?<=(gatan))\s\d+|\w*(?<=(vägen))\s\d+|\w*(?<=(vägen))|\w*(?<=(gatan)))"
    match = re.search(pattern, textinput)
    if match is not None:
        return match.group(0)
    return "None"
def create_street_column(df):
    """Add a 'street' column derived from each row's summary text."""
    df["street"] = df["summary"].apply(get_street)
    return df
|
import time
import logging as log
import asyncore
import threading, collections, queue, os, os.path
import deepspeech
import numpy as np
import scipy
import wave
import asyncio
from pyAudioAnalysis import ShortTermFeatures as sF
from pyAudioAnalysis import MidTermFeatures as mF
from pyAudioAnalysis import audioTrainTest as aT
import struct
import json
# Set up the log file; filemode='a' appends across server restarts.
log.basicConfig(filename='wernicke_server.log', filemode='a', format='%(asctime)s - %(levelname)s - %(message)s', level=log.DEBUG)
# log.getLogger('asyncio').setLevel(log.DEBUG)
class ModelsMotherShip():
    """Loads the audio classifier and speech-to-text models once and answers
    "what is this sound" queries over raw 16 kHz mono int16 PCM data."""
    def __init__(self):
        self.BEAM_WIDTH = 500
        self.LM_ALPHA = 0.75
        self.LM_BETA = 1.85
        self.model_dir = 'DeepSpeech/data/wernicke/model/'
        self.model_file = os.path.join(self.model_dir, 'output_graph.pb')
        # self.model_dir = 'deepspeech-0.6.0-models/'
        # self.model_file = os.path.join(self.model_dir, 'output_graph.pbmm')
        self.lm_file = os.path.join(self.model_dir, 'lm.binary')
        self.trie_file = os.path.join(self.model_dir, 'trie')
        self.save_dir = 'saved_wavs'
        os.makedirs(self.save_dir, exist_ok=True)
        # Load the pyAudioAnalysis segment-classifier model.
        log.info('Initializing pyAudioAnalysis classifier model...')
        [self.classifier, self.MEAN, self.STD, self.class_names, self.mt_win, self.mt_step, self.st_win, self.st_step, _] = aT.load_model("wernicke_server_model")
        self.fs = 16000
        log.info('Initializing deepspeech model...')
        self.model = deepspeech.Model(self.model_file, self.BEAM_WIDTH)
        # Temporarily disabling this. I don't think I have nearly enough samples to start doing LM and trie files, etc
        self.model.enableDecoderWithLM(self.lm_file, self.trie_file, self.LM_ALPHA, self.LM_BETA)
        log.info('Models ready.')

    def _bullshit_result(self):
        """Fallback result for unusable audio buffers.

        Factored out of WhatIsThis, which previously duplicated this dict in
        two separate error paths.
        """
        return {
            'loudness': 0.0,
            'class': 'bullshit',
            'probability': 1.0,
            'text': 'fuckitall',
        }

    def WhatIsThis(self, data):
        """Classify a raw PCM buffer and, when it sounds like speech, run it
        through the speech-to-text model.

        Args:
            data: bytes/bytearray of little-endian int16 mono samples.

        Returns:
            dict with 'loudness', 'class', 'probability' and 'text' keys.
        """
        # There are two completely separate models: a pyAudioAnalysis
        # classifier and a deepspeech speech-to-text model.
        log.debug('Converting data to numpy')
        if len(data) % 2 != 0:
            # An odd byte count cannot be int16 samples; bail out.
            log.critical('Data length: {0}'.format(len(data)))
            log.critical('Data: {0}'.format(data))
            return self._bullshit_result()
        AccumulatedData_np = np.frombuffer(data, np.int16)
        # Get the loudness, hope this works
        rms = np.sqrt(np.mean(AccumulatedData_np**2))
        log.debug(f'Raw loudness: {rms}')
        # normalize it, make it between 0.0 and 1.0.
        # rms = round((rms - 20.0) / 45, 2)
        # rms = float(np.clip(rms, 0.0, 1.0))
        seg_len = len(AccumulatedData_np)
        log.debug('seg_len ' + str(seg_len))
        # Run the classifier. This is ripped directly out of paura.py.
        log.debug('Running classifier')
        try:
            [mt_feats, _, _] = mF.mid_feature_extraction(AccumulatedData_np, self.fs,
                                                         seg_len,
                                                         seg_len,
                                                         round(self.fs * self.st_win),
                                                         round(self.fs * self.st_step)
                                                         )
            cur_fv = (mt_feats[:, 0] - self.MEAN) / self.STD
        except ValueError:
            log.error('Yeah, that thing happened')
            log.critical('Data length: {0}'.format(len(data)))
            log.critical('Data: {0}'.format(data))
            return self._bullshit_result()
        # classify vector:
        [res, prob] = aT.classifier_wrapper(self.classifier, "svm_rbf", cur_fv)
        win_class = self.class_names[int(res)]
        win_prob = round(prob[int(res)], 2)
        log.info('Classified {0:s} with probability {1:.2f}'.format(win_class, win_prob))
        # Run the accumulated audio data through deepspeech, if it's speech
        if win_class == 'lover':
            log.debug('Running deepspeech model')
            text = self.model.stt(AccumulatedData_np)
            log.info('Recognized: %s', text)
        else:
            text = 'undefined'
        # (wav saving disabled; kept for future training-data collection)
        # log.debug('Saving wav file')
        # wf = wave.open(os.path.join(self.save_dir, str(int(time.time())) + '_' + win_class + '_' + text.replace(' ', '_') + '.wav'), 'wb')
        # wf.setnchannels(1)
        # wf.setsampwidth(2)
        # wf.setframerate(16000)
        # wf.writeframes(data)
        # wf.close()
        # return an object
        return {
            'loudness': rms,
            'class': win_class,
            'probability': win_prob,
            'text': text,
        }
# Start up the models once at import time; loading is expensive, so this
# single instance is shared by every client connection.
AllTheModels = ModelsMotherShip()
# The wernicke_client.py connects to this script and sends audio data. This is where it's received and processed.
class AudioAnalysisServer(asyncio.Protocol):
    """Receives raw audio from wernicke_client.py, classifies it via the
    shared models, and sends back a JSON result (or answers the client's
    handshake message)."""
    def connection_made(self, transport):
        # One fresh buffer per connection; data may arrive in many chunks.
        peername = transport.get_extra_info('peername')
        log.info('Connection from {}'.format(peername))
        self.transport = transport
        self.AccumulatedData = bytearray()
    def data_received(self, data):
        log.debug('Data received, length: ' + str(len(data)))
        self.AccumulatedData.extend(data)
    def eof_received(self):
        # The client signals end-of-utterance by half-closing its side.
        if self.AccumulatedData == b'HEY_I_LOVE_YOU':
            # Handshake/keepalive: answer and leave the connection open.
            log.info('Received: HEY_I_LOVE_YOU')
            log.info('Sending: I_LOVE_YOU_TOO')
            self.transport.write(b'I_LOVE_YOU_TOO')
        else:
            # Classify and analyze, lift weights.
            log.info('Processing data')
            result = AllTheModels.WhatIsThis(self.AccumulatedData)
            result_json = json.dumps(result)
            # Send result
            log.info('Sending back result: ' + result_json)
            self.transport.write(result_json.encode())
            # Close the connection
            log.debug('Close the client socket')
            self.transport.close()
            # Reset the bytearray back to 0
            self.AccumulatedData = bytearray()
    def connection_lost(self, exc):
        log.info('The client closed the connection')
loop = asyncio.get_event_loop()
# Each client connection will create a new protocol instance
coro = loop.create_server(AudioAnalysisServer, '192.168.0.88', 3000)
server = loop.run_until_complete(coro)
# Serve requests until Ctrl+C is pressed
log.info('Serving on {}'.format(server.sockets[0].getsockname()))
try:
    loop.run_forever()
except KeyboardInterrupt:
    pass
# Close the server and wait for the listening socket to be torn down.
server.close()
loop.run_until_complete(server.wait_closed())
loop.close()
|
# -*- coding: utf-8 -*-
from odoo import api,models,fields
class ProductBrand(models.Model):
    """Odoo model holding product brand master data."""
    _name='product.brand'
    # Human-readable brand name.
    name=fields.Char('Brand Name')
    # Numeric brand code.
    code=fields.Integer('Brand Code')
|
import sys
def main():
    """Insert the missing decimal points into a geojson file.

    Reads ``../<name>`` given on the command line and writes the repaired
    text to ``../formatted_<name>``: every ``[34`` longitude prefix and
    every ``, -119`` latitude prefix gets a ``.`` appended so the
    coordinates become proper decimals.
    """
    if(len(sys.argv) != 2):
        print("Pass in one geojson file in the command line")
        # BUG fix: the original fell through and crashed on sys.argv[1].
        return
    with open("../" + sys.argv[1]) as f: #Add the file that needs to be reformated
        formattedFile = "../formatted_" + sys.argv[1]
        with open(formattedFile, "w") as f1:
            for line in f:
                newLine = line[0:line.find("[34")] + "[34."
                restOfLine = line[line.find("[34") + 3: None]
                newLine += restOfLine[0:restOfLine.find(", -119")] + ", -119."
                restOfLine = restOfLine[restOfLine.find(", -119") + 6: None]
                while len(restOfLine) > 0:
                    if restOfLine.find("[34") == -1:
                        # No more coordinate pairs: copy the tail verbatim.
                        newLine += restOfLine
                        restOfLine = ""
                    else:
                        newLine += restOfLine[0:restOfLine.find("[34")] + "[34."
                        restOfLine = restOfLine[restOfLine.find("[34") + 3: len(line)]
                        newLine += restOfLine[0:restOfLine.find(", -119")] + ", -119."
                        restOfLine = restOfLine[restOfLine.find(", -119") + 6: None]
                f1.write(newLine)
if __name__ == "__main__":
    main()
|
#!/usr/bin/env python
# I will add feedforwardterm later
# I will high speed damping term too
import rospy
import math
from sympy import Derivative, symbols
from std_msgs.msg import Float32MultiArray
from std_msgs.msg import Float32
from nav_msgs.msg import Odometry
# Steering limits in radians (~30 degrees, including a safety margin).
max_rad=0.523598 #I added margin
min_rad=-0.523598
# Softening constant keeps the crosstrack term finite at zero speed.
soft_term=0.01
# NOTE(review): a module-level ``global`` statement is a no-op; the variable
# is simply initialized here and updated by the odometry callback.
global velocity
velocity=0
# Please change gain here for your system
gain=0.5
rospy.init_node('stanley')
def error(msg):
    """Stanley-controller callback: compute the steering angle from the
    fitted path polynomial and publish it on ``delta_wheel``.

    Args:
        msg: Float32MultiArray with cubic coefficients [a3, a2, a1, a0]
             of the path in vehicle coordinates.
    """
    global velocity
    x = symbols('x')
    # Path polynomial and its slope, both evaluated at x = 2.6
    # (presumably a fixed lookahead distance — TODO confirm).
    fx = msg.data[0] * x ** 3 + msg.data[1] * x ** 2 + msg.data[2] * x ** 1 + msg.data[3]
    fprime = Derivative(fx, x).doit()
    n = fprime.subs({x: 2.6})
    cte=-1*fx.subs({x: 2.6}) # check this today
    # Stanley law: heading correction plus arctan(gain*cte / speed).
    crosstrack_error_term=math.atan((gain*cte)/(velocity+soft_term))
    heading_error_term=-1*math.atan(n)
    #print("heading_error term: ",heading_error_term )
    #print("crosstrack_error_term: ",crosstrack_error_term)
    delta= crosstrack_error_term+ heading_error_term
    # Clamp the command to the mechanical steering limits.
    if max_rad < delta:
        delta=max_rad
    elif delta< min_rad:
        delta=min_rad
    pub.publish(delta)
def get_odom_cb(msg):
    """Cache the EKF-estimated forward speed in the module-global
    ``velocity`` for use by the steering callback."""
    global velocity
    velocity = msg.twist.twist.linear.x
    print(velocity)
if __name__== '__main__':
    # Publish steering commands and subscribe to path coefficients and
    # EKF odometry, then hand control to the ROS event loop.
    pub=rospy.Publisher('delta_wheel',Float32,queue_size=10)
    rospy.Subscriber('coefficients',Float32MultiArray,error)
    rospy.Subscriber("Odometry/ekf_estimated",Odometry,get_odom_cb)
    rospy.spin()
|
# Read N points from stdin, sort them by x then y, and print one per line.
N = int(input())
result = [list(map(int, input().split(" "))) for _ in range(N)]
result.sort(key=lambda point: (point[0], point[1]))
for point in result:
    print(str(point[0]) + " " + str(point[1]))
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================================
msproteomicstools -- Mass Spectrometry Proteomics Tools
=========================================================================
Copyright (c) 2013, ETH Zurich
For a full list of authors, refer to the file AUTHORS.
This software is released under a three-clause BSD license:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of any author or any participating institution
may be used to endorse or promote products derived from this software
without specific prior written permission.
--------------------------------------------------------------------------
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL ANY OF THE AUTHORS OR THE CONTRIBUTING
INSTITUTIONS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------------
$Maintainer: Hannes Roest$
$Authors: Hannes Roest$
--------------------------------------------------------------------------
"""
import unittest
import os
import msproteomicstoolslib.data_structures.Precursor as precursor
from msproteomicstoolslib.data_structures.PeakGroup import PeakGroupBase
class MockPeakGroup():
    """Minimal stand-in for a peak group used by the precursor tests."""
    def __init__(self, fdr=1, id=1, selected=False):
        self.fdr_score = fdr
        self.id = id
        self.sel = selected
    def get_id(self):
        """Return the peak group identifier."""
        return self.id
    def is_selected(self):
        """Return whether this peak group is selected.

        BUG fix: the original returned ``self.id`` instead of ``self.sel``.
        """
        return self.sel
class TestUnitPrecursorBase(unittest.TestCase):
    """Tests for the abstract PrecursorBase class."""
    def setUp(self):
        pass
    def test_create_precursor(self):
        # Constructing PrecursorBase directly is expected to raise
        # (it appears to be an abstract base — confirmed only by this test).
        self.assertRaises(Exception, precursor.PrecursorBase, 0, 0)
class TestUnitPrecursor(unittest.TestCase):
    """Unit tests for precursor.Precursor: decoy flags, peak group storage,
    cluster ids and selection logic."""
    def setUp(self):
        pass
    def test_create_precursor(self):
        # Construction and __str__ must not raise.
        p = precursor.Precursor("precursor_2", [])
        str(p)
        self.assertTrue(True)
    def test_get_id(self):
        p = precursor.Precursor("precursor_2", [])
        self.assertEqual(p.get_id(), "precursor_2")
    def test_get_decoy(self):
        # set_decoy accepts the string spellings "TRUE"/"FALSE"/"1"/"0"
        # and raises on anything else.
        p = precursor.Precursor("precursor_2", [])
        self.assertFalse(p.get_decoy())
        p.set_decoy("TRUE")
        self.assertTrue(p.get_decoy())
        p.set_decoy("FALSE")
        self.assertFalse(p.get_decoy())
        p.set_decoy("1")
        self.assertTrue(p.get_decoy())
        p.set_decoy("0")
        self.assertFalse(p.get_decoy())
        self.assertRaises(Exception, p.set_decoy, "dummy")
    def test_add_peakgroup_tpl(self):
        """
        0. id
        1. quality score (FDR)
        2. retention time (normalized)
        3. intensity
        (4. d_score optional)
        """
        pg_tuple = ("someID", 0.1, 100, 10000, 2)
        p = precursor.Precursor("precursor_2", [])
        # Adding a peak group whose precursor id does not match must fail.
        self.assertRaises(Exception, p.add_peakgroup_tpl, pg_tuple, "notMatchingID", 4)
        p.add_peakgroup_tpl(pg_tuple, "precursor_2", 4)
        self.assertEqual( len(list(p.get_all_peakgroups())), 1)
        firstpg = list(p.get_all_peakgroups())[0]
        self.assertEqual( firstpg.get_cluster_id(), 4)
        self.assertAlmostEqual( firstpg.get_fdr_score(), 0.1)
        self.assertAlmostEqual( firstpg.get_normalized_retentiontime(), 100)
        self.assertAlmostEqual( firstpg.get_intensity(), 10000)
        # The trailing d_score tuple element is optional.
        pg_tuple = ("someID", 0.1, 100, 10000)
        p = precursor.Precursor("precursor_2", [])
        p.add_peakgroup_tpl(pg_tuple, "precursor_2", 4)
        self.assertEqual( len(list(p.get_all_peakgroups())), 1)
    def test_setClusterId(self):
        pg_tuple = ("someID", 0.1, 100, 10000, 2)
        p = precursor.Precursor("precursor_2", [])
        p.add_peakgroup_tpl(pg_tuple, "precursor_2", 4)
        firstpg = list(p.get_all_peakgroups())[0]
        self.assertEqual( firstpg.get_cluster_id(), 4)
        p.setClusterID("someID", 5)
        firstpg = list(p.get_all_peakgroups())[0]
        self.assertEqual( firstpg.get_cluster_id(), 5)
    def test_selectpg(self):
        # select/unselect map the cluster id to 1 and -1 respectively.
        pg_tuple = ("someID", 0.1, 100, 10000, 2)
        p = precursor.Precursor("precursor_2", [])
        p.add_peakgroup_tpl(pg_tuple, "precursor_2", 4)
        firstpg = list(p.get_all_peakgroups())[0]
        self.assertEqual( firstpg.get_cluster_id(), 4)
        p.unselect_pg("someID")
        firstpg = list(p.get_all_peakgroups())[0]
        self.assertEqual( firstpg.get_cluster_id(), -1)
        p.select_pg("someID")
        firstpg = list(p.get_all_peakgroups())[0]
        self.assertEqual( firstpg.get_cluster_id(), 1)
        p.unselect_pg("someID")
        firstpg = list(p.get_all_peakgroups())[0]
        self.assertEqual( firstpg.get_cluster_id(), -1)
    def test_selection(self):
        p = precursor.Precursor("precursor_2", [])
        self.assertIsNone( p.get_best_peakgroup() )
        # "someID" is selected (cluster 1); "someID_" has the better FDR.
        pg_tuple = ("someID", 0.1, 100, 10000, 2)
        p.add_peakgroup_tpl(pg_tuple, "precursor_2", 1)
        pg_tuple = ("someID_", 0.01, 105, 10000, 2)
        p.add_peakgroup_tpl(pg_tuple, "precursor_2", -1)
        self.assertEqual( p.get_selected_peakgroup().get_feature_id(), "someID")
        self.assertEqual( p.get_best_peakgroup().get_feature_id(), "someID_")
        self.assertEqual( p.find_closest_in_iRT(99).get_feature_id(), "someID")
        self.assertEqual( p.find_closest_in_iRT(110).get_feature_id(), "someID_")
        self.assertEqual( p.find_closest_in_iRT(102.8).get_feature_id(), "someID_")
        self.assertEqual( len(list(p.getClusteredPeakgroups())), 1)
        self.assertEqual( list(p.getClusteredPeakgroups())[0].get_feature_id(), "someID")
        # Un-select all pg
        # Only one pg should be selected at a time
        p.unselect_all()
        self.assertIsNone(p.get_selected_peakgroup())
        p.select_pg("someID_")
        self.assertRaises(AssertionError, p.select_pg, "someID")
class TestUnitGeneralPrecursor(unittest.TestCase):
    """Unit tests for precursor.GeneralPrecursor."""
    def setUp(self):
        pass
    def test_create_precursor(self):
        p = precursor.GeneralPrecursor("precursor_2", [])
        self.assertTrue(True)
    def test_get_id(self):
        p = precursor.GeneralPrecursor("precursor_2", [])
        self.assertEqual(p.get_id(), "precursor_2")
    def test_append(self):
        # TODO is this even used anywhere ???
        pg = MockPeakGroup(1, "precursor_2")
        p = precursor.GeneralPrecursor("precursor_2", [])
        p.append(pg)
        self.assertEqual( len(list(p.get_all_peakgroups())), 1)
    def test_add_peakgroup(self):
        pg = PeakGroupBase()
        p = precursor.GeneralPrecursor("precursor_2", [])
        p.add_peakgroup(pg)
        self.assertEqual( len(list(p.get_all_peakgroups())), 1)
    def test_selectpg(self):
        pg = PeakGroupBase()
        pg.cluster_id_ = 4
        pg.id_ = "someID"
        p = precursor.GeneralPrecursor("precursor_2", [])
        p.add_peakgroup(pg)
        self.assertEqual( len(list(p.get_all_peakgroups())), 1)
        firstpg = list(p.get_all_peakgroups())[0]
        self.assertEqual( firstpg.get_cluster_id(), 4)
    def test_selection(self):
        p = precursor.GeneralPrecursor("precursor_2", [])
        self.assertIsNone( p.get_selected_peakgroup() )
        self.assertIsNone( p.get_best_peakgroup() )
        # Cluster id 1 marks the selected peak group; the 100 vs 105
        # retention times drive the find_closest_in_iRT expectations below.
        pg = PeakGroupBase()
        pg.cluster_id_ = 1
        pg.id_ = "someID"
        pg.normalized_retentiontime = 100
        p.add_peakgroup(pg)
        self.assertEqual( len(list(p.get_all_peakgroups())), 1)
        pg = PeakGroupBase()
        pg.cluster_id_ = 2
        pg.id_ = "someID_"
        pg.normalized_retentiontime = 105
        p.add_peakgroup(pg)
        self.assertEqual( len(list(p.get_all_peakgroups())), 2)
        self.assertEqual( p.get_selected_peakgroup().get_feature_id(), "someID")
        self.assertEqual( p.get_best_peakgroup().get_feature_id(), "someID_")
        self.assertEqual( p.find_closest_in_iRT(99).get_feature_id(), "someID")
        self.assertEqual( p.find_closest_in_iRT(110).get_feature_id(), "someID_")
        self.assertEqual( p.find_closest_in_iRT(102.8).get_feature_id(), "someID_")
if __name__ == '__main__':
    # Run the whole test suite when the file is executed directly.
    unittest.main()
|
def SumDigit(a):
    """Return the sum of the decimal digits of the integer *a*.

    Generalized to negative inputs: the sign is ignored (the original loop
    never executed for a <= 0 and returned 0).
    """
    a = abs(a)
    suma = 0
    while a > 0:
        suma += a % 10
        a //= 10
    return suma


print(SumDigit(112345))
# -*- coding: utf-8 -*-
from git import Repo
import os
class Git:
    """Thin wrapper around GitPython for cloning the project template."""

    def __init__(self, oniroku):
        """Capture the target directory and project name from *oniroku*."""
        self.repos = self.get_template_repo()
        self.directory = oniroku.directory
        self.name = oniroku.name

    def get_template_repo(self):
        """Return the URL of the template repository."""
        return "git@github.com:takaaki-mizuno/oniroku-template-tab.git"

    def clone_repo(self):
        """Clone the template into <directory>/<name> and return that path."""
        destination = os.path.join(self.directory, self.name)
        Repo.clone_from(self.repos, destination, branch='master')
        return destination
|
"""
The Data Science Assistant
==========================
The Data Science ``Assistant`` provides a collection of methods to address
the most typical procedures when analyzing data. Such processes include::
- Profiling data
- Filling missing values
- Detecting and removing outliers
- Feature transformations
- Feature engineering
- Feature selection
- Model selection
- Model hyper-parameter optimization
- Ensemble
This assistant can help you automate or reduce many of the boiler plate code
or repetitive tasks for dealing with Data Science projects or ETL processes.
Through a simple, but configurable API, you are able to achieve the bulk of
the work in data analysis scenarios with some few methods. Furthermore,
detailed reports for each procedure are generated and made available to the user
so he/she can analyze the results of the full process and gather valuable insights
on potential improvements with little to no effort.
The ``Assistant`` class wraps the full functionality of ``automl_toolbox``
into a single object with lots of methods to work with, and it enables users
to access and modify its configurations and set it up according to their needs.
"""
import pandas as pd
from pandas_profiling import ProfileReport
from typing import Dict, Union, List, Optional
from .data_cleaning import profiler
from .utils import parse_backend_name_string, parse_or_infer_task_name
class Assistant(object):
    """Data Science Assistance / Wizard.
    Parameters
    ----------
    df : pandas.DataFrame
        Input data.
    target : str
        Target column name.
    task : str, optional
        Type of task to analyze. If no task is passed as input,
        it will be inferred using the target label with the input
        DataFrame. Options: 'classification', 'cls', 'regression',
        'reg', 'clustering', 'cluster'. Default: None.
    backend : str, optional
        Name of the model's backend. Default: 'lightgbm'.
    """
    def __init__(self,
                 df: pd.DataFrame,
                 target: str,
                 task: str = None,
                 backend: str = 'lightgbm'
                 ) -> None:
        self.df = df
        self.target = target
        # Infer the task from the data when none was given explicitly.
        self.task = parse_or_infer_task_name(self.df, self.target, task)
        # Normalize/validate the backend name string.
        self.backend = parse_backend_name_string(backend)
        # Profiling helper bound to the same data and configuration.
        self.data = DataProfiler(self.df, self.target, self.task, self.backend)
class DataProfiler(object):
    """Analyses and profiles the data.
    Parameters
    ----------
    df : pandas.DataFrame
        Input data.
    target : str
        Target column name.
    task : str, optional
        Type of task to analyze. If no task is passed as input,
        it will be inferred using the target label with the input
        DataFrame. Options: 'classification', 'cls', 'regression',
        'reg', 'clustering', 'cluster'. Default: None.
    backend : str, optional
        Name of the model's backend. Default: 'lightgbm'.
    """
    def __init__(self,
                 df: pd.DataFrame,
                 target: str,
                 task: str = None,
                 backend: str = 'lightgbm') -> None:
        self.df = df
        self.target = target
        self.task = parse_or_infer_task_name(self.df, self.target, task)
        self.backend = parse_backend_name_string(backend)
    def profile(self,
                df: pd.DataFrame = None,
                target: str = None,
                task: str = None,
                show: Union[bool, str, list] = 'all'
                ) -> Dict[str, Union[ProfileReport, dict]]:
        """Generates profile reports from a Pandas DataFrame.
        Parameters
        ----------
        df : pandas.DataFrame, optional
            Input pandas DataFrame to be profiled. Default: None.
        target : str, optional
            Target column of the DataFrame. Default: None.
        task : str, optional
            Type of task to analyze. If no task is passed as input,
            it will be inferred using the target label with the input
            DataFrame. Options: 'classification', 'cls', 'regression',
            'reg', 'clustering', 'cluster'. Default: None.
        show : bool | str | list, optional
            Manages what information of the profile report is displayed
            on screen. Options: 'all', 'full', 'basic'. Default: 'all'
        Returns
        -------
        dict
            Report of the data.
        """
        # BUG fix: ``if df:`` raises ValueError for a DataFrame because its
        # truth value is ambiguous; compare against None explicitly.
        if df is not None:
            df_analysis = df
            target_analysis = target
            task_analysis = parse_or_infer_task_name(df_analysis, target_analysis, task)
        else:
            # Fall back to the data this profiler was constructed with.
            df_analysis = self.df
            target_analysis = self.target
            task_analysis = self.task
        report: dict = profiler(df=df_analysis,
                                target=target_analysis,
                                task=task_analysis,
                                show=show)
        return report
|
from scipy.io import arff
import numpy as np
from sklearn.preprocessing import MinMaxScaler

m = MinMaxScaler()

# Movement Libras: scale the 90 feature columns, keep the label column as-is.
data, metadata = arff.loadarff('movement_libras.arff')
# Build the matrix with one allocation instead of the original O(n^2)
# row-by-row np.append loop.
D = np.array([list(d) for d in data], dtype=np.float32)
D[:, 0:90] = m.fit_transform(D[:, 0:90])
np.save("data_libras.npy", D)

# Wdbc: drop the id column (index 0), scale the 30 feature columns, then
# append the class label (B/M encoded as 0/1) as column 30.
samples, metadata = arff.loadarff('wdbc.arff')
D = np.array([list(d)[1:31] for d in samples], dtype=np.float32)
D = m.fit_transform(D)
dictionary = dict(zip(['B', 'M'], [0, 1]))
new_labels = [dictionary[label] for label in samples['class']]
D = np.insert(D, 30, new_labels, axis=1)
np.save("data_wdbc.npy", D)

# Arrhythmia: scale the 278 feature columns, keep the label column.
samples, metadata = arff.loadarff('arrhythmia.arff')
D = np.array([list(d) for d in samples], dtype=np.float32)
D[:, 0:278] = m.fit_transform(D[:, 0:278])
np.save("data_arrhythmia.npy", D)
|
# Generated by Django 2.2.3 on 2019-07-17 08:58
from django.db import migrations, models
class Migration(migrations.Migration):

    dependencies = [
        ('rooms', '0006_auto_20190717_0808'),
    ]

    # Every rating field was converted to FloatField with the same default,
    # so the AlterField operations are generated from one list of names.
    operations = [
        migrations.AlterField(
            model_name='room',
            name=field_name,
            field=models.FloatField(default=0),
        )
        for field_name in (
            'accuracy_rating',
            'checkin_rating',
            'clean_rating',
            'communication_rating',
            'location_rating',
            'total_rating',
            'value_rating',
        )
    ]
|
'''
Read a real number from a single line of input and print only its integer
part, without insignificant zeros.
Stepik001132ITclassPyсh03p01st02TASK01_20200611.py
'''
num = input()
# BUG fix: str.partition copes with inputs that have no decimal point
# (split('.') raised ValueError when unpacking e.g. "42").
a, _, _ = num.partition('.')
print(int(a))
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
"""
发送邮件
"""
"""
第一种发送邮件的方法
import smtplib
from email.mime.text import MIMEText
from email.utils import formataddr
#通过qq邮箱发送
my_sender='1617265674@qq.com' # 发件人邮箱账号
my_pass = 'vvdyneaymjbbddhf' # 发件人邮箱密码(当时申请smtp给的口令)
my_user='rongzepei@gosuncn.com' # 收件人邮箱账号,我这边发送给自己
def mail():
ret=True
try:
msg=MIMEText('填写邮件内容','plain','utf-8')
msg['From']=formataddr(["发件人昵称",my_sender]) # 括号里的对应发件人邮箱昵称、发件人邮箱账号
msg['To']=formataddr(["收件人昵称",my_user]) # 括号里的对应收件人邮箱昵称、收件人邮箱账号
msg['Subject']="邮件主题-测试" # 邮件的主题,也可以说是标题
server=smtplib.SMTP_SSL("smtp.qq.com", 465) # 发件人邮箱中的SMTP服务器,端口是465
server.login(my_sender, my_pass) # 括号中对应的是发件人邮箱账号、邮箱密码
server.sendmail(my_sender,[my_user,],msg.as_string()) # 括号中对应的是发件人邮箱账号、收件人邮箱账号、发送邮件
server.quit()# 关闭连接
except Exception:# 如果 try 中的语句没有执行,则会执行下面的 ret=False
ret=False
return ret
ret=mail()
if ret:
print("邮件发送成功")
else:
print("邮件发送失败")
"""
"""
第二种发送邮件的方法
"""
import yagmail
#连接邮箱服务器
yag = yagmail.SMTP(user='1617265674@qq.com',password='vvdyneaymjbbddhf',host='smtp.qq.com')
#邮件正文
contents = ['邮件正文','asdasdasdasdasdasd','qweqweqweqweqweq']
#发送邮件
yag.send(
'rongzepei@gosuncn.com','邮件主题',contents,['D:\python\selenium_learn\selenium-6.py']
)
#若是发送给多个用户的时候可以定义一个列表['xx@xx.com','xx@xx.com']放到邮箱的位置 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Author : Kun Luo
# @Email : olooook@outlook.com
# @File : transformer.py
# @Date : 2021/06/28
# @Time : 17:05:19
import torch
from torch import nn
def pad_mask(q, k=None) -> torch.BoolTensor:
    """Build a padding mask: position (i, j) is True iff both token i of q
    and token j of k are non-zero (token id 0 is treated as padding).

    Args:
        q: [batch_size, seq_len]
        k: [batch_size, seq_len]. Defaults to None (self-attention: use q).
    Returns:
        BoolTensor: [batch_size, seq_len, seq_len]
    """
    rows = q.bool().unsqueeze(2)  # [batch_size, seq_len, 1]
    if k is None:
        cols = rows.transpose(-1, -2)  # [batch_size, 1, seq_len]
    else:
        cols = k.bool().unsqueeze(1)   # [batch_size, 1, seq_len]
    return rows & cols                 # [batch_size, seq_len, seq_len]
def subsequence_mask(x) -> torch.BoolTensor:
    """Causal (lower-triangular) mask: position (i, j) is True iff j <= i,
    so each position may only attend to itself and earlier positions.

    Args:
        x: [batch_size, seq_len]
    Returns:
        BoolTensor: [seq_len, seq_len]
    """
    n = x.size(1)
    return torch.tril(torch.ones(n, n, dtype=torch.bool))
class TokenEmbedding(nn.Module):
    """Token embedding lookup scaled by sqrt(d_model), per the Transformer
    convention."""

    def __init__(self, vocab_size, d_model):
        super().__init__()
        self.d_model = d_model
        self.embed = nn.Embedding(vocab_size, d_model)

    def forward(self, x):
        """
        Args:
            x: [batch_size, seq_len]
        Returns:
            Tensor: [batch_size, seq_len, d_model]
        """
        scale = self.d_model ** 0.5
        return self.embed(x) * scale
class PostionalEncoding(nn.Module):
    r"""Sinusoidal positional encoding (Vaswani et al., 2017).

    PE(2i) = sin(\frac{pos}{10000^{\frac{2i}{d_{model}}}})
    PE(2i + 1) = cos(\frac{pos}{10000^{\frac{2i}{d_{model}}}})
    """

    def __init__(self, d_model, max_len):
        super(PostionalEncoding, self).__init__()
        # position column vector and per-dimension exponents
        pos = torch.arange(0, max_len, 1.0).unsqueeze(-1)  # [max_len, 1]
        exp = torch.arange(0, d_model, 2.0) / d_model      # [ceil(d_model/2)]
        angles = pos / (10000 ** exp)                      # [max_len, ceil(d_model/2)]
        encoding = torch.empty(max_len, d_model)
        encoding[:, 0::2] = torch.sin(angles)
        # Slice the angles for the cosine half so odd d_model works too
        # (the original assignment crashed on a size mismatch there).
        encoding[:, 1::2] = torch.cos(angles[:, :d_model // 2])
        # register_buffer instead of a frozen nn.Parameter: the table still
        # moves with .to()/.cuda() and is saved in state_dict under the same
        # "encoding" key, but no longer shows up in parameters()/optimizers.
        self.register_buffer("encoding", encoding)

    def forward(self, x):
        """
        Args:
            x: [batch_size, seq_len]
        Returns:
            Tensor: [seq_len, d_model] — broadcast over the batch by callers.
        """
        seq_len = x.size(1)
        return self.encoding[:seq_len]
class Embeddings(nn.Module):
    """Token embedding plus positional encoding, with dropout applied to
    the sum."""

    def __init__(self, vocab_size, d_model, max_len, dropout=.1):
        super(Embeddings, self).__init__()
        self.embed = TokenEmbedding(vocab_size, d_model)
        self.encode = PostionalEncoding(d_model, max_len)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        """
        Args:
            x: [batch_size, seq_len]
        Returns:
            Tensor: [batch_size, seq_len, d_model]
        """
        embedded = self.embed(x)
        positions = self.encode(x)  # broadcasts over the batch dimension
        return self.dropout(embedded + positions)
class LayerNorm(nn.Module):
    """Layer normalization over the last dimension with learnable scale and
    shift.

    NOTE(review): this uses the unbiased (Bessel-corrected) std and adds eps
    to the std rather than the variance — preserved as-is, but it differs
    slightly from ``nn.LayerNorm``; confirm if exact parity is ever needed.
    """

    def __init__(self, d_model, eps=1e-6):
        super(LayerNorm, self).__init__()
        self.gamma = nn.Parameter(torch.ones(d_model), requires_grad=True)
        self.beta = nn.Parameter(torch.zeros(d_model), requires_grad=True)
        self.eps = eps

    def forward(self, x):
        """
        Args:
            x: [batch_size, seq_len, d_model]
        Returns:
            Tensor: [batch_size, seq_len, d_model]
        """
        mean = x.mean(dim=-1, keepdim=True)
        std = x.std(dim=-1, keepdim=True)  # unbiased, matching torch.std_mean
        normalized = (x - mean) / (std + self.eps)
        return self.gamma * normalized + self.beta
class MultiHeadAttention(nn.Module):
    """Multi-head scaled dot-product attention.

    Args:
        d_model: model dimension; must be divisible by n_heads (the per-head
            size comes from the ``view(..., n_heads, -1)`` reshape).
        n_heads: number of attention heads.
        dropout: dropout probability applied to attention weights and the
            projected output; None (default) disables it.
    """

    def __init__(self, d_model, n_heads, dropout=None):
        super(MultiHeadAttention, self).__init__()
        self.n_heads = n_heads
        self.to_q, self.to_k, self.to_v, self.proj = [nn.Linear(d_model, d_model) for _ in range(4)]
        self.dropout = nn.Dropout(dropout) if dropout else None

    def forward(self, q, k, v, mask=None):
        """
        Args:
            q: [batch_size, q_len, d_model]
            k: [batch_size, kv_len, d_model]
            v: [batch_size, kv_len, d_model]
            mask (BoolTensor): broadcastable to [batch, q_len, kv_len];
                False positions are excluded from attention. Defaults to None.
        Returns:
            Tensor: [batch_size, q_len, d_model]
        """
        batch_size = q.size(0)
        # q_len and kv_len may differ (decoder-encoder cross attention), so
        # each projection must be reshaped with its own sequence length.
        # The original reused q's length for k and v, which crashed whenever
        # the source and target sequences had different lengths.
        q_len, kv_len = q.size(1), k.size(1)
        #========== project to multi-head q/k/v ==========#
        q, k, v = self.to_q(q), self.to_k(k), self.to_v(v)
        # [batch, len, n_heads, d_head] -> [n_heads, batch, len, d_head]
        q = q.view(batch_size, q_len, self.n_heads, -1).permute(2, 0, 1, 3)
        k = k.view(batch_size, kv_len, self.n_heads, -1).permute(2, 0, 1, 3)
        v = v.view(batch_size, kv_len, self.n_heads, -1).permute(2, 0, 1, 3)
        #========== attention weights ==========#
        # [n_heads, batch, q_len, d_head] @ [n_heads, batch, d_head, kv_len] / sqrt(d_head)
        scaled_dot_prod = q @ k.transpose(-1, -2) * k.size(-1)**-.5
        if mask is not None:
            # masked-out positions get a huge negative logit -> ~0 after softmax
            scaled_dot_prod += torch.where(mask, .0, -1e12)
        attn = torch.softmax(scaled_dot_prod, dim=-1)  # [n_heads, batch, q_len, kv_len]
        if self.dropout is not None:
            attn = self.dropout(attn)
        #========== weighted values and output projection ==========#
        v = attn @ v  # -> [n_heads, batch, q_len, d_head]
        # [n_heads, batch, q_len, d_head] -> [batch, q_len, d_model]
        v = v.permute(1, 2, 0, 3).reshape(batch_size, q_len, -1)
        out = self.proj(v)
        return self.dropout(out) if self.dropout else out
class PositionwiseFeedForward(nn.Sequential):
    """Position-wise feed-forward network: Linear -> ReLU -> Linear -> Dropout.

    Applied independently at every sequence position; expands d_model to
    d_hidden and projects back to d_model.
    """

    def __init__(self, d_model, d_hidden, dropout=.1):
        super(PositionwiseFeedForward, self).__init__(
            nn.Linear(d_model, d_hidden),
            nn.ReLU(),
            nn.Linear(d_hidden, d_model),
            nn.Dropout(dropout)
        )
class EncoderLayer(nn.Module):
    """Single Transformer encoder layer: self-attention then a position-wise
    feed-forward network, each followed by residual add, layer norm and
    dropout (post-norm arrangement)."""

    def __init__(self, d_model, d_ffn, n_heads=8, dropout=.1):
        super(EncoderLayer, self).__init__()
        self.attn = MultiHeadAttention(d_model=d_model, n_heads=n_heads)
        self.norm1 = LayerNorm(d_model=d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.ffn = PositionwiseFeedForward(d_model=d_model, d_hidden=d_ffn, dropout=dropout)
        self.norm2 = LayerNorm(d_model=d_model)
        self.dropout2 = nn.Dropout(dropout)

    def forward(self, x, self_attn_mask):
        """
        Args:
            x: [batch_size, seq_len, d_model]
            self_attn_mask (BoolTensor): broadcastable to [batch, seq_len, seq_len]
        Returns:
            Tensor: [batch_size, seq_len, d_model]
        """
        # self-attention sub-layer
        attn_out = self.attn(x, x, x, mask=self_attn_mask)
        x = self.dropout1(self.norm1(attn_out + x))
        # feed-forward sub-layer
        ffn_out = self.ffn(x)
        x = self.dropout2(self.norm2(ffn_out + x))
        return x
class Encoder(nn.Module):
    """Stack of EncoderLayers applied to embedded source tokens."""

    def __init__(self, vocab_size, max_len, d_model, d_ffn, n_heads=8, n_layers=6, dropout=.1):
        super().__init__()
        self.embed = Embeddings(vocab_size=vocab_size, max_len=max_len, d_model=d_model, dropout=dropout)
        self.layers = nn.ModuleList([
            EncoderLayer(d_model=d_model, d_ffn=d_ffn, n_heads=n_heads, dropout=dropout)
            for _ in range(n_layers)
        ])

    def forward(self, source):
        """
        Args:
            source (LongTensor): [batch_size, seq_len]
        Returns:
            Tensor: [batch_size, seq_len, d_model]
        """
        # Padding positions (token id 0) are masked out of self-attention.
        mask = pad_mask(source)
        hidden = self.embed(source)
        for encoder_layer in self.layers:
            hidden = encoder_layer(hidden, mask)
        return hidden
class DecoderLayer(nn.Module):
    """Single Transformer decoder layer: masked self-attention,
    decoder-encoder cross-attention, then a position-wise feed-forward
    network; each sub-layer is followed by residual add, layer norm and
    dropout (post-norm arrangement)."""

    def __init__(self, d_model, d_ffn, n_heads=8, dropout=.1):
        super(DecoderLayer, self).__init__()
        self.attn1 = MultiHeadAttention(d_model=d_model, n_heads=n_heads)
        self.norm1 = LayerNorm(d_model=d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.attn2 = MultiHeadAttention(d_model=d_model, n_heads=n_heads)
        self.norm2 = LayerNorm(d_model=d_model)
        self.dropout2 = nn.Dropout(dropout)
        self.ffn = PositionwiseFeedForward(d_model=d_model, d_hidden=d_ffn, dropout=dropout)
        self.norm3 = LayerNorm(d_model=d_model)
        self.dropout3 = nn.Dropout(dropout)

    def forward(self, x, memory, self_attn_mask, cross_attn_mask):
        """
        Args:
            x: [batch_size, seq_len, d_model]
            memory: [batch_size, seq_len, d_model], output of the encoder
            self_attn_mask (BoolTensor): broadcastable to [batch, seq_len, seq_len]
            cross_attn_mask (BoolTensor): broadcastable to [batch, seq_len, seq_len]
        Returns:
            Tensor: [batch_size, seq_len, d_model]
        """
        # masked self-attention sub-layer
        self_out = self.attn1(x, x, x, mask=self_attn_mask)
        x = self.dropout1(self.norm1(self_out + x))
        # cross-attention sub-layer: queries from the decoder state,
        # keys/values from the encoder memory
        cross_out = self.attn2(x, memory, memory, mask=cross_attn_mask)
        x = self.dropout2(self.norm2(cross_out + x))
        # feed-forward sub-layer
        ffn_out = self.ffn(x)
        x = self.dropout3(self.norm3(ffn_out + x))
        return x
class Decoder(nn.Module):
    """Stack of DecoderLayers applied to embedded target tokens while
    attending to the encoder output (memory)."""

    def __init__(self, vocab_size, max_len, d_model, d_ffn, n_heads=8, n_layers=6, dropout=.1):
        super().__init__()
        self.embed = Embeddings(vocab_size=vocab_size, max_len=max_len, d_model=d_model, dropout=dropout)
        self.layers = nn.ModuleList([
            DecoderLayer(d_model=d_model, d_ffn=d_ffn, n_heads=n_heads, dropout=dropout)
            for _ in range(n_layers)
        ])

    def forward(self, source, target, memory):
        """
        Args:
            source (LongTensor): [batch_size, seq_len] — used only for masking
            target (LongTensor): [batch_size, seq_len]
            memory: [batch_size, seq_len, d_model], output of encoder
        Returns:
            [batch_size, seq_len, d_model]
        """
        # Self-attention: non-pad target tokens, causal (no peeking ahead).
        # Cross-attention: non-pad source tokens per non-pad target token.
        self_mask = pad_mask(target) & subsequence_mask(target)
        cross_mask = pad_mask(target, source)
        hidden = self.embed(target)
        for decoder_layer in self.layers:
            hidden = decoder_layer(hidden, memory, self_mask, cross_mask)
        return hidden
class Generator(nn.Sequential):
    """Output head: projects decoder features to per-token log-probabilities
    over the target vocabulary (Linear followed by LogSoftmax)."""

    def __init__(self, vocab_size, d_model):
        super(Generator, self).__init__(
            nn.Linear(d_model, vocab_size),
            nn.LogSoftmax(dim=-1)
        )
class Transformer(nn.Module):
    """Full encoder-decoder Transformer with a log-softmax generator head."""

    def __init__(
        self, source_vocab_size, target_vocab_size, max_len,
        d_model=512, d_ffn=2048, n_heads=8, n_layers=6, dropout=.1
    ):
        super(Transformer, self).__init__()
        # Encoder and decoder share every hyperparameter except vocab size.
        shared = dict(max_len=max_len, d_model=d_model, d_ffn=d_ffn,
                      n_heads=n_heads, n_layers=n_layers, dropout=dropout)
        self.encoder = Encoder(vocab_size=source_vocab_size, **shared)
        self.decoder = Decoder(vocab_size=target_vocab_size, **shared)
        self.generator = Generator(target_vocab_size, d_model)

    def forward(self, source, target):
        """
        Args:
            source (LongTensor): [batch_size, seq_len]
            target (LongTensor): [batch_size, seq_len]
        Returns:
            Tensor: [batch_size, seq_len, target_vocab_size] log-probabilities
        """
        memory = self.encoder(source)
        decoding = self.decoder(source, target, memory)
        return self.generator(decoding)
if __name__ == "__main__":
    # Tiny smoke test: two 4-token English->German pairs.
    # Column 0: encoder input; column 1: decoder input (<bos>-prefixed);
    # column 2: expected decoder output (<eos>-suffixed).
    sentences = [
        ['i love you <space>', '<bos> ich liebe dich', 'ich liebe dich <eos>'],
        ['you love me <space>', '<bos> du liebst mich', 'du liebst mich <eos>']
    ]
    # index 0 is the padding token in both vocabularies (see pad_mask)
    source_vocab = ['<space>', 'i', 'love', 'you', 'me']
    target_vocab = ['<space>', '<bos>', '<eos>', 'ich', 'liebe', 'dich', 'du', 'liebst', 'mich']
    source_vocab_dict = {word: i for i, word in enumerate(source_vocab)}
    target_vocab_dict = {word: i for i, word in enumerate(target_vocab)}
    # tokenize each column into id tensors of shape [2, 4]
    encoder_inputs = torch.tensor([[source_vocab_dict[word] for word in strs[0].split(' ')] for strs in sentences])
    decoder_inputs = torch.tensor([[target_vocab_dict[word] for word in strs[1].split(' ')] for strs in sentences])
    decoder_outputs = torch.tensor([[target_vocab_dict[word] for word in strs[2].split(' ')] for strs in sentences])
    print(source_vocab_dict)
    print(target_vocab_dict)
    print(encoder_inputs)
    print(decoder_inputs)
    print(decoder_outputs)
    # forward pass only — no training loop; just checks the output shape
    transformer = Transformer(source_vocab_size=len(source_vocab), target_vocab_size=len(target_vocab), max_len=4)
    output = transformer(encoder_inputs, decoder_inputs)
    print(output.shape)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-05-06 16:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: creates the ``register`` model.

    NOTE(review): the ``password`` column is a plain CharField — looks like
    passwords are stored in clear text; confirm and consider Django's
    auth/hashing facilities instead.
    """
    dependencies = [
        ('student', '0016_stock_details'),
    ]
    operations = [
        migrations.CreateModel(
            name='register',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('id_no', models.CharField(max_length=7)),
                ('email', models.CharField(max_length=100)),
                ('mobile', models.BigIntegerField(blank=True, null=True)),
                ('password', models.CharField(max_length=50)),
            ],
        ),
    ]
|
# adapted from https://twistedmatrix.com/documents/current/_downloads/stdiodemo.py
from twisted.internet import stdio, reactor
from twisted.protocols import basic
"""
A basic user console
"""
class Console(basic.LineReceiver):
    """Minimal interactive command console over stdin.

    Each line is parsed as ``command arg1 arg2 ...`` and dispatched to a
    matching ``do_<command>`` method; adding a new ``do_*`` method adds a
    command automatically.
    """

    delimiter = '\n' # unix terminal style newlines. remove this line
                     # for use with Telnet

    def connectionMade(self):
        self.sendLine("Command line interface. Type 'help' for help.")
        self.transport.write('>>> ')

    def connectionLost(self, reason):
        # stop the reactor, only because this is meant to be run in Stdio.
        reactor.stop()

    def lineReceived(self, line):
        """Parse one input line and dispatch it to the matching do_* method."""
        # Ignore blank lines
        if not line: return
        # Parse the command
        commandParts = line.split()
        command = commandParts[0].lower()
        args = commandParts[1:]
        # Dispatch the command to the appropriate method. Note that all you
        # need to do to implement a new command is add another do_* method.
        # The Python-2-only `except E, e:` syntax is replaced with the
        # `as`-form, valid on Python 2.6+ and Python 3 alike.
        try:
            method = getattr(self, 'do_' + command)
        except AttributeError:
            self.sendLine('Error: no such command.')
        else:
            try:
                method(*args)
            except Exception as e:
                self.sendLine('Error: ' + str(e))
        self.transport.write('>>> ')

    def printResult(self, res):
        # Echo a command's return value (if any) followed by a fresh prompt.
        if res is not None:
            self.sendLine(str(res))
            self.transport.write('>>> ')

    def do_help(self, command=None):
        """help [command]: List commands, or show help on the given command"""
        if command:
            self.sendLine(getattr(self, 'do_' + command).__doc__)
        else:
            commands = [cmd[3:] for cmd in dir(self) if cmd.startswith('do_')]
            self.sendLine("Valid commands: " + " ".join(commands))

    def do_echo(self, message):
        """echo [message]: repeat input message"""
        if message:
            self.printResult(message)

    def do_quit(self):
        """quit: Quit this session"""
        self.sendLine('Goodbye.')
        self.transport.loseConnection()
if __name__ == "__main__":
    # Instantiate the Console defined above — the original referenced an
    # undefined `Plugin` class, which raised NameError on startup.
    stdio.StandardIO(Console())
    reactor.run()
import bhi160
import display
import leds
import os
import utime
# Set up the accelerometer; on any failure reboot the badge.  The bare
# except is deliberate best-effort on this embedded target.
try:
    accel = bhi160.BHI160Accelerometer()
except:
    os.reset()
leds.clear()
# Blank the display and switch off the rocket LEDs so only the top LED
# strip is active for the POV text.
with display.open() as d:
    d.clear()
    d.update()
for i in range(3):
    leds.set_rocket(i, 0)
# Characters are encoded in columns per 11 bits in an integer.
# LSB = top, MSB = bottom.
# NOTE(review): 'W' contains 0x8f0, which sets bit 11 (> 0x7ff) — that bit
# is never displayed by the 11-LED loop below; confirm the glyph data.
charset = {
    'A': [0x780, 0x178, 0x107, 0x178, 0x780],
    'B': [0x7ff, 0x663, 0x663, 0x19c],
    'C': [0x1fc, 0x306, 0x603, 0x603],
    'D': [0x7ff, 0x603, 0x603, 0x30e, 0x1fc],
    'E': [0x7ff, 0x663, 0x663, 0x663],
    'F': [0x7ff, 0x063, 0x063, 0x063],
    'G': [0x7ff, 0x603, 0x633, 0x1e6],
    'H': [0x7ff, 0x060, 0x060, 0x7ff],
    'I': [0x603, 0x7ff, 0x603],
    'J': [0x780, 0x603, 0x603, 0x7ff],
    'K': [0x7ff, 0x070, 0x1dc, 0x707],
    'L': [0x7ff, 0x600, 0x600, 0x600],
    'M': [0x7ff, 0x006, 0x0fc, 0x006, 0x7ff],
    'N': [0x7ff, 0x00e, 0x078, 0x1c0, 0x7ff],
    'O': [0x1fc, 0x603, 0x603, 0x603, 0x1fc],
    'P': [0x7ff, 0x063, 0x03e, 0x01c],
    'Q': [0x1fc, 0x603, 0x683, 0x703, 0x7fc],
    'R': [0x7ff, 0x063, 0x3e3, 0x41c],
    'S': [0x67f, 0x663, 0x663, 0x7e3],
    'T': [0x003, 0x003, 0x7ff, 0x003, 0x003],
    'U': [0x1ff, 0x600, 0x600, 0x600, 0x1ff],
    'V': [0x00f, 0x0f0, 0x700, 0x0f0, 0x00f],
    'W': [0x7ff, 0x300, 0x8f0, 0x300, 0x7ff],
    'X': [0x603, 0x1dc, 0x020, 0x1dc, 0x603],
    'Y': [0x003, 0x00c, 0x7f8, 0x00c, 0x003],
    'Z': [0x783, 0x6c3, 0x673, 0x61b, 0x60f],
    '_': [0x400] * 4,
}
# Default nickname, overridden by /nickname.txt when present.
nick = 'sample text'
try:
    with open('/nickname.txt', 'r') as f:
        nick = str(f.read())
except Exception:
    # Best-effort: fall back to the default when the file is missing or
    # unreadable.  `except Exception` (rather than the original bare
    # `except:`) still lets KeyboardInterrupt/SystemExit propagate.
    pass
# Flatten the nickname into a flat list of column bitmasks, one blank
# column between characters; unknown characters render as '_'.
# extend/append avoids the original's quadratic `string = string + ...`.
string = []
for c in nick.upper():
    if c not in charset:
        c = '_'
    string.extend(charset[c])
    string.append(0)
# POV display loop: wait for the badge swing to reverse direction, then
# strobe the nickname's columns onto the LED strip.
while True:
    sign = lambda v: 1 if v >= 0 else -1
    accel_hist = []
    direction = 0
    # Watch the (deadbanded) y acceleration for a sign change between the
    # last two samples, i.e. a reversal of the swing.
    while direction == 0:
        samples = accel.read()
        accel_hist.extend([ 0 if abs(s.y) < 0.2 else s.y for s in samples ])
        accel_hist = accel_hist[max(len(accel_hist)-20, 0):]
        if len(accel_hist) > 2:
            direction = sign(accel_hist[-1]) - sign(accel_hist[-2])
    colors = [(0, 0, 0), (0xff, 0xff, 0xff)]
    string_iter = string
    if direction > 0:
        string_iter = reversed(string_iter)
    # Fix: iterate the direction-aware string_iter — the original iterated
    # `string`, so the reversal computed above was never used and the text
    # was mirrored on every other swing.
    for column in string_iter:
        for l in range(11):
            leds.set(10 - l, colors[column >> l & 1])
        leds.clear()
        utime.sleep(0.001)
|
from django.urls import path, include
from . import views
# URL routes for the books app: a creation form and a listing view.
# NOTE(review): `include` is imported but unused here — confirm before removing.
urlpatterns = [
    path('add_book/', views.add_book),
    path('show_books/', views.show_books),
]
from mamba import description, context, it
from expects import expect, equal, contain, end_with
from fractions import Fraction
from ly2abc.lilypond_music import LilypondMusic
from ly2abc.output_buffer import OutputBuffer
from spec.ly2abc_spec_helper import *
class LyCommand:
    """Test stub of a Lilypond command token with optional sibling nodes."""

    # The mutable default is safe here: `siblings` is only stored and read,
    # never mutated.
    def __init__(self, text, siblings=[]):
        self.token = text
        self._siblings = siblings

    def next_sibling(self):
        return self._siblings[0]
class LyString:
    """Test stub of a Lilypond plaintext node."""

    def __init__(self, text):
        self._text = text

    def plaintext(self):
        return self._text
class Repeat:
    """Test stub of a Lilypond repeat node; iterating it yields the inner
    music items."""

    # The mutable default is safe here: `inner_music` is stored and
    # iterated, never mutated.
    def __init__(self, specifier, repeat_count, inner_music=[]):
        self._specifier = specifier
        self._repeat_count = repeat_count
        self.inner_music = inner_music

    def specifier(self):
        return self._specifier

    def repeat_count(self):
        return self._repeat_count

    def __iter__(self):
        return iter(self.inner_music)
class LyNote:
    """Test stub of a Lilypond note with a pitch and a duration."""

    def __init__(self, pitch, length):
        self.pitch = pitch
        self._length = length

    def length(self):
        return self._length
class LyRest:
    """Test stub of a Lilypond rest with only a duration."""

    def __init__(self, length):
        self._length = length

    def length(self):
        return self._length
class TimeSignature:
    """Test stub of a time signature: numerator() and fraction(), where
    fraction() is 1/denominator."""

    def __init__(self, numerator, denominator):
        self._numerator = numerator
        self._denominator = denominator

    def numerator(self):
        return self._numerator

    def fraction(self):
        return Fraction(1, self._denominator)
class KeySignature:
    """Test stub of a key signature exposing pitch() and mode()."""

    def __init__(self, pitch, mode):
        self._pitch = pitch
        self._mode = mode

    def pitch(self):
        return self._pitch

    def mode(self):
        return self._mode
class Partial:
    """Test stub of a partial (pickup) measure exposing partial_length()."""

    def __init__(self, duration):
        self._duration = duration

    def partial_length(self):
        return self._duration
# Mamba/expects behaviour specs for LilypondMusic.  Each group builds a
# LilypondMusic around an OutputBuffer backed by the spec helper
# TestOutputter and asserts on the ABC text that gets emitted.
# NOTE(review): the repeated `self.l.ouputter = ...` assignments below look
# like a typo for `outputter`; the attribute written is never read, so the
# statements appear to be executed only for pass_time's side effect — confirm.
with description('LilypondMusic') as self:
  with before.each:
    self.output = OutputBuffer(TestOutputter())
    self.l = LilypondMusic(music=None,outputter=self.output)
  with it('can be constructed'):
    expect(LilypondMusic('foo')).not_to(equal(None))
  # --- M:/L: header emission for various meters -----------------------------
  with description('time_signature'):
    with context('in 6/8'):
      with before.each:
        self.l.time_signature(TimeSignature(6,8))
        self.l.outputter.print_buffer()
      with it('sets the bar manager'):
        expect(self.l.bar_manager.numerator).to(equal(6))
        expect(self.l.bar_manager.denominator).to(equal(8))
      with it('sets the unit length to 1/8'):
        expect(self.l.unit_length).to(equal(Fraction(1,8)))
      with it('prints an M line'):
        expect(self.output.all_output()).to(contain('M: 6/8\n'))
      with it('prints an L line'):
        expect(self.output.all_output()).to(contain('L: 1/8\n'))
    with context('in 2/2'):
      with before.each:
        self.l.time_signature(TimeSignature(2,2))
        self.l.outputter.print_buffer()
      with it('sets the bar manager'):
        expect(self.l.bar_manager.numerator).to(equal(2))
        expect(self.l.bar_manager.denominator).to(equal(2))
      with it('sets the unit length to 1/4'):
        expect(self.l.unit_length).to(equal(Fraction(1,4)))
      with it('prints an M line'):
        expect(self.output.all_output()).to(contain('M: 2/2\n'))
      with it('prints an L line'):
        expect(self.output.all_output()).to(contain('L: 1/4\n'))
    with context('in 2/4'):
      with before.each:
        self.l.time_signature(TimeSignature(2,4))
        self.l.outputter.print_buffer()
      with it('sets the bar manager'):
        expect(self.l.bar_manager.numerator).to(equal(2))
        expect(self.l.bar_manager.denominator).to(equal(4))
      with it('sets the unit length to 1/16'):
        expect(self.l.unit_length).to(equal(Fraction(1,16)))
      with it('prints an M line'):
        expect(self.output.all_output()).to(contain('M: 2/4\n'))
      with it('prints an L line'):
        expect(self.output.all_output()).to(contain('L: 1/16\n'))
    # mid-tune meter change: M: is re-emitted, L: is kept
    with context('a change in time signature from 6/8 to 2/2'):
      with before.each:
        self.l.time_signature(TimeSignature(6,8))
        self.l.ouputter = self.l.bar_manager.pass_time(Fraction(6,8))
        self.l.time_signature(TimeSignature(2,2))
        self.l.ouputter = self.l.bar_manager.pass_time(Fraction(2,2))
      with it('sets the bar manager'):
        expect(self.l.bar_manager.numerator).to(equal(2))
        expect(self.l.bar_manager.denominator).to(equal(2))
      with it('does not change the unit length'):
        expect(self.l.unit_length).to(equal(Fraction(1,8)))
      with it('prints an M for the changed time signature'):
        expect(self.output.all_output()).to(contain("M: 2/2\n"))
      with it('does not print an L line for the changed time signature'):
        expect(self.output.all_output()).not_to(contain('L: 1/4\n'))
  # --- K: header emission and note-context key handling ---------------------
  with description('key_signature'):
    with context('in C major'):
      with before.each:
        self.l.key_signature(KeySignature(Pitch.c,'major'))
        self.l.outputter.print_buffer()
      with it('sets the key in the note context'):
        expect(self.l.note_context.sharps).to(equal(0))
      with it('outputs the key'):
        expect(self.output.all_output()).to(contain('K: C major\n'))
    with context('in F# minor'):
      with before.each:
        self.l.key_signature(KeySignature(Pitch.fs,'minor'))
        self.l.outputter.print_buffer()
      with it('sets the key in the note context'):
        expect(self.l.note_context.sharps).to(equal(3))
      with it('outputs the key'):
        expect(self.output.all_output()).to(contain('K: F# minor\n'))
    with context('with a piece in 4/4 in C major'):
      with before.each:
        self.l.time_signature(TimeSignature(4,4))
        self.l.key_signature(KeySignature(Pitch.c,'major'))
        self.base_pitch = Pitch.c
        self.l.last_pitch = self.base_pitch
      with it('outputs a barline before a meter change'):
        self.l.music_list(ly_snippet("{ c1 \\time 6/8 c4. c4. }"))
        self.l.pass_time()
        expect(self.output.all_output()).to(contain("| \nM: 6/8\n"))
      with it('outputs a barline marker before a key change'):
        self.l.music_list(ly_snippet("{ c1 \\key ees\\dorian c1 }"))
        self.l.pass_time()
        expect(self.output.all_output()).to(contain('C8 | \nK: Eb dorian\n'))
      with it('outputs notes inside it'):
        self.l.music_list(ly_snippet("{ c8 d e }"))
        self.l.pass_time()
        expect(self.output.all_output()).to(contain('CDE'))
      with it('handles relative octave'):
        self.l.music_list(ly_snippet("{ c8 c' d}"))
        self.l.pass_time()
        expect(self.output.all_output()).to(contain('Ccd'))
    with context('partial'):
      with it('rewinds the elapsed time by the length of the partial'):
        self.l.partial(Partial(Fraction(1,4)))
        expect(self.l.bar_manager.elapsed_time).to(equal(Fraction(-1,4)))
  # NOTE(review): this group reads self.base_pitch, which is only assigned in
  # the 4/4 C-major context's before.each above — confirm the intended setup.
  with description('note'):
    with context('with a quarter note E above middle C'):
      with before.each:
        self.l.relative_mode = True
        self.ly_note = LyNote(Pitch.e,Fraction(1/4))
        self.l.note(self.ly_note)
        self.l.pass_time()
        self.output.reify()
      with it('passes time equaling the length of the note'):
        expect(self.l.bar_manager.elapsed_time).to(equal(Fraction(1/4)))
      with it('makes it absolute with respect to the previous pitch'):
        expect(self.ly_note.pitch.absolute).to(equal(self.base_pitch))
      with it('prints the note'):
        expect(self.output.outputter.items).to(contain('E2'))
      with it('updates the last pitch'):
        expect(self.l.last_pitch).to(equal(self.l.last_pitch))
  # --- translation of Lilypond \bar commands to ABC barlines ----------------
  with description('barline'):
    def barline(self,lilypond_music,text):
      lilypond_music.command(LyCommand("\\bar",siblings=[LyString(text)]))
      lilypond_music.outputter.print_buffer()
    with it('outputs the bar immediately'):
      self.barline(self.l,'||')
      expect(self.output.all_output()).to(end_with(' || '))
    with it('outputs a normal barline if requested'):
      self.barline(self.l,'|')
      expect(self.output.all_output()).to(end_with(' | '))
    with it('translates a thick-thin double bar to ABC'):
      self.barline(self.l,'.|')
      expect(self.output.all_output()).to(end_with(' [| '))
    with it('translates a thick-thin double bar to ABC'):
      self.barline(self.l,'|.')
      expect(self.output.outputter.items[-1]).to(equal(' |] '))
    with it('prints unhandled barlines as a regular barline'):
      self.barline(self.l,'foo')
      expect(self.output.outputter.items[-1]).to(equal(' | '))
    with it('prevents double-printing barlines'):
      self.l.ouputter = self.l.bar_manager.pass_time(1)
      self.l.command(LyCommand("\\bar",siblings=[LyString('||')]))
      self.l.pass_time()
      expect(self.output.outputter.all_output()).to(end_with(" || "))
    with it('prints barlines in the middle of a measure'):
      self.l.ouputter = self.l.bar_manager.pass_time(1/4)
      self.l.command(LyCommand("\\bar",siblings=[LyString('||')]))
      self.l.ouputter = self.l.bar_manager.pass_time(3/4)
      self.l.pass_time()
      expect(self.output.all_output()).to(contain("|| | "))
    with it('handles repeat barlines'):
      self.l.command(LyCommand("\\bar",siblings=[LyString('.|:')]))
      self.l.ouputter = self.l.bar_manager.pass_time(1)
      self.l.command(LyCommand("\\bar",siblings=[LyString(':|.')]))
      self.l.pass_time()
      expect(self.output.all_output()).to(contain("|: :|"))
  with description('rest'):
    with context('with a 8th rest'):
      with before.each:
        self.l.rest(LyRest(Fraction(1/8)))
        self.l.pass_time()
        self.output.reify()
      with it('outputs the rest'):
        expect(self.output.outputter.items).to(contain('z'))
      with it('passes time equaling the length of the rest'):
        expect(self.l.bar_manager.elapsed_time).to(equal(Fraction(1/8)))
  # --- volta and unfold repeats ---------------------------------------------
  with description('repeat'):
    def handlers(self):
      return { str: lambda x,_: self.str_handler(self,x,None) }
    with context('with a volta repeat'):
      with before.each:
        def str_handler(string,_):
          self.l.ouputter = self.l.bar_manager.pass_time(Fraction(1,2))
          self.l.outputter.output_test(string)
        handlers = { str: lambda x,_: str_handler(x,None) }
        self.l.repeat(Repeat('volta',2,["some","thing"]), handlers)
        self.l.pass_time()
        self.output.reify()
      with it('first outputs an opening repeat'):
        expect(self.output.outputter.all_output()).to(contain(' |: '))
      with it('traverses inner music with the given handlers'):
        expect(self.output.outputter.items).to(contain('some'))
        expect(self.output.outputter.items).to(contain('thing'))
      with it('outputs a closing repeat'):
        expect(self.output.outputter.all_output()).to(contain(' :| '))
    with context('with an unfolded repeat'):
      with before.each:
        handlers = { str: lambda x,_: self.l.outputter.output_test(x) }
        self.l.repeat(Repeat('unfold',3,["some","thing"]),handlers)
      with it('does not output a repeat bar'):
        expect(self.output.outputter.items).not_to(contain('|: '))
      with it('duplicates the inner music the specified number of times'):
        expect(self.output.outputter.items).to(contain('some','thing','some','thing','some','thing'))
    with it('combines :| and |: if needed'):
      def str_handler(string,_):
        self.l.ouputter = self.l.bar_manager.pass_time(Fraction(1,2))
        self.l.outputter.output_test(string)
      handlers = { str: lambda x,_: str_handler(x,None) }
      self.l.relative_mode = True
      self.l.music_list(ly_snippet("{ \\repeat volta 2 { c2 c } \\repeat volta 2 { c2 c } }"))
      self.l.pass_time()
      expect(self.output.all_output()).to(contain('|: C4 C4 :: C4 C4 :|'))
    with context('traverse'):
      with it('calls the specified handler for each item in the node'):
        self.l.traverse([1,2,3],{ int: lambda x,_: self.l.outputter.output_test(x) })
        expect(self.output.outputter.items).to(equal([1,2,3]))
      with it('iterates on children for classes without a handler'):
        self.l.traverse([1,2,[3,4],5],{ int: lambda x,_: self.l.outputter.output_test(x) })
        expect(self.output.outputter.items).to(equal([1,2,3,4,5]))
|
###############################################################
# Image Feature classification using Random Forest classifier #
# Input: Training and Test feature file in *.csv format #
# Output: Accuracy #
###############################################################
import pandas as pd
import numpy as np
# In[43]:
# Provide the absolute path of the Train and Test feature files written in .csv format
train_dataset = pd.read_csv('Color_ImageReadWriteWang_1000_ZM_F_OC_JMagn_7_Train.csv', header=None)
test_dataset = pd.read_csv('Color_ImageReadWriteWang_1000_ZM_F_OC_JMagn_7_Test.csv', header=None)
# (rows, columns) of the training set; the last column is the class label
rc = train_dataset.shape
r = rc[0]
c = rc[1]
# In[44]:
# Arrange the dataset into training features, and training class labels and shows the sample of features
X_train = train_dataset.iloc[:,0:c-1].values
y_train = train_dataset.iloc[:,c-1].values
print('The independent features set: ')
print(X_train[:3,:])
print('The dependent variable: ')
print(y_train[:5])
# In[45]:
# Arrange the dataset into test features, and test class labels and shows the sample of features
X_test = test_dataset.iloc[:,0:c-1].values
y_test = test_dataset.iloc[:,c-1].values
print('The independent features set: ')
print(X_test[:3,:])
print('The dependent variable: ')
print(y_test[:5])
# In[46]:
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
import time
start_time = time.time()
# optimizing the results using grid search over the training features using cross validation
# (only n_estimators is actually searched; criterion/random_state are fixed)
classifier = RandomForestClassifier()
param_grid = {'n_estimators':[200, 400, 600, 800, 1000, 1200, 1400, 1600],
    'criterion':['entropy'],
    #'random_state':[20, 40, 60, 80, 100],
    'random_state':[0],
    'n_jobs':[-1]
}
CV_rfc = GridSearchCV(estimator=classifier, param_grid=param_grid, cv=5)
CV_rfc.fit(X_train, y_train)
print (CV_rfc.best_params_)
print("--- %s seconds ---" % (time.time() - start_time))
# In[47]:
# Intializing the parameters to fit the classifier on test dataset
# NOTE(review): GridSearchCV already refits the best model (refit=True by
# default), so CV_rfc.best_estimator_ could be used directly instead of
# retraining here — confirm before changing.
bestParam = CV_rfc.best_params_
start_time = time.time()
classifier = RandomForestClassifier(n_estimators = bestParam['n_estimators'],
    criterion = 'entropy', random_state = 0)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
print(y_pred)
print (CV_rfc.best_params_)
print("--- %s seconds ---" % (time.time() - start_time))
# In[50]:
#Import scikit-learn metrics module for accuracy calculation
from sklearn import metrics
# Model Accuracy, how often is the classifier correct?
print("Accuracy:",metrics.accuracy_score(y_test, y_pred))
|
import inspect
import requests
from deepviz.result import *
try:
import json
except:
import simplejson as json
# Deepviz REST API endpoints used by the Intel client below.
URL_INTEL_REPORT = "https://api.deepviz.com/intel/report"
URL_INTEL_SEARCH = "https://api.deepviz.com/intel/search"
URL_INTEL_IP = "https://api.deepviz.com/intel/network/ip"
URL_INTEL_DOMAIN = "https://api.deepviz.com/intel/network/domain"
URL_INTEL_SEARCH_ADVANCED = "https://api.deepviz.com/intel/search/advanced"
class Intel:
    def __init__(self):
        # Stateless client: every request carries its own API key.
        pass
def sample_info(self, md5=None, api_key=None, filters=None):
if not api_key:
return Result(status=INPUT_ERROR, msg="API key cannot be null or empty String")
if not md5:
return Result(status=INPUT_ERROR, msg="MD5 cannot be null or empty String")
if not filters:
return Result(status=INPUT_ERROR, msg="filters cannot be null or empty")
if len(filters) > 10:
return Result(status=INPUT_ERROR, msg="Parameter 'filters' takes at most 10 values ({count} given).".format(count=len(filters)))
body = json.dumps(
{
"md5": md5,
"api_key": api_key,
"output_filters": filters
}
)
try:
r = requests.post(URL_INTEL_REPORT, data=body)
except Exception as e:
return Result(status=NETWORK_ERROR, msg="Error while connecting to Deepviz: %s" % e)
try:
data = json.loads(r.content)
except Exception as e:
return Result(status=INTERNAL_ERROR, msg="Error loading Deepviz response: %s" % e)
if r.status_code == 428:
return Result(status=PROCESSING, msg="Analysis is running")
else:
try:
data = json.loads(r.content)
except Exception as e:
return Result(status=INTERNAL_ERROR, msg="Error loading Deepviz response: %s" % e)
if r.status_code == 200:
return Result(status=SUCCESS, msg=data['data'])
else:
if r.status_code >= 500:
return Result(status=SERVER_ERROR, msg="{status_code} - Error while connecting to Deepviz: {errmsg}".format(status_code=r.status_code, errmsg=data['errmsg']))
else:
return Result(status=CLIENT_ERROR, msg="{status_code} - Client error: {errmsg}".format(status_code=r.status_code, errmsg=data['errmsg']))
def sample_result(self, md5=None, api_key=None):
if not api_key:
return Result(status=INPUT_ERROR, msg="API key cannot be null or empty String")
if not md5:
return Result(status=INPUT_ERROR, msg="MD5 cannot be null or empty String")
return self.sample_info(md5, api_key, ["classification"])
def ip_info(self, api_key=None, ip=None, filters=None):
if not api_key:
return Result(status=INPUT_ERROR, msg="API key cannot be null or empty String")
if not ip:
msg = "Parameters missing or invalid. You must specify an IP"
return Result(status=INPUT_ERROR, msg=msg)
if not isinstance(ip, str):
msg = "You must provide the IP in a string"
return Result(status=INPUT_ERROR, msg=msg)
if filters is not None:
if not isinstance(filters, list):
msg = "You must provide one or more output filters in a list"
return Result(status=INPUT_ERROR, msg=msg)
elif not filters:
msg = "You must provide at least one filter"
return Result(status=INPUT_ERROR, msg=msg)
if filters is not None:
body = json.dumps(
{
"api_key": api_key,
"ip": ip,
"output_filters": filters
}
)
else:
body = json.dumps(
{
"api_key": api_key,
"ip": ip,
}
)
try:
r = requests.post(URL_INTEL_IP, data=body)
except Exception as e:
return Result(status=NETWORK_ERROR, msg="Error while connecting to Deepviz: %s" % e)
try:
data = json.loads(r.content)
except Exception as e:
return Result(status=INTERNAL_ERROR, msg="Error loading Deepviz response: %s" % e)
if r.status_code == 200:
return Result(status=SUCCESS, msg=data['data'])
else:
if r.status_code >= 500:
return Result(status=SERVER_ERROR, msg="{status_code} - Error while connecting to Deepviz: {errmsg}".format(status_code=r.status_code, errmsg=data['errmsg']))
else:
return Result(status=CLIENT_ERROR, msg="{status_code} - Client error: {errmsg}".format(status_code=r.status_code, errmsg=data['errmsg']))
def domain_info(self, api_key=None, domain=None, filters=None):
if not api_key:
return Result(status=INPUT_ERROR, msg="API key cannot be null or empty String")
if not domain:
msg = "Parameters missing or invalid. You must specify a domain"
return Result(status=INPUT_ERROR, msg=msg)
elif not isinstance(domain, str):
msg = "You must provide one a domain in a string"
return Result(status=INPUT_ERROR, msg=msg)
if filters is not None:
if not isinstance(filters, list):
msg = "You must provide one or more output filters in a list"
return Result(status=INPUT_ERROR, msg=msg)
elif not filters:
msg = "You must provide at least one filter"
return Result(status=INPUT_ERROR, msg=msg)
if filters:
body = json.dumps(
{
"api_key": api_key,
"domain": domain,
"output_filters": filters,
}
)
else:
body = json.dumps(
{
"api_key": api_key,
"domain": domain,
}
)
try:
r = requests.post(URL_INTEL_DOMAIN, data=body)
except Exception as e:
msg = "Error while connecting to Deepviz: %s" % e
return Result(status=NETWORK_ERROR, msg=msg)
try:
data = json.loads(r.content)
except Exception as e:
return Result(status=INTERNAL_ERROR, msg="Error loading Deepviz response: %s" % e)
if r.status_code == 200:
return Result(status=SUCCESS, msg=data['data'])
else:
if r.status_code >= 500:
return Result(status=SERVER_ERROR, msg="{status_code} - Error while connecting to Deepviz: {errmsg}".format(status_code=r.status_code, errmsg=data['errmsg']))
else:
return Result(status=CLIENT_ERROR, msg="{status_code} - Client error: {errmsg}".format(status_code=r.status_code, errmsg=data['errmsg']))
def search(self, api_key=None, search_string=None, start_offset=None, elements=None):
if not api_key:
return Result(status=INPUT_ERROR, msg="API key cannot be null or empty String")
if not search_string:
return Result(status=INPUT_ERROR, msg="String to be searched cannot be null or empty")
if start_offset is not None and elements is not None:
result_set = ["start=%d" % start_offset, "rows=%d" % elements]
body = json.dumps(
{
"result_set": result_set,
"string": search_string,
"api_key": api_key,
}
)
else:
body = json.dumps(
{
"string": search_string,
"api_key": api_key,
}
)
try:
r = requests.post(URL_INTEL_SEARCH, data=body)
except Exception as e:
return Result(status=NETWORK_ERROR, msg="Error while connecting to Deepviz: %s" % e)
try:
data = json.loads(r.content)
except Exception as e:
return Result(status=INTERNAL_ERROR, msg="Error loading Deepviz response: %s" % e)
if r.status_code == 200:
return Result(status=SUCCESS, msg=data['data'])
else:
if r.status_code >= 500:
return Result(status=SERVER_ERROR, msg="{status_code} - Error while connecting to Deepviz: {errmsg}".format(status_code=r.status_code, errmsg=data['errmsg']))
else:
return Result(status=CLIENT_ERROR, msg="{status_code} - Client error: {errmsg}".format(status_code=r.status_code, errmsg=data['errmsg']))
def advanced_search(self, api_key=None, sim_hash=None, created_files=None, imp_hash=None, url=None, strings=None,
ip=None, asn=None, classification=None, rules=None, country=None, never_seen=None,
time_delta=None, result_set=None, ip_range=None, domain=None):
if not api_key:
return Result(status=INPUT_ERROR, msg="API key cannot be null or empty String")
frame = inspect.currentframe()
args, _, _, values = inspect.getargvalues(frame)
body = {
'api_key': api_key
}
for i in args:
if values[i] and i != "self" and i != "api_key":
if i == "sim_hash" or i == "created_files" or i == "imp_hash" or i == "url" or i == "strings" or i == "ip" or i == "asn" or i == "rules" or i == "country" or i == "result_set" or i == "domain":
if isinstance(values[i], list):
body[i] = values[i]
else:
msg = "Value '%s' must be in a list form" % i
return Result(status=INPUT_ERROR, msg=msg)
else:
if isinstance(values[i], str):
body[i] = values[i]
else:
msg = "Value '%s' must be in a string form" % i
return Result(status=INPUT_ERROR, msg=msg)
final_body = json.dumps(body)
try:
r = requests.post(URL_INTEL_SEARCH_ADVANCED, data=final_body)
except Exception as e:
return Result(status=NETWORK_ERROR, msg="Error while connecting to Deepviz: %s" % e)
try:
data = json.loads(r.content)
except Exception as e:
return Result(status=INTERNAL_ERROR, msg="Error loading Deepviz response: %s" % e)
if r.status_code == 200:
msg = data['data']
return Result(status=SUCCESS, msg=msg)
else:
if r.status_code >= 500:
return Result(status=SERVER_ERROR, msg="{status_code} - Error while connecting to Deepviz: {errmsg}".format(status_code=r.status_code, errmsg=data['errmsg']))
else:
return Result(status=CLIENT_ERROR, msg="{status_code} - Client error: {errmsg}".format(status_code=r.status_code, errmsg=data['errmsg'])) |
import csv
import sys
import math
import operator
import numpy as np
#open and read file depending on file
# Reads in data
# Build a movieID -> title lookup from movies.csv.
# NOTE(review): the CSV header row is not skipped here, so the entry
# movies["movieId"] == "title" also lands in the lookup — confirm intent.
with open('movies.csv') as f:
    lines = csv.reader(f, delimiter=',')
    movies={}
    for row in lines:
        movieID= row[0]
        movieTitle= row[1]
        movies[movieID] = str(movieTitle)
    f.close()  # redundant: the with-statement already closes the file
#open and read csv rating file
# Reads in data
# ratings_dict maps movieID -> list of [userID, rating] pairs.
with open('ratings.csv') as f:
    lines = csv.reader(f, delimiter=',')
    ratings_dict={}
    userIDarray = []
    #count = 0
    for row in lines:
        userID=row[0]
        movieID=row[1]
        rating=row[2]
        userIDarray.append(userID)
        if (movieID == "movieId"): # skips row of labels
            continue
        if not (movieID in ratings_dict): #will enter only if movieID has not been made
            ratings_dict[movieID] =[[int(userID),float(rating)]]
            #count += 1
            #if(count == 399):
                #break
        else:
            ratings_dict[movieID] = ratings_dict[movieID]+[[int(userID),float(rating)]]
            #count += 1
            #if(count == 399):
                #break
    f.close()  # redundant: the with-statement already closes the file
#print("Ratings Orginal")
#print(ratings_dict)
#User Rating Averages
# Mean rating per movie, used below for mean-centering.
# NOTE(review): `sum` shadows the builtin of the same name.
sum = 0
average ={}
for key in ratings_dict:
    for i in range(len(ratings_dict[key])):
        sum = sum + ratings_dict[key][i][1]
    average[key]=float(sum/len(ratings_dict[key]))
    sum = 0
#print("average")
#print(average)
#Normalizing Data
# Mean-center each movie's ratings in place, then keep a per-movie copy
# sorted by centered rating, highest first.
noramlizedMovieRatings ={}
for key in ratings_dict:
    for i in range(len(ratings_dict[key])):
        #print(ratings_dict[key][i][1])
        ratings_dict[key][i][1] = ratings_dict[key][i][1] - average[key]
        #print(ratings_dict[key][i][1])
    noramlizedMovieRatings[key]= sorted(ratings_dict[key], key=lambda x: x[1], reverse=True)#sorted(ratings_dict[key], key=operator.itemgetter(0), reverse=True)
#print("Normalized")
#print(noramlizedMovieRatings)
numerator = 0
denomenator = 0
denom1 =0
denom2 = 0
#Center Cosine Similarity Calcuation
# Pairwise centered-cosine similarity between movies, over users who rated
# both movies.
# NOTE(review): numerator/denom1/denom2 are never reset between
# (key, keyTwo) pairs, so each similarity accumulates terms from all
# previous pairs — confirm whether this is intended.
cosineSimilarities = {}
for key in ratings_dict:
    for keyTwo in ratings_dict:
        for i in range(len(ratings_dict[key])): #SOOO, sorry to use nested for loops but I didn't know how to do it any other way because I can't use pandas on my laptop
            for j in range(len(ratings_dict[keyTwo])):
                if(ratings_dict[key][i][0]==(ratings_dict[keyTwo][j][0])):
                    numerator += ratings_dict[key][i][1] *ratings_dict[keyTwo][j][1]
                    #print(ratings_dict[key][i][1])
                    #print(ratings_dict[keyTwo][j][1])
                    denom2 += ratings_dict[keyTwo][j][1]**2
            denom1 += ratings_dict[key][i][1]**2
        denomenator = math.sqrt(denom1*denom2)
        #print("numer:" + str(numerator))
        if not (key in cosineSimilarities): #will enter only if movieID has not been made
            if(denomenator <= 0):
                continue
            cosineSimilarities[key] =[[keyTwo,numerator/denomenator]]
        else:
            if(denomenator <= 0):
                continue
            cosineSimilarities[key] = cosineSimilarities[key]+[[keyTwo,numerator/denomenator]]
#print("cosine sim")
#print(cosineSimilarities)
#Finding User's Top Movies
favorites = {}
# NOTE(review): list.sort(reverse=True) on [movieID, similarity] pairs sorts
# lexicographically by the movieID string, not by similarity — confirm.
for key in cosineSimilarities:
    cosineSimilarities[key].sort(reverse=True)
# NOTE(review): i is reset each iteration and the if/else below runs once per
# key, so favorites[key] only ever receives a single entry and the i == 6
# guard is unreachable in practice — confirm intent (top-5 was likely meant).
for key in cosineSimilarities:
    i = 0
    if not (key in favorites): #will enter only if movieID has not been made
        favorites[key] = [[cosineSimilarities[key][i][0]]]
        #print(cosineSimilarities[key][i][0])
        i += 1
    else:
        favorites[key] = favorites[key]+[[cosineSimilarities[key][i][0]]]
        #print(cosineSimilarities[key][i][0])
        i += 1
        if(i == 6):
            continue
print("favorites")
print(favorites)
topFiveMovieRecommendations= {}
bestMovie=0
for i in userIDarray:
    for key in noramlizedMovieRatings:
        bestMovie = noramlizedMovieRatings[key][1][0]
    # NOTE(review): favorites[bestMovie] holds single-element *lists*, so each
    # favorites[bestMovie][k] is a list and movies[<list>] raises TypeError
    # (lists are unhashable); indices 1-4 may also not exist — verify.
    favorites[bestMovie][0]= movies[favorites[bestMovie][0]]
    favorites[bestMovie][1]= movies[favorites[bestMovie][1]]
    favorites[bestMovie][2]= movies[favorites[bestMovie][2]]
    favorites[bestMovie][3]= movies[favorites[bestMovie][3]]
    favorites[bestMovie][4]= movies[favorites[bestMovie][4]]
    topFiveMovieRecommendations[i]= [favorites[bestMovie]]
print(topFiveMovieRecommendations)
#Printing Output to Output File
original_stdout = sys.stdout # Save a reference to the original standard output
with open('output.txt', 'w') as f:
    sys.stdout = f # Change the standard output to the file created.
    #print("Cosine Similarities")
    print("Movie Reccomendations")
    for key in topFiveMovieRecommendations:
        # NOTE(review): topFiveMovieRecommendations[key] is a list; str + list
        # raises TypeError — str(...) is needed here.
        print(key +" : " + topFiveMovieRecommendations[key])
    #print("Movie Reccomendations")
    #print(topFiveMovieRecommendations)
    sys.stdout = original_stdout # Reset the standard output to its original value
    f.close()  # redundant: the with-statement already closes the file
|
'''
David Lettier (C) 2013.
http://www.lettier.com/
This script plots the 3D path/trajectory of the ball.
'''
import os;
import sys;
from os import listdir;
from os.path import isfile, join;
import matplotlib.pyplot as plt;
from mpl_toolkits.mplot3d import Axes3D;
# Ball path experiment data directory.
directory = "../data/";
# Get and sort file names.
experiment_files = [ f for f in listdir( directory ) if isfile( join( directory, f ) ) ];
if ( len( experiment_files ) == 0 ):
    print( "\nNo files.\n" );
    sys.exit( 0 );
# Get rid of the max distances file as one of the files to read in.
# NOTE(review): if no "max_distances_to_standard" file exists, entry 0 is
# deleted anyway — confirm the file is always present.
distances_file_index = 0;
for i in xrange( 0, len( experiment_files ) ):
    if experiment_files[ i ].find( "max_distances_to_standard" ) != -1:
        distances_file_index = i;
        break;
del experiment_files[ distances_file_index ];
# Y_M_D_H_M_S.N-N#0,0#.csv
# 0         5
# Sort chronologically by the Y_M_D_H_M_S timestamp prefix.
experiment_files = sorted( experiment_files, key = lambda x: int( "".join( x.split( "_" )[ 0 : 5 ] ) ) );
# List files.
print( "\nFiles: " );
i = 0;
for experiment_file in experiment_files:
    print ( "[" + str( i ) + "] " + experiment_file );
    i += 1;
user_input = raw_input( "\nPlot which file ([q] to quit or [a] for all or [a1] to plot all with one)?\n" );
# Quit?
if ( user_input[ 0 ] == "q" ):
    sys.exit( 0 );
plot_all_with_one = False;
# Plot all?
if ( user_input == "a" ):
    # Generate plot setup.
    matplotlib_colors = [ "b", "g", "r", "c", "m", "y", "k" ];
    matplotlib_colors_index = 0;
    fig = plt.figure( figsize = ( 5 * 3.13, 5 * 3.13 ) );
    fig.suptitle( "BBAutoTune", fontsize = 18 );
    ax = fig.gca( projection = "3d" );
    ax.set_title( "Parameter Influence" );
    ax.set_xlabel( "x-axis" );
    ax.set_ylabel( "y-axis" );
    ax.set_zlabel( "z-axis" );
    ax.view_init( 10, 180 + 45 );
    i = 0;
    # Draw every experiment file onto the single shared axes.
    while ( len( experiment_files ) != 0 ):
        # Open files.
        csv_file = None;
        try:
            csv_file = open( directory + experiment_files[ i ], "r" );
        except:
            print( "File does not exist: [" + str( i ) + "] " + directory + experiment_files[ i ] );
            sys.exit( 1 );
        # Gather points (columns 1-3 of each CSV row are x, y, z).
        x_points = [ ];
        y_points = [ ];
        z_points = [ ];
        titles = csv_file.readline( );
        line = csv_file.readline( );
        while ( line != "" ):
            line = line.rstrip( '\n' );
            line = line.rsplit( "," );
            x_points.append( float( line[ 1 ] ) );
            y_points.append( float( line[ 2 ] ) );
            z_points.append( float( line[ 3 ] ) );
            line = csv_file.readline( );
        # Create plot label.
        # Y_M_D_H_M_S.N-N#0,0#.csv
        # 0           1        2
        # N-N-N#0,0#
        # 0     1  2
        # 0,0
        # 0 1
        label = experiment_files[ i ].split( "." )[ 1 ];
        parameter_name = " ".join( label.split( "#" )[ 0 ].split( "-" ) );
        parameter_value = ".".join( label.split( "#" )[ 1 ].split( "," ) );
        label = parameter_name + ": " + parameter_value;
        # Add points to plot.
        ax.plot( x_points, y_points, z_points, matplotlib_colors[ matplotlib_colors_index ] + "o-", label = label );
        # Remove file from available choices.
        del experiment_files[ i ];
        if ( len( experiment_files ) == 0 ):
            break;
        # Use next available color.
        matplotlib_colors_index += 1;
        matplotlib_colors_index = matplotlib_colors_index % len( matplotlib_colors );
elif ( user_input == "a1" ):
    # Plot every file paired with one chosen reference file.
    plot_all_with_one = True;
    user_input = raw_input( "\nWhich one to plot all with ([0], ..., [" + str( i - 1 ) + "])?\n" );
    # Check user_input.
    while ( True ):
        try:
            user_input = int( user_input );
            if ( user_input > ( i - 1 ) ):
                user_input = raw_input( "\nWhich one to plot all with ([0], ..., [" + str( i - 1 ) + "])?\n" );
                continue;
            break;
        except:
            user_input = raw_input( "\nWhich one to plot all with ([0], ..., [" + str( i - 1 ) + "])?\n" );
            continue;
    show_plots = raw_input( "\nShow plots ([y] for yes or [n] for no)?\n" );
    if ( show_plots[ 0 ] == "y" ):
        show_plots = True;
    # NOTE(review): when the answer is not "y", show_plots stays a non-empty
    # string, which is truthy, so `if ( show_plots )` below still shows the
    # plots instead of saving — confirm intent.
    matplotlib_colors = [ "b", "g", "r", "c", "m", "y", "k" ];
    for i in xrange( 0, len( experiment_files ) ):
        if i == user_input:
            continue;
        # User Input #########################################################
        matplotlib_colors_index = 0;
        fig = plt.figure( figsize = ( 5 * 3.13, 5 * 3.13 ) );
        fig.suptitle( "BBAutoTune", fontsize = 18 );
        fig.canvas.set_window_title( "Figure " + str( i ) );
        ax = fig.gca( projection = "3d" );
        ax.set_title( "Parameter Influence" );
        ax.set_xlabel( "x-axis" );
        ax.set_ylabel( "y-axis" );
        ax.set_zlabel( "z-axis" );
        ax.view_init( 10, 180 + 45 );
        # Open files.
        csv_file = None;
        try:
            csv_file = open( directory + experiment_files[ user_input ], "r" );
        except:
            print( "File does not exist: [" + str( user_input ) + "] " + directory + experiment_files[ user_input ] );
            sys.exit( 1 );
        # Gather points.
        x_points = [ ];
        y_points = [ ];
        z_points = [ ];
        titles = csv_file.readline( );
        line = csv_file.readline( );
        while ( line != "" ):
            line = line.rstrip( '\n' );
            line = line.rsplit( "," );
            x_points.append( float( line[ 1 ] ) );
            y_points.append( float( line[ 2 ] ) );
            z_points.append( float( line[ 3 ] ) );
            line = csv_file.readline( );
        # Create plot label.
        # Y_M_D_H_M_S.N-N#0,0#.csv
        # 0           1        2
        # N-N-N#0,0#
        # 0     1  2
        # 0,0
        # 0 1
        label = experiment_files[ user_input ].split( "." )[ 1 ];
        parameter_name = " ".join( label.split( "#" )[ 0 ].split( "-" ) );
        parameter_value = ".".join( label.split( "#" )[ 1 ].split( "," ) );
        label = parameter_name + ": " + parameter_value;
        # Add points to plot.
        ax.plot( x_points, y_points, z_points, matplotlib_colors[ matplotlib_colors_index ] + "o-", label = label );
        # Use next available color.
        matplotlib_colors_index += 1;
        matplotlib_colors_index = matplotlib_colors_index % len( matplotlib_colors );
        # i #########################################################
        # Open files.
        csv_file = None;
        try:
            csv_file = open( directory + experiment_files[ i ], "r" );
        except:
            print( "File does not exist: [" + str( user_input ) + "] " + directory + experiment_files[ i ] );
            sys.exit( 1 );
        # Gather points.
        x_points = [ ];
        y_points = [ ];
        z_points = [ ];
        titles = csv_file.readline( );
        line = csv_file.readline( );
        while ( line != "" ):
            line = line.rstrip( '\n' );
            line = line.rsplit( "," );
            x_points.append( float( line[ 1 ] ) );
            y_points.append( float( line[ 2 ] ) );
            z_points.append( float( line[ 3 ] ) );
            line = csv_file.readline( );
        # Create plot label.
        # Y_M_D_H_M_S.N-N#0,0#.csv
        # 0           1        2
        # N-N-N#0,0#
        # 0     1  2
        # 0,0
        # 0 1
        label = experiment_files[ i ].split( "." )[ 1 ];
        parameter_name = " ".join( label.split( "#" )[ 0 ].split( "-" ) );
        parameter_value = ".".join( label.split( "#" )[ 1 ].split( "," ) );
        label = parameter_name + ": " + parameter_value;
        # Add points to plot.
        ax.plot( x_points, y_points, z_points, matplotlib_colors[ matplotlib_colors_index ] + "o-", label = label );
        # Use next available color.
        matplotlib_colors_index += 1;
        matplotlib_colors_index = matplotlib_colors_index % len( matplotlib_colors );
        # Show plot(s).
        ax.legend( );
        plt.tight_layout( );
        plt.subplots_adjust( left = 0.0, right = 1.0, top = 1.0, bottom = 0.0 );
        if ( show_plots ):
            plt.show( );
        else:
            print "Saving figure: " + "../figures/" + str( i ) + "_" + experiment_files[ i ].replace( ".csv", ".png" );
            plt.savefig( "../figures/" + str( i ) + "_" + experiment_files[ i ].replace( ".csv", ".png" ) );
else: # Plot one by one.
    # Generate plot setup.
    matplotlib_colors = [ "b", "g", "r", "c", "m", "y", "k" ];
    matplotlib_colors_index = 0;
    fig = plt.figure( figsize = ( 5 * 3.13, 5 * 3.13 ) );
    fig.suptitle( "BBAutoTune", fontsize = 18 );
    ax = fig.gca( projection = "3d" );
    ax.set_title( "Parameter Influence" );
    ax.set_xlabel( "x-axis" );
    ax.set_ylabel( "y-axis" );
    ax.set_zlabel( "z-axis" );
    ax.view_init( 10, 180 + 45 );
    # Check user input.
    while ( True ):
        try:
            user_input = int( user_input );
            if ( user_input > ( i - 1 ) ):
                user_input = raw_input( "\nPlot which file ([q] to quit)?\n" );
                if ( user_input[ 0 ] == "q" ):
                    break;
                    # NOTE(review): unreachable after break — original
                    # structure ambiguous; confirm intent.
                    sys.exit( 0 );
                continue;
            break;
        except:
            user_input = raw_input( "\nPlot which file ([q] to quit)?\n" );
            if ( user_input[ 0 ] == "q" ):
                sys.exit( 0 );
            continue;
    # Open file.
    csv_file = None;
    try:
        csv_file = open( directory + experiment_files[ user_input ], "r" );
    except:
        print( "File does not exist: [" + str( user_input ) + "] " + directory + experiment_files[ user_input ] );
        sys.exit( 1 );
    # Read in points.
    x_points = [ ];
    y_points = [ ];
    z_points = [ ];
    titles = csv_file.readline( );
    line = csv_file.readline( );
    while ( line != "" ):
        line = line.rstrip( '\n' );
        line = line.rsplit( "," );
        x_points.append( float( line[ 1 ] ) );
        y_points.append( float( line[ 2 ] ) );
        z_points.append( float( line[ 3 ] ) );
        line = csv_file.readline( );
    # Create plot label.
    # Y_M_D_H_M_S.N-N#0,0#.csv
    # 0           1        2
    # N-N-N#0,0#
    # 0     1  2
    # 0,0
    # 0 1
    label = experiment_files[ user_input ].split( "." )[ 1 ];
    parameter_name = " ".join( label.split( "#" )[ 0 ].split( "-" ) );
    parameter_value = ".".join( label.split( "#" )[ 1 ].split( "," ) );
    label = parameter_name + ": " + parameter_value;
    # Add points to plot.
    ax.plot( x_points, y_points, z_points, matplotlib_colors[ matplotlib_colors_index ] + "o-", label = label );
    # Remove file from possible options.
    del experiment_files[ user_input ];
    # Use next available color.
    matplotlib_colors_index += 1;
    matplotlib_colors_index = matplotlib_colors_index % len( matplotlib_colors );
    # Plot multiple paths if there are more than one file?
    if ( len( experiment_files ) == 0 ):
        user_input = "0";
    else:
        user_input = raw_input( "\nPlot another file?\n[y] Yes.\n[n] No.\n" );
    while ( user_input[ 0 ] == 'y' ):
        i = 0;
        # List files.
        print( "\nFiles: " );
        for experiment_file in experiment_files:
            print ( "[" + str( i ) + "] " + experiment_file );
            i += 1;
        user_input = raw_input( "\nPlot which file?\n" );
        # Check user input.
        while ( True ):
            try:
                user_input = int( user_input );
                if ( user_input > ( i - 1 ) ):
                    user_input = raw_input( "\nPlot which file?\n" );
                    continue;
                break;
            except:
                user_input = raw_input( "\nPlot which file?\n" );
                continue;
        # Open file.
        csv_file = None;
        try:
            csv_file = open( directory + experiment_files[ user_input ], "r" );
        except:
            print( "File does not exist: [" + str( user_input ) + "] " + directory + experiment_files[ user_input ] );
            sys.exit( 1 );
        # Read in points.
        x_points = [ ];
        y_points = [ ];
        z_points = [ ];
        titles = csv_file.readline( );
        line = csv_file.readline( );
        while ( line != "" ):
            line = line.rstrip( '\n' );
            line = line.rsplit( "," );
            x_points.append( float( line[ 1 ] ) );
            y_points.append( float( line[ 2 ] ) );
            z_points.append( float( line[ 3 ] ) );
            line = csv_file.readline( );
        # Create plot label.
        # Y_M_D_H_M_S.N-N#0,0#.csv
        # 0           1        2
        # N-N-N#0,0#
        # 0     1  2
        # 0,0
        # 0 1
        label = experiment_files[ user_input ].split( "." )[ 1 ];
        parameter_name = " ".join( label.split( "#" )[ 0 ].split( "-" ) );
        parameter_value = ".".join( label.split( "#" )[ 1 ].split( "," ) );
        label = parameter_name + ": " + parameter_value;
        # Add points to plot.
        ax.plot( x_points, y_points, z_points, matplotlib_colors[ matplotlib_colors_index ] + "o-", label = label );
        # Remove file from possible options.
        del experiment_files[ user_input ];
        if ( len( experiment_files ) == 0 ):
            break;
        # Use next available color.
        matplotlib_colors_index += 1;
        matplotlib_colors_index = matplotlib_colors_index % len( matplotlib_colors );
        # Plot another file?
        user_input = raw_input( "\nPlot another file?\n[y] Yes.\n[n] No.\n" );
if ( not plot_all_with_one ):
    # Show plot(s).
    ax.legend( );
    plt.tight_layout( );
    plt.subplots_adjust( left = 0.0, right = 1.0, top = 1.0, bottom = 0.0 );
    plt.show( );
|
'''
Highly divisible triangular number
The sequence of triangle numbers is generated by adding the natural numbers.
So the 7th triangle number would be 1 + 2 + 3 + 4 + 5 + 6 + 7 = 28. The first ten terms would be:
1, 3, 6, 10, 15, 21, 28, 36, 45, 55, ...
Let us list the factors of the first seven triangle numbers:
1: 1
3: 1,3
6: 1,2,3,6
10: 1,2,5,10
15: 1,3,5,15
21: 1,3,7,21
28: 1,2,4,7,14,28
36 - 9
45 - 6
55 - 4
66 - 8
78 - 8
91 - 4
105 - 8
We can see that 28 is the first triangle number to have over five divisors.
What is the value of the first triangle number to have over five hundred divisors?
'''
def stevilo_deliteljev(n):
    """Return the number of positive divisors of n (n >= 1).

    Counts divisor *pairs* up to sqrt(n), so it runs in O(sqrt(n)) instead
    of the original O(n/2) scan — essential here, since the triangle numbers
    tested below grow past 70 million.
    """
    count = 0
    i = 1
    while i * i <= n:
        if n % i == 0:
            count += 2  # i and n // i form a divisor pair
            if i * i == n:
                count -= 1  # perfect square: sqrt(n) was counted twice
        i += 1
    return count
import itertools  # for an unbounded counter
# Running triangular number: T_i = 1 + 2 + ... + i.
trikotna = 0
for i in itertools.count(1):  # infinite range starting at 1
    trikotna += i
    print(i)  # progress indicator
    #print(stevilo_deliteljev(trikotna))
    if stevilo_deliteljev(trikotna) > 500:
        print(trikotna)  # would run forever, but prints the first matching number and then breaks
        break
# Timing log (translated from Slovenian):
# started at 21:58
# 22:01 at 2370
# 22:15 at 4100
# 22:29 at 5000
# 22:52 at 6000
# 23:34 at 7200; 23:45 at 7500
# 00:12 at 8100; 00:37 at 8500
# 01:09 at 9000; 01:49 at 9550
# went to sleep
# by 8:25 it had already finished
# last printed index is 12375
# printed triangular number is 76576500
# i.e. the 12375th triangular number is the answer. it works.
|
class Solution:
    def multiply(self, num1: str, num2: str) -> str:
        """Multiply two non-negative integers given as digit strings.

        Uses grade-school long multiplication over a digit buffer, so the
        operands may be arbitrarily large; never converts the full numbers
        with int().
        """
        if num1 == '0' or num2 == '0':
            return '0'
        n, m = len(num1), len(num2)
        # digits[k] holds the k-th output digit; n + m cells always suffice.
        digits = [0] * (n + m)
        for a in reversed(range(n)):
            d1 = int(num1[a])
            for b in reversed(range(m)):
                total = digits[a + b + 1] + d1 * int(num2[b])
                carry, digits[a + b + 1] = divmod(total, 10)
                digits[a + b] += carry
        # Skip leading zeros before joining the digits.
        lead = 0
        while digits[lead] == 0:
            lead += 1
        return ''.join(map(str, digits[lead:]))
|
import mediapipe as mp
import cv2
import numpy as np
import uuid
import os
import time
#import serial
#ser = serial.Serial('com3', 9600)
pTime = 0  # timestamp of the previous frame, for the FPS counter
mp_drawing = mp.solutions.drawing_utils
mp_hands = mp.solutions.hands
cap = cv2.VideoCapture(0)
#val = (143.0/640)

def landmark_xyz(hand_landmarks, landmark, width, height):
    """Return [x_px, y_px, z] for one landmark of a detected hand.

    x and y are scaled from MediaPipe's normalized coordinates to pixel
    coordinates; z is left in MediaPipe's normalized depth units.
    """
    point = hand_landmarks.landmark[landmark]
    return [point.x * width, point.y * height, point.z]

with mp_hands.Hands(max_num_hands=1, min_detection_confidence=0.8, min_tracking_confidence=0.5) as hands:
    while cap.isOpened():
        ret, frame = cap.read()
        # BGR 2 RGB (MediaPipe expects RGB input)
        image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        # Flip on horizontal (mirror view)
        image = cv2.flip(image, 1)
        # Mark read-only so MediaPipe can avoid copying the frame
        image.flags.writeable = False
        # Detections
        results = hands.process(image)
        # Set flag to true
        image.flags.writeable = True
        # RGB 2 BGR for OpenCV display
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        image_height = image.shape[0]
        image_width = image.shape[1]
        # Per-finger joint coordinates: index/middle/ring/pinky (i/m/r/p)
        # at the MCP/PIP/DIP/TIP joints (_m/_p/_d/_t). Empty when no hand
        # is detected in the frame.
        i_m = []
        i_p = []
        i_d = []
        i_t = []
        m_m = []
        m_p = []
        m_d = []
        m_t = []
        r_m = []
        r_p = []
        r_d = []
        r_t = []
        p_m = []
        p_p = []
        p_d = []
        p_t = []
        # Rendering results
        if results.multi_hand_landmarks:
            for num, hand in enumerate(results.multi_hand_landmarks):
                mp_drawing.draw_landmarks(image, hand, mp_hands.HAND_CONNECTIONS,
                                          mp_drawing.DrawingSpec(color=(121, 22, 76), thickness=2, circle_radius=4),
                                          mp_drawing.DrawingSpec(color=(250, 44, 250), thickness=2, circle_radius=2),
                                          )
            # Only one hand is tracked (max_num_hands=1).
            first_hand = results.multi_hand_landmarks[0]
            lm = mp_hands.HandLandmark
            i_m = landmark_xyz(first_hand, lm.INDEX_FINGER_MCP, image_width, image_height)
            i_p = landmark_xyz(first_hand, lm.INDEX_FINGER_PIP, image_width, image_height)
            i_d = landmark_xyz(first_hand, lm.INDEX_FINGER_DIP, image_width, image_height)
            i_t = landmark_xyz(first_hand, lm.INDEX_FINGER_TIP, image_width, image_height)
            m_m = landmark_xyz(first_hand, lm.MIDDLE_FINGER_MCP, image_width, image_height)
            m_p = landmark_xyz(first_hand, lm.MIDDLE_FINGER_PIP, image_width, image_height)
            m_d = landmark_xyz(first_hand, lm.MIDDLE_FINGER_DIP, image_width, image_height)
            m_t = landmark_xyz(first_hand, lm.MIDDLE_FINGER_TIP, image_width, image_height)
            r_m = landmark_xyz(first_hand, lm.RING_FINGER_MCP, image_width, image_height)
            r_p = landmark_xyz(first_hand, lm.RING_FINGER_PIP, image_width, image_height)
            r_d = landmark_xyz(first_hand, lm.RING_FINGER_DIP, image_width, image_height)
            # Bug fix: the x coordinate previously read MIDDLE_FINGER_TIP
            # while y/z read RING_FINGER_TIP; use the ring tip for all three.
            r_t = landmark_xyz(first_hand, lm.RING_FINGER_TIP, image_width, image_height)
            p_m = landmark_xyz(first_hand, lm.PINKY_MCP, image_width, image_height)
            p_p = landmark_xyz(first_hand, lm.PINKY_PIP, image_width, image_height)
            p_d = landmark_xyz(first_hand, lm.PINKY_DIP, image_width, image_height)
            p_t = landmark_xyz(first_hand, lm.PINKY_TIP, image_width, image_height)
            #ser.write(i_m)
            print(p_p)
        # Save our image
        #cv2.imwrite(os.path.join('Output Images', '{}.jpg'.format(uuid.uuid1())), image)
        #print(image.shape) # shape = (480,640,3) : height, width, channels (3 for rgb)
        cTime = time.time()
        fps = 1 / (cTime - pTime)
        pTime = cTime
        cv2.putText(image, f'Fps: {int(fps)}', (28, 78), cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 0), 2)
        cv2.imshow('Hand Tracking', image)
        if cv2.waitKey(10) & 0xFF == ord('q'):
            break

cap.release()
cv2.destroyAllWindows()
import requests
import pandas as pd
from collections import OrderedDict
import os
from collections import OrderedDict
#global api_key,sim_id_dict,stock_price_dict
# here you have to enter your actual API key from SimFin
api_key = "*"
sim_ids = []            # SimFin company id per ticker (None when lookup failed)
stock_price_dict = {}
#tickers = ['AAPL', 'MSFT']
# The first 21 column headers of stocks_list.csv are the ticker symbols.
df2 = pd.read_csv('stocks_list.csv')
tickers = df2.columns.values[:21]
# exist_ok avoids crashing when the script is re-run and the dir exists.
os.makedirs('share history', exist_ok=True)
# Resolve each ticker symbol to its SimFin company id.
for ticker in tickers:
    request_url = f'https://simfin.com/api/v1/info/find-id/ticker/{ticker}?api-key={api_key}'
    content = requests.get(request_url)
    data = content.json()
    # print(data)
    if "error" in data or len(data) < 1:
        sim_ids.append(None)
    else:
        sim_ids.append(data[0]['simId'])
print(sim_ids)
data = OrderedDict()
# Download the adjusted-close price history for every resolved company.
for idx, sim_id in enumerate(sim_ids):
    if sim_id is None:
        # Bug fix: unresolved tickers were previously still queried,
        # producing an invalid '.../companies/id/None/...' request URL.
        continue
    os.makedirs('share history/' + str(tickers[idx]), exist_ok=True)
    d = data[tickers[idx]] = OrderedDict({"Date" : []})
    request_url = f'https://simfin.com/api/v1/companies/id/{sim_id}/shares/prices?api-key={api_key}'
    price_content = requests.get(request_url)
    price_data = price_content.json()
    stock_price = pd.DataFrame(price_data['priceData'])
    stock_price["closeAdj"] = pd.to_numeric(stock_price["closeAdj"])
    stock_price = stock_price[["date", "closeAdj"]]
    stock_price.set_index('date', inplace=True)
    # NOTE(review): the CSV is written to the working directory, not into the
    # per-ticker folder created above — confirm the intended location.
    stock_price.to_csv(str(tickers[idx]) + '_share_history.csv')
#!/usr/bin/python
import trello as trellomodule
import plistlib
import subprocess
import os
import sys
from datetime import date, datetime, timedelta
import requests
import json
import optparse
from string import atoi
from ConfigParser import RawConfigParser
# Default settings (overridden by config file and command line options)
DEFAULT_CONFIG_FILE_LOCATIONS= [
    '/etc/munki-trello/munki-trello.cfg',
    'munki-trello.cfg'
]
# Trello list names for each stage of the staging pipeline.
DEFAULT_DEV_LIST = "Development"
DEFAULT_TEST_LIST = "Testing"
DEFAULT_PROD_LIST = "Production"
# 'To' lists: cards placed here are migrated into the real lists below.
DEFAULT_TO_DEV_LIST = "To Development"
DEFAULT_TO_TEST_LIST = "To Testing"
DEFAULT_TO_PROD_LIST = "To Production"
DEFAULT_PRODUCTION_SUFFIX = "Production"
# Munki repository location, makecatalogs binary and catalog names.
DEFAULT_MUNKI_PATH = "/Volumes/Munki"
DEFAULT_MAKECATALOGS = "/usr/local/munki/makecatalogs"
DEFAULT_MUNKI_DEV_CATALOG = "development"
DEFAULT_MUNKI_TEST_CATALOG = "testing"
DEFAULT_MUNKI_PROD_CATALOG = "production"
DEFAULT_DATE_FORMAT = '%d/%m/%y'
# Auto-staging (promotion by due date) is off by default; stage days are
# strings because they pass through the config parser before conversion.
DEFAULT_AUTO_STAGE_TO_TEST=False
DEFAULT_AUTO_STAGE_TO_PROD=False
DEFAULT_DEV_STAGE_DAYS='0'
DEFAULT_TEST_STAGE_DAYS='0'
def fail(message):
    """Write *message* to stderr and terminate the process with exit status 1."""
    sys.stderr.write(message)
    raise SystemExit(1)
def execute(command):
    """Run *command* (an argv list), echoing each raw stdout line as it arrives."""
    proc = subprocess.Popen(command, stdout=subprocess.PIPE)
    for raw_line in iter(proc.stdout.readline, b""):
        print(raw_line)  # streamed rather than collected; could be a generator
def update_pos(list_id, value):
    """Set a Trello list's position via the REST API; return the parsed JSON reply.

    Uses the module-level KEY and TOKEN credentials.
    """
    endpoint = "https://trello.com/1/lists/%s/pos" % (list_id)
    credentials = dict(key=KEY, token=TOKEN)
    resp = requests.put(endpoint, params=credentials, data=dict(value=value))
    resp.raise_for_status()
    return json.loads(resp.content)
def name_in_list(name, to_development, development, testing, to_testing, to_production):
    """Return True if a card titled *name* appears in any of the given card lists.

    Each argument is a list of Trello card dicts with a 'name' key.
    (Rewritten: the original repeated the same scan five times and left a
    dead local `found` that was never used.)
    """
    for card_list in (to_development, development, testing, to_testing, to_production):
        for card in card_list:
            if card['name'] == name:
                return True
    return False
def get_app_version(card_id):
    """Extract (app_name, version) from a card's '**System Info**' comment.

    Scans the card's actions oldest-first for a comment whose first line is
    '**System Info**' and parses its 'Name: ' and 'Version: ' lines.
    NOTE(review): if no such comment exists this returns None implicitly, and
    a comment missing either line raises UnboundLocalError — callers appear
    to assume a well-formed comment written by this script; confirm.
    Uses the module-level `trello` connection.
    """
    cards = trello.cards.get_action(card_id)
    cards.reverse()  # API returns newest first; we want the oldest comment
    for action in cards:
        if action['type']=="commentCard":
            comment_data = action['data']['text'].split("\n")
            if comment_data[0] != "**System Info**":
                continue
            for fragment in comment_data:
                if str(fragment).startswith('Name: '):
                    app_name = fragment[6:]  # strip the 'Name: ' prefix
                if str(fragment).startswith('Version: '):
                    version = fragment[9:]  # strip the 'Version: ' prefix
            return app_name, version
def migrate_packages(trello_connection, source_cards,
                     dest_list_id, dest_catalog_name,
                     due=0, message=None, auto_move=False):
    """Move cards to a destination list and repoint their pkginfo catalogs.

    For every card in source_cards, walk MUNKI_PATH/pkgsinfo for the pkginfo
    whose name+version match the card (via get_app_version), rewrite its
    'catalogs' to [dest_catalog_name], and move the Trello card to
    dest_list_id.

    due       -- if > 0, set a due date this many days from now on each card
    message   -- optional comment added to each migrated card
    auto_move -- True for automatic (past-due) staging; clears the old due
                 date before optionally setting a new one
    Returns the number of pkginfo files changed, so the caller knows whether
    makecatalogs needs to run.
    """
    run_makecatalogs = 0
    due_date_str = None
    if due > 0:
        delta = timedelta(days=due)
        now = datetime.utcnow()
        due_date = now + delta
        # Trello's ISO-8601 UTC format, accurate to the second.
        due_date_str = due_date.strftime('%Y-%m-%dT%H:%M:%S.000Z')
    # Find items from the source list, update pkginfo, and change trello
    # card to dest
    for card in source_cards:
        app_name, version = get_app_version(card['id'])
        # create a list of pkgsinfo files
        pkgsinfo_dirwalk = os.walk(os.path.join(MUNKI_PATH,'pkgsinfo'),
                                   topdown=False)
        plist = None
        for root, dirs, files in pkgsinfo_dirwalk:
            for file in files:
                # It is conceivable there are broken / non plist files
                # so we try to parse the files, just in case
                pkgsinfo = os.path.join(root, file)
                try:
                    plist = plistlib.readPlist(pkgsinfo)
                except:
                    plist = None # Just in case
                    continue
                if plist['name'] == app_name and plist['version'] == version:
                    plist['catalogs'] = [dest_catalog_name]
                    plistlib.writePlist(plist, pkgsinfo)
                    trello_connection.cards.update_idList(card['id'], dest_list_id)
                    # If we are automatically moving cards, reset their
                    # due date
                    if auto_move:
                        trello_connection.cards.update_due(card['id'], None)
                    if message != None:
                        trello_connection.cards.new_action_comment(card['id'], message)
                    if due_date_str != None:
                        trello_connection.cards.update_due(card['id'], due_date_str)
                    run_makecatalogs = run_makecatalogs + 1
                else:
                    # Not a match: clear the flag so the dirwalk continues.
                    plist = None
            # A non-None plist here means the match was found and processed.
            if plist != None:
                break
    return run_makecatalogs
def read_config(cmdopts):
    """Merge configuration-file settings into the parsed command line options.

    Builds a RawConfigParser pre-loaded with the compiled-in defaults, reads
    the default config file locations (plus any file given via --config), and
    fills in every cmdopts attribute not supplied on the command line.
    Command line values win; otherwise the last config file read wins.
    Mutates cmdopts in place; returns None.
    """
    config = RawConfigParser(allow_no_value=True)
    # Set up defaults
    config.add_section('main')
    config.set('main', 'boardid', None)
    config.set('main', 'key', None)
    config.set('main', 'token', None)
    config.set('main', 'makecatalogs', DEFAULT_MAKECATALOGS)
    config.set('main', 'repo_path', DEFAULT_MUNKI_PATH)
    config.set('main', 'date_format', DEFAULT_DATE_FORMAT)
    config.add_section('development')
    config.set('development', 'list', DEFAULT_DEV_LIST)
    config.set('development', 'catalog', DEFAULT_MUNKI_DEV_CATALOG)
    config.set('development', 'to_list', DEFAULT_TO_DEV_LIST)
    config.set('development', 'stage_days', DEFAULT_DEV_STAGE_DAYS)
    config.add_section('testing')
    config.set('testing', 'list', DEFAULT_TEST_LIST)
    config.set('testing', 'catalog', DEFAULT_MUNKI_TEST_CATALOG)
    # BUG FIX: the default for [testing] to_list was DEFAULT_TO_PROD_LIST,
    # which sent packages bound for testing to the 'To Production' list.
    config.set('testing', 'to_list', DEFAULT_TO_TEST_LIST)
    config.set('testing', 'stage_days', DEFAULT_TEST_STAGE_DAYS)
    config.set('testing', 'autostage', DEFAULT_AUTO_STAGE_TO_TEST)
    config.add_section('production')
    config.set('production', 'list', DEFAULT_PROD_LIST)
    config.set('production', 'catalog', DEFAULT_MUNKI_PROD_CATALOG)
    config.set('production', 'to_list', DEFAULT_TO_PROD_LIST)
    config.set('production', 'suffix', DEFAULT_PRODUCTION_SUFFIX)
    config.set('production', 'autostage', DEFAULT_AUTO_STAGE_TO_PROD)
    config_file_locations = DEFAULT_CONFIG_FILE_LOCATIONS
    if cmdopts.config:
        config_file_locations.append(cmdopts.config)
    config.read(config_file_locations)
    if not cmdopts.boardid:
        cmdopts.boardid = config.get('main', 'boardid')
    if not cmdopts.key:
        cmdopts.key = config.get('main', 'key')
    if not cmdopts.token:
        cmdopts.token = config.get('main', 'token')
    if not cmdopts.repo_path:
        cmdopts.repo_path = config.get('main', 'repo_path')
    if not cmdopts.makecatalogs:
        cmdopts.makecatalogs = config.get('main', 'makecatalogs')
    if not cmdopts.date_format:
        cmdopts.date_format = config.get('main', 'date_format')
    if not cmdopts.to_dev_list:
        cmdopts.to_dev_list = config.get('development', 'to_list')
    if not cmdopts.dev_list:
        cmdopts.dev_list = config.get('development', 'list')
    if not cmdopts.dev_catalog:
        cmdopts.dev_catalog = config.get('development', 'catalog')
    if cmdopts.dev_stage_days is None:
        cmdopts.dev_stage_days = int(config.get('development', 'stage_days'))
    else:
        # BUG FIX: this previously used '==' (a no-op comparison), so a
        # command-line stage-days value stayed a string instead of an int.
        cmdopts.dev_stage_days = int(cmdopts.dev_stage_days)
    if not cmdopts.to_test_list:
        cmdopts.to_test_list = config.get('testing', 'to_list')
    if not cmdopts.test_list:
        cmdopts.test_list = config.get('testing', 'list')
    if not cmdopts.test_catalog:
        cmdopts.test_catalog = config.get('testing', 'catalog')
    if cmdopts.test_stage_days is None:
        cmdopts.test_stage_days = int(config.get('testing', 'stage_days'))
    else:
        # BUG FIX: same '==' no-op comparison as above.
        cmdopts.test_stage_days = int(cmdopts.test_stage_days)
    if cmdopts.stage_test is None:
        cmdopts.test_autostage = config.get('testing', 'autostage')
    else:
        # BUG FIX: honour --stage-test; previously test_autostage was never
        # set when the flag was given, so opts.test_autostage raised
        # AttributeError later in the script.
        cmdopts.test_autostage = cmdopts.stage_test
    if not cmdopts.prod_list:
        cmdopts.prod_list = config.get('production', 'list')
    if not cmdopts.to_prod_list:
        cmdopts.to_prod_list = config.get('production', 'to_list')
    if not cmdopts.prod_catalog:
        cmdopts.prod_catalog = config.get('production', 'catalog')
    # We check for None here, as the only way to override this
    # on the command line is to set --suffix=
    if cmdopts.prod_suffix is None:
        cmdopts.prod_suffix = config.get('production', 'suffix')
    if cmdopts.stage_prod is None:
        cmdopts.prod_autostage = config.get('production', 'autostage')
    else:
        # BUG FIX: honour --stage-prod (same missing-attribute problem).
        cmdopts.prod_autostage = cmdopts.stage_prod
def find_or_create_list(trello, board_id, name_id_dict, required_name, position):
    """Return the id of the named Trello list, creating the list if needed.

    name_id_dict  -- existing mapping of list name -> list id
    position      -- position hint; a newly created list is placed at
                     position-1 (0 falls back to a large default so the list
                     lands at the end of the board)
    """
    # `in` replaces the Python-2-only dict.has_key().
    if required_name in name_id_dict:
        return name_id_dict[required_name]
    new_list = trello.boards.new_list(board_id, required_name)
    if position == 0:
        position = 1000001
    update_pos(new_list['id'], position-1)
    return new_list['id']
def update_card_list(trello, app_cards, app_catalog):
    """Delete Trello cards whose app name/version no longer appear in the catalog."""
    for card in app_cards:
        app_name, version = get_app_version(card['id'])
        in_catalog = any(
            entry['name'] == app_name and entry['version'] == version
            for entry in app_catalog
        )
        if not in_catalog:
            trello.cards.delete(card['id'])
def find_auto_migrations(card_list):
    """Return the cards from *card_list* whose due date is in the past.

    Cards with no due date are skipped.  (The dead local `difference`,
    computed but never used, has been removed.)
    """
    migrate_cards = []
    now = datetime.utcnow()
    for card in card_list:
        if card['due'] is None:
            continue
        # Assumptions here:
        # Trello will always return UTC dates in an ISO standard format
        # Due dates are not going to be accurate to more than a second
        # (hence the .000 in the format)
        due = datetime.strptime(card['due'], '%Y-%m-%dT%H:%M:%S.000Z')
        if (now - due).total_seconds() > 0:
            migrate_cards.append(card)
    return migrate_cards
def get_dated_board_id(trello, board_id, list_prefix, suffix, list_name,
                       list_names, list_positions ):
    """Return the Trello list id that production cards should be moved to.

    With a suffix configured, finds (or creates, after the right-most
    remaining list) a dated list named '<list_prefix> <suffix>'.  With an
    empty suffix, requires the static list *list_name* to already exist.
    """
    if suffix:
        prod_title = '%s %s' % (list_prefix, suffix)
        # Place a new dated list after the right-most remaining list.
        # BUG FIX: max() replaces the Python-2-only values().sort() pattern,
        # which breaks on Python 3 where dict.values() is an unsortable view.
        max_position = max(list_positions.values())
        return find_or_create_list(trello, board_id,
                                   list_names, prod_title, max_position)
    else:
        prod_title = list_name
        if prod_title not in list_names:  # has_key() is Python-2-only
            fail("No '%s' list found\n" % prod_title)
        return list_names[prod_title]
# Command line interface.  Defaults shown in the help come from the
# DEFAULT_* constants; actual values are resolved later by read_config().
usage = "%prog [options]"
o = optparse.OptionParser(usage=usage)
# Required options
o.add_option("--boardid", help=("Trello board ID."))
o.add_option("--key", help=("Trello API key. See README for details on how to get one."))
o.add_option("--token", help=("Trello application token. See README for details on how to get one."))
# Optional Options
o.add_option("--config",
    help=("Name of configuration file; program will try to read '/etc/munki-trello/munki-trello.cfg' and './munki-trello.cfg' by default, appending this configuration file to the end of the list; configuration file values will be overridden by those on the command line and last match wins") )
o.add_option("--to-dev-list",
    # BUG FIX: the help previously interpolated DEFAULT_DEV_LIST
    # ('Development') instead of the real default, DEFAULT_TO_DEV_LIST.
    help=("Name of the 'To Development' Trello list. Defaults to '%s'. "
           % DEFAULT_TO_DEV_LIST))
o.add_option("--dev-list",
    help=("Name of the 'Development' Trello list. Defaults to '%s'. "
           % DEFAULT_DEV_LIST))
o.add_option("--to-test-list",
    help=("Name of the 'To Testing' Trello list. Defaults to '%s'. "
           % DEFAULT_TO_TEST_LIST))
o.add_option("--test-list",
    help=("Name of the 'Testing' Trello list. Defaults to '%s'. "
           % DEFAULT_TEST_LIST))
o.add_option("--prod-list",
    help=("Name of the 'Production' Trello list. Defaults to '%s'. Will only be used if the production suffix is set to the empty string"
           % DEFAULT_PROD_LIST))
o.add_option("--to-prod-list",
    help=("Name of the 'To Production' Trello list. Defaults to '%s'. "
           % DEFAULT_TO_PROD_LIST))
o.add_option("--prod-suffix","--suffix",
    help=("Suffix that will be added to new 'In Production cards'. Defaults to '%s'. "
           % DEFAULT_PRODUCTION_SUFFIX))
o.add_option("--dev-catalog",
    help=("Name of the Munki development catalog. Defaults to '%s'. "
           % DEFAULT_MUNKI_DEV_CATALOG))
o.add_option("--test-catalog",
    help=("Name of the Munki testing catalog. Defaults to '%s'. "
           % DEFAULT_MUNKI_TEST_CATALOG))
o.add_option("--prod-catalog",
    help=("Name of the Munki production catalog. Defaults to '%s'. "
           % DEFAULT_MUNKI_PROD_CATALOG))
o.add_option("--repo-path",
    help=("Path to your Munki repository. Defaults to '%s'. "
           % DEFAULT_MUNKI_PATH))
o.add_option("--makecatalogs",
    help=("Path to makecatalogs. Defaults to '%s'. "
           % DEFAULT_MAKECATALOGS))
o.add_option("--date-format",
    help=("Date format to use when creating dated lists. See strftime(1) for details of formatting options. Defaults to '%s'. "
           % DEFAULT_DATE_FORMAT))
# BUG FIX (cosmetic): 'prompoted' -> 'promoted' in the four help texts below.
o.add_option("--dev-stage-days",
    help=("The number of days that a package will remain in development before being promoted to test (if staging is enabled). Note: this does not enable staging"))
o.add_option("--test-stage-days",
    help=("The number of days a package will remain in testing before being promoted to production (if staging is enabled). Note: this does not enable staging"))
o.add_option("--stage-test",
    help=("Automatically promote packages past their due date from development into testing. Note: this does not enable setting of the due date"))
o.add_option("--stage-prod",
    help=("Automatically promote packages past their due date from testing into production. Note: this does not enable setting of the due date"))
opts, args = o.parse_args()
# Read the configuration (from the file given on the command line or the
# default locations); fills in any options not set on the command line.
read_config(opts)
if not opts.boardid or not opts.key or not opts.token:
    fail("Board ID, API key and application token are required.")
# Promote the effective settings to module-level constants used below.
BOARD_ID = opts.boardid
KEY = opts.key
TOKEN = opts.token
TO_DEV_LIST = opts.to_dev_list
DEV_LIST = opts.dev_list
TO_TEST_LIST = opts.to_test_list
TEST_LIST = opts.test_list
TO_PROD_LIST = opts.to_prod_list
PROD_LIST = opts.prod_list
DEV_CATALOG = opts.dev_catalog
TEST_CATALOG = opts.test_catalog
PROD_CATALOG = opts.prod_catalog
PRODUCTION_SUFFIX = opts.prod_suffix
MUNKI_PATH = opts.repo_path
MAKECATALOGS = opts.makecatalogs
DATE_FORMAT=opts.date_format
# These need to be options:
AUTO_STAGE_TO_TEST=opts.test_autostage
AUTO_STAGE_TO_PROD=opts.prod_autostage
if not os.path.exists(MUNKI_PATH):
    fail('Munki path not accessible')
# Authenticated Trello API connection used throughout the script.
trello = trellomodule.TrelloApi(KEY)
trello.set_token(TOKEN)
lists = trello.boards.get_list(BOARD_ID)
# Build quick name -> id and name -> position lookups for the board's lists.
list_names = {}
list_positions = {}
for board_list in lists:  # renamed: `list` shadowed the builtin
    list_names[ board_list['name'] ] = board_list['id']
    list_positions[ board_list['name'] ] = board_list['pos']
# Check that the lists we require exist
for name in [TO_DEV_LIST, TO_TEST_LIST, TO_PROD_LIST, DEV_LIST, TEST_LIST]:
    if name not in list_names:  # has_key() is Python-2-only; `in` works in both
        fail("No '%s' list found\n" % name)
# get the 'To' lists, removing these items from the dictionary
# (so that when we find max_id below, we will ignore these entries)
# Note that we *should* not get a key error due to the checks above
to_dev_id = list_names[TO_DEV_LIST]
list_positions.pop(TO_DEV_LIST)
to_development = trello.lists.get_card(to_dev_id)
to_test_id = list_names[TO_TEST_LIST]
list_positions.pop(TO_TEST_LIST)
to_testing = trello.lists.get_card(to_test_id)
# NOTE(review): `id` shadows the builtin of the same name.
id = list_names[TO_PROD_LIST]
list_positions.pop(TO_PROD_LIST)
to_production = trello.lists.get_card(id)
dev_id = list_names[DEV_LIST]
list_positions.pop(DEV_LIST)
development = trello.lists.get_card(dev_id)
test_id = list_names[TEST_LIST]
list_positions.pop(TEST_LIST)
testing = trello.lists.get_card(test_id)
# The 'all' catalog is the authoritative list of every package in the repo.
all_catalog = plistlib.readPlist(os.path.join(MUNKI_PATH, 'catalogs/all'))
# Collect packages that have no card on any list yet.
missing = []
for item in all_catalog:
    name = item['name'] + ' '+item['version']
    found = name_in_list(name, to_development, development, testing, to_testing, to_production)
    if not found:
        missing.append(item)
# Any item that isn't in any board needs to go in to the right one
# N.B: add to the 'To' lists, which we will migrate into the real
# lists shortly (this is to work around a bug with not setting the due
# dates when staging is on)
for item in missing:
    name = item['name'] + ' '+item['version']
    # This comment format is what get_app_version() parses later.
    comment = '**System Info**\nName: %s\nVersion: %s' % (item['name'], item['version'])
    for catalog in item['catalogs']:
        if catalog == TEST_CATALOG:
            card = trello.lists.new_card(to_test_id, name)
            trello.cards.new_action_comment(card['id'], comment)
        if catalog == DEV_CATALOG:
            card = trello.lists.new_card(to_dev_id, name)
            trello.cards.new_action_comment(card['id'], comment)
# re-get the contents of the 'To' lists (as they may have changed due to
# the above)
to_development = trello.lists.get_card(to_dev_id)
to_testing = trello.lists.get_card(to_test_id)
run_makecatalogs = 0
# Automatically migrate packages from testing to production
# based on their due date.
# N.B this will honour manually set due dates
automigrations = []
if AUTO_STAGE_TO_PROD:
    automigrations = find_auto_migrations(testing)
if len(to_production) or len(automigrations):
    # For production we either use date + suffix or the production list.
    # However, we only need check these lists if there are things to move
    # into production:
    prod_title = None
    list_prefix = date.today().strftime(DATE_FORMAT)
    prod_id = get_dated_board_id(trello, BOARD_ID, list_prefix,
                                 PRODUCTION_SUFFIX, PROD_LIST,list_names, list_positions)
    if not prod_id:
        fail('No id found (or created) for %s\n' % prod_title)
    # Note that automigrations will be empty if AUTO_STAGE_TO_PROD is false
    if len(automigrations):
        msg = 'Auto migrated from %s to production as past due date' % TEST_LIST
        rc = migrate_packages(trello, automigrations, prod_id, PROD_CATALOG,
                              message=msg, auto_move=True)
        run_makecatalogs = run_makecatalogs + rc
    # Find the items that are in To Production and change the pkginfo
    if len(to_production):
        rc = migrate_packages(trello, to_production, prod_id, PROD_CATALOG)
        run_makecatalogs = run_makecatalogs + rc
# Automatically migrate packages from development to test
# based on their due date.
# N.B this will honour manually set due dates
due_days=0
if opts.test_stage_days:
    due_days=opts.test_stage_days
if AUTO_STAGE_TO_TEST:
    automigrations = find_auto_migrations(development)
    if len(automigrations):
        msg = 'Auto migrated from %s to %s as past due date' % (DEV_LIST, TEST_LIST)
        rc = migrate_packages(trello, automigrations, test_id, TEST_CATALOG, message=msg, auto_move=True, due=due_days)
        run_makecatalogs = run_makecatalogs + rc
# Move cards in to_testing to testing. Update the pkginfo
if len(to_testing):
    rc = migrate_packages(trello, to_testing, test_id, TEST_CATALOG, due=due_days)
    run_makecatalogs = run_makecatalogs + rc
# Move cards in to_development to development. Update the pkginfo
if len(to_development):
    due_days=0
    if opts.dev_stage_days:
        due_days=opts.dev_stage_days
    rc = migrate_packages(trello, to_development, dev_id, DEV_CATALOG, due=due_days)
    run_makecatalogs = run_makecatalogs + rc
# Have a look at development and find any items that aren't in the all
# catalog anymore
# XXX(TODO): if staging check this list as it may have changed
update_card_list(trello, development, all_catalog)
# Have a look at testing and find any items that aren't in the all
# catalog anymore
# XXX(TODO): if staging check this list as it may have changed
update_card_list(trello, testing, all_catalog)
# Holy crap, we're done, run makecatalogs
if run_makecatalogs:
    task = execute([MAKECATALOGS, MUNKI_PATH])
|
from item_info import ITEM_INFO
class Menu:
    """Paginated console menu.

    Shows up to MAX_MENU_LENGTH items per page; 'n' cycles pages, 'b' backs
    out, a number selects an item.  Item display names come from ITEM_INFO.
    """

    MAX_MENU_LENGTH = 5  # items shown per page

    def __init__(self, items, return_option):
        self.items = items
        self.item_info = ITEM_INFO
        self.menu_length = 0
        self.menu_number = 0
        self.max_menus = 0
        self.command = ""
        self.return_option = return_option  # label for the 'b' (back) entry
        self.letter_commands = []
        self.menu_starting_index = 0
        self.menu_end_index = 0

    def return_selected_option(self):
        """Run the menu loop; return the chosen item's 0-based index, or None for 'b'."""
        self.initialize_parameters()
        while (True):
            self.calculate_menu_length()
            self.define_menu_indexes()
            self.print_item_menu()
            self.define_letter_commands()
            self.get_item_menu_input()
            if (self.command == "b"):
                return None
            elif (self.command == "n"):
                if (self.menu_number == 1) and (self.max_menus == 1):
                    print("Invalid command")
                    continue
                # Wrap around to the first page after the last one.
                if (self.menu_number == self.max_menus):
                    self.menu_number = 1
                else:
                    self.menu_number += 1
                continue
            # A validated 1-based item number; convert to 0-based index.
            return self.command - 1

    def initialize_parameters(self):
        """Reset to the first page and compute the total page count."""
        self.menu_number = 1
        # BUG FIX: use floor division (and MAX_MENU_LENGTH rather than a
        # magic 5) so max_menus is always an int; '/' yields a float on
        # Python 3 whenever the item count divides evenly.
        if (len(self.items) % self.MAX_MENU_LENGTH == 0):
            self.max_menus = len(self.items) // self.MAX_MENU_LENGTH
        else:
            self.max_menus = len(self.items) // self.MAX_MENU_LENGTH + 1

    def calculate_menu_length(self):
        """Compute how many items the current (possibly short, last) page shows."""
        if (self.menu_number * self.MAX_MENU_LENGTH >= len(self.items)):
            self.menu_length = len(self.items) - (self.MAX_MENU_LENGTH * (self.menu_number - 1))
        else:
            self.menu_length = self.MAX_MENU_LENGTH

    def define_menu_indexes(self):
        """Compute the 1-based [start, end) display indexes for this page."""
        self.menu_starting_index = ((self.menu_number - 1) * self.MAX_MENU_LENGTH) + 1
        self.menu_end_index = self.menu_starting_index + self.menu_length

    def print_item_menu(self):
        """Print the current page's items plus the navigation options."""
        for i in range(self.menu_starting_index, self.menu_end_index):
            print("%s. %s" % (i, self.item_info[self.items[i - 1]]["name"]))
        if (self.max_menus > 1):
            if (self.menu_number != self.max_menus):
                print("n. 次のページ")
            elif (self.menu_number == self.max_menus):
                print("n. 最初のページに戻る")
        print("b. %s" % (self.return_option))

    def define_letter_commands(self):
        """Allow 'n' (next page) only when there is more than one page."""
        if (self.max_menus > 1):
            self.letter_commands = ["n", "b"]
        else:
            self.letter_commands = ["b"]

    def get_item_menu_input(self):
        """Read input until it is a letter command or a number on this page."""
        while (True):
            self.command = input().strip()
            if (self.command in self.letter_commands):
                break
            elif (self.command.isdigit()):
                self.command = int(self.command)
            else:
                print("Invalid command")
                continue
            if self.command in list(range(self.menu_starting_index, self.menu_end_index)):
                break
            print("Invalid command")
|
"""change column
Revision ID: e3eda40c3d84
Revises: e1376d6927c0
Create Date: 2021-02-21 12:43:05.385518
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'e3eda40c3d84'
down_revision = 'e1376d6927c0'
branch_labels = None
depends_on = None
def upgrade():
    """Apply this Alembic revision.

    No-op: autogenerate emitted no operations.  NOTE(review): the revision
    message is 'change column', so the intended operations may be missing.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def downgrade():
    """Revert this Alembic revision (no-op, mirroring the empty upgrade)."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
|
import os
import librosa
import soundfile as sf
# Resample every .wav in ft_wav/ to 22.05 kHz and write it to ft_wav2205/.
for filename in os.listdir("ft_wav"):
    if filename.endswith(".wav"):
        # BUG FIX: the paths contained the literal placeholder "(unknown)",
        # so every iteration tried to load the same nonexistent file;
        # interpolate the actual filename.
        y, s = librosa.load(f"ft_wav/{filename}", sr=22050)
        sf.write(f"ft_wav2205/{filename}", y, s)
import requests
from datetime import datetime
from flask import Blueprint, render_template, request, redirect, jsonify, url_for
from mod_data_receiver.tasks import DataReceiverTask
# Flask blueprint for endpoints that receive lab results from providers.
data_receiver_blueprint = Blueprint('data_receiver', __name__, url_prefix="/data-receiver")
# BASE_URL_LTA = "https://portal.labtestingapi.com"
# NOTE(review): currently pointing at the staging portal; switch to the
# production URL above for live use.
BASE_URL_LTA = "https://staging-portal.labtestingapi.com"
@data_receiver_blueprint.route('/<lab_provider>/transform-and-save', methods=['POST'])
def transform_and_save(lab_provider):
    """Retrieve raw results for a lab test, then transform and persist them.

    Expects form fields 'lab_test_id' and (optionally) 'test_name'.
    NOTE(review): lab_provider is accepted but ignored — the provider is
    hard-coded to "Demo"; confirm this is intentional.
    """
    lab_test_id = request.form.get('lab_test_id',False)
    test_name = request.form.get('test_name','NA')
    retrieve_task = DataReceiverTask("Demo", DataReceiverTask.RETRIEVE, lab_test_id)
    retrieve_task.do()
    # BUG FIX (naming): the original local was also called
    # `transform_and_save`, shadowing this view function inside its own body.
    save_task = DataReceiverTask("Demo", DataReceiverTask.TRANSFORM_AND_SAVE, lab_test_id, test_name)
    save_task.do()
    return jsonify({"message": "Lab test results received and transformed (id: %s) and retrieve task created." %lab_test_id })
@data_receiver_blueprint.route('/<lab_provider>/notification', methods=['GET'])
def notification(lab_provider):
    """Webhook hit by a lab provider when a test's status changes.

    For the "Demo" provider, a 'resulted' status triggers a POST to our own
    transform-and-save endpoint to kick off retrieval and transformation.
    NOTE(review): non-"Demo" providers and non-'resulted' statuses fall
    through to `pass` and return None, which Flask treats as an error —
    confirm whether an explicit no-op response was intended.
    """
    if lab_provider=="Demo":
        lab_test_id = request.args.get('id',777)
        lab_test_status = request.args.get('status','resulted')
        test_name = request.args.get("test_name","NA")
        if lab_test_id and lab_test_status and lab_test_status == 'resulted':
            # new_task = DataReceiverTask(provider="LTA", action=RETRIEVE, lab_test_id=lab_test_id)
            # Delegate to our own transform-and-save endpoint over HTTP.
            requests.post(
                url_for('data_receiver.transform_and_save', lab_provider="Demo", _external=True),
                data={"lab_test_id":lab_test_id,"test_name":test_name}
            )
            return jsonify({"message": "Lab test results ready (id: %s) and retrieve task created." %lab_test_id })
    pass
|
from django import forms
from . import models
class ToDoForm(forms.ModelForm):
    """Model form for creating a ToDo item; exposes only the ``task`` field."""

    def __init__(self, *args, **kwargs):
        # Render labels without Django's default ':' suffix.
        super(ToDoForm, self).__init__(*args, **kwargs)
        self.label_suffix = ''

    class Meta:
        model = models.ToDo
        fields = ('task',)
        widgets = {
            'task': forms.TextInput(
                attrs={'autocomplete': 'off', 'class': 'form-control'}
            ),
        }
|
from ingestor.ingestor import Ingestor
from apiclient.errors import HttpError
from oauth2client.tools import argparser
if __name__ == "__main__":
    # Command line options for the ingestion job (search term plus an
    # optional geographic filter).
    argparser.add_argument("--q", help="Search term", default="Google")
    argparser.add_argument("--location", help="Location", default="37.42307,-122.08427")
    argparser.add_argument("--location-radius", help="Location radius", default="5km")
    argparser.add_argument("--max-results", help="Max results", default=25)
    args = argparser.parse_args()
    try:
        ingestor = Ingestor()
        ingestor.start(args)
    except HttpError as e:
        # Surface API failures (quota, auth, bad request) without a traceback.
        print("An HTTP error %d occurred:\n%s" % (e.resp.status, e.content))
|
import pdb
from math import sin, pi
K = 6  # number of extrapolation steps, i.e. the maximum value of k
g = [[None for t in range(K-k)] for k in range(K)]  # triangular extrapolation table
h_0 = 2
for t in range(K):  # initialise the first column: g[0][t] = (2^t/h_0)*sin(h_0*pi/2^t)
    g[0][t] = (2**t/h_0)*sin(h_0*pi/(2**t))
# Richardson extrapolation: combine neighbouring entries of the previous
# column to cancel successive error terms.
for k in range(1, K):
    for t in range(K-k):
        temp = 2**(2*k)
        g[k][t] = (temp*g[k-1][t+1]-g[k-1][t])/(temp-1)
# Print the table: a header row of t indices, then one row per k.
print(" ",end="")
for t in range(K):
    print(t, end="\t")
print()
for k in range(K):
    print("k={}:".format(k), end="")
    for t in range(K-k):
        print(round(g[k][t], 10), end="\t")
    print()
|
import numpy as np
class KalmanFilterEq():
    """Linear Kalman filter equations (core of an Extended Kalman Filter).

    Attributes (the caller must assign P, Q, F and x before use):
        n        -- number of state variables being estimated
        identity -- cached n x n identity matrix used in the covariance update
        P        -- uncertainty covariance matrix of the state x
        Q        -- covariance matrix of the process noise
        F        -- state update (transition) matrix
        x        -- state vector
    """
    def __init__(self,n):
        self.n=n
        self.identity= np.matrix(np.eye(n))
        self.P=None
        self.Q=None
        self.F=None
        self.x=None

    def predict(self):
        """Prediction step:
            x(n) = F * x(n-1)
            P(n) = F * P(n-1) * F^T + Q
        """
        self.x = self.F * self.x
        self.P= self.F * self.P * self.F.T + self.Q

    def update(self,z,H,Hx,R):
        """Measurement update of x and P:
            y = z - Hx                  (innovation)
            S = H * P * H^T + R         (innovation covariance)
            K = P * H^T * S^-1          (Kalman gain)
            x(n) = x(n-1) + K * y
            P(n) = (I - K*H) * P(n-1)

        z  -- actual measurement vector
        H  -- extraction matrix mapping state to measurement space
        Hx -- predicted measurement for the current state
        R  -- measurement noise covariance
        """
        y=z-Hx
        PHt= self.P *H.T
        S= H*PHt + R
        K= PHt * (S.I)
        self.x = self.x + K * y
        # BUG FIX: the original used `self.I`, which is never defined
        # (__init__ stores the identity matrix as `self.identity`), so
        # every call to update() raised AttributeError.
        self.P = (self.identity - K * H) * self.P
|
import json
import multiprocessing as mp
import os
import time
from pathlib import Path
from typing import List
import numpy as np
import shutil
from file_handling import write_simulations_to_disk
from noise import NoiseType
from stats import delayed_ou_processes_ensemble, SimulationResults
# Simulation configuration for the delayed-OU-process ensembles.
T = 1 # delay
T_cycles = 2
T_total = T * T_cycles
initial_condition = 0
R = 500 # resolution
ensemble_runs = 1000
# R = 100 # resolution
# ensemble_runs = 50
t_interval = np.linspace(0, T_total, R) # run simulation for 2 noise cycles
# Parameter grids swept by the parameter-set lists below.
steps_e = np.linspace(0.05, 0.95, 7)
steps_tau = np.linspace(0.05, 0.95, 7)
steps_gamma = np.linspace(0.05, 0.95, 7)
# Parameter sets: symmetric taus under white and red noise; asymmetric taus
# (white noise); asymmetric gammas (red noise, fixed taus).
params_symmetric_increasing_taus = [{'e': e, 'tau1': tau, 'tau2': tau, 'noiseType': {'type': NoiseType.WHITE}} for e in steps_e for tau in steps_tau] \
                 + [{'e': e, 'tau1': tau, 'tau2': tau, 'noiseType': {'type': NoiseType.RED, 'gamma1': 0.5, 'gamma2': 0.5}} for e in steps_e for tau in steps_tau]
params_asymetric_increasing_taus =[{'e': 0.5, 'tau1': tau1, 'tau2': tau2, 'noiseType': {'type': NoiseType.WHITE}} for tau1 in steps_tau for tau2 in steps_tau]
params_asymetric_increasing_gammas =[{'e': 0.5, 'tau1': 0.5, 'tau2': 0.5, 'noiseType': {'type': NoiseType.RED, 'gamma1': gamma1, 'gamma2': gamma2}} for gamma1 in steps_gamma for gamma2 in steps_gamma]
def simulate_on_params(ps):
    """Identity passthrough; kept for API symmetry with the other helpers."""
    return ps
def get_white_noise(a):
    """Return the first seven results (the white-noise slice of the sweep).

    NOTE(review): the bound assumes the ordering of the combined results
    list built elsewhere — confirm before reuse.
    """
    return a[0:7]
def get_red_noise(a):
    """Return results 6..11 (the red-noise slice of the sweep).

    NOTE(review): this overlaps get_white_noise at index 6 — confirm the
    boundary is intentional.
    """
    lo, hi = 6, 12
    return a[lo:hi]
def get_symm_increasing_gamma(a):
    """Return results 12..14 (symmetric increasing-gamma slice — bounds
    assume the combined result ordering; confirm before reuse)."""
    lo, hi = 12, 15
    return a[lo:hi]
def get_different_taus(a):
    """Return results 15..20 (asymmetric-tau slice — bounds assume the
    combined result ordering; confirm before reuse)."""
    lo, hi = 15, 21
    return a[lo:hi]
def get_different_gammas(a):
    """Return results 21..26 (asymmetric-gamma slice — bounds assume the
    combined result ordering; confirm before reuse)."""
    lo, hi = 21, 27
    return a[lo:hi]
def wrapped_delayed_processes(p) -> SimulationResults:
    """Single-argument adapter so pool.map can drive the ensemble simulation."""
    return delayed_ou_processes_ensemble(
        T_total, R, T_cycles, t_interval, p, initial_condition, ensemble_runs
    )
def calculations(params) -> List[SimulationResults]:
    """Run the ensemble simulation for every parameter set in parallel.

    BUG FIX: the pool is now a context manager so its worker processes are
    reliably terminated; the original leaked the Pool (never closed/joined).
    """
    with mp.Pool(processes=12) as pool:
        return pool.map(wrapped_delayed_processes, params)
def calc_and_save():
    """Run the configured parameter sweep, persist the results, return them."""
    params = params_asymetric_increasing_gammas
    name = 'params_asymetric_increasing_gammas'
    start_time = time.perf_counter()
    results: List[SimulationResults] = calculations(params)
    result_path = Path.cwd() / f"results/{name}_{ensemble_runs}_{R}_{initial_condition}"
    # BUG FIX: perf_counter() measures seconds, not ms; and the write timing
    # below now uses its own start so it no longer includes calculation time.
    print(f"It took {time.perf_counter() - start_time}s to finish calculations")
    print('simulations done, write to ' + str(result_path))
    write_start = time.perf_counter()
    write_simulations_to_disk(result_path, results)
    print(f"It took {time.perf_counter() - write_start}s to write output data")
    # res = plot_results(results, show_acf, show_ccf, show_correlation, show_different_taus, show_samples)
    # plt.show()
    return results


if __name__ == '__main__':
    calc_and_save()
|
# Daniel Garcia
# SBU ID: 111157499
# Homework 1
# Question 6, Part 3
def cff_three(stringList):
    """Return a list of (word, len(word)) tuples, one per word in stringList.

    Rewritten with a list comprehension instead of assigning a lambda to a
    name (PEP 8 E731); behaviour is unchanged.
    """
    return [(word, len(word)) for word in stringList]

print(cff_three(['part', 'three', 'example']))
from django.shortcuts import render,redirect;
from django.contrib.auth.forms import UserCreationForm;
from django.contrib.auth.models import User;
from django.contrib import messages;
# Create your views here.
from users.forms import CustomRegister;
from django.contrib.auth.decorators import login_required;
def register(request):
    """Handle user sign-up.

    On a valid POST, create the user, flash a success message and redirect
    to the blog home page; otherwise render the (possibly bound, with
    errors) registration form.  Cleanup: removed the stray semicolons and
    the redundant initial form construction that was immediately
    overwritten on POST.
    """
    if request.method == 'POST':
        form = CustomRegister(request.POST)
        if form.is_valid():
            username = form.cleaned_data.get('username')
            form.save()
            messages.success(request, f'user {username} created successfully')
            return redirect('blog_home')  # blog_home
    else:
        form = CustomRegister()
    return render(request, "registeration/register.html", {"form": form})
@login_required
def profile(request):
    """Render the profile page for the authenticated user."""
    return render(request, 'registeration/profile.html')
|
# Decide whether vertices 1 and N of an undirected graph are joined by a
# path of exactly two edges, i.e. whether they share a common neighbour.
N, M = map(int, input().split())
E1 = set()  # neighbours of vertex 1
EN = set()  # neighbours of vertex N
for _ in range(M):  # loop index was unused; renamed from `n`
    a, b = map(int, input().split())
    if a == 1:
        E1.add(b)
    if b == 1:
        E1.add(a)
    if a == N:
        EN.add(b)
    if b == N:
        EN.add(a)
# A shared neighbour x gives the path 1 - x - N.  Set intersection replaces
# the original membership-scan loop with its for/else clause.
print("POSSIBLE" if E1 & EN else "IMPOSSIBLE")
|
#!/usr/bin/python
'''
Deep neural networks for regression using chainer.
'''
from base_util import *
from base_ml import *
from base_chn import loss_for_error2
from chainer import cuda, Variable, FunctionSet, optimizers
import chainer.functions as F
import six.moves.cPickle as pickle
#Tools for NNs
#ReLU whose input is a normal distribution variable.
# mu: mean, var: variance (square of std-dev).
# cut_sd: if abs(mu)>cut_sd*sigma, an approximation is used. Set None to disable this.
def ReLUGauss(mu, var, epsilon=1.0e-6, cut_sd=4.0):
  """Mean and variance of ReLU(X) for X ~ N(mu, var).

  mu      -- mean of the input Gaussian
  var     -- variance (square of std-dev) of the input Gaussian
  epsilon -- below this sigma the input is treated as a point mass
  cut_sd  -- if abs(mu) > cut_sd*sigma a saturated approximation is used;
             set to None to disable the shortcut
  Returns (mean, variance) of the rectified variable, cast to type(mu).
  Raises Exception if the computed variance is significantly negative.
  """
  cast= type(mu)
  sigma= math.sqrt(var)
  # Degenerate input: behave like a plain ReLU on a point value.
  if sigma<epsilon: return cast(max(0.0,mu)), cast(0.0)
  # Approximation to speed up for abs(mu)>cut_sd*sigma.
  if cut_sd is not None and mu>cut_sd*sigma: return cast(mu), cast(var)
  if cut_sd is not None and mu<-cut_sd*sigma: return cast(0.0), cast(0.0)
  sqrt2= math.sqrt(2.0)
  sqrt2pi= math.sqrt(2.0*math.pi)
  z= mu/(sqrt2*sigma)
  E= math.erf(z)
  X= math.exp(-z*z)
  mu_out= sigma/sqrt2pi*X + mu/2.0*(1.0+E)
  var_out= (1.0+E)/4.0*(mu*mu*(1.0-E)+2.0*var) - sigma*X/sqrt2pi*(sigma*X/sqrt2pi+mu*E)
  if var_out<0.0:
    # Tolerate tiny negative values from floating-point round-off.
    if var_out>-epsilon: return mu_out, 0.0
    else:
      msg= 'ERROR in ReLUGauss: %f, %f, %f, %f'%(mu, sigma, mu_out, var_out)
      # BUG FIX: `print msg` is Python-2-only syntax; the function form is
      # valid in both Python 2 and 3.
      print(msg)
      raise Exception(msg)
  return cast(mu_out), cast(var_out)
#Vector version of ReLUGauss
ReLUGaussV= np.vectorize(ReLUGauss)  # elementwise (mu, var) -> (mean, variance)
#Gradient of ReLU whose input is a normal distribution variable.
# mu: mean, var: variance (square of std-dev).
# cut_sd: if abs(mu)>cut_sd*sigma, an approximation is used. Set None to disable this.
def ReLUGaussGrad(mu, var, epsilon=1.0e-6, cut_sd=4.0):
  """Gradient of ReLU whose input is N(mu, var), cast to type(mu).

  cut_sd: beyond cut_sd standard deviations from zero the saturated 0/1
  shortcut is returned; pass None to disable it.
  """
  cast= type(mu)
  sigma= math.sqrt(var)
  # Near-deterministic input: the gradient is the plain ReLU step function.
  if sigma<epsilon:
    return cast(1.0 if mu>0.0 else 0.0)
  # Saturated shortcuts far from the kink.
  if cut_sd is not None:
    if mu>cut_sd*sigma: return cast(1.0)
    if mu<-cut_sd*sigma: return cast(0.0)
  z= mu/(math.sqrt(2.0)*sigma)
  return cast(0.5*(1.0+math.erf(z)))
ReLUGaussGradV= np.vectorize(ReLUGaussGrad) #Vector version (elementwise expected ReLU gradient)
'''
Interface class of a function approximator.
We assume a data takes a form like:
X=[[x1^T], Y=[[y1^T],
[x2^T], [y2^T],
[... ]] [... ]]
where xn is an input vector, yn is an output vector (n=1,2,...).
'''
class TNNRegression(TFunctionApprox):
  """Feed-forward NN regression with an auxiliary prediction-error model.

  Two chainer (v1 API) FunctionSet networks are maintained:
    self.model     -- the "mean" model, learns y = f(x)
    self.model_err -- the "error" model, learns |y - f(x)| so that Predict
                      can report an output variance estimate.
  Training data accumulate in self.DataX / self.DataY; both networks are
  (re)trained by UpdateMain() with mini-batch AdaDelta.
  NOTE: Python-2 / chainer-v1 era code (FunctionSet, collect_parameters,
  print statements, integer division in TrainNN).
  """
  @staticmethod
  def DefaultOptions():
    '''Some options are defined both for a mean model and an error model (_err).
    If the value of an error model option is None,
    the value of corresponding mean model is used.'''
    Options= {}
    Options['name']= ''  #Arbitrary name.
    Options['gpu']= -1  #Device ID of GPU (-1: not use).
    Options['n_units']= [1,200,200,1]  #Number of input/hidden/output units.
    Options['n_units_err']= None  #Number of input/hidden/output units for error model.
    Options['num_min_predictable']= 3  #Number of minimum samples necessary to train NNs.
    Options['init_bias_randomly']= True  #Initialize bias of linear models randomly.
    Options['bias_rand_init_bound']= [-1.0, 1.0]  #Bound used in random bias initialization.
    Options['bias_rand_init_bound_err']= [0.0, 0.5]  #Bound used in random bias initialization of error model.
    Options['dropout']= True  #If use dropout.
    Options['dropout_ratio']= 0.01  #Ratio of dropout.
    Options['dropout_err']= None  #If use dropout for error model.
    Options['dropout_ratio_err']= None  #Ratio of dropout for error model.
    Options['error_loss_neg_weight']= 0.1  #Weight of negative loss for error model.
    Options['AdaDelta_rho']= 0.9  #Parameter for AdaDelta.
    Options['AdaDelta_rho_err']= None  #Parameter for AdaDelta for error model.
    Options['batchsize']= 10  #Size of mini-batch.
    Options['batchsize_err']= None  #Size of mini-batch for error model.
    Options['num_max_update']= 5000  #Maximum number of updates with mini-batch.
    Options['num_max_update_err']= None  #Maximum number of updates with mini-batch for error model.
    Options['num_check_stop']= 50  #Stop condition is checked for every this number of updates w mini-batch.
    Options['num_check_stop_err']= None  #Stop condition is checked for every this number of updates w mini-batch (for error model).
    Options['loss_maf_alpha']= 0.4  #Update ratio of moving average filter for loss.
    Options['loss_maf_alpha_err']= None  #Update ratio of moving average filter for loss for error model.
    Options['loss_stddev_init']= 2.0  #Initial value of loss std-dev (unit (1.0) is 'loss_stddev_stop').
    Options['loss_stddev_init_err']= None  #Initial value of loss std-dev (unit (1.0) is 'loss_stddev_stop') for error model.
    Options['loss_stddev_stop']= 1.0e-3  #If std-dev of loss is smaller than this value, iteration stops.
    Options['loss_stddev_stop_err']= None  #If std-dev of loss is smaller than this value, iteration stops (for error model).
    Options['base_dir']= '/tmp/dnn/'  #Base directory.  Last '/' is matter.
    '''Some data (model.parameters, model_err.parameters, DataX, DataY)
    are saved into this file name when Save() is executed.
    label: 'model_mean', 'model_err', 'data_x', or 'data_y'.
    base: Options['base_dir'] or base_dir argument of Save method.'''
    Options['data_file_name']= '{base}nn_{label}.dat'
    '''Template of filename to store the training log.
    name: Options['name'].
    n: number of training executions.
    code: 'mean' or 'err'.
    base: Options['base_dir'].'''
    Options['train_log_file']= '{base}train/nn_log-{n:05d}-{name}{code}.dat'
    Options['verbose']= True
    return Options
  @staticmethod
  def DefaultParams():
    #Serializable state: NN weights (as external pickle file names), data file names, train count.
    Params= {}
    Params['nn_params']= None
    Params['nn_params_err']= None
    Params['nn_data_x']= None
    Params['nn_data_y']= None
    Params['num_train']= 0  #Number of training executions.
    return Params
  @staticmethod
  def ToVec(x):
    #Serialize x (None/list/ndarray/matrix) into a flat float32 vector.
    if x==None: return np.array([],np.float32)
    elif isinstance(x,list): return np.array(x,np.float32)
    elif isinstance(x,(np.ndarray,np.matrix)):
      return x.ravel().astype(np.float32)
    raise Exception('ToVec: Impossible to serialize:',x)
  def __init__(self):
    TFunctionApprox.__init__(self)
    '''
    NOTE
    In order to save and load model parameters,
      save: p=ToStdType(model.parameters)
      load: model.copy_parameters_from(map(lambda e:np.array(e,np.float32),p))
    '''
  #Synchronize Params (and maybe Options) with an internal learner to be saved.
  #base_dir: used to store data into external data file(s); None for a default value.
  def SyncParams(self, base_dir):
    TFunctionApprox.SyncParams(self, base_dir)
    if base_dir==None: base_dir= self.Options['base_dir']
    L= lambda f: f.format(base=base_dir)
    if self.IsPredictable():
      #self.Params['nn_params']= ToStdType(self.model.parameters)
      #self.Params['nn_params_err']= ToStdType(self.model_err.parameters)
      #Weights go into external pickle files; Params stores only the file-name template.
      self.Params['nn_params']= self.Options['data_file_name'].format(label='model_mean',base='{base}')
      pickle.dump(ToStdType(self.model.parameters), OpenW(L(self.Params['nn_params']), 'wb'), -1)
      self.Params['nn_params_err']= self.Options['data_file_name'].format(label='model_err',base='{base}')
      pickle.dump(ToStdType(self.model_err.parameters), OpenW(L(self.Params['nn_params_err']), 'wb'), -1)
    if self.NSamples>0:
      #Training data are likewise pickled into external files.
      self.Params['nn_data_x']= self.Options['data_file_name'].format(label='data_x',base='{base}')
      #fp= OpenW(L(self.Params['nn_data_x']), 'w')
      #for x in self.DataX:
        #fp.write('%s\n'%(' '.join(map(str,x))))
      #fp.close()
      pickle.dump(ToStdType(self.DataX), OpenW(L(self.Params['nn_data_x']), 'wb'), -1)
      self.Params['nn_data_y']= self.Options['data_file_name'].format(label='data_y',base='{base}')
      #fp= OpenW(L(self.Params['nn_data_y']), 'w')
      #for y in self.DataY:
        #fp.write('%s\n'%(' '.join(map(str,y))))
      #fp.close()
      pickle.dump(ToStdType(self.DataY), OpenW(L(self.Params['nn_data_y']), 'wb'), -1)
  #Initialize approximator.  Should be executed before Update/UpdateBatch.
  def Init(self):
    TFunctionApprox.Init(self)
    L= self.Locate
    #Restore training data from the pickled files, if any.
    if self.Params['nn_data_x'] != None:
      self.DataX= np.array(pickle.load(open(L(self.Params['nn_data_x']), 'rb')), np.float32)
    else:
      self.DataX= np.array([],np.float32)
    if self.Params['nn_data_y'] != None:
      self.DataY= np.array(pickle.load(open(L(self.Params['nn_data_y']), 'rb')), np.float32)
    else:
      self.DataY= np.array([],np.float32)
    self.CreateNNs()
    #Restore saved weights, or initialize biases randomly for a fresh model.
    if self.Params['nn_params'] != None:
      #self.model.copy_parameters_from(map(lambda e:np.array(e,np.float32),self.Params['nn_params']))
      self.model.copy_parameters_from(map(lambda e:np.array(e,np.float32),pickle.load(open(L(self.Params['nn_params']), 'rb')) ))
      self.is_predictable= True
    else:
      if self.Options['init_bias_randomly']:
        self.InitBias(m='mean')
    if self.Params['nn_params_err'] != None:
      #self.model_err.copy_parameters_from(map(lambda e:np.array(e,np.float32),self.Params['nn_params_err']))
      self.model_err.copy_parameters_from(map(lambda e:np.array(e,np.float32),pickle.load(open(L(self.Params['nn_params_err']), 'rb')) ))
    else:
      if self.Options['init_bias_randomly']:
        self.InitBias(m='error')
    if self.Options['gpu'] >= 0:
      cuda.init(self.Options['gpu'])
      self.model.to_gpu()
      self.model_err.to_gpu()
    #One AdaDelta optimizer per network; _err falls back to the mean model's rho.
    self.optimizer= optimizers.AdaDelta(rho=self.Options['AdaDelta_rho'])
    self.optimizer.setup(self.model.collect_parameters())
    self.optimizer_err= optimizers.AdaDelta(rho=IfNone(self.Options['AdaDelta_rho_err'], self.Options['AdaDelta_rho']))
    self.optimizer_err.setup(self.model_err.collect_parameters())
  #Create neural networks.
  def CreateNNs(self):
    assert(len(self.Options['n_units'])>=2)
    assert(self.Options['n_units_err']==None or len(self.Options['n_units_err'])>=2)
    #Mean model: a stack of Linear layers named l0, l1, ...
    n_units= self.Options['n_units']
    self.f_names= ['l%d'%i for i in range(len(n_units)-1)]
    funcs= {}
    for i in range(len(n_units)-1):
      funcs[self.f_names[i]]= F.Linear(n_units[i],n_units[i+1])
    self.model= FunctionSet(**funcs)
    #Error model: same structure; reuses n_units when n_units_err is None.
    if self.Options['n_units_err']!=None: n_units= self.Options['n_units_err']
    self.f_names_err= ['l%d'%i for i in range(len(n_units)-1)]
    funcs= {}
    for i in range(len(n_units)-1):
      funcs[self.f_names_err[i]]= F.Linear(n_units[i],n_units[i+1])
    self.model_err= FunctionSet(**funcs)
  #Randomly initialize bias of linear models.  m: 'both', 'mean', or 'error'.
  def InitBias(self, m='both'):
    if m in ('both','mean'):
      for l in self.f_names:
        getattr(self.model,l).b[:]= [Rand(*self.Options['bias_rand_init_bound'])
                                     for d in range(getattr(self.model,l).b.size)]
    if m in ('both','error'):
      for l in self.f_names_err:
        getattr(self.model_err,l).b[:]= [Rand(*self.Options['bias_rand_init_bound_err'])
                                         for d in range(getattr(self.model_err,l).b.size)]
  #Compute output (mean) for a set of x.  train enables dropout.
  def Forward(self, x_data, train):
    if not self.Options['dropout']: train= False
    dratio= self.Options['dropout_ratio']
    x= Variable(x_data)
    h0= x
    #ReLU+dropout through all hidden layers; last layer is linear.
    for l in self.f_names[:-1]:
      h1= F.dropout(F.relu(getattr(self.model,l)(h0)), ratio=dratio, train=train)
      h0= h1
    y= getattr(self.model,self.f_names[-1])(h0)
    return y
  #Compute output (mean) and loss for sets of x and y.
  def FwdLoss(self, x_data, y_data, train):
    y= self.Forward(x_data, train)
    t= Variable(y_data)
    return F.mean_squared_error(y, t), y
  #Compute output (error) for a set of x.
  def ForwardErr(self, x_data, train):
    dropout= IfNone(self.Options['dropout_err'], self.Options['dropout'])
    if not dropout: train= False
    dratio= IfNone(self.Options['dropout_ratio_err'], self.Options['dropout_ratio'])
    x= Variable(x_data)
    h0= x
    for l in self.f_names_err[:-1]:
      h1= F.dropout(F.relu(getattr(self.model_err,l)(h0)), ratio=dratio, train=train)
      h0= h1
    y= getattr(self.model_err,self.f_names_err[-1])(h0)
    return y
  #Compute output (error) and loss for sets of x and y.
  def FwdLossErr(self, x_data, y_data, train):
    y= self.ForwardErr(x_data, train)
    t= Variable(y_data)
    return loss_for_error2(y, t, self.Options['error_loss_neg_weight']), y
  #Forward computation of neural net considering input distribution.
  #Runs the layers manually with numpy (not chainer) so that an input
  #covariance x_var can be propagated through each ReLU via ReLUGaussV.
  #Returns (y, y_var, gradient); y_var/gradient are None unless requested.
  def ForwardX(self, x, x_var=None, with_var=False, with_grad=False):
    zero= np.float32(0)
    x= np.array(x,np.float32); x= x.reshape(x.size,1)
    #Error model: its (squared) output becomes the baseline output variance.
    if with_var:
      h0= x
      for ln in self.f_names_err[:-1]:
        l= getattr(self.model_err,ln)
        hl1= l.W.dot(h0) + l.b.reshape(l.b.size,1)  #W h0 + b
        h1= np.maximum(zero, hl1)  #ReLU(hl1)
        h0= h1
      l= getattr(self.model_err,self.f_names_err[-1])
      y_err0= l.W.dot(h0) + l.b.reshape(l.b.size,1)
      y_var0= np.diag((y_err0*y_err0).ravel())
    else:
      y_var0= None
    x_var, var_is_zero= RegularizeCov(x_var, x.size, np.float32)
    if var_is_zero:
      #No input uncertainty: plain deterministic forward pass.
      g= None  #Gradient
      h0= x
      for ln in self.f_names[:-1]:
        l= getattr(self.model,ln)
        hl1= l.W.dot(h0) + l.b.reshape(l.b.size,1)  #W h0 + b
        h1= np.maximum(zero, hl1)  #ReLU(hl1)
        if with_grad:
          g2= l.W.T.dot(np.diag((hl1>0.0).ravel().astype(np.float32)))  #W diag(step(hl1))
          g= g2 if g==None else g.dot(g2)
        h0= h1
      l= getattr(self.model,self.f_names[-1])
      y= l.W.dot(h0) + l.b.reshape(l.b.size,1)
      if with_grad:
        #NOTE(review): when g is still None here (no hidden layers) g2 is
        #undefined / stale -- looks suspicious, verify against the 2+ layer case.
        g= g2 if g==None else g.dot(l.W.T)
      return y, y_var0, g
    else:
      #Input uncertainty given: propagate mean and variance through each
      #ReLU with the Gaussian moment-matching helpers.
      g= None  #Gradient
      h0= x
      h0_var= x_var
      for ln in self.f_names[:-1]:
        l= getattr(self.model,ln)
        hl1= l.W.dot(h0) + l.b.reshape(l.b.size,1)  #W h0 + b
        hl1_dvar= np.diag( l.W.dot(h0_var.dot(l.W.T)) ).reshape(hl1.size,1)  #diag(W h0_var W^T)
        h1,h1_dvar= ReLUGaussV(hl1,hl1_dvar)  #ReLU_gauss(hl1,hl1_dvar)
        h1_var= np.diag(h1_dvar.ravel())  #To a full matrix
        if with_grad:
          g2= l.W.T.dot(np.diag(ReLUGaussGradV(hl1,hl1_dvar).ravel()))
          g= g2 if g==None else g.dot(g2)
        h0= h1
        h0_var= h1_var
      l= getattr(self.model,self.f_names[-1])
      y= l.W.dot(h0) + l.b.reshape(l.b.size,1)
      y_var= None
      if with_var:
        y_var= l.W.dot(h0_var.dot(l.W.T)) + y_var0
      if with_grad:
        g= g2 if g==None else g.dot(l.W.T)
      return y, y_var, g
  #Training code common for mean model and error model.
  #opt supplies data, optimizer, forward/loss function and stop criteria;
  #iterates mini-batches until the moving-average loss std-dev drops below
  #loss_stddev_stop or num_max_update updates are reached.  Logs to file.
  @staticmethod
  def TrainNN(**opt):
    N= len(opt['x_train'])
    loss_maf= TExpMovingAverage1(init_sd=opt['loss_stddev_init']*opt['loss_stddev_stop'],
                                 alpha=opt['loss_maf_alpha'])
    batchsize= min(opt['batchsize'], N)  #Adjust mini-batch size for too small N
    num_max_update= opt['num_max_update']
    n_epoch= num_max_update/(N/batchsize)+1  #Python-2 integer division intended.
    is_updating= True
    n_update= 0
    sum_loss= 0.0
    fp= OpenW(opt['log_filename'],'w')
    for epoch in xrange(n_epoch):
      perm= np.random.permutation(N)
      # Train model per batch
      for i in xrange(0, N, batchsize):
        x_batch= opt['x_train'][perm[i:i+batchsize]]
        y_batch= opt['y_train'][perm[i:i+batchsize]]
        if opt['gpu'] >= 0:
          x_batch= cuda.to_gpu(x_batch)
          y_batch= cuda.to_gpu(y_batch)
        opt['optimizer'].zero_grads()
        loss, pred= opt['fwd_loss'](x_batch, y_batch, train=True)
        loss.backward()  #Computing gradients
        opt['optimizer'].update()
        n_update+= 1
        sum_loss+= float(cuda.to_cpu(loss.data))
        #Check the stop condition every num_check_stop updates.
        if n_update % opt['num_check_stop'] == 0:
          #loss_maf.Update(float(cuda.to_cpu(loss.data)))
          loss_maf.Update(sum_loss / opt['num_check_stop'])
          sum_loss= 0.0
          if opt['verb']: print 'Training %s:'%opt['code'], epoch, n_update, loss_maf.Mean, loss_maf.StdDev
          fp.write('%d %d %f %f\n' % (epoch, n_update, loss_maf.Mean, loss_maf.StdDev))
          if loss_maf.StdDev < opt['loss_stddev_stop']:
            is_updating= False
            break
        if n_update >= num_max_update:
          is_updating= False
          break
      if not is_updating: break
    fp.close()
  #Main update code in which we train the mean model, generate y-error data, train the error model.
  def UpdateMain(self):
    if self.NSamples < self.Options['num_min_predictable']: return
    #Train mean model
    opt={
      'code': '{code}-{n:05d}'.format(n=self.Params['num_train'], code=self.Options['name']+'mean'),
      'log_filename': self.Options['train_log_file'].format(n=self.Params['num_train'], name=self.Options['name'], code='mean', base=self.Options['base_dir']),
      'verb': self.Options['verbose'],
      'gpu': self.Options['gpu'],
      'fwd_loss': self.FwdLoss,
      'optimizer': self.optimizer,
      'x_train': self.DataX,
      'y_train': self.DataY,
      'batchsize': self.Options['batchsize'],
      'num_max_update': self.Options['num_max_update'],
      'num_check_stop': self.Options['num_check_stop'],
      'loss_maf_alpha': self.Options['loss_maf_alpha'],
      'loss_stddev_init': self.Options['loss_stddev_init'],
      'loss_stddev_stop': self.Options['loss_stddev_stop'],
      }
    self.TrainNN(**opt)
    # Generate training data for error model: |prediction - target|.
    preds= []  #NOTE(review): unused; kept as-is.
    x_batch= self.DataX[:]
    if self.Options['gpu'] >= 0:
      x_batch= cuda.to_gpu(x_batch)
    pred= self.Forward(x_batch, train=False)
    D= self.DataY.shape[1]  #NOTE(review): unused; kept as-is.
    self.DataYErr= np.abs(cuda.to_cpu(pred.data) - self.DataY)
    #Train error model (each _err option falls back to its mean-model value).
    opt={
      'code': '{code}-{n:05d}'.format(n=self.Params['num_train'], code=self.Options['name']+'err'),
      'log_filename': self.Options['train_log_file'].format(n=self.Params['num_train'], name=self.Options['name'], code='err', base=self.Options['base_dir']),
      'verb': self.Options['verbose'],
      'gpu': self.Options['gpu'],
      'fwd_loss': self.FwdLossErr,
      'optimizer': self.optimizer_err,
      'x_train': self.DataX,
      'y_train': self.DataYErr,
      'batchsize': IfNone(self.Options['batchsize_err'], self.Options['batchsize']),
      'num_max_update': IfNone(self.Options['num_max_update_err'], self.Options['num_max_update']),
      'num_check_stop': IfNone(self.Options['num_check_stop_err'], self.Options['num_check_stop']),
      'loss_maf_alpha': IfNone(self.Options['loss_maf_alpha_err'], self.Options['loss_maf_alpha']),
      'loss_stddev_init': IfNone(self.Options['loss_stddev_init_err'], self.Options['loss_stddev_init']),
      'loss_stddev_stop': IfNone(self.Options['loss_stddev_stop_err'], self.Options['loss_stddev_stop']),
      }
    self.TrainNN(**opt)
    self.Params['num_train']+= 1
    #End of training NNs
    self.is_predictable= True
  #Incrementally update the internal parameters with a single I/O pair (x,y).
  #If x and/or y are None, only updating internal parameters is done.
  def Update(self, x=None, y=None, not_learn=False):
    #TFunctionApprox.Update(self, x, y, not_learn)
    if x!=None or y!=None:
      if len(self.DataX)==0:
        self.DataX= np.array([self.ToVec(x)],np.float32)
        self.DataY= np.array([self.ToVec(y)],np.float32)
      else:
        self.DataX= np.vstack((self.DataX, self.ToVec(x)))
        self.DataY= np.vstack((self.DataY, self.ToVec(y)))
    if not_learn: return
    self.UpdateMain()
  #Incrementally update the internal parameters with I/O data (X,Y).
  #If x and/or y are None, only updating internal parameters is done.
  def UpdateBatch(self, X=None, Y=None, not_learn=False):
    #TFunctionApprox.UpdateBatch(self, X, Y, not_learn)
    if X!=None or Y!=None:
      if len(self.DataX)==0:
        self.DataX= np.array(X, np.float32)
        self.DataY= np.array(Y, np.float32)
      else:
        self.DataX= np.vstack((self.DataX, np.array(X, np.float32)))
        self.DataY= np.vstack((self.DataY, np.array(Y, np.float32)))
    if not_learn: return
    self.UpdateMain()
  '''
  Do prediction.
    Return a TPredRes instance.
    x_var: Covariance of x.  If a scholar is given, we use diag(x_var,x_var,..).
    with_var: Whether compute a covariance matrix of error at the query point as well.
    with_grad: Whether compute a gradient at the query point as well.
  '''
  def Predict(self, x, x_var=0.0, with_var=False, with_grad=False):
    res= self.TPredRes()
    #x_batch= np.array([self.ToVec(x)],np.float32)
    #if self.Options['gpu'] >= 0:
      #x_batch= cuda.to_gpu(x_batch)
    #pred= self.Forward(x_batch, train=False)
    #res.Y= cuda.to_cpu(pred.data)[0]
    #if with_var:
      #pred_err= self.ForwardErr(x_batch, train=False)
      #res.Var= np.diag(cuda.to_cpu(pred_err.data)[0])
      #res.Var= res.Var*res.Var
    #Delegates to the numpy forward pass that can propagate input covariance.
    y, y_var, g= self.ForwardX(x, x_var, with_var, with_grad)
    res.Y= y
    res.Var= y_var
    res.Grad= g
    return res
def TNNRegressionExample1():
  """Demo: fit a 1-D step function with TNNRegression, dump gnuplot data.

  Toggle load_model/train_model below to train from scratch or reload a
  previously saved model from /tmp/dnn/.  Output files (smpl_*.dat,
  nn_test*.dat) are plain text for plotting.  Python-2 script code
  (print statements, file()).
  """
  #TrueFunc= lambda x: 0.5*x
  #TrueFunc= lambda x: 1.2+math.sin(x)
  #TrueFunc= lambda x: 1.2+math.sin(3*x)
  #TrueFunc= lambda x: 2.0*x**2
  #TrueFunc= lambda x: 4.0-x if x>0.0 else 0.0
  #TrueFunc= lambda x: 4.0 if 0.0<x and x<2.5 else 0.0
  #TrueFunc= lambda x: 0.0 if x<0.0 else (2.0 if x<2.5 else 4.0)
  TrueFunc= lambda x: 0.0 if x<1.0 else 4.0
  #TEST: NN's estimation is bad where |x| is far from zero.
  Bound= [-3.0,5.0]
  #Bound= [-5.0,3.0]
  #Bound= [-3.0,3.0]
  #Bound= [1.0,5.0]
  #Bound= [-5.0,-1.0]
  #Bound= [-5.0,5.0]
  def GenData(n, noise):
    #Sample n random x in Bound and noisy y=TrueFunc(x).
    #data_x= [[x+1.0*Rand()] for x in FRange1(*Bound,num_div=n)]
    data_x= [[Rand(*Bound)] for k in range(n)]
    data_y= [[TrueFunc(x[0])+noise*Rand()] for x in data_x]
    #data_y= [[TrueFunc(x[0])+(noise if abs(x[0])<2.0 else 0.0)*Rand()] for x in data_x]
    return data_x, data_y
  #load_model,train_model= False,True
  load_model,train_model= True,False
  if train_model:
    x_train,y_train= GenData(100, noise=0.2)  #TEST: n samples, noise
    print 'Num of samples for train:',len(y_train)
    # Dump data for plot:
    fp1= file('/tmp/dnn/smpl_train.dat','w')
    for x,y in zip(x_train,y_train):
      fp1.write('%s #%i# %s\n' % (' '.join(map(str,x)),len(x)+1,' '.join(map(str,y))))
    fp1.close()
  #Regular test grid over Bound (used for all dumps below).
  x_test= np.array([[x] for x in FRange1(*Bound,num_div=100)]).astype(np.float32)
  y_test= np.array([[TrueFunc(x[0])] for x in x_test]).astype(np.float32)
  # Dump data for plot:
  fp1= file('/tmp/dnn/smpl_test.dat','w')
  for x,y in zip(x_test,y_test):
    fp1.write('%s #%i# %s\n' % (' '.join(map(str,x)),len(x)+1,' '.join(map(str,y))))
  fp1.close()
  batch_train= True
  #batch_train= False
  options= {}
  #options['AdaDelta_rho']= 0.5
  #options['AdaDelta_rho']= 0.9
  #options['dropout']= False
  #options['dropout_ratio']= 0.01
  options['loss_stddev_stop']= 1.0e-4
  options['loss_stddev_stop_err']= 1.0e-4
  options['num_max_update']= 20000
  #options['batchsize']= 5
  #options['batchsize']= 10
  #options['num_check_stop']= 50
  #options['loss_maf_alpha']= 0.4
  options['loss_stddev_stop']= 1.0e-4  #NOTE(review): duplicate of the assignment above.
  options['loss_stddev_stop_err']= 1.0e-4  #NOTE(review): duplicate of the assignment above.
  model= TNNRegression()
  #print 'model.Options=',model.Options
  model.Load({'options':options})
  if load_model:
    model.Load(LoadYAML('/tmp/dnn/nn_model.yaml'), '/tmp/dnn/')
  model.Init()
  #print 'model.Options=',model.Options
  if train_model:
    if not batch_train:
      #Incremental mode: accumulate samples, learn on every 10th one.
      for x,y,n in zip(x_train,y_train,range(len(x_train))):
        print '========',n,'========'
        model.Update(x,y,not_learn=((n+1)%min(10,len(x_train))!=0))
      #model.Update()
    else:
      #model.Options['dropout_ratio']= options['dropout_ratio']
      model.UpdateBatch(x_train,y_train)
      #model.Options['dropout_ratio']= 0.0
      #model.UpdateBatch()
  if not load_model:
    SaveYAML(model.Save('/tmp/dnn/'), '/tmp/dnn/nn_model.yaml')
  # Dump data for plot: predictions with zero input variance.
  fp1= file('/tmp/dnn/nn_test%04i.dat'%1,'w')
  for x in x_test:
    with_var,with_grad= True, True
    pred= model.Predict(x,x_var=0.0**2,with_var=with_var,with_grad=with_grad)
    y= pred.Y.ravel()
    y_err= np.sqrt(np.diag(pred.Var)) if with_var else [0.0]
    grad= pred.Grad.ravel() if with_grad else [0.0]
    fp1.write('%s #%i# %s %s %s\n' % (' '.join(map(str,x)), len(x)+1, ' '.join(map(str,y)), ' '.join(map(str,y_err)), ' '.join(map(str,grad)) ))
  fp1.close()
  # Dump data for plot: predictions with input std-dev 0.5.
  fp1= file('/tmp/dnn/nn_test%04i.dat'%2,'w')
  for x in x_test:
    with_var,with_grad= True, True
    pred= model.Predict(x,x_var=0.5**2,with_var=with_var,with_grad=with_grad)
    y= pred.Y.ravel()
    y_err= np.sqrt(np.diag(pred.Var)) if with_var else [0.0]
    grad= pred.Grad.ravel() if with_grad else [0.0]
    fp1.write('%s #%i# %s %s %s\n' % (' '.join(map(str,x)), len(x)+1, ' '.join(map(str,y)), ' '.join(map(str,y_err)), ' '.join(map(str,grad)) ))
  fp1.close()
|
#!/usr/bin/python
import time
from geopy.geocoders import Nominatim
# Shared geocoder instance (OpenStreetMap Nominatim).
geolocator = Nominatim()

def getGPS(place):
    """Geocode *place* and return '(lat, lon)' as a string, or 'NOT FOUND'."""
    #location = geolocator.geocode("Acquafredda di Maratea, Italia")
    result = geolocator.geocode(place)
    if not result:
        return 'NOT FOUND'
    #print(str(place).encode("utf-8"))
    #print(str(result).encode("utf-8"))
    return '({0}, {1})'.format(result.latitude, result.longitude)
# Geocode every city listed in cities.tsv (one name per line) and write
# "city<TAB>(lat, lon)" rows to citiesGPS.tsv.  Fixes: the original opened
# both files without `with`, so an exception from getGPS leaked the handles
# (and the output handle had a typo'd name); context managers guarantee
# closing, and the output is flushed even on failure part-way through.
in_path = "/Users/fedja/Work/Code/JavaCode/DialectCrawler/data/cities.tsv"
out_path = "/Users/fedja/Work/Code/JavaCode/DialectCrawler/data/citiesGPS.tsv"
with open(in_path) as input_city_file, open(out_path, "w") as output_city_file:
    for line in input_city_file:
        city = line.strip()
        # Qualify with the country to disambiguate the lookup.
        gps = getGPS(city + ", Italia")
        output_line = city + "\t" + gps
        print(output_line)
        #print(line)
        output_city_file.write(output_line + '\n')
        # Be polite to the Nominatim rate limit.
        time.sleep(0.1)
from django.contrib import admin
from .models import ProfileUpload,MyDetails, Author, Document,Cars
# Register your models here.
# Expose the app's models in the Django admin (same registration order
# as listing them individually).
for _model in (ProfileUpload, MyDetails, Author, Document, Cars):
    admin.site.register(_model)
|
from SearchProblem import Problem
from SearchProblem import Node
# Depth limit for iterative deepening.  Not referenced in this base class;
# presumably consumed by concrete solver subclasses -- TODO confirm.
MAX_DEPTH = 20

class SearchProblemSolver:
    """This class performs an iterative deepening
    DFS search on a given graph.

    Base class: solve() is abstract and must be overridden by a concrete
    search strategy; calling the instance runs the search and unpacks the
    result.
    """
    def __init__(self, problem: Problem.ProblemSearch):
        # Problem to solve; the root node wraps its source state with cost 0.
        self.problem = problem
        self.start_node = Node.Node(problem.source, 0)

    def __call__(self):
        # Convenience entry point: run solve() and unpack the goal node's
        # solution, or (None, None) when no goal was reached.
        solution = self.solve()
        if solution is None:
            return None, None
        return solution.solution()

    def solve(self):
        # Abstract: subclasses return a goal Node (or None).
        raise NotImplementedError()

    def is_goal(self, node: Node.Node):
        # Delegate the goal test to the problem definition.
        return self.problem.is_goal(node)

    def expand(self, node):
        # Generate the node's successors via the problem's transition model.
        return node.expand(self.problem)
|
from django.shortcuts import render
from django.views import View
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from .models import OrderReceiving
from .forms import *
from customers.models import Customer
# Create your views here.
@method_decorator(login_required(login_url='../users/login'), name='get')
class OrderMainView(View):
    """List received orders and accept new ones via OrderReceivingForm.

    GET renders the order list plus an empty form; POST saves a valid
    submission (or re-renders the bound form with its errors) and shows
    the list again.
    """
    template_name = 'order_main.html'
    # Lazy queryset; evaluated when the template iterates it.
    queryset = OrderReceiving.objects.all()

    def get_query_set(self):
        return self.queryset

    def get(self, request):
        form = OrderReceivingForm()
        context = {'orders': self.get_query_set(), 'form': form}
        return render(request, self.template_name, context)

    def post(self, request):
        form = OrderReceivingForm(request.POST)
        if form.is_valid():
            form.save()
            # Present a fresh, unbound form after a successful save.
            form = OrderReceivingForm()
        # Fix: include 'orders' like get() does -- the original POST context
        # omitted it, so the order listing disappeared after every submit.
        context = {'orders': self.get_query_set(), 'form': form}
        return render(request, self.template_name, context)
|
import time
import glob
import random
import psutil
import curses
import msgpack
import inotify.adapters
from threading import Thread, Lock
from common.util import read_binary_file
from kafl_fuzz import PAYQ
# kAFL work directory -- empty here; presumably assigned by the launcher
# before MonitorDrawer starts (TODO confirm against the entry point).
WORKDIR = ''
# fuzzed service name shown in the title banner
SVCNAME = ''
# current payload
PAYLOAD = ''
# screen width
WIDTH = 80
# color pair code
WHITE = 1
RED = 2
GREEN = 3
YELLOW = 4
BLUE = 5
MAGENTA = 6
CYAN = 7
# curses text attributes used when drawing
BOLD = curses.A_BOLD
DIM = curses.A_DIM
# helper function for color pairs
def color(code):
    """Translate a color-pair id (WHITE..CYAN) into its curses attribute."""
    attr = curses.color_pair(code)
    return attr
# helper function for formatting number
def pnum(num):
    """Render a non-negative count compactly: plain integer up to 9999,
    then one decimal with a k/m/g/t/p suffix (powers of 1000)."""
    assert num >= 0
    if num <= 9999:
        return "%d" % num
    for suffix in "kmgtp":
        num /= 1000.0
        if num <= 999:
            return "%.1f%s" % (num, suffix)
    assert False
def pfloat(flt):
    """One-decimal rendering for small non-negative values; large values
    fall back to pnum's suffixed format."""
    assert flt >= 0
    if flt > 999:
        return pnum(flt)
    return "%.1f" % flt
def pbyte(num):
    """Render a non-negative byte count: plain integer up to 999, then one
    decimal with a k/m/g/t/p suffix (powers of 1024)."""
    assert num >= 0
    if num <= 999:
        return "%d" % num
    for suffix in "kmgtp":
        num /= 1024.0
        if num <= 999:
            return "%.1f%s" % (num, suffix)
    assert False
# helper function for formatting timestamps
def ptime(secs):
    """Render a duration in seconds as 'D days, H hrs, M min, S sec'.
    Falsy input (0, None) yields 'none yet'."""
    if not secs:
        return "none yet"
    mins, seconds = divmod(int(secs), 60)
    hours, mins = divmod(mins, 60)
    days, hours = divmod(hours, 24)
    return "%d days, %d hrs, %d min, %d sec" % (days, hours, mins, seconds)
class MonitorInterface:
    """Curses rendering helper that draws the kAFL status panes.

    self.y tracks the next screen row; every print_* method draws one or
    more rows at fixed columns (box-drawing layout is WIDTH=80 based) and
    advances self.y.  refresh() resets the cursor and flushes the screen.
    """
    def __init__(self, stdscr):
        self.stdscr = stdscr
        # Next screen row to draw on.
        self.y = 0
    def print_test(self):
        # Show a prefix of the most recent payload pulled from the fuzzer queue.
        global PAYLOAD
        if not PAYQ.empty():
            PAYLOAD = PAYQ.get()
        x = 0
        # Blank the previous text before writing the (possibly shorter) new one.
        self.stdscr.addstr(self.y, x, ' ' * 40)
        self.stdscr.addstr(self.y, x, PAYLOAD[:20])
        self.y += 1
    def print_title(self):
        # Two centered banner lines: "kAFL (SVCNAME)" and a credit line.
        title1 = 'kAFL '
        title2 = f'({SVCNAME})'
        title3 = '2020 KITRI Best of the Best'
        center_len = len(title1) + len(title2)
        pad_len1 = (WIDTH - center_len) // 2
        pad_len2 = (WIDTH - len(title3)) // 2
        pad1 = ' ' * pad_len1
        pad2 = ' ' * pad_len2
        x = 0
        self.y += 1 # empty line
        self.stdscr.addstr(self.y, x, pad1, BOLD)
        x += pad_len1
        self.stdscr.addstr(self.y, x, title1, color(YELLOW) + BOLD)
        x += len(title1)
        self.stdscr.addstr(self.y, x, title2, color(GREEN) + BOLD)
        x += len(title2)
        self.stdscr.addstr(self.y, x, pad1, BOLD)
        self.y += 1
        x = 0
        self.stdscr.addstr(self.y, x, pad2, BOLD)
        x += pad_len2
        self.stdscr.addstr(self.y, x, title3, color(CYAN) + BOLD)
        self.y += 2
    def print_guest_and_overall(self):
        # Top border row with the "guest timing" / "overall results" captions.
        self.stdscr.addstr(self.y, 0, '┌─', DIM)
        self.stdscr.addstr(self.y, 2, ' guest timing ', color(CYAN))
        self.stdscr.addstr(self.y, 16, '─'*37 + '┬─', DIM)
        self.stdscr.addstr(self.y, 55, ' overall results ', color(CYAN))
        self.stdscr.addstr(self.y, 72, '─'*7 + '┐', DIM)
        self.y += 1
    def print_execution_and_map(self):
        # Divider row introducing the "execution progress" / "map coverage" panes.
        self.stdscr.addstr(self.y, 0, '├─', DIM)
        self.stdscr.addstr(self.y, 2, ' execution progress ', color(CYAN))
        self.stdscr.addstr(self.y, 22, '─'*14 + '┬─', DIM)
        self.stdscr.addstr(self.y, 38, ' map coverage ', color(CYAN))
        self.stdscr.addstr(self.y, 52, '─┴'+ '─'*25 + '┤', DIM)
        self.y += 1
    def print_node_and_machine(self):
        # Divider row introducing the "node progress" / "machine stats" panes.
        self.stdscr.addstr(self.y, 0, '├─', DIM)
        self.stdscr.addstr(self.y, 2, ' node progress ', color(CYAN))
        self.stdscr.addstr(self.y, 17, '─'*19 + '┼─', DIM)
        self.stdscr.addstr(self.y, 38, ' machine stats ', color(CYAN))
        self.stdscr.addstr(self.y, 53, '─'*26 + '┤', DIM)
        self.y += 1
    def print_payload_info(self):
        # Divider row introducing the "payload info" pane.
        self.stdscr.addstr(self.y, 0, '├─', DIM)
        self.stdscr.addstr(self.y, 2, ' payload info ', color(CYAN))
        self.stdscr.addstr(self.y, 16, '─'*20 + '┴─', DIM)
        self.stdscr.addstr(self.y, 37, '─'*42 + '┤', DIM)
        self.y += 1
    def print_bottom_line(self):
        # Closing border of the box.  NOTE(review): does not advance self.y,
        # unlike the other printers -- presumably intentional as the last row.
        self.stdscr.addstr(self.y, 0, '└' + '─'*78 + '┘', DIM)
    def print_info_line(self, pairs, sep=" │ ", end="│", prefix="", dynaidx=None):
        # Draw one row of "label : value" cells separated by box characters.
        # pairs: iterable of (field_width, label, value) tuples; an empty
        # label+value renders as blank padding of the same width.
        # NOTE(review): 'end' and 'dynaidx' are currently unused.
        x = 0
        infos = []
        for info in pairs:
            infolen = len(info[1]) + len(info[2])
            if infolen == 0:
                infos.append(" ".ljust(info[0]+2))
            else:
                infos.append(" %s : %s %s" % (
                    info[1], info[2], " ".ljust(info[0]-infolen)))
        self.stdscr.addstr(self.y, x, '│', DIM)
        x += 1
        for info in infos:
            # NOTE(review): x advances by len(info) only, so a non-empty
            # 'prefix' would drift the columns -- confirm callers pass ''.
            self.stdscr.addstr(self.y, x, prefix + info + " ")
            x += len(info)
            self.stdscr.addstr(self.y, x, sep, DIM)
            x += len(sep)
            self.stdscr.addstr(self.y, x, " ", DIM)
            x += len(" ")
        self.y += 1
    def refresh(self):
        # Restart drawing from the top for the next frame and flush to screen.
        self.y = 0
        self.stdscr.refresh()
class MonitorData:
    """Aggregated fuzzer statistics backing the monitor UI.

    Reads msgpack-encoded stats files from the kAFL work directory
    (global "stats", per-slave "slave_stats_<i>", per-node metadata) and
    keeps machine-level numbers (cpu/mem via psutil) alongside.  update()
    is driven by inotify events from MonitorDrawer.

    Fix vs. original: read_file used a bare `except:`, which also swallowed
    KeyboardInterrupt/SystemExit; narrowed to `except Exception:` while
    keeping the best-effort retry behavior.
    """
    def __init__(self, workdir):
        self.workdir = workdir
        self.exec_avg = 0
        self.slave_stats = []
        self.load_initial()

    def load_initial(self):
        """Snapshot machine stats and load all existing stats/node files."""
        self.cpu = psutil.cpu_times_percent(interval=0.01, percpu=False)
        self.mem = psutil.virtual_memory()
        self.cores_phys = psutil.cpu_count(logical=False)
        self.cores_virt = psutil.cpu_count(logical=True)
        self.stats = self.read_file("stats")
        # add slave stats
        num_slaves = self.stats.get("num_slaves", 0)
        for slave_id in range(0, num_slaves):
            self.slave_stats.append(self.read_file("slave_stats_%d" % slave_id))
        self.starttime = min([x["start_time"] for x in self.slave_stats])
        # add node information
        self.nodes = {}
        for metadata in glob.glob(self.workdir + "/metadata/node_*"):
            self.load_node(metadata)
        self.aggregate()

    def load_node(self, name):
        """(Re)load one node metadata file; the id is the numeric suffix."""
        node_id = int(name.split("_")[-1])
        self.nodes[node_id] = self.read_file("metadata/node_%05d" % node_id)

    def runtime(self):
        """Longest per-slave run time, in seconds."""
        return max([x["run_time"] for x in self.slave_stats])

    def aggregate(self):
        """Recompute per-exit-reason counters and last-found timestamps."""
        self.aggregated = {
            "fav_states": {},
            "normal_states": {},
            "exit_reasons": {"regular": 0, "crash": 0, "kasan": 0, "timeout": 0},
            "last_found": {"regular": 0, "crash": 0, "kasan": 0, "timeout": 0}
        }
        for nid in self.nodes:
            node = self.nodes[nid]
            self.aggregated["exit_reasons"][node["info"]["exit_reason"]] += 1
            if node["info"]["exit_reason"] == "regular":
                # Nodes with favored bits are tallied separately.
                states = self.aggregated["normal_states"]
                if len(node["fav_bits"]) > 0:
                    states = self.aggregated["fav_states"]
                nodestate = node["state"]["name"]
                states[nodestate] = states.get(nodestate, 0) + 1
            last_found = self.aggregated["last_found"][node["info"]["exit_reason"]]
            this_found = node["info"]["time"]
            if last_found < this_found:
                self.aggregated["last_found"][node["info"]["exit_reason"]] = this_found

    def load_slave(self, id):
        self.slave_stats[id] = self.read_file("slave_stats_%d" % id)

    def load_global(self):
        self.stats = self.read_file("stats")

    def node_size(self, nid):
        return self.nodes[nid]["payload_len"]

    def node_parent_id(self, nid):
        return self.nodes[nid]["info"]["parent"]

    def num_slaves(self):
        return len(self.slave_stats)

    def num_found(self, reason):
        return self.aggregated["exit_reasons"][reason]

    def cpu_used(self):
        """User+system CPU percentage from the last snapshot."""
        return self.cpu.user + self.cpu.system

    def ram_used(self):
        """Used RAM as a percentage of total."""
        return 100 * float(self.mem.used) / float(self.mem.total)

    def slave_input_id(self, i):
        return self.slave_stats[i]["node_id"]

    def slave_stage(self, i):
        """Short label of what slave i is doing (method name or stage)."""
        method = self.slave_stats[i].get("method", None)
        stage = self.slave_stats[i].get("stage", "waiting...")
        if method:
            #return "%s/%s" % (stage[0:6],method[0:12])
            return "%s" % method[0:14]
        else:
            return stage[0:14]

    def execs_p_sec_avg(self):
        """Average executions per second over the whole run."""
        return self.total_execs() / self.runtime()

    def total_execs(self):
        return sum([x["total_execs"] for x in self.slave_stats])

    def time_since(self, reason):
        """Seconds since the last finding with this exit reason, or None."""
        time_stamp = self.aggregated["last_found"][reason]
        if not time_stamp:
            return None
        return self.starttime + self.runtime() - time_stamp

    def bitmap_size(self):
        # Fixed 64 KiB coverage bitmap.
        return 64 * 1024

    def bitmap_used(self):
        return self.stats["bytes_in_bitmap"]

    def p_coll(self):
        """Bitmap fill ratio (collision likelihood proxy), in percent."""
        return 100.0 * float(self.bitmap_used()) / float(self.bitmap_size())

    def update(self, pathname, filename):
        """Reload whichever stats file an inotify event reported."""
        if "node_" in filename:
            self.load_node(pathname + "/" + filename)
            self.aggregate()
        elif "slave_stats" in filename:
            for i in range(0, self.num_slaves()):
                self.load_slave(i)
        elif filename == "stats":
            self.load_global()

    def read_file(self, name):
        """Read and msgpack-decode a file under the workdir.

        Retries a few times to ride out concurrent writers; returns None
        when the file stays unreadable.
        """
        retry = 4
        data = None
        while retry > 0:
            try:
                data = read_binary_file(self.workdir + "/" + name)
                break
            except Exception:
                # Narrowed from a bare 'except:' so KeyboardInterrupt and
                # SystemExit still propagate; the file may be mid-rewrite.
                retry -= 1
        if data:
            return msgpack.unpackb(data, raw=False, strict_map_key=False)
        else:
            return None
class MonitorDrawer:
    """Curses front-end of the fuzzer monitor.

    Builds a MonitorInterface on the supplied curses screen, loads
    statistics through MonitorData, and keeps the display current with
    three threads: an inotify watcher over the workdir, a CPU/memory
    sampler, and a periodic redraw loop.
    """
    def __init__(self, stdscr):
        global WORKDIR
        # mutex lock: inf_mutex serializes stats updates from the two
        # watcher threads; key makes draw() itself a critical section.
        self.inf_mutex = Lock()
        self.key = Lock()
        # create pairs of foreground and background colors
        curses.init_pair(WHITE, curses.COLOR_WHITE, curses.COLOR_BLACK)
        curses.init_pair(RED, curses.COLOR_RED, curses.COLOR_BLACK)
        curses.init_pair(GREEN, curses.COLOR_GREEN, curses.COLOR_BLACK)
        curses.init_pair(YELLOW, curses.COLOR_YELLOW, curses.COLOR_BLACK)
        curses.init_pair(BLUE, curses.COLOR_BLUE, curses.COLOR_BLACK)
        curses.init_pair(MAGENTA, curses.COLOR_MAGENTA, curses.COLOR_BLACK)
        curses.init_pair(CYAN, curses.COLOR_CYAN, curses.COLOR_BLACK)
        # set default color pair
        stdscr.bkgd(curses.color_pair(1))
        # create drawing interface
        self.inf = MonitorInterface(stdscr)
        self.stdscr = stdscr
        # create initial statistics
        self.finished = False
        self.data = MonitorData(WORKDIR)
        # create child threads for loop
        self.watcher = Thread(target=self.watch, args=(WORKDIR,))
        self.cpu_watcher = Thread(target=self.watch_cpu, args=())
        self.thread_loop = Thread(target=self.loop)
        # start watcher threads (daemonized so they die with the process)
        self.watcher.daemon = True
        self.watcher.start()
        self.cpu_watcher.daemon = True
        self.cpu_watcher.start()
        # start loop thread and block the constructor until it exits
        stdscr.refresh()
        self.thread_loop.start()
        self.thread_loop.join()
    def loop(self):
        """Redraw the screen in a tight loop (10 us sleep between frames).

        NOTE(review): try/finally with no except clause -- an exception
        raised by draw() still terminates this thread after the sleep.
        """
        while True:
            try:
                self.draw()
            finally:
                time.sleep(0.00001)
    def watch(self, workdir):
        """Watch *workdir* and its metadata/ subdir via inotify and feed
        each IN_MOVED_TO event into the statistics model, then redraw."""
        d = self.data
        mask = (inotify.constants.IN_MOVED_TO)
        self.inotify = inotify.adapters.Inotify()
        i = self.inotify
        i.add_watch(workdir, mask)
        i.add_watch(workdir + "/metadata/", mask)
        for event in i.event_gen(yield_nones=False):
            if self.finished:
                return
            self.inf_mutex.acquire()
            try:
                (_, type_names, path, filename) = event
                d.update(path, filename)
                self.draw()
            finally:
                self.inf_mutex.release()
    def watch_cpu(self):
        """Sample CPU, RAM and swap usage (2 s interval) and redraw."""
        while True:
            if self.finished:
                return
            cpu_info = psutil.cpu_times_percent(interval=2, percpu=False)
            mem_info = psutil.virtual_memory()
            swap_info = psutil.swap_memory()
            self.inf_mutex.acquire()
            try:
                self.data.mem = mem_info
                self.data.cpu = cpu_info
                self.data.swap = swap_info
                self.draw()
            finally:
                self.inf_mutex.release()
    def draw(self):
        """Render one full frame of the monitor UI under the `key` lock."""
        # statistic data
        d = self.data
        # enter critical section
        self.key.acquire()
        # fuzzer graphics
        self.inf.print_title()
        self.inf.print_guest_and_overall()
        self.inf.print_info_line([
            (46, " run time", ptime(d.runtime())),
            (17, " crash", "%s" % (pnum((d.num_found("crash")))))])
        self.inf.print_info_line([
            (46, "last new path", ptime(d.time_since("regular"))),
            (17, " addsan", "%s" % (pnum((d.num_found("kasan")))))])
        self.inf.print_info_line([
            (46, " last crash", ptime(d.time_since("crash"))),
            (17, "timeout", "%s" % (pnum((d.num_found("timeout")))))])
        self.inf.print_info_line([
            (46, " last timeout", ptime(d.time_since("timeout"))),
            (17, "regular", "%s" % (pnum((d.num_found("regular")))))])
        self.inf.print_execution_and_map()
        self.inf.print_info_line([
            (29, " total execs", pnum(d.total_execs())),
            (34, " edges", "%s" % (pnum((d.bitmap_used()))))])
        self.inf.print_info_line([
            (29, " exec speed", str(pnum(d.execs_p_sec_avg())) + "/sec"),
            (34, "map density", "%s" % pfloat(d.p_coll()) + "%")])
        self.inf.print_node_and_machine()
        # per-slave rows: show node id/stage when a slave has an input
        for i in range(0, d.num_slaves()):
            nid = d.slave_input_id(i)
            if nid not in [None, 0]:
                self.inf.print_info_line([
                    (29, " node id", str(d.slave_input_id(i))),
                    (34, " cpu used", pnum(d.cpu_used()) + "%")])
                self.inf.print_info_line([
                    (29, " now trying", d.slave_stage(i)),
                    (34, "memory used", pnum(d.ram_used()) + "%")])
            else:
                self.inf.print_info_line([
                    (29, " node id", "N/A"),
                    (34, " cpu used", pnum(d.cpu_used()) + "%")])
                self.inf.print_info_line([
                    (29, " now trying", d.slave_stage(i)),
                    (34, "memory used", pnum(d.ram_used()) + "%")])
        # fetch payload from shared queue
        global PAYLOAD
        if not PAYQ.empty():
            PAYLOAD = PAYQ.get()
        payload_len = len(PAYLOAD)
        self.inf.print_payload_info()
        # NOTE(review): parent id is a hard-coded 5 here -- placeholder?
        self.inf.print_info_line([
            (72, " parent id", "%d" % (5))])
        self.inf.print_info_line([
            (72, " size", pbyte(payload_len) + " bytes")])
        self.inf.print_info_line([
            (72, " payload", PAYLOAD[:20])])
        self.inf.print_bottom_line()
        # refresh screen buffer
        self.inf.refresh()
        # exit critical section
        self.key.release()
def run(stdscr):
    """curses.wrapper entry point: construct the monitor UI on *stdscr*.

    MonitorDrawer's constructor blocks until the draw loop exits.
    """
    MonitorDrawer(stdscr)
def main(workdir):
    """Launch the curses monitor over *workdir*.

    Sets the module-level WORKDIR/SVCNAME globals read by the UI,
    waits briefly so the fuzzer can create its first stats files,
    then hands control to curses.
    """
    global WORKDIR, SVCNAME
    WORKDIR = workdir
    SVCNAME = 'testDriver'  # todo - receive in args
    # delay for files to be generated
    time.sleep(1)
    curses.wrapper(run)
# Copyright 2018 David Matthews
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
import time
from parallelpy.parallel_evaluate import PARALLEL_MODE_MPI_INTRA, PARALLEL_MODE_MPI_INTER, PARALLEL_MODE_POOL, setup, batch_complete_work, cleanup
from parallelpy.utils import Work, Letter
class HelloWorld(Work):
    """Trivial Work unit for exercising the parallel backend.

    The "computation" just copies the instance's own uuid into
    ``work``, so a full dispatch/letter round trip can be validated
    by comparing the two.
    """

    def __init__(self, id):
        self.id = id
        self.name = uuid.uuid1()  # unique token to check round-trip fidelity
        self.work = None

    def __repr__(self):
        return "Hello my id is: %d and my work is %s " % (self.id, str(self.work))

    def compute_work(self, serial=False):
        # The work result is simply our own uuid.
        self.work = self.name

    def write_letter(self):
        # Ship the computed result (and id) back to the dispatcher.
        return Letter(self.work, self.id)

    def open_letter(self, letter):
        # Receive the result computed on a worker.
        self.work = letter.get_data()

    def validate(self):
        # After a round trip the delivered work must equal the uuid
        # generated at construction time.
        assert self.name == self.work
if __name__ == '__main__':
    # Stress test: repeatedly push batches of 1000 dummy work units
    # through the parallel backend and validate every round trip.
    print("Testing: %s" % setup(PARALLEL_MODE_MPI_INTER))
    TEST_LENGTH = 1000000
    for trial in range(TEST_LENGTH):
        batch = [HelloWorld(i) for i in range(1000)]
        start = time.time()
        batch_complete_work(batch)
        finish = time.time()
        for unit in batch:
            unit.validate()
        print("%10d took %.2f" % (trial, finish - start))
    cleanup()
|
from BeautifulSoup import BeautifulSoup
import pandas as pd
# Scrape PredictWise 2012 state-level win probabilities into predictwise.csv.
# Fix: the input file handle was opened and never closed; use `with`.
with open('predictwise.html') as fh:
    data = fh.read()
bs = BeautifulSoup(data)
obama = {}
romney = {}
votes = {}
for state in bs.findAll('div', 'state_info'):
    name = state.find('h5').contents[0]
    v = state.find('dl', 'votes').find('dt').contents[0]
    o, r = state.findAll('dl', 'chance')
    votes[name] = int(v)
    # strip the trailing '%' and convert to a 0-1 probability
    obama[name] = float(o.find('dt').contents[0][:-1]) / 100
    romney[name] = float(r.find('dt').contents[0][:-1]) / 100
# Re-shape the per-state dicts into parallel, state-sorted columns.
states = sorted(votes.keys())
votes = [votes[s] for s in states]
obama = [obama[s] for s in states]
romney = [romney[s] for s in states]
data = pd.DataFrame(dict(States=states, Votes=votes,
                         Obama=obama, Romney=romney))
data.to_csv('predictwise.csv', index=False)
|
from cnn.Setup import Setup
import sys
import keras.backend as K
import numpy as np
import os
from keras.utils import to_categorical
from sklearn.model_selection import train_test_split
# CLI arguments: path of a saved Setup and the directory of X_test .npy chunks.
rel_filepath = sys.argv[1]
XTest_directory = sys.argv[2]
# Reload the previously trained model/configuration from disk.
continue_setup = Setup('')
continue_setup.load(rel_filepath=rel_filepath)
# NOTE(review): unused in the rest of this script; presumably the label
# count of the classifier -- confirm before relying on it.
no_of_classes = 15000
def test_data_generator(XTest_directory, YTest_directory):
    """Endlessly yield reshaped test batches loaded from .npy chunk files.

    Chunks are named 2000.npy, 4000.npy, ..., 114000.npy plus a final
    115424.npy remainder file.  Each chunk is reshaped to
    (samples, 2048, 1, 1) for the CNN.  ``YTest_directory`` is accepted
    for interface symmetry but unused.
    """
    chunk_names = ['%d.npy' % i for i in range(2000, 114000 + 1, 2000)]
    chunk_names.append('115424.npy')
    while True:
        for chunk in chunk_names:
            batch = np.load(os.path.join(XTest_directory, chunk))
            yield batch.reshape(-1, 2048, 1, 1)
# Predict over all test chunks: 57 full chunks of 2000 plus the 115424
# remainder file -> 58 generator steps.
# Fix: `114000/2000 + 1` is a float (58.0) under Python 3; use integer
# division so Keras receives an int step count.
y_pred = continue_setup.getModel().predict_generator(
    test_data_generator(XTest_directory, None), steps=(114000 // 2000 + 1))
y_pred = np.argmax(y_pred, axis=1)  # class index per sample
np.save('test_result.npy', y_pred)
|
from flask import render_template, jsonify, redirect, url_for, request, render_template
import app
from app import db, app
from models import node
from forms import auth_form
from flask_login import login_user, login_required, logout_user, LoginManager, current_user
from werkzeug.security import generate_password_hash, \
check_password_hash
# ==============================#
# Make sure we are logged in before hitting the endpoints
# ==============================#
# Flask-Login session manager; wires the @login_required decorator and
# the user_loader/unauthorized callbacks below into the app.
login_manager = LoginManager()
login_manager.init_app(app)
# ------------------------------#
# Create a login and signup view
# ------------------------------#
@app.route('/signup', methods=['GET', 'POST'])
def signup():
    """Render the signup form on GET; create a new account on POST."""
    form = auth_form.SignupForm()
    if request.method == 'GET':
        return render_template('signup.html', form=form)
    elif request.method == 'POST':
        # Guard-clause style: bail out on validation failure, then on
        # duplicate email, before touching the database.
        if not form.validate_on_submit():
            return "Form didn't validate"
        if node.User.query.filter_by(email=form.email.data).first():
            return "Email address already exists"
        app.logger.info('Adding new user')
        account = node.User(form.email.data, form.password.data)
        db.session.add(account)
        db.session.commit()
        return "User created!!!"
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Render the login form on GET; authenticate the user on POST."""
    form = auth_form.SignupForm()
    if request.method == 'GET':
        return render_template('login.html', form=form)
    elif request.method == 'POST':
        if not form.validate_on_submit():
            # Invalid/expired form submission: just send the visitor home.
            return redirect(url_for('hello_world'))
        account = node.User.query.filter_by(email=form.email.data).first()
        if not account:
            return "User doesn't exist"
        if not account.check_password(form.password.data):
            return "Wrong password"
        login_user(account)
        app.logger.info('Redirecting to hello world')
        return redirect(url_for('hello_world'))
@login_manager.unauthorized_handler
def unauthorized_callback():
    # Anonymous visitors hitting a @login_required view go to the login page.
    return redirect('/login')
@login_manager.user_loader
def load_user(email):
    """Flask-Login user loader: resolve the session token (an email
    address here) back to its User row, or None if it no longer exists."""
    account = node.User.query.filter_by(email=email).first()
    return account
@app.route("/logout")
@login_required
def logout():
logout_user()
return redirect(url_for('hello_world'))
@app.route('/')
def hello_world():
    # Home page; current_user is passed so the template can adapt to
    # whether the visitor is logged in.
    app.logger.debug('Rendering home page')
    return render_template('index.html', current_user=current_user)
|
from django.apps import AppConfig
class FirmwareUploadsConfig(AppConfig):
name = "firmware_uploads"
verbose_name = "Firmware Uploads"
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.