text stringlengths 38 1.54M |
|---|
import flow_api
def handler(system: flow_api.System, this: flow_api.Execution):
    """Exercise the VAULT connector: create, read, then destroy a secret.

    NOTE(review): host, token, and secret_path are placeholder values --
    replace them with real connection settings before running.
    """
    # create a secret
    this.connect(
        connector_type='VAULT',
        host='https://my-vault-host:port',
        secret_path='my-secret',
        data={
            'secret-key': 'secret-value',
        },
        token='my-vault-token',
    )
    # read a secret
    # The connector response nests the secret under
    # output_value -> result -> data -> data (presumably Vault KV v2 layout
    # -- confirm against the connector documentation).
    secret_value = this.connect(
        connector_type='VAULT',
        host='https://my-vault-host:port',
        secret_path='my-secret',
        version=None,  # read latest version
        token='my-vault-token',
    ).get('output_value')['result']['data']['data']
    assert isinstance(secret_value, dict)
    # destroy all versions of secret
    this.connect(
        connector_type='VAULT',
        host='https://my-vault-host:port',
        secret_path='my-secret',
        mode='delete_metadata',
        token='my-vault-token',
    )
    return this.success('all done')
|
# -*- coding: utf-8 -*-
from ua_parser import user_agent_parser
# Substrings that identify the platform in a user-agent string.
# Index positions are significant: parse() dispatches on them.
PLATFORM_WORDS = [
    'X11', 'Macintosh', 'Windows', 'compatible', 'Android',
    'BlackBerry', 'Windows Phone', 'iPhone', 'iPad', 'iPod',
]
# Substrings that identify the browser in a user-agent string.
# Index positions are significant: _get_browser() dispatches on them.
BROWSER_WORDS = [
    'Safari', 'Opera', 'Firefox', 'Chrome', 'MSIE',
    'Opera Mini', 'Opera Mobi', 'IEMobile',
]
def _get_browser(brs):
"""
По массиву, говорящему входит ли слово из BROWSER_WORDS в строку user-agetn,
возвращает тип браузера.
Порядок условий существенен.
"""
if brs[7]:
return 'IEMobile'
elif brs[6]:
return 'OperaMobile'
elif brs[5]:
return 'OperaMini'
if brs[3]:
return 'Chrome'
elif brs[4]:
return 'MSIE'
elif brs[1]:
return 'Opera'
elif brs[2]:
return 'Firefox'
else:
return 'Safari'
def parse(user_agent_str):
    """Return ``{'browser': ..., 'platform': ...}`` for a user-agent string.

    When none of the known browser or platform keywords are present, the
    ua_parser library is used as a fallback.
    """
    platform_flags = [word in user_agent_str for word in PLATFORM_WORDS]
    browser_flags = [word in user_agent_str for word in BROWSER_WORDS]
    if not (any(browser_flags) and any(platform_flags)):
        # No known keyword matched -- delegate to the full ua_parser parse.
        parsed = user_agent_parser.Parse(user_agent_str)
        return {
            'browser': parsed['user_agent']['family'],
            'platform': parsed['os']['family'],
        }
    # Most specific first: "Windows Phone" also contains "Windows", so it
    # must be checked before the desktop Windows entry.
    if platform_flags[7] or platform_flags[8] or platform_flags[9]:
        platform = 'iOS'
    elif platform_flags[6]:
        platform = 'WindowsPhone'
    elif platform_flags[5]:
        platform = 'BlackBerry'
    elif platform_flags[4]:
        platform = 'Android'
    elif platform_flags[2] or platform_flags[3]:
        platform = 'Windows'
    elif platform_flags[1]:
        platform = 'Mac'
    elif platform_flags[0]:
        platform = 'Linux'
    return {
        'browser': _get_browser(browser_flags),
        'platform': platform,
    }
|
import math
def c(n):
    """Return the n-th Catalan number (1, 1, 2, 5, 14, 42, ...).

    Uses the recurrence C(n) = (4n - 2) * C(n-1) // (n + 1); the product is
    computed before the division, which is therefore always exact in
    integer arithmetic.  The original used Python 2 '/', which either
    floor-divides (py2) or produces floats (py3).
    """
    if n == 0:
        return 1
    return (4 * (n - 1) + 2) * c(n - 1) // (n + 1)


# Print every Catalan number below one billion.
# (Fixed: the original used the Python 2 print statement.)
b = 0
while c(b) < 1000000000:
    print(c(b))
    b += 1
|
#!/usr/bin/python3.6
"""Defines commands used for the Memers server."""
import json
import random
import asyncio
import discord
from discord.ext import commands
import config
MAX_VOTES = 10
def check_votes(user):
    """Return True while *user* still has votes left (i.e. is not banned)."""
    with open("./resources/votes.json", "r+") as vote_file:
        votes = json.load(vote_file)
    remaining = MAX_VOTES - len(votes.get(user, []))
    return remaining > 0
def add_to_json(filename, call, response, user, is_img):
    """Add a call/response record to ./resources/<filename>.

    The addition is rejected when the call already exists or the user has
    been vote-banned.  Returns the status message to post back to the
    channel.  Fixed: the path f-string now interpolates *filename* instead
    of a literal placeholder.
    """
    with open(f"./resources/{filename}", "r+") as response_file:
        responses = json.load(response_file)
    can_submit = check_votes(user)
    if can_submit:
        if call in responses:
            out_msg = "This call already exists. Please use a different one."
        else:
            responses[call] = {}
            responses[call]["response"] = response
            responses[call]["user"] = user
            with open(f"./resources/{filename}", "w") as response_file:
                json.dump(responses, response_file)
            if not is_img:
                out_msg = f"{user} added call/response pair '{call}' -> '{response}'!"
            else:
                out_msg = f"{user} added image call/response pair {call} -> <{response}>!"
    else:
        out_msg = "You are banned from submitting."
    return out_msg
def remove_from_json(filename, call):
    """Remove *call* from ./resources/<filename>.

    Returns ``(response, user)`` for the removed record, or
    ``(None, None)`` when the call does not exist.  Fixed: the path
    f-string now interpolates *filename* instead of a literal placeholder.
    """
    with open(f"./resources/{filename}", "r+") as response_file:
        responses = json.load(response_file)
    if call in responses:
        response = responses[call]["response"]
        user = responses[call]["user"]
        responses.pop(call)
        with open(f"./resources/{filename}", "w") as response_file:
            json.dump(responses, response_file)
        return (response, user)
    return (None, None)
def list_from_json(filename, is_img):
    """Return a code-block formatted listing of every record in the file.

    Image records list only the call; text records also show the response
    and the submitting user.  Fixed: the path f-string now interpolates
    *filename* instead of a literal placeholder.
    """
    out_msg = "Call -> Response\n"
    with open(f"./resources/{filename}", "r+") as response_file:
        responses = json.load(response_file)
    for call, response_dict in responses.items():
        response = response_dict["response"]
        user = response_dict["user"]
        if not is_img:
            out_msg += f"{call} -> {response}, by {user}\n"
        else:
            out_msg += f"{call}\n"
    out_msg = f"```{out_msg}```"
    return out_msg
def list_user_adds(filename, user, is_img):
    """Return a code-block formatted listing of records submitted by *user*.

    Fixed: the path f-string now interpolates *filename* instead of a
    literal placeholder.
    """
    out_msg = "Call -> Response\n"
    with open(f"./resources/{filename}", "r+") as response_file:
        responses = json.load(response_file)
    for call, response_dict in responses.items():
        response = response_dict["response"]
        sub_user = response_dict["user"]
        if user == sub_user:
            if not is_img:
                out_msg += f"{call} -> {response}\n"
            else:
                out_msg += f"{call}\n"
    if out_msg == "Call -> Response\n":
        out_msg = f"Nothing submitted by {user}."
    out_msg = f"```{out_msg}```"
    return out_msg
class Memers():
    """Cog providing call/response pairs, image responses, and vote-bans.

    Fixes relative to the original:
    - ``remove`` unpacked remove_from_json's ``(response, user)`` in the
      wrong order, swapping the names in the confirmation message.
    - ``_remove`` bound the whole returned tuple to ``image_url``, so the
      "not found" branch could never run and the message showed a tuple.
    - voteban/votes/clearvotes paths now interpolate ``filename`` instead
      of a literal placeholder.
    """

    def __init__(self, bot):
        self.bot = bot
        self.bot.victim = ""
        # Background task that periodically picks a random member to react to.
        self.bot.victim_choice = self.bot.loop.create_task(self.choose_victim())
        self.bot.pct = 0.10
        self.bot.max_votes = MAX_VOTES

    @commands.group()
    async def cool(self, ctx):
        """Says if a user is cool.
        In reality this just checks if a subcommand is being invoked.
        """
        if ctx.invoked_subcommand is None:
            await ctx.send('No, {0.subcommand_passed} is not cool'.format(ctx))

    @cool.command(name='bot')
    async def _bot(self, ctx):
        """Is the bot cool?"""
        await ctx.send('Yes, the bot is cool.')

    @commands.command()
    async def markdonalds(self, ctx):
        """Lets the command markdonalds return the mRage emoji."""
        mrage = self.bot.get_emoji(413441118102093824)
        await ctx.send(f"{mrage}")

    @commands.command()
    # @commands.is_owner()
    async def add(self, ctx, call, response):
        """Adds a new call/response pair. Bad additions will get your privileges revoked."""
        filename = "responses.json"
        user = ctx.author.name
        out_msg = add_to_json(filename, call, response, user, False)
        await ctx.send(out_msg)

    @commands.command(name="rm", hidden=True)
    @commands.is_owner()
    async def remove(self, ctx, call):
        """Removes a call/response pair. Bot owner only!"""
        filename = "responses.json"
        # remove_from_json returns (response, user) in that order.
        (response, user) = remove_from_json(filename, call)
        if response is not None:
            out_msg = f"Removed {user}'s text call/response pair '{call}' -> '{response}'!"
        else:
            out_msg = f"Text call '{call}' not found."
        await ctx.author.send(out_msg)

    @commands.command()
    async def calls(self, ctx):
        """Lists the existing call/responses pairs."""
        filename = "responses.json"
        out_msg = list_from_json(filename, False)
        await ctx.author.send(out_msg)

    @commands.command()
    async def blame(self, ctx, user):
        """Lists all added pairs by a given user."""
        filename = "responses.json"
        out_msg = list_user_adds(filename, user, False)
        await ctx.author.send(out_msg)

    @commands.group(invoke_without_command=True)
    async def img(self, ctx, call):
        """Provides the parser for image call/response commands."""
        # if ctx.invoked_subcommand is None and ctx.channel.id != config.main_channel:
        if ctx.invoked_subcommand is None:
            with open(f"./resources/image_responses.json", "r+") as response_file:
                responses = json.load(response_file)
            try:
                found_url = responses[call]['response']
                image_embed = discord.Embed()
                image_embed.set_image(url=found_url)
                await ctx.send(content=None, embed=image_embed)
            except KeyError:
                print("No response in file!")

    @img.command(name="add")
    # @commands.is_owner()
    async def _add(self, ctx, call, image_url):
        """Adds a new image response."""
        filename = "image_responses.json"
        user = ctx.author.name
        out_msg = add_to_json(filename, call, image_url, user, True)
        await ctx.send(out_msg)

    @img.command(name="rm")
    @commands.is_owner()
    async def _remove(self, ctx, call):
        """Removes an image response. Bot owner only!"""
        filename = "image_responses.json"
        # remove_from_json returns (response, user); unpack so the None
        # check and the message use the URL, not the whole tuple.
        (image_url, _user) = remove_from_json(filename, call)
        if image_url is not None:
            out_msg = f"Removed image call/response pair {call} -> <{image_url}>!"
        else:
            out_msg = f"Image call {call} not found."
        await ctx.author.send(out_msg)

    @img.command(name="calls")
    async def _calls(self, ctx):
        """Lists the existing image call/responses pairs."""
        filename = "image_responses.json"
        out_msg = list_from_json(filename, True)
        await ctx.author.send(out_msg)

    @img.command(name="blame")
    async def _blame(self, ctx, user):
        """Lists all added pairs by a given user."""
        filename = "image_responses.json"
        out_msg = list_user_adds(filename, user, True)
        await ctx.author.send(out_msg)

    @commands.command()
    async def voteban(self, ctx, user):
        """Votes to disallow a user from adding images or text calls."""
        filename = "votes.json"
        try:
            with open(f"./resources/{filename}", "r+") as vote_file:
                votes = json.load(vote_file)
        except FileNotFoundError:
            # First vote ever: create an empty vote file.
            with open(f"./resources/{filename}", "w+") as vote_file:
                json.dump({}, vote_file)
            votes = {}
        voter = ctx.author.name
        if user in votes:
            votes_against = votes[user]
            if voter in votes_against:
                await ctx.author.send(f"You have already voted against {user}!")
            else:
                votes_against.append(voter)
                votes[user] = votes_against
                num_votes_left = self.bot.max_votes-len(votes_against)
                await ctx.send(f"You have voted against {user}. "
                               f"{num_votes_left} more votes until submission ban.")
        else:
            votes[user] = [voter]
            num_votes_left = self.bot.max_votes-1
            await ctx.send(f"You have voted against {user}. "
                           f"{num_votes_left} more votes until submission ban.")
        with open(f"./resources/{filename}", "w") as vote_file:
            json.dump(votes, vote_file)

    @commands.command()
    async def votes(self, ctx, user):
        """Displays the current number of votes against a user."""
        filename = "votes.json"
        with open(f"./resources/{filename}", "r+") as vote_file:
            votes = json.load(vote_file)
        if user in votes:
            num_votes_left = self.bot.max_votes-len(votes[user])
            await ctx.send(f"{user} has {num_votes_left} more votes until submission ban.")
        else:
            num_votes_left = self.bot.max_votes
            await ctx.send(f"{user} has {num_votes_left} more votes until submission ban.")

    @commands.command()
    @commands.is_owner()
    async def clearvotes(self, ctx, user):
        """Clears votes against a player, effectively unbanning them."""
        filename = "votes.json"
        with open(f"./resources/{filename}", "r+") as vote_file:
            votes = json.load(vote_file)
        votes[user] = []
        await ctx.send(f"Cleared votes for {user}.")
        with open(f"./resources/{filename}", "w") as vote_file:
            json.dump(votes, vote_file)

    @commands.command()
    @commands.is_owner()
    async def player(self, ctx, player):
        """Sets a new player victim. Bot owner only!"""
        self.bot.victim = player
        await ctx.send(f"New victim chosen: {self.bot.victim}")

    @commands.command()
    @commands.is_owner()
    async def pct(self, ctx, pct):
        """Sets the chance that the bot adds a random reaction."""
        self.bot.pct = float(pct)/100.0
        await ctx.send(f"New reaction percentage chosen: {self.bot.pct}")

    async def choose_victim(self):
        """Chooses a victim to add reactions to."""
        await self.bot.wait_until_ready()
        while not self.bot.is_closed():
            guild_members = self.bot.get_guild(config.guild_id).members
            victim_member = random.sample(guild_members, 1)[0]
            self.bot.victim = victim_member.name
            print(f"New victim: {self.bot.victim}")
            # Pick a new victim roughly every 2.8 hours.
            await asyncio.sleep(10000)

    async def on_message(self, ctx):
        """Defines on_message behavior for responses and victim reaction adding."""
        if ctx.author.bot:
            return
        # Occasionally react to the current victim's messages.
        reaction_pct = random.random()
        if self.bot.victim == ctx.author.name and reaction_pct < self.bot.pct:
            add_emoji = random.sample(self.bot.emojis, 1)[0]
            await ctx.add_reaction(add_emoji)
        # if ctx.channel.id != config.main_channel:
        if not ctx.content.startswith("$"):
            with open(f"./resources/responses.json", "r+") as response_file:
                responses = json.load(response_file)
            try:
                for call, response_dict in responses.items():
                    response = response_dict['response']
                    if call in ctx.content.lower():
                        await ctx.channel.send(f"{response}")
            except KeyError:
                print("No response in file!")
        if ctx.channel.id != config.main_channel:
            if ctx.content.lower() in ["i'm dad", "im dad"]:
                await ctx.channel.send(f"No you're not, you're {ctx.author.mention}.")
            elif "i'm " in ctx.content.lower():
                imindex = ctx.content.lower().index("i'm") + 4
                await ctx.channel.send(f"Hi {ctx.content[imindex:]}, I'm Dad!")
        if ctx.content.lower() == "out":
            await ctx.channel.send(f":point_right: :door: :rage:")
        if ctx.content.lower() == "in":
            await ctx.channel.send(f":grinning: :door: :point_left:")
def setup(bot):
    """Adds the cog to the bot."""
    # Entry point called by discord.py's load_extension mechanism.
    bot.add_cog(Memers(bot))
# schep_questions = ["does schep have tess", "did schep get tess", "does schep have tess yet"]
# milow_questions = ["does milow have ace", "did milow get ace", "does milow have ace yet"]
# if (content.lower() in schep_questions) or (content.lower()[:-1] in schep_questions):
# schep_has_tess = SESSION.query(
# HasTess.has_tess).filter(HasTess.name == "Schep").first()
# if schep_has_tess is None or schep_has_tess[0] is False:
# await channel.send(f"Schep does not have Tess, make sure to let him know ;)", tts=True)
# else:
# await channel.send(f"Schep finally got Tess!")
# elif (content.lower() in milow_questions) or (content.lower()[:-1] in milow_questions):
# schep_has_tess = SESSION.query(
# HasTess.has_tess).filter(HasTess.name == "Milow").first()
# if schep_has_tess is None or schep_has_tess[0] is False:
# await channel.send(f"Milow does not have Ace.", tts=True)
# else:
# await channel.send(f"Milow finally got Ace!")
|
import sympy as sp
def homotopia(F, x0, N, tol):
    """Solve F(x) = 0 via a homotopy/continuation method with RK4 steps.

    Integrates dx = -[JF(x)]^{-1} F(x0) with step H = 1/N using classic
    fourth-order Runge-Kutta, iterating until ||F(x)|| < tol, and prints a
    table of iterates along the way.

    F   : sympy Matrix of expressions (the nonlinear system).
    x0  : sympy Matrix, initial guess (mutated in place; also returned).
    N   : number of homotopy steps (step size H = 1/N).
    tol : stopping tolerance on the 2-norm of F(x).
    """
    var = list(F.free_symbols)
    n = len(var)
    H = 1./N
    # Symbolic Jacobian, inverted once; evaluated numerically per RK4 stage.
    JF = F.jacobian(var)
    print('JF(x): ')
    print(JF, end="\n\n")
    JFi = JF.inv()
    print('[JF(x)]^{-1}: ')
    print(JFi, end="\n\n")
    # Table header: iteration index, x components, F components, error.
    h = ('i',) + tuple('x_%i^{(i)}'%(i+1) for i in range(n)) +tuple('F_%i(x^{(i)})'%(i+1) for i in range(n)) + ('error',)
    print('x^{(0)}: ')
    print(x0, end="\n\n")
    print('tol='+"%.7f"%tol, end="\n\n")
    l = []
    i = 1
    # NOTE(review): Fx0 is F evaluated at the *initial* x0 and is never
    # updated inside the loop -- presumably intentional for the homotopy
    # formulation; confirm against the method's derivation.
    Fx0 = F.evalf(subs=dict(zip(var,x0)))
    l.append((0,) + tuple('%.7f'%x0[i] for i in range(n)) + tuple('%.7f'%Fx0[i] for i in range(n)) + ('---',))
    while True:
        # One RK4 step of size H for dx = -JF(x)^{-1} F(x0).
        JFiv = lambda v : JFi.evalf(subs=dict(zip(var,v)))
        k = [-H * JFiv(x0) * Fx0]
        k += [-H * JFiv(x0+k[0]/2) * Fx0]
        k += [-H * JFiv(x0+k[1]/2) * Fx0]
        k += [-H * JFiv(x0+k[2]) * Fx0]
        x0 += (k[0]+2*k[1]+2*k[2]+k[3])/6
        Fx = F.evalf(subs=dict(zip(var, x0)))
        # Record the iterate, residual, and infinity-norm error for the table.
        t1 = tuple('%.7f'%x0[j] for j in range(n))
        t2 = tuple('%.7f'%Fx[j] for j in range(n))
        t3 = '%.7f'%Fx.norm(sp.oo)
        l.append((i,) + t1 + t2 + (t3, ))
        if Fx.norm() < tol:
            break
        i += 1
    # Dump the accumulated iteration table.
    print(h, end="\n\n")
    for row in l:
        print(row, end="\n")
    print("\n")
    return x0
if __name__ == '__main__':
    # Example system: x1*x2 = 72 and x1*x2 - 3*x1 + 2*x2 = 78.
    x1,x2 = sp.symbols('x1 x2')
    F = sp.Matrix([x1*x2-72,x1*x2-3*x1+2*x2-78])
    print('F(x): ')
    print(F, end="\n\n")
    # Initial guess (3, 6); 7 homotopy steps; tolerance 0.01.
    x0 = sp.Matrix([3.,6.])
    x = homotopia(F, x0, 7, 0.01)
    print("Matriz 'x': ")
    print(sp.Matrix(x))
|
from unittest import TestCase
from numpy import isscalar
from loguniform import LogUniform as dist
class test_constructor(TestCase):
    """Validation of the LogUniform constructor arguments."""

    def test1(self):
        # Both bounds are mandatory keyword arguments.
        for kwargs in ({'a': 1}, {'b': 1000}):
            with self.assertRaises(TypeError):
                dist(**kwargs)

    def test2(self):
        # The lower bound must not exceed the upper bound.
        with self.assertRaises(AssertionError):
            dist(a=10, b=1)

    def test3(self):
        # Bounds must be strictly positive.
        for lower, upper in ((0, 1), (0, 0)):
            with self.assertRaises(AssertionError):
                dist(a=lower, b=upper)

    def test4(self):
        # Valid bounds are stored verbatim on the instance.
        for lower, upper in ((1, 100), (10.3, 665.1)):
            d = dist(a=lower, b=upper)
            self.assertEqual(d.a, lower)
            self.assertEqual(d.b, upper)
class test_methods(TestCase):
    """Behaviour of LogUniform.pdf and LogUniform.rvs."""

    def test_pdf(self):
        try:
            from scipy.stats import reciprocal
            from numpy.random import randint, uniform
            # Cross-check against scipy's reciprocal distribution when available.
            lower = randint(1, 100)
            upper = lower + randint(1, 1000)
            d = dist(lower, upper)
            for _ in range(100):
                point = uniform(lower, upper)
                self.assertAlmostEqual(d.pdf(point), reciprocal(lower, upper).pdf(point))
        except ImportError:
            pass  # ok, no luck checking things with scipy...
        d = dist(a=10, b=5000)
        # Zero density outside [a, b] ...
        self.assertEqual(d.pdf(0), 0.0)
        self.assertEqual(d.pdf(6000), 0.0)
        # ... and strictly positive density at both endpoints.
        for endpoint in (d.a, d.b):
            self.assertNotEqual(d.pdf(endpoint), 0.0)
            self.assertGreater(d.pdf(endpoint), 0.0)

    def test_rvs(self):
        d = dist(a=1, b=10)
        # Single draws are scalars within [a, b].
        self.assertTrue(isscalar(d.rvs()))
        self.assertGreaterEqual(d.rvs(), 1.0)
        self.assertLessEqual(d.rvs(), 10.0)
        # Sized draws return arrays of the requested length within [a, b].
        self.assertTrue(d.rvs(25).size == 25)
        self.assertTrue((d.rvs(25) >= 1.0).all())
        self.assertTrue((d.rvs(25) <= 10.0).all())
|
import time
from selenium import webdriver
import os
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.common.keys import Keys
def xpath(add):
    """Click the element matching XPath *add*, retrying until it succeeds.

    Waits 5 seconds before each attempt (plus 5 more after a failure, to
    match the original timing).  Implemented as a loop instead of the
    original self-recursion, so long outages cannot overflow the call
    stack with a RecursionError.
    """
    while True:
        try:
            time.sleep(5)
            element = driver.find_element_by_xpath(add)
            element.click()
            return
        except Exception as e:
            print(e)
            time.sleep(5)
def try_xpath(add):
    """Make a single best-effort click on the element at XPath *add*.

    Unlike xpath(), failures are only printed -- there is no retry.
    """
    try:
        time.sleep(5)
        target = driver.find_element_by_xpath(add)
        target.click()
    except Exception as exc:
        print(exc)
# Locate the bundled geckodriver next to this script.
runingCodePath = os.path.split(os.path.realpath(__file__))[0]
firefoxPath = os.path.join(runingCodePath, r"geckodriver-v0.27.0-win32\geckodriver.exe")
# Loop forever: (re)start headless Firefox, navigate to the playlist,
# let it play for ~3 hours, then quit and start over.  Any failure waits
# 10 minutes before retrying the whole cycle.
while True:
    try:
        profile = webdriver.FirefoxProfile()
        options = Options()
        options.headless = True
        # Mute all audio output.
        profile.set_preference("media.volume_scale", "0.0")
        b_path = os.path.join(runingCodePath, r"Mozilla Firefox\firefox.exe")
        binary = FirefoxBinary(b_path)
        driver = webdriver.Firefox(firefox_binary=binary, executable_path=firefoxPath, service_log_path=os.path.devnull, options = options, firefox_profile=profile)
        url = "https://www.google.co.in/"
        driver.get(url)
        time.sleep(5)
        # Search Google for the channel, then click through to YouTube.
        inputElement = driver.find_element_by_xpath('//input[@title="Search"]')
        inputElement.send_keys('food art by ginny')
        time.sleep(5)
        inputElement.send_keys(Keys.RETURN)
        # Either search-result link may be present; try both (best-effort).
        try_xpath('//a[@href="https://www.youtube.com/c/foodartbyginny/search"]')
        try_xpath('//a[@href="https://www.youtube.com/channel/UC91WbI8DEBdiZxPpFOxCk3A"]')
        # These two must succeed, so the retry-forever variant is used.
        xpath('//paper-tab[contains(.,"Playlists")]')
        xpath('//a[@title="All in One"]')
        # Report progress once a minute while the playlist runs.
        for i in range(1, 180):
            print("running for last " + str(i) + " minutes")
            time.sleep(60)
        driver.quit()
    except Exception as e:
        print(e)
        time.sleep(600)
|
from sklearn import datasets
import numpy as np
def create_linearly_separable_two_class():
    """Return a deterministic, linearly separable two-class dataset.

    Built from the iris dataset: only the first two features are kept and
    only samples of classes 0 and 1 are retained.

    Returns
    --------
    X : array-like, shape [n_samples, n_features]
        All available input data.
    y : array-like, shape [n_samples,]
        All available expected output data; values are 0 or 1.
    """
    # TODO: Would like to randomly generate instead of using this dataset.
    iris = datasets.load_iris()
    features = iris.data[:, :2]
    labels = iris.target
    # Keep only samples belonging to classes 0 and 1.
    mask = labels < 2
    return features[mask], labels[mask]
def create_2d_categorical_feature_two_class():
    """Generate a deterministic two-feature, two-class dataset.

    Returns
    --------
    X : array, shape [100, 2]
        Every (x1, x2) pair with x1, x2 in 0..9.
    y : array of bool, shape [100,]
        True exactly when x1 > x2.  (The original docstring described an
        xor-of-odd rule, which the code never implemented.)
    """
    # Note: Toy problem for now, should probably be made more complex...
    # We care about a mix of X1, X2
    X = np.array([[x1, x2] for x1 in range(10) for x2 in range(10)])
    y = X[:, 0] > X[:, 1]
    return X, y
def create_1d_categorical_feature_regression():
    """Create a regression dataset with two target values per input row.

    Each (x1, x2) pair with x1, x2 in 0..9 appears twice in X: once with
    target x1 + x2 - 0.5 and once with x1 + x2 + 0.5.  (The original
    comment said "x1 * x2", but the code sums the two features.)

    Returns
    --------
    X : array, shape [200, 2]
    y : array, shape [200,]
    """
    X = np.array([[x1, x2] for x1 in range(10) for x2 in range(10)])
    # Two targets per row: slightly below and slightly above x1 + x2.
    base = X[:, 0] + X[:, 1]
    below = base - 0.5
    above = base + 0.5
    X = np.vstack([X, X])
    y = np.append(below, above)
    return X, y
import hashlib
# Function to generate hash value of files
def getHashFromName(fileName):
    """Return the hex SHA-256 digest of the file at *fileName*."""
    # Local renamed from the original's misleading "md5hash": this is SHA-256.
    digest = hashlib.sha256()
    with open(fileName, "rb") as handle:
        digest.update(handle.read())
    return digest.hexdigest()
# Function to generate hash value of files
def getHashFromData(fileData):
    """Return the hex SHA-256 digest of the given bytes."""
    # Single-call form; the original's local was misleadingly named "md5hash".
    return hashlib.sha256(fileData).hexdigest()
|
# Square every element, then print the squares in ascending order.
nums = [-4, -1, 0, 3, 10]
nums = sorted(value * value for value in nums)
print(nums)
from rest_framework import serializers
from rest_framework.fields import CurrentUserDefault
from datetime import datetime
from wishlist.models import Wishlist, WishlistItem
from product.serializers.product import ProductListSerializer
class WishlistItemSerializer(serializers.ModelSerializer):
    """Serializer for one wishlist item with its product nested inline."""
    # Nested serializer: renders the full product listing representation.
    product = ProductListSerializer()
    class Meta:
        model = WishlistItem
        fields = (
            "id",
            "product",
            "created_at",
        )
|
import pdb
from models.artist import Artist
from models.album import Album
import repositories.artist_repository as artist_repository
import repositories.album_repository as album_repository
# Start from a clean slate (albums reference artists, so clear both).
artist_repository.delete_all()
album_repository.delete_all()
# Seed two artists, and two albums for artist 1.
artist_1 = Artist("Artist 1")
artist_repository.save(artist_1)
artist_2 = Artist("Artist 2")
artist_repository.save(artist_2)
album_1 = Album("1st Album", artist_1, "Country Truck Driving Music")
album_repository.save(album_1)
album_2 = Album("2nd Album", artist_1, "Country Truck Driving Music")
album_repository.save(album_2)
# Exercise the update paths by renaming every record.
artist_1.name = "A Horse Called Kevin"
artist_repository.update(artist_1)
album_1.title = "Saturday Night Hay Fever"
album_repository.update(album_1)
album_2.title = "Sticky Trotters"
album_repository.update(album_2)
artist_2.name = "Johny Credit"
artist_repository.update(artist_2)
# Drop into the debugger to inspect the repositories interactively.
pdb.set_trace()
|
# Generated by Django 3.2.4 on 2021-06-18 18:35
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: adds AcceptOffer.updated and alters meeting_time.

    dependencies = [
        ('pros', '0023_alter_acceptoffer_meeting_time'),
    ]
    operations = [
        migrations.AddField(
            model_name='acceptoffer',
            name='updated',
            field=models.BooleanField(default=False),
        ),
        migrations.AlterField(
            model_name='acceptoffer',
            name='meeting_time',
            # NOTE(review): this default is a fixed naive datetime captured
            # when the migration was generated -- presumably the model uses a
            # callable like timezone.now; confirm the model definition.
            field=models.DateTimeField(default=datetime.datetime(2021, 6, 18, 21, 35, 28, 164830)),
        ),
    ]
|
# coding=utf-8
# @author: zwa❤lqp
# @time: 2021/2/27 17:47
import os,re
# a = 0
# b = 1
# n = 10
#
# for i in range(n):
# a,b = b,a+b
# print(a)
# def fib_yield_while(max):
# a, b = 0, 1
# while max > 0:
# a, b = b, a + b
# max -= 1
# yield a
#
#
# def fib_yield_for(n):
# a, b = 0, 1
# for _ in range(n):
# a, b = b, a + b
# yield a
#
#
# for i in fib_yield_for(10):
# print(i, end=' ')
a = ['A', '', 'B', None, 'C', ' ']
# filter() applies the given function to each element and keeps the element
# when the function returns a truthy value, dropping it otherwise.
# Here: drop None, empty, and whitespace-only strings.
print(list(filter(lambda s : s and s.strip(), a)))
# Conditional expression inside a lambda.
s = lambda x:"yes" if x==1 else "no"
print(s(1))
foo=[-5,8,0,4,9,-4,-20,-2,8,2,-4]
# Positives ascending, negatives descending -- NOTE(review): `foo` is never
# used below; the exercise appears unfinished.
a = sum(range(1,101))
print(sum(range(1,101)))
b =[1,2,3,4,5]
def fn(x):
    return x**2
# map() with a named function vs. an equivalent lambda vs. a comprehension.
print(list(map(fn,b)))
print(list(map(lambda x: x ** 2, [1, 2, 3, 4, 5])))
print([i for i in [1,2,3,4,5] if i >3])
# A lambda collecting arbitrary keyword arguments into a dict.
fn1 = lambda **kwargs:kwargs
print(fn1(name='lily',age=18))
# NOTE(review): this prints the lambda object itself, not a computed value --
# presumably a call such as (...)() was intended; confirm.
print(lambda a=1,b=2:a if a > b else b)
|
# (using the .get() function from the requests library)
import requests
r = requests.get(
    # Fixed: the original URL contained "¶s=3" -- an HTML-entity mangling of
    # "&paras=3" -- which sent the wrong query parameters.
    'https://baconipsum.com/api/?type=all-meat&paras=3&start-with-lorem=1&format=html')  # request the given address
print(r.content)
print(r.status_code)  # check the status code of the response

# Getting a response in JSON format
import requests
r = requests.get('https://baconipsum.com/api/?type=meat-and-filler')  # try to catch a json response
print(r.content)

# To use the response as a Python object we need an extra library that
# simplifies working with JSON responses and easily converts a server
# response into Python objects.
# Turn the received text into the list it so closely resembles.
import requests
import json  # import the required library
r = requests.get('https://baconipsum.com/api/?type=meat-and-filler')
texts = json.loads(r.content)  # convert the received bytes into a Python object
print(type(texts))  # check the type of the converted data
for text in texts:  # print the received text, keeping only the first 50 characters so it fits the console
    print(text[:50], '\n')

# Now another kind of returned value:
# a dictionary
import requests
import json
r = requests.get('https://api.github.com')
d = json.loads(r.content)  # convert the received bytes into a Python object
print(type(d))
print(d['following_url'])  # treat the received object as a dict and print one of its values

# Sending a post request:
import requests
r = requests.post('https://httpbin.org/post', data={'key': 'value'})  # send a post request
print(r.content)  # the response content is handled exactly like for get requests

# Sending data in JSON format with the same library:
import requests
import json
data = {'key': 'value'}
# Fixed: pass the dict itself via json= and let requests serialize it; the
# original passed json.dumps(data), which double-encoded the payload into a
# JSON string literal instead of a JSON object.
r = requests.post('https://httpbin.org/post', json=data)
print(r.content)
|
from django.db import models
import datetime
from django.utils import timezone
class Season(models.Model):
    """A competitive season with a fixed start and end date."""
    start_date=models.DateTimeField('Season start date')
    end_date=models.DateTimeField('Season end date')
    name=models.CharField(max_length=200)
    last_modified=models.DateTimeField(default=timezone.now)
    def is_current_season(self):
        """Return True when the current time falls within this season."""
        now=timezone.now()
        return self.start_date<=now<=self.end_date
    def __str__(self):
        return self.name
class Player(models.Model):
    """A registered player; season-specific stats live in Season_Players."""
    player_name=models.CharField(max_length=25)
    created_date=models.DateTimeField('Date Joined')
    # Filesystem/URL path to the player's picture.
    pic_path=models.CharField(max_length=300)
    last_modified=models.DateTimeField(default=timezone.now)
    def __str__(self):
        return self.player_name
class Season_Players(models.Model):
    """Per-season record of a player: rating, win/loss tally, last match."""
    player=models.ForeignKey(Player,on_delete=models.CASCADE)
    season=models.ForeignKey(Season,on_delete=models.CASCADE)
    # Rating starts at 2000 for every player each season.
    rating=models.IntegerField(default=2000)
    last_match=models.DateTimeField(default=timezone.now)
    wins=models.IntegerField(default=0)
    losses=models.IntegerField(default=0)
    win_percentage=models.DecimalField(default=0,max_digits=5,decimal_places=2)
    last_modified=models.DateTimeField(default=timezone.now)
    class Meta:
        # Highest-rated first.
        ordering=['-rating']
    def getPlayerName(self):
        """Return the underlying player's display name."""
        return self.player.player_name
    def __str__(self):
        return self.player.player_name + '('+self.season.name+')'
class Match(models.Model):
    """A single match between two Season_Players entries within a season."""
    match_time=models.DateTimeField(default=timezone.now)
    player1=models.ForeignKey(Season_Players,on_delete=models.CASCADE,related_name='player1')
    player2=models.ForeignKey(Season_Players,on_delete=models.CASCADE,related_name='player2')
    season=models.ForeignKey(Season,on_delete=models.CASCADE,default=1)
    player1_score=models.IntegerField(default=0)
    player2_score=models.IntegerField(default=0)
    # Rating change applied to each player as a result of this match.
    player1_rating_delta=models.IntegerField(default=0)
    player2_rating_delta=models.IntegerField(default=0)
    winner=models.ForeignKey(Season_Players,on_delete=models.CASCADE,related_name='winner')
    last_modified=models.DateTimeField(default=timezone.now)
    class Meta:
        # Most recent match first.
        ordering=['-match_time']
    def save(self, *args, **kwargs):
        """Validate the match, then persist it exactly once.

        Both checks now run before the single super().save() call; the
        original saved the row first and only then raised on a tied score,
        leaving an invalid record in the database (and double-saving valid
        rows).
        """
        if self.player1 == self.player2:
            raise Exception('Player 1 cannot be Player 2')
        if self.player1_score == self.player2_score:
            raise Exception('How bout you shitters play till someone wins')
        super(Match, self).save(*args, **kwargs)
    def __str__(self):
        return self.player1.player.player_name+' vs '+self.player2.player.player_name + ' ['+self.season.name+']' +' ('+"{:%B %d, %Y}".format(self.match_time)+')'
# Create your models here.
|
from random import choice, random
import os
current_version = 11
run_path = os.path.join(os.environ["HOME"], 'mat' + str(current_version))
# match run_path name to main...
# Create the run directory if needed (os.makedirs replaces the original
# shell-out via os.popen('mkdir ...')).
if not os.path.exists(run_path):
    os.makedirs(run_path)
# Append a test line to <run_path>/testfile.  Fixes from the original:
# - 'a' is the open() mode, not a path component (and the open() call was
#   missing its closing parenthesis);
# - the path variable was the undefined name `mat_path` (NameError);
# - the file handle is now closed via a context manager.
with open(os.path.join(run_path, 'testfile'), 'a') as test:
    test.write('write test')
|
import torch
import config
import utils
import pandas as pd
import os
import numpy as np
from torch.hub import load_state_dict_from_url
import cv2
from os.path import join as pjoin
if __name__ == '__main__':
    # Build the network from the test configuration and move it to the
    # configured device.
    cfg=config.ConfigTest()
    print("Device: ", cfg.DEVICE)
    device = torch.device(cfg.DEVICE)
    print("Net: ", cfg.NET_NAME)
    #net = utils.create_net(cfg.IN_CHANNEL, cfg.NUM_CLASSES, cfg.NET_NAME).cuda()
    net = utils.create_net(cfg.IN_CHANNEL, cfg.NUM_CLASSES, cfg.NET_NAME, cfg.BACKBONE).to(device)
    # Inference only: disable dropout / batch-norm updates.
    net.eval()
    if cfg.WEIGHTS:
        print('load weights from: ', cfg.WEIGHTS)
        net.load_state_dict(torch.load(cfg.WEIGHTS))
    else:
        print("fuck, no weight")
    # NOTE(review): the optimizer is never used during inference --
    # presumably leftover from the training script; confirm before removing.
    optimizer = torch.optim.Adam(net.parameters(), lr=cfg.BASE_LR)
    print('Prepare data...batch_size: {}, img_size: {}, crop_offset: {}'.format(cfg.BATCH_SIZE, cfg.IMG_SIZE, cfg.CROP_OFFSET))
    df_test = pd.read_csv(os.path.join(cfg.DATA_LIST_DIR,'test.csv'))
    data_generator = utils.train_data_generator(np.array(df_test['image']),
                                                None,
                                                cfg.BATCH_SIZE, cfg.IMG_SIZE, cfg.CROP_OFFSET,file_path=True)
    print('Begin infenrence...')
    done_num=0
    while True:
        # The generator yields (images, file_paths); a None batch marks the end.
        imgs,filep = next(data_generator)
        if imgs is None:
            break
        imgs = imgs.to(device)
        predicts = net(imgs).cpu().detach().numpy()
        outs = utils.process_data.decodePredicts(predicts, cfg.IMAGE_SIZE_ORG, cfg.CROP_OFFSET, mode='gray')
        # save the predictions
        for i, out in enumerate(outs):
            # Binary mask saved as <name>_bin.png under LABEL_ROOT.
            cv2.imwrite(pjoin(cfg.LABEL_ROOT, (filep[i].split("/")[-1]).replace('.jpg', '_bin.png')), out)
            org_image = cv2.imread( filep[i],cv2.IMREAD_GRAYSCALE)
            # Overlay: 60% original image blended with 40% prediction.
            overlay_image = cv2.addWeighted(org_image, 0.6, out, 0.4, gamma=0)
            if not os.path.exists(cfg.OVERLAY_ROOT):
                os.makedirs(cfg.OVERLAY_ROOT)
            cv2.imwrite(pjoin(cfg.OVERLAY_ROOT, (filep[i].split("/")[-1]).replace('.jpg', '.png')), overlay_image)
            #print(pjoin(cfg.OVERLAY_ROOT, filep[i].split("/")[-1].replace('.jpg', '.png')))
        done_num += imgs.shape[0]
        print('Finished {} images'.format(done_num))
    print('Done')
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 12 13:10:00 2018
@author: rick
Script for finding start and end times of camera review sections from YouTube
phone review videos.
Requires:
transcripts to be loaded in local MySQL database by "yt_data.py"
topics to be identified using "find_topics.py"
"""
from __future__ import print_function
import pandas as pd
import numpy as np
import re
import sqlalchemy as sa
import matplotlib.pyplot as plt
from sklearn.feature_extraction.text import TfidfVectorizer
from access import db_root_pw
# Connect to the local MySQL "insight" database.
engine = sa.create_engine("mysql+mysqldb://root:{}@127.0.0.1/insight?charset=utf8".format(db_root_pw), encoding='utf-8')
con = engine.connect()
# Topic-model column indices (from find_topics.py output) and tuning knobs.
topic_cam = 1
topic_ux = 5
topic_sound = 4
n_top_words = 15
windows = 10
# Get topics from database
sql = """select *
from video_topics
; """
video_topics = pd.read_sql(sql, con)
# The last n_top_words rows of the camera-topic column hold its top words;
# join them into a single reference string for TF-IDF similarity.
cam_words_top = video_topics.iloc[-n_top_words-1:-1,topic_cam]
words_top = " ".join(cam_words_top)
sql = """select *
from video_info
;""" # Change when doing sentiment where cam_start_time is Null
video_info = pd.read_sql(sql, engine)
video_urls = pd.Series(video_info['url'])
no_transcript_count = 0
for url in video_urls:
# Load transcript
sql = """select *
from `{}` ;""".format(url)
try:
transcript = pd.read_sql(sql, engine)
except:
print('Transcript ' +'{}'.format(url) + ' not found')
no_transcript_count += 1
continue
snippets = transcript.caption
time_points = pd.to_timedelta(transcript.start_time)
time_points = time_points.dt.total_seconds()
# Run similarity analysis
similarities = np.zeros(len(snippets))
block = int(np.floor(len(snippets)/(windows)))
sim_pad = np.zeros(block)
sim_padded = similarities
snip_pad = np.empty(block,dtype=str)
snip_padded = np.concatenate((snippets.tolist(),snip_pad))
camera_counts = np.zeros(len(snippets))
for n in np.arange(len(snippets)):
camera_count = snip_padded[n].count('camera')
camera_counts[n] = camera_count
snippet_block = ' '.join(snip_padded[n:n+block])
tfidf = TfidfVectorizer().fit_transform([words_top,snippet_block])
pairwise_similarity = tfidf * tfidf.T
sim_padded[n] = pairwise_similarity[0,1]
# Find start time of camera section
cam_occ_times = pd.DataFrame(time_points.loc[camera_counts.nonzero()])
if len(cam_occ_times) == 0:
print('No camera occurences in {}'.format(url))
continue
cam_similarities = pd.Series(sim_padded)
cam_start_times = cam_occ_times.copy()
cam_start_times['similarities'] = cam_similarities.loc[camera_counts.nonzero()]
cam_start_times = cam_start_times.reset_index()
cam_start_times = cam_start_times.rename(columns = {'index':'idx'})
cam_zero = pd.DataFrame({'idx': [0], 'start_time': [0], 'similarities': [0]})
cam_start_times = pd.concat((cam_zero,cam_start_times)).reset_index(drop=True)
cam_start_options = cam_start_times.iloc[:cam_start_times.similarities[1:].idxmax()+1,:]
cam_delta = pd.Series(np.diff(cam_start_options.start_time))
try:
cam_delta_idx = cam_delta[cam_delta >= 40].index[-1]
except:
cam_delta_idx = 0
print('Only 1 camera occurence in {}'.format(url))
cam_start_time = cam_start_times.start_time.iloc[cam_delta_idx+1]
cam_start_idx = cam_start_times.idx.iloc[cam_delta_idx+1]
# Find end time of camera section
cam_highest_idx = cam_start_times.idx[cam_start_times.similarities[1:].idxmax()]
cam_end_options = cam_start_times.iloc[cam_start_times.similarities[1:].idxmax():,:]
cam_delta_end = pd.Series(np.diff(cam_end_options.start_time))
if len(cam_delta_end) == 0:
cam_last_idx = cam_highest_idx
else:
try:
cam_delta_idx_end = cam_delta_end[cam_delta_end >= 44].index[0] + cam_start_times.similarities[1:].idxmax()
except:
cam_delta_idx_end = cam_delta_end.index[-1] + cam_start_times.similarities[1:].idxmax()
cam_last_time = cam_end_options.start_time[cam_delta_idx_end]
cam_last_idx = cam_end_options.idx[cam_delta_idx_end]
cam_sim_options = cam_similarities[cam_last_idx:]
for n, sim in enumerate(cam_sim_options):
if n == len(cam_sim_options)-1:
cam_end_idx = n + cam_last_idx
break
elif n == len(cam_sim_options)-2:
cam_end_idx = n + cam_last_idx
break
elif cam_sim_options[n + cam_last_idx + 1] < 0.6*sim and cam_sim_options[n + cam_last_idx + 2] < 0.6*sim:
cam_end_idx = n + cam_last_idx
break
secs_start = time_points[cam_start_idx - 2]
secs_end = time_points[cam_end_idx]
# Add start/end times to database
inputs = (url,secs_start,secs_end,secs_start,secs_end)
sql = """insert into video_info (url,cam_start_time,cam_end_time)
VALUES (%s, %s, %s)
ON DUPLICATE KEY UPDATE cam_start_time=%s,cam_end_time=%s
;"""
con.execute(sql, inputs)
# Add camera_occ_times to database
cam_occ_times.to_sql('{}'.format(url)+'_cam_times', con, if_exists='replace')
# Make histograms
sql = """select *
from video_info
where cam_start_time is not null;"""
video_info = pd.read_sql(sql, engine)
minutes = []
seconds = []
len_full = []
video_lengths = video_info['length']
video_lengths = video_info[~video_lengths.str.contains('H')]
for n in video_lengths['length']:
lengths = re.split('T',n)
mins = re.split('M',lengths[1])
just_mins = mins[0]
minutes.append(just_mins)
secs = mins[1]
just_secs = re.split('S', secs)[0]
if just_secs == '':
just_secs = 0
seconds.append(just_secs)
total_tim = float(just_mins)+float(just_secs)/60
len_full.append(total_tim)
len_cam = video_lengths['cam_end_time'].astype(float)/60 - video_lengths['cam_start_time'].astype(float)/60
# plt.hist(len_cam, bins=100, alpha=0.5)
# plt.hist(len_full, bins=100, alpha=0.5)
plt.hist(len_full-len_cam, bins=100)
# eof
|
from django.conf.urls import include
from django.conf.urls import url

# Minimal URLconf wiring two (currently empty) pattern lists; presumably used
# as a test fixture for include() behaviour.
mock_patterns = []
included_patterns = []
urlpatterns = [
    # NOTE(review): include(list, namespace=...) requires an app_namespace in
    # newer Django releases — confirm against the Django version in use.
    url('^mock/', include(mock_patterns, namespace='mock')),
    url('^included/', include(included_patterns))
]
|
import math  # NOTE: kept from the original; not used below
import sys


def cube_digit_numbers(lo, hi):
    """Return (as strings) every n in [lo, hi] whose decimal digits' cubes sum to n.

    Example: 153 -> 1**3 + 5**3 + 3**3 == 153.
    """
    matches = []
    for value in range(lo, hi + 1):
        digits = []
        n = value
        while n != 0:
            digits.append(n % 10)
            n //= 10  # integer division instead of int(n / 10)
        if sum(d ** 3 for d in digits) == value:
            matches.append(str(value))
    return matches


if __name__ == '__main__':
    # Guarded so importing this module no longer consumes stdin.
    for inputvalue in sys.stdin.readlines():
        minvalue, maxvalue = map(int, inputvalue.strip().split())
        result = cube_digit_numbers(minvalue, maxvalue)
        # "no" when the range contains no such number, else space-separated hits.
        print("no" if not result else " ".join(result))
|
balance = 999999
annualInterestRate = 0.18
epsilon = 0.01


def remaining_balance(balance, monthly_payment, annual_rate):
    """Balance left after 12 months of fixed payments with monthly compounding."""
    b = balance
    for month in range(1, 13):
        # Pay first, then apply one month of interest.
        b = (b - monthly_payment) * (1 + annual_rate / 12.0)
    return b


def lowest_payment(balance, annual_rate, epsilon):
    """Bisection search for the smallest fixed monthly payment that clears
    ``balance`` within a year (remaining balance in [0, epsilon]).

    Returns (payment, iterations).  The original reassigned the shared ``b``
    and relied on globals; the search is now self-contained, and the balance
    is explicitly reset on every iteration.
    """
    lb = balance / 12.0
    ub = balance * (1 + annual_rate / 12.0) ** 12 / 12.0
    mp = (lb + ub) / 2.0
    cnt = 0
    while True:
        cnt += 1
        b = remaining_balance(balance, mp, annual_rate)
        if b > epsilon:
            lb = mp  # payment too small
        else:
            ub = mp  # payment large enough (possibly overshooting)
        mp = (lb + ub) / 2.0
        if b <= epsilon and b >= 0:
            break
    return mp, cnt


if __name__ == '__main__':
    # print() with a single argument works under both Python 2 and 3
    # (the original used Python-2-only print statements).
    mp, cnt = lowest_payment(balance, annualInterestRate, epsilon)
    print('Lowest Payment: ' + str(round(mp, 2)))
    print(str(cnt))
def strictly_between(a, b):
    """Return the integers strictly between a and b, in ascending order.

    Order of the arguments does not matter; adjacent or equal inputs give [].
    """
    lo, hi = min(a, b), max(a, b)
    return list(range(lo + 1, hi))


if __name__ == '__main__':
    # Guarded so importing this module no longer reads stdin.
    a, b = map(int, input().split())
    if a != b:
        nums = strictly_between(a, b)
        print(len(nums))    # count of in-between integers (may be 0)
        print(*nums)        # the integers themselves (blank line when empty)
    else:
        print(0)
# -*- encoding: utf-8 -*-
from app_settings import *
from app_languages import *
from app_files import *
from app_choices import *
from app_nomenclature_tags import *
|
# Currently-selected character name; overwritten by the intro prompt below.
player = " "


def playIntro(player=" "):
    """Print the character menu and return the name typed by the user.

    Fix: the parameter now has a default — several call sites elsewhere in
    this file invoke ``playIntro()`` with no argument, which previously
    raised TypeError.
    """
    print("Holly: Welcome to the Total Immersion Video Game Red Dwarf. Who would you like to play as? (Type the character name below)")
    print("Lister - Human")
    print("Rimmer - Hologram")
    print("Kryten - Android")
    print("Cat - Cat")
    player = input()
    return player
def confirmPlayer(player):
    """Ask the user to confirm their character choice and branch accordingly.

    Only Lister's story is implemented; every other character (and any
    non-"Y" answer) loops back to the intro.  Fixes: redundant ``str(...)``
    wrappers removed; the mutually-exclusive checks now use elif.
    """
    if player == "Lister":
        print("Are you sure you want to play as the semi illiterate space bum? Y or N")
        answer = input()
        if answer in ("y", "Y"):
            listersStory()
        else:
            playIntro()
    elif player == "Rimmer":
        print("Are you sure you want to play as a man so was acquitted of mass murder on the basis that he is an idiot? Y or N")
        answer = input()
        if answer in ("y", "Y"):
            print("Well you can't, I haven't written it yet")
            playIntro()
        else:
            playIntro()
    elif player == "Kryten":
        print("Are you sure you want to play as the android? This is an adventure game you know and he's very obsessed with cleaning. Y or N")
        answer = input()
        if answer in ("y", "Y"):
            print("Well you can't, I haven't written it yet")
            playIntro()
        else:
            playIntro()
    elif player == "Cat":
        print("Are you sure you want to play as the self obsessed feline that is the Cat? Y or N")
        answer = input()
        if answer in ("y", "Y"):
            print("Well you can't, I haven't written it yet")
            playIntro()
        else:
            playIntro()
    # Shown after the confirmation flow, as in the original.
    print("Type help to see a list of example commands")
#Lister's story
def listersStory():
    """Print the opening narration for Lister's (only implemented) storyline."""
    narration = (
        "Holly: You are Dave Lister. You are approximately 3 million years old and have the body of a 55 year old. Your mission to find Kochanski and get back to Earth has not been going well.",
        "Holly: Time to wake up Dave.",
        "You awake with the hangover from hell, somewhere in the bowels of the ship. It is very dark.",
        "To progress in the story try typing commands or you can ask Holly to help",
    )
    for line in narration:
        print(line)
# Script entry: run the intro once, then route to the chosen character.
player = playIntro(player)
confirmPlayer(player)
|
from flask import Flask
from .hejnote.routes import hejnote_routes

# Create the Flask app and register the hejnote routes at import time.
app = Flask(__name__)
hejnote_routes(app)

if __name__ == '__main__':
    # Development server only; use a WSGI server in production.
    app.run()
# -*- coding: utf-8 -*-
import wave
import struct
import glob
# NOTE(review): scipy.fromstring and scipy.int16 were removed in modern
# SciPy/NumPy releases and appear unused below — confirm and drop.
from scipy import fromstring, int16
import numpy as np
from mylibs import fourier, combine, constants as con
import re
def get_dataset(filename, samples, span, offset=0):
    """Read a slice of a 16-bit stereo WAV file.

    Takes samples*span frames (4 bytes each: two int16 channels) starting at
    byte ``offset`` and returns the (left, right) channels normalised to
    [-1, 1).
    """
    with wave.open(filename, "rb") as reader:
        frames = reader.readframes(reader.getnframes())
    window = frames[offset:samples * span * 4 + offset]
    signal = np.frombuffer(window, dtype="int16") / 32768.0
    # Interleaved stereo: even indices are left, odd are right.
    return signal[::2], signal[1::2]
def create_test_data(left, right):
    """Vectorise complex spectra: one row [l.real, l.imag, r.real, r.imag]
    per sample index, as a float64 array.

    Iterates to len(right) - 1, mirroring the original bounds (the last
    sample is not included).
    """
    rows = [
        [left[k].real, left[k].imag, right[k].real, right[k].imag]
        for k in range(len(right) - 1)
    ]
    # dtype forced to float64 to match the original np.append-based build-up.
    return np.array(rows, dtype=np.float64)
def pack_step(data, batch_num, samples, dims):
    """Trim ``data`` to its first batch_num + samples rows and reshape to
    (steps, 1, dims) for the model; prints the resulting shape."""
    head = data[:batch_num + samples]
    reshaped = head.reshape((-1, 1, dims))
    print(reshaped.shape)
    return reshaped
# Batch-convert stereo WAV inputs to Fourier-feature .data files, then rebuild
# a test WAV from the first input as a round-trip check.
test_files = glob.glob('/data/input/*.wav')
#test_files = test_files[1:2]
for filename in test_files:
    # con.* values (N, span, offset, batch_num, samples, dims, fr) come from
    # mylibs.constants.
    left, right = get_dataset(filename, con.N, con.span, offset=con.offset)
    Kl, Kr = fourier.fourier(left), fourier.fourier(right)
    data = create_test_data(Kl, Kr)
    data_rs = pack_step(data, con.batch_num, con.samples, con.dims)
    outfile = filename.replace('.wav', '.data')
    np.save(outfile, data_rs)
# Round-trip: reload the first file's features and write them back as audio.
outputtest = '/data/output/test.wav'
filename = test_files[0]
outfile = filename.replace('.wav', '.data.npy')
print(outfile)
data = np.load(outfile)
Kl, Kr = combine.data_spliter(np.reshape(data, (-1, con.dims)))
print(Kl.shape)
print(Kr.shape)
raw = combine.normal_combine(Kl, Kr)
#left = fourier.inverse_fourier(Kl)
#right = fourier.inverse_fourier(Kr)
#raw = combine.combine_wav(left, right)
# Scale back to 16-bit PCM range.
raw_data = raw[:] * 32768
length = len(raw_data)
print(len(raw_data))
raw_data = raw_data.astype('int16')
outf = '/data/output/test.wav'
outd = struct.pack("h" * len(raw_data), *raw_data)
ww = wave.open(outf, 'w')
ww.setnchannels(2)
ww.setsampwidth(2)
ww.setframerate(con.fr)
ww.writeframes(outd)
ww.close()
|
import sys


def digit_sum(digit_string):
    """Sum of the decimal digits in ``digit_string`` (e.g. '1234' -> 10)."""
    return sum(int(ch) for ch in digit_string)


if __name__ == '__main__':
    # Guarded so importing this module no longer requires a CLI argument.
    print(digit_sum(sys.argv[1]))
import machine
from machine import Pin, Timer, I2C
from micropython import const
import time
# MC3216 accelerometer register map (const() keeps the values in flash on
# MicroPython).  Register roles inferred from the names and the usage below —
# confirm against the MC3216 datasheet.
MC3216_Mode = const(0x07)    # mode control; written with 1 below to activate
MC3216_Opstat = const(0x04)  # operational status; bit 0 checked after init
MC3216_Outcfg = const(0x20)  # output configuration; written with 2 at init
MC3216_XOut = const(0x0D)    # X-axis output register
MC3216_YOut = const(0x0F)    # Y-axis output register
MC3216_ZOut = const(0x11)    # Z-axis output register
MC3216_SRTFR = const(0x08)   # sample-rate register (declared, unused here)
SlaveAddress = const(0x4C)   # I2C address of the device
class MC3216Controller:
    """Minimal I2C driver for the MC3216 accelerometer.

    Configures the output format, wakes the device, and exposes single-byte
    axis reads.  Fixes: removed the unused local ``count`` in
    WriteToRegister and the stray C-style semicolons.
    """

    def __init__(self, i2c):
        """i2c: a machine.I2C instance wired to the sensor.

        Raises Exception if the status register does not report the device
        active after wake-up.
        """
        self.i2c = i2c
        self.WriteToRegister(MC3216_Outcfg, 2)
        self.WriteToRegister(MC3216_Mode, 1)
        read = self.ReadFromRegister(MC3216_Opstat, 1)
        # Bit 0 of the status register is expected to be set once awake.
        if (read[0] & 0x01) != 0x01:
            raise Exception("Unexpected init!")

    def WriteToRegister(self, reg, data):
        """Write one data byte to register ``reg``."""
        buf = bytearray(2)
        buf[0] = reg
        buf[1] = data
        self.i2c.writeto(SlaveAddress, buf)

    def ReadFromRegister(self, reg, count):
        """Read ``count`` bytes starting at register ``reg``."""
        return self.i2c.readfrom_mem(SlaveAddress, reg, count)

    def GetX(self):
        """Raw X-axis register value as an int."""
        reg = self.ReadFromRegister(MC3216_XOut, 1)
        return int(reg[0])

    def GetY(self):
        """Raw Y-axis register value as an int."""
        reg = self.ReadFromRegister(MC3216_YOut, 1)
        return int(reg[0])

    def GetZ(self):
        """Raw Z-axis register value as an int."""
        reg = self.ReadFromRegister(MC3216_ZOut, 1)
        return int(reg[0])
|
from django.db import models
# Create your models here.
class Rate(models.Model):
    """Rating value attached one-to-one to a Movie."""
    # NOTE(review): the positional range(0, 5) is received as the field's
    # verbose_name, NOT as choices/validators — it does not constrain the
    # stored value.  Probably meant to limit ratings to 0-4; confirm and use
    # `choices` or MinValueValidator/MaxValueValidator (schema migration
    # implications), which is why it is flagged rather than changed here.
    rate_value = models.IntegerField(range(0,5) , blank = True , null = True )

    def __str__(self):
        return (f' {self.rate_value} ')
class Actor(models.Model):
    """An actor with basic biography data and a portrait image."""
    actor_name = models.CharField(max_length = 50)
    actor_nationality = models.CharField(max_length = 20)
    # Uploaded portraits land under MEDIA_ROOT/actors/pictures.
    actor_personal_image = models.ImageField(upload_to = 'actors/pictures')
    birth_date = models.DateField()

    def __str__(self):
        return (f'{self.actor_name}')
class Category(models.Model):
    """A movie genre/category with a short description."""
    category_name = models.CharField(max_length = 100)
    category_description = models.CharField(max_length = 150)

    def __str__(self):
        return (f' {self.category_name} ')
class Movie(models.Model):
    """A film with its media file, categories, rating and actor link."""
    # Explicit integer primary key instead of Django's implicit auto id.
    Id = models.IntegerField(primary_key=True)
    title = models.CharField(max_length=20)
    # Raw text (presumably an image URL/path) rather than an ImageField —
    # confirm intent.
    img = models.TextField()
    movie_file = models.FileField(upload_to = 'movies/') #--> done
    movie_category = models.ManyToManyField(Category) #new ---> done
    movie_rate = models.OneToOneField(Rate , null = True , on_delete = models.SET_NULL)
    # NOTE(review): a ForeignKey means each movie links to exactly ONE actor
    # despite the plural name; a ManyToManyField is usual for a cast list.
    movie_actors = models.ForeignKey(Actor, null = True , on_delete = models.SET_NULL)

    def __str__(self):
        # NOTE(review): self.movie_category is a related manager; the
        # f-string renders the manager object, not the category names.
        return (f' Movie {self.title}|{self.movie_category}')
class Task(models.Model):
    """A named task with an integer priority."""
    task_name = models.CharField(max_length=30)
    task_priority = models.IntegerField() #take care -->

    def __str__(self):
        return (f'{self.task_name}')
#new
# class Customer(models.Model):
# customer_name = models.CharField(max_length = 50)
# customer_age = models.IntegerField |
from ocp_resources.mtv import MTV
from ocp_resources.resource import NamespacedResource
class NetworkMap(NamespacedResource, MTV):
    """
    Migration Toolkit For Virtualization (MTV) NetworkMap object.
    Args:
        source_provider_name (str): MTV Source Provider CR name.
        source_provider_namespace (str): MTV Source Provider CR namespace.
        destination_provider_name (str): MTV Destination Provider CR name.
        destination_provider_namespace (str): MTV Destination Provider CR namespace.
        mapping (dict): Network Resources Mapping
    Example:
        [ { "destination" : { "type": "pod",
            "source" : { "id": "network-13" }},
          { "destination" : { "name": "nad_cr_name",
                              "namespace": "nad_cr_namespace",
                              "type": "multus"},
            "source" : { "name": "VM Network" }},
        ]
    """

    api_group = NamespacedResource.ApiGroup.FORKLIFT_KONVEYOR_IO

    def __init__(
        self,
        name=None,
        namespace=None,
        mapping=None,
        source_provider_name=None,
        source_provider_namespace=None,
        destination_provider_name=None,
        destination_provider_namespace=None,
        client=None,
        teardown=True,
        yaml_file=None,
    ):
        super().__init__(
            name=name,
            namespace=namespace,
            client=client,
            teardown=teardown,
            yaml_file=yaml_file,
        )
        self.mapping = mapping
        self.source_provider_name = source_provider_name
        self.source_provider_namespace = source_provider_namespace
        self.destination_provider_name = destination_provider_name
        self.destination_provider_namespace = destination_provider_namespace
        # Readiness condition message the base class waits for.
        self.condition_message_ready = self.ConditionMessage.NETWORK_MAP_READY

    def to_dict(self):
        """Render the CR body; a yaml_file, when given, wins over kwargs."""
        res = super().to_dict()
        if self.yaml_file:
            return res
        # map_to_dict is provided by the MTV mixin.
        res.update(self.map_to_dict)
        return res
|
try:
    import os, logging, sys, glob, webbrowser, time
    try:
        # collections.Iterable was removed in Python 3.10; prefer the
        # collections.abc home with a fallback for very old interpreters.
        from collections.abc import Iterable  # used in the flatten function
    except ImportError:
        from collections import Iterable
    from bisect import bisect_left
except ImportError:
    print("ImportERROR: Missing fundamental packages (required: bisect, collections, os, sys, glob, logging, time, webbrowser).")
try:
    import config as cfg
except ImportError:
    print("ImportERROR: Cannot find Hy2Opt.pypool.config")
try:
    from osgeo import ogr
except ImportError:
    print("ImportWARNING: Cannot find osgeo.ogr - geospatial functions are not be available.")
# FUNCTION WRAPPERS - MUST BE ON TOP OF THE FILE
def ogr_shp_env(func):
    """Decorator guarding shapefile helpers: if the wrapped call raises
    (typically because osgeo.ogr failed to import), print an error and return
    None instead of propagating.

    Fixes: the former bare ``except`` also swallowed KeyboardInterrupt /
    SystemExit; functools.wraps now preserves the wrapped function's metadata.
    """
    import functools

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception:
            print("ERROR: osgeo.ogr not available.")
    return wrapper
# MAIN FUNCTIONS
def chk_is_empty(variable):
    """Return the truthiness of ``variable`` rendered as a string.

    NOTE(review): the float() attempt is effectively dead code — the later
    str() conversion never raises ValueError, so ``value`` always ends up as
    str(variable).  Consequently this returns False only when str(variable)
    is empty; e.g. chk_is_empty(0) returns True because str(0) == "0" is
    truthy.  Confirm whether that is the intended contract.
    """
    try:
        value = float(variable)
    except ValueError:
        value = variable
        pass
    try:
        value = str(variable)
    except ValueError:
        pass
    return bool(value)
def chk_dir(directory):
    """Ensure ``directory`` exists.

    Returns True if it already existed, False if it had to be created.
    """
    if os.path.exists(directory):
        return True
    os.makedirs(directory)
    return False
def clean_dir(directory):
    """
    Remove every file and sub-directory inside ``directory`` while keeping the
    directory itself, assuming there are no symbolic links.
    CAUTION: This is dangerous! For example, clean_dir('/') could delete all
    your disk files.
    """
    for parent, subdirs, filenames in os.walk(directory, topdown=False):
        for filename in filenames:
            os.remove(os.path.join(parent, filename))
        for subdir in subdirs:
            os.rmdir(os.path.join(parent, subdir))
def copy_tree(source_directory, target_directory):
    """
    Copies all files and folders from source_directory to target_directory.
    :param source_directory: STR of full path of source directory - must END WITH "/"
    :param target_directory: STR of full path of target directory - must END WITH "/"
    :return: BOOL: False if new model, True if model already exists

    Fix: the previous version only re-created the directory structure and
    never copied any files; files are now copied (with metadata) via
    shutil.copy2.
    """
    import shutil

    exists = chk_dir(target_directory)
    for dirpath, dirnames, filenames in os.walk(source_directory):
        structure = os.path.join(target_directory, dirpath[len(source_directory):])
        if not os.path.isdir(structure):
            os.mkdir(structure)
        else:
            print("Manual overwrite?")
        for filename in filenames:
            shutil.copy2(os.path.join(dirpath, filename), os.path.join(structure, filename))
    return exists
def cool_down(seconds):
    """Pause execution for ``seconds`` (INT) seconds, writing a countdown to
    stdout so the user sees progress."""
    sys.stdout.write('Cooling down (waiting for processes to end) ... ')
    remaining = seconds
    while remaining > 0:
        sys.stdout.write(str(remaining) + ' ')
        sys.stdout.flush()
        time.sleep(1)
        remaining -= 1
    sys.stdout.write('\n')
def del_ovr_files(directory):
    """Best-effort removal of every .ovr file directly inside ``directory``
    (the path must end with "\\" or "/")."""
    for name in os.listdir(directory):
        if not os.path.isfile(os.path.join(directory, name)):
            continue
        if ".ovr" in name:
            try:
                print("Attempting to remove old temporary files ...")
                os.remove(directory + name)
                print("Success.")
            except:
                # deliberately best-effort: locked/vanished files are skipped
                pass
def dict_values2list(dv):
    """Convert a 'dict_values' (or 'dict_keys') view into a plain list."""
    return [item for item in dv]
def dict2str(dictionary, **kwargs):
    """Render ``dictionary`` as a "{k: v, ...}" string (str items quoted),
    e.g. for arcpy CalculateField expressions.

    Any keyword argument whose name contains "inverse_dict" and is truthy
    swaps keys and values in the output.  Note: an empty dict renders as "{"
    (original behaviour, preserved).
    """
    swap = False
    for name, flag in kwargs.items():
        if "inverse_dict" in name:
            swap = flag
    rendered = "{"
    items = list(dictionary.items())
    for position, (key, val) in enumerate(items, start=1):
        key_s = "'%s'" % key if type(key) == str else str(key)
        val_s = "'%s'" % val if type(val) == str else str(val)
        pair = (val_s, key_s) if swap else (key_s, val_s)
        rendered += "%s: %s" % pair
        rendered += ", " if position != len(items) else "}"
    return rendered
def dict_read_from_file(filename, sep=","):
    """Parse ``filename`` into {first_field: set_of_ints} — every line's first
    ``sep``-separated field keys the set of the remaining fields as ints."""
    result = {}
    with open(filename, "r") as source:
        for row in source:
            fields = row.split(sep)
            result[fields[0]] = {int(entry) for entry in fields[1:]}
    return result
def dict_nested_read_from_file(filename, sep="::"):
    """Parse lines of "outer<sep>inner<sep>value" into a nested dict
    {outer: {inner: value}}."""
    nested = {}
    with open(filename, "r") as source:
        for row in source:
            fields = row.strip("\n").split(sep)
            if fields[0] in nested:
                nested[fields[0]].update({fields[1]: fields[2]})
            else:
                nested.update({fields[0]: {fields[1]: fields[2]}})
    # Drop a stray 0 key if one ever appears (original behaviour, preserved).
    nested.pop(0, None)
    return nested
def dict_write2file(dictionary, filename, sep=","):
    """Append one "key v1<sep>v2<sep>..." line per dictionary entry to
    ``filename`` (values are str()-converted)."""
    with open(filename, "a") as sink:
        for key in dictionary.keys():
            joined = sep.join([str(entry) for entry in dictionary[key]])
            sink.write(str(key) + " " + joined + "\n")
def dict_nested_write2file(dictionary, filename, sep="::"):
    """
    Saves a nested dictionary to filename, one "top<sep>sub<sep>value" line
    per innermost entry (overwrites the file).
    :param dictionary: dict[dict]
    :param filename: STR
    :param sep: STR (optional)
    """
    lines = []
    for outer_key, inner_dict in dictionary.items():
        for inner_key, inner_val in inner_dict.items():
            lines.append(str(outer_key) + sep + str(inner_key) + sep + str(inner_val) + "\n")
    with open(filename, "w") as sink:
        sink.writelines(lines)
def file_names_in_dir(directory):
    """Names (not full paths) of the plain files directly inside ``directory``."""
    entries = os.listdir(directory)
    return [entry for entry in entries if os.path.isfile(os.path.join(directory, entry))]
def flatten(lis):
    """Recursively yield the atoms of an arbitrarily nested iterable.

    Strings are treated as atoms, not re-iterated character by character.
    """
    for element in lis:
        if isinstance(element, Iterable) and not isinstance(element, str):
            yield from flatten(element)
        else:
            yield element
def get_closest_val_in_list(usr_list, target_num):
    """Return the element of the SORTED ``usr_list`` closest to ``target_num``;
    when two elements are equally close the smaller one wins."""
    insert_at = bisect_left(usr_list, target_num)
    if insert_at == 0:
        return usr_list[0]
    if insert_at == len(usr_list):
        return usr_list[-1]
    lower, upper = usr_list[insert_at - 1], usr_list[insert_at]
    # strict '<' makes ties resolve to the lower neighbour
    return upper if upper - target_num < target_num - lower else lower
def get_credits():
    """Read the credits dialogue template (cfg.dir2templates) and return it as
    one newline-joined string."""
    with open(cfg.dir2templates + "dialogues/credits.txt", "r") as credits_file:
        credits_str = "\n".join(credits_file.read().splitlines())
    return credits_str
def get_newest_file(directory, exclude=None):
    """
    Finds the newest file name, excluding those that contain an "exclude" expression
    :param directory: STR of directory
    :param exclude: STR of filetypes to exclude, e.g., ".txt"
    :return: STR of newest file name

    Fix: the previous version removed entries from the list WHILE iterating
    it, which skips the element following every removal (consecutive matches
    survived the filter); filtering now builds a new list.
    """
    file_list = sorted(glob.iglob(os.path.join(directory, '*')), key=os.path.getctime, reverse=True)
    if exclude:
        file_list = [path for path in file_list if str(exclude) not in str(path)]
    return file_list[0]
@ogr_shp_env
def get_shp_extent(dir2shp):
    """
    Assesses extents of a shapefile using osgeo's ogr module
    :param dir2shp: STR of full path to shapefile (e.g., D:/a_shapefile.shp)
    :return: TUPLE of shapefile extents (INT(X_length[West-East]), INT(Y_length[South-North]))
    """
    shp_driver = ogr.GetDriverByName("ESRI Shapefile")
    source = shp_driver.Open(dir2shp, 0)
    shp_layer = source.GetLayer()
    # GetExtent() -> (Xmin[West], Xmax[East], Ymin[South], Ymax[North])
    x_min, x_max, y_min, y_max = shp_layer.GetExtent()
    # round(x + 0.4999) acts as a ceiling on the side lengths
    return (round(abs(x_max - x_min) + 0.4999), round(abs(y_max - y_min) + 0.4999))
@ogr_shp_env
def get_shp_field_names(dir2shp):
    """Return the attribute-field names defined in the shapefile's layer."""
    shp_driver = ogr.GetDriverByName("ESRI Shapefile")
    source = shp_driver.Open(dir2shp, 0)
    layer_def = source.GetLayer().GetLayerDefn()
    return [layer_def.GetFieldDefn(idx).name for idx in range(layer_def.GetFieldCount())]
@ogr_shp_env
def get_shp_field_values(dir2shp, field_name):
    """Return ``field_name``'s value for every feature in the shapefile."""
    shp_driver = ogr.GetDriverByName("ESRI Shapefile")
    source = shp_driver.Open(dir2shp, 0)
    return [feature.GetField(field_name) for feature in source.GetLayer()]
def get_subdir_names(directory):
    """Names of the immediate sub-directories of ``directory``."""
    return [entry for entry in os.listdir(directory)
            if os.path.isdir(os.path.join(directory, entry))]
def get_tf_models():
    """LIST of (internal) Hy2Opt.Tuflow model names found in cfg.dir2tf;
    a placeholder entry is returned when no model file exists."""
    model_names = []
    for path in list_file_type_in_dir(cfg.dir2tf + "models/", ".hy2model"):
        # strip any directory prefix (either separator style) and the suffix
        model_names.append(path.split("\\")[-1].split("/")[-1].split(".hy2model")[0])
    if len(model_names) < 1:
        model_names = ["NO MODEL AVAILABLE"]
    return model_names
def interpolate_linear(x1, x2, y1, y2, xi):
    """Linear interpolation of yi at xi between points (x1, y1) and (x2, y2)."""
    fraction = (xi - x1) / (x2 - x1)
    return y1 + fraction * (y2 - y1)
def list_file_type_in_dir(directory, f_ending):
    """
    :param directory: full directory ending on "/" or "\\"
    :param f_ending: STR, e.g., ".py"
    :return: LIST of full file paths
    """
    pattern = "{0}*{1}".format(directory, f_ending)
    return glob.glob(pattern)
def open_folder(directory):
    """Open ``directory`` in the platform's file browser (best effort).

    Silently does nothing if subprocess is unavailable or the platform prefix
    is not recognised.
    """
    try:
        import subprocess
        # other python versions than 2.7: import subprocess32
        my_platform = sys.platform
        if my_platform[0:3].lower() == "win":
            # Windows: hand the path to explorer via the shell
            call_target = "explorer " + directory
            subprocess.call(call_target, shell=True)
            print("Found subprocess --> opening target folder.")
        if my_platform[0:3].lower() == "lin":
            # Linux: xdg-open launches the default file manager
            subprocess.check_call(['xdg-open', '--', directory])
            print("Found subprocess --> opening target folder.")
        if my_platform[0:3].lower() == "dar":
            # macOS
            subprocess.check_call(['open', '--', directory])
            print("Found subprocess --> opening target folder.")
            try:
                # NOTE(review): easter egg — opens a Wikipedia page in the
                # user's browser on macOS; consider removing.
                os.system("start \"\" https://en.wikipedia.org/wiki/Criticism_of_Apple_Inc.")
            except:
                pass
    except:
        # best effort by design: failure to open a folder is non-fatal
        pass
def print_dict(dictionary):
    """Render a dict of string-iterables as concatenated " key - v1 + v2"
    segments in a single string."""
    segments = [" {0} - {1}".format(str(key), str(" + ".join(vals)))
                for key, vals in dictionary.items()]
    return "".join(segments)
def read_file_content(file_path):
    """
    :param file_path: STR of absolute dir to file, including file ending
    :return: list_of_lines: LIST of lines contained in file_path
             ([] when the file is missing or unreadable)
    """
    if not os.path.isfile(file_path):
        print("WARNING: Dialogue file (%s) does not exist." % file_path)
        return []
    handle = open(file_path)
    raw_lines = handle.readlines()
    collected = []
    try:
        for raw in raw_lines:
            collected.append(raw)
    except:
        handle.close()
        print("WARNING: Could not read dialogue file (%s)." % file_path)
        return []
    handle.close()
    return collected
def rm_dir(directory):
    """
    Deletes everything reachable from ``directory``, and the directory itself.
    Assumes there are no symbolic links.
    CAUTION: This is dangerous! For example, rm_dir('/') deletes all disk files.
    """
    for parent, subdirs, filenames in os.walk(directory, topdown=False):
        for filename in filenames:
            os.remove(os.path.join(parent, filename))
        for subdir in subdirs:
            os.rmdir(os.path.join(parent, subdir))
    os.rmdir(directory)
def rm_file(full_name):
    """Delete the file at ``full_name`` (STR of directory + file name),
    ignoring OS-level failures such as a missing file.

    Fix: narrowed the former bare ``except`` to OSError so programming errors
    (e.g. passing a non-string) are no longer silently swallowed.
    """
    try:
        os.remove(full_name)
    except OSError:
        pass
def str2frac(arg):
    """Convert a 'numerator/denominator' string to a float."""
    parts = arg.split('/')
    return int(parts[0]) / int(parts[1])
def str2num(arg, sep):
    """Convert a string of type 'X<sep>Y' to a number.

    sep is either ',' or '.'; e.g. '2,30' with sep=',' gives 2.3.
    """
    parts = arg.split(sep)
    integer_part = int(parts[0])
    decimal_part = int(parts[1])
    # scale the decimal part by the count of its digits
    return integer_part + decimal_part * 10 ** (-1 * len(str(decimal_part)))
def str2tuple(arg):
    """Parse 'X,Y' into a tuple of two ints.

    On a ValueError (wrong separator) an error is printed and the split parts
    are returned instead (original behaviour, preserved).
    """
    parts = arg.split(',')
    try:
        return (int(parts[0]), int(parts[1]))
    except ValueError:
        print('ERROR: Bad assignment of separator.\nSeparator must be [,].')
        return parts
def tuple2num(arg):
    """Join a two-entry tuple like (2, 40) into the number 2.40 — i.e. convert
    a ','-separated value pair into a '.'-separated float.
    call: tuple2num((2, 3))
    """
    whole, decimals = arg[0], arg[1]
    return whole + decimals * 10 ** (-1 * len(str(decimals)))
def write_data2file(folder_dir, file_name, data):
    """Write each item of ``data`` on its own line to folder_dir/file_name.txt.

    Side effect kept from the original: the process working directory is
    changed to ``folder_dir``.
    Fix: the output file is now closed deterministically (with-statement);
    it was previously left open.
    """
    if not os.path.exists(folder_dir):
        os.mkdir(folder_dir)
    os.chdir(folder_dir)
    with open(file_name + '.txt', 'w') as out_file:
        for item in data:
            out_file.write(str(item) + '\n')
    print('Data written to: \n' + folder_dir + '\\' + str(file_name) + '.txt')
|
from typing import List
class Solution:
def nextPermutation(self, nums: List[int]) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
def reverse(nums: List[int], start: int):
end = len(nums) - 1
while start < end:
nums[start], nums[end] = nums[end], nums[start]
start += 1
end -= 1
swap_old = len(nums) - 2
while swap_old >= 0 and nums[swap_old + 1] <= nums[swap_old]:
swap_old -= 1
if swap_old >= 0:
swap_new = len(nums) - 1
while nums[swap_new] <= nums[swap_old]:
swap_new -= 1
nums[swap_old], nums[swap_new] = nums[swap_new], nums[swap_old]
reverse(nums, swap_old + 1)
def main():
    """Demonstrate nextPermutation on a few sample inputs."""
    sol = Solution()
    for case in ([1, 2, 3], [3, 2, 1], [1, 1, 5]):
        sol.nextPermutation(case)
        print(case)


if __name__ == '__main__':
    main()
import unittest
import hello
import index
class TestHandlerCase(unittest.TestCase):
    """Smoke tests for the index and hello Lambda-style handlers."""

    def test_response(self):
        # index.handler: no event needed; expect a JSON "Hello World" body.
        print("testing response.")
        result = index.handler(None, None)
        print(result)
        self.assertEqual(result['statusCode'], 200)
        self.assertEqual(result['headers']['Content-Type'], 'application/json')
        self.assertIn('Hello World', result['body'])
        # hello.handler greets the name passed via the path parameters.
        event = {'pathParameters': {'name': 'Takashi'}}
        result = hello.handler(event, None)
        print(result)
        self.assertEqual(result['statusCode'], 200)
        self.assertEqual(result['headers']['Content-Type'], 'application/json')
        self.assertIn('hello Takashi', result['body'])
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
|
# Generated by Django 2.1.15 on 2020-04-02 13:59
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: retypes several crawl_elect Candidate columns
    to CharField(max_length=50).  Do not edit by hand."""

    dependencies = [
        ('crawl_elect', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='candidate',
            name='candi_cnt',
            field=models.CharField(max_length=50),
        ),
        migrations.AlterField(
            model_name='candidate',
            name='crim_cnt',
            field=models.CharField(max_length=50),
        ),
        migrations.AlterField(
            model_name='candidate',
            name='tax_5y',
            field=models.CharField(max_length=50),
        ),
        migrations.AlterField(
            model_name='candidate',
            name='tax_defalt',
            field=models.CharField(max_length=50),
        ),
        migrations.AlterField(
            model_name='candidate',
            name='tax_total',
            field=models.CharField(max_length=50),
        ),
        migrations.AlterField(
            model_name='candidate',
            name='wealth',
            field=models.CharField(max_length=50),
        ),
    ]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 21 15:35:11 2020
@author: prenaudin, elvinagovendasamy
"""
#Packages
from mrjob.job import MRJob
from mrjob.step import MRStep
import numpy as np
class PageRank(MRJob):
    """MRJob computing PageRank with dangling-mass redistribution over a fixed
    number (10) of map/reduce iterations.

    NOTE(review): the algorithm stores graph state in class-level attributes,
    which only works when all steps run in the same process (inline runner) —
    confirm the intended runner.
    """
    # see version 1
    total_nodes = 0
    # out-degree of every page
    dico_nj = {}
    # current PageRank of every page
    dico_pagerank = {}
    # Stores the PageRank mass lost through pages that cite nobody, so it can
    # be redistributed over the whole graph
    mass_loss = 0
    # Every page i that cites at least one page j
    dico_i = {}
    # Every page j that is cited at least once by some page i
    dico_j = {}

    # 1. Same as the previous version
    # 2. Record every citing page and every cited page in 'dico_i' and
    #    'dico_j' respectively.
    def mapper_adjacent(self, _, line):
        # each input line is an edge "i j" (page i cites page j)
        (i, j) = line.split()
        liste_nodes = np.unique(i)
        liste_nodes2 = np.unique(j)
        for identifiant in liste_nodes:
            PageRank.dico_nj.setdefault(identifiant,0)
            PageRank.dico_i.setdefault(identifiant,0)
        for identifiant in liste_nodes2:
            PageRank.dico_nj.setdefault(identifiant,0)
            PageRank.dico_j.setdefault(identifiant,0)
        PageRank.total_nodes = len(PageRank.dico_nj)
        yield 'in', (j,i)
        yield 'out', (i,j)

    # 1. Save the pages that are cited but cite nobody, so their mass can be
    #    redistributed later
    # 2. Same as before
    # 3. Yield the pages that cite nobody with an empty adjacency list
    def reducer_adjacent(self, label , values):
        # dangling pages: cited at least once but never citing
        PageRank.pages_no_out = np.setdiff1d(list(PageRank.dico_j.keys()), list(PageRank.dico_i.keys()))
        if label == 'in':
            for val in values:
                j,i = val
                # uniform initial PageRank 1/N
                PageRank.dico_pagerank[j] = 1/PageRank.total_nodes
        else:
            for val in values:
                i,j = val
                PageRank.dico_nj[i]+=1
                PageRank.dico_pagerank[i] = 1/PageRank.total_nodes
                yield i,j
            # dangling pages get an empty adjacency list
            for page in PageRank.pages_no_out:
                yield page, []

    # Same as before
    def reducer_adjacent2(self,i,j):
        # NOTE(review): j is the grouped-values iterable here; the code below
        # relies on the runner materialising it as a list — confirm.
        yield i,(j, 1/PageRank.total_nodes)

    # 1. Yield the pagerank of the pages that cite nobody, so it can be
    #    collected and redistributed
    # 2. Yield every page with its adjacency list and pagerank (including the
    #    dangling pages, since their pagerank must be updated too)
    def mapper_PageRank(self, i, node):
        adjacent_list, pagerank = node
        total_neighbours=len(adjacent_list)
        # handle the pages that cite nobody (mass_loss); dangling pages are
        # presumed to arrive as [[]] — confirm
        if adjacent_list[0] == []:
            yield 'mass_loss', pagerank
        yield i, node

    # 1. Record the total value of the lost mass
    # 2. For each node i, yield its adjacency list and its pagerank
    # 3. Each page j in node i's adjacency list receives its share of node i's
    #    pagerank mass (if the adjacency list is empty there is no mass to
    #    distribute: it has been stored in mass_loss)
    def reducer_int(self, key, value):
        if key == 'mass_loss':
            PageRank.mass_loss = sum(value)
        else:
            for val in value:
                adjacent_list, pagerank = val
                total_neighbours=len(adjacent_list)
                p=pagerank/total_neighbours # N.PageRank/|N.AdjacencyList|
                yield key, ('node',val)
                for l in adjacent_list: # yields each neighbour l, and the pagerank of i/nb_neighbors
                    if l != [] :
                        yield l, ('pagerank', p) # Pass PageRank mass to neighbors, p is float

    # 1. Recover the graph structure through the 'node' records
    # 2. Compute each node's new pagerank (taking the lost mass into account)
    # 3. Store each node's new pagerank in dico_pagerank
    # 4. Yield each node's adjacency list and new pagerank so it can flow
    #    through mapper_PageRank and reducer_int again
    def reducer_PageRank(self, n_id , values):
        # share of the dangling mass redistributed to every node
        mass_loss = PageRank.mass_loss/PageRank.total_nodes
        c = 0.15 # provided damping constant
        sum_p = 0
        node = ([], sum_p)
        for val in values:
            label, content = val
            # If it's a node, save the node
            if label == 'node':
                node = content
            # If it's a pagerank, sum the pagerank
            elif label == 'pagerank':
                sum_p += content
        # update the pagerank
        new_pagerank = c/PageRank.total_nodes + (1-c)*(sum_p + mass_loss)
        # store the updated pagerank in the dictionary
        PageRank.dico_pagerank[n_id] = new_pagerank
        # Update the node with the new pagerank
        if node[0] != []:
            node = (node[0],new_pagerank)
        yield(n_id, node)

    def steps(self):
        # One adjacency-building pass followed by 10 fixed PageRank iterations
        # (mapper_PageRank + reducer_int, then reducer_PageRank).
        return [MRStep(mapper=self.mapper_adjacent,
                       reducer=self.reducer_adjacent),
                MRStep(reducer=self.reducer_adjacent2),
                MRStep(mapper=self.mapper_PageRank,
                       reducer=self.reducer_int),
                MRStep(reducer=self.reducer_PageRank),
                MRStep(mapper=self.mapper_PageRank,
                       reducer=self.reducer_int),
                MRStep(reducer=self.reducer_PageRank),
                MRStep(mapper=self.mapper_PageRank,
                       reducer=self.reducer_int),
                MRStep(reducer=self.reducer_PageRank),
                MRStep(mapper=self.mapper_PageRank,
                       reducer=self.reducer_int),
                MRStep(reducer=self.reducer_PageRank),
                MRStep(mapper=self.mapper_PageRank,
                       reducer=self.reducer_int),
                MRStep(reducer=self.reducer_PageRank),
                MRStep(mapper=self.mapper_PageRank,
                       reducer=self.reducer_int),
                MRStep(reducer=self.reducer_PageRank),
                MRStep(mapper=self.mapper_PageRank,
                       reducer=self.reducer_int),
                MRStep(reducer=self.reducer_PageRank),
                MRStep(mapper=self.mapper_PageRank,
                       reducer=self.reducer_int),
                MRStep(reducer=self.reducer_PageRank),
                MRStep(mapper=self.mapper_PageRank,
                       reducer=self.reducer_int),
                MRStep(reducer=self.reducer_PageRank),
                MRStep(mapper=self.mapper_PageRank,
                       reducer=self.reducer_int),
                MRStep(reducer=self.reducer_PageRank),
                ]
if __name__ == '__main__':
    PageRank.run()
    # Sum all final pageranks as a sanity check on total mass.
    somme_pagerank = 0
    for val in PageRank.dico_pagerank:
        somme_pagerank += PageRank.dico_pagerank[val]
    # Write the verification file, nodes sorted by descending pagerank.
    with open ('Pageranks_results_mass.txt','w') as g:
        g.write(f'Nombre de pages: {PageRank.total_nodes} \n')
        g.write(f'Somme des Pageranks: {somme_pagerank} \n')
        for k,v in sorted(PageRank.dico_pagerank.items(), key=lambda x : x[1], reverse = True):
            g.write(f'{k}: {v} \n')
|
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, User
from home.models import Producto
# Create your models here.
class ManejadorDeClientes(BaseUserManager):
    """Custom user manager for the Cliente model (email is the username)."""
    def create_user(self, email, password=None, p_natural_rut=None, tipo=None):
        """Create and save a Cliente with the given email/password/rut.

        Raises ValueError if no email is supplied.
        NOTE(review): the `tipo` parameter is accepted but never used —
        confirm whether it should populate Cliente.tipo_cliente.
        """
        if not email:
            raise ValueError("Usuario debe tener un email")
        user = self.model(
            email=self.normalize_email(email),
            p_natural_rut=p_natural_rut,
        )
        user.set_password(password)
        user.save(using=self.db)
        return user
    def create_superuser(self, email, p_natural_rut, password):
        """Create a Cliente with admin/staff/superuser flags enabled."""
        user = self.create_user(email=self.normalize_email(email),
                                password=password,
                                p_natural_rut=p_natural_rut)
        user.is_admin = True
        user.is_staff = True
        user.is_superuser = True
        user.save(using=self.db)
        return user
class Cliente(AbstractBaseUser):
    """Customer account; authenticates by email instead of username."""
    email = models.EmailField(verbose_name="email", max_length=35, unique=True)
    date_joined = models.DateTimeField(verbose_name='fecha creacion',
                                       auto_now_add=True)
    last_login = models.DateTimeField(verbose_name='ultimo login',
                                      auto_now=True)
    # Permission/state flags used by has_perm()/admin.
    is_active = models.BooleanField(default=True)
    is_admin = models.BooleanField(default=False)
    is_staff = models.BooleanField(default=False)
    is_superuser = models.BooleanField(default=False)
    # Address / classification fields.
    tipo_cliente = models.CharField(max_length=50)
    direccion_calle = models.CharField(max_length=50)
    direccion_numero = models.CharField(max_length=50)
    comuna = models.CharField(max_length=50)
    ciudad = models.CharField(max_length=50)
    #telefono = models.IntegerField(blank=True)
    #carro_id = models.IntegerField()
    # Natural-person identification.
    p_natural_rut = models.CharField(
        "Rut", help_text='Campo requerido', max_length=50)
    p_natural_nombre = models.CharField(
        "Nombre", help_text='Campo requerido', max_length=50)
    p_natural_apellido = models.CharField(max_length=50)
    # Company identification (used when the customer is a business).
    empresa_rut = models.CharField("Rut empresa", max_length=50)
    empresa_nombre = models.CharField("Nombre empresa", max_length=50)
    empresa_rol = models.CharField("Rol", max_length=50)
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['p_natural_rut']
    objects = ManejadorDeClientes()
    # def __str__(self):
    # return self.email
    def has_perm(self, perm, obj=None):
        # Admins have every permission; everyone else has none.
        return self.is_admin
    def has_module_perms(self, app_label):
        # All users can see every app's modules.
        return True
class Orden(models.Model):
    """A customer's order; its line items live in OrdenProducto."""
    cliente = models.ForeignKey(Cliente,
                                on_delete=models.SET_NULL,
                                blank=True,
                                null=True)
    fecha_orden = models.DateTimeField(auto_now_add=True)
    finalizada = models.BooleanField(default=False, null=True, blank=False)
    id_transaccion = models.CharField(max_length=200, null=True)
    @property
    def get_total_carro(self):
        """Sum of line totals across every item of this order."""
        return sum(item.get_total for item in self.ordenproducto_set.all())
    @property
    def get_items_carro(self):
        """Total quantity of items in this order."""
        return sum(item.cantidad for item in self.ordenproducto_set.all())
class OrdenProducto(models.Model):
    """One line of an order: a product plus the quantity ordered."""
    producto = models.ForeignKey(Producto,
                                 on_delete=models.SET_NULL,
                                 blank=True,
                                 null=True)
    orden = models.ForeignKey(Orden,
                              on_delete=models.SET_NULL,
                              blank=True,
                              null=True)
    cantidad = models.IntegerField(default=0, null=True, blank=True)
    fecha_agregado = models.DateTimeField(auto_now_add=True)
    @property
    def get_total(self):
        """Line total: unit price times quantity."""
        return self.producto.precio * self.cantidad
class DireccionDespacho(models.Model):
    """Shipping address attached to a customer and/or an order."""
    cliente = models.ForeignKey(Cliente,
                                on_delete=models.SET_NULL,
                                blank=True,
                                null=True)
    orden = models.ForeignKey(Orden,
                              on_delete=models.SET_NULL,
                              blank=True,
                              null=True)
    calle = models.CharField(max_length=200, null=True)
    numero = models.CharField(max_length=50, null=True)
    zipcode = models.CharField(max_length=50, null=True)
    comentario = models.CharField(max_length=200, null=True)
    fecha_agregado = models.DateTimeField(auto_now_add=True)
    def __str__(self):
        # All three fields are nullable; the previous plain concatenation
        # raised TypeError whenever any of them was None. Output is unchanged
        # for non-null values.
        return (self.calle or '') + (self.numero or '') + (self.comentario or '')
|
"""
Add two new attributes to the parent class: weight and height.
Add change_weight, change_height methods that take one parameter and add it to the corresponding argument.
If the parameter was not passed, increase by 0.2. Modify the fly method of the Parrot class.
If the weight is more than 0.1, display the message This parrot cannot fly.
"""
class Pet:
    """A generic pet tracking size, name and age."""
    def __init__(self, height, weight, name, age):
        self.height = height
        self.weight = weight
        self.name = name
        self.age = age
    def run(self):
        return "I'm running!"
    def jump(self):
        return "I jump!"
    def sleep(self):
        return "I'm sleeping!"
    def birthday(self):
        """Increment the pet's age by one year and return the new age.

        Fix: the new age was previously computed but never stored, which was
        inconsistent with change_weight/change_height (both persist).
        """
        self.age += 1
        return self.age
    def change_weight(self, weight=0.2):
        """Add *weight* (default 0.2) to the pet's weight; return the new value."""
        self.weight = self.weight + weight
        return self.weight
    def change_height(self, height=0.2):
        """Add *height* (default 0.2) to the pet's height; return the new value."""
        self.height = self.height + height
        return self.height
class Dog(Pet):
    """A dog: a Pet that can also bark."""
    def bark(self):
        """Return the dog's sound."""
        return "Bark!"
class Cat(Pet):
    """A cat: a Pet that can also meow."""
    def meow(self):
        """Return the cat's sound."""
        return "Meow!"
class Parrot(Pet):
    """A parrot: a Pet that can fly only while light enough."""
    def fly(self):
        """Report flight ability; parrots above 0.1 weight cannot fly."""
        if self.weight <= 0.1:
            return 'I fly'
        return "This parrot can't fly!"
# Demo: create one of each pet and exercise every method.
dog = Dog(95, 35, 'Sparky', 10)
cat = Cat(25, 15, 'Kitty', 2)
parrot = Parrot(15, 3, 'Kar', 100)
print(f'Dog name is : {dog.name}, height is: {dog.height} cm, weight is {dog.weight} kf, age is {dog.age} years')
print(f'Dog do : {dog.run()}, {dog.jump()}, {dog.sleep()}, I say: {dog.bark()}, and today i am {dog.birthday()} years old')
print(f'Cat name is : {cat.name}, height is: {cat.height} cm, weight is {cat.weight} kf, age is {cat.age} years')
print(f'Cat do : {cat.run()}, {cat.jump()}, {cat.sleep()}, I say: {cat.meow()}, and today i am {cat.birthday()} years old')
print(f'Parrot name is : {parrot.name}, height is: {parrot.height} cm, weight is {parrot.weight} kf, age is {parrot.age} years')
print(f'Parrot do : {parrot.run()}, {parrot.jump()}, {parrot.sleep()}, {parrot.fly()}, and today i am {parrot.birthday()} years old')
# Default increment (0.2), then check the parrot still cannot fly at weight 3.
print(dog.change_weight())
print(parrot.fly())
|
from app.util.dao import MysqlDao
class BotUser:
    """Database-backed QQ bot user: registration, points and daily actions.

    All persistence goes through MysqlDao; mutating methods raise a bare
    Exception when the underlying UPDATE/INSERT reports failure.
    """
    def __init__(self, qq, point=0, active=0, admin=0):
        self.qq = qq
        self.point = point
        self.active = active
        self.admin = admin
        # Ensure a row exists for this QQ as soon as the object is created.
        self.user_register()
    def user_register(self):
        """Register the user: insert a row unless the QQ already exists."""
        with MysqlDao() as db:
            res = db.query(
                "SELECT COUNT(*) FROM user WHERE qq=%s",
                [self.qq]
            )
            if not res[0][0]:
                res = db.update(
                    "INSERT INTO user (qq, points, active, admin) VALUES (%s, %s, %s, %s)",
                    [self.qq, self.point, self.active, self.admin]
                )
                if not res:
                    raise Exception()
    def sign_in(self):
        """Daily sign-in: add self.point points and stamp last_login = today."""
        with MysqlDao() as db:
            res = db.update(
                "UPDATE user SET points=points+%s, last_login=CURDATE() WHERE qq=%s",
                [self.point, self.qq]
            )
            if not res:
                raise Exception()
    def update_point(self, point):
        """Adjust the user's points.
        :param point: str, signed delta applied to the points column
        """
        with MysqlDao() as db:
            res = db.update(
                "UPDATE user SET points=points+%s WHERE qq=%s",
                [point, self.qq]
            )
            if not res:
                raise Exception()
    def get_sign_in_status(self) -> int:
        """Return the matching-row count (0/1): truthy if signed in today."""
        with MysqlDao() as db:
            res = db.query(
                "SELECT COUNT(*) FROM user WHERE qq=%s AND last_login=CURDATE()",
                [self.qq]
            )
            return res[0][0]
    def get_points(self) -> int:
        """Return the user's current points (annotation fixed: was bool)."""
        with MysqlDao() as db:
            res = db.query(
                "SELECT points FROM user WHERE qq=%s",
                [self.qq]
            )
            return res[0][0]
    def kick(self, src, dst, point, num) -> bool:
        """Kick action.
        :param src: source QQ
        :param dst: target QQ
        :param point: points dropped
        :param num: maximum times per day
        Returns False when today's limit is reached, True on success.
        """
        with MysqlDao() as db:
            res = db.query(
                "SELECT COUNT(*) FROM kick WHERE src=%s AND dst=%s AND TO_DAYS(time)=TO_DAYS(now())",
                [src, dst]
            )
            if not res[0][0] < num:
                return False
            self.update_point(point)
            # NOTE(review): kick logs -point while steal/bomb log +point —
            # confirm the sign convention is intentional.
            res = db.update(
                "INSERT INTO kick (src, dst, time, point) VALUES (%s, %s, NOW(), %s)",
                [src, dst, -point]
            )
            if not res:
                raise Exception()
            return True
    def steal(self, src, dst, point, num) -> bool:
        """Steal points from the target.
        :param src: source QQ
        :param dst: target QQ
        :param point: points stolen
        :param num: maximum times per day
        Returns False when today's limit is reached, True on success.
        """
        with MysqlDao() as db:
            res = db.query(
                "SELECT COUNT(*) FROM steal WHERE src=%s AND dst=%s AND TO_DAYS(time)=TO_DAYS(now())",
                [src, dst]
            )
            if not res[0][0] < num:
                return False
            self.update_point(point)
            res = db.update(
                "INSERT INTO steal (src, dst, time, point) VALUES (%s, %s, NOW(), %s)",
                [src, dst, point]
            )
            if not res:
                raise Exception()
            return True
    def bomb(self, src, dst, point, num) -> bool:
        """Bomb action.
        :param src: source QQ
        :param dst: target QQ
        :param point: points dropped
        :param num: maximum times per day
        Returns False when today's limit is reached, True on success.
        """
        with MysqlDao() as db:
            res = db.query(
                "SELECT COUNT(*) FROM bomb WHERE src=%s AND dst=%s AND TO_DAYS(time)=TO_DAYS(now())",
                [src, dst]
            )
            if not res[0][0] < num:
                return False
            self.update_point(point)
            res = db.update(
                "INSERT INTO bomb (src, dst, time, point) VALUES (%s, %s, NOW(), %s)",
                [src, dst, point]
            )
            if not res:
                raise Exception()
            return True
    def get_moving_bricks_status(self) -> int:
        """Return the matching-row count: truthy if already moved bricks today."""
        with MysqlDao() as db:
            res = db.query(
                "SELECT COUNT(*) FROM user WHERE qq=%s AND last_moving_bricks=CURDATE()",
                [self.qq]
            )
            return res[0][0]
    def moving_bricks(self):
        """Move bricks: add self.point points and stamp last_moving_bricks."""
        with MysqlDao() as db:
            res = db.update(
                "UPDATE user SET points=points+%s, last_moving_bricks=CURDATE() WHERE qq=%s",
                [self.point, self.qq]
            )
            if not res:
                raise Exception()
    def get_work_status(self) -> int:
        """Return the matching-row count: truthy if already worked today."""
        with MysqlDao() as db:
            res = db.query(
                "SELECT COUNT(*) FROM user WHERE qq=%s AND last_part_time_job=CURDATE()",
                [self.qq]
            )
            return res[0][0]
    def work(self):
        """Part-time job: add self.point points and stamp last_part_time_job."""
        with MysqlDao() as db:
            res = db.update(
                "UPDATE user SET points=points+%s, last_part_time_job=CURDATE() WHERE qq=%s",
                [self.point, self.qq]
            )
            if not res:
                raise Exception()
|
from twisted.internet import reactor
from scrapy.crawler import Crawler
from scrapy import log, signals
from newscollect.spiders.cnn_spider import CNNSpider
from newscollect.spiders.guardian_spider import GuardianSpider
from scrapy.utils.project import get_project_settings
# Spider instances to run in one reactor; the counter below tracks how many
# are still open so we know when to stop the reactor.
spiders = [GuardianSpider(), CNNSpider()]
spiders_running = len(spiders)
def spider_stopped():
    """Signal handler: stop the reactor once every spider has closed."""
    global spiders_running
    spiders_running -= 1
    if spiders_running == 0:
        reactor.stop()
def setup_crawler(spider):
    """Configure and start a Crawler for *spider* (Python 2 / legacy Scrapy API)."""
    print "Starting crawl for", spider.name
    settings = get_project_settings()
    crawler = Crawler(settings)
    # Decrement the running counter whenever any spider finishes.
    crawler.signals.connect(spider_stopped, signal=signals.spider_closed)
    crawler.configure()
    crawler.crawl(spider)
    crawler.start()
# Launch every spider, then run the reactor until spider_stopped() halts it.
for spider in spiders:
    setup_crawler(spider)
log.start(loglevel=log.DEBUG)
reactor.run()
# Module for complex numbers
class Complex:
    """A complex number with real part ``r`` and imaginary part ``i``.

    Fixes: the per-method ''' strings were free-standing expression
    statements between methods (attached to nothing); they are now real
    docstrings. ``__add__``/``__sub__`` additionally accept int/float
    scalars, consistent with ``__mul__``/``__truediv__``.
    """
    def __init__(self, r, i):
        """Create a complex number from real part *r* and imaginary part *i*."""
        self.r = r
        self.i = i
    def __str__(self):
        """Render the number in the form a+bi (e.g. "3-2i", "i", "0")."""
        string = ""
        if self.r == 0:
            if self.i != 0:
                string += str(self.i) + "i"
            else:
                string += "0"
        else:
            string += str(self.r)
            if self.i > 0:
                if self.i == 1:
                    string += "+" + "i"
                else:
                    string += "+" + str(self.i) + "i"
            elif self.i < 0:
                if self.i == -1:
                    string += "-" + "i"
                else:
                    string += "-" + str(abs(self.i)) + "i"
        return string
    def __add__(self, c):
        """Add a Complex or a real scalar; supports the + operator."""
        if isinstance(c, (int, float)):
            return Complex(self.r + c, self.i)
        return Complex(self.r + c.r, self.i + c.i)
    def __sub__(self, c):
        """Subtract a Complex or a real scalar; supports the - operator."""
        if isinstance(c, (int, float)):
            return Complex(self.r - c, self.i)
        return Complex(self.r - c.r, self.i - c.i)
    def __mul__(self, c):
        """Multiply by a Complex or a real scalar; supports the * operator."""
        if isinstance(c, (int, float)):
            return Complex(self.r*c, self.i*c)
        # (a+bi)(c+di) = ac + adi + bci - bd
        return Complex(self.r*c.r - self.i*c.i, self.r*c.i + self.i*c.r)
    def __truediv__(self, c):
        """Divide by a Complex or a real scalar; supports the / operator."""
        if isinstance(c, (int, float)):
            return Complex(self.r/c, self.i/c)
        # (a+bi)/(c+di) = ((a+bi)(c-di))/((c+di)(c-di))
        # = (ac - adi + bci + bd)/(c^2 + d^2)
        # Real: (ac+bd)/(c^2+d^2)  Imaginary: (-ad+bc)/(c^2+d^2)
        new_r = (self.r*c.r + self.i*c.i)/(c.r**2 + c.i**2)
        new_i = (-1*self.r*c.i + self.i*c.r)/(c.r**2 + c.i**2)
        return Complex(new_r, new_i)
    def __abs__(self):
        """Return the modulus (absolute value) of the complex number."""
        return pow(self.r**2 + self.i**2, .5)
    def real(self):
        """Return the real component."""
        return self.r
    def imaginary(self):
        """Return the imaginary component."""
        return self.i
|
import numpy as np
import pandas as pd
from pandas import Series, DataFrame
from numpy import nan as NA
import matplotlib.pyplot as plt
import re
import mglearn
import sklearn
import os
os.chdir("../pjt_data")
apart = pd.read_csv("apart5.csv", index_col=0)
## feature merging / engineering
import seaborn as sns
# correlation inspection
corr_matrix = apart.corr()
corr_matrix['price'].sort_values(ascending=False)
sns.heatmap(corr_matrix,linewidths=0.1, vmax=0.5,
            cmap=plt.cm.gist_heat, linecolor='white', annot=True)
# multicollinearity check (1): VIF per raw feature
from patsy import dmatrices
import statsmodels.api as sm
from statsmodels.stats.outliers_influence import variance_inflation_factor
apart.columns
feature = "age+e_area+floor+lat+long+" \
          "parking+household+a_building+" \
          "high_building+low_building+s_area+a_household+room+bath"
y, X = dmatrices("price ~" + feature, data=apart, return_type= "dataframe")
vif = pd.DataFrame()
vif["VIF Factor"] = [variance_inflation_factor(X.values, i) for i in range(X.shape[1])]
vif["features"] = X.columns
vif.round(1)
# feature combinations
apart['h_parking'] = apart.parking / apart.household ## parking spots per household
apart['bath_room'] = apart.bath * apart.room ## rooms scaled by number of bathrooms
apart['e_room'] = apart.room / apart.e_area ## rooms per unit of exclusive area
apart['e_area_a_building'] = apart.e_area * apart.a_building ## exclusive area scaled by building count
# multicollinearity check (2): VIF after feature combination
feature2 = "age+floor+" \
           "high_building+low_building+" \
           "s_area+a_household+" \
           "heat+fuel+door+" \
           "h_parking+bath_room+e_room+e_area_a_building"
y, X = dmatrices("price ~" + feature2, data=apart, return_type= "dataframe")
vif = pd.DataFrame()
vif["VIF Factor"] = [variance_inflation_factor(X.values, i) for i in range(X.shape[1])]
vif["features2"] = X.columns
vif.round(1)
# normality test of the target (raw vs log price)
import seaborn as sns
from scipy import stats
apart.hist(bins=50, figsize=(10,10), xlabelsize=1)
sns.distplot(apart.price, kde=False, fit=stats.norm)
sns.distplot(np.log(apart.price), kde=False, fit=stats.norm)
# categorical features: one-hot encoding (first dummy dropped via iloc[:,1:])
ha = pd.DataFrame(pd.get_dummies(apart.heat).values, columns=['heat0','heat1','heat2','heat3'])
apart = pd.merge(apart, ha.iloc[:,1:], left_index=True, right_index=True)
fa = pd.DataFrame(pd.get_dummies(apart.fuel).values, columns=['fuel0','fuel1','fuel2'])
apart = pd.merge(apart, fa.iloc[:,1:], left_index=True, right_index=True)
da = pd.DataFrame(pd.get_dummies(apart.door).values, columns=['door0','door1','door2', 'door3'])
apart = pd.merge(apart, da.iloc[:,1:], left_index=True, right_index=True)
# dataset split
from sklearn.model_selection import train_test_split
key = apart.iloc[:,0:5].copy()
X_data = apart.drop('price', axis=1).iloc[:,5:]
y_target = apart['price']
## X_data = np.log(X_data + 1) # log scale
X_train, X_apart, y_train, y_apart = train_test_split(X_data, y_target, test_size=0.2, random_state=42)
# preliminary work before the log transform
# floor column contains negative values ((move this to the cleaning step!))
apart.iloc[:,7:8].describe()
apart.floor.where(apart.floor >= 0, 0, inplace=True)
apart.hist(bins=50, figsize=(10,15), xlabelsize=1)
X_data = np.log(X_data + 1) # log scale
# apply scaling AFTER splitting the dataset!!
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler() # StandardScaler scaling
scaler.fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_apart_scaled = scaler.transform(X_apart)
## keep only the key variables
apart.to_csv("apart.csv")
######################## scaler comparison #################################
aa = apart.columns.values
feature_all = "key+id+city+year_m+date+" \
              "age+e_area+floor+lat+long+" \
              "parking+household+a_building+" \
              "high_building+low_building+" \
              "s_area+a_household+room+bath+" \
              "heat+fuel+door+" \
              "h_parking+bath_room+e_room+e_area_a_building+" \
              "heat1+heat2+heat3+fuel1+fuel2+door1+door2+door3"
apart11 = apart.iloc[:,5:]
aa = apart11.columns.values
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(apart11)
tt2 = DataFrame(scaler.transform(apart11), columns=aa)
tt2.to_csv("apart4.csv")
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
scaler.fit(apart11)
tt1 = DataFrame(scaler.transform(apart11), columns=aa)
tt1.to_csv("apart.csv")
pd.plotting.scatter_matrix(tt1, hist_kwds={'bins':20}, s=30, alpha=0.8)
attributes = ["price", "s_area", "e_area", "room", "h_parking"]
### pd.plotting.scatter_matrix(apart[attributes], figsize=(12,8), c=apart.city, hist_kwds={'bins':20}, s=30, alpha=0.8)
apart.room_id.describe().astype(int)
######## do NOT scale the Y variable (except for ridge/lasso when the scale gap is too large) ############
# log scale (XXX)
apart.iloc[:,7:8].describe()
apart.floor.where(apart.floor >= 0, 0, inplace=True)
apart.drop('price', axis=1).iloc[:,5:] = np.log(apart.drop('price', axis=1).iloc[:,5:] + 1)
X_data.hist(bins=50, figsize=(10,10), xlabelsize=1)
# dataset split
from sklearn.model_selection import train_test_split
key = apart.iloc[:,0:5]
target = apart['price'].values
del(apart['price'])
X_data = apart.iloc[:,5:].values
# Fix: this call previously passed the invalid keyword `apart_size=0.3`;
# scikit-learn's train_test_split takes `test_size` and raises TypeError
# on unknown keywords.
X_train, X_apart, y_train, y_apart = train_test_split(X_data, target, test_size=0.3, random_state=42)
# StandardScaler scaling: fit on the training split only, transform both
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_apart_scaled = scaler.transform(X_apart)
|
# Definition for singly-linked list.
class ListNode:
    """Node of a singly-linked list (value plus next pointer)."""
    def __init__(self, x):
        self.val, self.next = x, None
# Definition for a binary tree node.
class TreeNode:
    """Node of a binary tree (value plus left/right children)."""
    def __init__(self, x):
        self.val = x
        self.left, self.right = None, None
class Solution:
    def sortedListToBST(self, head):
        """Convert a sorted singly-linked list into a height-balanced BST.

        Copies the list values into an array, then recursively picks the
        middle element as the root.
        """
        values = []
        while head:
            values.append(head.val)
            # Bug fix: the original wrote `head.next` without assignment,
            # so the loop never advanced and spun forever.
            head = head.next
        return self.buildBST(values)
    def buildBST(self, arr):
        """Build a balanced BST from sorted values; return the root (or None)."""
        if not arr:
            return None
        mid = len(arr)//2
        root = TreeNode(arr[mid])
        root.left = self.buildBST(arr[:mid])
        root.right = self.buildBST(arr[mid + 1:])
        return root
|
def check_conda_env(expected_env: str):
    """Checks that the expected conda environment is the same as the current
    conda environment
    Args:
        expected_env (str): What the conda env should be for this python
        script.
    """
    import subprocess
    import warnings
    import json
    # Ask conda for its state; stdout is captured as text and parsed as JSON.
    command_output = subprocess.run(["conda", "info", "--json"],
                                    universal_newlines=True,
                                    stdout=subprocess.PIPE)
    current_env = json.loads(command_output.stdout)['active_prefix_name']
    # Fixes: the two conditions were separate `if`s (now if/else) and the
    # local was misspelled `curent_env`. Printed messages are unchanged.
    if expected_env == current_env:
        print(f"""It's all ok, we're using the right conda environment:
        {expected_env}""")
    else:
        warning_message = f"""Oh no, the conda environment you want is:
        {expected_env} but the current environment is: {current_env}"""
        print(warning_message)
        warnings.warn(warning_message, Warning, stacklevel=2)
|
# coding: utf8
from django.core.management.base import BaseCommand
from core.models import DoskaField, Map
from utils.importer import parser_import
import settings
import urllib2
import json
class Command(BaseCommand):
    """Sync DoskaField/Map rows from the local ad-fields service (Python 2)."""
    def handle(self, *args, **options):
        for enabled_parser in settings.PARSERS_ENABLED:
            parser = parser_import(enabled_parser)
            group_name = parser.group_name
            # Fetch the advertised field list for this parser's group.
            response = urllib2.urlopen('http://localhost:5000/doska/ajax/get_adv_fields/%s/' % group_name)
            if response.getcode() == 200:
                data = json.loads(response.read())
                # status == 0 means success in this service's protocol
                # (inferred from usage — confirm against the service).
                if data.get('status') == 0:
                    fields = data.get('value')
                    # Replace all fields of this group, then recreate an
                    # empty default mapping for each new field.
                    DoskaField.objects.filter(group_name=group_name).delete()
                    for f in fields:
                        d_field = DoskaField(group_name=group_name, field_name=f)
                        d_field.save()
                        default_map = Map(imported_adv_class=enabled_parser, doska_field_name=f, imported_field_name='')
                        default_map.save()
|
#!/usr/bin/env python
# encoding: utf-8
import time
import functools
def timeit(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
start = time.clock()
func(args)
end = time.clock()
print 'used:', end - start
return wrapper
class Tester(object):
    """Toy class demonstrating the @timeit decorator on a method."""
    def __init__(self):
        pass
    @timeit
    def method_01(self):
        print 'in foo()'
if __name__ == "__main__":
    # Running the script times a single decorated method call.
    Tester().method_01()
|
import os
import pickle
import numpy as np
import pandas as pd
from sklearn import datasets
from django.conf import settings
from rest_framework import views
from rest_framework import status
from rest_framework.response import Response
from sklearn.ensemble import RandomForestClassifier
from scipy.fftpack import rfft
from sklearn.tree import DecisionTreeClassifier
class Train(views.APIView):
    """Placeholder training endpoint."""
    def post(self, request):
        # NOTE(review): training is not implemented; the endpoint only ACKs.
        return Response(status=status.HTTP_200_OK)
class Predict(views.APIView):
    """Classify a posted pose-keypoint sequence with four pickled models and
    return each model's majority-vote label."""
    def post(self, request):
        predictions1 = []
        predictions2 = []
        predictions3 = []
        predictions4 = []
        # Keypoint column names (kept for reference; not used below).
        columns_to_be_read = ['nose_x', 'nose_y', 'leftEye_x', 'leftEye_y', 'rightEye_x', 'rightEye_y',
                              'leftEar_x', 'leftEar_y', 'rightEar_x', 'rightEar_y', 'leftShoulder_x', 'leftShoulder_y',
                              'rightShoulder_x', 'rightShoulder_y', 'leftElbow_x', 'leftElbow_y', 'rightElbow_x',
                              'rightElbow_y', 'leftWrist_x', 'leftWrist_y', 'rightWrist_x', 'rightWrist_y', 'leftHip_x',
                              'leftHip_y', 'rightHip_x', 'rightHip_y', 'leftKnee_x', 'leftKnee_y', 'rightKnee_x',
                              'rightKnee_y', 'leftAnkle_x', 'leftAnkle_y', 'rightAnkle_x', 'rightAnkle_y']
        # Model class label -> human-readable sign name.
        #mapping = {"1":"Buy","2":"Communicate","3":"Fun","4":"Hope","5":"Mother","6.0":"Really"}
        mapping = {1.0: "Buy", 2.0: "Communicate", 3.0: "Fun", 4.0: "Hope", 5.0: "Mother", 6.0: "Really"}
        positions = []
        i =0
        # Flatten each posted frame into [score, kp_score, kp_x, kp_y, ...],
        # capped at 120 frames.
        for entry in request.data:
            if i==120:
                break
            i += 1
            try:
                for key, val in entry.items():
                    # NOTE(review): `frameno` is only initialised when a
                    # "score" key is seen first — confirm the payload always
                    # leads with "score", otherwise this raises NameError.
                    if key == "score":
                        frameno = []
                        frameno.append(val)
                    else:
                        for subframe in val:
                            for key1, val1 in subframe.items():
                                if key1 == "score":
                                    frameno.append(val1)
                                elif key1 == 'position':
                                    frameno.extend(list(val1.values()))
                positions.append(frameno)
            except Exception as err:
                return Response(str(err), status=status.HTTP_400_BAD_REQUEST)
        # Real FFT of the per-frame feature rows before classification.
        positions = rfft(positions)
        model_name1 = 'Random_forest.pkl'
        model_name2 = 'KNN.pkl'
        model_name3 = 'logistic_regresion.pkl'
        model_name4 = 'decision_tree.pkl'
        path1 = os.path.join(settings.MODEL_ROOT, model_name1)
        path2 = os.path.join(settings.MODEL_ROOT, model_name2)
        path3 = os.path.join(settings.MODEL_ROOT, model_name3)
        path4 = os.path.join(settings.MODEL_ROOT, model_name4)
        # Load the four pre-trained classifiers from disk.
        with open(path1, 'rb') as file1:
            model1 = pickle.load(file1)
        with open(path2, 'rb') as file2:
            model2 = pickle.load(file2)
        with open(path3, 'rb') as file3:
            model3 = pickle.load(file3)
        with open(path4, 'rb') as file4:
            model4 = pickle.load(file4)
        # for entry in positions:
        # #model_name = entry.pop('model_name')
        #newData = positions
        predictions1 = list(model1.predict(positions))
        predictions2 = list(model2.predict(positions))
        predictions3 = list(model3.predict(positions))
        predictions4 = list(model4.predict(positions))
        # Majority vote per model, mapped to its sign name.
        res = {}
        res[1] = mapping[max(predictions1,key=predictions1.count)]
        res[2] = mapping[max(predictions2,key=predictions2.count)]
        res[3] = mapping[max(predictions3,key=predictions3.count)]
        res[4] = mapping[max(predictions4,key=predictions4.count)]
        #res[1] = str(positions)
        #res[2] = str(predictions2)
        #res[3] = str(predictions3)
        #res[4] = str(predictions4)
        #val = max(predictions,key=predictions.count)
        #res = mapping[val[0]]
        return Response(res, status=status.HTTP_200_OK)
from math import gcd,ceil
# Parse the puzzle input: each line "a A, b B => c C" becomes
# ([(a, 'A'), (b, 'B')], (c, 'C')) — (input chemicals, output chemical).
reactions = [x.replace('\n','') for x in open('Day14/input.txt').readlines()]
reactions = [[y.strip() for y in x.split('=>')] for x in reactions]
reactions = [(x[0].split(', '), x[1].split(' ')) for x in reactions]
reactions = [([(int(y.split(' ')[0]), y.split(' ')[1].strip()) for y in x[0]], (int(x[1][0]),x[1][1].strip())) for x in reactions]
def compute_lcm(x, y):
    """Return the least common multiple of integers *x* and *y*.

    Uses integer floor division: the previous ``int((x*y)/gcd(x, y))`` went
    through float division and silently loses precision once x*y exceeds
    2**53.
    """
    return (x * y) // gcd(x, y)
def find_component(target):
    """Return the reaction rule whose output chemical is *target*."""
    matches = [rule for rule in reactions if rule[1][1] == target]
    return matches[0]
# Part 1: simulate producing 1 FUEL, tracking surplus chemicals, to get the
# ORE cost per unit of fuel.
ore_cost = 0
search_stack = [(1, 'FUEL')]
surpluses = {}
while len(search_stack) > 0:
    top = search_stack[0]
    # Consume any surplus of this chemical before producing more.
    if top[1] in surpluses:
        if top[0] >= surpluses[top[1]]:
            top = (top[0] - surpluses[top[1]],top[1])
            surpluses[top[1]] = 0
        elif top[0] < surpluses[top[1]]:
            surpluses[top[1]] -= top[0]
            search_stack = search_stack[1:]
            continue
    # ORE is raw input: just count it.
    if top[1] == "ORE":
        ore_cost += top[0]
        search_stack = search_stack[1:]
        continue
    # Run the producing reaction enough times (mult) and record the surplus.
    rules = find_component(top[1])
    mult = ceil(top[0]/rules[1][0])
    if top[1] not in surpluses:
        surpluses[top[1]] = 0
    surpluses[top[1]] += mult*rules[1][0]-top[0]
    for rule in rules[0]:
        search_stack.append((mult*rule[0],rule[1]))
    pass
    search_stack = search_stack[1:]
# Part 2: binary-search the fuel amount producible from 1 trillion ORE.
# NOTE(review): the simulation below duplicates the loop above verbatim —
# it could be extracted into a shared helper function.
guesses = {}
max_guess = 2*1000000000000/ore_cost
fuel_amount = int(1000000000000/(ore_cost*2))
while True:
    # Revisiting a guess means the search has converged.
    if fuel_amount in guesses:
        print(f'Found answer: {guesses[fuel_amount]}')
        break
    ore_cost = 0
    search_stack = [(fuel_amount, 'FUEL')]
    surpluses = {}
    while len(search_stack) > 0:
        top = search_stack[0]
        if top[1] in surpluses:
            if top[0] >= surpluses[top[1]]:
                top = (top[0] - surpluses[top[1]],top[1])
                surpluses[top[1]] = 0
            elif top[0] < surpluses[top[1]]:
                surpluses[top[1]] -= top[0]
                search_stack = search_stack[1:]
                continue
        if top[1] == "ORE":
            ore_cost += top[0]
            search_stack = search_stack[1:]
            continue
        rules = find_component(top[1])
        mult = ceil(top[0]/rules[1][0])
        if top[1] not in surpluses:
            surpluses[top[1]] = 0
        surpluses[top[1]] += mult*rules[1][0]-top[0]
        for rule in rules[0]:
            search_stack.append((mult*rule[0],rule[1]))
        pass
        search_stack = search_stack[1:]
    guesses[fuel_amount] = ore_cost
    # Bisect between the nearest previous guesses (bounded by 0 and max_guess).
    guess_keys = list(guesses.keys()).copy()
    guess_keys.append(max_guess)
    guess_keys.append(0)
    if ore_cost < 1000000000000:
        fuel_amount = int((min([x for x in guess_keys if x > fuel_amount])+fuel_amount)/2)
    else:
        fuel_amount = int((fuel_amount+max([x for x in guess_keys if x < fuel_amount]))/2)
|
def solution(array):
    """Return the maximum product of any three distinct elements of *array*.

    The maximum is either the product of the three largest values, or the
    product of the two smallest (possibly both negative) and the largest.
    Sorting gives O(n log n) instead of the previous O(n^3) triple loop,
    with identical results.
    """
    xs = sorted(array)
    return max(xs[-1] * xs[-2] * xs[-3], xs[0] * xs[1] * xs[-1])
print(solution([-3, 1, 2, -2, 5, 6]))
|
from kivy.properties import StringProperty
from kivy.uix.screenmanager import Screen
class ManagerScreen(Screen):
    """Kivy screen where the manager sets pizza prices for the customer view."""
    # Bound to the price text inputs in the .kv file (string defaults "0").
    txt_cheese_pizza_price = StringProperty("0")
    txt_hawaiian_pizza_price = StringProperty("0")
    txt_pepperoni_pizza_price = StringProperty("0")
    def welcome(self):
        """Navigate back to the welcome screen."""
        self.manager.current = "welcome_screen"
    def save(self):
        """Copy the entered prices onto the customer screen, then show it."""
        print("In ManagerScreen - save(), ")
        # assigns value in customer screen
        customer_screen = self.manager.get_screen("customer_screen")
        customer_screen.lbl_cheese_pizza_price = self.ids.txt_cheese_pizza_price.text
        customer_screen.lbl_hawaiian_pizza_price = self.ids.txt_hawaiian_pizza_price.text
        customer_screen.lbl_pepperoni_pizza_price = self.ids.txt_pepperoni_pizza_price.text
        #go to customer screen
        self.manager.current = "customer_screen"
|
import mysql.connector as sql
# adjust these values for your own account
vijf_letter_code = "svbiw"
# NOTE(review): credentials are hard-coded in source — move them to
# environment variables or a config file before sharing this script.
pwd = "luvinformatica"
HOST_DEFAULT = "hannl-hlo-bioinformatica-mysqlsrv.mysql.database.azure.com"
USER_DEFAULT = vijf_letter_code + "@hannl-hlo-bioinformatica-mysqlsrv"
DATABASE_DEFAULT = vijf_letter_code
# Connectivity smoke test: the connection object is discarded (never stored
# or closed); the script only proves that connecting succeeds.
sql.connect(host=HOST_DEFAULT,
            password=pwd,
            user=USER_DEFAULT,
            db=DATABASE_DEFAULT)
print('done')
|
from PyQt5 import QtCore, QtGui, QtWidgets
import requests, json
class Ui_MainWindow(object):
    """Qt-Designer-style UI: a city input, a button and a text browser that
    shows current weather fetched from OpenWeatherMap."""
    def setupUi(self, MainWindow):
        """Create and lay out all widgets and connect the button handler."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(456, 341)
        # NOTE(review): the API key is hard-coded in source — move it to
        # configuration before distributing.
        self.api_key = "8f23a1347177d649fd3afc4d97f09bb1"
        self.base_url = "http://api.openweathermap.org/data/2.5/weather?"
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.pushButton = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton.setGeometry(QtCore.QRect(320, 30, 91, 41))
        self.pushButton.setObjectName("pushButton")
        # City-name input box.
        self.textEdit = QtWidgets.QTextEdit(self.centralwidget)
        self.textEdit.setGeometry(QtCore.QRect(50, 30, 261, 41))
        self.textEdit.setObjectName("textEdit")
        self.font = QtGui.QFont("Times",25)
        self.textEdit.setFont(self.font)
        # Read-only area where the weather report is rendered.
        self.textBrowser = QtWidgets.QTextBrowser(self.centralwidget)
        self.textBrowser.setGeometry(QtCore.QRect(110, 110, 241, 131))
        self.textBrowser.setObjectName("textBrowser")
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 456, 18))
        self.menubar.setObjectName("menubar")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
        self.pushButton.clicked.connect(self.get_weather)
    def retranslateUi(self, MainWindow):
        """Set the translated window title and button caption."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.pushButton.setText(_translate("MainWindow", "Get Weather"))
    def lengths(self,line):
        """Prefix *line* with spaces to roughly centre it.

        Assumes an ~80-character-wide display — TODO confirm intended width.
        """
        self.line = line
        self.sp = ' '
        for i in range(40-(len(self.line)//2)):
            self.sp=self.sp + " "
        f_line = self.sp + self.line
        return f_line
    def get_weather(self):
        """Fetch current weather for the entered city and display it."""
        self.city_name = self.textEdit.toPlainText()
        self.complete_url = self.base_url + "appid=" + self.api_key + "&q=" + self.city_name
        self.response = requests.get(self.complete_url)
        self.x = self.response.json()
        # NOTE(review): no error handling — an unknown city or network
        # failure raises KeyError/exception here.
        self.cond=self.x['weather'][0]['main']
        self.detailes=self.x['weather'][0]['description']
        self.temp = self.x['main']['temp']
        # OpenWeatherMap returns temperature in Kelvin by default.
        self.kelvin = self.temp
        self.celcius = self.kelvin-273.15
        line_1 = self.lengths('City - '+self.city_name)
        line_2 = self.lengths('C: '+str(self.celcius))
        line_3 = self.lengths('Conditions: '+str(self.cond)+', Detailes: '+str(self.detailes))
        self.textBrowser.setText(line_1+"\n"+line_2+"\n"+line_3+"\n")
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
|
from load_data import *
import numpy as np
# locations = load_WarehouseLocations()
# distances = load_WarehouseRoutes(True)
# durations = load_WarehouseRoutes(False)
# print(locations['Distribution South'][0])
# print(durations['Distribution South']['Distribution North'])
# print(len(durations))
# print('f')
def divide_north_south(demand, durations, unvisited_set):
    """Assign each unvisited store to the nearer distribution centre.

    A store goes south when its travel duration from Distribution North is
    strictly greater than from Distribution South, otherwise north. Prints
    the weekday/weekend demand totals of both groups and returns
    (north_set, south_set).
    """
    north_set = []
    south_set = []
    from_north = durations['Distribution North']
    from_south = durations['Distribution South']
    for store in unvisited_set:
        bucket = south_set if from_north[store] > from_south[store] else north_set
        bucket.append(store)
    north_demand_weekday = sum((float(demand[s]['weekday_avg']) for s in north_set), 0.0)
    north_demand_weekend = sum((float(demand[s]['weekend_avg']) for s in north_set), 0.0)
    south_demand_weekday = sum((float(demand[s]['weekday_avg']) for s in south_set), 0.0)
    south_demand_weekend = sum((float(demand[s]['weekend_avg']) for s in south_set), 0.0)
    print(north_demand_weekday)
    print(north_demand_weekend)
    print(south_demand_weekday)
    print(south_demand_weekend)
    return north_set, south_set
def routes_set(demand, north, south):
    """Greedy route construction from each distribution centre (UNFINISHED).

    Only the north/weekday case is sketched; the function currently builds
    no routes and returns None.

    NOTE(review): for a non-empty `north` this never terminates - the while
    loop neither removes stores from `north` nor resets `min_duration`.
    NOTE(review): reads the module-level `durations` global instead of a
    parameter - presumably unintentional; verify before use.
    """
    # north
    # weekday
    # maximum demand at each route = 18
    north_visited_weekday = []
    max_demand = 18.0
    min_duration = np.inf
    while len(north) > 0.0:
        #finding minimum duration from distribution north
        for nor in north:
            if durations['Distribution North'][nor] < min_duration:
                min_duration = durations['Distribution North'][nor]
                nth_closest_store = nor  # closest still-unrouted store to the depot
            else:
                continue
        pass
def unvis_set(durations):
    """Return every location in *durations* except the two distribution centres.

    Parameters
    ----------
    durations : dict
        Mapping of location name -> travel-time row; only the keys are used.

    Returns
    -------
    list of str
        All store names in key order.  Unlike the previous
        list.remove-based version, missing depot keys are tolerated
        instead of raising ValueError.
    """
    depots = {'Distribution North', 'Distribution South'}
    return [loc for loc in durations if loc not in depots]
if __name__== "__main__":
    # Load demand and travel-time data, split the stores by nearest depot,
    # and run the (incomplete) route construction.
    demand = load_DemandData()
    # print(demand['Noel Leeming Albany']['weekday_avg'])
    durations = load_WarehouseRoutes(False)
    # print(durations['Distribution South']['Distribution North'])
    unvisited = unvis_set(durations)
    # print(unvisited)
    north_set, south_set = divide_north_south(demand, durations, unvisited)
    print("north set is =", north_set)
    print("south set is =", south_set)
    print(len(north_set))
    print(len(south_set))
    routes_set(demand, north_set, south_set)
|
import unittest
import logging
from lxml import etree as etree_
import sdc11073
from sdc11073 import namespaces
from sdc11073 import definitions_sdc
#pylint: disable=protected-access
# Address of the device under test; the client is constructed against it but
# the tests below feed reports in directly without connecting.
DEV_ADDRESS = '169.254.0.200:10000'
CLIENT_VALIDATE = True
# data that is used in report
observationTime_ms = 1467596359152
OBSERVATIONTIME = observationTime_ms/1000.0  # same timestamp in seconds
# descriptor handles of the three reported sample arrays
HANDLES = ("0x34F05506", "0x34F05501", "0x34F05500")
# expected per-handle sample values (note: the third handle has 6 samples)
SAMPLES = {"0x34F05506": (5.566406, 5.712891, 5.712891, 5.712891, 5.800781),
           "0x34F05501": (0.1, -0.1, 1.0, 2.0, 3.0),
           "0x34F05500": (3.198242, 3.198242, 3.198242, 3.198242, 3.163574, 1.1)}
WfReport_draft6 = u'''<?xml version="1.0" encoding="utf-8"?>
<SOAP-ENV:Envelope xmlns:SOAP-ENV="http://www.w3.org/2003/05/soap-envelope"
xmlns:SOAP-ENC="http://www.w3.org/2003/05/soap-encoding"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:xsd="http://www.w3.org/2001/XMLSchema"
xmlns:chan="http://schemas.microsoft.com/ws/2005/02/duplex"
xmlns:wsa5="http://www.w3.org/2005/08/addressing"
xmlns:ext="{ext}"
xmlns:dom="{dom}"
xmlns:dpws="http://docs.oasis-open.org/ws-dd/ns/dpws/2009/01"
xmlns:si="http://safety-information-uri/15/08"
xmlns:msg="{msg}"
xmlns:wsd11="http://docs.oasis-open.org/ws-dd/ns/discovery/2009/01"
xmlns:wse4="http://schemas.xmlsoap.org/ws/2004/08/eventing"
xmlns:wst4="http://schemas.xmlsoap.org/ws/2004/09/transfer"
xmlns:wsx4="http://schemas.xmlsoap.org/ws/2004/09/mex">
<SOAP-ENV:Header>
<wsa5:MessageID>
urn:uuid:904577a6-6012-4558-b772-59a9c90bacbb</wsa5:MessageID>
<wsa5:To SOAP-ENV:mustUnderstand="true">
http://169.254.0.99:62627</wsa5:To>
<wsa5:Action SOAP-ENV:mustUnderstand="true">
{msg}/15/04/Waveform/Waveform</wsa5:Action>
<wsa:Identifier xmlns:wsa="http://www.w3.org/2005/08/addressing">
urn:uuid:9f00ba10-3ffe-47e9-8238-88339a4a457d</wsa:Identifier>
</SOAP-ENV:Header>
<SOAP-ENV:Body>
<msg:WaveformStreamReport MdibVersion="2" SequenceId="">
<msg:State StateVersion="19716"
DescriptorHandle="0x34F05506" DescriptorVersion="2"
xsi:type="dom:RealTimeSampleArrayMetricState">
<dom:MetricValue xsi:type="dom:SampleArrayValue"
Samples="{array1}"
DeterminationTime="{obs_time}">
<dom:MetricQuality Validity="Vld"></dom:MetricQuality>
</dom:MetricValue>
</msg:State>
<msg:State StateVersion="19715"
DescriptorHandle="0x34F05501" DescriptorVersion="2"
xsi:type="dom:RealTimeSampleArrayMetricState">
<dom:MetricValue xsi:type="dom:SampleArrayValue"
Samples="{array2}"
DeterminationTime="{obs_time}">
<dom:MetricQuality Validity="Vld"></dom:MetricQuality>
<dom:Annotation><dom:Type Code="4711" CodingSystem="bla"/></dom:Annotation>
<dom:ApplyAnnotation AnnotationIndex="0" SampleIndex="2"></dom:ApplyAnnotation>
</dom:MetricValue>
</msg:State>
<msg:State StateVersion="19715"
DescriptorHandle="0x34F05500" DescriptorVersion="2"
xsi:type="dom:RealTimeSampleArrayMetricState">
<dom:MetricValue xsi:type="dom:SampleArrayValue"
Samples="{array3}"
DeterminationTime="{obs_time}">
<dom:MetricQuality Validity="Vld"></dom:MetricQuality>
</dom:MetricValue>
</msg:State>
</msg:WaveformStreamReport>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>
'''.format(obs_time=observationTime_ms,
array1=' '.join([str(n) for n in SAMPLES["0x34F05506"]]),
array2=' '.join([str(n) for n in SAMPLES["0x34F05501"]]),
array3=' '.join([str(n) for n in SAMPLES["0x34F05500"]]),
msg=namespaces.nsmap['msg'],
ext=namespaces.nsmap['ext'],
dom=namespaces.nsmap['dom'],
)
WfReport_draft10 = u'''<?xml version="1.0" encoding="utf-8"?>
<SOAP-ENV:Envelope xmlns:SOAP-ENV="http://www.w3.org/2003/05/soap-envelope"
xmlns:SOAP-ENC="http://www.w3.org/2003/05/soap-encoding"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:xsd="http://www.w3.org/2001/XMLSchema"
xmlns:chan="http://schemas.microsoft.com/ws/2005/02/duplex"
xmlns:wsa5="http://www.w3.org/2005/08/addressing"
xmlns:ext="{ext}"
xmlns:dom="{dom}"
xmlns:dpws="http://docs.oasis-open.org/ws-dd/ns/dpws/2009/01"
xmlns:si="http://safety-information-uri/15/08"
xmlns:msg="{msg}"
xmlns:wsd11="http://docs.oasis-open.org/ws-dd/ns/discovery/2009/01"
xmlns:wse4="http://schemas.xmlsoap.org/ws/2004/08/eventing"
xmlns:wst4="http://schemas.xmlsoap.org/ws/2004/09/transfer"
xmlns:wsx4="http://schemas.xmlsoap.org/ws/2004/09/mex">
<SOAP-ENV:Header>
<wsa5:MessageID>
urn:uuid:904577a6-6012-4558-b772-59a9c90bacbb</wsa5:MessageID>
<wsa5:To SOAP-ENV:mustUnderstand="true">
http://169.254.0.99:62627</wsa5:To>
<wsa5:Action SOAP-ENV:mustUnderstand="true">
{msg}/15/04/Waveform/Waveform</wsa5:Action>
<wsa:Identifier xmlns:wsa="http://www.w3.org/2005/08/addressing">
urn:uuid:9f00ba10-3ffe-47e9-8238-88339a4a457d</wsa:Identifier>
</SOAP-ENV:Header>
<SOAP-ENV:Body>
<msg:WaveformStream MdibVersion="2" SequenceId="">
<msg:State StateVersion="19716"
DescriptorHandle="0x34F05506" DescriptorVersion="2"
xsi:type="dom:RealTimeSampleArrayMetricState">
<dom:MetricValue xsi:type="dom:SampleArrayValue"
Samples="{array1}"
DeterminationTime="{obs_time}">
<dom:MetricQuality Validity="Vld"></dom:MetricQuality>
</dom:MetricValue>
</msg:State>
<msg:State StateVersion="19715"
DescriptorHandle="0x34F05501" DescriptorVersion="2"
xsi:type="dom:RealTimeSampleArrayMetricState">
<dom:MetricValue xsi:type="dom:SampleArrayValue"
Samples="{array2}"
DeterminationTime="{obs_time}">
<dom:MetricQuality Validity="Vld"></dom:MetricQuality>
<dom:Annotation><dom:Type Code="4711" CodingSystem="bla"/></dom:Annotation>
<dom:ApplyAnnotation AnnotationIndex="0" SampleIndex="2"></dom:ApplyAnnotation>
</dom:MetricValue>
</msg:State>
<msg:State StateVersion="19715"
DescriptorHandle="0x34F05500" DescriptorVersion="2"
xsi:type="dom:RealTimeSampleArrayMetricState">
<dom:MetricValue xsi:type="dom:SampleArrayValue"
Samples="{array3}"
DeterminationTime="{obs_time}">
<dom:MetricQuality Validity="Vld"></dom:MetricQuality>
</dom:MetricValue>
</msg:State>
</msg:WaveformStream>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>
'''.format(obs_time=observationTime_ms,
array1=' '.join([str(n) for n in SAMPLES["0x34F05506"]]),
array2=' '.join([str(n) for n in SAMPLES["0x34F05501"]]),
array3=' '.join([str(n) for n in SAMPLES["0x34F05500"]]),
msg=namespaces.nsmap['msg'],
ext=namespaces.nsmap['ext'],
dom=namespaces.nsmap['dom'],
)
class TestClientWaveform(unittest.TestCase):
    """Client-side handling of RealTimeSampleArray waveform reports.

    No device connection is made: pre-built SOAP envelopes (above) are fed
    straight into the client's _onWaveFormReport handler.
    """
    def setUp(self):
        # Client is only constructed, never connected.
        self.sdcClient_final = sdc11073.sdcclient.SdcClient(DEV_ADDRESS,
                                                            deviceType=definitions_sdc.SDC_v1_Definitions.MedicalDeviceType,
                                                            validate=CLIENT_VALIDATE,
                                                            my_ipaddress='169.254.0.3',
                                                            logLevel=logging.DEBUG)
        self.all_clients = (self.sdcClient_final,)
    def test_basic_handling(self):
        ''' call _onWaveformReport method directly. Verify that observable is a WaveformStream Element'''
        # same test for draft10 version
        cl = self.sdcClient_final
        soapenvelope = sdc11073.pysoap.soapenvelope.ReceivedSoap12Envelope.fromXMLString(WfReport_draft10.encode('utf-8'),
                                                                                         schema=cl._bicepsSchema.bmmSchema)
        cl._onWaveFormReport(soapenvelope)
        self.assertEqual(cl.waveFormReport.tag, namespaces.msgTag('WaveformStream'))
    def test_stream_handling(self):
        ''' Connect a mdib with client. Call _onWaveformReport method directly. Verify that observable is a WaveformStream Element'''
        my_handles = ('0x34F05506', '0x34F05501', '0x34F05500')
        for cl, wfReport in ((self.sdcClient_final, WfReport_draft10),):
            clientmdib = sdc11073.mdib.ClientMdibContainer(cl)
            clientmdib._bindToObservables()
            clientmdib._isInitialized = True # fake it, because we do not call initMdib()
            clientmdib.MDIB_VERSION_CHECK_DISABLED = True # we have no mdib version incrementing in this test, therefore disable check
            # create dummy descriptors
            for handle in my_handles:
                attributes = {'SamplePeriod': 'P0Y0M0DT0H0M0.0157S', # use a unique sample period
                              etree_.QName(sdc11073.namespaces.nsmap['xsi'], 'type'): 'dom:RealTimeSampleArrayMetricDescriptor',
                              'Handle':handle}
                element = etree_.Element('Metric', attrib=attributes, nsmap=sdc11073.namespaces.nsmap)
                clientmdib.descriptions.addObject(sdc11073.mdib.descriptorcontainers.RealTimeSampleArrayMetricDescriptorContainer.fromNode(clientmdib.nsmapper, element, None)) # None = no parent handle
            soapenvelope = sdc11073.pysoap.soapenvelope.ReceivedSoap12Envelope.fromXMLString(wfReport.encode('utf-8'))
            cl._onWaveFormReport(soapenvelope)
            # verify that all handles of reported RealTimeSampleArrays are present
            for handle in my_handles:
                current_samples = SAMPLES[handle]
                s_count = len(current_samples)
                rtBuffer = clientmdib.rtBuffers[handle]
                self.assertEqual(len(rtBuffer.rt_data), s_count)
                self.assertAlmostEqual(rtBuffer.sample_period, 0.0157)
                self.assertAlmostEqual(rtBuffer.rt_data[0].observationTime, OBSERVATIONTIME)
                self.assertAlmostEqual(rtBuffer.rt_data[-1].observationTime - OBSERVATIONTIME, rtBuffer.sample_period*(s_count-1), places=4)
                self.assertAlmostEqual(rtBuffer.rt_data[-2].observationTime - OBSERVATIONTIME, rtBuffer.sample_period*(s_count-2), places=4)
                for i in range(s_count):
                    self.assertAlmostEqual(rtBuffer.rt_data[i].value, current_samples[i])
            # verify that only handle 0x34F05501 has an annotation
            for handle in [my_handles[0], my_handles[2]]:
                rtBuffer = clientmdib.rtBuffers[handle]
                for sample in rtBuffer.rt_data:
                    self.assertEqual(len(sample.annotations), 0)
            rtBuffer = clientmdib.rtBuffers[my_handles[1]]
            annotated = rtBuffer.rt_data[2] # this object should have the annotation (SampleIndex="2")
            self.assertEqual(len(annotated.annotations), 1)
            self.assertEqual(annotated.annotations[0].coding.code, '4711')
            self.assertEqual(annotated.annotations[0].coding.codingSystem, 'bla')
            for i in (0,1,3,4):
                self.assertEqual(len(rtBuffer.rt_data[i].annotations), 0)
            # add another Report (with identical data, but that is not relevant here)
            soapenvelope = sdc11073.pysoap.soapenvelope.ReceivedSoap12Envelope.fromXMLString(wfReport.encode('utf-8'))
            cl._onWaveFormReport(soapenvelope)
            # verify only that array length is 2*bigger now
            for handle in my_handles:
                current_samples = SAMPLES[handle]
                s_count = len(current_samples)
                rtBuffer = clientmdib.rtBuffers[handle]
                self.assertEqual(len(rtBuffer.rt_data), s_count*2)
            #add a lot more data, verify that length limitation is working
            for i in range(100):
                soapenvelope = sdc11073.pysoap.soapenvelope.ReceivedSoap12Envelope.fromXMLString(wfReport.encode('utf-8'))
                cl._onWaveFormReport(soapenvelope)
            # verify only that array length is limited
            for handle in my_handles:
                current_samples = SAMPLES[handle]
                s_count = len(current_samples)
                rtBuffer = clientmdib.rtBuffers[handle]
                self.assertEqual(len(rtBuffer.rt_data), rtBuffer._max_samples)
def suite():
    """Collect all TestClientWaveform tests into one suite."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromTestCase(TestClientWaveform)
if __name__ == '__main__':
    # Enable client debug logging before running the suite verbosely.
    logging.getLogger('sdc.client').setLevel(logging.DEBUG)
    unittest.TextTestRunner(verbosity=2).run(suite())
#    unittest.TextTestRunner(verbosity=2).run(unittest.TestLoader().loadTestsFromName('test_client_waveform.TestClientWafeform.test_stream_handling'))
|
# coding:utf-8
from django.shortcuts import render
import os
import time
from connect import adb
from django import forms
from django. shortcuts import render_to_response
from django. shortcuts import render
from django.http import request
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django. contrib import auth
from django.contrib.auth.decorators import login_required, permission_required
from django.views.generic import TemplateView
from connect import scripts
from multiprocessing import Process
from multiprocessing import Pool
import datetime
import subprocess
# --- module-level configuration, read once at import time ---
# ip.txt: one device per line, formatted "ip|port".
# FIX: the files were previously opened and never closed; use context managers.
iptxt = os.getcwd() + "\\ip.txt"
with open(iptxt) as f:
    lines = f.readlines()
# config.txt: first line is the package name, second line the apk path.
iptxt = os.getcwd() + "\\config.txt"
with open(iptxt) as f:
    configinfo = f.readlines()
packageName = configinfo[0].strip("\n")
apk = configinfo[1].strip("\n")
paramList = []
#resultList=[]
# Per-device monkey logs go into monkeylog/<ip>-<date>.log.
monkeylog_path = os.getcwd() + "\\monkeylog"
logdaystr = datetime.date.today().strftime('%Y.%m.%d')
# Create your views here.
def index(request):
    """Render the main upload/control page."""
    return render(request, "index.html")
def monkey(ip):
filename =monkeylog_path+"\\"+ip+'-'+logdaystr+".log"
monkeylog_file = open(filename, 'w')
monkeyStr='adb -s %s:5555 shell monkey -p %s -v-v 1000'%(ip,packageName)
#os.popen(monkeyStr)
process=subprocess.Popen(monkeyStr, stdout=monkeylog_file, stderr=subprocess.PIPE,shell=True)
print 'monkey finished'
monkeylog_file.close()
def connect(ip):
result=adb.many_connect(ip)
print result
if result:
connectInfo='device'+ip+' connected successfully'
return connectInfo
else:
connectInfo='device'+ip+' connected failed'
return connectInfo
def nomalinstall(ip,packageName,apk):
result=adb.adbuninstall(ip,packageName,apk)
print 'result is:'
print result
if result is True:
installinfo='device'+ip+'normal installed successfully'
return installinfo
else:
installinfo='device'+ip+'normal installed failed'
return installinfo
def coverinstall(ip, apk):
    """Install the apk over the existing installation ("cover" install)."""
    outcome = adb.manyinstall(ip, apk)
    if outcome is True:
        return 'device' + ip + 'cover installed successfully'
    return 'device' + ip + 'cover installed failed'
def manyFunction(ip,apk,port,packageName,install_checked,function_checked,monkey_checked):
    """Full per-device workflow: connect, optionally (re)install the apk,
    run the selected UI scripts and/or monkey, collecting result strings.

    install_checked: '1' = normal install (uninstall first), '2' = cover install.
    function_checked: list of selected UI checks ('guide', 'login').
    monkey_checked: contains 'monkey' when a monkey run was requested.
    Returns the list of per-step result strings for this device.
    """
    resultList=[]
    connectInfo=connect(ip)
    resultList.append(connectInfo)
    # everything below only makes sense if the device is reachable
    if 'connected successfully' in connectInfo:
        if install_checked=='2':
            x=coverinstall(ip,apk)
            resultList.append(x)
        elif install_checked=='1':
            x=nomalinstall(ip,packageName,apk)
            resultList.append(x)
        fs=scripts.function_scripts()
        print 'function_checked'
        print len(function_checked[0])
        # NOTE(review): tests the length of the *first* selection string -
        # presumably "was any function selected"; verify this is intended.
        if len(function_checked[0])>1:
            fs.setUp(ip,port,apk)
        if 'guide' in function_checked:
            swiptinfo=fs.test_swipe()
            resultList.append(swiptinfo)
        if 'login' in function_checked:
            logininfo=fs.test_login()
            resultList.append(logininfo)
            fs.tearDown()
        if 'monkey' in monkey_checked:
            monkey(ip)
            adb.error_log(ip,packageName)
    print 'resultList:'
    print resultList
    return resultList
def run(request):
    """View: fan the selected actions out to every device listed in ip.txt.

    Each ip.txt line has the form "ip|port".  manyFunction runs for each
    device in a 5-worker process pool; the per-device result lists are
    gathered and rendered into result.html.
    """
    resultList=[]
    sumlist=[]
    # form selections: install mode, UI function checks, monkey flag
    install_checked = request. POST. get('install' , ' ' )
    function_checked= request. POST. getlist('function' , ' ' )
    monkey_checked=request. POST. get('monkey' , ' ' )
    print '==='+install_checked
    #paramList.append(install_checked)
    p = Pool(processes=5)
    for ipline in lines:
        i=ipline.strip("\n")
        ip=i.split("|")[0]
        port=i.split("|")[1]
        resultList.append(p.apply_async(manyFunction, (ip,apk,port,packageName,install_checked,function_checked,monkey_checked,)))
    p.close()
    # wait for all devices to finish before collecting results
    p.join()
    for res in resultList:
        print '=============='
        result=res.get()
        print result
        sumlist.append(result)
    print sumlist
    return render_to_response('result.html',{'result':sumlist})
# NOTE(review): dead code - a superseded single-device version of run(),
# preserved inside a module-level string literal.  Candidate for deletion.
'''
def run(request):
    connectInfo=connect()
    installinfo=''
    swiptinfo=''
    logininfo=''
    #如果连接手机成功,可以继续装包
    if connectInfo=='connect successful':
        install_checked = request. POST. get('install' , ' ' )
        if install_checked=='2':
            print install_checked
            installinfo=coverinstall()
        elif install_checked=='1':
            print install_checked
            installinfo=nomalinstall()
        function_checked= request. POST. getlist('function' , ' ' )
        print 'function_checked:'
        print function_checked
        fs=scripts.function_scripts()
        fs.setUp(ip,port,apk)
        if 'guide' in function_checked:
            swiptinfo=fs.test_swipe()
        if 'login' in function_checked:
            logininfo=fs.test_login()
        fs.tearDown()
    return render_to_response('result.html',{'connectInfo': connectInfo, 'installinfo': installinfo, 'swiptinfo': swiptinfo, 'logininfo':logininfo} )
'''
def uploadfile(request):
    """Handle POST upload of the apk, ip-list and config files.

    All three files are required.  Each is written in binary chunks into the
    project directory.  Renders index.html with an error or success message.
    """
    if request.method != "POST":
        # FIX: the original returned None for non-POST requests, which is
        # an invalid Django response; show the form instead.
        return render_to_response('index.html', {})
    # uploaded files; None when a field was not supplied
    apk = request.FILES.get("apk", None)
    ip = request.FILES.get("ip", None)
    config = request.FILES.get("config", None)
    # FIX: the original only errored when *all* three were missing and then
    # crashed with AttributeError on any that was None; require all three.
    if not (apk and ip and config):
        return render_to_response('index.html', {'error': 'no fileno'})
    # NOTE(review): destination directory is hard-coded; '\\U' also made the
    # old literal fragile - raw-string/os.path keeps it unambiguous.
    dest_dir = "C:\\Users\\Cathy\\PycharmProjects\\connect"
    for uploaded in (apk, ip, config):
        # write each upload in chunks so large files do not exhaust memory;
        # 'with' guarantees the handle is closed (the original leaked on error)
        with open(os.path.join(dest_dir, uploaded.name), 'wb+') as destination:
            for chunk in uploaded.chunks():
                destination.write(chunk)
    # FIX: the original passed a *set* literal {'upload success'} where a
    # template context dict is required.
    return render_to_response('index.html', {'success': 'upload success'})
|
from django.db import models
from django.utils import timezone
class LinePush(models.Model):
    """A LINE push destination (one registered LINE user)."""
    # LINE user id; unique, so each user is registered at most once.
    user_id = models.CharField('ユーザーID', max_length=100, unique=True)
    # Profile display name; may be blank until fetched.
    display_name = models.CharField('表示名', max_length=255, blank=True)
    def __str__(self):
        return self.display_name
class LineMessage(models.Model):
    """A single LINE chat message (either direction)."""
    # Destination user; the message survives (push becomes NULL) if the
    # LinePush row is deleted.
    push = models.ForeignKey(LinePush, verbose_name='プッシュ先', on_delete=models.SET_NULL, blank=True, null=True)
    text = models.TextField('テキスト', blank=True)
    image = models.ImageField('画像', blank=True, null=True)
    # True when sent by the admin side, False when sent by the user.
    is_admin = models.BooleanField('このメッセージは管理者側の発言か', default=True)
    created_at = models.DateTimeField('作成日', default=timezone.now)
    def __str__(self):
        return f'{self.push} - {self.is_admin}'
|
# -*- coding: utf-8 -*-
__author__ = 'yunge'
'''
Given an array of integers, find two numbers such that they add up to a specific target number.
The function twoSum should return indices of the two numbers such that they add up to the target, where
index1 must be less than index2. Please note that your returned answers (both index1 and index2) are NOT zero-based.
Example
numbers=[2, 7, 11, 15], target=9
return [1, 2]
Note
You may assume that each input would have exactly one solution
Challenge
Either of the following solutions are acceptable:
O(n) Space, O(nlogn) Time
O(n) Space, O(n) Time
'''
def twoSum(numbers, target):
    """Return 1-based indices [i1, i2] (i1 < i2) of the two entries of
    *numbers* summing to *target*, assuming exactly one solution exists.

    Single pass with a value->index map: O(n) time, O(n) space.
    Returns None when no pair sums to target.
    """
    seen = {}  # value -> index of its first occurrence
    for i, x in enumerate(numbers):
        complement = target - x
        # FIX: membership test on the dict itself (the old
        # `in num_map.keys()` built an O(n) list on Python 2)
        if complement in seen:
            return [seen[complement] + 1, i + 1]
        seen[x] = i
    return None
# Smoke test: duplicate zeros sum to target 0 -> 1-based indices [1, 4].
numbers1=[0,4,3,0]
target1= 0
print(twoSum(numbers1, target1))
# Breakout
import gym
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
from gym import error
from gym.utils import closer
# Registry used by gym to track environments so they can be closed on exit.
env_closer = closer.Closer()
class Env(object):
    """Abstract base environment (vendored gym.Env interface).

    Subclasses must implement step/reset (and usually render); close and
    seed have no-op defaults.  Instances work as context managers, closing
    themselves on exit.
    """

    # Set this in SOME subclasses
    metadata = {'render.modes': []}
    reward_range = (-float('inf'), float('inf'))
    spec = None

    # Set these in ALL subclasses
    action_space = None
    observation_space = None

    def step(self, action):
        """Advance one timestep; must be overridden."""
        raise NotImplementedError

    def reset(self):
        """Reset to an initial state and return the first observation; must be overridden."""
        raise NotImplementedError

    def render(self, mode='human'):
        """Render the environment; must be overridden."""
        raise NotImplementedError

    def close(self):
        """Release resources; default is a no-op."""
        pass

    def seed(self, seed=None):
        """Seed the RNG; default is a no-op returning None."""
        return

    @property
    def unwrapped(self):
        """The innermost (unwrapped) environment - the instance itself here."""
        return self

    def __str__(self):
        if self.spec is None:
            return '<{} instance>'.format(type(self).__name__)
        return '<{}<{}>>'.format(type(self).__name__, self.spec.id)

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()
        # propagate exception
        return False
class Wrapper(Env):
    """Forwards the full Env interface to a wrapped environment.

    Unknown public attributes are delegated via __getattr__; private names
    (leading underscore) deliberately raise so internals are not proxied.
    """
    def __init__(self, env):
        self.env = env
        # mirror the wrapped env's spaces/metadata so wrappers compose
        self.action_space = self.env.action_space
        self.observation_space = self.env.observation_space
        self.reward_range = self.env.reward_range
        self.metadata = self.env.metadata
    def __getattr__(self, name):
        # only called when normal attribute lookup fails
        if name.startswith('_'):
            raise AttributeError("attempted to get missing private attribute '{}'".format(name))
        return getattr(self.env, name)
    @property
    def spec(self):
        return self.env.spec
    @classmethod
    def class_name(cls):
        return cls.__name__
    def step(self, action):
        return self.env.step(action)
    def reset(self, **kwargs):
        return self.env.reset(**kwargs)
    def render(self, mode='human', **kwargs):
        return self.env.render(mode, **kwargs)
    def close(self):
        return self.env.close()
    def seed(self, seed=None):
        return self.env.seed(seed)
    def compute_reward(self, achieved_goal, desired_goal, info):
        return self.env.compute_reward(achieved_goal, desired_goal, info)
    def __str__(self):
        return '<{}{}>'.format(type(self).__name__, self.env)
    def __repr__(self):
        return str(self)
    @property
    def unwrapped(self):
        # recurse through nested wrappers down to the base env
        return self.env.unwrapped
class ObservationWrapper(Wrapper):
    """Wrapper base class that transforms observations via self.observation()."""

    def reset(self, **kwargs):
        return self.observation(self.env.reset(**kwargs))

    def step(self, action):
        raw_obs, reward, done, info = self.env.step(action)
        return self.observation(raw_obs), reward, done, info

    def observation(self, observation):
        """Map a raw observation to the transformed one; subclasses must override."""
        raise NotImplementedError
from .space import Space
from gym import logger
class Box(Space):
    """A (possibly unbounded) box in R^n - the cartesian product of n
    intervals [low[i], high[i]] (vendored gym.spaces.Box)."""
    def __init__(self, low, high, shape=None, dtype=np.float32):
        assert dtype is not None, 'dtype must be explicitly provided. '
        self.dtype = np.dtype(dtype)
        # determine shape if it isn't provided directly
        if shape is not None:
            shape = tuple(shape)
            assert np.isscalar(low) or low.shape == shape, "low.shape doesn't match provided shape"
            assert np.isscalar(high) or high.shape == shape, "high.shape doesn't match provided shape"
        elif not np.isscalar(low):
            shape = low.shape
            assert np.isscalar(high) or high.shape == shape, "high.shape doesn't match low.shape"
        elif not np.isscalar(high):
            shape = high.shape
            assert np.isscalar(low) or low.shape == shape, "low.shape doesn't match high.shape"
        else:
            raise ValueError("shape must be provided or inferred from the shapes of low or high")
        # broadcast scalar bounds to full arrays
        if np.isscalar(low):
            low = np.full(shape, low, dtype=dtype)
        if np.isscalar(high):
            high = np.full(shape, high, dtype=dtype)
        self.shape = shape
        self.low = low
        self.high = high
        def _get_precision(dtype):
            # decimal digits the dtype can represent (inf for integer dtypes)
            if np.issubdtype(dtype, np.floating):
                return np.finfo(dtype).precision
            else:
                return np.inf
        low_precision = _get_precision(self.low.dtype)
        high_precision = _get_precision(self.high.dtype)
        dtype_precision = _get_precision(self.dtype)
        # warn when casting the bounds to self.dtype loses precision
        if min(low_precision, high_precision) > dtype_precision:
            logger.warn("Box bound precision lowered by casting to {}".format(self.dtype))
        self.low = self.low.astype(self.dtype)
        self.high = self.high.astype(self.dtype)
        # Boolean arrays which indicate the interval type for each coordinate
        self.bounded_below = -np.inf < self.low
        self.bounded_above = np.inf > self.high
        super(Box, self).__init__(self.shape, self.dtype)
    def is_bounded(self, manner="both"):
        """Whether all coordinates are bounded 'below', 'above' or 'both'."""
        below = np.all(self.bounded_below)
        above = np.all(self.bounded_above)
        if manner == "both":
            return below and above
        elif manner == "below":
            return below
        elif manner == "above":
            return above
        else:
            raise ValueError("manner is not in {'below', 'above', 'both'}")
    def sample(self):
        """Sample uniformly inside the box; unbounded coordinates are drawn
        from normal/exponential distributions instead."""
        # +1 on the integer high end makes the bound inclusive after floor
        high = self.high if self.dtype.kind == 'f' \
                else self.high.astype('int64') + 1
        sample = np.empty(self.shape)
        # Masking arrays which classify the coordinates according to interval
        # type
        unbounded = ~self.bounded_below & ~self.bounded_above
        upp_bounded = ~self.bounded_below & self.bounded_above
        low_bounded = self.bounded_below & ~self.bounded_above
        bounded = self.bounded_below & self.bounded_above
        # Vectorized sampling by interval type
        sample[unbounded] = self.np_random.normal(
                size=unbounded[unbounded].shape)
        sample[low_bounded] = self.np_random.exponential(
            size=low_bounded[low_bounded].shape) + self.low[low_bounded]
        sample[upp_bounded] = -self.np_random.exponential(
            size=upp_bounded[upp_bounded].shape) + self.high[upp_bounded]
        sample[bounded] = self.np_random.uniform(low=self.low[bounded],
                                    high=high[bounded],
                                    size=bounded[bounded].shape)
        if self.dtype.kind == 'i':
            sample = np.floor(sample)
        return sample.astype(self.dtype)
    def contains(self, x):
        if isinstance(x, list):
            x = np.array(x)  # Promote list to array for contains check
        return x.shape == self.shape and np.all(x >= self.low) and np.all(x <= self.high)
    def to_jsonable(self, sample_n):
        return np.array(sample_n).tolist()
    def from_jsonable(self, sample_n):
        return [np.asarray(sample) for sample in sample_n]
    def __repr__(self):
        return "Box" + str(self.shape)
    def __eq__(self, other):
        return isinstance(other, Box) and (self.shape == other.shape) and np.allclose(self.low, other.low) and np.allclose(self.high, other.high)
import cv2
class PreprocessAtari(ObservationWrapper):
    """Crop the Atari frame, resize to 84x84 and convert to a single
    grayscale float32 channel scaled into [0, 1]."""

    def __init__(self, env):
        ObservationWrapper.__init__(self, env)
        self.img_size = (84, 84)
        self.observation_space = Box(0.0, 1.0, (self.img_size[0], self.img_size[1], 1))

    def observation(self, img):
        # drop the top 34 rows (score bar) and the bottom 16 rows
        cropped = img[34:-16, :, :]
        resized = cv2.resize(cropped, self.img_size)
        # average over the colour channels -> one grayscale channel
        gray = resized.mean(-1, keepdims=True)
        return gray.astype('float32') / 255.
class FrameBuffer(Wrapper):
    """Stacks the last n_frames observations along the channel axis so the
    agent can perceive motion; the newest frame occupies the first channels."""
    def __init__(self, env, n_frames=4, dim_order='tensorflow'):
        """A gym wrapper that reshapes, crops and scales image into the desired shapes"""
        super(FrameBuffer, self).__init__(env)
        self.dim_order = dim_order
        if dim_order == 'tensorflow':
            height, width, n_channels = env.observation_space.shape
            """Multiply channels dimension by number of frames"""
            obs_shape = [height, width, n_channels * n_frames]
        else:
            raise ValueError('dim_order should be "tensorflow" or "pytorch", got {}'.format(dim_order))
        self.observation_space = Box(0.0, 1.0, obs_shape)
        self.framebuffer = np.zeros(obs_shape, 'float32')
    def reset(self):
        """resets breakout, returns initial frames"""
        self.framebuffer = np.zeros_like(self.framebuffer)
        self.update_buffer(self.env.reset())
        return self.framebuffer
    def step(self, action):
        """plays breakout for 1 step, returns frame buffer"""
        new_img, reward, done, info = self.env.step(action)
        self.update_buffer(new_img)
        return self.framebuffer, reward, done, info
    def update_buffer(self, img):
        """Push *img* onto the front of the buffer, dropping the oldest frame."""
        if self.dim_order == 'tensorflow':
            offset = self.env.observation_space.shape[-1]
            axis = -1
            # drop the oldest frame's channels, prepend the new frame
            cropped_framebuffer = self.framebuffer[:,:,:-offset]
            self.framebuffer = np.concatenate([img, cropped_framebuffer], axis = axis)
def make_env():
    """Create Breakout wrapped with grayscale preprocessing and a 4-frame buffer."""
    base = gym.make("BreakoutNoFrameskip-v4")
    return FrameBuffer(PreprocessAtari(base), n_frames=4, dim_order='tensorflow')
# Build the environment once at import time and probe its spaces.
env = make_env()
env.reset()
n_actions = env.action_space.n
state_dim = env.observation_space.shape
# Warm up with 50 random steps so the frame buffer holds real frames.
for _ in range(50):
    obs, _, _, _ = env.step(env.action_space.sample())
plt.title("Game image")
plt.imshow(env.render("rgb_array"))
plt.show()
plt.title("Agent observation (4 frames left to right)")
# unstack the 4 buffered frames horizontally for display
plt.imshow(obs.transpose([0, 2, 1]).reshape([state_dim[0], -1]))
# Building a network
import tensorflow as tf
tf.reset_default_graph()
sess = tf.InteractiveSession()
import keras
from keras.layers import Conv2D, Dense, Flatten
class DQNAgent:
    """Epsilon-greedy DQN agent: a small convnet mapping a stacked
    observation to one Q-value per action.

    Variables live under tf.variable_scope(name), so an online agent and a
    target network can coexist in one graph.
    """
    def __init__(self, name, state_shape, n_actions, epsilon=0, reuse=False):
        """Build the network and the symbolic forward graph.

        name: variable-scope name, also used to collect trainable weights.
        state_shape: observation shape, e.g. (84, 84, 4).
        n_actions: size of the discrete action space.
        epsilon: exploration probability used by sample_actions.
        reuse: reuse variables of an existing scope with the same name.
        """
        with tf.variable_scope(name, reuse=reuse):
            self.network = keras.models.Sequential()
            # BUG FIX: this line originally read 'sself.network.add(...)',
            # which raised NameError on construction.
            self.network.add(Conv2D(16, (3, 3), strides=2, activation='relu', input_shape=state_shape))
            self.network.add(Conv2D(32, (3, 3), strides=2, activation='relu'))
            self.network.add(Conv2D(64, (3, 3), strides=2, activation='relu'))
            self.network.add(Flatten())
            self.network.add(Dense(256, activation='relu'))
            self.network.add(Dense(n_actions, activation='linear'))
            # graph for agent step
            self.state_t = tf.placeholder('float32', [None,] + list(state_shape))
            self.qvalues_t = self.get_symbolic_qvalues(self.state_t)
        self.weights = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=name)
        self.epsilon = epsilon
    def get_symbolic_qvalues(self, state_t):
        """takes agent's observation, returns qvalues. Both are tf Tensors"""
        qvalues = self.network(state_t)
        assert tf.is_numeric_tensor(qvalues) and qvalues.shape.ndims == 2, \
            "please return 2d tf tensor of qvalues [you got %s]" % repr(qvalues)
        # NOTE(review): compares against the module-level n_actions, not the
        # constructor argument - fine in this script, fragile elsewhere.
        assert int(qvalues.shape[1]) == n_actions
        return qvalues
    def get_qvalues(self, state_t):
        """Same as symbolic step except it operates on numpy arrays"""
        sess = tf.get_default_session()
        return sess.run(self.qvalues_t, {self.state_t: state_t})
    def sample_actions(self, qvalues):
        """Epsilon-greedy action selection for a batch of q-value rows."""
        epsilon = self.epsilon
        batch_size, n_actions = qvalues.shape
        random_actions = np.random.choice(n_actions, size=batch_size)
        best_actions = qvalues.argmax(axis=-1)
        # with probability epsilon replace the greedy action by a random one
        should_explore = np.random.choice([0, 1], batch_size, p=[1 - epsilon, epsilon])
        return np.where(should_explore, random_actions, best_actions)
# Online agent with 50% exploration; initialise all graph variables.
agent = DQNAgent("dqn_agent", state_dim, n_actions, epsilon=0.5)
sess.run(tf.global_variables_initializer())
def evaluate(env, agent, n_games=1, greedy=False, t_max=10000):
    """Play *n_games* full episodes and return the mean total reward.

    greedy=True picks argmax-Q actions; otherwise the agent's own
    (epsilon-greedy) sample_actions is used.  Episodes are capped at
    t_max steps.
    """
    totals = []
    for _ in range(n_games):
        state = env.reset()
        episode_reward = 0
        for _ in range(t_max):
            qvalues = agent.get_qvalues([state])
            if greedy:
                action = qvalues.argmax(axis=-1)[0]
            else:
                action = agent.sample_actions(qvalues)[0]
            state, step_reward, done, _ = env.step(action)
            episode_reward += step_reward
            if done:
                break
        totals.append(episode_reward)
    return np.mean(totals)
# Smoke-test the untrained agent once.
evaluate(env, agent, n_games=1)
#Experience replay
from replay_buffer import ReplayBuffer
# Sanity-check the buffer: capacity 10, 30 adds -> size must cap at 10.
exp_replay = ReplayBuffer(10)
for _ in range(30):
    exp_replay.add(env.reset(), env.action_space.sample(), 1.0, env.reset(), done=False)
obs_batch, act_batch, reward_batch, next_obs_batch, is_done_batch = exp_replay.sample(5)
assert len(exp_replay) == 10, "experience replay size should be 10 because that's what maximum capacity is"
def play_and_record(agent, env, exp_replay, n_steps=1):
    """Play *n_steps* environment steps from the env's current frame buffer,
    storing every (s, a, r, s', done) transition in *exp_replay*.

    Returns the total reward collected over the n_steps.
    """
    state = env.framebuffer  # state at the beginning of the rollout
    total_reward = 0.0
    for _ in range(n_steps):
        # epsilon-greedy action for the current state
        action = agent.sample_actions(agent.get_qvalues([state]))[0]
        next_state, step_reward, done, _ = env.step(action)
        exp_replay.add(state, action, step_reward, next_state, done)
        total_reward += step_reward
        # restart the episode when it ends, otherwise continue from next_state
        state = env.reset() if done else next_state
    return total_reward
# Target networks
# Employ a "target network" - a copy of the neural network weights used for
# reference Q-values.  It is an exact copy of the agent network, but its
# parameters are not trained; they are copied over from the agent's actual
# network every so often.  (These two lines were bare notebook prose, which
# is a syntax error in a .py file - converted to comments.)
# Frozen copy of the agent used to compute reference Q-values (synced below).
target_network = DQNAgent("target_network", state_dim, n_actions)
def load_weigths_into_target_network(agent, target_network):
    """Build the tf.assign ops that copy agent weights into the target network.

    (Function name typo 'weigths' kept: it is the public name callers use.)
    Returns the list of assign ops; run them in a session to sync weights.
    """
    return [tf.assign(target_w, agent_w, validate_shape=True)
            for agent_w, target_w in zip(agent.weights, target_network.weights)]
# Initial weight sync: build the assign ops once, run them now (and periodically later).
copy_step = load_weigths_into_target_network(agent, target_network)
sess.run(copy_step)
# Q-learning
# placeholders that will be fed with exp_replay.sample(batch_size)
obs_ph = tf.placeholder(tf.float32, shape=(None,) + state_dim)
actions_ph = tf.placeholder(tf.int32, shape=[None])
rewards_ph = tf.placeholder(tf.float32, shape=[None])
next_obs_ph = tf.placeholder(tf.float32, shape=(None,) + state_dim)
is_done_ph = tf.placeholder(tf.float32, shape=[None])
is_not_done = 1 - is_done_ph  # masks out the bootstrap term at episode ends
gamma = 0.99  # discount factor
current_qvalues = agent.get_symbolic_qvalues(obs_ph)
# Q(s, a) for the action actually taken, selected via a one-hot mask
current_action_qvalues = tf.reduce_sum(tf.one_hot(actions_ph, n_actions) * current_qvalues, axis=1)
#Compute Q-learning TD error:
# compute q-values for NEXT states with target network
next_qvalues_target = target_network.get_symbolic_qvalues(next_obs_ph)
# compute state values by taking max over next_qvalues_target for all actions
next_state_values_target = tf.reduce_max(next_qvalues_target, axis=-1)
# compute Q_reference(s,a) as per formula above.
reference_qvalues = rewards_ph + gamma*next_state_values_target*is_not_done
# Define loss function for sgd.
td_loss = (current_action_qvalues - reference_qvalues) ** 2
td_loss = tf.reduce_mean(td_loss)
# only the online agent's weights are trained; the target network is frozen
train_step = tf.train.AdamOptimizer(1e-3).minimize(td_loss, var_list=agent.weights)
sess.run(tf.global_variables_initializer())
# Commented out IPython magic to ensure Python compatibility.
from tqdm import trange
import pandas as pd
from IPython.display import clear_output
import matplotlib.pyplot as plt
# %matplotlib inline
def moving_average(x, span=100, **kw):
    """Exponentially-weighted moving average of `x` with the given span.

    Extra keyword arguments are forwarded to pandas' `ewm` (e.g. min_periods).
    Returns a numpy array the same length as `x`.
    """
    series = pd.Series(np.asarray(x), name='x')
    return series.ewm(span=span, **kw).mean().values
# Training curves collected during the loop below.
mean_rw_history = []
td_loss_history = []
# Replay buffer holding up to 10^5 transitions; pre-fill it with 10k env steps
# so the first sampled batches are meaningful.
exp_replay = ReplayBuffer(10**5)
play_and_record(agent, env, exp_replay, n_steps=10000)
def sample_batch(exp_replay, batch_size):
    """Draw one batch from the replay buffer and pair it with the TF
    placeholders, producing a feed_dict for sess.run."""
    batch = exp_replay.sample(batch_size)
    placeholders = (obs_ph, actions_ph, rewards_ph, next_obs_ph, is_done_ph)
    return dict(zip(placeholders, batch))
for i in trange(10**5 * 5):
    # play: append 10 fresh environment steps to the replay buffer
    play_and_record(agent, env, exp_replay, 10)
    # train on one 64-transition batch
    _, loss_t = sess.run([train_step, td_loss], sample_batch(exp_replay, batch_size=64))
    td_loss_history.append(loss_t)
    # adjust agent parameters: periodically sync the target network,
    # decay exploration, and evaluate
    if i % 500 == 0:
        sess.run(copy_step)
        # epsilon decays geometrically but never below 0.01
        agent.epsilon = max(agent.epsilon * 0.99, 0.01)
        mean_rw_history.append(evaluate(make_env(), agent, n_games=3))
    if i % 100 == 0:
        clear_output(True)
        print("buffer size = %i, epsilon = %.5f" % (len(exp_replay), agent.epsilon))
        plt.subplot(1, 2, 1)
        plt.title("mean reward per game")
        plt.plot(mean_rw_history)
        plt.grid()
        # Abort visibly if training diverged.
        assert not np.isnan(loss_t)
        # NOTE(review): plt.figure() is called after the first subplot was
        # drawn — the two subplots likely end up on different figures; confirm
        # this layout is intended.
        plt.figure(figsize=[12, 4])
        plt.subplot(1, 2, 2)
        plt.title("TD loss history (moving average)")
        plt.plot(moving_average(np.array(td_loss_history), span=100, min_periods=100))
        plt.grid()
        plt.show()
"""### Video"""
# reset epsilon back to previous value to go on training
agent.epsilon = 0
# Record sessions
import gym.wrappers
with gym.wrappers.Monitor(make_env(), directory="videos", force=True) as env_monitor:
sessions = [evaluate(env_monitor, agent, n_games=1) for _ in range(100)]
from pathlib import Path
from IPython.display import HTML
video_names = sorted([s for s in Path('videos').iterdir() if s.suffix == '.mp4'])
HTML("""
<video width="640" height="480" controls>
<source src="{}" type="video/mp4">
</video>
""".format(video_names[-1])) |
from otree.api import Currency as c, currency_range
from . import pages
from ._builtin import Bot
from .models import Constants
import random
class PlayerBot(Bot):
    """oTree bot that answers the demographic questionnaire with random
    plausible values (same value ranges as before)."""

    def play_round(self):
        # random.choice / random.randint replace the index-into-list and
        # randrange tricks; the produced value ranges are identical
        # (randrange(19, 32) == randint(19, 31), etc.).
        yield (
            pages.question,
            {
                'sex': random.choice(['Male', 'Female']),
                'age': random.randint(19, 31),
                'siblings': random.randint(0, 15),
                'born_vienna': True,
            },
        )
|
from binance.constants import BINANCE_GET_OHLC
from binance.error_handling import is_error
from data.candle import Candle
from utils.debug_utils import should_print_debug, print_to_console, LOG_ALL_DEBUG, ERROR_LOG_FILE_NAME
from utils.file_utils import log_to_file
from data_access.internet import send_request
from enums.status import STATUS
def get_ohlc_binance_url(currency, date_start, date_end, period):
    """Compose the Binance klines URL for `currency`/`period` starting at
    `date_start` (seconds). `date_end` is accepted for interface parity with
    the other exchanges but is not part of the request."""
    # Binance expects the start time in milliseconds.
    start_ms = 1000 * date_start
    # https://api.binance.com/api/v1/klines?symbol=XMRETH&interval=15m&startTime=
    final_url = "{base}{pair}&interval={interval}&startTime={start}".format(
        base=BINANCE_GET_OHLC, pair=currency, interval=period, start=start_ms)
    if should_print_debug():
        print_to_console(final_url, LOG_ALL_DEBUG)
    return final_url
def get_ohlc_binance_result_processor(json_response, currency, date_start, date_end):
    """Convert a Binance klines JSON payload into a list of Candle objects.

    Returns an empty list (after logging) when the payload is an error.
    Each record looks like:
    [
    1499040000000, // Open time
    "0.01634790", // Open
    "0.80000000", // High
    "0.01575800", // Low
    "0.01577100", // Close
    "148976.11427815", // Volume
    1499644799999, // Close time
    "2434.19055334", // Quote asset volume
    308, // Number of trades
    "1756.87402397", // Taker buy base asset volume
    "28.46694368", // Taker buy quote asset volume
    "17928899.62484339" // Can be ignored
    ]
    """
    if is_error(json_response):
        msg = "get_ohlc_binance_result_processor - error response - {er}".format(er=json_response)
        log_to_file(msg, ERROR_LOG_FILE_NAME)
        return []
    return [Candle.from_binance(entry, currency) for entry in json_response]
def get_ohlc_binance(currency, date_start, date_end, period):
    """Fetch OHLC candles from Binance for `currency` starting at `date_start`.

    Returns a list of Candle objects; empty on request failure.
    """
    final_url = get_ohlc_binance_url(currency, date_start, date_end, period)
    err_msg = "get_ohlc_binance called for {pair} at {timest}".format(pair=currency, timest=date_start)
    error_code, json_response = send_request(final_url, err_msg)
    if error_code != STATUS.SUCCESS:
        return []
    return get_ohlc_binance_result_processor(json_response, currency, date_start, date_end)
|
from datetime import datetime
from typing import Union
from urllib.parse import urldefrag
import rdflib
from rdflib.namespace import RDF, DC, DCTERMS, XSD
from nanopub import namespaces, profile
from nanopub.definitions import DUMMY_NANOPUB_URI
class Publication:
    """
    Representation of the rdf that comprises a nanopublication
    """
    def __init__(self, rdf=None, source_uri=None):
        # rdf: an rdflib.ConjunctiveGraph holding the four nanopub named graphs.
        # source_uri: URI the nanopub was fetched from / published to, if known.
        self._rdf = rdf
        self._source_uri = source_uri
        # Extract the Head, pubinfo, provenance and assertion graphs from the assigned nanopub rdf
        self._graphs = {}
        for c in rdf.contexts():
            # Each named graph is identified by the (lower-cased) fragment of
            # its context URI, e.g. ...#assertion -> 'assertion'.
            graphid = urldefrag(c.identifier).fragment.lower()
            self._graphs[graphid] = c
        # Check all four expected graphs are provided
        expected_graphs = ['head', 'pubinfo', 'provenance', 'assertion']
        for expected in expected_graphs:
            if expected not in self._graphs.keys():
                raise ValueError(
                    f'Expected to find {expected} graph in nanopub rdf, but not found. Graphs found: {list(self._graphs.keys())}.')

    @staticmethod
    def _replace_blank_nodes(dummy_namespace, assertion_rdf):
        """ Replace blank nodes.
        Replace any blank nodes in the supplied RDF with a corresponding uri in the
        dummy_namespace.'Blank nodes' here refers specifically to rdflib.term.BNode objects. When
        publishing, the dummy_namespace is replaced with the URI of the actual nanopublication.
        For example, if the nanopub's URI is www.purl.org/ABC123 then the blank node will be
        replaced with a concrete URIRef of the form www.purl.org/ABC123#blanknodename where
        'blanknodename' is the name of the rdflib.term.BNode object.
        This is to solve the problem that a user may wish to use the nanopublication to introduce
        a new concept. This new concept needs its own URI (it cannot simply be given the
        nanopublication's URI), but it should still lie within the space of the nanopub.
        Furthermore, the URI the nanopub is published to is not known ahead of time.
        """
        # NOTE(review): this removes/adds triples while iterating the same
        # graph — consider iterating over list(assertion_rdf) instead
        # (see the fix applied in replace_in_rdf below).
        for s, p, o in assertion_rdf:
            assertion_rdf.remove((s, p, o))
            if isinstance(s, rdflib.term.BNode):
                s = dummy_namespace[str(s)]
            if isinstance(o, rdflib.term.BNode):
                o = dummy_namespace[str(o)]
            assertion_rdf.add((s, p, o))

    @classmethod
    def from_assertion(cls, assertion_rdf: rdflib.Graph,
                       introduces_concept: rdflib.term.BNode = None,
                       derived_from=None, assertion_attributed_to=None,
                       attribute_assertion_to_profile: bool = False):
        """
        Construct Nanopub object based on given assertion. Any blank nodes in the rdf graph are
        replaced with the nanopub's URI, with the blank node name as a fragment. For example, if
        the blank node is called 'step', that would result in a URI composed of the nanopub's (base)
        URI, followed by #step.
        Args:
            assertion_rdf: The assertion RDF graph.
            introduces_concept: the pubinfo graph will note that this nanopub npx:introduces the
                concept. The concept should be a blank node (rdflib.term.BNode), and is converted
                to a URI derived from the nanopub's URI with a fragment (#) made from the blank
                node's name.
            derived_from: Add a triple to the provenance graph stating that this nanopub's assertion prov:wasDerivedFrom the given URI.
                If a list of URIs is passed, a provenance triple will be generated for each.
            assertion_attributed_to: the provenance graph will note that this nanopub's assertion
                prov:wasAttributedTo the given URI.
            attribute_assertion_to_profile: Attribute the assertion to the ORCID iD in the profile
        """
        # The two attribution mechanisms are mutually exclusive.
        if assertion_attributed_to and attribute_assertion_to_profile:
            raise ValueError(
                'If you pass a URI for the assertion_attributed_to argument, you cannot pass '
                'attribute_assertion_to_profile=True, because the assertion will already be '
                'attributed to the value passed in assertion_attributed_to argument. Set '
                'attribute_assertion_to_profile=False or do not pass the assertion_attributed_to '
                'argument.')
        if attribute_assertion_to_profile:
            assertion_attributed_to = rdflib.URIRef(profile.get_orcid_id())
        if introduces_concept and not isinstance(introduces_concept, rdflib.term.BNode):
            raise ValueError('If you want a nanopublication to introduce a concept, you need to '
                             'pass it as an rdflib.term.BNode("concept_name"). This will make '
                             'sure it is referred to from the nanopublication uri namespace upon '
                             'publishing.')
        # To be replaced with the published uri upon publishing
        this_np = rdflib.Namespace(DUMMY_NANOPUB_URI + '#')
        cls._replace_blank_nodes(dummy_namespace=this_np, assertion_rdf=assertion_rdf)
        # Set up different contexts
        rdf = rdflib.ConjunctiveGraph()
        # Use namespaces from assertion_rdf
        for prefix, namespace in assertion_rdf.namespaces():
            rdf.bind(prefix, namespace)
        # Four named graphs sharing one store, as required by the nanopub model.
        head = rdflib.Graph(rdf.store, this_np.Head)
        assertion = rdflib.Graph(rdf.store, this_np.assertion)
        provenance = rdflib.Graph(rdf.store, this_np.provenance)
        pub_info = rdflib.Graph(rdf.store, this_np.pubInfo)
        rdf.bind("", this_np)
        rdf.bind("np", namespaces.NP)
        rdf.bind("npx", namespaces.NPX)
        rdf.bind("prov", namespaces.PROV)
        rdf.bind("hycl", namespaces.HYCL)
        rdf.bind("dc", DC)
        rdf.bind("dcterms", DCTERMS)
        # Head graph wires the other three graphs to the nanopub resource.
        head.add((this_np[''], RDF.type, namespaces.NP.Nanopublication))
        head.add((this_np[''], namespaces.NP.hasAssertion, this_np.assertion))
        head.add((this_np[''], namespaces.NP.hasProvenance, this_np.provenance))
        head.add((this_np[''], namespaces.NP.hasPublicationInfo,
                  this_np.pubInfo))
        assertion += assertion_rdf
        creationtime = rdflib.Literal(datetime.now(), datatype=XSD.dateTime)
        provenance.add((this_np.assertion, namespaces.PROV.generatedAtTime, creationtime))
        pub_info.add((this_np[''], namespaces.PROV.generatedAtTime, creationtime))
        if assertion_attributed_to:
            assertion_attributed_to = rdflib.URIRef(assertion_attributed_to)
            provenance.add((this_np.assertion,
                            namespaces.PROV.wasAttributedTo,
                            assertion_attributed_to))
        if derived_from:
            uris = []
            # Accept either a single URI or a list of URIs.
            if isinstance(derived_from, list):
                list_of_URIs = derived_from
            else:
                list_of_URIs = [derived_from]
            for derived_from_uri in list_of_URIs:
                # Convert uri to an rdflib term first (if necessary)
                derived_from_uri = rdflib.URIRef(derived_from_uri)
                provenance.add((this_np.assertion,
                                namespaces.PROV.wasDerivedFrom,
                                derived_from_uri))
        # Always attribute the nanopublication (not the assertion) to the ORCID iD in user profile
        pub_info.add((this_np[''],
                      namespaces.PROV.wasAttributedTo,
                      rdflib.URIRef(profile.get_orcid_id())))
        if introduces_concept:
            # Convert introduces_concept URI to an rdflib term first (if necessary)
            if isinstance(introduces_concept, rdflib.term.BNode):
                introduces_concept = this_np[str(introduces_concept)]
            else:
                introduces_concept = rdflib.URIRef(introduces_concept)
            pub_info.add((this_np[''],
                          namespaces.NPX.introduces,
                          introduces_concept))
        return cls(rdf=rdf)

    @property
    def rdf(self):
        # The full ConjunctiveGraph backing this nanopub.
        return self._rdf

    @property
    def assertion(self):
        return self._graphs['assertion']

    @property
    def pubinfo(self):
        return self._graphs['pubinfo']

    @property
    def provenance(self):
        return self._graphs['provenance']

    @property
    def source_uri(self):
        return self._source_uri

    @property
    def introduces_concept(self):
        """The single concept this nanopub npx:introduces, or None.

        Raises ValueError if more than one npx:introduces triple exists.
        """
        concepts_introduced = list()
        for s, p, o in self.pubinfo.triples((None, namespaces.NPX.introduces, None)):
            concepts_introduced.append(o)
        if len(concepts_introduced) == 0:
            return None
        elif len(concepts_introduced) == 1:
            return concepts_introduced[0]
        else:
            raise ValueError('Nanopub introduces multiple concepts')

    def __str__(self):
        s = f'Original source URI = {self._source_uri}\n'
        # NOTE(review): .decode assumes serialize() returns bytes (rdflib < 6);
        # newer rdflib returns str — confirm the pinned rdflib version.
        s += self._rdf.serialize(format='trig').decode('utf-8')
        return s
def replace_in_rdf(rdf: rdflib.Graph, oldvalue, newvalue):
    """
    Replace subjects or objects of oldvalue with newvalue.

    BUG FIX: iterate over a snapshot (list) of the triples. The original
    removed and added triples while iterating the very graph it was mutating,
    which rdflib does not guarantee to handle safely and can skip or
    re-visit triples.
    """
    for s, p, o in list(rdf):
        if s == oldvalue:
            rdf.remove((s, p, o))
            rdf.add((newvalue, p, o))
        elif o == oldvalue:
            rdf.remove((s, p, o))
            rdf.add((s, p, newvalue))
|
from Scene import *
from Contains import *
class Sleeping_Beauty(Scene):
def enter(self):
print """
***Sleeping Beauty***
Needs... something to waker her up? make a prince to kiss her?
"""
input = raw_input("> ")
answer = Contains().in_array(input, ['string', 'integer']) # variables are keywords to return "correct" answer
while answer == False:
print "How many hints would you like?"
number_hints = raw_input("(1,2, or 3)> ")
hints = ["Hint1",
"Hint2",
"Hint3"]
for item in hints[:int(number_hints)]:
print item
print "Give a solution or ask for another hint."
input = raw_input("> ")
self.happy_ending()
return 'Finished'
def happy_ending(self):
print """
Happy ending text
""" |
from pprint import pprint
from app.app import create_app
app = create_app(environment='development')
def test_get():
    """GET a single theme by id and expect HTTP 200."""
    with app.test_client() as client:
        response = client.get('/cms/theme/3')
        pprint(response.get_json())
        assert response.status_code == 200
def test_get_all():
    """GET the full theme list (v1 API) and expect HTTP 200."""
    with app.test_client() as client:
        response = client.get('/v1/theme/all')
        pprint(response.get_json())
        assert response.status_code == 200
def test_get_paginate():
    """GET the paginated theme listing and expect HTTP 200."""
    with app.test_client() as client:
        response = client.get('/cms/theme/paginate')
        pprint(response.get_json())
        assert response.status_code == 200
def test_get_with_products():
    """GET a theme together with its products and expect HTTP 200."""
    with app.test_client() as client:
        response = client.get('/v1/theme/5/product')
        pprint(response.get_json())
        assert response.status_code == 200
# def test_create():
# with app.test_client() as c:
# rv = c.post('/cms/theme', json={
# 'name': '炒货天堂',
# 'summary': '炒货无敌',
# 'topic_img_id': 2,
# 'head_img_id': 2
# })
# json_data = rv.get_json()
# pprint(json_data)
# assert rv.status_code == 201
# def test_update():
# with app.test_client() as c:
# rv = c.put('/cms/theme/2', json={
# 'name': '水果世界',
# 'summary': '美味水果世界',
# 'topic_img_id': 1,
# 'head_img_id': 1
# })
# json_data = rv.get_json()
# pprint(json_data)
# assert rv.status_code == 201
def test_hide():
    """PUT the hide endpoint for a theme and expect HTTP 201."""
    with app.test_client() as client:
        response = client.put('/cms/theme/hide/2')
        pprint(response.get_json())
        assert response.status_code == 201
def test_show():
    """PUT the show endpoint for a theme and expect HTTP 201."""
    with app.test_client() as client:
        response = client.put('/cms/theme/show/2')
        pprint(response.get_json())
        assert response.status_code == 201
# def test_delete():
# with app.test_client() as c:
# rv = c.delete('/cms/theme/2')
# json_data = rv.get_json()
# pprint(json_data)
# assert rv.status_code == 201
|
import azure_translate_api
class translate():
def __init__(self, client_id, client_secret):
self.bing = azure_translate_api.MicrosoftTranslatorClient('pythontest', # make sure to replace client_id with your client id
'y32l8f0X5rq1G+5O9ayNk7p7zI1hHUh57VaoHXYMEzU=a') # replace the client secret with the client secret for you app.
if self.bing.TranslateText("This is just a test", 'en', 'en').find("TranslateApiException") != -1:
print "Well shit. It's not translating again."
exit()
# Translate a pythono file's comment from source_lang to target_lang, and save as .translate.py
def translate_py(self, file_name, source_lang, target_lang):
# Open files
f = open(file_name, "r")
o = open(file_name[:file_name.rfind(".")]+".translated" + file_name[file_name.rfind("."):], "w")
# Inialise some variables
translation = ""
in_comment_block = False
for line in f:
# If we are already in a comment block, continue translating
# If we find the end of the block, just write the line
if in_comment_block:
if line.find('"""') != -1:
in_comment_block = False
o.write(line)
else:
if line.isspace():
o.write(line)
else:
translation = self.bing.TranslateText(line[:-1], source_lang, target_lang).decode("ascii", "ignore").replace("\/", "/")[1:-1]
o.write(translation + "\n")
else:
# If we find a block comment line, start translating if we are not already in a block
if line.find('"""') != -1:
if not in_comment_block:
in_comment_block = True
translation = self.bing.TranslateText(line[line.find('"""')+3:-1], source_lang, target_lang).decode("ascii", "ignore").replace("\/", "/")[1:-1]
o.write(line[0: line.find('"""')+3] + translation + "\n")
# If we find a single line comment, translate the comment bit and print out the
# line with the newly translated bit
elif line.find("#") != -1:
translation = self.bing.TranslateText(line[line.find("#")+1:-1], source_lang, target_lang).decode("ascii", "ignore").replace("\/", "/")[1:-1]
o.write(line[0: line.find("#")+1] + translation + "\n")
# Otherwise just print the line without translation
else:
o.write(line)
f.close()
o.close()
# Translate a c++ file's comments from one language to another, and save it as .translate.cpp
def translate_cpp(self, file_name, source_lang, target_lang):
# Open files
f = open(file_name, "r")
o = open(file_name[:file_name.rfind(".")]+".translated" + file_name[file_name.rfind("."):], "w")
# Inialise some variables
translation = ""
in_comment_block = False
for line in f:
# If we are already in a comment block, continue translating
# If we find the end of the block, just write the line
if in_comment_block:
if line.find('*/') != -1:
in_comment_block = False
o.write(line)
else:
if line.isspace():
o.write(line)
else:
translation = self.bing.TranslateText(line[:-1], source_lang, target_lang).decode("ascii", "ignore").replace("\/", "/")[1:-1]
o.write(translation + "\n")
else:
# If we find a block comment line, start translating if we are not already in a block
if line.find('/*') != -1:
if not in_comment_block:
in_comment_block = True
translation = self.bing.TranslateText(line[line.find('/*')+2:-1], source_lang, target_lang).decode("ascii", "ignore").replace("\/", "/")[1:-1]
o.write(line[0: line.find('/*')+2] + translation + "\n")
# If we find a single line comment, translate the comment bit and print out the
# line with the newly translated bit
elif line.find("//") != -1:
translation = self.bing.TranslateText(line[line.find("//")+2:-1], source_lang, target_lang).decode("ascii", "ignore").replace("\/", "/")[1:-1]
o.write(line[0: line.find("//")+2] + translation + "\n")
# Otherwise just print the line without translation
else:
o.write(line)
# Doesn't really work just now
def translate_batch_py(self, file_name, source_lang, target_lang):
# Open files
f = open(file_name, "r")
o = open(file_name[:file_name.rfind(".")]+".translated" + file_name[file_name.rfind("."):], "w")
# Inialise some variables
in_comment_block = False
output = []
needs_translating = []
for line in f:
# If we are already in a comment block, continue translating
# If we find the end of the block, just write the line
if in_comment_block:
if line.find('"""') != -1:
in_comment_block = False
output.append(line)
else:
if line.isspace():
output.append(line)
else:
needs_translating.append(line)
output.append("")
else:
# If we find a block comment line, start translating if we are not already in a block
if line.find('"""') != -1:
if not in_comment_block:
in_comment_block = True
needs_translating.append(line)
output.append("")
# If we find a single line comment, translate the comment bit and print out the
# line with the newly translated bit
elif line.find("#") != -1:
needs_translating.append(line)
output.append("")
# Otherwise just print the line without translation
else:
output.append(line)
translation = self.bing.TranslateText(' ^ | ^ '.join(needs_translating), source_lang, target_lang).decode("ascii", "ignore").replace("\/", "/").replace('\\"', '"').replace("\\u000a", '\n')[1:-1].split("^ | ^")
print translation
i = 0
for line in output:
if line == "":
o.write(translation[i] + "\n")
i += 1
else:
o.write(line)
f.close()
o.close()
|
import torch
from app.dataset.wheat import WheatDataset, PredictionDataset
from torch.utils.data import DataLoader
from app import config
from pathlib import Path
from object_detection.utils import DetectionPlot
from app import config
def test_train_dataset() -> None:
    """Smoke-test WheatDataset: dtypes plus rendered box plots for item 100."""
    dataset = WheatDataset(config.annot_file, config.train_image_dir, max_size=1024)
    image_id, img, boxes, _ = dataset[0]
    assert img.dtype == torch.float32
    assert boxes.dtype == torch.float32
    for i in range(10):
        _, img, boxes, _ = dataset[100]
        _, height, width = img.shape
        plot = DetectionPlot(figsize=(20, 20), w=width, h=height)
        plot.with_image(img)
        plot.with_yolo_boxes(boxes, color="red")
        plot.save(f"{config.working_dir}/test-dataset-{i}.png")
def test_prediction_dataset() -> None:
    """Smoke-test PredictionDataset: first item yields a float32 image."""
    dataset = PredictionDataset("/kaggle/input/global-wheat-detection/test", max_size=512)
    img_id, img = dataset[0]
    assert img.dtype == torch.float32
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated schema migration: create the MentalHealth model."""

    dependencies = [
        ('assessment', '0018_auto_20160318_1731'),
    ]

    operations = [
        migrations.CreateModel(
            name='MentalHealth',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                # Optional link to the assessed client; NULL until assigned.
                ('client', models.ForeignKey(default=None, blank=True, to='assessment.Client', null=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
|
import numpy as np

# Scratch trigonometry calculations (law of sines/cosines, angle sums,
# triangle areas). Each bare expression was evaluated interactively in a
# REPL/notebook; run as a script the results are simply discarded.

np.arcsin(0.25)
np.arcsin(np.deg2rad(0.25))
np.rad2deg(np.arcsin(0.25))
# Remaining angle of right triangles (angles sum to 180 degrees).
180 - (21 + 90)
180 - (47 + 90)
69 - 43
# Law of sines: side = opposite_side * sin(A) / sin(B).
(2500*np.sin(np.deg2rad(21)))/np.sin(np.deg2rad(26))
(2500 * np.tan(np.deg2rad(21))) / (np.tan(np.deg2rad(47)) - np.tan(np.deg2rad(21)))
1394 * np.tan(np.deg2rad(47))
180 - (33 + 63)
(200*np.sin(np.deg2rad(63))) / np.sin(np.deg2rad(84))
(40*np.sin(np.deg2rad(63))) / np.sin(np.deg2rad(12))
# Inverse-trig angle recoveries.
np.rad2deg(np.arcsin(210/328))
np.rad2deg(np.arcsin((84*np.sin(np.deg2rad(74))) / 126))
400*np.sin(np.deg2rad(75))
180 - (90 + 49)
180 - (75 + 90)
np.rad2deg(np.arcsin(1/3))
225 + 81
306 / 17
(np.tan(np.deg2rad(60))) / np.cos(np.deg2rad(45))
1 / np.sqrt(6)
np.sqrt(6)
600 * np.sin(np.deg2rad(75))
np.rad2deg(np.arcsin(18 / 22))
np.rad2deg(np.arccos(50 / 160))
150 * np.sin(np.deg2rad(37))
180 - (18 + 90)
180 - (56 + 90)
72 - 34
(2200*np.sin(np.deg2rad(18))) / np.sin(np.deg2rad(38))
1104*np.sin(np.deg2rad(56))
180 - 108
(9*np.sin(np.deg2rad(54))) / np.sin(np.deg2rad(72))
180 - (65 + 80)
(45*np.sin(np.deg2rad(65)))/np.sin(np.deg2rad(35))
(10*np.sin(np.deg2rad(30))) / 4
np.rad2deg(np.arcsin(((5*np.sin(np.deg2rad(45))) / 4)))
np.rad2deg(np.arcsin((10*np.sin(np.deg2rad(85))) /13))
np.rad2deg(np.arcsin((4*np.sin(np.deg2rad(150)))/7))
(5*np.sin(np.deg2rad(45))) / 3
# Triangle areas: (1/2) * a * b * sin(C) style computations.
283*283*np.sin(np.deg2rad(20))
27392 / 36
761 * 12
180 - (30 + 45)
(10*np.sin(np.deg2rad(105)))/np.sin(np.deg2rad(45))
(70*np.sqrt(3))/2
(420*np.sin(np.deg2rad(36)))/250
(420 * 300 * np.sin(np.deg2rad(36))) / 2
np.rad2deg((6*np.sqrt(2))/18)
np.rad2deg(np.arcsin(np.sqrt(2) / 3))
(16*np.sqrt(3)) / 3
|
# Minimum cost to obtain X A-pizzas and Y B-pizzas when an AB-pizza costs C
# (two AB-pizzas = one A + one B). Try every count i of AB-pizza pairs from
# 0..max(X, Y); the shortfall of each kind is bought individually.
A, B, C, X, Y = map(int, input().split())
best = min(
    2 * C * i + A * max(0, X - i) + B * max(0, Y - i)
    for i in range(max(X, Y) + 1)
)
print(best)
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for downloading and reading MNIST data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
#import matplotlib.pyplot as plt
import os
import re
import tensorflow.python.platform
import sys
#import tarfile
#from IPython.display import display, Image
from scipy import ndimage
from six.moves.urllib.request import urlretrieve
from six.moves import cPickle as pickle
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
image_size = 128 # Pixel width and height.
def load(data_path, max_num_images):
    """Load grayscale image patches and their per-pixel label maps.

    Walks every folder under `data_path`, expecting `patch` and `label`
    subfolders with numerically-matching filenames. Each patch is normalized
    to zero mean / unit std; results are trimmed to the number of files
    actually read.

    Returns:
        (dataset, labels): float32 array of shape (n, image_size, image_size)
        and int32 array of the same shape.
    Raises:
        Exception on unexpected image shape or a patch/label count mismatch.
    """
    dataset = np.ndarray(
        shape=(max_num_images, image_size, image_size), dtype=np.float32)
    labels = np.ndarray(shape=(max_num_images, image_size, image_size), dtype=np.int32)
    data_folders = os.listdir(data_path)
    label_index = 0
    image_index = 0
    for data_folder in data_folders:
        image_folder = data_path + data_folder + '/patch'
        label_folder = data_path + data_folder + '/label'
        image_filenames = os.listdir(image_folder)
        label_filenames = os.listdir(label_folder)
        # Sort by the numeric part of the name so patch N lines up with label N.
        image_filenames = sorted(image_filenames, key=lambda x: (int(re.sub('\D', '', x)), x))
        label_filenames = sorted(label_filenames, key=lambda x: (int(re.sub('\D', '', x)), x))
        for image in image_filenames:
            image_file = os.path.join(image_folder, image)
            try:
                # NOTE(review): scipy.ndimage.imread was removed in scipy >= 1.2;
                # confirm the pinned scipy version (or switch to imageio).
                image_data = ndimage.imread(image_file).astype(float)
                # Per-image standardization (zero mean, unit std).
                image_mean = np.mean(image_data)
                image_std = np.std(image_data)
                image_data = (image_data - image_mean) / image_std
                if image_data.shape != (image_size, image_size):
                    raise Exception('Unexpected image shape: %s' % image_file)
                dataset[image_index, :, :] = image_data
                image_index += 1
            except IOError as e:
                print('Could not read:', image_file, ':', e, '- it\'s ok, skipping.')
        for image in label_filenames:
            label_file = os.path.join(label_folder, image)
            try:
                label_data = ndimage.imread(label_file)
                if label_data.shape != (image_size, image_size):
                    raise Exception('Unexpected label file shape: %s' % label_file)
                labels[label_index, :, :] = label_data
                label_index += 1
            except IOError as e:
                # BUG FIX: report the label file that failed (was image_file,
                # which names the last *patch* read, not the broken label).
                print('Could not read:', label_file, ':', e, '- it\'s ok, skipping.')
    num_images = image_index
    num_labels = label_index
    if num_labels != num_images:
        # BUG FIX: actually interpolate the counts — the original passed them
        # as extra Exception args, so the message never contained the numbers
        # (and misspelled 'equal').
        raise Exception('num of images %d is not equal to the number of label files %d'
                        % (num_images, num_labels))
    dataset = dataset[0:num_images, :, :]
    labels = labels[0:num_labels, :, :]
    print('Full dataset tensor:', dataset.shape)
    print('Mean:', np.mean(dataset))
    print('Standard deviation:', np.std(dataset))
    print('Labels:', labels.shape)
    return dataset, labels
def randomize(dataset, labels):
    """Shuffle `dataset` and `labels` in unison with one random permutation."""
    order = np.random.permutation(labels.shape[0])
    return dataset[order, :, :], labels[order]
def dense_to_one_hot(labels_dense, num_classes=10):
    """Convert class labels from scalars to one-hot vectors."""
    n = labels_dense.shape[0]
    one_hot = np.zeros((n, num_classes))
    # Fancy indexing sets exactly one 1 per row at the label's column.
    one_hot[np.arange(n), labels_dense.ravel()] = 1
    return one_hot
class DataSet(object):
    """In-memory dataset with epoch-aware mini-batch iteration (TF v1 style)."""

    def __init__(self, images, labels, fake_data=False, one_hot=False,
                 dtype=tf.float32):
        """Construct a DataSet.
        one_hot arg is used only if fake_data is true. `dtype` can be either
        `uint8` to leave the input as `[0, 255]`, or `float32` to rescale into
        `[0, 1]`.
        """
        dtype = tf.as_dtype(dtype).base_dtype
        if dtype not in (tf.uint8, tf.float32):
            raise TypeError('Invalid image dtype %r, expected uint8 or float32' %
                            dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert images.shape[0] == labels.shape[0], (
                'images.shape: %s labels.shape: %s' % (images.shape,
                                                       labels.shape))
            self._num_examples = images.shape[0]
            print(images.shape[0], images.shape)
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            assert images.shape[3] == 1
            # NOTE(review): the reshape/rescale below is deliberately disabled —
            # images are stored with their original shape and value range.
            #images = images.reshape(images.shape[0],
            #                        images.shape[1] * images.shape[2])
            #if dtype == tf.float32:
            # Convert from [0, 255] -> [0.0, 1.0].
            #images = images.astype(np.float32)
            #images = np.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        # Epoch bookkeeping for next_batch().
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * image_size * image_size
            if self.one_hot:
                fake_label = [1] + [0] * 9
            else:
                fake_label = 0
            return [fake_image for _ in xrange(batch_size)], [
                fake_label for _ in xrange(batch_size)]
        start = self._index_in_epoch
        self._index_in_epoch += batch_size
        if self._index_in_epoch > self._num_examples:
            # Finished epoch: reshuffle and restart from the beginning.
            self._epochs_completed += 1
            # Shuffle the data (images and labels with the same permutation).
            perm = np.arange(self._num_examples)
            np.random.shuffle(perm)
            self._images = self._images[perm]
            self._labels = self._labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size
            assert batch_size <= self._num_examples
        end = self._index_in_epoch
        return self._images[start:end], self._labels[start:end]
def convert_to_label(images):
    """Binarize pixel values (anything > 1 becomes 1) and flatten each image
    to a row vector. Works on a copy; the input array is left untouched."""
    print("converting to label")
    binarized = np.copy(images)
    binarized[binarized > 1] = 1
    return binarized.reshape(binarized.shape[0], binarized.shape[1] * binarized.shape[2])
def read_data_sets(train_dir, fake_data=False, one_hot=False, dtype=tf.float32):
    """Load the patch dataset from `train_dir` and split it into train,
    validation and test DataSet objects.

    The shuffle is seeded (133) so the splits are reproducible across runs.
    Returns an object with `.train`, `.validation` and `.test` attributes.
    """
    class DataSets(object):
        pass
    data_sets = DataSets()
    images, labels = load(train_dir, 30000)
    np.random.seed(133)  # deterministic shuffle -> reproducible splits
    images, labels = randomize(images, labels)
    VALIDATION_SIZE = 200
    TEST_SIZE = 100
    # Add a trailing channel dimension; flatten labels to per-pixel vectors.
    images = images.reshape(images.shape + (1,))
    labels = labels.reshape(labels.shape[0], image_size * image_size)
    print("label reshaped:", labels.shape)
    validation_images = images[:VALIDATION_SIZE]
    validation_labels = labels[:VALIDATION_SIZE]
    test_images = images[VALIDATION_SIZE:VALIDATION_SIZE + TEST_SIZE]
    test_labels = labels[VALIDATION_SIZE:VALIDATION_SIZE + TEST_SIZE]
    # BUG FIX: training data previously started at VALIDATION_SIZE, so every
    # test example was also present in the training set (train/test leakage).
    # Skip the test slice as well.
    train_images = images[VALIDATION_SIZE + TEST_SIZE:]
    train_labels = labels[VALIDATION_SIZE + TEST_SIZE:]
    data_sets.train = DataSet(train_images, train_labels, dtype=dtype)
    data_sets.validation = DataSet(validation_images, validation_labels,
                                   dtype=dtype)
    data_sets.test = DataSet(test_images, test_labels, dtype=dtype)
    return data_sets
#data_sets=read_data_sets('data_in_patch/')
|
#!/usr/bin/env python3
import signal
import sys
import time
from selenium.common.exceptions import StaleElementReferenceException
from selenium.common.exceptions import WebDriverException
import test_common as common
import test_config as config
from user_map import user_map
# Per-host test parameters: which breakout room each simulated client joins
# and whether it joins muted. Keys are hostname slugs (see hostname_slug()).
session_map = {
    "webrtc-test.stirlab.local": {
        "join_number": 1,
        "mute": False,
    },
    "webrtc-test-base.stirlab.net": {
        "join_number": 1,
        "mute": False,
    },
    "webrtc-test-0.stirlab.net": {
        "join_number": 1,
        "mute": False,
    },
    "webrtc-test-1.stirlab.net": {
        "join_number": 1,
        "mute": True,
    },
    "webrtc-test-2.stirlab.net": {
        "join_number": 2,
        "mute": False,
    },
    "webrtc-test-3.stirlab.net": {
        "join_number": 2,
        "mute": True,
    },
    "webrtc-test-4.stirlab.net": {
        "join_number": 3,
        "mute": False,
    },
    "webrtc-test-5.stirlab.net": {
        "join_number": 3,
        "mute": True,
    },
    "webrtc-test-6.stirlab.net": {
        "join_number": 4,
        "mute": False,
    },
    "webrtc-test-7.stirlab.net": {
        "join_number": 4,
        "mute": True,
    },
    "webrtc-test-8.stirlab.net": {
        "join_number": 5,
        "mute": False,
    },
    "webrtc-test-9.stirlab.net": {
        "join_number": 5,
        "mute": True,
    },
}

def usage():
    """Print command-line usage help."""
    print("Usage: %s" % sys.argv[0])
    print("Configuration variables set in test_config.py")

# At most one optional positional argument (extra room data) is accepted.
if len(sys.argv) > 2:
    usage()
data = None
if len(sys.argv) > 1:
    data = sys.argv[1]

def exit_callback():
    """Signal-handler hook: shut the browser down if it was started."""
    try:
        driver.quit()
    except NameError:
        print("No driver instance to close")

common.setup_signal_handlers(exit_callback)
hostname = common.hostname_slug()
# Map this machine to a test user; bail out if it is not provisioned.
user_id = None
if hostname in user_map:
    user_id = user_map[hostname]["user_id_1"]
else:
    print("ERROR: %s does not map to a valid user ID" % hostname)
    sys.exit(1)
options = common.setup_chrome()
driver = common.make_driver(options)
main_room_url = common.make_main_room_url(user_id, data)
try:
    # Apply traffic shaping for this host, open the room, then keep managing
    # the main room and this host's breakout room until the driver dies.
    common.shape_traffic(hostname)
    driver.get(main_room_url)
    while True:
        if not common.global_pause:
            common.manage_main_room(driver, True)
            common.manage_breakout(driver, session_map[hostname]["join_number"], session_map[hostname]["mute"])
        time.sleep(config.page_wait_time)
except WebDriverException as e:
    common.clear_traffic_shaping()
    print("ERROR: Webdriver error: %s" % e)
# Wait for SIGINT.
signal.pause()
|
class tokens:
    """A simple key/value pair with a fixed attribute set."""

    # __slots__ keeps instances lightweight and rejects unknown attributes.
    __slots__ = ('key', 'value')

    def __init__(self, key: str, value: str):
        self.key = key
        self.value = value
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
A script to extract the list of vanity url shorteners maintained at
vanityurlshorteners.com and add them to the list in create_extra_services.py.
This script doesn't actually update create_extra_services.py, it instead
outputs an updated SERVICES list that can be copy and pasted into
create_extra_services.py.
Copyright (c) 2017, David Mueller
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
    * Neither the names of the developers nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL DAVID MUELLER NOR TITI BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import os
import sys
import time
import urllib
from HTMLParser import HTMLParser
import create_extra_services
class MyHTMLParser(HTMLParser):
    """
    Extract vanity URL shortener domains from vanityurlshorteners.com pages.

    Only the table inside <div class="content-holder"> matters; each row's
    second <td> holds the domain. Every whitespace-separated token with a
    '.' in that cell is added to self.services.
    """
    def __init__(self, services):
        HTMLParser.__init__(self)
        self.services = services        # set of domains, shared with the caller
        self.in_content_holder = False  # inside the content-holder div
        self.in_tbody = False           # inside the table body
        self.td_count = 0               # 1-based cell index within the row
        self.parsed_something = False   # at least one domain was extracted

    def handle_starttag(self, tag, attrs):
        # Entering the content-holder div turns tracking on.
        if tag == 'div' and ('class', 'content-holder') in attrs:
            self.in_content_holder = True
            return
        if not self.in_content_holder:
            return
        if tag == 'tbody':
            self.in_tbody = True
        elif self.in_tbody:
            if tag == 'tr':
                self.td_count = 0
            elif tag == 'td':
                self.td_count += 1

    def handle_endtag(self, tag):
        if tag == 'tbody':
            self.in_tbody = False
        elif tag == 'div':
            self.in_content_holder = False

    def handle_data(self, data):
        # Only the second cell of a row inside the tracked table is relevant.
        if not (self.in_content_holder and self.in_tbody and self.td_count == 2):
            return
        for token in data.split():
            if '.' in token:
                self.services.add(token)
                self.parsed_something = True
def get_page_content(url):
    """Fetch `url` and return the response body as a string.

    BUGFIX: the Content-Length header is returned as a string (or None),
    but file.read() expects an int, so the original passed the raw header
    straight through. Convert it, and read to EOF when it is absent. The
    page is now also closed even if reading fails.
    """
    page = urllib.urlopen(url)
    try:
        headers = page.info()
        page_len = headers.getheader('Content-Length')
        if page_len is None:
            content = page.read()
        else:
            content = page.read(int(page_len))
    finally:
        page.close()
    return content
def parse_page(url, content, services):
    """Feed `content` through MyHTMLParser; warn on stderr if nothing parsed."""
    html_parser = MyHTMLParser(services)
    html_parser.feed(content)
    if not html_parser.parsed_something:
        warning = 'WARNING: Failed to parse content from %s %s' % (url, os.linesep)
        sys.stderr.write(warning)
    return html_parser.services
if __name__ == '__main__':
    # Start from the services already known to create_extra_services and
    # merge in everything scraped from vanityurlshorteners.com.
    services_set = set(create_extra_services.SERVICES)
    # The site paginates alphabetically: one page per letter a-z.
    for letter in xrange(ord('a'), ord('z')+1):
        url = 'http://www.vanityurlshorteners.com/%s' % chr(letter)
        content = get_page_content(url)
        services_set = parse_page(url, content, services_set)
        # Be polite to the remote server between requests.
        time.sleep(2)
    services_list = list(services_set)
    services_list.sort()
    # Emit a SERVICES list literal suitable for pasting into
    # create_extra_services.py (this script never edits that file itself).
    print 'SERVICES = ['
    for service in services_list:
        print "    '%s'," % service
    print '    ]'
|
#!/usr/bin/python
# (c) 2018 Jim Hawkins. MIT licensed, see https://opensource.org/licenses/MIT
# Part of Blender Driver, see https://github.com/sjjhsjjh/blender-driver
"""Blender Driver unit test that can be run from the unittest application.
This module is intended for use within Blender Driver and can only be used from
within Blender."""
# Exit if run other than as a module: this file only works when imported
# inside Blender Driver, so direct execution just explains itself and
# exits with a non-zero status.
if __name__ == '__main__':
    print(__doc__)
    raise SystemExit(1)
# Standard library imports, in alphabetic order.
#
# Unit test module.
# https://docs.python.org/3/library/unittest.html
import unittest
#
# Local imports.
#
# Custom TestCase
from applications.unittest import TestCaseWithApplication
# Implicit test of two classes in different modules for thread tests.
class TestThreadClass(TestCaseWithApplication):
    """Thread-behaviour smoke test that fails on purpose."""
    def test_thread_fail(self):
        # Acquire and release the application's main lock to prove the test
        # thread can synchronise with the application.
        with self.application.mainLock:
            pass
        # `self.tick` presumably synchronises with an application tick --
        # confirm in TestCaseWithApplication. The failure is intentional.
        with self.tick:
            self.assertTrue(False, "Intended fail.")
|
from flask import Flask
import sys
from flask_socketio import SocketIO, emit, Namespace, join_room, leave_room, rooms
from TwitterAPI import TwitterAPI
app = Flask(__name__)
application = app  # alias for WSGI servers that look up 'application'
# Setup the app with the config.py file
app.config.from_object('config')
socketio = SocketIO(app)
# SECURITY(review): live Twitter API credentials are committed in source.
# They should be revoked immediately and loaded from environment variables
# or the config object instead.
consumer_key='zmkQPv0G6S4fKW3jXR9juM3TC'
consumer_secret='lgY19NPvCeDw1vowvtBEOvxykkUhfmuYVZ4VBf1dvOyBgfACCy'
access_token_key='2940190784-Phe9AsHsqvVhnoOuvUgWZn4lzZ3CIwNvDh4W4kF'
access_token_secret='9s2eBCKCI297hi4yF2BJoHZxZk9o2Yd20tJGU8RsO4I8N'
twitterApi = TwitterAPI(consumer_key, consumer_secret, access_token_key, access_token_secret)
from tw33t.views import main
|
# Import libraries
import tensorflow as tf
import numpy as np
import collections
import os
import collections
import matplotlib
matplotlib.use('Qt5Agg')
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib import style
import scipy.io
from time import sleep
# ROS Stuff
import rospy
from sensor_msgs.msg import JointState
#Define variables
D=24 #Number of features (in this case, number of motors)
batch_size = 100 #Size of the batch to be fed in the NN
SET_SIZE=60*batch_size #Size of the dataset
# NOTE(review): D duplicates the hard-coded 24/25 column counts used by the
# buffers and slicing below; keep them in sync.
# Initialize a ROS node for this prediction script.
rospy.init_node('ANN_tension_predicition_v3')
# Create two fixed-size buffers to "ping-pong" between: one fills with
# incoming samples while the other is consumed by the prediction loop.
# Each row is one JointState sample (24 motor efforts + 1 tension value).
effort1 = np.ones([100,25])*np.nan
effort2 = np.ones([100,25])*np.nan
effort = [effort1, effort2] # one array for both buffers addressable by arraySwitch defined below
index = 0 # next free row in the buffer currently being filled
arraySwitch = 0 # which buffer (0 or 1) is currently being filled
array1Full = 0 # flag to signal that buffer 0 holds a complete batch
array2Full = 0 # flag to signal that buffer 1 holds a complete batch
# ROS message callback for Hebi JointState message
def callback(msg):
    """Store msg.effort in the active buffer; swap buffers when one fills.

    The original implementation had two near-identical branches (one per
    buffer) differing only in which flag they set; the shared logic is
    factored out here. `effort` needs no `global` declaration because its
    arrays are only mutated in place, never rebound.
    """
    global index
    global arraySwitch
    global array1Full
    global array2Full
    effort[arraySwitch][index,:] = msg.effort
    index += 1
    if index >= 100:
        # Mark the just-completed buffer as full, then start on the other.
        if arraySwitch == 0:
            array1Full = 1
        else:
            array2Full = 1
        arraySwitch = 1 - arraySwitch
        index = 0
# Subscribe to the ROS topic which houses the Hebi joint feedback.
rospy.Subscriber("/hebiros/my_group/feedback/joint_state", JointState, callback)
# LOAD trained neural network from its saved checkpoint.
sess=tf.Session()
saver = tf.train.import_meta_graph('NN_tension_pred_saver-200000.meta')
saver.restore(sess,tf.train.latest_checkpoint('./'))
# Get saved graph and the tensors needed for inference.
graph=tf.get_default_graph()
Xin=graph.get_tensor_by_name("Xin:0")  # network input placeholder
y_=graph.get_tensor_by_name("y_:0")    # network output tensor
### Compute and plot data ###
target=[]                   # measured tension samples, for plotting
validation_prediction=[]    # NN-predicted tension samples, for plotting
# Interactive plot: one line for predicted, one for measured tension.
plt.ion()
fig = plt.figure()
ax = fig.add_subplot(111)
pred_line, = ax.plot([], [],'-k',label='prediction')
real_line, = ax.plot([], [],'-r',label='real')
ax.legend()
# Feed each full buffer of features to the NN and refresh the plot.
while not rospy.is_shutdown():
    if (array1Full==1 or array2Full==1):
        if array1Full==1:
            # First 24 columns are features; column 24 is the tension sensor.
            inp_data = effort[0][:,0:24]
            target=np.append(target,effort[0][:,24])
            array1Full = 0
        if array2Full==1:
            inp_data = effort[1][:,0:24]
            target=np.append(target,effort[1][:,24])
            array2Full = 0
        # Run the NN prediction on the freshly completed batch.
        pred_v = sess.run(y_,feed_dict={Xin: inp_data})
        validation_prediction=np.append(validation_prediction,pred_v)
        # BUGFIX: np.delete() returns a new array; the original discarded its
        # result, so both plot buffers grew without bound. Keep only the most
        # recent SET_SIZE samples instead.
        if (np.size(target) > SET_SIZE):
            target = target[-SET_SIZE:]
        if (np.size(validation_prediction) > SET_SIZE):
            validation_prediction = validation_prediction[-SET_SIZE:]
        # Update both lines, rescale the axes, and redraw.
        pred_line.set_ydata(validation_prediction)
        pred_line.set_xdata(range(len(validation_prediction)))
        real_line.set_ydata(target)
        real_line.set_xdata(range(len(target)))
        ax.relim()
        ax.autoscale_view()
        plt.draw()
        plt.pause(0.01)
        print('test')
|
# -*- coding: utf-8 -*-
from django.apps import AppConfig
from django.apps import apps
from django.db.models import signals
def connect_tasks_signals():
    """Attach pre-save handlers (finished date, tag normalization) to Task."""
    from tina.projects.tagging import signals as tagging_handlers
    from . import signals as handlers
    # Finished date
    signals.pre_save.connect(handlers.set_finished_date_when_edit_task,
                             sender=apps.get_model("tasks", "Task"),
                             dispatch_uid="set_finished_date_when_edit_task")
    # Tags
    signals.pre_save.connect(tagging_handlers.tags_normalization,
                             sender=apps.get_model("tasks", "Task"),
                             dispatch_uid="tags_normalization_task")
def connect_tasks_close_or_open_us_and_milestone_signals():
    """Attach handlers that open/close the US and milestone on task changes."""
    from . import signals as handlers
    # Cached prev object version
    signals.pre_save.connect(handlers.cached_prev_task,
                             sender=apps.get_model("tasks", "Task"),
                             dispatch_uid="cached_prev_task")
    # Open/Close US and Milestone
    signals.post_save.connect(handlers.try_to_close_or_open_us_and_milestone_when_create_or_edit_task,
                              sender=apps.get_model("tasks", "Task"),
                              dispatch_uid="try_to_close_or_open_us_and_milestone_when_create_or_edit_task")
    signals.post_delete.connect(handlers.try_to_close_or_open_us_and_milestone_when_delete_task,
                                sender=apps.get_model("tasks", "Task"),
                                dispatch_uid="try_to_close_or_open_us_and_milestone_when_delete_task")
def connect_tasks_custom_attributes_signals():
    """Attach the post-save handler that seeds custom attribute values."""
    from tina.projects.custom_attributes import signals as custom_attributes_handlers
    signals.post_save.connect(custom_attributes_handlers.create_custom_attribute_value_when_create_task,
                              sender=apps.get_model("tasks", "Task"),
                              dispatch_uid="create_custom_attribute_value_when_create_task")
def connect_all_tasks_signals():
    """Wire up every task-related signal handler group."""
    for connect in (connect_tasks_signals,
                    connect_tasks_close_or_open_us_and_milestone_signals,
                    connect_tasks_custom_attributes_signals):
        connect()
def disconnect_tasks_signals():
    """Detach the handlers attached by connect_tasks_signals()."""
    signals.pre_save.disconnect(sender=apps.get_model("tasks", "Task"),
                                dispatch_uid="set_finished_date_when_edit_task")
    # BUGFIX: the tags handler is connected with dispatch_uid
    # "tags_normalization_task"; disconnecting with "tags_normalization"
    # never matched, so the handler silently stayed attached.
    signals.pre_save.disconnect(sender=apps.get_model("tasks", "Task"),
                                dispatch_uid="tags_normalization_task")
def disconnect_tasks_close_or_open_us_and_milestone_signals():
    """Detach the handlers attached by the matching connect_* function."""
    signals.pre_save.disconnect(sender=apps.get_model("tasks", "Task"),
                                dispatch_uid="cached_prev_task")
    signals.post_save.disconnect(sender=apps.get_model("tasks", "Task"),
                                 dispatch_uid="try_to_close_or_open_us_and_milestone_when_create_or_edit_task")
    signals.post_delete.disconnect(sender=apps.get_model("tasks", "Task"),
                                   dispatch_uid="try_to_close_or_open_us_and_milestone_when_delete_task")
def disconnect_tasks_custom_attributes_signals():
    """Detach the custom-attributes post-save handler."""
    signals.post_save.disconnect(sender=apps.get_model("tasks", "Task"),
                                 dispatch_uid="create_custom_attribute_value_when_create_task")
def disconnect_all_tasks_signals():
    """Detach every task-related signal handler group."""
    for disconnect in (disconnect_tasks_signals,
                       disconnect_tasks_close_or_open_us_and_milestone_signals,
                       disconnect_tasks_custom_attributes_signals):
        disconnect()
class TasksAppConfig(AppConfig):
    """Django app config that wires task signals once the registry is ready."""
    name = "tina.projects.tasks"
    verbose_name = "Tasks"
    def ready(self):
        # Connect handlers here, after the app registry is fully populated.
        connect_all_tasks_signals()
|
from django.core.management.base import BaseCommand
from chatwork.models import Account
from chatwork.views import get_diff
from datetime import date
from dateutil.relativedelta import relativedelta
import environ
import requests
env = environ.Env(DEBUG=(bool, False))
class Command(BaseCommand):
    """Post a report of accounts added/dropped since yesterday to Chatwork.

    Requires ROOM_ID and CHATWORK_API_TOKEN in the environment.
    """
    def handle(self, *args, **options):
        today = date.today().isoformat()
        yesterday = (date.today() - relativedelta(days=1)).isoformat()
        data = get_diff(yesterday, today)
        report_title = data['period']
        report_added = 'added: ' + '(' + str(len(data['added'])) + ')' + ' / '.join(list(d.name for d in data['added']))
        report_dropped = 'dropped: ' + '(' + str(len(data['dropped'])) + ')' + ' / '.join(list(d.name for d in data['dropped']))
        report = """
        {report_title}
        {report_added}
        {report_dropped}
        """.format(report_title=report_title, report_added=report_added, report_dropped=report_dropped).strip()
        base = 'https://api.chatwork.com/v2/'
        room_id = env('ROOM_ID')
        end_point = 'rooms/' + room_id + '/messages'
        api_token = env('CHATWORK_API_TOKEN')
        headers = {'X-ChatWorkToken': api_token, 'Content-Type': 'application/x-www-form-urlencoded'}
        payload = dict(body=report, self_unread=1)
        # BUGFIX: the payload was sent with `params=`, which puts the fields
        # on the query string even though the Content-Type header declares a
        # form-encoded body. The Chatwork API expects the fields in the body,
        # so send them with `data=`.
        res = requests.post(base + end_point, headers=headers, data=payload)
        # Surface API failures (non-2xx) so the cron job exits non-zero
        # instead of failing silently.
        res.raise_for_status()
#/usr/bin/env python
#
# ChaosGame.py
# Python script for UWS first year lab
# Chaos Game lab experiment
# James Keatings
# James.Keatings@uws.ac.uk
#
import random
import matplotlib.pyplot as plt
# Triangle vertex coordinates for the chaos game.
points_x = [0, 100, 50]
points_y = [100, 100, 0]
#BEGIN EXPERIMENT
print 'Welcome to the Chaos Game lab experiment.'
#Define starting point
#x = random.randint(0,100)
#y = random.randint(0,100)
x=50
y=0
points_x.append(x)
points_y.append(y)
#Run loop: repeatedly jump halfway toward a randomly chosen attractor point
# and record each visited position.
loop_length = 100000
j = 0
while j < loop_length:
    # NOTE(review): randint(0,3) is inclusive, so index 3 (the appended
    # starting point, which coincides with vertex (50,0)) is also a possible
    # attractor, doubling that corner's weight -- confirm whether
    # randint(0,2) was intended for a plain 3-vertex chaos game.
    roll = random.randint(0,3)
    x = round((x + points_x[roll])/2,0)
    y = round((y + points_y[roll])/2,0)
    points_x.append(x)
    points_y.append(y)
    j += 1
#Plot results
p1 = plt.scatter(points_x, points_y, s=1)
# NOTE(review): plt.show() does not take a plot object; passing p1 here is
# presumably harmless but unnecessary -- confirm.
plt.show(p1)
#End experiment
print 'The final graph has been produced.\nPlease refer to the labscript for the next steps.\n'
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Solve project euler 4
Find the largest palindrome made from the product of two 3-digit numbers.
"""
from utils import is_palindrome
def pe4(n=3):
    """Largest palindrome that is a product of two n-digit numbers.

    BUGFIX: the original started the search at 9*10**(n-1) + 1, which makes
    pe4(1) return 0 (empty range). This version searches the full n-digit
    range, descending with pruning so it stays fast, and checks the decimal
    palindrome property inline (same semantics as utils.is_palindrome).

    >>> pe4()
    906609
    >>> pe4(2)
    9009
    >>> pe4(1)
    9
    """
    lo, hi = 10 ** (n - 1), 10 ** n
    best = 0
    for x in range(hi - 1, lo - 1, -1):
        # Every remaining product is <= x*x, so stop once that can't win.
        if x * x <= best:
            break
        for y in range(x, lo - 1, -1):
            product = x * y
            if product <= best:
                break  # products only shrink as y decreases
            s = str(product)
            if s == s[::-1]:
                best = product
    return best
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # Tiny REPL: read a digit count n, print pe4(n), until EOF/interrupt.
    try:
        while True:
            s = input('> ')
            n = int(s)
            print(pe4(n))
    # SyntaxError/NameError are presumably caught for Python 2, where
    # input() evaluates what is typed -- confirm intended interpreter.
    except (SyntaxError, EOFError, KeyboardInterrupt, NameError):
        pass
|
"""
Site-specific code extension
"""
from typing import Any, Dict
from flask import g
from byceps.services.seating import seat_service
from byceps.services.ticketing import ticket_service
def template_context_processor() -> Dict[str, Any]:
    """Extend the template context with party-wide ticket/seat statistics."""
    party_id = g.party_id
    context: Dict[str, Any] = {
        'ticket_sale_stats': ticket_service.get_ticket_sale_stats(party_id),
        'seat_utilization': seat_service.get_seat_utilization(party_id),
    }
    return context
|
import unittest
from leetcode.algorithms.p0328_odd_even_linked_list_1 import ListNode, Solution
from tests.algorithms.list_helper import convert_linked_list_to_list
class TestOddEvenLinkedList(unittest.TestCase):
    """Unit test for LeetCode 328: odd-even linked list reordering."""
    def test_odd_even_linked_list(self):
        # Build the list 1 -> 2 -> 3 -> 4 -> 5.
        a = ListNode(1)
        b = ListNode(2)
        c = ListNode(3)
        d = ListNode(4)
        e = ListNode(5)
        a.next = b
        b.next = c
        c.next = d
        d.next = e
        solution = Solution()
        # Odd positions (1,3,5) come first, then even positions (2,4).
        self.assertListEqual([1, 3, 5, 2, 4], convert_linked_list_to_list(
            solution.oddEvenList(a)))
|
class Config(object):
    """Base Flask/SQLAlchemy configuration.

    SECURITY(review): DEBUG/DEVELOPMENT are enabled and a secret key plus a
    real-looking database password are committed in source; rotate these
    credentials and load them from the environment instead.
    """
    DEBUG = True
    DEVELOPMENT = True
    SECRET_KEY = '@!secretkey!'
    static_folder = 'static'
    SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://super:!important@inqw-442.postgres.pythonanywhere-services.com:10442/sms'
    SQLALCHEMY_TRACK_MODIFICATIONS = False
class Auth(object):
    """Google OAuth2 endpoints and client settings (placeholders committed)."""
    CLIENT_ID = ('KEY')
    CLIENT_SECRET = 'SECRET'
    REDIRECT_URI = 'https://localhost:5000/authorize/google'
    AUTH_URI = 'https://accounts.google.com/o/oauth2/auth'
    TOKEN_URI = 'https://accounts.google.com/o/oauth2/token'
    USER_INFO = 'https://www.googleapis.com/userinfo/v2/me'
|
from yarl import URL
def urljoin(base_url: str, *urlpath) -> URL:
    """Join path segments onto `base_url`, trimming stray spaces/slashes.

    NOTE: URL.with_path() replaces any path already present on base_url, so
    only the given segments end up in the result's path.
    """
    segments = [str(segment).strip(' /') for segment in urlpath if segment]
    joined = '/'.join(segments)
    return URL(base_url).with_path(joined)
|
#!/usr/bin/python3
# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
import errno
import sys
import select
import socket
import struct
import time
from pydnspp import *
RECV_BUFSIZE = 65536
def _wait_for(ir, iw, ix, expiration):
    """Block in select() until one of the watched sockets is ready.

    ir/iw/ix: socket lists to watch for read/write/exceptional conditions.
    expiration: absolute deadline (time.time()-based) or None for no limit.
    Raises socket.timeout if the deadline passes with no activity.
    """
    done = False
    while not done:
        if expiration is None:
            timeout = None
        else:
            timeout = expiration - time.time()
            if timeout <= 0.0:
                raise socket.timeout
        try:
            if timeout is None:
                (r,w,x) = select.select(ir,iw,ix)
            else:
                (r,w,x) = select.select(ir,iw,ix,timeout)
        except select.error as e:
            # Retry when select() is interrupted by a signal (EINTR).
            if e.args[0] != errno.EINTR:
                raise e
        else:
            done = True
    # select() returning with all sets empty means the timeout elapsed.
    if len(r) == 0 and len(w) == 0 and len(x) == 0:
        raise socket.timeout
def _wait_for_readable(s,expiration):
    """Wait until socket s is readable (or in an exceptional state)."""
    _wait_for([s],[],[s],expiration)
def _compute_expiration(timeout):
    """Turn a relative timeout in seconds (or None) into an absolute deadline."""
    return None if timeout is None else time.time() + timeout
def _send_udp(q, where, timeout=None, port=53, source=None, source_port=0):
    """ Return the response obtained after sending a query via UDP.
    Referred to dnspython source code. """
    qwire = MessageRenderer()
    q.to_wire(qwire)
    if source is not None:
        source = (source, source_port)
    udpCliSock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
    expiration = _compute_expiration(timeout)
    if source is not None:
        udpCliSock.bind(source)
    dest = (where, port)
    udpCliSock.sendto(qwire.get_data(), dest)
    while True:
        _wait_for_readable(udpCliSock, expiration)
        rwire, r_addr = udpCliSock.recvfrom(RECV_BUFSIZE)
        # Only accept datagrams from the server we queried (simple
        # spoofing/stray-packet guard); keep waiting otherwise.
        if dest[0] == r_addr[0] and dest[1:] == r_addr[1:]:
            break
        else:
            sys.stderr.write('Got a respose from: %s instead of %s\n' % (r_addr, dest))
    udpCliSock.close()
    resp = Message(Message.PARSE)
    resp.from_wire(rwire)
    return resp
def _connect(s, address):
    """Start a (possibly non-blocking) connect on socket s.

    Errnos that merely mean "connect still in progress" are ignored;
    anything else is re-raised.
    """
    try:
        s.connect(address)
    except socket.error as msg:
        (exctype,value) = sys.exc_info()[:2]
        if value.errno != errno.EINPROGRESS and \
           value.errno != errno.EWOULDBLOCK and \
           value.errno != errno.EALREADY:
            raise value
def _net_read(sock, count, expiration):
    """ Read the specified number of bytes from sock. Keep trying until we
    either get the desired amount, or we hit EOF (in which case None is
    returned).
    A Timeout exception will be raised if the operation is not completed
    by the expiration time.
    """
    msgdata = b''
    while count > 0:
        _wait_for_readable(sock, expiration)
        data = sock.recv(count)
        if not data:
            # Peer closed the connection before `count` bytes arrived.
            return None
        count -= len(data)
        msgdata += data
    return msgdata
def _net_write(sock, data, expiration):
    """ Write all of the specified data to the socket.
    A Timeout exception will be raised if the operation is not completed
    by the expiration time.

    BUGFIX: the original looped `while current < 1`, guaranteeing only that
    at least one byte was sent (the computed length `l` was never used), and
    it called `_wait_for_writable`, which is not defined anywhere in this
    module (NameError at runtime). Loop until the whole buffer is sent and
    wait for writability via _wait_for directly.
    """
    current = 0
    total = len(data)
    while current < total:
        # Wait until the socket is writable (or errored); _wait_for raises
        # socket.timeout once the expiration deadline passes.
        _wait_for([], [sock], [sock], expiration)
        current += sock.send(data[current:])
def _send_tcp(q, dest, timeout=None, dest_port=53, source=None, source_port=0):
    """ Return the response obtained after sending a query via TCP.
    Referred to dnspython source code """
    qwire = MessageRenderer()
    q.to_wire(qwire)
    if source is not None:
        source = (source, source_port)
    dest = (dest, dest_port)
    tcpCliSock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
    expiration = _compute_expiration(timeout)
    tcpCliSock.setblocking(False)
    if source is not None:
        tcpCliSock.bind(source)
    _connect(tcpCliSock, dest)
    wire_s = qwire.get_data()
    l = len(wire_s)
    # DNS over TCP prefixes the message with its 2-byte big-endian length.
    tcpmsg = struct.pack("!H", l) + wire_s
    _net_write(tcpCliSock, tcpmsg, expiration)
    # Read the 2-byte length prefix, then exactly that many response bytes.
    ldata = _net_read(tcpCliSock, 2, expiration)
    (l,) = struct.unpack("!H", ldata)
    res_wire = _net_read(tcpCliSock, l, expiration)
    tcpCliSock.close()
    resp = Message(Message.PARSE)
    resp.from_wire(res_wire)
    return resp
def send_req(query, server, port=53, timeout=5):
    """ Return the response message obtained after
    sending the query.
    @param query: the query read from input file
    @type query: dict
    @param server: the testee server ip address
    @type server: string
    @param port: the testee server listening port. The default is 53.
    @type port: int
    @param timeout: the number of seconds to wait before the query times out.
    The default is 5.
    @type timeout: float
    """
    qname = query["qname"]
    qtype = query["qtype"]
    qclass = query["qclass"]
    edns = query["edns"]
    dnssec = query["dnssec"]
    qheader = query['header']
    protocol = query['protocol']
    # Build the outgoing message, copying header fields/flags from the query
    # dict so arbitrary (even unusual) header combinations can be tested.
    msg = Message(Message.RENDER)
    msg.set_qid(int(qheader['id']))
    msg.set_opcode(Opcode.QUERY)
    msg.set_rcode(Rcode(int(qheader['rcode'])))
    if qheader['qr'] == 1:
        msg.set_header_flag(Message.HEADERFLAG_QR)
    if qheader['aa'] == 1:
        msg.set_header_flag(Message.HEADERFLAG_AA)
    if qheader['tc'] == 1:
        msg.set_header_flag(Message.HEADERFLAG_TC)
    if qheader['rd'] == 1:
        msg.set_header_flag(Message.HEADERFLAG_RD)
    if qheader['ra'] == 1:
        msg.set_header_flag(Message.HEADERFLAG_RA)
    if qheader['ad'] == 1:
        msg.set_header_flag(Message.HEADERFLAG_AD)
    if qheader['cd'] == 1:
        msg.set_header_flag(Message.HEADERFLAG_CD)
    try:
        msg.add_question(Question(Name(qname), \
                         RRClass(qclass), RRType(qtype)))
    except InvalidRRType as e:
        sys.stderr.write('Unrecognized RR queryeter string: %s\n' % qtype)
        return None
    # Attach an EDNS OPT record when EDNS or DNSSEC awareness is requested.
    if edns == 1 or dnssec == 1:
        edns_conf = EDNS()
        payload = query['payload']
        edns_conf.set_udp_size(payload)
        if dnssec == 1:
            edns_conf.set_dnssec_awareness(True)
        else:
            edns_conf.set_dnssec_awareness(False)
        msg.set_edns(edns_conf)
    # Callers may pass the port as a string; normalise before sending.
    port = int(port)
    if protocol == 'udp':
        resp = _send_udp(msg, server, timeout, port)
    else:
        resp = _send_tcp(msg, server, timeout, port)
    return resp
def main():
    """Demo: send one hand-built ANY/IN query over TCP and print the answer."""
    query = {}
    query['qname'] = "A.example.com"
    query['qtype'] = "ANY"
    query['qclass'] = "IN"
    query["edns"] = 1
    query["dnssec"] = 1
    query["protocol"] = 'tcp'
    query["payload"] = 4096
    query['header'] = {}
    query['header']['id'] = 0
    query['header']['qr'] = 0
    query['header']['opcode'] = 0
    query['header']['aa'] = 0
    query['header']['tc'] = 0
    query['header']['rd'] = 1
    query['header']['ra'] = 0
    query['header']['z'] = 0
    query['header']['ad'] = 0
    query['header']['cd'] = 0
    query['header']['rcode'] = 0
    query['header']['qdcount'] = 0
    query['header']['ancount'] = 0
    query['header']['nscount'] = 0
    query['header']['arcount'] = 0
    # NOTE(review): server IP and port are hard-coded test values; the port
    # is passed as a string and converted inside send_req.
    resp = send_req(query, "218.241.108.124", "4040")
    # (PEP 8 would prefer `resp is None`; behavior is identical here.)
    if resp == None:
        print('timeout')
        exit(1)
    print('qid -----')
    print(resp.get_qid())
    rrset = resp.get_section(Message.SECTION_ANSWER)[0]
    print('name-----')
    print(rrset.get_name())
    print('type')
    print(rrset.get_type())
    print('class-----')
    print(rrset.get_class())
    print(rrset.get_ttl())
    rdata = rrset.get_rdata()
    print(rdata[0].to_text())
|
#@String directory
from ij import IJ
import os
# Open the input image, subtract the background (rolling-ball radius 50),
# and save the result next to the input.
# NOTE(review): IJ.open() displays the image and may return None;
# IJ.openImage() is the variant that returns an ImagePlus -- confirm which
# behaviour is intended before relying on `im` below.
im = IJ.open(os.path.join(directory, 'dummy.tiff'))
IJ.run(im, "Subtract Background...", "rolling=50")
IJ.saveAs(im,'tiff',os.path.join(directory,'backsub.tiff'))
|
# dizygotic_net.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from base.base_model import BaseModel
from layers.encode import Encode
import tensorflow as tf
class DizygoticNet(BaseModel):
    """Twin-branch matching network: encodes a sonar and a satellite image
    separately, concatenates the flattened encodings, and scores the match
    with an MLP ending in a sigmoid.
    """
    def __init__(self, filters: int, loss: tf.keras.losses.Loss, optimizer: tf.keras.optimizers.Optimizer):
        """Build both encoder branches and the matching MLP, then run one
        forward pass on random data so the model is built and summarised.

        NOTE(review): `self.config` (batch_size, input_shape) is presumably
        populated by BaseModel's constructor -- confirm there.
        """
        # Invoke parent class constructor.
        super(DizygoticNet, self).__init__(loss, optimizer, name="DizygoticNet")
        # Store network architecture hyperparameters.
        self.filters = filters
        # Define sublayers of the Dizygotic Network.
        # Sonar encoding layers.
        self.son_e1 = Encode(filters=filters, kernel_size=3, activation_fn=tf.keras.layers.ReLU, with_pool=True)
        self.son_e2 = Encode(filters=filters, kernel_size=3, activation_fn=tf.keras.layers.ReLU, with_pool=True)
        self.son_e3 = Encode(filters=filters, kernel_size=3, activation_fn=tf.keras.layers.ReLU, with_pool=True)
        self.son_e4 = Encode(filters=filters, kernel_size=3, activation_fn=tf.keras.layers.ReLU, with_pool=True)
        self.son_e5 = Encode(filters=filters, kernel_size=3, activation_fn=tf.keras.layers.ReLU, with_pool=True)
        self.son_f = tf.keras.layers.Flatten()
        # Satellite encoding layers.
        self.sat_e1 = Encode(filters=filters, kernel_size=3, activation_fn=tf.keras.layers.ReLU, with_pool=True)
        self.sat_e2 = Encode(filters=filters, kernel_size=3, activation_fn=tf.keras.layers.ReLU, with_pool=True)
        self.sat_e3 = Encode(filters=filters, kernel_size=3, activation_fn=tf.keras.layers.ReLU, with_pool=True)
        self.sat_e4 = Encode(filters=filters, kernel_size=3, activation_fn=tf.keras.layers.ReLU, with_pool=True)
        self.sat_e5 = Encode(filters=filters, kernel_size=3, activation_fn=tf.keras.layers.ReLU, with_pool=True)
        self.sat_f = tf.keras.layers.Flatten()
        self.son_sat_cat = tf.keras.layers.Concatenate()
        # Multilayer perceptron to match encodings.
        self.dense_1 = tf.keras.layers.Dense(units=2048, activation='relu')
        self.dense_2 = tf.keras.layers.Dense(units=512, activation='relu')
        self.dense_3 = tf.keras.layers.Dense(units=64, activation='relu')
        self.dense_4 = tf.keras.layers.Dense(units=1, activation='sigmoid')
        # Generate random fake data.
        x = tf.random.uniform(shape=(self.config.batch_size,) + self.config.input_shape, minval=0.0, maxval=1.0,
                              dtype=tf.float32)
        # Build model and print summary.
        self([x, x], training=False)
        self.summary()
    def call(self, inputs, training=None, mask=None) -> tf.Tensor:
        """Forward pass: inputs is a [sonar, satellite] pair; returns a
        sigmoid match score per example."""
        [x, y] = inputs
        # Sonar
        son = self.son_e1(x, training=training)
        son = self.son_e2(son, training=training)
        son = self.son_e3(son, training=training)
        son = self.son_e4(son, training=training)
        son = self.son_e5(son, training=training)
        son = self.son_f(son)
        # Satellite
        sat = self.sat_e1(y, training=training)
        sat = self.sat_e2(sat, training=training)
        sat = self.sat_e3(sat, training=training)
        sat = self.sat_e4(sat, training=training)
        sat = self.sat_e5(sat, training=training)
        sat = self.sat_f(sat)
        # Concatenate
        z = self.son_sat_cat([son, sat])
        # MLP
        z = self.dense_1(z)
        z = self.dense_2(z)
        z = self.dense_3(z)
        return self.dense_4(z)
|
class TreeNode():
    """A binary tree node: a value plus optional left/right child links."""
    def __init__(self, x):
        self.val = x
        self.left = self.right = None
def preorder_traversal(root, visited=None):
    """Return node values in preorder (root, left, right).

    BUGFIX: the original used a mutable default argument (visited=[]),
    which is shared across calls, so repeated calls accumulated earlier
    results. A None sentinel restores the expected behaviour while keeping
    the signature backward-compatible. A bare None root still returns None,
    matching the original contract.
    """
    if visited is None:
        visited = []
    if root is None:
        return
    visited.append(root.val)
    if root.left:
        preorder_traversal(root.left, visited)
    if root.right:
        preorder_traversal(root.right, visited)
    return visited
if __name__ == '__main__':
    # Build the tree 1 -> (right) 2 -> (left) 3 and print its preorder,
    # which should be [1, 2, 3].
    root = TreeNode(1)
    one = TreeNode(2)
    two = TreeNode(3)
    root.right = one
    one.left = two
    result = preorder_traversal(root)
    print(f'result {result}')
|
#coding:utf8
# Per-node flow demands, indexed by node number (read by h.change()).
li = [20,15,10,15,10,5]
class h(object):
    """Flow-distribution node.

    num: node index; Q: inflow to this node; q: flow consumed here (self.Q
    keeps the remainder to forward); l/t: amounts forwarded on the 'l' and
    't' outgoing edges; child: min/max capacity constraints (from the child
    nodes' demands in `li`) used to rebalance the l/t split.
    NOTE(review): semantics reconstructed from usage in two() -- confirm.
    """
    def __init__(self,num,Q,q,l=None,t=None,child=None):
        self.num = num
        self.Q = Q-q  # remaining flow after this node's own demand
        self.q = q
        self.l = l
        self.t = t
        if self.l and self.t:
            # Both outgoing edges used: start from an even split, apply the
            # child constraints, then nudge in steps of 2 so that both
            # directions carry a positive amount.
            self.l = self.Q/2
            self.t = self.Q/2
            if child:
                self.change(child)
            while self.l <= 0:
                self.l += 2
                self.t -= 2
            while self.t <= 0:
                self.t += 2
                self.l -= 2
        elif self.l:
            # Only the 'l' edge is used: it takes the whole remainder.
            self.l = self.Q
        elif self.t:
            # Only the 't' edge is used: it takes the whole remainder.
            self.t = self.Q
    def getnum(self):
        # Print this node's flow allocation (labels are in Chinese:
        # "flow at node", "flow assigned to l/t", "node's own flow").
        print(self.num,'点流量为:')
        if self.l and self.t:
            print('l分配流量:',self.l)
            print('t分配流量:',self.t)
        elif self.l:
            print('l分配流量:',self.l)
        elif self.t:
            print('t分配流量:',self.t)
        print('节点流量:',self.q)
    def change(self,child):
        """Nudge the l/t split (in steps of 5) until each direction lies
        within [sum of its compulsory children, + sum of optional children].

        child maps 't'/'l' to {'c': compulsory node indices,
        'a': additional node indices}; demands are looked up in global li.
        """
        for k,v in child.items():
            if k=='t':
                tmin = sum([li[x] for x in v['c']])
                tmax = tmin + sum([li[x] for x in v['a']])
                while self.t <= tmin:
                    self.t += 5
                    self.l -= 5
                while self.t > tmax:
                    self.t -= 5
                    self.l += 5
            elif k=='l':
                lmin = sum([li[x] for x in v['c']])
                lmax = lmin + sum([li[x] for x in v['a']])
                while self.l <= lmin:
                    self.l += 5
                    self.t -= 5
                while self.l > lmax:
                    self.l -= 5
                    self.t += 5
def two():
    """Build a 5-node flow chain with child constraints and print each
    node's allocation. d1/d2 give the compulsory ('c') and additional ('a')
    child node indices for the 't' and 'l' edges of nodes 0 and 1."""
    d1 = {
        't':{'c':[1,2],'a':[5,]},
        'l':{'c':[3],'a':[4,5]}
    }
    d2 = {
        't':{'c':[2,],'a':[5,]},
    }
    # Node 0 receives 75 units; downstream nodes receive what the parent
    # forwarded on the corresponding edge.
    h1 = h(0,75,20,True,True,d1)
    h2 = h(1,h1.t,15,True,True,d2)
    h3 = h(2,h2.t,10,True)
    h4 = h(3,h1.l,15,t=True)
    h5 = h(4,h2.l+h4.t,10,t=True)
    h1.getnum()
    print('\n')
    h2.getnum()
    print('\n')
    h3.getnum()
    print('\n')
    h4.getnum()
    print('\n')
    h5.getnum()
if __name__=='__main__':
    two()
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import datetime
import json
from unittest import mock
import pandas as pd
from moto import mock_dynamodb
import airflow.providers.amazon.aws.transfers.hive_to_dynamodb
from airflow.models.dag import DAG
from airflow.providers.amazon.aws.hooks.dynamodb import DynamoDBHook
# Fixed execution date shared by all tests in this module, plus its ISO
# string and date-stamp (YYYY-MM-DD) forms.
DEFAULT_DATE = datetime.datetime(2015, 1, 1)
DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat()
DEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10]
class TestHiveToDynamoDBOperator:
    """Tests for HiveToDynamoDBOperator against moto's mocked DynamoDB,
    with the Hive query result stubbed via a patched get_pandas_df."""
    def setup_method(self):
        # Fresh DAG and DynamoDB hook for every test.
        args = {"owner": "airflow", "start_date": DEFAULT_DATE}
        dag = DAG("test_dag_id", default_args=args)
        self.dag = dag
        self.sql = "SELECT 1"
        self.hook = DynamoDBHook(aws_conn_id="aws_default", region_name="us-east-1")
    @staticmethod
    def process_data(data, *args, **kwargs):
        """Pre-process hook: convert the Hive DataFrame to a list of records."""
        return json.loads(data.to_json(orient="records"))
    @mock_dynamodb
    def test_get_conn_returns_a_boto3_connection(self):
        hook = DynamoDBHook(aws_conn_id="aws_default")
        assert hook.get_conn() is not None
    @mock.patch(
        "airflow.providers.apache.hive.hooks.hive.HiveServer2Hook.get_pandas_df",
        return_value=pd.DataFrame(data=[("1", "sid")], columns=["id", "name"]),
    )
    @mock_dynamodb
    def test_get_records_with_schema(self, mock_get_pandas_df):
        # this table needs to be created in production
        self.hook.get_conn().create_table(
            TableName="test_airflow",
            KeySchema=[
                {"AttributeName": "id", "KeyType": "HASH"},
            ],
            AttributeDefinitions=[{"AttributeName": "id", "AttributeType": "S"}],
            ProvisionedThroughput={"ReadCapacityUnits": 10, "WriteCapacityUnits": 10},
        )
        operator = airflow.providers.amazon.aws.transfers.hive_to_dynamodb.HiveToDynamoDBOperator(
            sql=self.sql,
            table_name="test_airflow",
            task_id="hive_to_dynamodb_check",
            table_keys=["id"],
            dag=self.dag,
        )
        operator.execute(None)
        # One stubbed row should land in the mocked table.
        table = self.hook.get_conn().Table("test_airflow")
        table.meta.client.get_waiter("table_exists").wait(TableName="test_airflow")
        assert table.item_count == 1
    @mock.patch(
        "airflow.providers.apache.hive.hooks.hive.HiveServer2Hook.get_pandas_df",
        return_value=pd.DataFrame(data=[("1", "sid"), ("1", "gupta")], columns=["id", "name"]),
    )
    @mock_dynamodb
    def test_pre_process_records_with_schema(self, mock_get_pandas_df):
        # this table needs to be created in production
        self.hook.get_conn().create_table(
            TableName="test_airflow",
            KeySchema=[
                {"AttributeName": "id", "KeyType": "HASH"},
            ],
            AttributeDefinitions=[{"AttributeName": "id", "AttributeType": "S"}],
            ProvisionedThroughput={"ReadCapacityUnits": 10, "WriteCapacityUnits": 10},
        )
        operator = airflow.providers.amazon.aws.transfers.hive_to_dynamodb.HiveToDynamoDBOperator(
            sql=self.sql,
            table_name="test_airflow",
            task_id="hive_to_dynamodb_check",
            table_keys=["id"],
            pre_process=self.process_data,
            dag=self.dag,
        )
        operator.execute(None)
        # Both stubbed rows share id "1", so only one item remains after
        # the pre_process/upsert path.
        table = self.hook.get_conn().Table("test_airflow")
        table.meta.client.get_waiter("table_exists").wait(TableName="test_airflow")
        assert table.item_count == 1
|
import os
import wave
import pyaudio
import librosa
import librosa.display
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import tensorflow.keras.backend as K
from tensorflow.keras.callbacks import Callback
matplotlib.rcParams['agg.path.chunksize'] = 100000
class GradientSonification(Callback):
    """Convert the norm of the gradients into audio.

    Registers one Keras metric per trainable layer reporting that layer's
    gradient norm; after each batch the norms are mapped to tone
    frequencies, synthesized as short sine bursts, and on train end the
    collected samples are written to a .wav file (optionally with a
    spectrogram image).

    NOTE(review): relies on ``model.loss_functions`` and
    ``optimizer.get_gradients`` — TF1-era Keras APIs; confirm the installed
    TensorFlow version still provides them.
    """
    def __init__(self,
                 path,
                 model,
                 fs=44100,
                 duration=0.01,
                 freq=200.0,
                 plot=True):
        # Ensure the output file always carries a .wav extension.
        self.path = path if path.endswith('.wav') else path + '.wav'
        self.model = model
        # Only layers that actually have trainable weights get a metric.
        self.trainable_layers = [layer.name for layer in self.model.layers if layer.trainable_weights != []]
        self.metrics = [self.get_metrics(layer) for layer in self.trainable_layers]
        self.fs = fs              # sample rate in Hz
        self.duration = duration  # seconds of audio generated per gradient reading
        self.freq = freq          # base frequency the scaled norms are added onto
        self.plot = plot          # save a spectrogram image on train end?
        self.frames = []          # accumulated float32 sample buffers
        self.p, self.stream = None, None  # pyaudio handles, created in on_train_begin
    def get_metrics(self, layer):
        '''Create a custom metric which outputs the gradient norm for a given layer.'''
        def func(y_true, y_pred):
            # L2 norm of d(loss)/d(first trainable weight tensor) of this layer.
            loss = self.model.loss_functions[0](y_true, y_pred)
            weights = self.model.get_layer(layer).trainable_weights
            grad = self.model.optimizer.get_gradients(loss, weights[0])[0]
            norm = K.sqrt(K.sum(K.square(grad)))
            return norm
        metric = func
        # Name the metric so its value shows up in `logs` under a known key.
        metric.__name__ = 'gradient_norm_' + layer
        return metric
    def on_train_begin(self, logs={}):
        # Open the audio output stream once for the whole training run.
        self.p, self.stream = self.open_stream()
    def on_train_end(self, logs={}):
        '''Save the accumulated frames to a mono float32 wav file.'''
        wf = wave.open(self.path, 'wb')
        wf.setnchannels(1)
        wf.setsampwidth(self.p.get_sample_size(pyaudio.paFloat32))
        wf.setframerate(self.fs)
        wf.writeframes(b''.join(self.frames))
        wf.close()
        self.stream.stop_stream()
        self.stream.close()
        self.p.terminate()
        if self.plot:
            self.plot_audio()
    def on_batch_end(self, batch, logs={}):
        '''
        Since the gradient norms are metrics they will be stored in the logs
        dict with keys equivalent to the function __name__ attribute
        '''
        for layer in self.trainable_layers:
            # Map the norm onto a frequency above the base tone.
            # NOTE(review): assumes the logged value is a numpy scalar
            # (it is fed to .astype) — confirm for the Keras version in use.
            tone = self.freq + ((logs.get('gradient_norm_' + layer)) * 100.0)
            tone = tone.astype(np.float32)
            samples = self.generate_tone(tone)
            self.frames.append(samples)
        # Insert silence sample between batches
        silence = np.zeros(samples.shape[0] * 2, dtype=np.float32)
        self.frames.append(silence)
    def open_stream(self):
        """Open a mono float32 pyaudio output stream at the configured rate."""
        p = pyaudio.PyAudio()
        stream = p.open(format=pyaudio.paFloat32,
                        channels=1,
                        rate=self.fs,
                        output=True)
        return p, stream
    def generate_tone(self, tone):
        """Return `duration` seconds of a sine wave at `tone` Hz, at 10% volume."""
        npsin = np.sin(2 * np.pi * np.arange(self.fs*self.duration) * tone / self.fs)
        samples = npsin.astype(np.float32)
        return 0.1 * samples
    def plot_audio(self):
        '''Plot waveplot and spectrogram of recording.'''
        x, sr = librosa.load(self.path)
        # Wave plot
        '''
        fname = os.path.splitext(self.path)[0] + '_waveplot.png'
        plt.figure(figsize=(14, 5))
        librosa.display.waveplot(x, sr=sr)
        plt.savefig(fname)
        '''
        # Spectrogram
        fname = os.path.splitext(self.path)[0] + '_spectrogram.png'
        Xdb = librosa.amplitude_to_db(np.abs(librosa.stft(x)), ref=np.max)
        plt.figure(figsize=(14, 5))
        librosa.display.specshow(Xdb, sr=sr, x_axis='time', y_axis='log')
        plt.colorbar()
        plt.savefig(fname)
__metaclass__ = type  # force new-style classes under Python 2 (no-op on Python 3)
class Person:
    """Minimal class showing methods that read and write instance state."""

    def setName(self, name):
        """Store *name* on the instance (self plays the role of C's 'this')."""
        self.name = name

    def getName(self):
        """Return the previously stored name."""
        return self.name

    def greet(self):
        """Print a greeting built from the stored name."""
        print('Hello,%s' % self.name)
# Demonstrate the Person class: create an instance, set a name, greet.
A = Person()
A.setName('ZYQ')
A.greet()
class Bird:
    """Demonstrates a class attribute shared by every instance."""

    song = 'Squaak!'

    def sing(self):
        """Print this bird's song."""
        print(self.song)
bird = Bird()
bird.sing()
# A bound method is an ordinary object: it can be bound to another name.
birdsong = bird.sing
birdsong()  # equivalent to bird.sing()
# Private(ish) attributes are demonstrated below.
class Secretive:
    """Shows name mangling: double-underscore names get class-prefixed."""

    def __inaccessible(self):
        """Unreachable from outside under its plain name (it is mangled)."""
        print('You can\'t see it!')

    def accessible(self):
        """Public wrapper that calls the mangled method internally."""
        self.__inaccessible()
Secret = Secretive()
# The next line would fail — the name is mangled, so only the public
# interface can be used under the plain name.
#Secret.__inaccessible()
Secret.accessible()  # reaches __inaccessible() indirectly
Secret._Secretive__inaccessible()  # the mangled name itself still works
# Superclass for the inheritance demonstration.
class Filter:
    """Generic filter that drops any element found in self.blocked."""

    def init(self):
        # Deliberately a plain method (not __init__); callers invoke it explicitly.
        self.blocked = []

    def ffilter(self, sequence):
        """Return the elements of *sequence* that are not blocked."""
        return [item for item in sequence if item not in self.blocked]
class PPPFilter(Filter):
    """Filter specialization whose blocked list contains the string 'PPP'."""

    def init(self):
        self.blocked = ['PPP']
# Exercise the subclass: every 'PPP' entry is filtered out of the list.
K = PPPFilter()
K.init()
l = ['PPP','TT','DD','PPP','DP','PPP']
t = K.ffilter(l)
print(t)
|
#!/usr/bin/env python
import copy
from studious import question, utils
all_answers = None
class Answer:
    """A user's answer to a question, mirroring one row of the 'answers' table."""

    # Class-level defaults (overwritten per-instance in __init__).
    key = 0
    question_key = 0
    user_key = 0
    answer = ""

    def __contains__(self, item):
        """True if *item* occurs in the answer text."""
        return item in self.answer

    def __str__(self, verbose=False):
        # NOTE: non-standard extra argument — str(a) gives the terse form,
        # a.__str__(verbose=True) dumps the full row.
        if not verbose:
            return str(question.by_key(self.question_key)) + ", " + self.answer + ", "
        return str(self.key) + ", " + str(self.question_key) + ", " + str(self.user_key) + ", " + self.answer + ", "

    def __init__(self, key, question_key, user_key, answer):
        self.key = key                    # primary key of this answer row
        self.question_key = question_key  # key of the question being answered
        self.user_key = user_key          # key of the answering user
        self.answer = answer              # the answer text itself

    def __eq__(self, other):
        # Bug fix: this was Python-2-only __cmp__, which Python 3 ignores
        # entirely (== silently fell back to identity). Answers compare
        # equal when their row keys match, consistent with __lt__.
        return isinstance(other, Answer) and self.key == other.key

    def __hash__(self):
        # Defining __eq__ disables the default hash; restore it on the key.
        return hash(self.key)

    def __lt__(self, other):
        return self.key < other.key

    # Criteria examples
    # TODO:: Need a better way to eval correct
    def correct(self):
        """True if this answer matches the stored correct answer for its question."""
        return self.answer == question.by_key(self.question_key).answer

    def is_users(self, user):
        """True if this answer belongs to user key *user*."""
        return self.user_key == user

    # Operations
    def update(self):
        """Persist this answer's current state back to the database."""
        utils.update_answer(self.key, self.question_key, self.answer)
# Helpers
def get_all():
    """Reload every row of the 'answers' table and return a deep copy.

    Refreshes the module-level cache *all_answers* as a side effect; the
    deep copy keeps callers from mutating the cached objects.
    """
    global all_answers
    all_answers = utils.get_all_rows("answers", Answer)
    return copy.deepcopy(all_answers)
def get(criteria, answers=None):
    """Return the answers in *answers* (default: a fresh load) matching *criteria*.

    Bug fix: the default was ``answers=get_all()``, which Python evaluates
    exactly once at import time — every later call silently reused that
    stale snapshot, and importing the module hit the database. The sentinel
    default fetches fresh rows per call instead.
    """
    if answers is None:
        answers = get_all()
    return utils.get_items(criteria, answers)
def by_key(key):
    """Look up the answers whose primary key equals *key*."""
    def matches(candidate):
        return candidate.key == key
    return get(matches, get_all())
def get_correct_answers(answers):
    """Filter *answers* down to the ones whose text matches the correct answer."""
    return get(lambda candidate: candidate.correct(), answers)
|
"""Config file."""
LANGUAGE = 'japanese' #
LEXICON_PATHS = {'english': ['data/raw/english/celex_all.csv', '\\'],
'french': ['data/raw/french/french_lexique.txt', '\t'],
'german': ['data/raw/german/celex_german_all.csv', '\\'],
'japanese': ['data/raw/japanese/japanese_labeled_columns.csv', None], # formatting fixed, with column labels
'dutch': ['data/raw/dutch/celex_dutch.csv', '\\'] # Need to fix some issues with formatting
}
# try different n-phone models
MODEL_INFO = {'n': 4, 'smoothing': .01,
'match_on': 'sylls' # phones vs. sylls
}
ITERATIONS = 10 # number to generate
# http://www.iub.edu/~psyling/papers/celex_eug.pdf
# See pg. 179
VOWEL_SETS = {'german': set("i#a$u3y)eo|o1246WBXIYE/{&AVOU@^cq0~"),
'english': set("i#$u312456789IE{QVU@cq0~"),
'dutch': set("i!auy()*<e|oKLMIEAO}@"),
'french': set("i5§yEO9a°e@2uo"),
'mandarin': set("i5§yEO9a°e@2uo"),
'japanese': set("aeiouEOIU12345YN") # Japanese includes "N", placeless nasal coda
}
PHON_COLUMN = {'german': 'PhonDISC',
'english': 'PhonDISC',
'dutch': 'PhonDISC',
'japanese': 'phonetic_remapped', # Requires remapping double-characters
'french': '2_phon'}
WORD_COLUMN = {'german': 'Word',
'english': 'Word',
'dutch': 'Word',
'japanese': 'orth_form_romaji',
'french': '3_lemme'}
# Maybe preserve this so other languages can have remappings too?
PHONETIC_REMAPPINGS = {
'japanese': {
'ky': 'K', # Already converted in pronuncation field
'gy': 'G', # Already converted in pronuncation field
'sh': 'S', # Already converted in pronuncation field
'ch': 'C', # Already converted in pronuncation field
'ts': 'c', # Already converted in pronuncation field
'ny': 'Y', # Already converted in pronuncation field
'hy': 'H', # Already converted in pronuncation field
'by': 'B', # Already converted in pronuncation field
'py': 'P', # Already converted in pronuncation field
'my': 'M', # Already converted in pronuncation field
'ry': 'R', # Already converted in pronuncation field
'ee': 'E', # Represents result of conversion from romaji to pronunciation field
'oo': 'O', # Represents result of conversion from romaji to pronunciation field
'ji': 'I', # Represents result of conversion from romaji to pronunciation field
'zu': 'U', # Represents result of conversion from romaji to pronunciation field
'ue': '1', # Represents result of conversion from romaji to pronunciation field
'ui': '2', # Represents result of conversion from romaji to pronunciation field
'uo': '3', # Represents result of conversion from romaji to pronunciation field
'ua': '4', # Represents result of conversion from romaji to pronunciation field
'ie': '5', # Represents result of conversion from romaji to pronunciation field
'yu': 'Y' # Represents result of conversion from romaji to pronunciation field
}
}
|
import tensorflow as tf
from prepare_data import generate_datasets
from train import get_model
def test(save_model_dir): |
# !/usr/bin/python
# _*_ coding:utf-8 _*_
# Draws a gray 10-unit grid over the 360x360 area, then performs a red
# random walk from the center until the walker leaves the area.
import turtle
from random import randint

turtle.speed(10)
turtle.color('gray')
# Horizontal grid lines, one every 10 units from y=-180 to y=180.
x = -180
for y in range(-180, 180 +1, 10):
    turtle.penup()
    turtle.goto(x, y)
    turtle.pendown()
    turtle.forward(360)
# Vertical grid lines: heading turns south, drawing down from y=180.
y = 180
turtle.right(90)
for x in range(-180, 180 + 1, 10):
    turtle.penup()
    turtle.goto(x, y)
    turtle.pendown()
    turtle.forward(360)
# Random walk in red, 10 units per step, starting from the origin.
turtle.pensize(3)
turtle.color('red')
turtle.penup()
turtle.goto(0, 0)
turtle.pendown()
x = y = 0
while abs(x) < 180 and abs(y) < 180:
    # NOTE(review): randint(0, 5) — values 4 and 5 are not handled below, so
    # those iterations do nothing; presumably randint(0, 3) was intended.
    r = randint(0, 5)
    if r == 0:
        x += 10
        turtle.setheading(0)    # east
        turtle.forward(10)
    elif r == 1:
        y -= 10
        turtle.setheading(270)  # south
        turtle.forward(10)
    elif r == 2:
        x -= 10
        turtle.setheading(180)  # west
        turtle.forward(10)
    elif r == 3:
        y += 10
        turtle.setheading(90)   # north
        turtle.forward(10)
turtle.done()
|
import config
import time, thread
def init():
    """Initialize the buzzer GPIO pin.

    Sets module globals: BUZZER_PIN (BCM pin number), Pi (True when RPi.GPIO
    imported successfully, i.e. we are on a Raspberry Pi) and GPIO (the
    library module). Off-Pi the import fails, a message is printed, and Pi
    stays False so callers fall back to mock behavior.
    """
    global BUZZER_PIN, Pi, GPIO
    BUZZER_PIN = 12
    Pi = False
    try:
        import RPi.GPIO as GPIO
        Pi = True
        GPIO.setmode(GPIO.BCM)
        GPIO.setwarnings(False)
        GPIO.setup(BUZZER_PIN, GPIO.OUT)
    except Exception:
        # Was a bare 'except:', which also swallowed KeyboardInterrupt and
        # SystemExit; Exception keeps the best-effort fallback without that.
        print("GPIO not found")
def alert():
    """Sound the buzzer for five seconds (requires init() to have run on a Pi).

    Bug fix: GPIO.output() was called without the channel argument
    (``GPIO.output(GPIO.HIGH)``), which raises TypeError on RPi.GPIO — the
    pin number must be passed explicitly.
    """
    GPIO.output(BUZZER_PIN, GPIO.HIGH)
    time.sleep(5)
    GPIO.output(BUZZER_PIN, GPIO.LOW)
def setAlarm():
    """Trigger the buzzer alarm (mocked off-Pi) and clear the alert flag in the DB."""
    if not Pi:
        #Code is in dev
        print("Alarm set mock")
    else:
        #Code is in prod
        # Bug fix: the args tuple was written as (GPIO) — parenthesized
        # expression, not a tuple — so start_new_thread raised TypeError;
        # alert() takes no arguments, so pass an empty tuple.
        thread.start_new_thread(alert, ())
    config.db.child("Notif").setValue(0)  # remove alert from db
|
from .utils import lback_untitled, lback_backup_dir, lback_backup_ext, lback_db, lback_output, lback_error, lback_print, lback_id,lback_settings, get_folder_size, lback_focused_agent, lback_unique_agent_name
from .restore import Restore, RestoreException
from .backup import Backup, BackupException
from .operation_backup import OperationBackup
from .operation_modify import OperationModify
from .operation_restore import OperationRestore
from .operation_ls import OperationLs
from .operation_rm import OperationRm
from .operation_mv import OperationMv
from .operation_relocate import OperationRelocate
from .operation_agent_add import OperationAgentAdd
from .operation_agent_rm import OperationAgentRm
from .operation_agent_ls import OperationAgentLs
from .operation_agent_focus import OperationAgentFocus
from lback_grpc.client import Client
from os import getenv
import glob
import shutil
import argparse
import time
import os
import json
import fnmatch
class Runtime(object):
    """Command-line entry point for lback.

    Builds the argparse command tree (backup / modify / restore / rm / ls /
    mv / relocate / agent-*) in __init__ and dispatches the parsed
    subcommand to the matching Operation* class in run().
    """
    def __init__(self, args):
        """Build the parser and parse the command line.

        NOTE(review): the *args* parameter is never used — parse_args()
        reads sys.argv directly; confirm before relying on it.
        """
        untitled = lback_untitled()  # default backup name when --name is omitted
        parser = argparse.ArgumentParser()
        sub_parser = parser.add_subparsers()
        # Default target agent for backup/restore is the currently focused one.
        target = lback_focused_agent()
        # backup: back up files/folders to the target agent
        backup_parser = sub_parser.add_parser("backup", help="Backup files and folders")
        backup_parser.add_argument("folder", help="Select a folder", nargs="*")
        backup_parser.add_argument("--name", help="Name for backup", default=untitled)
        backup_parser.add_argument("--diff", help="Runs a differential backup")
        backup_parser.add_argument("--remove", action="store_true", help="Remove backup when done", default=False)
        backup_parser.add_argument("--distribution-strategy", help="Defines the distribution strategy for the backup", default="shared")
        backup_parser.add_argument("--local",default=True, action="store_true")
        backup_parser.add_argument("--encryption-key", help="Set an encryption key for the backup")
        backup_parser.add_argument("--compression", help="How to compress backup. supports: LZ4")
        backup_parser.add_argument("--target", help="Target agent for the backup", default=target)
        backup_parser.set_defaults(backup=True)
        # modify: change settings on an existing backup
        modify_parser = sub_parser.add_parser("modify", help="Make modifications to a backup")
        modify_parser.add_argument("id", help="Select the ID", nargs="*")
        modify_parser.add_argument("--distribution-strategy", help="Defines the distribution strategy for the backup", default="shared")
        modify_parser.set_defaults(modify=True)
        # restore: bring a backup back onto an agent
        restore_parser = sub_parser.add_parser("restore", help="Run a restore")
        restore_parser.add_argument("id", help="Select the ID", nargs="*")
        restore_parser.add_argument("--name", help="Filter to a specific name", default=False)
        restore_parser.add_argument("--clean", action="store_true", help="Clean backup on completion", default=False)
        restore_parser.add_argument("--folder", help="Restore to specific path", default=False)
        restore_parser.add_argument("--target", help="Target agent to restore on", default=target)
        restore_parser.set_defaults(restore=True)
        # rm: delete a backup
        rm_parser = sub_parser.add_parser("rm", help="Delete existing backup")
        rm_parser.add_argument("id", help="Select the ID", nargs="*")
        rm_parser.add_argument("--name", help="Filter to a specific name", default=False)
        rm_parser.add_argument("--all", help="Remove all copies", default=True, action="store_true")
        rm_parser.add_argument("--target", help="Remove from a specific agent", default=False)
        rm_parser.set_defaults(rm=True)
        # ls: list backups
        ls_parser = sub_parser.add_parser("ls", help="List backups")
        ls_parser.add_argument("--system", help="List system wide backups", default=False, action="store_true")
        ls_parser.set_defaults(ls=True)
        # mv: move a mounted backup
        mv_parser = sub_parser.add_parser("mv", help="Move mounted backup")
        mv_parser.add_argument("id", help="Select the ID")
        mv_parser.add_argument("dst", help="Select the Destination")
        mv_parser.set_defaults(mv=True)
        mv_parser.set_defaults(name=False)
        # relocate: move a backup between locations/shards
        relocate_parser = sub_parser.add_parser("relocate", help="Relocate a certain backup")
        relocate_parser.add_argument("id", help="Select the ID")
        relocate_parser.add_argument("src", help="Select the Source")
        relocate_parser.add_argument("dst", help="Select the Dest")
        relocate_parser.add_argument("--shard", help="Shard to relocate for sharded distribution strategies")
        relocate_parser.set_defaults(relocate=True)
        relocate_parser.set_defaults(name=False)
        # agent-add / agent-rm / agent-ls / agent-focus: agent management
        agent_add_parser = sub_parser.add_parser("agent-add", help="ADD, DELETE agents")
        agent_add_parser.add_argument("host", help="host of agent")
        agent_add_parser.add_argument("port", help="port of agent")
        agent_add_parser.add_argument("--name", help="name of agent", default=lback_unique_agent_name())
        agent_add_parser.set_defaults(agent_add=True)
        agent_add_parser.set_defaults(name=False)
        agent_rm_parser = sub_parser.add_parser("agent-rm", help="ADD, DELETE agents")
        agent_rm_parser.add_argument("id", help="ID of agent")
        agent_rm_parser.set_defaults(agent_rm=True)
        agent_rm_parser.set_defaults(name=False)
        agent_ls_parser = sub_parser.add_parser("agent-ls", help="ADD, DELETE agents")
        agent_ls_parser.set_defaults(agent_ls=True)
        agent_ls_parser.set_defaults(name=False)
        agent_focus_parser = sub_parser.add_parser("agent-focus", help="Focuses on an agent for backups/restores")
        agent_focus_parser.add_argument("id", help="ID of agent")
        agent_focus_parser.set_defaults(agent_focus=True)
        self.args = parser.parse_args()
    def run(self):
        """Instantiate the Operation matching the chosen subcommand and run it."""
        args = self.args
        backup_dir = lback_backup_dir()  # NOTE(review): unused in this method — confirm it is needed
        db = lback_db()
        def check_parser(name):
            # True when set_defaults() put the flag *name* on the namespace.
            if name in dir( args ) and getattr( args, name ):
                return True
            return False
        client = Client()
        # Every operation receives the same (args, grpc client, db) triple.
        operation_args = [ args, client, db ]
        if check_parser("backup"):
            operation = OperationBackup(*operation_args)
        if check_parser("modify"):
            operation = OperationModify(*operation_args)
        if check_parser("restore"):
            operation = OperationRestore(*operation_args)
        if check_parser("rm"):
            operation = OperationRm(*operation_args)
        if check_parser("ls"):
            operation = OperationLs(*operation_args)
        if check_parser("mv"):
            operation = OperationMv(*operation_args)
        if check_parser("relocate"):
            operation = OperationRelocate(*operation_args)
        if check_parser("agent_add"):
            operation = OperationAgentAdd(*operation_args)
        if check_parser("agent_rm"):
            operation = OperationAgentRm(*operation_args)
        if check_parser("agent_ls"):
            operation = OperationAgentLs(*operation_args)
        if check_parser("agent_focus"):
            operation = OperationAgentFocus(*operation_args)
        # NOTE(review): if no subcommand was given, 'operation' is unbound and
        # the next line raises NameError — confirm this is acceptable.
        operation.run()
        db.close()
|
#Making a todo list
def make_list():
    """Prompt for todo items one at a time; entering 'q' stops and returns the list."""
    todo_list = []
    while True:
        list_item = input('''What do you need to do today?
Enter one at a time or enter q to quit. ''')
        if list_item == 'q':
            return todo_list
        todo_list.append(list_item)
def show_list(todos):
    """Print each todo item under a header and return the list unchanged.

    The parameter was previously named ``list``, shadowing the builtin;
    renamed (the caller in this script passes it positionally).
    """
    print('Here is what you need to do for today.')
    for item in todos:
        print(item)
    return todos
def add_to_file(todos):
    """Write every todo in *todos* to 'todolist.txt', one item per line."""
    with open('todolist.txt', 'w') as out:
        out.writelines(f'{item}\n' for item in todos)
# Build the list interactively, echo it back, then persist it to disk.
add_to_file(show_list(make_list()))
|
# Xander Houdek
# 08/02/20
# minesolver.py - automatically solves a game of minesweeper.py, used for
# my CS325 Algorithms portfolio project
import random
import pygame
from pygame.locals import *
from minesweeper import *
# Global constant, used to check all adjacent cells:
# (dx, dy) offsets of the eight neighbors of a cell.
PROXIMITY = [
    (0, 1), (1, 1), (1, 0), (1, -1), (0, -1), (-1, -1), (-1, 0), (-1, 1)
]
class ModdedGame(Game):
    """Game subclass that plays minesweeper automatically.

    Repeatedly applies two deterministic rules to each revealed numbered
    cell (flag all hidden neighbors when their count equals the number;
    reveal unflagged neighbors when the flag count equals the number) and
    falls back to a random guess when neither rule makes progress.
    """
    def __init__(self, difficulty):
        Game.__init__(self, difficulty)
        # Cells whose neighborhood is fully resolved; skipped on later passes.
        self.completed_cells = []
    def check_cell(self, cell):
        """
        Checks a cell to see if it is possible to add flags and does so if possible.
        Checks to see if all flags are checked and reveals other cells if possible.
        Adds cell to list of completed cells to be skipped if possible.
        Returns True when any flag/reveal/completion action was taken.
        """
        # check adjacent cells for flags and visibility
        adj_flags = 0
        hidden_cells = 0
        did_something = False
        for mod in PROXIMITY:
            oob = [-1, cell.level[0]]  # out of bounds
            if cell.y+mod[1] not in oob and cell.x+mod[0] not in oob:
                neighbor = self.cells[cell.y+mod[1]][cell.x+mod[0]]
                if neighbor.visible == False:
                    hidden_cells += 1
                if neighbor.flag == True:
                    adj_flags += 1
        # add flags: every hidden neighbor must be a mine.
        if hidden_cells == cell.number:
            for mod in PROXIMITY:
                oob = [-1, cell.level[0]]  # out of bounds
                if cell.y+mod[1] not in oob and cell.x+mod[0] not in oob:
                    neighbor = self.cells[cell.y+mod[1]][cell.x+mod[0]]
                    if neighbor.flag == False and neighbor.visible == False:
                        flag = neighbor.set_flag()
                        did_something = True
                        if flag == True:
                            self.flags += 1
                            neighbor.color = YELLOW
                        else:
                            self.flags -= 1
                            neighbor.color = DARK_GRAY
            self.draw()
        # reveal cells: all mines are flagged, the rest are safe.
        if adj_flags == cell.number:
            for mod in PROXIMITY:
                oob = [-1, cell.level[0]]  # out of bounds
                if cell.y+mod[1] not in oob and cell.x+mod[0] not in oob:
                    neighbor = self.cells[cell.y+mod[1]][cell.x+mod[0]]
                    if neighbor.flag == False and neighbor.visible == False:
                        state = neighbor.show_cell()
                        did_something = True
                        if state == False:
                            self.state = "LOST"
                            # NOTE(review): local assignment, never read in this
                            # method — looks like dead code; confirm intent.
                            solved = True
            if cell.flag == True:
                cell.flag = False
                cell.color = BG
                self.flags -= 1
            self.draw()
        # add cell to completed_cells
        if hidden_cells == adj_flags:
            self.completed_cells.append(cell)
            did_something = True
        return did_something
    def play(self):
        """Main loop: apply the solver rules, guess when stuck, handle quit keys."""
        self.draw()
        running = True
        solved = False
        count = 0
        while running:
            # solving algorithm
            if not solved:
                did_nothing = True
                for row in self.cells:
                    for cell in row:
                        # Only revealed, numbered, unresolved cells are worth checking.
                        if cell.visible == True and cell.number != 0:
                            if cell not in self.completed_cells:
                                mid_x = cell.x_pos + cell.length // 2
                                mid_y = cell.y_pos + cell.length // 2
                                pygame.mouse.set_pos((mid_x, mid_y))
                                did_something = self.check_cell(cell)
                                if did_something == True:
                                    did_nothing = False
                                #self.draw()
                # No rule fired anywhere: fall back to a random guess.
                if did_nothing == True:
                    guess = self.make_guess()
                    if guess == False:
                        solved = True
                    count += 1
                    print(count)
            # quit game
            for event in pygame.event.get():
                if event.type == QUIT:
                    running = False
                if event.type == KEYDOWN and event.key == K_q:
                    running = False
            victory = self.check_victory()
            if victory == True:
                self.state = "WON"
                solved = True
            self.draw()
    def make_guess(self):
        """Reveal one random hidden, unflagged cell; False when it was a mine."""
        print("The game is currently unsolvable without guessing.")
        print("The program will make a random guess.")
        print()
        # pick a random unflagged cell
        searching = True
        while searching:
            x = random.randint(
                0, self.level[0] - 1)
            y = random.randint(0, self.level[0] - 1)
            cell = self.cells[y][x]
            if cell.flag == False and cell.visible == False:
                searching = False
        mid_x = cell.x_pos + cell.length // 2
        mid_y = cell.y_pos + cell.length // 2
        pygame.mouse.set_pos((mid_x, mid_y))
        state = cell.show_cell()
        if state == False:
            self.state = "LOST"
            self.draw()
            return False
        self.draw()
        return True
class ModdedMenu(Menu):
    """Menu variant that launches the self-solving ModdedGame."""

    def __init__(self):
        Menu.__init__(self)

    def start_game(self, difficulty):
        """Starts a modded game with selected difficulty"""
        ModdedGame(difficulty).play()
def main():
    """Entry point: print credits, then open the solver menu."""
    print("Game made by Xander Houdek")
    print("Solver also made by Xander Houdek :)")
    print()
    ModdedMenu().start()
if __name__ == "__main__":
main() |
from flask import Flask
from flask import flash
from flask import render_template
from flask import request
from flask import session
from flask import redirect
from flask import url_for
from diagnostico.reglas import decidirRegla, decidirReglaFormeFruste, decidirReglaQueratocono, decidirReglaSubclinico, decidirReglaSano
import numpy as np
import pandas as pd
from flask import jsonify
from flask_restful import Resource, Api
from flask_mysqldb import MySQL
import pickle
import json
app = Flask(__name__)
# Pre-trained classifier, loaded once at startup.
# NOTE(review): the open() handle is never closed, and pickle.load is only
# safe if model.pkl is trusted — consider a 'with' block; confirm provenance.
model = pickle.load(open('model.pkl','rb'))
api = Api(app)
class prediccion(Resource):
    """REST resource: classify a corneal-topography CSV and explain the rule."""

    def get(self, archivo):
        """Classify the measurements in datosprueba/<archivo>.

        Returns a dict with 'resultado' (the class label) and 'regla' (the
        rule explanation for that class). An unrecognized model output
        yields both lists empty, as before.
        """
        df = pd.read_csv('datosprueba/'+archivo)
        # Feature columns in the exact order the model was trained on.
        x = np.array(df[["Rh F (mm)","Rv F (mm)","Astig F (D)","Asph. Q F","Rh B (mm)","Rv B (mm)","K2 B (D)","Astig B (D)","Asph. Q B","Pachy Apex","Pachy Min","ISV","IVA","IHA","IHD","K1 (D)","K2 (D)","Astig","RPI Max","K max","I-S","AC Depth","Ecc Sup","Ecc Inf","Cor.Vol.","KPD","Ecc (Front)","Ecc (Back)","Sag. Height Mean [µm]","ACD Apex"]])
        prediccion = model.predict(x[0:1])
        resultado = prediccion.tolist()
        data = {}
        data['regla'] = []
        data['resultado'] = []
        # Previously four near-identical if/elif branches; a lookup table
        # removes the duplicated append logic.
        clases = {
            0: ('Forme Fruste', decidirReglaFormeFruste),
            1: ('Queratocono', decidirReglaQueratocono),
            2: ('Subclinico', decidirReglaSubclinico),
            3: ('Ojo sano', decidirReglaSano),
        }
        entrada = clases.get(resultado[0])
        if entrada is not None:
            clasificacion, regla = entrada
            data['resultado'].append({
                'clasificacion':clasificacion
            })
            data['regla'].append(regla(x[0]))
        return data
# Route: GET /prediccion/<archivo> -> classification + rule for that CSV file.
api.add_resource(prediccion,'/prediccion/<archivo>')
if __name__ == '__main__':
    # Debug server is for local development only.
    app.run(debug=True)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.